pax_global_header00006660000000000000000000000064132474654470014532gustar00rootroot0000000000000052 comment=6feee1ccc265c765181533bbc719f91c364de2b3 gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/000077500000000000000000000000001324746544700220175ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/.codeclimate.yml000066400000000000000000000003701324746544700250710ustar00rootroot00000000000000--- engines: bundler-audit: enabled: true duplication: enabled: true config: languages: - ruby fixme: enabled: true rubocop: enabled: true exclude_paths: - spec/ - lib/vendor/ - go/vendor/ - tmp/ - coverage/ gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/.gitignore000066400000000000000000000003011324746544700240010ustar00rootroot00000000000000config.yml tmp/* .idea *.log /*.log* authorized_keys.lock coverage/ .gitlab_shell_secret .bundle tags .bundle/ custom_hooks hooks/*.d /go_build /bin/gitaly-upload-pack /bin/gitaly-receive-pack gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/.gitlab-ci.yml000066400000000000000000000030101324746544700244450ustar00rootroot00000000000000image: "ruby:2.3" before_script: - export PATH=~/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/go/bin - apt update - apt install rsync -y - gem install --bindir /usr/local/bin bundler - cp config.yml.example config.yml - bundle install rspec: script: - bundle exec rspec -f d spec tags: - ruby except: - tags rubocop: script: - bundle exec rubocop tags: - ruby except: - tags #ruby 2.2 rspec:ruby2.2: image: ruby:2.2 script: - bundle exec rspec -f d spec tags: - ruby except: - tags #ruby 2.1 rspec:ruby2.1: image: ruby:2.1 script: - bundle exec rspec -f d spec tags: - ruby except: - tags .go: &go_definition before_script: - apt-get update -qq && apt-get install -y ruby - ruby -v script: - go version - which go - bin/compile - support/go-test - support/go-format check go:1.8: <<: *go_definition image: golang:1.8 codeclimate: before_script: [] image: docker:latest variables: DOCKER_DRIVER: overlay services: - docker:dind script: - docker pull codeclimate/codeclimate - docker run --env CODECLIMATE_CODE="$PWD" --volume "$PWD":/code --volume /var/run/docker.sock:/var/run/docker.sock --volume /tmp/cc:/tmp/cc codeclimate/codeclimate analyze -f json > codeclimate.json artifacts: paths: [codeclimate.json] sast: before_script: [] image: registry.gitlab.com/gitlab-org/gl-sast:latest script: - /app/bin/run . 
artifacts: paths: [gl-sast-report.json]gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/.rspec000066400000000000000000000000101324746544700231230ustar00rootroot00000000000000--color gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/.rubocop.yml000066400000000000000000000017331324746544700242750ustar00rootroot00000000000000# Exclude some of GitLab files AllCops: Exclude: - 'spec/**/*' - 'vendor/**/*' - 'tmp/**/*' - 'bin/**/*' - 'hooks/**/*' - 'Guardfile' Layout/DotPosition: Enabled: false Lint/AmbiguousBlockAssociation: Enabled: false Metrics/LineLength: Enabled: false Metrics/MethodLength: Enabled: false Metrics/BlockLength: Enabled: false Metrics/ParameterLists: Enabled: false Metrics/AbcSize: Enabled: false Metrics/CyclomaticComplexity: Enabled: false Metrics/PerceivedComplexity: Enabled: false Style/Documentation: Enabled: false Style/StringLiterals: Enabled: false Style/StringLiterals: Enabled: false Style/GlobalVars: Enabled: false Style/AccessorMethodName: Enabled: false Style/GuardClause: Enabled: false Style/RescueModifier: Enabled: false Style/PercentLiteralDelimiters: Enabled: false Style/IfUnlessModifier: Enabled: false Style/RegexpLiteral: Enabled: false Style/SpecialGlobalVars: Enabled: false gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/CHANGELOG000066400000000000000000000245601324746544700232400ustar00rootroot00000000000000v6.0.4 - Don't delete GL_REPOSITORY environment variable from post-receive hook (!191) v6.0.3 - Print new project information in post-receive v6.0.2 - Use grpc-go 1.9.1 (!184) - Update gitaly-proto and gitaly libs (!185) v6.0.1 - Fix git push by removing a bad require in the pre-receive hook (!183) v6.0.0 - Remove bin/gitlab_projects (!180) - Remove direct redis integration (!181) - Remove support unhiding of all references for Geo nodes (!179) v5.11.0 - Introduce a more-complete implementation of bin/authorized_keys (!178) v5.10.3 - Remove unused redis bin configuration v5.10.2 - Print redirection message when pushing into renamed project v5.10.1 - Use 'git clone --no-local' when creating a fork (!176) v5.10.0 - Add a 'fork-repository' command that works with hashed storage (!174) v5.9.4 - Add relative git object dir envvars to check access request v5.9.3 - Expose GitLab username to hooks in `GL_USERNAME` environment variable v5.9.2 - Fix pre-receive error when gitlab doesn't have /internal/pre_receive (!169) v5.9.1 - Adds --force option to push branches v5.9.0 - Support new /internal/pre-receive API endpoint for post-receive operations - Support new /internal/post-receive API endpoint for post-receive operations - Support `redis` field on /internal/check API endpoint v5.8.1 - Support old versions of ruby without monotonic clock v5.8.0 - Fix SSH support for Git for Windows v2.14 v5.7.0 - Support unhiding of all refs via Gitaly v5.6.2 - Bump redis-rb library to 3.3.3 v5.6.1 - Fix setting permissions of SSH key tempfiles - Fix a missing constant error when using SSH authentication v5.6.0 - SSH authentication support v5.5.0 - Support unhiding of all references for Geo nodes v5.4.0 - Update Gitaly vendoring to use new RPC calls instead of old deprecated ones v5.3.1 - Gracefully handle internal API errors when getting merge request URLs v5.3.0 - Add ability to have git-receive-pack and git-upload-pack to go over Gitaly v5.2.1 - Revert changes in v5.2.0 v5.2.0 - Disable RubyGems to increase performance v5.1.1 - Revert "Remove old `project` parameter, use `gl_repository` instead" v5.1.0 - Add `gitlab-keys list-key-ids` 
subcommand for iterating over key IDs to find keys that should be deleted v5.0.6 - Remove old `project` parameter, use `gl_repository` instead - Use v4 of the GitLab REST API v5.0.5 - Use gl_repository if present when enqueing Sidekiq PostReceive jobs v5.0.4 - Handle GL_REPOSITORY env variable and use it in API calls and Sidekiq enqueuing v5.0.3 - Use recursive lookup for git repositories in the bin/create-hooks script v5.0.2 - Adds timeout option to push branches v5.0.1 - Fetch repositories with `--quiet` option by default v5.0.0 - Remove support for git-annex v4.1.1 - Send (a selection of) git environment variables while making the API call to `/allowed`, !112 v4.1.0 - Add support for global custom hooks and chained hook directories (Elan Ruusamäe, Dirk Hörner), !113, !111, !93, !89, #32 - Clear up text with merge request after new branch push (Lisanne Fellinger) v4.0.3 - Fetch repositories with `--prune` option by default v4.0.2 - Fix gitlab_custom_hook dependencies v4.0.1 - Add instrumentation to push hooks v4.0.0 - Use full repository path for API calls v3.6.6 - Re-use the default logger when logging metrics data v3.6.5 - Test against ruby 2.3 - Instrument GitLab Shell and log metrics data to a file v3.6.4 - Fix rsync with ionice command building - Fix short circuit logic between rsync with and without ionice for storage migrations v3.6.3 - Re-exposing GL_ID to custom hooks v3.6.2 - Enable GIT_TRACE/GIT_TRACE_PACKET/GIT_TRACE_PERFORMANCE by providing the git_trace_log_file config key v3.6.1 - Set a low IO priority for storage moves to lower performance impact v3.6.0 - Added full support for `git-lfs-authenticate` to properly handle LFS requests and pass them on to Workhorse v3.5.0 - Add option to recover 2FA via SSH v3.4.0 - Redis Sentinel support v3.3.3 - Print URL for new or existing merge request after push v3.3.2 - Improve authorized_keys check v3.3.1 - Manage authorized_keys permissions continuously v3.3.0 - Track ongoing push commands - Add command to move repositories between repository storages v3.2.1 - Allow gitlab-project's fork-project command to fork projects between different repository storages v3.2.0 - Allow GitLab Shell to check for allowed access based on the used Git protocol - Add an error message when using shell commands with incompatible GitLab versions v3.1.0 - Refactor repository paths handling to allow multiple git mount points v3.0.1 - Update PostReceive worker to provide enqueued_at time. 
v3.0.0 - Remove rm-tag command (Robert Schilling) - Remove create-branch and rm-branch commands (Robert Schilling) - Update PostReceive worker so it logs a unique JID in Sidekiq - Remove update-head command - Use Redis Ruby client instead of shelling out to redis-cli v2.7.2 - Do not prune objects during 'git gc' v2.7.1 - Add new command to list tags from a remote repo - Add the ability to fetch remote repo with or without tags v2.7.0 - Add support for ssh AuthorizedKeysCommand query by key v2.6.13 - Add push-branches command - Add delete-remote-branches command v2.6.12 - Fix git-annex issue not working using custom SSH port repositories v2.6.11 - Increase HTTP timeout and log request durations - Workaround for a Webrick issue on Ruby 2.2 - New optional `--force` parameter for `gitlab-projects fetch-remote` v2.6.10 - Add git gc for housekeeping v2.6.9 - Remove trailing slashes from gitlab_url v2.6.8 - Revert git-lfs-authenticate command from white list v2.6.7 - Exit with non-zero status when import-repository fails - Add fetch-remote command v2.6.6 - Do not clean LANG environment variable for the git hooks when working through the SSH-protocol - Add git-lfs-authenticate command to white list (this command is used by git-lfs for SSO authentication through SSH-protocol) - Handle git-annex and gcryptsetup v2.6.5 - Handle broken symlinks in create-hooks v2.6.4 - Remove keys from authorized_keys in-place - Increase batch_add_keys lock timeout to 300 seconds - If git-annex is enabled set GIT_ANNEX_SHELL_LIMITED variable v2.6.3 - Prevent keys with a very specific comment from accidentally being deleted. v2.6.2 - Include ecdsa keys in `gitlab_keys list-keys`. - Refactor logic around GL_ID v2.6.1 - Write errors to stderr to get git to abort and show them as such. v2.6.0 - Prevent character encoding issues by sending received changes as raw data. v2.5.4 - Remove recursive commands from bin/install v2.5.3 - Improve git-annex integration v2.5.2 - Safer line sub for git-annex command v2.5.1 - Expect broadcast message to return empty JSON if no message now v2.5.0 - Support git-annex tool (disabled by default) - Add rubocop (Ruby static code analyzer) for development v2.4.3 - Print broadcast message if one is available v2.4.2 - Pass git changes list as string instead of array v2.4.1 - Access token masking in url before loging v2.4.0 - Show error message when git push is rejected v2.2.0 - Support for custom hooks (Drew Blessing and Jose Kahan) v2.1.0 - Use secret token with GitLab internal API. Requires GitLab 7.5 or higher v2.0.1 - Send post-receive changes to redis as a string instead of array v2.0.0 - Works with GitLab v7.3+ - Replace raise with abort when checking path to prevent path exposure - Handle invalid number of arguments on remote commands - Replace update hook with pre-receive and post-receive hooks. 
- Symlink the whole hooks directory - Ignore missing repositories in create-hooks - Connect to Redis via sockets by default v1.9.7 - Increased test coverage - By default use direct unicorn connection (localhost:8080) - Fix wrong repo path send to GitLab by GitlabUpdate hook v1.9.6 - Explicitly require 'timeout' from the standard library v1.9.5 - Put authorized_keys.lock in the same directory as authorized_keys - Use lock file when add new entries to authorized_keys v1.9.4 - Use lock file when modify authorized_keys v1.9.3 - Ignore force push detection for new branch or branch remove push v1.9.2 - Add support for force push detection v1.9.1 - Update hook sends branch and tag name v1.9.0 - Call api in update hook for both ssdh and http push. Requires GitLab 6.7+ - Pass oldrev and newrev to api.allowed? v1.8.5 - Add `gitlab-keys batch-add-keys` subcommand for authorized_keys rebuilds v1.8.4 - Dont do import if repository exists v1.8.3 - Add timeout option for repository import v1.8.2 - Fix broken 1.8.1 v1.8.1 - Restrict Environment Variables - Add bin/create-hooks command - More safe shell execution v1.8.0 - Fix return values in GitlabKeys v1.7.9 - Fix escape of repository path for custom ssh port v1.7.8 - Escape repository path to prevent relative links (CVE-2013-4583) v1.7.7 - Separate options from arguments with -- (CVE-2013-4582) - Bypass shell and use stdlib JSON for GitlabUpdate (CVE-2013-4581) v1.7.6 - Fix gitlab-projects update-head for improted repo when branch exists but not listed in refs/head v1.7.5 - Remove keys from authorized_keys using ruby instead of shell v1.7.4 - More protection against shell injection (CVE-2013-4546) v1.7.3 - Use Kernel#open to append lines to authorized_keys (CVE-2013-4490) v1.7.2 - More safe command execution v1.7.1 - Fixed issue when developers are able to push to protected branches that contain a '/' in the branch name. v1.7.0 - Clean authorized_keys file with `gitlab-keys clear` v1.6.0 - Create branch/tag functionality - Remove branch/tag functionality v1.5.0 - Logger - Ability to specify ca_file/ca_path - Update-head command for project - Better regexp for key_id inside shell v1.4.0 - Regex used in rm-key command was too lax v1.3.0 - Fork-project command - Custom redis configuration - Interpret login with deploy key as anonymous one v1.2.0 - Return non-zero result if gitlab-projects and gitlab-keys execution was not successful - http_settings configuration option added v1.1.0 - added mv-project feature - increased test coverage v1.0.4 - requires gitlab c9ca15e - don't use post-receive file any more. Make all updates in update - fixed issue with invalid GL_USER - use GL_ID instead of GL_USER gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/CONTRIBUTING.md000066400000000000000000000050361324746544700242540ustar00rootroot00000000000000## Contributing Thank you for your interest in contributing to this GitLab project! We welcome all contributions. By participating in this project, you agree to abide by the [code of conduct](#code-of-conduct). ## Developer Certificate of Origin + License By contributing to GitLab B.V., You accept and agree to the following terms and conditions for Your present and future Contributions submitted to GitLab B.V. Except for the license granted herein to GitLab B.V. and recipients of software distributed by GitLab B.V., You reserve all right, title, and interest in and to Your Contributions. All Contributions are subject to the following DCO + License terms. 
[DCO + License](https://gitlab.com/gitlab-org/dco/blob/master/README.md) _This notice should stay as the first item in the CONTRIBUTING.md file._ ## Code of conduct As contributors and maintainers of this project, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities. We are committed to making participation in this project a harassment-free experience for everyone, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, or religion. Examples of unacceptable behavior by participants include the use of sexual language or imagery, derogatory comments or personal attacks, trolling, public or private harassment, insults, or other unprofessional conduct. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct. Project maintainers who do not follow the Code of Conduct may be removed from the project team. This code of conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Instances of abusive, harassing, or otherwise unacceptable behavior can be reported by emailing contact@gitlab.com. This Code of Conduct is adapted from the [Contributor Covenant][contributor-covenant], version 1.1.0, available at [http://contributor-covenant.org/version/1/1/0/](http://contributor-covenant.org/version/1/1/0/). [contributor-covenant]: http://contributor-covenant.org [individual-agreement]: https://docs.gitlab.com/ee/legal/individual_contributor_license_agreement.html [corporate-agreement]: https://docs.gitlab.com/ee/legal/corporate_contributor_license_agreement.html gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/Gemfile000066400000000000000000000003441324746544700233130ustar00rootroot00000000000000source "http://rubygems.org" group :development, :test do gem 'guard' gem 'guard-rspec' gem 'rspec', '~> 2.14.0' gem 'rubocop', '0.49.1', require: false gem 'simplecov', require: false gem 'vcr' gem 'webmock' end gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/Gemfile.lock000066400000000000000000000030451324746544700242430ustar00rootroot00000000000000GEM remote: http://rubygems.org/ specs: addressable (2.3.2) ast (2.4.0) coderay (1.0.8) crack (0.3.2) diff-lcs (1.2.5) docile (1.1.5) guard (1.5.4) listen (>= 0.4.2) lumberjack (>= 1.0.2) pry (>= 0.9.10) thor (>= 0.14.6) guard-rspec (2.1.2) guard (>= 1.1) rspec (~> 2.11) listen (0.5.3) lumberjack (1.0.2) method_source (0.8.1) multi_json (1.10.1) parallel (1.12.1) parser (2.5.0.2) ast (~> 2.4.0) powerpack (0.1.1) pry (0.9.10) coderay (~> 1.0.5) method_source (~> 0.8) slop (~> 3.3.1) rainbow (2.2.2) rake rake (12.3.0) rspec (2.14.1) rspec-core (~> 2.14.0) rspec-expectations (~> 2.14.0) rspec-mocks (~> 2.14.0) rspec-core (2.14.8) rspec-expectations (2.14.5) diff-lcs (>= 1.1.3, < 2.0) rspec-mocks (2.14.6) rubocop (0.49.1) parallel (~> 1.10) parser (>= 2.3.3.1, < 3.0) powerpack (~> 0.1) rainbow (>= 1.99.1, < 3.0) ruby-progressbar (~> 1.7) unicode-display_width (~> 1.0, >= 1.0.1) ruby-progressbar (1.9.0) simplecov (0.9.1) docile (~> 1.1.0) multi_json (~> 1.0) simplecov-html (~> 0.8.0) simplecov-html (0.8.0) slop (3.3.3) thor (0.19.1) unicode-display_width (1.3.0) vcr (2.4.0) webmock (1.9.0) addressable (>= 2.2.7) 
crack (>= 0.1.7) PLATFORMS ruby DEPENDENCIES guard guard-rspec rspec (~> 2.14.0) rubocop (= 0.49.1) simplecov vcr webmock BUNDLED WITH 1.16.1 gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/Guardfile000066400000000000000000000021761324746544700236520ustar00rootroot00000000000000# A sample Guardfile # More info at https://github.com/guard/guard#readme guard 'rspec' do watch(%r{^spec/.+_spec\.rb$}) watch(%r{^lib/(.+)\.rb$}) { |m| "spec/lib/#{m[1]}_spec.rb" } watch('spec/spec_helper.rb') { "spec" } # Rails example watch(%r{^app/(.+)\.rb$}) { |m| "spec/#{m[1]}_spec.rb" } watch(%r{^app/(.*)(\.erb|\.haml)$}) { |m| "spec/#{m[1]}#{m[2]}_spec.rb" } watch(%r{^app/controllers/(.+)_(controller)\.rb$}) { |m| ["spec/routing/#{m[1]}_routing_spec.rb", "spec/#{m[2]}s/#{m[1]}_#{m[2]}_spec.rb", "spec/acceptance/#{m[1]}_spec.rb"] } watch(%r{^spec/support/(.+)\.rb$}) { "spec" } watch('config/routes.rb') { "spec/routing" } watch('app/controllers/application_controller.rb') { "spec/controllers" } # Capybara features specs watch(%r{^app/views/(.+)/.*\.(erb|haml)$}) { |m| "spec/features/#{m[1]}_spec.rb" } # Turnip features and steps watch(%r{^spec/acceptance/(.+)\.feature$}) watch(%r{^spec/acceptance/steps/(.+)_steps\.rb$}) { |m| Dir[File.join("**/#{m[1]}.feature")][0] || 'spec/acceptance' } end gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/LICENSE000066400000000000000000000023741324746544700230320ustar00rootroot00000000000000Copyright (c) 2011-2017 GitLab B.V. With regard to the GitLab Software: Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For all third party components incorporated into the GitLab Software, those components are licensed under the original license provided by the owner of the applicable component. gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/README.md000066400000000000000000000132311324746544700232760ustar00rootroot00000000000000# GitLab Shell ## GitLab Shell handles git commands for GitLab GitLab Shell handles git commands for GitLab and modifies the list of authorized keys. GitLab Shell is not a Unix shell nor a replacement for Bash or Zsh. When you access the GitLab server over ssh then GitLab Shell will: 1. Limits you to predefined git commands (git push, git pull). 1. Call the GitLab Rails API to check if you are authorized 1. It will execute the pre-receive hooks (called Git Hooks in GitLab Enterprise Edition) 1. It will execute the action you requested 1. Process the GitLab post-receive actions 1. 
Process any custom post-receive actions

If you access a GitLab server over http(s), what happens depends on whether you pull from or push to the git repository.

If you pull from git repositories over http(s), the GitLab Rails app completely handles the authentication and execution.

If you push to git repositories over http(s), the GitLab Rails app does not handle any authentication or execution; it delegates the following to GitLab Shell:

1. Call the GitLab Rails API to check if you are authorized
1. It will execute the pre-receive hooks (called Git Hooks in GitLab Enterprise Edition)
1. It will execute the action you requested
1. Process the GitLab post-receive actions
1. Process any custom post-receive actions

You may wonder why, in the case of git push over http(s), the Rails app doesn't handle authentication before delegating to GitLab Shell. This is because GitLab Rails doesn't have the logic to interpret git push commands. The idea is to keep this interpretation code in only one place, and that place is GitLab Shell, so it can be reused for ssh access. In fact, GitLab Shell executes all git push commands without checking authorization and relies on the pre-receive hooks to check authorization. When you do a git pull command, authorization is checked before the command is executed (either in GitLab Rails or in GitLab Shell via an API call to GitLab Rails). The authorization check for git pull is much simpler, since you only have to check whether a user can access the repo (there is no need to check branch permissions).

An overview of the four cases described above:

1. git pull over ssh -> gitlab-shell -> API call to gitlab-rails (authorization) -> accept or decline -> execute git command
1. git pull over http -> gitlab-rails (authorization) -> accept or decline -> execute git command
1. git push over ssh -> gitlab-shell (git command is not executed yet) -> execute git command -> gitlab-shell pre-receive hook -> API call to gitlab-rails (authorization) -> accept or decline push
1. git push over http -> gitlab-rails (git command is not executed yet) -> execute git command -> gitlab-shell pre-receive hook -> API call to gitlab-rails (authorization) -> accept or decline push

## Code status

[![pipeline status](https://gitlab.com/gitlab-org/gitlab-shell/badges/master/pipeline.svg)](https://gitlab.com/gitlab-org/gitlab-shell/commits/master)
[![coverage report](https://gitlab.com/gitlab-org/gitlab-shell/badges/master/coverage.svg)](https://gitlab.com/gitlab-org/gitlab-shell/commits/master)
[![Code Climate](https://codeclimate.com/github/gitlabhq/gitlab-shell.svg)](https://codeclimate.com/github/gitlabhq/gitlab-shell)

## Requirements

**GitLab shell will always use your system ruby (normally located at /usr/bin/ruby) and will not use the ruby you installed with a ruby version manager (such as RVM).**

It requires ruby 2.0 or higher. Please uninstall any old ruby versions from your system:

```
sudo apt-get remove ruby1.8
```

Download Ruby and compile it with:

```
mkdir /tmp/ruby && cd /tmp/ruby
curl -L --progress http://cache.ruby-lang.org/pub/ruby/2.1/ruby-2.1.5.tar.gz | tar xz
cd ruby-2.1.5
./configure --disable-install-rdoc
make
sudo make install
```

To install gitlab-shell you also need a Go compiler version 1.8 or newer. https://golang.org/dl/

## Setup

    ./bin/install
    ./bin/compile

## Check

    ./bin/check

## Keys

Add key:

    ./bin/gitlab-keys add-key key-782 "ssh-rsa AAAAx321..."

Remove key:

    ./bin/gitlab-keys rm-key key-23 "ssh-rsa AAAAx321..."
List all keys:

    ./bin/gitlab-keys list-keys

Remove all keys from authorized_keys file:

    ./bin/gitlab-keys clear

## Git LFS remark

Starting with GitLab 8.12, GitLab supports Git LFS authentication through ssh.

## Releasing a new version

GitLab Shell is versioned by git tags, and the version used by the Rails application is stored in [`GITLAB_SHELL_VERSION`](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/GITLAB_SHELL_VERSION).

For each version, there is a raw version and a tag version:

- The **raw version** is the version number. For instance, `15.2.8`.
- The **tag version** is the raw version prefixed with `v`. For instance, `v15.2.8`.

To release a new version of GitLab Shell and have that version available to the Rails application:

1. Update the [`CHANGELOG`](CHANGELOG) with the **tag version** and the [`VERSION`](VERSION) file with the **raw version**.
2. Add a new git tag with the **tag version**.
3. Update `GITLAB_SHELL_VERSION` in the Rails application to the **raw version**. (Note: this can be done in a separate MR, or in an MR that will make use of the latest GitLab Shell changes.)

## Updating VCR fixtures

In order to generate new VCR fixtures you need to have a local GitLab instance running and the following data in place:

1. The gitlab-org/gitlab-test project.
2. An SSH key with ID 1 that has access to the project and belongs to the Administrator.
3. An SSH key with ID 2 that does not have access to the project.

You also need to modify the `secret` variable in `spec/gitlab_net_spec.rb` so tests can connect to your local instance.

## Contributing

See [CONTRIBUTING.md](./CONTRIBUTING.md).

## License

See [LICENSE](./LICENSE).

gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/VERSION000066400000000000000000000000061324746544700230630ustar00rootroot00000000000000
6.0.4
gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/bin/000077500000000000000000000000001324746544700225675ustar00rootroot00000000000000
gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/bin/authorized_keys000077500000000000000000000013121324746544700257230ustar00rootroot00000000000000
#!/usr/bin/env ruby

#
# GitLab shell authorized_keys. Query GitLab API to get the authorized command for a given ssh key fingerprint
#
# Ex.
#   /bin/authorized_keys BASE64-KEY
#
# Returns
#   command="/bin/gitlab-shell key-#",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty ssh-rsa AAAAB3NzaC1yc2EAAAADAQA...
#
key = ARGV[0]
abort "# No key provided" if key.nil? || key.empty?

require_relative "../lib/gitlab_init"
require_relative "../lib/gitlab_net"
require_relative "../lib/gitlab_keys"

authorized_key = GitlabNet.new.authorized_key(key)

if authorized_key.nil?
  puts "# No key was found for #{key}"
else
  puts GitlabKeys.key_line("key-#{authorized_key['id']}", authorized_key["key"])
end
gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/bin/check000077500000000000000000000014321324746544700235720ustar00rootroot00000000000000
#!/usr/bin/env ruby

require_relative '../lib/gitlab_init'
require_relative '../lib/gitlab_net'

#
# GitLab shell check task
#

print "Check GitLab API access: "
begin
  resp = GitlabNet.new.check
  if resp.code != "200"
    abort "FAILED.
code: #{resp.code}" end puts 'OK' check_values = JSON.parse(resp.body) print 'Redis available via internal API: ' if check_values['redis'] puts 'OK' else abort 'FAILED' end rescue GitlabNet::ApiUnreachableError abort "FAILED: Failed to connect to internal API" end config = GitlabConfig.new abort("ERROR: missing option in config.yml") unless config.auth_file print "\nAccess to #{config.auth_file}: " if system(File.dirname(__FILE__) + '/gitlab-keys', 'check-permissions') print 'OK' else abort "FAILED" end puts "\n" gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/bin/compile000077500000000000000000000005301324746544700241430ustar00rootroot00000000000000#!/usr/bin/env ruby require 'fileutils' require_relative '../support/go_build' include GoBuild def main create_fresh_build_dir run!(GO_ENV, %W[go install #{GO_PACKAGE}/cmd/...]) executables = Dir[File.join(BUILD_DIR, 'bin', '*')] FileUtils.chmod(0755, executables) FileUtils.cp(executables, File.join(ROOT_PATH, 'bin')) end main gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/bin/create-hooks000077500000000000000000000025341324746544700251050ustar00rootroot00000000000000#!/usr/bin/env ruby # Recreate GitLab hooks in the Git repositories managed by GitLab. # # This script is used when restoring a GitLab backup. require_relative '../lib/gitlab_init' require File.join(ROOT_PATH, 'lib', 'gitlab_metrics') def create_hooks(path) global_hooks_directory = File.join(ROOT_PATH, 'hooks') local_hooks_directory = File.join(path, 'hooks') real_local_hooks_directory = :not_found begin real_local_hooks_directory = File.realpath(local_hooks_directory) rescue Errno::ENOENT # real_local_hooks_directory == :not_found end if real_local_hooks_directory != File.realpath(global_hooks_directory) if File.exist?(local_hooks_directory) $logger.info "Moving existing hooks directory and symlinking global hooks directory for #{path}." FileUtils.mv(local_hooks_directory, "#{local_hooks_directory}.old.#{Time.now.to_i}") end FileUtils.ln_sf(global_hooks_directory, local_hooks_directory) else $logger.info "Hooks already exist for #{path}." true end end repository_storage_paths = ARGV repository_storage_paths.each do |repo_path| Dir["#{repo_path.chomp('/')}/**/*.git"].each do |repo| begin GitlabMetrics.measure('command-create-hooks') do create_hooks(repo) end rescue Errno::ENOENT # The user must have deleted their repository. Ignore. end end end gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/bin/gitlab-keys000077500000000000000000000010611324746544700247260ustar00rootroot00000000000000#!/usr/bin/env ruby require_relative '../lib/gitlab_init' # # GitLab Keys shell. Add/remove keys from ~/.ssh/authorized_keys # # Ex. # /bin/gitlab-keys add-key key-782 "ssh-rsa AAAAx321..." # # printf "key-782\tssh-rsa AAAAx321...\n" | /bin/gitlab-keys batch-add-keys # # /bin/gitlab-keys rm-key key-23 "ssh-rsa AAAAx321..." 
# # /bin/gitlab-keys list-keys # # /bin/gitlab-keys clear # require File.join(ROOT_PATH, 'lib', 'gitlab_keys') # Return non-zero if command execution was not successful if GitlabKeys.new.exec exit 0 else exit 1 end gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/bin/gitlab-shell000077500000000000000000000006301324746544700250630ustar00rootroot00000000000000#!/usr/bin/env ruby unless ENV['SSH_CONNECTION'] puts "Only ssh allowed" exit end key_id = /key-[0-9]+/.match(ARGV.join).to_s original_cmd = ENV.delete('SSH_ORIGINAL_COMMAND') require_relative '../lib/gitlab_init' # # # GitLab shell, invoked from ~/.ssh/authorized_keys # # require File.join(ROOT_PATH, 'lib', 'gitlab_shell') if GitlabShell.new(key_id).exec(original_cmd) exit 0 else exit 1 end gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/bin/gitlab-shell-authorized-keys-check000077500000000000000000000027431324746544700312720ustar00rootroot00000000000000#!/usr/bin/env ruby # # GitLab shell authorized_keys helper. Query GitLab API to get the authorized # command for a given ssh key fingerprint # # Ex. # bin/gitlab-shell-authorized-keys-check # # Returns # command="/bin/gitlab-shell key-#",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty ssh-rsa AAAA... # # Expects to be called by the SSH daemon, via configuration like: # AuthorizedKeysCommandUser git # AuthorizedKeysCommand /bin/gitlab-shell-authorized-keys-check git %u %k abort "# Wrong number of arguments. #{ARGV.size}. Usage: # gitlab-shell-authorized-keys-check " unless ARGV.size == 3 expected_username = ARGV[0] abort '# No username provided' if expected_username.nil? || expected_username == '' actual_username = ARGV[1] abort '# No username provided' if actual_username.nil? || actual_username == '' # Only check access if the requested username matches the configured username. # Normally, these would both be 'git', but it can be configured by the user exit 0 unless expected_username == actual_username key = ARGV[2] abort "# No key provided" if key.nil? || key == '' require_relative '../lib/gitlab_init' require_relative '../lib/gitlab_net' require_relative '../lib/gitlab_keys' authorized_key = GitlabNet.new.authorized_key(key) if authorized_key.nil? puts "# No key was found for #{key}" else puts GitlabKeys.key_line("key-#{authorized_key['id']}", authorized_key['key']) end gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/bin/install000077500000000000000000000011741324746544700241660ustar00rootroot00000000000000#!/usr/bin/env ruby require_relative '../lib/gitlab_init' # # GitLab shell, invoked from ~/.ssh/authorized_keys # config = GitlabConfig.new key_dir = File.dirname("#{config.auth_file}") repository_storage_paths = ARGV commands = [ %W(mkdir -p #{key_dir}), %W(chmod 700 #{key_dir}), ] repository_storage_paths.each do |repository_storage_path| commands << %W(mkdir -p #{repository_storage_path}) commands << %W(chmod ug+rwX,o-rwx #{repository_storage_path}) end commands.each do |cmd| print "#{cmd.join(' ')}: " if system(*cmd) puts 'OK' else puts 'Failed' abort "#{$PROGRAM_NAME} failed" end end exit gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/config.yml.example000066400000000000000000000041571324746544700254500ustar00rootroot00000000000000# # If you change this file in a Merge Request, please also create # a Merge Request on https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests # # GitLab user. git by default user: git # URL to GitLab instance, used for API calls. Default: http://localhost:8080. 
# For relative URL support read http://doc.gitlab.com/ce/install/relative_url.html
# You only have to change the default if you have configured Unicorn
# to listen on a custom port, or if you have configured Unicorn to
# only listen on a Unix domain socket. For Unix domain sockets use
# "http+unix://", e.g.
# "http+unix://%2Fpath%2Fto%2Fsocket"
gitlab_url: "http://localhost:8080"

# See installation.md#using-https for additional HTTPS configuration details.
http_settings:
#  read_timeout: 300
#  user: someone
#  password: somepass
#  ca_file: /etc/ssl/cert.pem
#  ca_path: /etc/pki/tls/certs
  self_signed_cert: false

# File used as authorized_keys for gitlab user
auth_file: "/home/git/.ssh/authorized_keys"

# File that contains the secret key for verifying access to GitLab.
# Default is .gitlab_shell_secret in the gitlab-shell directory.
# secret_file: "/home/git/gitlab-shell/.gitlab_shell_secret"

# Parent directory for global custom hook directories (pre-receive.d, update.d, post-receive.d)
# Default is hooks in the gitlab-shell directory.
# custom_hooks_dir: "/home/git/gitlab-shell/hooks"

# Log file.
# Default is gitlab-shell.log in the root directory.
# log_file: "/home/git/gitlab-shell/gitlab-shell.log"

# Log level. INFO by default
log_level: INFO

# Audit usernames.
# Set to true to see real usernames in the logs instead of key ids, which is easier to follow, but
# incurs an extra API call on every gitlab-shell command.
audit_usernames: false

# Git trace log file.
# If set, git commands receive GIT_TRACE* environment variables
# See https://git-scm.com/book/es/v2/Git-Internals-Environment-Variables#Debugging for documentation
# An absolute path starting with / – the trace output will be appended to that file.
# It needs to exist so we can check permissions and avoid throwing warnings to the users.
git_trace_log_file:

gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/000077500000000000000000000000001324746544700224245ustar00rootroot00000000000000
gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/README.md000066400000000000000000000004501324746544700237020ustar00rootroot00000000000000
# Go executables for gitlab-shell

This directory contains Go executables for use in gitlab-shell. To add a new command `foobar`, create a subdirectory `cmd/foobar` and put your code in `package main` under `cmd/foobar`. This will automatically get compiled into `bin/foobar` by `../bin/compile`.
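For illustration, a minimal command following the layout described above could look like the sketch below. The `foobar` name comes from the paragraph above; the argument handling is only an assumption about what such a command might do, not code from this repository.

```go
package main

import (
	"fmt"
	"os"
)

// A new command only needs to be a `package main` under go/cmd/<name>;
// bin/compile then installs the resulting binary as bin/<name>.
func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: foobar <argument>")
		os.Exit(1)
	}
	fmt.Println("foobar received:", os.Args[1])
}
```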
gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/cmd/000077500000000000000000000000001324746544700231675ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/cmd/gitaly-receive-pack/000077500000000000000000000000001324746544700270145ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/cmd/gitaly-receive-pack/main.go000066400000000000000000000014641324746544700302740ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "os" "gitlab.com/gitlab-org/gitlab-shell/go/internal/handler" "gitlab.com/gitlab-org/gitlab-shell/go/internal/logger" pb "gitlab.com/gitlab-org/gitaly-proto/go" ) func init() { logger.ProgName = "gitaly-receive-pack" } func main() { if err := handler.Prepare(); err != nil { logger.Fatal("preparation failed", err) } if n := len(os.Args); n != 3 { logger.Fatal("wrong number of arguments", fmt.Errorf("expected 2 arguments, got %v", os.Args)) } var request pb.SSHReceivePackRequest if err := json.Unmarshal([]byte(os.Args[2]), &request); err != nil { logger.Fatal("unmarshaling request json failed", err) } code, err := handler.ReceivePack(os.Args[1], &request) if err != nil { logger.Fatal("receive-pack failed", err) } os.Exit(int(code)) } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/cmd/gitaly-upload-pack/000077500000000000000000000000001324746544700266565ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/cmd/gitaly-upload-pack/main.go000066400000000000000000000014601324746544700301320ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "os" "gitlab.com/gitlab-org/gitlab-shell/go/internal/handler" "gitlab.com/gitlab-org/gitlab-shell/go/internal/logger" pb "gitlab.com/gitlab-org/gitaly-proto/go" ) func init() { logger.ProgName = "gitaly-upload-pack" } func main() { if err := handler.Prepare(); err != nil { logger.Fatal("preparation failed", err) } if n := len(os.Args); n != 3 { logger.Fatal("wrong number of arguments", fmt.Errorf("expected 2 arguments, got %v", os.Args)) } var request pb.SSHUploadPackRequest if err := json.Unmarshal([]byte(os.Args[2]), &request); err != nil { logger.Fatal("unmarshaling request json failed", err) } code, err := handler.UploadPack(os.Args[1], &request) if err != nil { logger.Fatal("upload-pack failed", err) } os.Exit(int(code)) } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/internal/000077500000000000000000000000001324746544700242405ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/internal/config/000077500000000000000000000000001324746544700255055ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/internal/config/config.go000066400000000000000000000016751324746544700273120ustar00rootroot00000000000000package config import ( "io/ioutil" "os" "path" "gopkg.in/yaml.v2" ) const ( configFile = "config.yml" logFile = "gitlab-shell.log" ) type Config struct { RootDir string LogFile string `yaml:"log_file"` } func New() (*Config, error) { cfg := Config{} dir, err := os.Getwd() if err != nil { return nil, err } cfg.RootDir = dir configBytes, err := ioutil.ReadFile(path.Join(cfg.RootDir, configFile)) if err != nil { return nil, err } if err := parseConfig(configBytes, &cfg); err != nil { return nil, err } return &cfg, nil } // parseConfig expects YAML data in configBytes and a Config instance with RootDir set. 
func parseConfig(configBytes []byte, cfg *Config) error { if err := yaml.Unmarshal(configBytes, cfg); err != nil { return err } if cfg.LogFile == "" { cfg.LogFile = logFile } if len(cfg.LogFile) > 0 && cfg.LogFile[0] != '/' { cfg.LogFile = path.Join(cfg.RootDir, cfg.LogFile) } return nil } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/internal/config/config_test.go000066400000000000000000000011371324746544700303420ustar00rootroot00000000000000package config import ( "testing" ) func TestConfigLogFile(t *testing.T) { testRoot := "/foo/bar" testCases := []struct { yaml string path string }{ {path: "/foo/bar/gitlab-shell.log"}, {yaml: "log_file: my-log.log", path: "/foo/bar/my-log.log"}, {yaml: "log_file: /qux/my-log.log", path: "/qux/my-log.log"}, } for _, tc := range testCases { cfg := Config{RootDir: testRoot} if err := parseConfig([]byte(tc.yaml), &cfg); err != nil { t.Fatalf("%q: %v", tc.yaml, err) } if cfg.LogFile != tc.path { t.Fatalf("%q: expected %q, got %q", tc.yaml, tc.path, cfg.LogFile) } } } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/internal/handler/000077500000000000000000000000001324746544700256555ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/internal/handler/handler.go000066400000000000000000000020201324746544700276130ustar00rootroot00000000000000package handler import ( "os" "os/exec" "syscall" "google.golang.org/grpc" "gitlab.com/gitlab-org/gitaly/auth" "gitlab.com/gitlab-org/gitaly/client" "gitlab.com/gitlab-org/gitlab-shell/go/internal/config" "gitlab.com/gitlab-org/gitlab-shell/go/internal/logger" ) func Prepare() error { cfg, err := config.New() if err != nil { return err } if err := logger.Configure(cfg); err != nil { return err } // Use a working directory that won't get removed or unmounted. if err := os.Chdir("/"); err != nil { return err } return nil } func execCommand(command string, args ...string) error { binPath, err := exec.LookPath(command) if err != nil { return err } args = append([]string{binPath}, args...) 
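	// Note: syscall.Exec replaces the current process image with binPath, so on
	// success this call never returns; an error only comes back if the exec
	// itself fails (for example, if binPath is not executable).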
return syscall.Exec(binPath, args, os.Environ()) } func dialOpts() []grpc.DialOption { connOpts := client.DefaultDialOpts if token := os.Getenv("GITALY_TOKEN"); token != "" { connOpts = append(client.DefaultDialOpts, grpc.WithPerRPCCredentials(gitalyauth.RPCCredentials(token))) } return connOpts } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/internal/handler/receive_pack.go000066400000000000000000000010741324746544700306260ustar00rootroot00000000000000package handler import ( "context" "fmt" "os" pb "gitlab.com/gitlab-org/gitaly-proto/go" "gitlab.com/gitlab-org/gitaly/client" ) func ReceivePack(gitalyAddress string, request *pb.SSHReceivePackRequest) (int32, error) { if gitalyAddress == "" { return 0, fmt.Errorf("no gitaly_address given") } conn, err := client.Dial(gitalyAddress, dialOpts()) if err != nil { return 0, err } defer conn.Close() ctx, cancel := context.WithCancel(context.Background()) defer cancel() return client.ReceivePack(ctx, conn, os.Stdin, os.Stdout, os.Stderr, request) } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/internal/handler/upload_pack.go000066400000000000000000000010711324746544700304650ustar00rootroot00000000000000package handler import ( "context" "fmt" "os" pb "gitlab.com/gitlab-org/gitaly-proto/go" "gitlab.com/gitlab-org/gitaly/client" ) func UploadPack(gitalyAddress string, request *pb.SSHUploadPackRequest) (int32, error) { if gitalyAddress == "" { return 0, fmt.Errorf("no gitaly_address given") } conn, err := client.Dial(gitalyAddress, dialOpts()) if err != nil { return 0, err } defer conn.Close() ctx, cancel := context.WithCancel(context.Background()) defer cancel() return client.UploadPack(ctx, conn, os.Stdin, os.Stdout, os.Stderr, request) } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/internal/logger/000077500000000000000000000000001324746544700255175ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/internal/logger/logger.go000066400000000000000000000031501324746544700273240ustar00rootroot00000000000000package logger import ( "fmt" "io" "log" "log/syslog" "os" "sync" "time" "gitlab.com/gitlab-org/gitlab-shell/go/internal/config" ) var ( logWriter io.Writer bootstrapLogger *log.Logger pid int mutex sync.Mutex ProgName string ) func Configure(cfg *config.Config) error { mutex.Lock() defer mutex.Unlock() pid = os.Getpid() var err error logWriter, err = os.OpenFile(cfg.LogFile, os.O_WRONLY|os.O_APPEND, 0) return err } func logPrint(msg string, err error) { mutex.Lock() defer mutex.Unlock() if logWriter == nil { bootstrapLogPrint(msg, err) return } // Emulate the existing log format of gitlab-shell t := time.Now().Format("2006-01-02T15:04:05.999999") prefix := fmt.Sprintf("E, [%s #%d] ERROR -- : %s:", t, pid, ProgName) fmt.Fprintf(logWriter, "%s %s: %v\n", prefix, msg, err) } func Fatal(msg string, err error) { logPrint(msg, err) // We don't show the error to the end user because it can leak // information that is private to the GitLab server. fmt.Fprintf(os.Stderr, "%s: fatal: %s\n", ProgName, msg) os.Exit(1) } // If our log file is not available we want to log somewhere else, but // not to standard error because that leaks information to the user. This // function attemps to log to syslog. // // We assume the logging mutex is already locked. func bootstrapLogPrint(msg string, err error) { if bootstrapLogger == nil { var err error bootstrapLogger, err = syslog.NewLogger(syslog.LOG_ERR|syslog.LOG_USER, 0) if err != nil { // The message will not be logged. 
return } } bootstrapLogger.Print(ProgName+":", msg+":", err) } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/000077500000000000000000000000001324746544700237215ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/000077500000000000000000000000001324746544700257605ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/000077500000000000000000000000001324746544700272275ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/000077500000000000000000000000001324746544700310675ustar00rootroot00000000000000LICENSE000066400000000000000000000030571324746544700320220ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobufGo support for Protocol Buffers - Google's data interchange format Copyright 2010 The Go Authors. All rights reserved. https://github.com/golang/protobuf Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. proto/000077500000000000000000000000001324746544700321535ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobufMakefile000066400000000000000000000036161324746544700336210ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/proto# Go support for Protocol Buffers - Google's data interchange format # # Copyright 2010 The Go Authors. All rights reserved. # https://github.com/golang/protobuf # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. 
nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. install: go install test: install generate-test-pbs go test generate-test-pbs: make install make -C testdata protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto make clone.go000066400000000000000000000154711324746544700336120ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/proto// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2011 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Protocol buffer deep copy and merge. // TODO: RawMessage. package proto import ( "log" "reflect" "strings" ) // Clone returns a deep copy of a protocol buffer. func Clone(pb Message) Message { in := reflect.ValueOf(pb) if in.IsNil() { return pb } out := reflect.New(in.Type().Elem()) // out is empty so a merge is a deep copy. mergeStruct(out.Elem(), in.Elem()) return out.Interface().(Message) } // Merge merges src into dst. 
// Required and optional fields that are set in src will be set to that value in dst. // Elements of repeated fields will be appended. // Merge panics if src and dst are not the same type, or if dst is nil. func Merge(dst, src Message) { in := reflect.ValueOf(src) out := reflect.ValueOf(dst) if out.IsNil() { panic("proto: nil destination") } if in.Type() != out.Type() { // Explicit test prior to mergeStruct so that mistyped nils will fail panic("proto: type mismatch") } if in.IsNil() { // Merging nil into non-nil is a quiet no-op return } mergeStruct(out.Elem(), in.Elem()) } func mergeStruct(out, in reflect.Value) { sprop := GetProperties(in.Type()) for i := 0; i < in.NumField(); i++ { f := in.Type().Field(i) if strings.HasPrefix(f.Name, "XXX_") { continue } mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) } if emIn, ok := extendable(in.Addr().Interface()); ok { emOut, _ := extendable(out.Addr().Interface()) mIn, muIn := emIn.extensionsRead() if mIn != nil { mOut := emOut.extensionsWrite() muIn.Lock() mergeExtension(mOut, mIn) muIn.Unlock() } } uf := in.FieldByName("XXX_unrecognized") if !uf.IsValid() { return } uin := uf.Bytes() if len(uin) > 0 { out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) } } // mergeAny performs a merge between two values of the same type. // viaPtr indicates whether the values were indirected through a pointer (implying proto2). // prop is set if this is a struct field (it may be nil). func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { if in.Type() == protoMessageType { if !in.IsNil() { if out.IsNil() { out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) } else { Merge(out.Interface().(Message), in.Interface().(Message)) } } return } switch in.Kind() { case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: if !viaPtr && isProto3Zero(in) { return } out.Set(in) case reflect.Interface: // Probably a oneof field; copy non-nil values. if in.IsNil() { return } // Allocate destination if it is not set, or set to a different type. // Otherwise we will merge as normal. if out.IsNil() || out.Elem().Type() != in.Elem().Type() { out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) } mergeAny(out.Elem(), in.Elem(), false, nil) case reflect.Map: if in.Len() == 0 { return } if out.IsNil() { out.Set(reflect.MakeMap(in.Type())) } // For maps with value types of *T or []byte we need to deep copy each value. elemKind := in.Type().Elem().Kind() for _, key := range in.MapKeys() { var val reflect.Value switch elemKind { case reflect.Ptr: val = reflect.New(in.Type().Elem().Elem()) mergeAny(val, in.MapIndex(key), false, nil) case reflect.Slice: val = in.MapIndex(key) val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) default: val = in.MapIndex(key) } out.SetMapIndex(key, val) } case reflect.Ptr: if in.IsNil() { return } if out.IsNil() { out.Set(reflect.New(in.Elem().Type())) } mergeAny(out.Elem(), in.Elem(), true, nil) case reflect.Slice: if in.IsNil() { return } if in.Type().Elem().Kind() == reflect.Uint8 { // []byte is a scalar bytes field, not a repeated field. // Edge case: if this is in a proto3 message, a zero length // bytes field is considered the zero value, and should not // be merged. if prop != nil && prop.proto3 && in.Len() == 0 { return } // Make a deep copy. // Append to []byte{} instead of []byte(nil) so that we never end up // with a nil result. 
out.SetBytes(append([]byte{}, in.Bytes()...)) return } n := in.Len() if out.IsNil() { out.Set(reflect.MakeSlice(in.Type(), 0, n)) } switch in.Type().Elem().Kind() { case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: out.Set(reflect.AppendSlice(out, in)) default: for i := 0; i < n; i++ { x := reflect.Indirect(reflect.New(in.Type().Elem())) mergeAny(x, in.Index(i), false, nil) out.Set(reflect.Append(out, x)) } } case reflect.Struct: mergeStruct(out, in) default: // unknown type, so not a protocol buffer log.Printf("proto: don't know how to copy %v", in) } } func mergeExtension(out, in map[int32]Extension) { for extNum, eIn := range in { eOut := Extension{desc: eIn.desc} if eIn.value != nil { v := reflect.New(reflect.TypeOf(eIn.value)).Elem() mergeAny(v, reflect.ValueOf(eIn.value), false, nil) eOut.value = v.Interface() } if eIn.enc != nil { eOut.enc = make([]byte, len(eIn.enc)) copy(eOut.enc, eIn.enc) } out[extNum] = eOut } } decode.go000066400000000000000000000575531324746544700337440ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/proto// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Routines for decoding protocol buffer data to construct in-memory representations. */ import ( "errors" "fmt" "io" "os" "reflect" ) // errOverflow is returned when an integer is too large to be represented. var errOverflow = errors.New("proto: integer overflow") // ErrInternalBadWireType is returned by generated code when an incorrect // wire type is encountered. It does not get returned to user code. var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") // The fundamental decoders that interpret bytes on the wire. // Those that take integer types all return uint64 and are // therefore of type valueDecoder. 
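// --- Illustrative sketch (added for clarity; not from the upstream protobuf
// sources). A varint stores an integer in groups of seven bits, least
// significant group first, with the high bit set on every byte except the
// last. For example, 300 (binary 100101100) is encoded as the two bytes
// 0xAC 0x02. The hypothetical helper below only demonstrates how
// DecodeVarint (defined just below in this file) consumes such a buffer;
// nothing in the library calls it.
func exampleDecodeVarint300() (uint64, int) {
	// Returns x == 300 and n == 2 (two bytes consumed).
	return DecodeVarint([]byte{0xAC, 0x02})
}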
// DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. // This is the format for the // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func DecodeVarint(buf []byte) (x uint64, n int) { for shift := uint(0); shift < 64; shift += 7 { if n >= len(buf) { return 0, 0 } b := uint64(buf[n]) n++ x |= (b & 0x7F) << shift if (b & 0x80) == 0 { return x, n } } // The number is too large to represent in a 64-bit value. return 0, 0 } func (p *Buffer) decodeVarintSlow() (x uint64, err error) { i := p.index l := len(p.buf) for shift := uint(0); shift < 64; shift += 7 { if i >= l { err = io.ErrUnexpectedEOF return } b := p.buf[i] i++ x |= (uint64(b) & 0x7F) << shift if b < 0x80 { p.index = i return } } // The number is too large to represent in a 64-bit value. err = errOverflow return } // DecodeVarint reads a varint-encoded integer from the Buffer. // This is the format for the // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func (p *Buffer) DecodeVarint() (x uint64, err error) { i := p.index buf := p.buf if i >= len(buf) { return 0, io.ErrUnexpectedEOF } else if buf[i] < 0x80 { p.index++ return uint64(buf[i]), nil } else if len(buf)-i < 10 { return p.decodeVarintSlow() } var b uint64 // we already checked the first byte x = uint64(buf[i]) - 0x80 i++ b = uint64(buf[i]) i++ x += b << 7 if b&0x80 == 0 { goto done } x -= 0x80 << 7 b = uint64(buf[i]) i++ x += b << 14 if b&0x80 == 0 { goto done } x -= 0x80 << 14 b = uint64(buf[i]) i++ x += b << 21 if b&0x80 == 0 { goto done } x -= 0x80 << 21 b = uint64(buf[i]) i++ x += b << 28 if b&0x80 == 0 { goto done } x -= 0x80 << 28 b = uint64(buf[i]) i++ x += b << 35 if b&0x80 == 0 { goto done } x -= 0x80 << 35 b = uint64(buf[i]) i++ x += b << 42 if b&0x80 == 0 { goto done } x -= 0x80 << 42 b = uint64(buf[i]) i++ x += b << 49 if b&0x80 == 0 { goto done } x -= 0x80 << 49 b = uint64(buf[i]) i++ x += b << 56 if b&0x80 == 0 { goto done } x -= 0x80 << 56 b = uint64(buf[i]) i++ x += b << 63 if b&0x80 == 0 { goto done } // x -= 0x80 << 63 // Always zero. return 0, errOverflow done: p.index = i return x, nil } // DecodeFixed64 reads a 64-bit integer from the Buffer. // This is the format for the // fixed64, sfixed64, and double protocol buffer types. func (p *Buffer) DecodeFixed64() (x uint64, err error) { // x, err already 0 i := p.index + 8 if i < 0 || i > len(p.buf) { err = io.ErrUnexpectedEOF return } p.index = i x = uint64(p.buf[i-8]) x |= uint64(p.buf[i-7]) << 8 x |= uint64(p.buf[i-6]) << 16 x |= uint64(p.buf[i-5]) << 24 x |= uint64(p.buf[i-4]) << 32 x |= uint64(p.buf[i-3]) << 40 x |= uint64(p.buf[i-2]) << 48 x |= uint64(p.buf[i-1]) << 56 return } // DecodeFixed32 reads a 32-bit integer from the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. func (p *Buffer) DecodeFixed32() (x uint64, err error) { // x, err already 0 i := p.index + 4 if i < 0 || i > len(p.buf) { err = io.ErrUnexpectedEOF return } p.index = i x = uint64(p.buf[i-4]) x |= uint64(p.buf[i-3]) << 8 x |= uint64(p.buf[i-2]) << 16 x |= uint64(p.buf[i-1]) << 24 return } // DecodeZigzag64 reads a zigzag-encoded 64-bit integer // from the Buffer. // This is the format used for the sint64 protocol buffer type. 
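// For example (a clarifying note, not in the upstream source): zigzag
// encoding interleaves negative and positive values, so the unsigned wire
// values 0, 1, 2, 3, 4 decode to the signed values 0, -1, 1, -2, 2. The
// expression in the body below is equivalent to int64(x>>1) ^ -int64(x&1).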
func (p *Buffer) DecodeZigzag64() (x uint64, err error) { x, err = p.DecodeVarint() if err != nil { return } x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) return } // DecodeZigzag32 reads a zigzag-encoded 32-bit integer // from the Buffer. // This is the format used for the sint32 protocol buffer type. func (p *Buffer) DecodeZigzag32() (x uint64, err error) { x, err = p.DecodeVarint() if err != nil { return } x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) return } // These are not ValueDecoders: they produce an array of bytes or a string. // bytes, embedded messages // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { n, err := p.DecodeVarint() if err != nil { return nil, err } nb := int(n) if nb < 0 { return nil, fmt.Errorf("proto: bad byte length %d", nb) } end := p.index + nb if end < p.index || end > len(p.buf) { return nil, io.ErrUnexpectedEOF } if !alloc { // todo: check if can get more uses of alloc=false buf = p.buf[p.index:end] p.index += nb return } buf = make([]byte, nb) copy(buf, p.buf[p.index:]) p.index += nb return } // DecodeStringBytes reads an encoded string from the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) DecodeStringBytes() (s string, err error) { buf, err := p.DecodeRawBytes(false) if err != nil { return } return string(buf), nil } // Skip the next item in the buffer. Its wire type is decoded and presented as an argument. // If the protocol buffer has extensions, and the field matches, add it as an extension. // Otherwise, if the XXX_unrecognized field exists, append the skipped data there. func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { oi := o.index err := o.skip(t, tag, wire) if err != nil { return err } if !unrecField.IsValid() { return nil } ptr := structPointer_Bytes(base, unrecField) // Add the skipped field to struct field obuf := o.buf o.buf = *ptr o.EncodeVarint(uint64(tag<<3 | wire)) *ptr = append(o.buf, obuf[oi:o.index]...) o.buf = obuf return nil } // Skip the next item in the buffer. Its wire type is decoded and presented as an argument. func (o *Buffer) skip(t reflect.Type, tag, wire int) error { var u uint64 var err error switch wire { case WireVarint: _, err = o.DecodeVarint() case WireFixed64: _, err = o.DecodeFixed64() case WireBytes: _, err = o.DecodeRawBytes(false) case WireFixed32: _, err = o.DecodeFixed32() case WireStartGroup: for { u, err = o.DecodeVarint() if err != nil { break } fwire := int(u & 0x7) if fwire == WireEndGroup { break } ftag := int(u >> 3) err = o.skip(t, ftag, fwire) if err != nil { break } } default: err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) } return err } // Unmarshaler is the interface representing objects that can // unmarshal themselves. The method should reset the receiver before // decoding starts. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. type Unmarshaler interface { Unmarshal([]byte) error } // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. // // Unmarshal resets pb before starting to unmarshal, so any // existing data in pb is always removed. 
Use UnmarshalMerge // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() return UnmarshalMerge(buf, pb) } // UnmarshalMerge parses the protocol buffer representation in buf and // writes the decoded result to pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. // // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { // If the object can unmarshal itself, let it. if u, ok := pb.(Unmarshaler); ok { return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) } // DecodeMessage reads a count-delimited message from the Buffer. func (p *Buffer) DecodeMessage(pb Message) error { enc, err := p.DecodeRawBytes(false) if err != nil { return err } return NewBuffer(enc).Unmarshal(pb) } // DecodeGroup reads a tag-delimited group from the Buffer. func (p *Buffer) DecodeGroup(pb Message) error { typ, base, err := getbase(pb) if err != nil { return err } return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) } // Unmarshal parses the protocol buffer representation in the // Buffer and places the decoded result in pb. If the struct // underlying pb does not match the data in the buffer, the results can be // unpredictable. // // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. if u, ok := pb.(Unmarshaler); ok { err := u.Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } typ, base, err := getbase(pb) if err != nil { return err } err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) if collectStats { stats.Decode++ } return err } // unmarshalType does the work of unmarshaling a structure. func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { var state errorState required, reqFields := prop.reqCount, uint64(0) var err error for err == nil && o.index < len(o.buf) { oi := o.index var u uint64 u, err = o.DecodeVarint() if err != nil { break } wire := int(u & 0x7) if wire == WireEndGroup { if is_group { if required > 0 { // Not enough information to determine the exact field. // (See below.) return &RequiredNotSetError{"{Unknown}"} } return nil // input is satisfied } return fmt.Errorf("proto: %s: wiretype end group for non-group", st) } tag := int(u >> 3) if tag <= 0 { return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) } fieldnum, ok := prop.decoderTags.get(tag) if !ok { // Maybe it's an extension? if prop.extendable { if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { if err = o.skip(st, tag, wire); err == nil { extmap := e.extensionsWrite() ext := extmap[int32(tag)] // may be missing ext.enc = append(ext.enc, o.buf[oi:o.index]...) extmap[int32(tag)] = ext } continue } } // Maybe it's a oneof? if prop.oneofUnmarshaler != nil { m := structPointer_Interface(base, st).(Message) // First return value indicates whether tag is a oneof field. ok, err = prop.oneofUnmarshaler(m, tag, wire, o) if err == ErrInternalBadWireType { // Map the error to something more descriptive. // Do the formatting here to save generated code space. 
err = fmt.Errorf("bad wiretype for oneof field in %T", m) } if ok { continue } } err = o.skipAndSave(st, tag, wire, base, prop.unrecField) continue } p := prop.Prop[fieldnum] if p.dec == nil { fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) continue } dec := p.dec if wire != WireStartGroup && wire != p.WireType { if wire == WireBytes && p.packedDec != nil { // a packable field dec = p.packedDec } else { err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) continue } } decErr := dec(o, p, base) if decErr != nil && !state.shouldContinue(decErr, p) { err = decErr } if err == nil && p.Required { // Successfully decoded a required field. if tag <= 64 { // use bitmap for fields 1-64 to catch field reuse. var mask uint64 = 1 << uint64(tag-1) if reqFields&mask == 0 { // new required field reqFields |= mask required-- } } else { // This is imprecise. It can be fooled by a required field // with a tag > 64 that is encoded twice; that's very rare. // A fully correct implementation would require allocating // a data structure, which we would like to avoid. required-- } } } if err == nil { if is_group { return io.ErrUnexpectedEOF } if state.err != nil { return state.err } if required > 0 { // Not enough information to determine the exact field. If we use extra // CPU, we could determine the field only if the missing required field // has a tag <= 64 and we check reqFields. return &RequiredNotSetError{"{Unknown}"} } } return err } // Individual type decoders // For each, // u is the decoded value, // v is a pointer to the field (pointer) in the struct // Sizes of the pools to allocate inside the Buffer. // The goal is modest amortization and allocation // on at least 16-byte boundaries. const ( boolPoolSize = 16 uint32PoolSize = 8 uint64PoolSize = 4 ) // Decode a bool. func (o *Buffer) dec_bool(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } if len(o.bools) == 0 { o.bools = make([]bool, boolPoolSize) } o.bools[0] = u != 0 *structPointer_Bool(base, p.field) = &o.bools[0] o.bools = o.bools[1:] return nil } func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } *structPointer_BoolVal(base, p.field) = u != 0 return nil } // Decode an int32. func (o *Buffer) dec_int32(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) return nil } func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) return nil } // Decode an int64. func (o *Buffer) dec_int64(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } word64_Set(structPointer_Word64(base, p.field), o, u) return nil } func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } word64Val_Set(structPointer_Word64Val(base, p.field), o, u) return nil } // Decode a string. 
func (o *Buffer) dec_string(p *Properties, base structPointer) error { s, err := o.DecodeStringBytes() if err != nil { return err } *structPointer_String(base, p.field) = &s return nil } func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { s, err := o.DecodeStringBytes() if err != nil { return err } *structPointer_StringVal(base, p.field) = s return nil } // Decode a slice of bytes ([]byte). func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { b, err := o.DecodeRawBytes(true) if err != nil { return err } *structPointer_Bytes(base, p.field) = b return nil } // Decode a slice of bools ([]bool). func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } v := structPointer_BoolSlice(base, p.field) *v = append(*v, u != 0) return nil } // Decode a slice of bools ([]bool) in packed format. func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { v := structPointer_BoolSlice(base, p.field) nn, err := o.DecodeVarint() if err != nil { return err } nb := int(nn) // number of bytes of encoded bools fin := o.index + nb if fin < o.index { return errOverflow } y := *v for o.index < fin { u, err := p.valDec(o) if err != nil { return err } y = append(y, u != 0) } *v = y return nil } // Decode a slice of int32s ([]int32). func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } structPointer_Word32Slice(base, p.field).Append(uint32(u)) return nil } // Decode a slice of int32s ([]int32) in packed format. func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { v := structPointer_Word32Slice(base, p.field) nn, err := o.DecodeVarint() if err != nil { return err } nb := int(nn) // number of bytes of encoded int32s fin := o.index + nb if fin < o.index { return errOverflow } for o.index < fin { u, err := p.valDec(o) if err != nil { return err } v.Append(uint32(u)) } return nil } // Decode a slice of int64s ([]int64). func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } structPointer_Word64Slice(base, p.field).Append(u) return nil } // Decode a slice of int64s ([]int64) in packed format. func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { v := structPointer_Word64Slice(base, p.field) nn, err := o.DecodeVarint() if err != nil { return err } nb := int(nn) // number of bytes of encoded int64s fin := o.index + nb if fin < o.index { return errOverflow } for o.index < fin { u, err := p.valDec(o) if err != nil { return err } v.Append(u) } return nil } // Decode a slice of strings ([]string). func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { s, err := o.DecodeStringBytes() if err != nil { return err } v := structPointer_StringSlice(base, p.field) *v = append(*v, s) return nil } // Decode a slice of slice of bytes ([][]byte). func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { b, err := o.DecodeRawBytes(true) if err != nil { return err } v := structPointer_BytesSlice(base, p.field) *v = append(*v, b) return nil } // Decode a map field. 
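// (Clarifying note, not in the upstream source: each map entry arrives as a
// small nested message whose field 1 is the key and field 2 is the value;
// dec_new_map below re-parses that restricted two-field format one entry at
// a time, as its inline comments describe.)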
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { raw, err := o.DecodeRawBytes(false) if err != nil { return err } oi := o.index // index at the end of this map entry o.index -= len(raw) // move buffer back to start of map entry mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V if mptr.Elem().IsNil() { mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) } v := mptr.Elem() // map[K]V // Prepare addressable doubly-indirect placeholders for the key and value types. // See enc_new_map for why. keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K keybase := toStructPointer(keyptr.Addr()) // **K var valbase structPointer var valptr reflect.Value switch p.mtype.Elem().Kind() { case reflect.Slice: // []byte var dummy []byte valptr = reflect.ValueOf(&dummy) // *[]byte valbase = toStructPointer(valptr) // *[]byte case reflect.Ptr: // message; valptr is **Msg; need to allocate the intermediate pointer valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V valptr.Set(reflect.New(valptr.Type().Elem())) valbase = toStructPointer(valptr) default: // everything else valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V valbase = toStructPointer(valptr.Addr()) // **V } // Decode. // This parses a restricted wire format, namely the encoding of a message // with two fields. See enc_new_map for the format. for o.index < oi { // tagcode for key and value properties are always a single byte // because they have tags 1 and 2. tagcode := o.buf[o.index] o.index++ switch tagcode { case p.mkeyprop.tagcode[0]: if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { return err } case p.mvalprop.tagcode[0]: if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { return err } default: // TODO: Should we silently skip this instead? return fmt.Errorf("proto: bad map data tag %d", raw[0]) } } keyelem, valelem := keyptr.Elem(), valptr.Elem() if !keyelem.IsValid() { keyelem = reflect.Zero(p.mtype.Key()) } if !valelem.IsValid() { valelem = reflect.Zero(p.mtype.Elem()) } v.SetMapIndex(keyelem, valelem) return nil } // Decode a group. func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { bas := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(bas) { // allocate new nested message bas = toStructPointer(reflect.New(p.stype)) structPointer_SetStructPointer(base, p.field, bas) } return o.unmarshalType(p.stype, p.sprop, true, bas) } // Decode an embedded message. func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { raw, e := o.DecodeRawBytes(false) if e != nil { return e } bas := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(bas) { // allocate new nested message bas = toStructPointer(reflect.New(p.stype)) structPointer_SetStructPointer(base, p.field, bas) } // If the object can unmarshal itself, let it. if p.isUnmarshaler { iv := structPointer_Interface(bas, p.stype) return iv.(Unmarshaler).Unmarshal(raw) } obuf := o.buf oi := o.index o.buf = raw o.index = 0 err = o.unmarshalType(p.stype, p.sprop, false, bas) o.buf = obuf o.index = oi return err } // Decode a slice of embedded messages. func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { return o.dec_slice_struct(p, false, base) } // Decode a slice of embedded groups. func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { return o.dec_slice_struct(p, true, base) } // Decode a slice of structs ([]*struct). 
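// (Clarifying note, not in the upstream source: for every repeated element a
// fresh struct is allocated and appended to the slice; group elements are
// unmarshaled in place between their start/end-group tags, while message
// elements are first read as a length-delimited chunk via DecodeRawBytes.)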
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { v := reflect.New(p.stype) bas := toStructPointer(v) structPointer_StructPointerSlice(base, p.field).Append(bas) if is_group { err := o.unmarshalType(p.stype, p.sprop, is_group, bas) return err } raw, err := o.DecodeRawBytes(false) if err != nil { return err } // If the object can unmarshal itself, let it. if p.isUnmarshaler { iv := v.Interface() return iv.(Unmarshaler).Unmarshal(raw) } obuf := o.buf oi := o.index o.buf = raw o.index = 0 err = o.unmarshalType(p.stype, p.sprop, is_group, bas) o.buf = obuf o.index = oi return err } encode.go000066400000000000000000001037461324746544700337520ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/proto// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Routines for encoding data into the wire format for protocol buffers. */ import ( "errors" "fmt" "reflect" "sort" ) // RequiredNotSetError is the error returned if Marshal is called with // a protocol buffer struct whose required fields have not // all been initialized. It is also the error returned if Unmarshal is // called with an encoded protocol buffer that does not include all the // required fields. // // When printed, RequiredNotSetError reports the first unset required field in a // message. If the field cannot be precisely determined, it is reported as // "{Unknown}". type RequiredNotSetError struct { field string } func (e *RequiredNotSetError) Error() string { return fmt.Sprintf("proto: required field %q not set", e.field) } var ( // errRepeatedHasNil is the error returned if Marshal is called with // a struct with a repeated field containing a nil element. 
errRepeatedHasNil = errors.New("proto: repeated field has nil element") // errOneofHasNil is the error returned if Marshal is called with // a struct with a oneof field containing a nil element. errOneofHasNil = errors.New("proto: oneof field has nil value") // ErrNil is the error returned if Marshal is called with nil. ErrNil = errors.New("proto: Marshal called with nil") // ErrTooLarge is the error returned if Marshal is called with a // message that encodes to >2GB. ErrTooLarge = errors.New("proto: message encodes to over 2 GB") ) // The fundamental encoders that put bytes on the wire. // Those that take integer types all accept uint64 and are // therefore of type valueEncoder. const maxVarintBytes = 10 // maximum length of a varint // maxMarshalSize is the largest allowed size of an encoded protobuf, // since C++ and Java use signed int32s for the size. const maxMarshalSize = 1<<31 - 1 // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. // Not used by the package itself, but helpful to clients // wishing to use the same encoding. func EncodeVarint(x uint64) []byte { var buf [maxVarintBytes]byte var n int for n = 0; x > 127; n++ { buf[n] = 0x80 | uint8(x&0x7F) x >>= 7 } buf[n] = uint8(x) n++ return buf[0:n] } // EncodeVarint writes a varint-encoded integer to the Buffer. // This is the format for the // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func (p *Buffer) EncodeVarint(x uint64) error { for x >= 1<<7 { p.buf = append(p.buf, uint8(x&0x7f|0x80)) x >>= 7 } p.buf = append(p.buf, uint8(x)) return nil } // SizeVarint returns the varint encoding size of an integer. func SizeVarint(x uint64) int { return sizeVarint(x) } func sizeVarint(x uint64) (n int) { for { n++ x >>= 7 if x == 0 { break } } return n } // EncodeFixed64 writes a 64-bit integer to the Buffer. // This is the format for the // fixed64, sfixed64, and double protocol buffer types. func (p *Buffer) EncodeFixed64(x uint64) error { p.buf = append(p.buf, uint8(x), uint8(x>>8), uint8(x>>16), uint8(x>>24), uint8(x>>32), uint8(x>>40), uint8(x>>48), uint8(x>>56)) return nil } func sizeFixed64(x uint64) int { return 8 } // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. func (p *Buffer) EncodeFixed32(x uint64) error { p.buf = append(p.buf, uint8(x), uint8(x>>8), uint8(x>>16), uint8(x>>24)) return nil } func sizeFixed32(x uint64) int { return 4 } // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func sizeZigzag64(x uint64) int { return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer // to the Buffer. // This is the format used for the sint32 protocol buffer type. func (p *Buffer) EncodeZigzag32(x uint64) error { // use signed number to get arithmetic right shift. return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } func sizeZigzag32(x uint64) int { return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. 
// This is the format used for the bytes protocol buffer // type and for embedded messages. func (p *Buffer) EncodeRawBytes(b []byte) error { p.EncodeVarint(uint64(len(b))) p.buf = append(p.buf, b...) return nil } func sizeRawBytes(b []byte) int { return sizeVarint(uint64(len(b))) + len(b) } // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { p.EncodeVarint(uint64(len(s))) p.buf = append(p.buf, s...) return nil } func sizeStringBytes(s string) int { return sizeVarint(uint64(len(s))) + len(s) } // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } // Marshal takes the protocol buffer // and encodes it into the wire format, returning the data. func Marshal(pb Message) ([]byte, error) { // Can the object marshal itself? if m, ok := pb.(Marshaler); ok { return m.Marshal() } p := NewBuffer(nil) err := p.Marshal(pb) if p.buf == nil && err == nil { // Return a non-nil slice on success. return []byte{}, nil } return p.buf, err } // EncodeMessage writes the protocol buffer to the Buffer, // prefixed by a varint-encoded length. func (p *Buffer) EncodeMessage(pb Message) error { t, base, err := getbase(pb) if structPointer_IsNil(base) { return ErrNil } if err == nil { var state errorState err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) } return err } // Marshal takes the protocol buffer // and encodes it into the wire format, writing the result to the // Buffer. func (p *Buffer) Marshal(pb Message) error { // Can the object marshal itself? if m, ok := pb.(Marshaler); ok { data, err := m.Marshal() p.buf = append(p.buf, data...) return err } t, base, err := getbase(pb) if structPointer_IsNil(base) { return ErrNil } if err == nil { err = p.enc_struct(GetProperties(t.Elem()), base) } if collectStats { (stats).Encode++ // Parens are to work around a goimports bug. } if len(p.buf) > maxMarshalSize { return ErrTooLarge } return err } // Size returns the encoded size of a protocol buffer. func Size(pb Message) (n int) { // Can the object marshal itself? If so, Size is slow. // TODO: add Size to Marshaler, or add a Sizer interface. if m, ok := pb.(Marshaler); ok { b, _ := m.Marshal() return len(b) } t, base, err := getbase(pb) if structPointer_IsNil(base) { return 0 } if err == nil { n = size_struct(GetProperties(t.Elem()), base) } if collectStats { (stats).Size++ // Parens are to work around a goimports bug. } return } // Individual type encoders. // Encode a bool. func (o *Buffer) enc_bool(p *Properties, base structPointer) error { v := *structPointer_Bool(base, p.field) if v == nil { return ErrNil } x := 0 if *v { x = 1 } o.buf = append(o.buf, p.tagcode...) p.valEnc(o, uint64(x)) return nil } func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { v := *structPointer_BoolVal(base, p.field) if !v { return ErrNil } o.buf = append(o.buf, p.tagcode...) p.valEnc(o, 1) return nil } func size_bool(p *Properties, base structPointer) int { v := *structPointer_Bool(base, p.field) if v == nil { return 0 } return len(p.tagcode) + 1 // each bool takes exactly one byte } func size_proto3_bool(p *Properties, base structPointer) int { v := *structPointer_BoolVal(base, p.field) if !v && !p.oneof { return 0 } return len(p.tagcode) + 1 // each bool takes exactly one byte } // Encode an int32. 
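// (Clarifying note, not in the upstream source: the proto3 variants below
// return ErrNil for the zero value, e.g. enc_proto3_int32 emits nothing when
// x == 0; this is how unset proto3 scalar fields are omitted from the wire,
// and the size_proto3_* counterparts skip them as well unless the field is
// part of a oneof.)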
func (o *Buffer) enc_int32(p *Properties, base structPointer) error { v := structPointer_Word32(base, p.field) if word32_IsNil(v) { return ErrNil } x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range o.buf = append(o.buf, p.tagcode...) p.valEnc(o, uint64(x)) return nil } func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { v := structPointer_Word32Val(base, p.field) x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range if x == 0 { return ErrNil } o.buf = append(o.buf, p.tagcode...) p.valEnc(o, uint64(x)) return nil } func size_int32(p *Properties, base structPointer) (n int) { v := structPointer_Word32(base, p.field) if word32_IsNil(v) { return 0 } x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range n += len(p.tagcode) n += p.valSize(uint64(x)) return } func size_proto3_int32(p *Properties, base structPointer) (n int) { v := structPointer_Word32Val(base, p.field) x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range if x == 0 && !p.oneof { return 0 } n += len(p.tagcode) n += p.valSize(uint64(x)) return } // Encode a uint32. // Exactly the same as int32, except for no sign extension. func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { v := structPointer_Word32(base, p.field) if word32_IsNil(v) { return ErrNil } x := word32_Get(v) o.buf = append(o.buf, p.tagcode...) p.valEnc(o, uint64(x)) return nil } func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { v := structPointer_Word32Val(base, p.field) x := word32Val_Get(v) if x == 0 { return ErrNil } o.buf = append(o.buf, p.tagcode...) p.valEnc(o, uint64(x)) return nil } func size_uint32(p *Properties, base structPointer) (n int) { v := structPointer_Word32(base, p.field) if word32_IsNil(v) { return 0 } x := word32_Get(v) n += len(p.tagcode) n += p.valSize(uint64(x)) return } func size_proto3_uint32(p *Properties, base structPointer) (n int) { v := structPointer_Word32Val(base, p.field) x := word32Val_Get(v) if x == 0 && !p.oneof { return 0 } n += len(p.tagcode) n += p.valSize(uint64(x)) return } // Encode an int64. func (o *Buffer) enc_int64(p *Properties, base structPointer) error { v := structPointer_Word64(base, p.field) if word64_IsNil(v) { return ErrNil } x := word64_Get(v) o.buf = append(o.buf, p.tagcode...) p.valEnc(o, x) return nil } func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { v := structPointer_Word64Val(base, p.field) x := word64Val_Get(v) if x == 0 { return ErrNil } o.buf = append(o.buf, p.tagcode...) p.valEnc(o, x) return nil } func size_int64(p *Properties, base structPointer) (n int) { v := structPointer_Word64(base, p.field) if word64_IsNil(v) { return 0 } x := word64_Get(v) n += len(p.tagcode) n += p.valSize(x) return } func size_proto3_int64(p *Properties, base structPointer) (n int) { v := structPointer_Word64Val(base, p.field) x := word64Val_Get(v) if x == 0 && !p.oneof { return 0 } n += len(p.tagcode) n += p.valSize(x) return } // Encode a string. func (o *Buffer) enc_string(p *Properties, base structPointer) error { v := *structPointer_String(base, p.field) if v == nil { return ErrNil } x := *v o.buf = append(o.buf, p.tagcode...) o.EncodeStringBytes(x) return nil } func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { v := *structPointer_StringVal(base, p.field) if v == "" { return ErrNil } o.buf = append(o.buf, p.tagcode...) 
o.EncodeStringBytes(v) return nil } func size_string(p *Properties, base structPointer) (n int) { v := *structPointer_String(base, p.field) if v == nil { return 0 } x := *v n += len(p.tagcode) n += sizeStringBytes(x) return } func size_proto3_string(p *Properties, base structPointer) (n int) { v := *structPointer_StringVal(base, p.field) if v == "" && !p.oneof { return 0 } n += len(p.tagcode) n += sizeStringBytes(v) return } // All protocol buffer fields are nillable, but be careful. func isNil(v reflect.Value) bool { switch v.Kind() { case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: return v.IsNil() } return false } // Encode a message struct. func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { var state errorState structp := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(structp) { return ErrNil } // Can the object marshal itself? if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, err := m.Marshal() if err != nil && !state.shouldContinue(err, nil) { return err } o.buf = append(o.buf, p.tagcode...) o.EncodeRawBytes(data) return state.err } o.buf = append(o.buf, p.tagcode...) return o.enc_len_struct(p.sprop, structp, &state) } func size_struct_message(p *Properties, base structPointer) int { structp := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(structp) { return 0 } // Can the object marshal itself? if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, _ := m.Marshal() n0 := len(p.tagcode) n1 := sizeRawBytes(data) return n0 + n1 } n0 := len(p.tagcode) n1 := size_struct(p.sprop, structp) n2 := sizeVarint(uint64(n1)) // size of encoded length return n0 + n1 + n2 } // Encode a group struct. func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { var state errorState b := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(b) { return ErrNil } o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) err := o.enc_struct(p.sprop, b) if err != nil && !state.shouldContinue(err, nil) { return err } o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) return state.err } func size_struct_group(p *Properties, base structPointer) (n int) { b := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(b) { return 0 } n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) n += size_struct(p.sprop, b) n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) return } // Encode a slice of bools ([]bool). func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { s := *structPointer_BoolSlice(base, p.field) l := len(s) if l == 0 { return ErrNil } for _, x := range s { o.buf = append(o.buf, p.tagcode...) v := uint64(0) if x { v = 1 } p.valEnc(o, v) } return nil } func size_slice_bool(p *Properties, base structPointer) int { s := *structPointer_BoolSlice(base, p.field) l := len(s) if l == 0 { return 0 } return l * (len(p.tagcode) + 1) // each bool takes exactly one byte } // Encode a slice of bools ([]bool) in packed format. func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { s := *structPointer_BoolSlice(base, p.field) l := len(s) if l == 0 { return ErrNil } o.buf = append(o.buf, p.tagcode...) 
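// (Clarifying note, not in the upstream source: in packed encoding the tag
// is written once for the whole field, followed by the byte length of the
// payload and then the values themselves with no per-element tags; each bool
// occupies exactly one byte, so the length written below equals len(s).)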
o.EncodeVarint(uint64(l)) // each bool takes exactly one byte for _, x := range s { v := uint64(0) if x { v = 1 } p.valEnc(o, v) } return nil } func size_slice_packed_bool(p *Properties, base structPointer) (n int) { s := *structPointer_BoolSlice(base, p.field) l := len(s) if l == 0 { return 0 } n += len(p.tagcode) n += sizeVarint(uint64(l)) n += l // each bool takes exactly one byte return } // Encode a slice of bytes ([]byte). func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { s := *structPointer_Bytes(base, p.field) if s == nil { return ErrNil } o.buf = append(o.buf, p.tagcode...) o.EncodeRawBytes(s) return nil } func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { s := *structPointer_Bytes(base, p.field) if len(s) == 0 { return ErrNil } o.buf = append(o.buf, p.tagcode...) o.EncodeRawBytes(s) return nil } func size_slice_byte(p *Properties, base structPointer) (n int) { s := *structPointer_Bytes(base, p.field) if s == nil && !p.oneof { return 0 } n += len(p.tagcode) n += sizeRawBytes(s) return } func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { s := *structPointer_Bytes(base, p.field) if len(s) == 0 && !p.oneof { return 0 } n += len(p.tagcode) n += sizeRawBytes(s) return } // Encode a slice of int32s ([]int32). func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return ErrNil } for i := 0; i < l; i++ { o.buf = append(o.buf, p.tagcode...) x := int32(s.Index(i)) // permit sign extension to use full 64-bit range p.valEnc(o, uint64(x)) } return nil } func size_slice_int32(p *Properties, base structPointer) (n int) { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return 0 } for i := 0; i < l; i++ { n += len(p.tagcode) x := int32(s.Index(i)) // permit sign extension to use full 64-bit range n += p.valSize(uint64(x)) } return } // Encode a slice of int32s ([]int32) in packed format. func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return ErrNil } // TODO: Reuse a Buffer. buf := NewBuffer(nil) for i := 0; i < l; i++ { x := int32(s.Index(i)) // permit sign extension to use full 64-bit range p.valEnc(buf, uint64(x)) } o.buf = append(o.buf, p.tagcode...) o.EncodeVarint(uint64(len(buf.buf))) o.buf = append(o.buf, buf.buf...) return nil } func size_slice_packed_int32(p *Properties, base structPointer) (n int) { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return 0 } var bufSize int for i := 0; i < l; i++ { x := int32(s.Index(i)) // permit sign extension to use full 64-bit range bufSize += p.valSize(uint64(x)) } n += len(p.tagcode) n += sizeVarint(uint64(bufSize)) n += bufSize return } // Encode a slice of uint32s ([]uint32). // Exactly the same as int32, except for no sign extension. func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return ErrNil } for i := 0; i < l; i++ { o.buf = append(o.buf, p.tagcode...) x := s.Index(i) p.valEnc(o, uint64(x)) } return nil } func size_slice_uint32(p *Properties, base structPointer) (n int) { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return 0 } for i := 0; i < l; i++ { n += len(p.tagcode) x := s.Index(i) n += p.valSize(uint64(x)) } return } // Encode a slice of uint32s ([]uint32) in packed format. 
// Exactly the same as int32, except for no sign extension. func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return ErrNil } // TODO: Reuse a Buffer. buf := NewBuffer(nil) for i := 0; i < l; i++ { p.valEnc(buf, uint64(s.Index(i))) } o.buf = append(o.buf, p.tagcode...) o.EncodeVarint(uint64(len(buf.buf))) o.buf = append(o.buf, buf.buf...) return nil } func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { s := structPointer_Word32Slice(base, p.field) l := s.Len() if l == 0 { return 0 } var bufSize int for i := 0; i < l; i++ { bufSize += p.valSize(uint64(s.Index(i))) } n += len(p.tagcode) n += sizeVarint(uint64(bufSize)) n += bufSize return } // Encode a slice of int64s ([]int64). func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { s := structPointer_Word64Slice(base, p.field) l := s.Len() if l == 0 { return ErrNil } for i := 0; i < l; i++ { o.buf = append(o.buf, p.tagcode...) p.valEnc(o, s.Index(i)) } return nil } func size_slice_int64(p *Properties, base structPointer) (n int) { s := structPointer_Word64Slice(base, p.field) l := s.Len() if l == 0 { return 0 } for i := 0; i < l; i++ { n += len(p.tagcode) n += p.valSize(s.Index(i)) } return } // Encode a slice of int64s ([]int64) in packed format. func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { s := structPointer_Word64Slice(base, p.field) l := s.Len() if l == 0 { return ErrNil } // TODO: Reuse a Buffer. buf := NewBuffer(nil) for i := 0; i < l; i++ { p.valEnc(buf, s.Index(i)) } o.buf = append(o.buf, p.tagcode...) o.EncodeVarint(uint64(len(buf.buf))) o.buf = append(o.buf, buf.buf...) return nil } func size_slice_packed_int64(p *Properties, base structPointer) (n int) { s := structPointer_Word64Slice(base, p.field) l := s.Len() if l == 0 { return 0 } var bufSize int for i := 0; i < l; i++ { bufSize += p.valSize(s.Index(i)) } n += len(p.tagcode) n += sizeVarint(uint64(bufSize)) n += bufSize return } // Encode a slice of slice of bytes ([][]byte). func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { ss := *structPointer_BytesSlice(base, p.field) l := len(ss) if l == 0 { return ErrNil } for i := 0; i < l; i++ { o.buf = append(o.buf, p.tagcode...) o.EncodeRawBytes(ss[i]) } return nil } func size_slice_slice_byte(p *Properties, base structPointer) (n int) { ss := *structPointer_BytesSlice(base, p.field) l := len(ss) if l == 0 { return 0 } n += l * len(p.tagcode) for i := 0; i < l; i++ { n += sizeRawBytes(ss[i]) } return } // Encode a slice of strings ([]string). func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { ss := *structPointer_StringSlice(base, p.field) l := len(ss) for i := 0; i < l; i++ { o.buf = append(o.buf, p.tagcode...) o.EncodeStringBytes(ss[i]) } return nil } func size_slice_string(p *Properties, base structPointer) (n int) { ss := *structPointer_StringSlice(base, p.field) l := len(ss) n += l * len(p.tagcode) for i := 0; i < l; i++ { n += sizeStringBytes(ss[i]) } return } // Encode a slice of message structs ([]*struct). func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { var state errorState s := structPointer_StructPointerSlice(base, p.field) l := s.Len() for i := 0; i < l; i++ { structp := s.Index(i) if structPointer_IsNil(structp) { return errRepeatedHasNil } // Can the object marshal itself? 
if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, err := m.Marshal() if err != nil && !state.shouldContinue(err, nil) { return err } o.buf = append(o.buf, p.tagcode...) o.EncodeRawBytes(data) continue } o.buf = append(o.buf, p.tagcode...) err := o.enc_len_struct(p.sprop, structp, &state) if err != nil && !state.shouldContinue(err, nil) { if err == ErrNil { return errRepeatedHasNil } return err } } return state.err } func size_slice_struct_message(p *Properties, base structPointer) (n int) { s := structPointer_StructPointerSlice(base, p.field) l := s.Len() n += l * len(p.tagcode) for i := 0; i < l; i++ { structp := s.Index(i) if structPointer_IsNil(structp) { return // return the size up to this point } // Can the object marshal itself? if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, _ := m.Marshal() n += sizeRawBytes(data) continue } n0 := size_struct(p.sprop, structp) n1 := sizeVarint(uint64(n0)) // size of encoded length n += n0 + n1 } return } // Encode a slice of group structs ([]*struct). func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { var state errorState s := structPointer_StructPointerSlice(base, p.field) l := s.Len() for i := 0; i < l; i++ { b := s.Index(i) if structPointer_IsNil(b) { return errRepeatedHasNil } o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) err := o.enc_struct(p.sprop, b) if err != nil && !state.shouldContinue(err, nil) { if err == ErrNil { return errRepeatedHasNil } return err } o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) } return state.err } func size_slice_struct_group(p *Properties, base structPointer) (n int) { s := structPointer_StructPointerSlice(base, p.field) l := s.Len() n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) for i := 0; i < l; i++ { b := s.Index(i) if structPointer_IsNil(b) { return // return size up to this point } n += size_struct(p.sprop, b) } return } // Encode an extension map. func (o *Buffer) enc_map(p *Properties, base structPointer) error { exts := structPointer_ExtMap(base, p.field) if err := encodeExtensionsMap(*exts); err != nil { return err } return o.enc_map_body(*exts) } func (o *Buffer) enc_exts(p *Properties, base structPointer) error { exts := structPointer_Extensions(base, p.field) v, mu := exts.extensionsRead() if v == nil { return nil } mu.Lock() defer mu.Unlock() if err := encodeExtensionsMap(v); err != nil { return err } return o.enc_map_body(v) } func (o *Buffer) enc_map_body(v map[int32]Extension) error { // Fast-path for common cases: zero or one extensions. if len(v) <= 1 { for _, e := range v { o.buf = append(o.buf, e.enc...) } return nil } // Sort keys to provide a deterministic encoding. keys := make([]int, 0, len(v)) for k := range v { keys = append(keys, int(k)) } sort.Ints(keys) for _, k := range keys { o.buf = append(o.buf, v[int32(k)].enc...) } return nil } func size_map(p *Properties, base structPointer) int { v := structPointer_ExtMap(base, p.field) return extensionsMapSize(*v) } func size_exts(p *Properties, base structPointer) int { v := structPointer_Extensions(base, p.field) return extensionsSize(v) } // Encode a map field. func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { var state errorState // XXX: or do we need to plumb this through? 
/* A map defined as map map_field = N; is encoded in the same way as message MapFieldEntry { key_type key = 1; value_type value = 2; } repeated MapFieldEntry map_field = N; */ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V if v.Len() == 0 { return nil } keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) enc := func() error { if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { return err } if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { return err } return nil } // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. for _, key := range v.MapKeys() { val := v.MapIndex(key) keycopy.Set(key) valcopy.Set(val) o.buf = append(o.buf, p.tagcode...) if err := o.enc_len_thing(enc, &state); err != nil { return err } } return nil } func size_new_map(p *Properties, base structPointer) int { v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) n := 0 for _, key := range v.MapKeys() { val := v.MapIndex(key) keycopy.Set(key) valcopy.Set(val) // Tag codes for key and val are the responsibility of the sub-sizer. keysize := p.mkeyprop.size(p.mkeyprop, keybase) valsize := p.mvalprop.size(p.mvalprop, valbase) entry := keysize + valsize // Add on tag code and length of map entry itself. n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry } return n } // mapEncodeScratch returns a new reflect.Value matching the map's value type, // and a structPointer suitable for passing to an encoder or sizer. func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { // Prepare addressable doubly-indirect placeholders for the key and value types. // This is needed because the element-type encoders expect **T, but the map iteration produces T. keycopy = reflect.New(mapType.Key()).Elem() // addressable K keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K keyptr.Set(keycopy.Addr()) // keybase = toStructPointer(keyptr.Addr()) // **K // Value types are more varied and require special handling. switch mapType.Elem().Kind() { case reflect.Slice: // []byte var dummy []byte valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte valbase = toStructPointer(valcopy.Addr()) case reflect.Ptr: // message; the generated field type is map[K]*Msg (so V is *Msg), // so we only need one level of indirection. valcopy = reflect.New(mapType.Elem()).Elem() // addressable V valbase = toStructPointer(valcopy.Addr()) default: // everything else valcopy = reflect.New(mapType.Elem()).Elem() // addressable V valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V valptr.Set(valcopy.Addr()) // valbase = toStructPointer(valptr.Addr()) // **V } return } // Encode a struct. func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { var state errorState // Encode fields in tag order so that decoders may use optimizations // that depend on the ordering. // https://developers.google.com/protocol-buffers/docs/encoding#order for _, i := range prop.order { p := prop.Prop[i] if p.enc != nil { err := p.enc(o, p, base) if err != nil { if err == ErrNil { if p.Required && state.err == nil { state.err = &RequiredNotSetError{p.Name} } } else if err == errRepeatedHasNil { // Give more context to nil values in repeated fields. 
return errors.New("repeated field " + p.OrigName + " has nil element") } else if !state.shouldContinue(err, p) { return err } } if len(o.buf) > maxMarshalSize { return ErrTooLarge } } } // Do oneof fields. if prop.oneofMarshaler != nil { m := structPointer_Interface(base, prop.stype).(Message) if err := prop.oneofMarshaler(m, o); err == ErrNil { return errOneofHasNil } else if err != nil { return err } } // Add unrecognized fields at the end. if prop.unrecField.IsValid() { v := *structPointer_Bytes(base, prop.unrecField) if len(o.buf)+len(v) > maxMarshalSize { return ErrTooLarge } if len(v) > 0 { o.buf = append(o.buf, v...) } } return state.err } func size_struct(prop *StructProperties, base structPointer) (n int) { for _, i := range prop.order { p := prop.Prop[i] if p.size != nil { n += p.size(p, base) } } // Add unrecognized fields at the end. if prop.unrecField.IsValid() { v := *structPointer_Bytes(base, prop.unrecField) n += len(v) } // Factor in any oneof fields. if prop.oneofSizer != nil { m := structPointer_Interface(base, prop.stype).(Message) n += prop.oneofSizer(m) } return } var zeroes [20]byte // longer than any conceivable sizeVarint // Encode a struct, preceded by its encoded length (as a varint). func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) } // Encode something, preceded by its encoded length (as a varint). func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { iLen := len(o.buf) o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length iMsg := len(o.buf) err := enc() if err != nil && !state.shouldContinue(err, nil) { return err } lMsg := len(o.buf) - iMsg lLen := sizeVarint(uint64(lMsg)) switch x := lLen - (iMsg - iLen); { case x > 0: // actual length is x bytes larger than the space we reserved // Move msg x bytes right. o.buf = append(o.buf, zeroes[:x]...) copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) case x < 0: // actual length is x bytes smaller than the space we reserved // Move msg x bytes left. copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) o.buf = o.buf[:len(o.buf)+x] // x is negative } // Encode the length in the reserved space. o.buf = o.buf[:iLen] o.EncodeVarint(uint64(lMsg)) o.buf = o.buf[:len(o.buf)+lMsg] return state.err } // errorState maintains the first error that occurs and updates that error // with additional context. type errorState struct { err error } // shouldContinue reports whether encoding should continue upon encountering the // given error. If the error is RequiredNotSetError, shouldContinue returns true // and, if this is the first appearance of that error, remembers it for future // reporting. // // If prop is not nil, it may update any error with additional context about the // field with the error. func (s *errorState) shouldContinue(err error, prop *Properties) bool { // Ignore unset required fields. reqNotSet, ok := err.(*RequiredNotSetError) if !ok { return false } if s.err == nil { if prop != nil { err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} } s.err = err } return true } equal.go000066400000000000000000000206611324746544700336160ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/proto// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2011 The Go Authors. All rights reserved. 
// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Protocol buffer comparison. package proto import ( "bytes" "log" "reflect" "strings" ) /* Equal returns true iff protocol buffers a and b are equal. The arguments must both be pointers to protocol buffer structs. Equality is defined in this way: - Two messages are equal iff they are the same type, corresponding fields are equal, unknown field sets are equal, and extensions sets are equal. - Two set scalar fields are equal iff their values are equal. If the fields are of a floating-point type, remember that NaN != x for all x, including NaN. If the message is defined in a proto3 .proto file, fields are not "set"; specifically, zero length proto3 "bytes" fields are equal (nil == {}). - Two repeated fields are equal iff their lengths are the same, and their corresponding elements are equal. Note a "bytes" field, although represented by []byte, is not a repeated field and the rule for the scalar fields described above applies. - Two unset fields are equal. - Two unknown field sets are equal if their current encoded state is equal. - Two extension sets are equal iff they have corresponding elements that are pairwise equal. - Two map fields are equal iff their lengths are the same, and they contain the same set of elements. Zero-length map fields are equal. - Every other combination of things are not equal. The return value is undefined if a and b are not protocol buffers. */ func Equal(a, b Message) bool { if a == nil || b == nil { return a == b } v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) if v1.Type() != v2.Type() { return false } if v1.Kind() == reflect.Ptr { if v1.IsNil() { return v2.IsNil() } if v2.IsNil() { return false } v1, v2 = v1.Elem(), v2.Elem() } if v1.Kind() != reflect.Struct { return false } return equalStruct(v1, v2) } // v1 and v2 are known to have the same type. 
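// A minimal usage sketch for Equal, assuming the generated `example.pb`
// package (message Test) shown in the package documentation in lib.go:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "./example.pb"
)

func main() {
	a := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
	b := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
	c := &pb.Test{Label: proto.String("hello"), Type: proto.Int32(77)}

	fmt.Println(proto.Equal(a, b)) // true: same type, corresponding fields equal
	fmt.Println(proto.Equal(a, c)) // false: Type is set in c but unset in a
}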
func equalStruct(v1, v2 reflect.Value) bool { sprop := GetProperties(v1.Type()) for i := 0; i < v1.NumField(); i++ { f := v1.Type().Field(i) if strings.HasPrefix(f.Name, "XXX_") { continue } f1, f2 := v1.Field(i), v2.Field(i) if f.Type.Kind() == reflect.Ptr { if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { // both unset continue } else if n1 != n2 { // set/unset mismatch return false } b1, ok := f1.Interface().(raw) if ok { b2 := f2.Interface().(raw) // RawMessage if !bytes.Equal(b1.Bytes(), b2.Bytes()) { return false } continue } f1, f2 = f1.Elem(), f2.Elem() } if !equalAny(f1, f2, sprop.Prop[i]) { return false } } if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { em2 := v2.FieldByName("XXX_InternalExtensions") if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { return false } } if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { em2 := v2.FieldByName("XXX_extensions") if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { return false } } uf := v1.FieldByName("XXX_unrecognized") if !uf.IsValid() { return true } u1 := uf.Bytes() u2 := v2.FieldByName("XXX_unrecognized").Bytes() if !bytes.Equal(u1, u2) { return false } return true } // v1 and v2 are known to have the same type. // prop may be nil. func equalAny(v1, v2 reflect.Value, prop *Properties) bool { if v1.Type() == protoMessageType { m1, _ := v1.Interface().(Message) m2, _ := v2.Interface().(Message) return Equal(m1, m2) } switch v1.Kind() { case reflect.Bool: return v1.Bool() == v2.Bool() case reflect.Float32, reflect.Float64: return v1.Float() == v2.Float() case reflect.Int32, reflect.Int64: return v1.Int() == v2.Int() case reflect.Interface: // Probably a oneof field; compare the inner values. n1, n2 := v1.IsNil(), v2.IsNil() if n1 || n2 { return n1 == n2 } e1, e2 := v1.Elem(), v2.Elem() if e1.Type() != e2.Type() { return false } return equalAny(e1, e2, nil) case reflect.Map: if v1.Len() != v2.Len() { return false } for _, key := range v1.MapKeys() { val2 := v2.MapIndex(key) if !val2.IsValid() { // This key was not found in the second map. return false } if !equalAny(v1.MapIndex(key), val2, nil) { return false } } return true case reflect.Ptr: // Maps may have nil values in them, so check for nil. if v1.IsNil() && v2.IsNil() { return true } if v1.IsNil() != v2.IsNil() { return false } return equalAny(v1.Elem(), v2.Elem(), prop) case reflect.Slice: if v1.Type().Elem().Kind() == reflect.Uint8 { // short circuit: []byte // Edge case: if this is in a proto3 message, a zero length // bytes field is considered the zero value. if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { return true } if v1.IsNil() != v2.IsNil() { return false } return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) } if v1.Len() != v2.Len() { return false } for i := 0; i < v1.Len(); i++ { if !equalAny(v1.Index(i), v2.Index(i), prop) { return false } } return true case reflect.String: return v1.Interface().(string) == v2.Interface().(string) case reflect.Struct: return equalStruct(v1, v2) case reflect.Uint32, reflect.Uint64: return v1.Uint() == v2.Uint() } // unknown type, so not a protocol buffer log.Printf("proto: don't know how to compare %v", v1) return false } // base is the struct type that the extensions are based on. // x1 and x2 are InternalExtensions. 
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { em1, _ := x1.extensionsRead() em2, _ := x2.extensionsRead() return equalExtMap(base, em1, em2) } func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { if len(em1) != len(em2) { return false } for extNum, e1 := range em1 { e2, ok := em2[extNum] if !ok { return false } m1, m2 := e1.value, e2.value if m1 != nil && m2 != nil { // Both are unencoded. if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { return false } continue } // At least one is encoded. To do a semantically correct comparison // we need to unmarshal them first. var desc *ExtensionDesc if m := extensionMaps[base]; m != nil { desc = m[extNum] } if desc == nil { log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) continue } var err error if m1 == nil { m1, err = decodeExtension(e1.enc, desc) } if m2 == nil && err == nil { m2, err = decodeExtension(e2.enc, desc) } if err != nil { // The encoded form is invalid. log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) return false } if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { return false } } return true } extensions.go000066400000000000000000000434301324746544700347050ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/proto// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Types and routines for supporting protocol buffer extensions. */ import ( "errors" "fmt" "reflect" "strconv" "sync" ) // ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. var ErrMissingExtension = errors.New("proto: missing extension") // ExtensionRange represents a range of message extensions for a protocol buffer. // Used in code generated by the protocol compiler. 
type ExtensionRange struct { Start, End int32 // both inclusive } // extendableProto is an interface implemented by any protocol buffer generated by the current // proto compiler that may be extended. type extendableProto interface { Message ExtensionRangeArray() []ExtensionRange extensionsWrite() map[int32]Extension extensionsRead() (map[int32]Extension, sync.Locker) } // extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous // version of the proto compiler that may be extended. type extendableProtoV1 interface { Message ExtensionRangeArray() []ExtensionRange ExtensionMap() map[int32]Extension } // extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. type extensionAdapter struct { extendableProtoV1 } func (e extensionAdapter) extensionsWrite() map[int32]Extension { return e.ExtensionMap() } func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { return e.ExtensionMap(), notLocker{} } // notLocker is a sync.Locker whose Lock and Unlock methods are nops. type notLocker struct{} func (n notLocker) Lock() {} func (n notLocker) Unlock() {} // extendable returns the extendableProto interface for the given generated proto message. // If the proto message has the old extension format, it returns a wrapper that implements // the extendableProto interface. func extendable(p interface{}) (extendableProto, bool) { if ep, ok := p.(extendableProto); ok { return ep, ok } if ep, ok := p.(extendableProtoV1); ok { return extensionAdapter{ep}, ok } return nil, false } // XXX_InternalExtensions is an internal representation of proto extensions. // // Each generated message struct type embeds an anonymous XXX_InternalExtensions field, // thus gaining the unexported 'extensions' method, which can be called only from the proto package. // // The methods of XXX_InternalExtensions are not concurrency safe in general, // but calls to logically read-only methods such as has and get may be executed concurrently. type XXX_InternalExtensions struct { // The struct must be indirect so that if a user inadvertently copies a // generated message and its embedded XXX_InternalExtensions, they // avoid the mayhem of a copied mutex. // // The mutex serializes all logically read-only operations to p.extensionMap. // It is up to the client to ensure that write operations to p.extensionMap are // mutually exclusive with other accesses. p *struct { mu sync.Mutex extensionMap map[int32]Extension } } // extensionsWrite returns the extension map, creating it on first use. func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { if e.p == nil { e.p = new(struct { mu sync.Mutex extensionMap map[int32]Extension }) e.p.extensionMap = make(map[int32]Extension) } return e.p.extensionMap } // extensionsRead returns the extensions map for read-only use. It may be nil. // The caller must hold the returned mutex's lock when accessing Elements within the map. func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { if e.p == nil { return nil, nil } return e.p.extensionMap, &e.p.mu } var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. 
type ExtensionDesc struct { ExtendedType Message // nil pointer to the type that is being extended ExtensionType interface{} // nil pointer to the extension type Field int32 // field number Name string // fully-qualified name of extension, for text formatting Tag string // protobuf tag style Filename string // name of the file in which the extension is defined } func (ed *ExtensionDesc) repeated() bool { t := reflect.TypeOf(ed.ExtensionType) return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 } // Extension represents an extension in a message. type Extension struct { // When an extension is stored in a message using SetExtension // only desc and value are set. When the message is marshaled // enc will be set to the encoded form of the message. // // When a message is unmarshaled and contains extensions, each // extension will have only enc set. When such an extension is // accessed using GetExtension (or GetExtensions) desc and value // will be set. desc *ExtensionDesc value interface{} enc []byte } // SetRawExtension is for testing only. func SetRawExtension(base Message, id int32, b []byte) { epb, ok := extendable(base) if !ok { return } extmap := epb.extensionsWrite() extmap[id] = Extension{enc: b} } // isExtensionField returns true iff the given field number is in an extension range. func isExtensionField(pb extendableProto, field int32) bool { for _, er := range pb.ExtensionRangeArray() { if er.Start <= field && field <= er.End { return true } } return false } // checkExtensionTypes checks that the given extension is valid for pb. func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { var pbi interface{} = pb // Check the extended type. if ea, ok := pbi.(extensionAdapter); ok { pbi = ea.extendableProtoV1 } if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) } // Check the range. if !isExtensionField(pb, extension.Field) { return errors.New("proto: bad extension number; not in declared ranges") } return nil } // extPropKey is sufficient to uniquely identify an extension. type extPropKey struct { base reflect.Type field int32 } var extProp = struct { sync.RWMutex m map[extPropKey]*Properties }{ m: make(map[extPropKey]*Properties), } func extensionProperties(ed *ExtensionDesc) *Properties { key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} extProp.RLock() if prop, ok := extProp.m[key]; ok { extProp.RUnlock() return prop } extProp.RUnlock() extProp.Lock() defer extProp.Unlock() // Check again. if prop, ok := extProp.m[key]; ok { return prop } prop := new(Properties) prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) extProp.m[key] = prop return prop } // encode encodes any unmarshaled (unencoded) extensions in e. func encodeExtensions(e *XXX_InternalExtensions) error { m, mu := e.extensionsRead() if m == nil { return nil // fast path } mu.Lock() defer mu.Unlock() return encodeExtensionsMap(m) } // encode encodes any unmarshaled (unencoded) extensions in e. func encodeExtensionsMap(m map[int32]Extension) error { for k, e := range m { if e.value == nil || e.desc == nil { // Extension is only in its encoded form. continue } // We don't skip extensions that have an encoded form set, // because the extension value may have been mutated after // the last time this function was called. 
et := reflect.TypeOf(e.desc.ExtensionType) props := extensionProperties(e.desc) p := NewBuffer(nil) // If e.value has type T, the encoder expects a *struct{ X T }. // Pass a *T with a zero field and hope it all works out. x := reflect.New(et) x.Elem().Set(reflect.ValueOf(e.value)) if err := props.enc(p, props, toStructPointer(x)); err != nil { return err } e.enc = p.buf m[k] = e } return nil } func extensionsSize(e *XXX_InternalExtensions) (n int) { m, mu := e.extensionsRead() if m == nil { return 0 } mu.Lock() defer mu.Unlock() return extensionsMapSize(m) } func extensionsMapSize(m map[int32]Extension) (n int) { for _, e := range m { if e.value == nil || e.desc == nil { // Extension is only in its encoded form. n += len(e.enc) continue } // We don't skip extensions that have an encoded form set, // because the extension value may have been mutated after // the last time this function was called. et := reflect.TypeOf(e.desc.ExtensionType) props := extensionProperties(e.desc) // If e.value has type T, the encoder expects a *struct{ X T }. // Pass a *T with a zero field and hope it all works out. x := reflect.New(et) x.Elem().Set(reflect.ValueOf(e.value)) n += props.size(props, toStructPointer(x)) } return } // HasExtension returns whether the given extension is present in pb. func HasExtension(pb Message, extension *ExtensionDesc) bool { // TODO: Check types, field numbers, etc.? epb, ok := extendable(pb) if !ok { return false } extmap, mu := epb.extensionsRead() if extmap == nil { return false } mu.Lock() _, ok = extmap[extension.Field] mu.Unlock() return ok } // ClearExtension removes the given extension from pb. func ClearExtension(pb Message, extension *ExtensionDesc) { epb, ok := extendable(pb) if !ok { return } // TODO: Check types, field numbers, etc.? extmap := epb.extensionsWrite() delete(extmap, extension.Field) } // GetExtension parses and returns the given extension of pb. // If the extension is not present and has no default value it returns ErrMissingExtension. func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { epb, ok := extendable(pb) if !ok { return nil, errors.New("proto: not an extendable proto") } if err := checkExtensionTypes(epb, extension); err != nil { return nil, err } emap, mu := epb.extensionsRead() if emap == nil { return defaultExtensionValue(extension) } mu.Lock() defer mu.Unlock() e, ok := emap[extension.Field] if !ok { // defaultExtensionValue returns the default value or // ErrMissingExtension if there is no default. return defaultExtensionValue(extension) } if e.value != nil { // Already decoded. Check the descriptor, though. if e.desc != extension { // This shouldn't happen. If it does, it means that // GetExtension was called twice with two different // descriptors with the same field number. return nil, errors.New("proto: descriptor conflict") } return e.value, nil } v, err := decodeExtension(e.enc, extension) if err != nil { return nil, err } // Remember the decoded version and drop the encoded version. // That way it is safe to mutate what we return. e.value = v e.desc = extension e.enc = nil emap[extension.Field] = e return e.value, nil } // defaultExtensionValue returns the default value for extension. // If no default for an extension is defined ErrMissingExtension is returned. 
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) sf, _, err := fieldDefault(t, props) if err != nil { return nil, err } if sf == nil || sf.value == nil { // There is no default value. return nil, ErrMissingExtension } if t.Kind() != reflect.Ptr { // We do not need to return a Ptr, we can directly return sf.value. return sf.value, nil } // We need to return an interface{} that is a pointer to sf.value. value := reflect.New(t).Elem() value.Set(reflect.New(value.Type().Elem())) if sf.kind == reflect.Int32 { // We may have an int32 or an enum, but the underlying data is int32. // Since we can't set an int32 into a non int32 reflect.value directly // set it as a int32. value.Elem().SetInt(int64(sf.value.(int32))) } else { value.Elem().Set(reflect.ValueOf(sf.value)) } return value.Interface(), nil } // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { o := NewBuffer(b) t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) // t is a pointer to a struct, pointer to basic type or a slice. // Allocate a "field" to store the pointer/slice itself; the // pointer/slice will be stored here. We pass // the address of this field to props.dec. // This passes a zero field and a *t and lets props.dec // interpret it as a *struct{ x t }. value := reflect.New(t).Elem() for { // Discard wire type and field number varint. It isn't needed. if _, err := o.DecodeVarint(); err != nil { return nil, err } if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { return nil, err } if o.index >= len(o.buf) { break } } return value.Interface(), nil } // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { epb, ok := extendable(pb) if !ok { return nil, errors.New("proto: not an extendable proto") } extensions = make([]interface{}, len(es)) for i, e := range es { extensions[i], err = GetExtension(epb, e) if err == ErrMissingExtension { err = nil } if err != nil { return } } return } // ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // just the Field field, which defines the extension's field number. func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { epb, ok := extendable(pb) if !ok { return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) } registeredExtensions := RegisteredExtensions(pb) emap, mu := epb.extensionsRead() if emap == nil { return nil, nil } mu.Lock() defer mu.Unlock() extensions := make([]*ExtensionDesc, 0, len(emap)) for extid, e := range emap { desc := e.desc if desc == nil { desc = registeredExtensions[extid] if desc == nil { desc = &ExtensionDesc{Field: extid} } } extensions = append(extensions, desc) } return extensions, nil } // SetExtension sets the specified extension of pb to the specified value. 
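// A usage sketch for the extension accessors, assuming a hypothetical
// generated package with an extendable message Base (declaring an extension
// range) and a registered extension descriptor E_Priority of type *int32;
// neither exists in this repository, they only illustrate the calls:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "./example.pb"
)

func main() {
	msg := &pb.Base{}

	// SetExtension stores the (descriptor, value) pair; it is encoded lazily.
	if err := proto.SetExtension(msg, pb.E_Priority, proto.Int32(7)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(proto.HasExtension(msg, pb.E_Priority)) // true

	// GetExtension returns the decoded value, the declared default, or
	// ErrMissingExtension when neither is available.
	v, err := proto.GetExtension(msg, pb.E_Priority)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*v.(*int32)) // 7

	proto.ClearExtension(msg, pb.E_Priority)
	_, err = proto.GetExtension(msg, pb.E_Priority)
	fmt.Println(err == proto.ErrMissingExtension) // true, assuming no declared default
}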
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { epb, ok := extendable(pb) if !ok { return errors.New("proto: not an extendable proto") } if err := checkExtensionTypes(epb, extension); err != nil { return err } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { return errors.New("proto: bad extension value type") } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension // from an ErrNil due to a missing field. Extensions are // always optional, so the encoder would just swallow the error // and drop all the extensions from the encoded message. if reflect.ValueOf(value).IsNil() { return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) } extmap := epb.extensionsWrite() extmap[extension.Field] = Extension{desc: extension, value: value} return nil } // ClearAllExtensions clears all extensions from pb. func ClearAllExtensions(pb Message) { epb, ok := extendable(pb) if !ok { return } m := epb.extensionsWrite() for k := range m { delete(m, k) } } // A global registry of extensions. // The generated code will register the generated descriptors by calling RegisterExtension. var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) // RegisterExtension is called from the generated code. func RegisterExtension(desc *ExtensionDesc) { st := reflect.TypeOf(desc.ExtendedType).Elem() m := extensionMaps[st] if m == nil { m = make(map[int32]*ExtensionDesc) extensionMaps[st] = m } if _, ok := m[desc.Field]; ok { panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) } m[desc.Field] = desc } // RegisteredExtensions returns a map of the registered extensions of a // protocol buffer struct, indexed by the extension number. // The argument pb should be a nil pointer to the struct type. func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { return extensionMaps[reflect.TypeOf(pb).Elem()] } lib.go000066400000000000000000000572451324746544700332650ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/proto// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /* Package proto converts data structures to and from the wire format of protocol buffers. It works in concert with the Go source code generated for .proto files by the protocol compiler. A summary of the properties of the protocol buffer interface for a protocol buffer variable v: - Names are turned from camel_case to CamelCase for export. - There are no methods on v to set fields; just treat them as structure fields. - There are getters that return a field's value if set, and return the field's default value if unset. The getters work even if the receiver is a nil message. - The zero value for a struct is its correct initialization state. All desired fields must be set before marshaling. - A Reset() method will restore a protobuf struct to its zero state. - Non-repeated fields are pointers to the values; nil means unset. That is, optional or required field int32 f becomes F *int32. - Repeated fields are slices. - Helper functions are available to aid the setting of fields. msg.Foo = proto.String("hello") // set field - Constants are defined to hold the default values of all fields that have them. They have the form Default_StructName_FieldName. Because the getter methods handle defaulted values, direct use of these constants should be rare. - Enums are given type names and maps from names to values. Enum values are prefixed by the enclosing message's name, or by the enum's type name if it is a top-level enum. Enum types have a String method, and a Enum method to assist in message construction. - Nested messages, groups and enums have type names prefixed with the name of the surrounding message type. - Extensions are given descriptor names that start with E_, followed by an underscore-delimited list of the nested messages that contain it (if any) followed by the CamelCased name of the extension field itself. HasExtension, ClearExtension, GetExtension and SetExtension are functions for manipulating extensions. - Oneof field sets are given a single field in their message, with distinguished wrapper types for each possible field value. - Marshal and Unmarshal are functions to encode and decode the wire format. When the .proto file specifies `syntax="proto3"`, there are some differences: - Non-repeated fields of non-message type are values instead of pointers. - Getters are only generated for message and oneof fields. - Enum types do not get an Enum method. The simplest way to describe this is to see an example. 
Given file test.proto, containing package example; enum FOO { X = 17; } message Test { required string label = 1; optional int32 type = 2 [default=77]; repeated int64 reps = 3; optional group OptionalGroup = 4 { required string RequiredField = 5; } oneof union { int32 number = 6; string name = 7; } } The resulting file, test.pb.go, is: package example import proto "github.com/golang/protobuf/proto" import math "math" type FOO int32 const ( FOO_X FOO = 17 ) var FOO_name = map[int32]string{ 17: "X", } var FOO_value = map[string]int32{ "X": 17, } func (x FOO) Enum() *FOO { p := new(FOO) *p = x return p } func (x FOO) String() string { return proto.EnumName(FOO_name, int32(x)) } func (x *FOO) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FOO_value, data) if err != nil { return err } *x = FOO(value) return nil } type Test struct { Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` // Types that are valid to be assigned to Union: // *Test_Number // *Test_Name Union isTest_Union `protobuf_oneof:"union"` XXX_unrecognized []byte `json:"-"` } func (m *Test) Reset() { *m = Test{} } func (m *Test) String() string { return proto.CompactTextString(m) } func (*Test) ProtoMessage() {} type isTest_Union interface { isTest_Union() } type Test_Number struct { Number int32 `protobuf:"varint,6,opt,name=number"` } type Test_Name struct { Name string `protobuf:"bytes,7,opt,name=name"` } func (*Test_Number) isTest_Union() {} func (*Test_Name) isTest_Union() {} func (m *Test) GetUnion() isTest_Union { if m != nil { return m.Union } return nil } const Default_Test_Type int32 = 77 func (m *Test) GetLabel() string { if m != nil && m.Label != nil { return *m.Label } return "" } func (m *Test) GetType() int32 { if m != nil && m.Type != nil { return *m.Type } return Default_Test_Type } func (m *Test) GetOptionalgroup() *Test_OptionalGroup { if m != nil { return m.Optionalgroup } return nil } type Test_OptionalGroup struct { RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` } func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } func (m *Test_OptionalGroup) GetRequiredField() string { if m != nil && m.RequiredField != nil { return *m.RequiredField } return "" } func (m *Test) GetNumber() int32 { if x, ok := m.GetUnion().(*Test_Number); ok { return x.Number } return 0 } func (m *Test) GetName() string { if x, ok := m.GetUnion().(*Test_Name); ok { return x.Name } return "" } func init() { proto.RegisterEnum("example.FOO", FOO_name, FOO_value) } To create and play with a Test object: package main import ( "log" "github.com/golang/protobuf/proto" pb "./example.pb" ) func main() { test := &pb.Test{ Label: proto.String("hello"), Type: proto.Int32(17), Reps: []int64{1, 2, 3}, Optionalgroup: &pb.Test_OptionalGroup{ RequiredField: proto.String("good bye"), }, Union: &pb.Test_Name{"fred"}, } data, err := proto.Marshal(test) if err != nil { log.Fatal("marshaling error: ", err) } newTest := &pb.Test{} err = proto.Unmarshal(data, newTest) if err != nil { log.Fatal("unmarshaling error: ", err) } // Now test and newTest contain the same data. 
if test.GetLabel() != newTest.GetLabel() { log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) } // Use a type switch to determine which oneof was set. switch u := test.Union.(type) { case *pb.Test_Number: // u.Number contains the number. case *pb.Test_Name: // u.Name contains the string. } // etc. } */ package proto import ( "encoding/json" "fmt" "log" "reflect" "sort" "strconv" "sync" ) // Message is implemented by generated protocol buffer messages. type Message interface { Reset() String() string ProtoMessage() } // Stats records allocation details about the protocol buffer encoders // and decoders. Useful for tuning the library itself. type Stats struct { Emalloc uint64 // mallocs in encode Dmalloc uint64 // mallocs in decode Encode uint64 // number of encodes Decode uint64 // number of decodes Chit uint64 // number of cache hits Cmiss uint64 // number of cache misses Size uint64 // number of sizes } // Set to true to enable stats collection. const collectStats = false var stats Stats // GetStats returns a copy of the global Stats structure. func GetStats() Stats { return stats } // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. It is not necessary to use a Buffer; // the global functions Marshal and Unmarshal create a // temporary Buffer and are fine for most applications. type Buffer struct { buf []byte // encode/decode byte stream index int // read point // pools of basic types to amortize allocation. bools []bool uint32s []uint32 uint64s []uint64 // extra pools, only used with pointer_reflect.go int32s []int32 int64s []int64 float32s []float32 float64s []float64 } // NewBuffer allocates a new Buffer and initializes its internal data to // the contents of the argument slice. func NewBuffer(e []byte) *Buffer { return &Buffer{buf: e} } // Reset resets the Buffer, ready for marshaling a new protocol buffer. func (p *Buffer) Reset() { p.buf = p.buf[0:0] // for reading/writing p.index = 0 // for reading } // SetBuf replaces the internal buffer with the slice, // ready for unmarshaling the contents of the slice. func (p *Buffer) SetBuf(s []byte) { p.buf = s p.index = 0 } // Bytes returns the contents of the Buffer. func (p *Buffer) Bytes() []byte { return p.buf } /* * Helper routines for simplifying the creation of optional fields of basic type. */ // Bool is a helper routine that allocates a new bool value // to store v and returns a pointer to it. func Bool(v bool) *bool { return &v } // Int32 is a helper routine that allocates a new int32 value // to store v and returns a pointer to it. func Int32(v int32) *int32 { return &v } // Int is a helper routine that allocates a new int32 value // to store v and returns a pointer to it, but unlike Int32 // its argument value is an int. func Int(v int) *int32 { p := new(int32) *p = int32(v) return p } // Int64 is a helper routine that allocates a new int64 value // to store v and returns a pointer to it. func Int64(v int64) *int64 { return &v } // Float32 is a helper routine that allocates a new float32 value // to store v and returns a pointer to it. func Float32(v float32) *float32 { return &v } // Float64 is a helper routine that allocates a new float64 value // to store v and returns a pointer to it. func Float64(v float64) *float64 { return &v } // Uint32 is a helper routine that allocates a new uint32 value // to store v and returns a pointer to it. 
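// A small sketch of reusing one Buffer across several marshals (Buffer.Marshal
// is defined in encode.go of this package); pb.Test is again the generated
// example type from the package documentation above:

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	pb "./example.pb"
)

func main() {
	msgs := []*pb.Test{
		{Label: proto.String("a"), Type: proto.Int32(1)},
		{Label: proto.String("b"), Reps: []int64{4, 5}},
	}

	// Reusing the Buffer avoids reallocating the backing slice per message.
	buf := proto.NewBuffer(nil)
	for _, m := range msgs {
		buf.Reset()
		if err := buf.Marshal(m); err != nil {
			log.Fatal(err)
		}
		log.Printf("%d bytes: % x", len(buf.Bytes()), buf.Bytes())
	}
}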
func Uint32(v uint32) *uint32 { return &v } // Uint64 is a helper routine that allocates a new uint64 value // to store v and returns a pointer to it. func Uint64(v uint64) *uint64 { return &v } // String is a helper routine that allocates a new string value // to store v and returns a pointer to it. func String(v string) *string { return &v } // EnumName is a helper function to simplify printing protocol buffer enums // by name. Given an enum map and a value, it returns a useful string. func EnumName(m map[int32]string, v int32) string { s, ok := m[v] if ok { return s } return strconv.Itoa(int(v)) } // UnmarshalJSONEnum is a helper function to simplify recovering enum int values // from their JSON-encoded representation. Given a map from the enum's symbolic // names to its int values, and a byte buffer containing the JSON-encoded // value, it returns an int32 that can be cast to the enum type by the caller. // // The function can deal with both JSON representations, numeric and symbolic. func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { if data[0] == '"' { // New style: enums are strings. var repr string if err := json.Unmarshal(data, &repr); err != nil { return -1, err } val, ok := m[repr] if !ok { return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) } return val, nil } // Old style: enums are ints. var val int32 if err := json.Unmarshal(data, &val); err != nil { return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) } return val, nil } // DebugPrint dumps the encoded data in b in a debugging format with a header // including the string s. Used in testing but made available for general debugging. func (p *Buffer) DebugPrint(s string, b []byte) { var u uint64 obuf := p.buf index := p.index p.buf = b p.index = 0 depth := 0 fmt.Printf("\n--- %s ---\n", s) out: for { for i := 0; i < depth; i++ { fmt.Print(" ") } index := p.index if index == len(p.buf) { break } op, err := p.DecodeVarint() if err != nil { fmt.Printf("%3d: fetching op err %v\n", index, err) break out } tag := op >> 3 wire := op & 7 switch wire { default: fmt.Printf("%3d: t=%3d unknown wire=%d\n", index, tag, wire) break out case WireBytes: var r []byte r, err = p.DecodeRawBytes(false) if err != nil { break out } fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) if len(r) <= 6 { for i := 0; i < len(r); i++ { fmt.Printf(" %.2x", r[i]) } } else { for i := 0; i < 3; i++ { fmt.Printf(" %.2x", r[i]) } fmt.Printf(" ..") for i := len(r) - 3; i < len(r); i++ { fmt.Printf(" %.2x", r[i]) } } fmt.Printf("\n") case WireFixed32: u, err = p.DecodeFixed32() if err != nil { fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) break out } fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) case WireFixed64: u, err = p.DecodeFixed64() if err != nil { fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) break out } fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) case WireVarint: u, err = p.DecodeVarint() if err != nil { fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) break out } fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) case WireStartGroup: fmt.Printf("%3d: t=%3d start\n", index, tag) depth++ case WireEndGroup: depth-- fmt.Printf("%3d: t=%3d end\n", index, tag) } } if depth != 0 { fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) } fmt.Printf("\n") p.buf = obuf p.index = index } // SetDefaults sets unset protocol buffer fields to their default values. // It only modifies fields that are both unset and have defined defaults. 
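// A usage sketch for EnumName and UnmarshalJSONEnum, using the FOO enum
// (X = 17) from the generated example.pb package shown in the package
// documentation:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "./example.pb"
)

func main() {
	// EnumName falls back to the numeric value when the name is unknown.
	fmt.Println(proto.EnumName(pb.FOO_name, 17)) // "X"
	fmt.Println(proto.EnumName(pb.FOO_name, 42)) // "42"

	// UnmarshalJSONEnum accepts both the symbolic and the numeric JSON forms.
	for _, raw := range []string{`"X"`, `17`} {
		v, err := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(raw), "FOO")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(pb.FOO(v)) // X
	}
}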
// It recursively sets default values in any non-nil sub-messages. func SetDefaults(pb Message) { setDefaults(reflect.ValueOf(pb), true, false) } // v is a pointer to a struct. func setDefaults(v reflect.Value, recur, zeros bool) { v = v.Elem() defaultMu.RLock() dm, ok := defaults[v.Type()] defaultMu.RUnlock() if !ok { dm = buildDefaultMessage(v.Type()) defaultMu.Lock() defaults[v.Type()] = dm defaultMu.Unlock() } for _, sf := range dm.scalars { f := v.Field(sf.index) if !f.IsNil() { // field already set continue } dv := sf.value if dv == nil && !zeros { // no explicit default, and don't want to set zeros continue } fptr := f.Addr().Interface() // **T // TODO: Consider batching the allocations we do here. switch sf.kind { case reflect.Bool: b := new(bool) if dv != nil { *b = dv.(bool) } *(fptr.(**bool)) = b case reflect.Float32: f := new(float32) if dv != nil { *f = dv.(float32) } *(fptr.(**float32)) = f case reflect.Float64: f := new(float64) if dv != nil { *f = dv.(float64) } *(fptr.(**float64)) = f case reflect.Int32: // might be an enum if ft := f.Type(); ft != int32PtrType { // enum f.Set(reflect.New(ft.Elem())) if dv != nil { f.Elem().SetInt(int64(dv.(int32))) } } else { // int32 field i := new(int32) if dv != nil { *i = dv.(int32) } *(fptr.(**int32)) = i } case reflect.Int64: i := new(int64) if dv != nil { *i = dv.(int64) } *(fptr.(**int64)) = i case reflect.String: s := new(string) if dv != nil { *s = dv.(string) } *(fptr.(**string)) = s case reflect.Uint8: // exceptional case: []byte var b []byte if dv != nil { db := dv.([]byte) b = make([]byte, len(db)) copy(b, db) } else { b = []byte{} } *(fptr.(*[]byte)) = b case reflect.Uint32: u := new(uint32) if dv != nil { *u = dv.(uint32) } *(fptr.(**uint32)) = u case reflect.Uint64: u := new(uint64) if dv != nil { *u = dv.(uint64) } *(fptr.(**uint64)) = u default: log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) } } for _, ni := range dm.nested { f := v.Field(ni) // f is *T or []*T or map[T]*T switch f.Kind() { case reflect.Ptr: if f.IsNil() { continue } setDefaults(f, recur, zeros) case reflect.Slice: for i := 0; i < f.Len(); i++ { e := f.Index(i) if e.IsNil() { continue } setDefaults(e, recur, zeros) } case reflect.Map: for _, k := range f.MapKeys() { e := f.MapIndex(k) if e.IsNil() { continue } setDefaults(e, recur, zeros) } } } } var ( // defaults maps a protocol buffer struct type to a slice of the fields, // with its scalar fields set to their proto-declared non-zero default values. defaultMu sync.RWMutex defaults = make(map[reflect.Type]defaultMessage) int32PtrType = reflect.TypeOf((*int32)(nil)) ) // defaultMessage represents information about the default values of a message. type defaultMessage struct { scalars []scalarField nested []int // struct field index of nested messages } type scalarField struct { index int // struct field index kind reflect.Kind // element type (the T in *T or []T) value interface{} // the proto-declared default value, or nil } // t is a struct type. func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { sprop := GetProperties(t) for _, prop := range sprop.Prop { fi, ok := sprop.decoderTags.get(prop.Tag) if !ok { // XXX_unrecognized continue } ft := t.Field(fi).Type sf, nested, err := fieldDefault(ft, prop) switch { case err != nil: log.Print(err) case nested: dm.nested = append(dm.nested, fi) case sf != nil: sf.index = fi dm.scalars = append(dm.scalars, *sf) } } return dm } // fieldDefault returns the scalarField for field type ft. 
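// A usage sketch for SetDefaults, again assuming the generated example.pb
// package from the package documentation (Test.Type declares default=77):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "./example.pb"
)

func main() {
	t := &pb.Test{Label: proto.String("hi")}
	fmt.Println(t.Type == nil) // true: the optional field is still unset

	// SetDefaults fills unset scalar fields that declare a default value,
	// recursing into non-nil sub-messages; already-set fields are left alone.
	proto.SetDefaults(t)
	fmt.Println(*t.Type)      // 77
	fmt.Println(t.GetLabel()) // "hi"
}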
// sf will be nil if the field can not have a default. // nestedMessage will be true if this is a nested message. // Note that sf.index is not set on return. func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { var canHaveDefault bool switch ft.Kind() { case reflect.Ptr: if ft.Elem().Kind() == reflect.Struct { nestedMessage = true } else { canHaveDefault = true // proto2 scalar field } case reflect.Slice: switch ft.Elem().Kind() { case reflect.Ptr: nestedMessage = true // repeated message case reflect.Uint8: canHaveDefault = true // bytes field } case reflect.Map: if ft.Elem().Kind() == reflect.Ptr { nestedMessage = true // map with message values } } if !canHaveDefault { if nestedMessage { return nil, true, nil } return nil, false, nil } // We now know that ft is a pointer or slice. sf = &scalarField{kind: ft.Elem().Kind()} // scalar fields without defaults if !prop.HasDefault { return sf, false, nil } // a scalar field: either *T or []byte switch ft.Elem().Kind() { case reflect.Bool: x, err := strconv.ParseBool(prop.Default) if err != nil { return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) } sf.value = x case reflect.Float32: x, err := strconv.ParseFloat(prop.Default, 32) if err != nil { return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) } sf.value = float32(x) case reflect.Float64: x, err := strconv.ParseFloat(prop.Default, 64) if err != nil { return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) } sf.value = x case reflect.Int32: x, err := strconv.ParseInt(prop.Default, 10, 32) if err != nil { return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) } sf.value = int32(x) case reflect.Int64: x, err := strconv.ParseInt(prop.Default, 10, 64) if err != nil { return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) } sf.value = x case reflect.String: sf.value = prop.Default case reflect.Uint8: // []byte (not *uint8) sf.value = []byte(prop.Default) case reflect.Uint32: x, err := strconv.ParseUint(prop.Default, 10, 32) if err != nil { return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) } sf.value = uint32(x) case reflect.Uint64: x, err := strconv.ParseUint(prop.Default, 10, 64) if err != nil { return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) } sf.value = x default: return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) } return sf, false, nil } // Map fields may have key types of non-float scalars, strings and enums. // The easiest way to sort them in some deterministic order is to use fmt. // If this turns out to be inefficient we can always consider other options, // such as doing a Schwartzian transform. func mapKeys(vs []reflect.Value) sort.Interface { s := mapKeySorter{ vs: vs, // default Less function: textual comparison less: func(a, b reflect.Value) bool { return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) }, } // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; // numeric keys are sorted numerically. 
if len(vs) == 0 { return s } switch vs[0].Kind() { case reflect.Int32, reflect.Int64: s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } case reflect.Uint32, reflect.Uint64: s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } } return s } type mapKeySorter struct { vs []reflect.Value less func(a, b reflect.Value) bool } func (s mapKeySorter) Len() int { return len(s.vs) } func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } func (s mapKeySorter) Less(i, j int) bool { return s.less(s.vs[i], s.vs[j]) } // isProto3Zero reports whether v is a zero proto3 value. func isProto3Zero(v reflect.Value) bool { switch v.Kind() { case reflect.Bool: return !v.Bool() case reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Uint32, reflect.Uint64: return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 case reflect.String: return v.String() == "" } return false } // ProtoPackageIsVersion2 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. const ProtoPackageIsVersion2 = true // ProtoPackageIsVersion1 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. const ProtoPackageIsVersion1 = true message_set.go000066400000000000000000000216111324746544700350020ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/proto// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Support for message sets. */ import ( "bytes" "encoding/json" "errors" "fmt" "reflect" "sort" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. // A message type ID is required for storing a protocol buffer in a message set. 
var errNoMessageTypeID = errors.New("proto does not have a message type ID") // The first two types (_MessageSet_Item and messageSet) // model what the protocol compiler produces for the following protocol message: // message MessageSet { // repeated group Item = 1 { // required int32 type_id = 2; // required string message = 3; // }; // } // That is the MessageSet wire format. We can't use a proto to generate these // because that would introduce a circular dependency between it and this package. type _MessageSet_Item struct { TypeId *int32 `protobuf:"varint,2,req,name=type_id"` Message []byte `protobuf:"bytes,3,req,name=message"` } type messageSet struct { Item []*_MessageSet_Item `protobuf:"group,1,rep"` XXX_unrecognized []byte // TODO: caching? } // Make sure messageSet is a Message. var _ Message = (*messageSet)(nil) // messageTypeIder is an interface satisfied by a protocol buffer type // that may be stored in a MessageSet. type messageTypeIder interface { MessageTypeId() int32 } func (ms *messageSet) find(pb Message) *_MessageSet_Item { mti, ok := pb.(messageTypeIder) if !ok { return nil } id := mti.MessageTypeId() for _, item := range ms.Item { if *item.TypeId == id { return item } } return nil } func (ms *messageSet) Has(pb Message) bool { if ms.find(pb) != nil { return true } return false } func (ms *messageSet) Unmarshal(pb Message) error { if item := ms.find(pb); item != nil { return Unmarshal(item.Message, pb) } if _, ok := pb.(messageTypeIder); !ok { return errNoMessageTypeID } return nil // TODO: return error instead? } func (ms *messageSet) Marshal(pb Message) error { msg, err := Marshal(pb) if err != nil { return err } if item := ms.find(pb); item != nil { // reuse existing item item.Message = msg return nil } mti, ok := pb.(messageTypeIder) if !ok { return errNoMessageTypeID } mtid := mti.MessageTypeId() ms.Item = append(ms.Item, &_MessageSet_Item{ TypeId: &mtid, Message: msg, }) return nil } func (ms *messageSet) Reset() { *ms = messageSet{} } func (ms *messageSet) String() string { return CompactTextString(ms) } func (*messageSet) ProtoMessage() {} // Support for the message_set_wire_format message option. func skipVarint(buf []byte) []byte { i := 0 for ; buf[i]&0x80 != 0; i++ { } return buf[i+1:] } // MarshalMessageSet encodes the extension map represented by m in the message set wire format. // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. func MarshalMessageSet(exts interface{}) ([]byte, error) { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: if err := encodeExtensions(exts); err != nil { return nil, err } m, _ = exts.extensionsRead() case map[int32]Extension: if err := encodeExtensionsMap(exts); err != nil { return nil, err } m = exts default: return nil, errors.New("proto: not an extension map") } // Sort extension IDs to provide a deterministic encoding. // See also enc_map in encode.go. ids := make([]int, 0, len(m)) for id := range m { ids = append(ids, int(id)) } sort.Ints(ids) ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} for _, id := range ids { e := m[int32(id)] // Remove the wire type and field number varint, as well as the length varint. msg := skipVarint(skipVarint(e.enc)) ms.Item = append(ms.Item, &_MessageSet_Item{ TypeId: Int32(int32(id)), Message: msg, }) } return Marshal(ms) } // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. 
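// The MessageSet wire format described above (group Item = 1 wrapping a
// type_id varint and a length-delimited message) can be framed by hand with
// the Buffer primitives; a sketch with made-up type_id and payload values:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	payload := []byte{0x08, 0x2a} // an already-encoded message: field 1 = 42

	b := proto.NewBuffer(nil)
	b.EncodeVarint(uint64(1<<3 | proto.WireStartGroup)) // open the Item group
	b.EncodeVarint(uint64(2<<3 | proto.WireVarint))     // type_id tag
	b.EncodeVarint(12345)                               // the extension's field number
	b.EncodeVarint(uint64(3<<3 | proto.WireBytes))      // message tag
	b.EncodeRawBytes(payload)                           // length-prefixed payload
	b.EncodeVarint(uint64(1<<3 | proto.WireEndGroup))   // close the Item group
	fmt.Printf("% x\n", b.Bytes())
}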
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: m = exts.extensionsWrite() case map[int32]Extension: m = exts default: return errors.New("proto: not an extension map") } ms := new(messageSet) if err := Unmarshal(buf, ms); err != nil { return err } for _, item := range ms.Item { id := *item.TypeId msg := item.Message // Restore wire type and field number varint, plus length varint. // Be careful to preserve duplicate items. b := EncodeVarint(uint64(id)<<3 | WireBytes) if ext, ok := m[id]; ok { // Existing data; rip off the tag and length varint // so we join the new data correctly. // We can assume that ext.enc is set because we are unmarshaling. o := ext.enc[len(b):] // skip wire type and field number _, n := DecodeVarint(o) // calculate length of length varint o = o[n:] // skip length varint msg = append(o, msg...) // join old data and new data } b = append(b, EncodeVarint(uint64(len(msg)))...) b = append(b, msg...) m[id] = Extension{enc: b} } return nil } // MarshalMessageSetJSON encodes the extension map represented by m in JSON format. // It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: m, _ = exts.extensionsRead() case map[int32]Extension: m = exts default: return nil, errors.New("proto: not an extension map") } var b bytes.Buffer b.WriteByte('{') // Process the map in key order for deterministic output. ids := make([]int32, 0, len(m)) for id := range m { ids = append(ids, id) } sort.Sort(int32Slice(ids)) // int32Slice defined in text.go for i, id := range ids { ext := m[id] if i > 0 { b.WriteByte(',') } msd, ok := messageSetMap[id] if !ok { // Unknown type; we can't render it, so skip it. continue } fmt.Fprintf(&b, `"[%s]":`, msd.name) x := ext.value if x == nil { x = reflect.New(msd.t.Elem()).Interface() if err := Unmarshal(ext.enc, x.(Message)); err != nil { return nil, err } } d, err := json.Marshal(x) if err != nil { return nil, err } b.Write(d) } b.WriteByte('}') return b.Bytes(), nil } // UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. // It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { // Common-case fast path. if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { return nil } // This is fairly tricky, and it's not clear that it is needed. return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") } // A global registry of types that can be used in a MessageSet. var messageSetMap = make(map[int32]messageSetDesc) type messageSetDesc struct { t reflect.Type // pointer to struct name string } // RegisterMessageSetType is called from the generated code. 
func RegisterMessageSetType(m Message, fieldNum int32, name string) { messageSetMap[fieldNum] = messageSetDesc{ t: reflect.TypeOf(m), name: name, } } pointer_reflect.go000066400000000000000000000331101324746544700356640ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/proto// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2012 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // +build appengine js // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can // be used on App Engine. package proto import ( "math" "reflect" ) // A structPointer is a pointer to a struct. type structPointer struct { v reflect.Value } // toStructPointer returns a structPointer equivalent to the given reflect value. // The reflect value must itself be a pointer to a struct. func toStructPointer(v reflect.Value) structPointer { return structPointer{v} } // IsNil reports whether p is nil. func structPointer_IsNil(p structPointer) bool { return p.v.IsNil() } // Interface returns the struct pointer as an interface value. func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { return p.v.Interface() } // A field identifies a field in a struct, accessible from a structPointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. type field []int // toField returns a field equivalent to the given reflect field. func toField(f *reflect.StructField) field { return f.Index } // invalidField is an invalid field identifier. var invalidField = field(nil) // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } // field returns the given field in the struct as a reflect value. 
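// Illustrative sketch (not part of the vendored pointer_reflect.go): on the
// appengine/js build, a field is identified by its reflect index path and its
// address is produced with FieldByIndex(...).Addr().Interface(), as the
// accessors in this file do. The hypothetical struct and helper below show
// the same pattern in isolation.
package main

import (
	"fmt"
	"reflect"
)

type example struct {
	Name  *string
	Count int32
}

// addrOfField mirrors structPointer_ifield: given a pointer to a struct and a
// field index path, it returns an interface holding the field's address.
func addrOfField(ptr interface{}, index []int) interface{} {
	v := reflect.ValueOf(ptr).Elem() // the struct itself, addressable
	return v.FieldByIndex(index).Addr().Interface()
}

func main() {
	e := &example{}
	f, _ := reflect.TypeOf(e).Elem().FieldByName("Count")

	// The returned interface can be asserted to the concrete pointer type,
	// just as structPointer_Word32Val and friends do.
	p := addrOfField(e, f.Index).(*int32)
	*p = 42
	fmt.Println(e.Count) // 42
}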
func structPointer_field(p structPointer, f field) reflect.Value { // Special case: an extension map entry with a value of type T // passes a *T to the struct-handling code with a zero field, // expecting that it will be treated as equivalent to *struct{ X T }, // which has the same memory layout. We have to handle that case // specially, because reflect will panic if we call FieldByIndex on a // non-struct. if f == nil { return p.v.Elem() } return p.v.Elem().FieldByIndex(f) } // ifield returns the given field in the struct as an interface value. func structPointer_ifield(p structPointer, f field) interface{} { return structPointer_field(p, f).Addr().Interface() } // Bytes returns the address of a []byte field in the struct. func structPointer_Bytes(p structPointer, f field) *[]byte { return structPointer_ifield(p, f).(*[]byte) } // BytesSlice returns the address of a [][]byte field in the struct. func structPointer_BytesSlice(p structPointer, f field) *[][]byte { return structPointer_ifield(p, f).(*[][]byte) } // Bool returns the address of a *bool field in the struct. func structPointer_Bool(p structPointer, f field) **bool { return structPointer_ifield(p, f).(**bool) } // BoolVal returns the address of a bool field in the struct. func structPointer_BoolVal(p structPointer, f field) *bool { return structPointer_ifield(p, f).(*bool) } // BoolSlice returns the address of a []bool field in the struct. func structPointer_BoolSlice(p structPointer, f field) *[]bool { return structPointer_ifield(p, f).(*[]bool) } // String returns the address of a *string field in the struct. func structPointer_String(p structPointer, f field) **string { return structPointer_ifield(p, f).(**string) } // StringVal returns the address of a string field in the struct. func structPointer_StringVal(p structPointer, f field) *string { return structPointer_ifield(p, f).(*string) } // StringSlice returns the address of a []string field in the struct. func structPointer_StringSlice(p structPointer, f field) *[]string { return structPointer_ifield(p, f).(*[]string) } // Extensions returns the address of an extension map field in the struct. func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { return structPointer_ifield(p, f).(*XXX_InternalExtensions) } // ExtMap returns the address of an extension map field in the struct. func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return structPointer_ifield(p, f).(*map[int32]Extension) } // NewAt returns the reflect.Value for a pointer to a field in the struct. func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { return structPointer_field(p, f).Addr() } // SetStructPointer writes a *struct field in the struct. func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { structPointer_field(p, f).Set(q.v) } // GetStructPointer reads a *struct field in the struct. func structPointer_GetStructPointer(p structPointer, f field) structPointer { return structPointer{structPointer_field(p, f)} } // StructPointerSlice the address of a []*struct field in the struct. func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { return structPointerSlice{structPointer_field(p, f)} } // A structPointerSlice represents the address of a slice of pointers to structs // (themselves messages or groups). That is, v.Type() is *[]*struct{...}. 
type structPointerSlice struct { v reflect.Value } func (p structPointerSlice) Len() int { return p.v.Len() } func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } func (p structPointerSlice) Append(q structPointer) { p.v.Set(reflect.Append(p.v, q.v)) } var ( int32Type = reflect.TypeOf(int32(0)) uint32Type = reflect.TypeOf(uint32(0)) float32Type = reflect.TypeOf(float32(0)) int64Type = reflect.TypeOf(int64(0)) uint64Type = reflect.TypeOf(uint64(0)) float64Type = reflect.TypeOf(float64(0)) ) // A word32 represents a field of type *int32, *uint32, *float32, or *enum. // That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. type word32 struct { v reflect.Value } // IsNil reports whether p is nil. func word32_IsNil(p word32) bool { return p.v.IsNil() } // Set sets p to point at a newly allocated word with bits set to x. func word32_Set(p word32, o *Buffer, x uint32) { t := p.v.Type().Elem() switch t { case int32Type: if len(o.int32s) == 0 { o.int32s = make([]int32, uint32PoolSize) } o.int32s[0] = int32(x) p.v.Set(reflect.ValueOf(&o.int32s[0])) o.int32s = o.int32s[1:] return case uint32Type: if len(o.uint32s) == 0 { o.uint32s = make([]uint32, uint32PoolSize) } o.uint32s[0] = x p.v.Set(reflect.ValueOf(&o.uint32s[0])) o.uint32s = o.uint32s[1:] return case float32Type: if len(o.float32s) == 0 { o.float32s = make([]float32, uint32PoolSize) } o.float32s[0] = math.Float32frombits(x) p.v.Set(reflect.ValueOf(&o.float32s[0])) o.float32s = o.float32s[1:] return } // must be enum p.v.Set(reflect.New(t)) p.v.Elem().SetInt(int64(int32(x))) } // Get gets the bits pointed at by p, as a uint32. func word32_Get(p word32) uint32 { elem := p.v.Elem() switch elem.Kind() { case reflect.Int32: return uint32(elem.Int()) case reflect.Uint32: return uint32(elem.Uint()) case reflect.Float32: return math.Float32bits(float32(elem.Float())) } panic("unreachable") } // Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. func structPointer_Word32(p structPointer, f field) word32 { return word32{structPointer_field(p, f)} } // A word32Val represents a field of type int32, uint32, float32, or enum. // That is, v.Type() is int32, uint32, float32, or enum and v is assignable. type word32Val struct { v reflect.Value } // Set sets *p to x. func word32Val_Set(p word32Val, x uint32) { switch p.v.Type() { case int32Type: p.v.SetInt(int64(x)) return case uint32Type: p.v.SetUint(uint64(x)) return case float32Type: p.v.SetFloat(float64(math.Float32frombits(x))) return } // must be enum p.v.SetInt(int64(int32(x))) } // Get gets the bits pointed at by p, as a uint32. func word32Val_Get(p word32Val) uint32 { elem := p.v switch elem.Kind() { case reflect.Int32: return uint32(elem.Int()) case reflect.Uint32: return uint32(elem.Uint()) case reflect.Float32: return math.Float32bits(float32(elem.Float())) } panic("unreachable") } // Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. func structPointer_Word32Val(p structPointer, f field) word32Val { return word32Val{structPointer_field(p, f)} } // A word32Slice is a slice of 32-bit values. // That is, v.Type() is []int32, []uint32, []float32, or []enum. 
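// Illustrative sketch (not part of the vendored file): word32_Set above hands
// out pointers from per-Buffer pools (o.int32s, o.uint32s, o.float32s) that
// are refilled in fixed-size chunks, so decoding many optional scalar fields
// does not cost one heap allocation per field. The hypothetical pool below
// shows the same amortization idea in isolation; poolSize stands in for
// uint32PoolSize.
package main

import "fmt"

const poolSize = 16 // stands in for uint32PoolSize

type int32Pool struct {
	free []int32
}

// next returns a pointer to a fresh int32 set to x, refilling the backing
// slice only when it runs out, much like word32_Set does for *int32 fields.
func (p *int32Pool) next(x int32) *int32 {
	if len(p.free) == 0 {
		p.free = make([]int32, poolSize)
	}
	p.free[0] = x
	out := &p.free[0]
	p.free = p.free[1:]
	return out
}

func main() {
	var p int32Pool
	a, b := p.next(1), p.next(2)
	fmt.Println(*a, *b) // 1 2 — distinct pointers, one allocation per poolSize values
}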
type word32Slice struct { v reflect.Value } func (p word32Slice) Append(x uint32) { n, m := p.v.Len(), p.v.Cap() if n < m { p.v.SetLen(n + 1) } else { t := p.v.Type().Elem() p.v.Set(reflect.Append(p.v, reflect.Zero(t))) } elem := p.v.Index(n) switch elem.Kind() { case reflect.Int32: elem.SetInt(int64(int32(x))) case reflect.Uint32: elem.SetUint(uint64(x)) case reflect.Float32: elem.SetFloat(float64(math.Float32frombits(x))) } } func (p word32Slice) Len() int { return p.v.Len() } func (p word32Slice) Index(i int) uint32 { elem := p.v.Index(i) switch elem.Kind() { case reflect.Int32: return uint32(elem.Int()) case reflect.Uint32: return uint32(elem.Uint()) case reflect.Float32: return math.Float32bits(float32(elem.Float())) } panic("unreachable") } // Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. func structPointer_Word32Slice(p structPointer, f field) word32Slice { return word32Slice{structPointer_field(p, f)} } // word64 is like word32 but for 64-bit values. type word64 struct { v reflect.Value } func word64_Set(p word64, o *Buffer, x uint64) { t := p.v.Type().Elem() switch t { case int64Type: if len(o.int64s) == 0 { o.int64s = make([]int64, uint64PoolSize) } o.int64s[0] = int64(x) p.v.Set(reflect.ValueOf(&o.int64s[0])) o.int64s = o.int64s[1:] return case uint64Type: if len(o.uint64s) == 0 { o.uint64s = make([]uint64, uint64PoolSize) } o.uint64s[0] = x p.v.Set(reflect.ValueOf(&o.uint64s[0])) o.uint64s = o.uint64s[1:] return case float64Type: if len(o.float64s) == 0 { o.float64s = make([]float64, uint64PoolSize) } o.float64s[0] = math.Float64frombits(x) p.v.Set(reflect.ValueOf(&o.float64s[0])) o.float64s = o.float64s[1:] return } panic("unreachable") } func word64_IsNil(p word64) bool { return p.v.IsNil() } func word64_Get(p word64) uint64 { elem := p.v.Elem() switch elem.Kind() { case reflect.Int64: return uint64(elem.Int()) case reflect.Uint64: return elem.Uint() case reflect.Float64: return math.Float64bits(elem.Float()) } panic("unreachable") } func structPointer_Word64(p structPointer, f field) word64 { return word64{structPointer_field(p, f)} } // word64Val is like word32Val but for 64-bit values. 
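// Illustrative sketch (not part of the vendored file): word32Slice.Append
// above grows a reflect-wrapped slice by calling SetLen when spare capacity
// exists and falls back to reflect.Append only when it does not, so most
// appends avoid a reallocation. The same pattern on a hypothetical []int32:
package main

import (
	"fmt"
	"reflect"
)

// appendInt32 appends x to the []int32 that v wraps; v must be addressable
// (obtained through a pointer), as it is inside the decoder.
func appendInt32(v reflect.Value, x int32) {
	n, m := v.Len(), v.Cap()
	if n < m {
		v.SetLen(n + 1) // reuse spare capacity
	} else {
		v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) // reallocate
	}
	v.Index(n).SetInt(int64(x))
}

func main() {
	s := make([]int32, 0, 4)
	v := reflect.ValueOf(&s).Elem()
	for i := int32(1); i <= 6; i++ {
		appendInt32(v, i)
	}
	fmt.Println(s) // [1 2 3 4 5 6]
}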
type word64Val struct { v reflect.Value } func word64Val_Set(p word64Val, o *Buffer, x uint64) { switch p.v.Type() { case int64Type: p.v.SetInt(int64(x)) return case uint64Type: p.v.SetUint(x) return case float64Type: p.v.SetFloat(math.Float64frombits(x)) return } panic("unreachable") } func word64Val_Get(p word64Val) uint64 { elem := p.v switch elem.Kind() { case reflect.Int64: return uint64(elem.Int()) case reflect.Uint64: return elem.Uint() case reflect.Float64: return math.Float64bits(elem.Float()) } panic("unreachable") } func structPointer_Word64Val(p structPointer, f field) word64Val { return word64Val{structPointer_field(p, f)} } type word64Slice struct { v reflect.Value } func (p word64Slice) Append(x uint64) { n, m := p.v.Len(), p.v.Cap() if n < m { p.v.SetLen(n + 1) } else { t := p.v.Type().Elem() p.v.Set(reflect.Append(p.v, reflect.Zero(t))) } elem := p.v.Index(n) switch elem.Kind() { case reflect.Int64: elem.SetInt(int64(int64(x))) case reflect.Uint64: elem.SetUint(uint64(x)) case reflect.Float64: elem.SetFloat(float64(math.Float64frombits(x))) } } func (p word64Slice) Len() int { return p.v.Len() } func (p word64Slice) Index(i int) uint64 { elem := p.v.Index(i) switch elem.Kind() { case reflect.Int64: return uint64(elem.Int()) case reflect.Uint64: return uint64(elem.Uint()) case reflect.Float64: return math.Float64bits(float64(elem.Float())) } panic("unreachable") } func structPointer_Word64Slice(p structPointer, f field) word64Slice { return word64Slice{structPointer_field(p, f)} } pointer_unsafe.go000066400000000000000000000224251324746544700355300ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/proto// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2012 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // +build !appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. 
package proto import ( "reflect" "unsafe" ) // NOTE: These type_Foo functions would more idiomatically be methods, // but Go does not allow methods on pointer types, and we must preserve // some pointer type for the garbage collector. We use these // funcs with clunky names as our poor approximation to methods. // // An alternative would be // type structPointer struct { p unsafe.Pointer } // but that does not registerize as well. // A structPointer is a pointer to a struct. type structPointer unsafe.Pointer // toStructPointer returns a structPointer equivalent to the given reflect value. func toStructPointer(v reflect.Value) structPointer { return structPointer(unsafe.Pointer(v.Pointer())) } // IsNil reports whether p is nil. func structPointer_IsNil(p structPointer) bool { return p == nil } // Interface returns the struct pointer, assumed to have element type t, // as an interface value. func structPointer_Interface(p structPointer, t reflect.Type) interface{} { return reflect.NewAt(t, unsafe.Pointer(p)).Interface() } // A field identifies a field in a struct, accessible from a structPointer. // In this implementation, a field is identified by its byte offset from the start of the struct. type field uintptr // toField returns a field equivalent to the given reflect field. func toField(f *reflect.StructField) field { return field(f.Offset) } // invalidField is an invalid field identifier. const invalidField = ^field(0) // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != ^field(0) } // Bytes returns the address of a []byte field in the struct. func structPointer_Bytes(p structPointer, f field) *[]byte { return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // BytesSlice returns the address of a [][]byte field in the struct. func structPointer_BytesSlice(p structPointer, f field) *[][]byte { return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // Bool returns the address of a *bool field in the struct. func structPointer_Bool(p structPointer, f field) **bool { return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // BoolVal returns the address of a bool field in the struct. func structPointer_BoolVal(p structPointer, f field) *bool { return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // BoolSlice returns the address of a []bool field in the struct. func structPointer_BoolSlice(p structPointer, f field) *[]bool { return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // String returns the address of a *string field in the struct. func structPointer_String(p structPointer, f field) **string { return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // StringVal returns the address of a string field in the struct. func structPointer_StringVal(p structPointer, f field) *string { return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // StringSlice returns the address of a []string field in the struct. func structPointer_StringSlice(p structPointer, f field) *[]string { return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // ExtMap returns the address of an extension map field in the struct. func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) } func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // NewAt returns the reflect.Value for a pointer to a field in the struct. 
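// Illustrative sketch (not part of the vendored pointer_unsafe.go): on the
// !appengine,!js build a field is identified by its byte offset
// (reflect.StructField.Offset) and accessed by adding that offset to the
// struct pointer, exactly as the accessors above do. The same idea on a
// hypothetical struct:
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type example struct {
	Name  *string
	Count int32
}

func main() {
	e := &example{}
	f, _ := reflect.TypeOf(e).Elem().FieldByName("Count")

	// toField in pointer_unsafe.go is just field(f.Offset); accessors like
	// structPointer_Word32Val then cast base+offset back to the concrete
	// pointer type.
	p := (*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(e)) + f.Offset))
	*p = 42
	fmt.Println(e.Count) // 42
}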
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) } // SetStructPointer writes a *struct field in the struct. func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q } // GetStructPointer reads a *struct field in the struct. func structPointer_GetStructPointer(p structPointer, f field) structPointer { return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // StructPointerSlice the address of a []*struct field in the struct. func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). type structPointerSlice []structPointer func (v *structPointerSlice) Len() int { return len(*v) } func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } // A word32 is the address of a "pointer to 32-bit value" field. type word32 **uint32 // IsNil reports whether *v is nil. func word32_IsNil(p word32) bool { return *p == nil } // Set sets *v to point at a newly allocated word set to x. func word32_Set(p word32, o *Buffer, x uint32) { if len(o.uint32s) == 0 { o.uint32s = make([]uint32, uint32PoolSize) } o.uint32s[0] = x *p = &o.uint32s[0] o.uint32s = o.uint32s[1:] } // Get gets the value pointed at by *v. func word32_Get(p word32) uint32 { return **p } // Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. func structPointer_Word32(p structPointer, f field) word32 { return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) } // A word32Val is the address of a 32-bit value field. type word32Val *uint32 // Set sets *p to x. func word32Val_Set(p word32Val, x uint32) { *p = x } // Get gets the value pointed at by p. func word32Val_Get(p word32Val) uint32 { return *p } // Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. func structPointer_Word32Val(p structPointer, f field) word32Val { return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) } // A word32Slice is a slice of 32-bit values. type word32Slice []uint32 func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } func (v *word32Slice) Len() int { return len(*v) } func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } // Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. func structPointer_Word32Slice(p structPointer, f field) *word32Slice { return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) } // word64 is like word32 but for 64-bit values. type word64 **uint64 func word64_Set(p word64, o *Buffer, x uint64) { if len(o.uint64s) == 0 { o.uint64s = make([]uint64, uint64PoolSize) } o.uint64s[0] = x *p = &o.uint64s[0] o.uint64s = o.uint64s[1:] } func word64_IsNil(p word64) bool { return *p == nil } func word64_Get(p word64) uint64 { return **p } func structPointer_Word64(p structPointer, f field) word64 { return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) } // word64Val is like word32Val but for 64-bit values. 
type word64Val *uint64 func word64Val_Set(p word64Val, o *Buffer, x uint64) { *p = x } func word64Val_Get(p word64Val) uint64 { return *p } func structPointer_Word64Val(p structPointer, f field) word64Val { return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) } // word64Slice is like word32Slice but for 64-bit values. type word64Slice []uint64 func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } func (v *word64Slice) Len() int { return len(*v) } func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } func structPointer_Word64Slice(p structPointer, f field) *word64Slice { return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) } properties.go000066400000000000000000000623311324746544700347030ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/proto// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Routines for encoding data into the wire format for protocol buffers. */ import ( "fmt" "log" "os" "reflect" "sort" "strconv" "strings" "sync" ) const debug bool = false // Constants that identify the encoding of a value on the wire. const ( WireVarint = 0 WireFixed64 = 1 WireBytes = 2 WireStartGroup = 3 WireEndGroup = 4 WireFixed32 = 5 ) const startSize = 10 // initial slice/string sizes // Encoders are defined in encode.go // An encoder outputs the full representation of a field, including its // tag and encoder type. type encoder func(p *Buffer, prop *Properties, base structPointer) error // A valueEncoder encodes a single integer in a particular encoding. type valueEncoder func(o *Buffer, x uint64) error // Sizers are defined in encode.go // A sizer returns the encoded size of a field, including its tag and encoder // type. type sizer func(prop *Properties, base structPointer) int // A valueSizer returns the encoded size of a single integer in a particular // encoding. 
type valueSizer func(x uint64) int // Decoders are defined in decode.go // A decoder creates a value from its wire representation. // Unrecognized subelements are saved in unrec. type decoder func(p *Buffer, prop *Properties, base structPointer) error // A valueDecoder decodes a single integer in a particular encoding. type valueDecoder func(o *Buffer) (x uint64, err error) // A oneofMarshaler does the marshaling for all oneof fields in a message. type oneofMarshaler func(Message, *Buffer) error // A oneofUnmarshaler does the unmarshaling for a oneof field in a message. type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) // A oneofSizer does the sizing for all oneof fields in a message. type oneofSizer func(Message) int // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. type tagMap struct { fastTags []int slowTags map[int]int } // tagMapFastLimit is the upper bound on the tag number that will be stored in // the tagMap slice rather than its map. const tagMapFastLimit = 1024 func (p *tagMap) get(t int) (int, bool) { if t > 0 && t < tagMapFastLimit { if t >= len(p.fastTags) { return 0, false } fi := p.fastTags[t] return fi, fi >= 0 } fi, ok := p.slowTags[t] return fi, ok } func (p *tagMap) put(t int, fi int) { if t > 0 && t < tagMapFastLimit { for len(p.fastTags) < t+1 { p.fastTags = append(p.fastTags, -1) } p.fastTags[t] = fi return } if p.slowTags == nil { p.slowTags = make(map[int]int) } p.slowTags[t] = fi } // StructProperties represents properties for all the fields of a struct. // decoderTags and decoderOrigNames should only be used by the decoder. type StructProperties struct { Prop []*Properties // properties for each field reqCount int // required count decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order unrecField field // field id of the XXX_unrecognized []byte field extendable bool // is this an extendable proto oneofMarshaler oneofMarshaler oneofUnmarshaler oneofUnmarshaler oneofSizer oneofSizer stype reflect.Type // OneofTypes contains information about the oneof fields in this message. // It is keyed by the original name of a field. OneofTypes map[string]*OneofProperties } // OneofProperties represents information about a specific field in a oneof. type OneofProperties struct { Type reflect.Type // pointer to generated struct type for this oneof field Field int // struct field number of the containing oneof in the message Prop *Properties } // Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. // See encode.go, (*Buffer).enc_struct. func (sp *StructProperties) Len() int { return len(sp.order) } func (sp *StructProperties) Less(i, j int) bool { return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag } func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } // Properties represents the protocol-specific behavior of a single struct field. 
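// Illustrative sketch (not part of the vendored properties.go): tagMap above
// keeps field lookups for small tag numbers in a plain slice (fastTags) and
// only falls back to a map for tags >= tagMapFastLimit, because encoded
// protocol buffers almost always use small, dense tag numbers. A hypothetical
// stand-alone version of the same hybrid container:
package main

import "fmt"

const fastLimit = 1024 // stands in for tagMapFastLimit

type hybridMap struct {
	fast []int       // index = tag, value = field index, -1 = absent
	slow map[int]int // rare large tags
}

func (h *hybridMap) put(tag, fieldIndex int) {
	if tag > 0 && tag < fastLimit {
		for len(h.fast) < tag+1 {
			h.fast = append(h.fast, -1)
		}
		h.fast[tag] = fieldIndex
		return
	}
	if h.slow == nil {
		h.slow = make(map[int]int)
	}
	h.slow[tag] = fieldIndex
}

func (h *hybridMap) get(tag int) (int, bool) {
	if tag > 0 && tag < fastLimit {
		if tag >= len(h.fast) {
			return 0, false
		}
		fi := h.fast[tag]
		return fi, fi >= 0
	}
	fi, ok := h.slow[tag]
	return fi, ok
}

func main() {
	var h hybridMap
	h.put(3, 0)     // small tag: slice path
	h.put(90210, 1) // large tag: map path
	fmt.Println(h.get(3))     // 0 true
	fmt.Println(h.get(90210)) // 1 true
	fmt.Println(h.get(7))     // 0 false
}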
type Properties struct {
	Name     string // name of the field, for error messages
	OrigName string // original name before protocol compiler (always set)
	JSONName string // name to use for JSON; determined by protoc
	Wire     string
	WireType int
	Tag      int
	Required bool
	Optional bool
	Repeated bool
	Packed   bool   // relevant for repeated primitives only
	Enum     string // set for enum types only
	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
	oneof    bool   // whether this is a oneof field

	Default    string // default value
	HasDefault bool   // whether an explicit default was provided
	def_uint64 uint64

	enc           encoder
	valEnc        valueEncoder // set for bool and numeric types only
	field         field
	tagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)
	tagbuf        [8]byte
	stype         reflect.Type      // set for struct types only
	sprop         *StructProperties // set for struct types only
	isMarshaler   bool
	isUnmarshaler bool

	mtype    reflect.Type // set for map types only
	mkeyprop *Properties  // set for map types only
	mvalprop *Properties  // set for map types only

	size    sizer
	valSize valueSizer // set for bool and numeric types only

	dec    decoder
	valDec valueDecoder // set for bool and numeric types only

	// If this is a packable field, this will be the decoder for the packed version of the field.
	packedDec decoder
}

// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
	s := p.Wire
	s += ","
	s += strconv.Itoa(p.Tag)
	if p.Required {
		s += ",req"
	}
	if p.Optional {
		s += ",opt"
	}
	if p.Repeated {
		s += ",rep"
	}
	if p.Packed {
		s += ",packed"
	}
	s += ",name=" + p.OrigName
	if p.JSONName != p.OrigName {
		s += ",json=" + p.JSONName
	}
	if p.proto3 {
		s += ",proto3"
	}
	if p.oneof {
		s += ",oneof"
	}
	if len(p.Enum) > 0 {
		s += ",enum=" + p.Enum
	}
	if p.HasDefault {
		s += ",def=" + p.Default
	}
	return s
}

// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(s string) {
	// "bytes,49,opt,name=foo,def=hello!"
	fields := strings.Split(s, ",") // breaks def=, but handled below.
if len(fields) < 2 { fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) return } p.Wire = fields[0] switch p.Wire { case "varint": p.WireType = WireVarint p.valEnc = (*Buffer).EncodeVarint p.valDec = (*Buffer).DecodeVarint p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 p.valEnc = (*Buffer).EncodeFixed32 p.valDec = (*Buffer).DecodeFixed32 p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 p.valEnc = (*Buffer).EncodeFixed64 p.valDec = (*Buffer).DecodeFixed64 p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint p.valEnc = (*Buffer).EncodeZigzag32 p.valDec = (*Buffer).DecodeZigzag32 p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint p.valEnc = (*Buffer).EncodeZigzag64 p.valDec = (*Buffer).DecodeZigzag64 p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types default: fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) return } var err error p.Tag, err = strconv.Atoi(fields[1]) if err != nil { return } for i := 2; i < len(fields); i++ { f := fields[i] switch { case f == "req": p.Required = true case f == "opt": p.Optional = true case f == "rep": p.Repeated = true case f == "packed": p.Packed = true case strings.HasPrefix(f, "name="): p.OrigName = f[5:] case strings.HasPrefix(f, "json="): p.JSONName = f[5:] case strings.HasPrefix(f, "enum="): p.Enum = f[5:] case f == "proto3": p.proto3 = true case f == "oneof": p.oneof = true case strings.HasPrefix(f, "def="): p.HasDefault = true p.Default = f[4:] // rest of string if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") break } } } } func logNoSliceEnc(t1, t2 reflect.Type) { fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) } var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() // Initialize the fields for encoding and decoding. 
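// Illustrative sketch (not part of the vendored properties.go): Parse above
// consumes struct tags of the form "wiretype,tagnumber,opt|req|rep,name=...",
// as emitted by protoc-gen-go (compare the _MessageSet_Item fields earlier in
// this archive). The hypothetical helper below pulls such a tag out of a
// struct field with the standard library and splits it the same way.
package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

type item struct {
	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
	Message []byte `protobuf:"bytes,3,req,name=message"`
}

func describe(f reflect.StructField) {
	fields := strings.Split(f.Tag.Get("protobuf"), ",")
	if len(fields) < 2 {
		fmt.Printf("%s: no protobuf tag\n", f.Name)
		return
	}
	tag, _ := strconv.Atoi(fields[1])
	fmt.Printf("%s: wire=%s tag=%d rest=%v\n", f.Name, fields[0], tag, fields[2:])
}

func main() {
	t := reflect.TypeOf(item{})
	for i := 0; i < t.NumField(); i++ {
		describe(t.Field(i))
	}
	// Output:
	// TypeId: wire=varint tag=2 rest=[req name=type_id]
	// Message: wire=bytes tag=3 rest=[req name=message]
}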
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { p.enc = nil p.dec = nil p.size = nil switch t1 := typ; t1.Kind() { default: fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) // proto3 scalar types case reflect.Bool: p.enc = (*Buffer).enc_proto3_bool p.dec = (*Buffer).dec_proto3_bool p.size = size_proto3_bool case reflect.Int32: p.enc = (*Buffer).enc_proto3_int32 p.dec = (*Buffer).dec_proto3_int32 p.size = size_proto3_int32 case reflect.Uint32: p.enc = (*Buffer).enc_proto3_uint32 p.dec = (*Buffer).dec_proto3_int32 // can reuse p.size = size_proto3_uint32 case reflect.Int64, reflect.Uint64: p.enc = (*Buffer).enc_proto3_int64 p.dec = (*Buffer).dec_proto3_int64 p.size = size_proto3_int64 case reflect.Float32: p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits p.dec = (*Buffer).dec_proto3_int32 p.size = size_proto3_uint32 case reflect.Float64: p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits p.dec = (*Buffer).dec_proto3_int64 p.size = size_proto3_int64 case reflect.String: p.enc = (*Buffer).enc_proto3_string p.dec = (*Buffer).dec_proto3_string p.size = size_proto3_string case reflect.Ptr: switch t2 := t1.Elem(); t2.Kind() { default: fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) break case reflect.Bool: p.enc = (*Buffer).enc_bool p.dec = (*Buffer).dec_bool p.size = size_bool case reflect.Int32: p.enc = (*Buffer).enc_int32 p.dec = (*Buffer).dec_int32 p.size = size_int32 case reflect.Uint32: p.enc = (*Buffer).enc_uint32 p.dec = (*Buffer).dec_int32 // can reuse p.size = size_uint32 case reflect.Int64, reflect.Uint64: p.enc = (*Buffer).enc_int64 p.dec = (*Buffer).dec_int64 p.size = size_int64 case reflect.Float32: p.enc = (*Buffer).enc_uint32 // can just treat them as bits p.dec = (*Buffer).dec_int32 p.size = size_uint32 case reflect.Float64: p.enc = (*Buffer).enc_int64 // can just treat them as bits p.dec = (*Buffer).dec_int64 p.size = size_int64 case reflect.String: p.enc = (*Buffer).enc_string p.dec = (*Buffer).dec_string p.size = size_string case reflect.Struct: p.stype = t1.Elem() p.isMarshaler = isMarshaler(t1) p.isUnmarshaler = isUnmarshaler(t1) if p.Wire == "bytes" { p.enc = (*Buffer).enc_struct_message p.dec = (*Buffer).dec_struct_message p.size = size_struct_message } else { p.enc = (*Buffer).enc_struct_group p.dec = (*Buffer).dec_struct_group p.size = size_struct_group } } case reflect.Slice: switch t2 := t1.Elem(); t2.Kind() { default: logNoSliceEnc(t1, t2) break case reflect.Bool: if p.Packed { p.enc = (*Buffer).enc_slice_packed_bool p.size = size_slice_packed_bool } else { p.enc = (*Buffer).enc_slice_bool p.size = size_slice_bool } p.dec = (*Buffer).dec_slice_bool p.packedDec = (*Buffer).dec_slice_packed_bool case reflect.Int32: if p.Packed { p.enc = (*Buffer).enc_slice_packed_int32 p.size = size_slice_packed_int32 } else { p.enc = (*Buffer).enc_slice_int32 p.size = size_slice_int32 } p.dec = (*Buffer).dec_slice_int32 p.packedDec = (*Buffer).dec_slice_packed_int32 case reflect.Uint32: if p.Packed { p.enc = (*Buffer).enc_slice_packed_uint32 p.size = size_slice_packed_uint32 } else { p.enc = (*Buffer).enc_slice_uint32 p.size = size_slice_uint32 } p.dec = (*Buffer).dec_slice_int32 p.packedDec = (*Buffer).dec_slice_packed_int32 case reflect.Int64, reflect.Uint64: if p.Packed { p.enc = (*Buffer).enc_slice_packed_int64 p.size = size_slice_packed_int64 } else { p.enc = (*Buffer).enc_slice_int64 p.size = size_slice_int64 } p.dec = (*Buffer).dec_slice_int64 p.packedDec = 
(*Buffer).dec_slice_packed_int64 case reflect.Uint8: p.dec = (*Buffer).dec_slice_byte if p.proto3 { p.enc = (*Buffer).enc_proto3_slice_byte p.size = size_proto3_slice_byte } else { p.enc = (*Buffer).enc_slice_byte p.size = size_slice_byte } case reflect.Float32, reflect.Float64: switch t2.Bits() { case 32: // can just treat them as bits if p.Packed { p.enc = (*Buffer).enc_slice_packed_uint32 p.size = size_slice_packed_uint32 } else { p.enc = (*Buffer).enc_slice_uint32 p.size = size_slice_uint32 } p.dec = (*Buffer).dec_slice_int32 p.packedDec = (*Buffer).dec_slice_packed_int32 case 64: // can just treat them as bits if p.Packed { p.enc = (*Buffer).enc_slice_packed_int64 p.size = size_slice_packed_int64 } else { p.enc = (*Buffer).enc_slice_int64 p.size = size_slice_int64 } p.dec = (*Buffer).dec_slice_int64 p.packedDec = (*Buffer).dec_slice_packed_int64 default: logNoSliceEnc(t1, t2) break } case reflect.String: p.enc = (*Buffer).enc_slice_string p.dec = (*Buffer).dec_slice_string p.size = size_slice_string case reflect.Ptr: switch t3 := t2.Elem(); t3.Kind() { default: fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) break case reflect.Struct: p.stype = t2.Elem() p.isMarshaler = isMarshaler(t2) p.isUnmarshaler = isUnmarshaler(t2) if p.Wire == "bytes" { p.enc = (*Buffer).enc_slice_struct_message p.dec = (*Buffer).dec_slice_struct_message p.size = size_slice_struct_message } else { p.enc = (*Buffer).enc_slice_struct_group p.dec = (*Buffer).dec_slice_struct_group p.size = size_slice_struct_group } } case reflect.Slice: switch t2.Elem().Kind() { default: fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) break case reflect.Uint8: p.enc = (*Buffer).enc_slice_slice_byte p.dec = (*Buffer).dec_slice_slice_byte p.size = size_slice_slice_byte } } case reflect.Map: p.enc = (*Buffer).enc_new_map p.dec = (*Buffer).dec_new_map p.size = size_new_map p.mtype = t1 p.mkeyprop = &Properties{} p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) p.mvalprop = &Properties{} vtype := p.mtype.Elem() if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { // The value type is not a message (*T) or bytes ([]byte), // so we need encoders for the pointer to this type. vtype = reflect.PtrTo(vtype) } p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } // precalculate tag code wire := p.WireType if p.Packed { wire = WireBytes } x := uint32(p.Tag)<<3 | uint32(wire) i := 0 for i = 0; x > 127; i++ { p.tagbuf[i] = 0x80 | uint8(x&0x7F) x >>= 7 } p.tagbuf[i] = uint8(x) p.tagcode = p.tagbuf[0 : i+1] if p.stype != nil { if lockGetProp { p.sprop = GetProperties(p.stype) } else { p.sprop = getPropertiesLocked(p.stype) } } } var ( marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() ) // isMarshaler reports whether type t implements Marshaler. func isMarshaler(t reflect.Type) bool { // We're checking for (likely) pointer-receiver methods // so if t is not a pointer, something is very wrong. // The calls above only invoke isMarshaler on pointer types. if t.Kind() != reflect.Ptr { panic("proto: misuse of isMarshaler") } return t.Implements(marshalerType) } // isUnmarshaler reports whether type t implements Unmarshaler. func isUnmarshaler(t reflect.Type) bool { // We're checking for (likely) pointer-receiver methods // so if t is not a pointer, something is very wrong. 
// The calls above only invoke isUnmarshaler on pointer types. if t.Kind() != reflect.Ptr { panic("proto: misuse of isUnmarshaler") } return t.Implements(unmarshalerType) } // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) } func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { // "bytes,49,opt,def=hello!" p.Name = name p.OrigName = name if f != nil { p.field = toField(f) } if tag == "" { return } p.Parse(tag) p.setEncAndDec(typ, f, lockGetProp) } var ( propertiesMu sync.RWMutex propertiesMap = make(map[reflect.Type]*StructProperties) ) // GetProperties returns the list of properties for the type represented by t. // t must represent a generated struct type of a protocol message. func GetProperties(t reflect.Type) *StructProperties { if t.Kind() != reflect.Struct { panic("proto: type must have kind struct") } // Most calls to GetProperties in a long-running program will be // retrieving details for types we have seen before. propertiesMu.RLock() sprop, ok := propertiesMap[t] propertiesMu.RUnlock() if ok { if collectStats { stats.Chit++ } return sprop } propertiesMu.Lock() sprop = getPropertiesLocked(t) propertiesMu.Unlock() return sprop } // getPropertiesLocked requires that propertiesMu is held. func getPropertiesLocked(t reflect.Type) *StructProperties { if prop, ok := propertiesMap[t]; ok { if collectStats { stats.Chit++ } return prop } if collectStats { stats.Cmiss++ } prop := new(StructProperties) // in case of recursive protos, fill this in now. propertiesMap[t] = prop // build properties prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || reflect.PtrTo(t).Implements(extendableProtoV1Type) prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) for i := 0; i < t.NumField(); i++ { f := t.Field(i) p := new(Properties) name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) if f.Name == "XXX_InternalExtensions" { // special case p.enc = (*Buffer).enc_exts p.dec = nil // not needed p.size = size_exts } else if f.Name == "XXX_extensions" { // special case p.enc = (*Buffer).enc_map p.dec = nil // not needed p.size = size_map } else if f.Name == "XXX_unrecognized" { // special case prop.unrecField = toField(&f) } oneof := f.Tag.Get("protobuf_oneof") // special case if oneof != "" { // Oneof fields don't use the traditional protobuf tag. p.OrigName = oneof } prop.Prop[i] = p prop.order[i] = i if debug { print(i, " ", f.Name, " ", t.String(), " ") if p.Tag > 0 { print(p.String()) } print("\n") } if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") } } // Re-order prop.order. sort.Sort(prop) type oneofMessage interface { XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { var oots []interface{} prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() prop.stype = t // Interpret oneof metadata. 
prop.OneofTypes = make(map[string]*OneofProperties) for _, oot := range oots { oop := &OneofProperties{ Type: reflect.ValueOf(oot).Type(), // *T Prop: new(Properties), } sft := oop.Type.Elem().Field(0) oop.Prop.Name = sft.Name oop.Prop.Parse(sft.Tag.Get("protobuf")) // There will be exactly one interface field that // this new value is assignable to. for i := 0; i < t.NumField(); i++ { f := t.Field(i) if f.Type.Kind() != reflect.Interface { continue } if !oop.Type.AssignableTo(f.Type) { continue } oop.Field = i break } prop.OneofTypes[oop.Prop.OrigName] = oop } } // build required counts // build tags reqCount := 0 prop.decoderOrigNames = make(map[string]int) for i, p := range prop.Prop { if strings.HasPrefix(p.Name, "XXX_") { // Internal fields should not appear in tags/origNames maps. // They are handled specially when encoding and decoding. continue } if p.Required { reqCount++ } prop.decoderTags.put(p.Tag, i) prop.decoderOrigNames[p.OrigName] = i } prop.reqCount = reqCount return prop } // Return the Properties object for the x[0]'th field of the structure. func propByIndex(t reflect.Type, x []int) *Properties { if len(x) != 1 { fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) return nil } prop := GetProperties(t) return prop.Prop[x[0]] } // Get the address and type of a pointer to a struct from an interface. func getbase(pb Message) (t reflect.Type, b structPointer, err error) { if pb == nil { err = ErrNil return } // get the reflect type of the pointer to the struct. t = reflect.TypeOf(pb) // get the address of the struct. value := reflect.ValueOf(pb) b = toStructPointer(value) return } // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. var enumValueMaps = make(map[string]map[string]int32) // RegisterEnum is called from the generated code to install the enum descriptor // maps into the global table to aid parsing text format protocol buffers. func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { if _, ok := enumValueMaps[typeName]; ok { panic("proto: duplicate enum registered: " + typeName) } enumValueMaps[typeName] = valueMap } // EnumValueMap returns the mapping from names to integers of the // enum type enumType, or a nil if not found. func EnumValueMap(enumType string) map[string]int32 { return enumValueMaps[enumType] } // A registry of all linked message types. // The string is a fully-qualified proto name ("pkg.Message"). var ( protoTypes = make(map[string]reflect.Type) revProtoTypes = make(map[reflect.Type]string) ) // RegisterType is called from generated code and maps from the fully qualified // proto name to the type (pointer to struct) of the protocol buffer. func RegisterType(x Message, name string) { if _, ok := protoTypes[name]; ok { // TODO: Some day, make this a panic. log.Printf("proto: duplicate proto type registered: %s", name) return } t := reflect.TypeOf(x) protoTypes[name] = t revProtoTypes[t] = name } // MessageName returns the fully-qualified proto name for the given message type. func MessageName(x Message) string { type xname interface { XXX_MessageName() string } if m, ok := x.(xname); ok { return m.XXX_MessageName() } return revProtoTypes[reflect.TypeOf(x)] } // MessageType returns the message type (pointer to struct) for a named message. func MessageType(name string) reflect.Type { return protoTypes[name] } // A registry of all linked proto files. 
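// Illustrative sketch (not part of the vendored properties.go): generated
// code calls RegisterType from an init function so that MessageType and
// MessageName can map between fully-qualified proto names and Go types. The
// hypothetical hand-written Ping message below exercises that registry
// directly; a real program would use a protoc-gen-go generated type instead.
package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
)

type Ping struct {
	Payload *string `protobuf:"bytes,1,opt,name=payload"`
}

// Minimal proto.Message implementation, as generated code would provide.
func (m *Ping) Reset()         { *m = Ping{} }
func (m *Ping) String() string { return proto.CompactTextString(m) }
func (*Ping) ProtoMessage()    {}

func init() {
	proto.RegisterType((*Ping)(nil), "example.Ping")
}

func main() {
	t := proto.MessageType("example.Ping") // reflect.Type of *Ping
	fmt.Println(t == reflect.TypeOf((*Ping)(nil))) // true

	m := reflect.New(t.Elem()).Interface().(proto.Message)
	fmt.Println(proto.MessageName(m)) // example.Ping
}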
var ( protoFiles = make(map[string][]byte) // file name => fileDescriptor ) // RegisterFile is called from generated code and maps from the // full file name of a .proto file to its compressed FileDescriptorProto. func RegisterFile(filename string, fileDescriptor []byte) { protoFiles[filename] = fileDescriptor } // FileDescriptor returns the compressed FileDescriptorProto for a .proto file. func FileDescriptor(filename string) []byte { return protoFiles[filename] } text.go000066400000000000000000000513141324746544700334720ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/proto// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto // Functions for writing the text protocol buffer format. import ( "bufio" "bytes" "encoding" "errors" "fmt" "io" "log" "math" "reflect" "sort" "strings" ) var ( newline = []byte("\n") spaces = []byte(" ") gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} backslashT = []byte{'\\', 't'} backslashDQ = []byte{'\\', '"'} backslashBS = []byte{'\\', '\\'} posInf = []byte("inf") negInf = []byte("-inf") nan = []byte("nan") ) type writer interface { io.Writer WriteByte(byte) error } // textWriter is an io.Writer that tracks its indentation level. type textWriter struct { ind int complete bool // if the current position is a complete line compact bool // whether to write out as a one-liner w writer } func (w *textWriter) WriteString(s string) (n int, err error) { if !strings.Contains(s, "\n") { if !w.compact && w.complete { w.writeIndent() } w.complete = false return io.WriteString(w.w, s) } // WriteString is typically called without newlines, so this // codepath and its copy are rare. We copy to avoid // duplicating all of Write's logic here. 
return w.Write([]byte(s)) } func (w *textWriter) Write(p []byte) (n int, err error) { newlines := bytes.Count(p, newline) if newlines == 0 { if !w.compact && w.complete { w.writeIndent() } n, err = w.w.Write(p) w.complete = false return n, err } frags := bytes.SplitN(p, newline, newlines+1) if w.compact { for i, frag := range frags { if i > 0 { if err := w.w.WriteByte(' '); err != nil { return n, err } n++ } nn, err := w.w.Write(frag) n += nn if err != nil { return n, err } } return n, nil } for i, frag := range frags { if w.complete { w.writeIndent() } nn, err := w.w.Write(frag) n += nn if err != nil { return n, err } if i+1 < len(frags) { if err := w.w.WriteByte('\n'); err != nil { return n, err } n++ } } w.complete = len(frags[len(frags)-1]) == 0 return n, nil } func (w *textWriter) WriteByte(c byte) error { if w.compact && c == '\n' { c = ' ' } if !w.compact && w.complete { w.writeIndent() } err := w.w.WriteByte(c) w.complete = c == '\n' return err } func (w *textWriter) indent() { w.ind++ } func (w *textWriter) unindent() { if w.ind == 0 { log.Print("proto: textWriter unindented too far") return } w.ind-- } func writeName(w *textWriter, props *Properties) error { if _, err := w.WriteString(props.OrigName); err != nil { return err } if props.Wire != "group" { return w.WriteByte(':') } return nil } // raw is the interface satisfied by RawMessage. type raw interface { Bytes() []byte } func requiresQuotes(u string) bool { // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. for _, ch := range u { switch { case ch == '.' || ch == '/' || ch == '_': continue case '0' <= ch && ch <= '9': continue case 'A' <= ch && ch <= 'Z': continue case 'a' <= ch && ch <= 'z': continue default: return true } } return false } // isAny reports whether sv is a google.protobuf.Any message func isAny(sv reflect.Value) bool { type wkt interface { XXX_WellKnownType() string } t, ok := sv.Addr().Interface().(wkt) return ok && t.XXX_WellKnownType() == "Any" } // writeProto3Any writes an expanded google.protobuf.Any message. // // It returns (false, nil) if sv value can't be unmarshaled (e.g. because // required messages are not linked in). // // It returns (true, error) when sv was written in expanded format or an error // was encountered. 
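// Illustrative sketch (not part of the vendored text.go): the textWriter
// above is normally driven through the exported TextMarshaler API. This
// stand-alone example marshals a minimal hand-written message (a stand-in for
// a protoc-gen-go generated type) in both the indented and the compact text
// form; the Greeting type and its field names are made up for the example.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

type Greeting struct {
	Name  *string `protobuf:"bytes,1,opt,name=name"`
	Count *int32  `protobuf:"varint,2,opt,name=count"`
}

func (m *Greeting) Reset()         { *m = Greeting{} }
func (m *Greeting) String() string { return proto.CompactTextString(m) }
func (*Greeting) ProtoMessage()    {}

func main() {
	g := &Greeting{Name: proto.String("gitaly"), Count: proto.Int32(3)}

	// Default marshaler: one field per line.
	fmt.Print(proto.MarshalTextString(g))

	// Compact marshaler: a single line, as used by CompactTextString.
	compact := proto.TextMarshaler{Compact: true}
	fmt.Println(compact.Text(g))
}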
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { turl := sv.FieldByName("TypeUrl") val := sv.FieldByName("Value") if !turl.IsValid() || !val.IsValid() { return true, errors.New("proto: invalid google.protobuf.Any message") } b, ok := val.Interface().([]byte) if !ok { return true, errors.New("proto: invalid google.protobuf.Any message") } parts := strings.Split(turl.String(), "/") mt := MessageType(parts[len(parts)-1]) if mt == nil { return false, nil } m := reflect.New(mt.Elem()) if err := Unmarshal(b, m.Interface().(Message)); err != nil { return false, nil } w.Write([]byte("[")) u := turl.String() if requiresQuotes(u) { writeString(w, u) } else { w.Write([]byte(u)) } if w.compact { w.Write([]byte("]:<")) } else { w.Write([]byte("]: <\n")) w.ind++ } if err := tm.writeStruct(w, m.Elem()); err != nil { return true, err } if w.compact { w.Write([]byte("> ")) } else { w.ind-- w.Write([]byte(">\n")) } return true, nil } func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { if tm.ExpandAny && isAny(sv) { if canExpand, err := tm.writeProto3Any(w, sv); canExpand { return err } } st := sv.Type() sprops := GetProperties(st) for i := 0; i < sv.NumField(); i++ { fv := sv.Field(i) props := sprops.Prop[i] name := st.Field(i).Name if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte // XXX_extensions map[int32]proto.Extension // The first is handled here; // the second is handled at the bottom of this function. if name == "XXX_unrecognized" && !fv.IsNil() { if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { return err } } continue } if fv.Kind() == reflect.Ptr && fv.IsNil() { // Field not filled in. This could be an optional field or // a required field that wasn't filled in. Either way, there // isn't anything we can show for it. continue } if fv.Kind() == reflect.Slice && fv.IsNil() { // Repeated field that is empty, or a bytes field that is unused. continue } if props.Repeated && fv.Kind() == reflect.Slice { // Repeated field. for j := 0; j < fv.Len(); j++ { if err := writeName(w, props); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } v := fv.Index(j) if v.Kind() == reflect.Ptr && v.IsNil() { // A nil message in a repeated field is not valid, // but we can handle that more gracefully than panicking. if _, err := w.Write([]byte("\n")); err != nil { return err } continue } if err := tm.writeAny(w, v, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } continue } if fv.Kind() == reflect.Map { // Map fields are rendered as a repeated struct with key/value fields. keys := fv.MapKeys() sort.Sort(mapKeys(keys)) for _, key := range keys { val := fv.MapIndex(key) if err := writeName(w, props); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } // open struct if err := w.WriteByte('<'); err != nil { return err } if !w.compact { if err := w.WriteByte('\n'); err != nil { return err } } w.indent() // key if _, err := w.WriteString("key:"); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if err := tm.writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } // nil values aren't legal, but we can avoid panicking because of them. 
if val.Kind() != reflect.Ptr || !val.IsNil() { // value if _, err := w.WriteString("value:"); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if err := tm.writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } // close struct w.unindent() if err := w.WriteByte('>'); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } continue } if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { // empty bytes field continue } if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { // proto3 non-repeated scalar field; skip if zero value if isProto3Zero(fv) { continue } } if fv.Kind() == reflect.Interface { // Check if it is a oneof. if st.Field(i).Tag.Get("protobuf_oneof") != "" { // fv is nil, or holds a pointer to generated struct. // That generated struct has exactly one field, // which has a protobuf struct tag. if fv.IsNil() { continue } inner := fv.Elem().Elem() // interface -> *T -> T tag := inner.Type().Field(0).Tag.Get("protobuf") props = new(Properties) // Overwrite the outer props var, but not its pointee. props.Parse(tag) // Write the value in the oneof, not the oneof itself. fv = inner.Field(0) // Special case to cope with malformed messages gracefully: // If the value in the oneof is a nil pointer, don't panic // in writeAny. if fv.Kind() == reflect.Ptr && fv.IsNil() { // Use errors.New so writeAny won't render quotes. msg := errors.New("/* nil */") fv = reflect.ValueOf(&msg).Elem() } } } if err := writeName(w, props); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if b, ok := fv.Interface().(raw); ok { if err := writeRaw(w, b.Bytes()); err != nil { return err } continue } // Enums have a String method, so writeAny will work fine. if err := tm.writeAny(w, fv, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } // Extensions (the XXX_extensions field). pv := sv.Addr() if _, ok := extendable(pv.Interface()); ok { if err := tm.writeExtensions(w, pv); err != nil { return err } } return nil } // writeRaw writes an uninterpreted raw message. func writeRaw(w *textWriter, b []byte) error { if err := w.WriteByte('<'); err != nil { return err } if !w.compact { if err := w.WriteByte('\n'); err != nil { return err } } w.indent() if err := writeUnknownStruct(w, b); err != nil { return err } w.unindent() if err := w.WriteByte('>'); err != nil { return err } return nil } // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) // Floats have special cases. if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { x := v.Float() var b []byte switch { case math.IsInf(x, 1): b = posInf case math.IsInf(x, -1): b = negInf case math.IsNaN(x): b = nan } if b != nil { _, err := w.Write(b) return err } // Other values are handled below. } // We don't attempt to serialise every possible value type; only those // that can occur in protocol buffers. switch v.Kind() { case reflect.Slice: // Should only be a []byte; repeated fields are handled in writeStruct. if err := writeString(w, string(v.Bytes())); err != nil { return err } case reflect.String: if err := writeString(w, v.String()); err != nil { return err } case reflect.Struct: // Required/optional group/message. 
var bra, ket byte = '<', '>' if props != nil && props.Wire == "group" { bra, ket = '{', '}' } if err := w.WriteByte(bra); err != nil { return err } if !w.compact { if err := w.WriteByte('\n'); err != nil { return err } } w.indent() if etm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { return err } if _, err = w.Write(text); err != nil { return err } } else if err := tm.writeStruct(w, v); err != nil { return err } w.unindent() if err := w.WriteByte(ket); err != nil { return err } default: _, err := fmt.Fprint(w, v.Interface()) return err } return nil } // equivalent to C's isprint. func isprint(c byte) bool { return c >= 0x20 && c < 0x7f } // writeString writes a string in the protocol buffer text format. // It is similar to strconv.Quote except we don't use Go escape sequences, // we treat the string as a byte sequence, and we use octal escapes. // These differences are to maintain interoperability with the other // languages' implementations of the text format. func writeString(w *textWriter, s string) error { // use WriteByte here to get any needed indent if err := w.WriteByte('"'); err != nil { return err } // Loop over the bytes, not the runes. for i := 0; i < len(s); i++ { var err error // Divergence from C++: we don't escape apostrophes. // There's no need to escape them, and the C++ parser // copes with a naked apostrophe. switch c := s[i]; c { case '\n': _, err = w.w.Write(backslashN) case '\r': _, err = w.w.Write(backslashR) case '\t': _, err = w.w.Write(backslashT) case '"': _, err = w.w.Write(backslashDQ) case '\\': _, err = w.w.Write(backslashBS) default: if isprint(c) { err = w.w.WriteByte(c) } else { _, err = fmt.Fprintf(w.w, "\\%03o", c) } } if err != nil { return err } } return w.WriteByte('"') } func writeUnknownStruct(w *textWriter, data []byte) (err error) { if !w.compact { if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { return err } } b := NewBuffer(data) for b.index < len(b.buf) { x, err := b.DecodeVarint() if err != nil { _, err := fmt.Fprintf(w, "/* %v */\n", err) return err } wire, tag := x&7, x>>3 if wire == WireEndGroup { w.unindent() if _, err := w.Write(endBraceNewline); err != nil { return err } continue } if _, err := fmt.Fprint(w, tag); err != nil { return err } if wire != WireStartGroup { if err := w.WriteByte(':'); err != nil { return err } } if !w.compact || wire == WireStartGroup { if err := w.WriteByte(' '); err != nil { return err } } switch wire { case WireBytes: buf, e := b.DecodeRawBytes(false) if e == nil { _, err = fmt.Fprintf(w, "%q", buf) } else { _, err = fmt.Fprintf(w, "/* %v */", e) } case WireFixed32: x, err = b.DecodeFixed32() err = writeUnknownInt(w, x, err) case WireFixed64: x, err = b.DecodeFixed64() err = writeUnknownInt(w, x, err) case WireStartGroup: err = w.WriteByte('{') w.indent() case WireVarint: x, err = b.DecodeVarint() err = writeUnknownInt(w, x, err) default: _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) } if err != nil { return err } if err = w.WriteByte('\n'); err != nil { return err } } return nil } func writeUnknownInt(w *textWriter, x uint64, err error) error { if err == nil { _, err = fmt.Fprint(w, x) } else { _, err = fmt.Fprintf(w, "/* %v */", err) } return err } type int32Slice []int32 func (s int32Slice) Len() int { return len(s) } func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // writeExtensions writes all the extensions in pv. 
// pv is assumed to be a pointer to a protocol message struct that is extendable. func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { emap := extensionMaps[pv.Type().Elem()] ep, _ := extendable(pv.Interface()) // Order the extensions by ID. // This isn't strictly necessary, but it will give us // canonical output, which will also make testing easier. m, mu := ep.extensionsRead() if m == nil { return nil } mu.Lock() ids := make([]int32, 0, len(m)) for id := range m { ids = append(ids, id) } sort.Sort(int32Slice(ids)) mu.Unlock() for _, extNum := range ids { ext := m[extNum] var desc *ExtensionDesc if emap != nil { desc = emap[extNum] } if desc == nil { // Unknown extension. if err := writeUnknownStruct(w, ext.enc); err != nil { return err } continue } pb, err := GetExtension(ep, desc) if err != nil { return fmt.Errorf("failed getting extension: %v", err) } // Repeated extensions will appear as a slice. if !desc.repeated() { if err := tm.writeExtension(w, desc.Name, pb); err != nil { return err } } else { v := reflect.ValueOf(pb) for i := 0; i < v.Len(); i++ { if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { return err } } } } return nil } func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } return nil } func (w *textWriter) writeIndent() { if !w.complete { return } remain := w.ind * 2 for remain > 0 { n := remain if n > len(spaces) { n = len(spaces) } w.w.Write(spaces[:n]) remain -= n } w.complete = false } // TextMarshaler is a configurable text format marshaler. type TextMarshaler struct { Compact bool // use compact text format (one line). ExpandAny bool // expand google.protobuf.Any messages of known types } // Marshal writes a given protocol buffer in text format. // The only errors returned are from w. func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { val := reflect.ValueOf(pb) if pb == nil || val.IsNil() { w.Write([]byte("")) return nil } var bw *bufio.Writer ww, ok := w.(writer) if !ok { bw = bufio.NewWriter(w) ww = bw } aw := &textWriter{ w: ww, complete: true, compact: tm.Compact, } if etm, ok := pb.(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { return err } if _, err = aw.Write(text); err != nil { return err } if bw != nil { return bw.Flush() } return nil } // Dereference the received pointer so we don't have outer < and >. v := reflect.Indirect(val) if err := tm.writeStruct(aw, v); err != nil { return err } if bw != nil { return bw.Flush() } return nil } // Text is the same as Marshal, but returns the string directly. func (tm *TextMarshaler) Text(pb Message) string { var buf bytes.Buffer tm.Marshal(&buf, pb) return buf.String() } var ( defaultTextMarshaler = TextMarshaler{} compactTextMarshaler = TextMarshaler{Compact: true} ) // TODO: consider removing some of the Marshal functions below. // MarshalText writes a given protocol buffer in text format. // The only errors returned are from w. func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } // MarshalTextString is the same as MarshalText, but returns the string directly. 
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } // CompactText writes a given protocol buffer in compact text format (one line). func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } // CompactTextString is the same as CompactText, but returns the string directly. func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } text_parser.go000066400000000000000000000536451324746544700350570ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/proto// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto // Functions for parsing the Text protocol buffer format. // TODO: message sets. 
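// Editor's sketch (not part of the vendored upstream file): a minimal round trip through
// the text format implemented above (text.go) and the parser that follows (text_parser.go).
// It assumes only packages already vendored in this tree: "github.com/golang/protobuf/proto"
// and "github.com/golang/protobuf/ptypes/duration"; output shapes in the comments are
// approximate.

package main

import (
	"fmt"
	"os"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	d := &durpb.Duration{Seconds: 3, Nanos: 500000000}

	// Multi-line text format, written to any io.Writer (roughly "seconds: 3\nnanos: 500000000\n").
	_ = proto.MarshalText(os.Stdout, d)

	// One-line form via the compact marshaler; convenient for log output.
	fmt.Println(proto.CompactTextString(d))

	// Parse the text form back with UnmarshalText from text_parser.go below.
	parsed := &durpb.Duration{}
	if err := proto.UnmarshalText("seconds: 3 nanos: 500000000", parsed); err != nil {
		panic(err)
	}
	fmt.Println(proto.Equal(d, parsed)) // true
}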
import ( "encoding" "errors" "fmt" "reflect" "strconv" "strings" "unicode/utf8" ) // Error string emitted when deserializing Any and fields are already set const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" type ParseError struct { Message string Line int // 1-based line number Offset int // 0-based byte offset from start of input } func (p *ParseError) Error() string { if p.Line == 1 { // show offset only for first line return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) } return fmt.Sprintf("line %d: %v", p.Line, p.Message) } type token struct { value string err *ParseError line int // line number offset int // byte number from start of input, not start of line unquoted string // the unquoted version of value, if it was a quoted string } func (t *token) String() string { if t.err == nil { return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) } return fmt.Sprintf("parse error: %v", t.err) } type textParser struct { s string // remaining input done bool // whether the parsing is finished (success or error) backed bool // whether back() was called offset, line int cur token } func newTextParser(s string) *textParser { p := new(textParser) p.s = s p.line = 1 p.cur.line = 1 return p } func (p *textParser) errorf(format string, a ...interface{}) *ParseError { pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} p.cur.err = pe p.done = true return pe } // Numbers and identifiers are matched by [-+._A-Za-z0-9] func isIdentOrNumberChar(c byte) bool { switch { case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': return true case '0' <= c && c <= '9': return true } switch c { case '-', '+', '.', '_': return true } return false } func isWhitespace(c byte) bool { switch c { case ' ', '\t', '\n', '\r': return true } return false } func isQuote(c byte) bool { switch c { case '"', '\'': return true } return false } func (p *textParser) skipWhitespace() { i := 0 for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { if p.s[i] == '#' { // comment; skip to end of line or input for i < len(p.s) && p.s[i] != '\n' { i++ } if i == len(p.s) { break } } if p.s[i] == '\n' { p.line++ } i++ } p.offset += i p.s = p.s[i:len(p.s)] if len(p.s) == 0 { p.done = true } } func (p *textParser) advance() { // Skip whitespace p.skipWhitespace() if p.done { return } // Start of non-whitespace p.cur.err = nil p.cur.offset, p.cur.line = p.offset, p.line p.cur.unquoted = "" switch p.s[0] { case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': // Single symbol p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] case '"', '\'': // Quoted string i := 1 for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { if p.s[i] == '\\' && i+1 < len(p.s) { // skip escaped char i++ } i++ } if i >= len(p.s) || p.s[i] != p.s[0] { p.errorf("unmatched quote") return } unq, err := unquoteC(p.s[1:i], rune(p.s[0])) if err != nil { p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) return } p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] p.cur.unquoted = unq default: i := 0 for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { i++ } if i == 0 { p.errorf("unexpected byte %#x", p.s[0]) return } p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] } p.offset += len(p.cur.value) } var ( errBadUTF8 = errors.New("proto: bad UTF-8") errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { // This is based on C++'s tokenizer.cc. // Despite its name, this is *not* parsing C syntax. // For instance, "\0" is an invalid quoted string. 
// Avoid allocation in trivial cases. simple := true for _, r := range s { if r == '\\' || r == quote { simple = false break } } if simple { return s, nil } buf := make([]byte, 0, 3*len(s)/2) for len(s) > 0 { r, n := utf8.DecodeRuneInString(s) if r == utf8.RuneError && n == 1 { return "", errBadUTF8 } s = s[n:] if r != '\\' { if r < utf8.RuneSelf { buf = append(buf, byte(r)) } else { buf = append(buf, string(r)...) } continue } ch, tail, err := unescape(s) if err != nil { return "", err } buf = append(buf, ch...) s = tail } return string(buf), nil } func unescape(s string) (ch string, tail string, err error) { r, n := utf8.DecodeRuneInString(s) if r == utf8.RuneError && n == 1 { return "", "", errBadUTF8 } s = s[n:] switch r { case 'a': return "\a", s, nil case 'b': return "\b", s, nil case 'f': return "\f", s, nil case 'n': return "\n", s, nil case 'r': return "\r", s, nil case 't': return "\t", s, nil case 'v': return "\v", s, nil case '?': return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } base := 8 ss := s[:2] s = s[2:] if r == 'x' || r == 'X' { base = 16 } else { ss = string(r) + ss } i, err := strconv.ParseUint(ss, base, 8) if err != nil { return "", "", err } return string([]byte{byte(i)}), s, nil case 'u', 'U': n := 4 if r == 'U' { n = 8 } if len(s) < n { return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) } bs := make([]byte, n/2) for i := 0; i < n; i += 2 { a, ok1 := unhex(s[i]) b, ok2 := unhex(s[i+1]) if !ok1 || !ok2 { return "", "", errBadHex } bs[i/2] = a<<4 | b } s = s[n:] return string(bs), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } // Adapted from src/pkg/strconv/quote.go. func unhex(b byte) (v byte, ok bool) { switch { case '0' <= b && b <= '9': return b - '0', true case 'a' <= b && b <= 'f': return b - 'a' + 10, true case 'A' <= b && b <= 'F': return b - 'A' + 10, true } return 0, false } // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. func (p *textParser) back() { p.backed = true } // Advances the parser and returns the new current token. func (p *textParser) next() *token { if p.backed || p.done { p.backed = false return &p.cur } p.advance() if p.done { p.cur.value = "" } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { // Look for multiple quoted strings separated by whitespace, // and concatenate them. cat := p.cur for { p.skipWhitespace() if p.done || !isQuote(p.s[0]) { break } p.advance() if p.cur.err != nil { return &p.cur } cat.value += " " + p.cur.value cat.unquoted += p.cur.unquoted } p.done = false // parser may have seen EOF, but we want to return cat p.cur = cat } return &p.cur } func (p *textParser) consumeToken(s string) error { tok := p.next() if tok.err != nil { return tok.err } if tok.value != s { p.back() return p.errorf("expected %q, found %q", s, tok.value) } return nil } // Return a RequiredNotSetError indicating which required field was not set. 
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { st := sv.Type() sprops := GetProperties(st) for i := 0; i < st.NumField(); i++ { if !isNil(sv.Field(i)) { continue } props := sprops.Prop[i] if props.Required { return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} } } return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen } // Returns the index in the struct for the named field, as well as the parsed tag properties. func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { i, ok := sprops.decoderOrigNames[name] if ok { return i, sprops.Prop[i], true } return -1, nil, false } // Consume a ':' from the input stream (if the next token is a colon), // returning an error if a colon is needed but not present. func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { tok := p.next() if tok.err != nil { return tok.err } if tok.value != ":" { // Colon is optional when the field is a group or message. needColon := true switch props.Wire { case "group": needColon = false case "bytes": // A "bytes" field is either a message, a string, or a repeated field; // those three become *T, *string and []T respectively, so we can check for // this field being a pointer to a non-string. if typ.Kind() == reflect.Ptr { // *T or *string if typ.Elem().Kind() == reflect.String { break } } else if typ.Kind() == reflect.Slice { // []T or []*T if typ.Elem().Kind() != reflect.Ptr { break } } else if typ.Kind() == reflect.String { // The proto3 exception is for a string field, // which requires a colon. break } needColon = false } if needColon { return p.errorf("expected ':', found %q", tok.value) } p.back() } return nil } func (p *textParser) readStruct(sv reflect.Value, terminator string) error { st := sv.Type() sprops := GetProperties(st) reqCount := sprops.reqCount var reqFieldErr error fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of // '>' or '}', or the end of the input. A name may also be // "[extension]" or "[type/url]". // // The whole struct can also be an expanded Any message, like: // [type/url] < ... struct contents ... > for { tok := p.next() if tok.err != nil { return tok.err } if tok.value == terminator { break } if tok.value == "[" { // Looks like an extension or an Any. // // TODO: Check whether we need to handle // namespace rooted names (e.g. ".something.Foo"). extName, err := p.consumeExtName() if err != nil { return err } if s := strings.LastIndex(extName, "/"); s >= 0 { // If it contains a slash, it's an Any type URL. 
messageName := extName[s+1:] mt := MessageType(messageName) if mt == nil { return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) } tok = p.next() if tok.err != nil { return tok.err } // consume an optional colon if tok.value == ":" { tok = p.next() if tok.err != nil { return tok.err } } var terminator string switch tok.value { case "<": terminator = ">" case "{": terminator = "}" default: return p.errorf("expected '{' or '<', found %q", tok.value) } v := reflect.New(mt.Elem()) if pe := p.readStruct(v.Elem(), terminator); pe != nil { return pe } b, err := Marshal(v.Interface().(Message)) if err != nil { return p.errorf("failed to marshal message of type %q: %v", messageName, err) } if fieldSet["type_url"] { return p.errorf(anyRepeatedlyUnpacked, "type_url") } if fieldSet["value"] { return p.errorf(anyRepeatedlyUnpacked, "value") } sv.FieldByName("TypeUrl").SetString(extName) sv.FieldByName("Value").SetBytes(b) fieldSet["type_url"] = true fieldSet["value"] = true continue } var desc *ExtensionDesc // This could be faster, but it's functional. // TODO: Do something smarter than a linear scan. for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { if d.Name == extName { desc = d break } } if desc == nil { return p.errorf("unrecognized extension %q", extName) } props := &Properties{} props.Parse(desc.Tag) typ := reflect.TypeOf(desc.ExtensionType) if err := p.checkForColon(props, typ); err != nil { return err } rep := desc.repeated() // Read the extension structure, and set it in // the value we're constructing. var ext reflect.Value if !rep { ext = reflect.New(typ).Elem() } else { ext = reflect.New(typ.Elem()).Elem() } if err := p.readAny(ext, props); err != nil { if _, ok := err.(*RequiredNotSetError); !ok { return err } reqFieldErr = err } ep := sv.Addr().Interface().(Message) if !rep { SetExtension(ep, desc, ext.Interface()) } else { old, err := GetExtension(ep, desc) var sl reflect.Value if err == nil { sl = reflect.ValueOf(old) // existing slice } else { sl = reflect.MakeSlice(typ, 0, 1) } sl = reflect.Append(sl, ext) SetExtension(ep, desc, sl.Interface()) } if err := p.consumeOptionalSeparator(); err != nil { return err } continue } // This is a normal, non-extension field. name := tok.value var dst reflect.Value fi, props, ok := structFieldByName(sprops, name) if ok { dst = sv.Field(fi) } else if oop, ok := sprops.OneofTypes[name]; ok { // It is a oneof. props = oop.Prop nv := reflect.New(oop.Type.Elem()) dst = nv.Elem().Field(0) field := sv.Field(oop.Field) if !field.IsNil() { return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) } field.Set(nv) } if !dst.IsValid() { return p.errorf("unknown field name %q in %v", name, st) } if dst.Kind() == reflect.Map { // Consume any colon. if err := p.checkForColon(props, dst.Type()); err != nil { return err } // Construct the map if it doesn't already exist. if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } key := reflect.New(dst.Type().Key()).Elem() val := reflect.New(dst.Type().Elem()).Elem() // The map entry should be this sequence of tokens: // < key : KEY value : VALUE > // However, implementations may omit key or value, and technically // we should support them in any order. See b/28924776 for a time // this went wrong. 
tok := p.next() var terminator string switch tok.value { case "<": terminator = ">" case "{": terminator = "}" default: return p.errorf("expected '{' or '<', found %q", tok.value) } for { tok := p.next() if tok.err != nil { return tok.err } if tok.value == terminator { break } switch tok.value { case "key": if err := p.consumeToken(":"); err != nil { return err } if err := p.readAny(key, props.mkeyprop); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } case "value": if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { return err } if err := p.readAny(val, props.mvalprop); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } default: p.back() return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) } } dst.SetMapIndex(key, val) continue } // Check that it's not already set if it's not a repeated field. if !props.Repeated && fieldSet[name] { return p.errorf("non-repeated field %q was repeated", name) } if err := p.checkForColon(props, dst.Type()); err != nil { return err } // Parse into the field. fieldSet[name] = true if err := p.readAny(dst, props); err != nil { if _, ok := err.(*RequiredNotSetError); !ok { return err } reqFieldErr = err } if props.Required { reqCount-- } if err := p.consumeOptionalSeparator(); err != nil { return err } } if reqCount > 0 { return p.missingRequiredFieldError(sv) } return reqFieldErr } // consumeExtName consumes extension name or expanded Any type URL and the // following ']'. It returns the name or URL consumed. func (p *textParser) consumeExtName() (string, error) { tok := p.next() if tok.err != nil { return "", tok.err } // If extension name or type url is quoted, it's a single token. if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) if err != nil { return "", err } return name, p.consumeToken("]") } // Consume everything up to "]" var parts []string for tok.value != "]" { parts = append(parts, tok.value) tok = p.next() if tok.err != nil { return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) } } return strings.Join(parts, ""), nil } // consumeOptionalSeparator consumes an optional semicolon or comma. // It is used in readStruct to provide backward compatibility. func (p *textParser) consumeOptionalSeparator() error { tok := p.next() if tok.err != nil { return tok.err } if tok.value != ";" && tok.value != "," { p.back() } return nil } func (p *textParser) readAny(v reflect.Value, props *Properties) error { tok := p.next() if tok.err != nil { return tok.err } if tok.value == "" { return p.errorf("unexpected EOF") } switch fv := v; fv.Kind() { case reflect.Slice: at := v.Type() if at.Elem().Kind() == reflect.Uint8 { // Special case for []byte if tok.value[0] != '"' && tok.value[0] != '\'' { // Deliberately written out here, as the error after // this switch statement would write "invalid []byte: ...", // which is not as user-friendly. return p.errorf("invalid string: %v", tok.value) } bytes := []byte(tok.unquoted) fv.Set(reflect.ValueOf(bytes)) return nil } // Repeated field. if tok.value == "[" { // Repeated field with list notation, like [1,2,3]. 
for { fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) err := p.readAny(fv.Index(fv.Len()-1), props) if err != nil { return err } tok := p.next() if tok.err != nil { return tok.err } if tok.value == "]" { break } if tok.value != "," { return p.errorf("Expected ']' or ',' found %q", tok.value) } } return nil } // One value of the repeated field. p.back() fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) return p.readAny(fv.Index(fv.Len()-1), props) case reflect.Bool: // true/1/t/True or false/f/0/False. switch tok.value { case "true", "1", "t", "True": fv.SetBool(true) return nil case "false", "0", "f", "False": fv.SetBool(false) return nil } case reflect.Float32, reflect.Float64: v := tok.value // Ignore 'f' for compatibility with output generated by C++, but don't // remove 'f' when the value is "-inf" or "inf". if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { v = v[:len(v)-1] } if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { fv.SetFloat(f) return nil } case reflect.Int32: if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { fv.SetInt(x) return nil } if len(props.Enum) == 0 { break } m, ok := enumValueMaps[props.Enum] if !ok { break } x, ok := m[tok.value] if !ok { break } fv.SetInt(int64(x)) return nil case reflect.Int64: if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { fv.SetInt(x) return nil } case reflect.Ptr: // A basic field (indirected through pointer), or a repeated message/group p.back() fv.Set(reflect.New(fv.Type().Elem())) return p.readAny(fv.Elem(), props) case reflect.String: if tok.value[0] == '"' || tok.value[0] == '\'' { fv.SetString(tok.unquoted) return nil } case reflect.Struct: var terminator string switch tok.value { case "{": terminator = "}" case "<": terminator = ">" default: return p.errorf("expected '{' or '<', found %q", tok.value) } // TODO: Handle nested messages which implement encoding.TextUnmarshaler. return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { fv.SetUint(uint64(x)) return nil } case reflect.Uint64: if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { fv.SetUint(x) return nil } } return p.errorf("invalid %v: %v", v.Type(), tok.value) } // UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb // before starting to unmarshal, so any existing data in pb is always removed. // If a required field is not set and no other error occurs, // UnmarshalText returns *RequiredNotSetError. func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { err := um.UnmarshalText([]byte(s)) return err } pb.Reset() v := reflect.ValueOf(pb) if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { return pe } return nil } ptypes/000077500000000000000000000000001324746544700323345ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobufany.go000066400000000000000000000112131324746544700334500ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypes// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2016 The Go Authors. All rights reserved. 
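// Editor's sketch (not part of the upstream source): UnmarshalText above also accepts the
// expanded google.protobuf.Any form via the [type/url] handling in readStruct, and
// TextMarshaler.ExpandAny (text.go) produces it, provided the packed type is linked in.
// Assumes only the vendored proto, ptypes/any and ptypes/duration packages.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	anypb "github.com/golang/protobuf/ptypes/any"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	payload, _ := proto.Marshal(&durpb.Duration{Seconds: 3})
	a := &anypb.Any{
		TypeUrl: "type.googleapis.com/google.protobuf.Duration",
		Value:   payload,
	}

	// Expanded form, roughly: [type.googleapis.com/google.protobuf.Duration]: < seconds: 3 >
	expander := proto.TextMarshaler{ExpandAny: true}
	fmt.Println(expander.Text(a))

	// readStruct recognises the [type/url] syntax and re-packs the message into the Any.
	back := &anypb.Any{}
	if err := proto.UnmarshalText(`[type.googleapis.com/google.protobuf.Duration]: < seconds: 3 >`, back); err != nil {
		panic(err)
	}
	fmt.Println(back.TypeUrl) // type.googleapis.com/google.protobuf.Duration
}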
// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package ptypes // This file implements functions to marshal proto.Message to/from // google.protobuf.Any message. import ( "fmt" "reflect" "strings" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes/any" ) const googleApis = "type.googleapis.com/" // AnyMessageName returns the name of the message contained in a google.protobuf.Any message. // // Note that regular type assertions should be done using the Is // function. AnyMessageName is provided for less common use cases like filtering a // sequence of Any messages based on a set of allowed message type names. func AnyMessageName(any *any.Any) (string, error) { if any == nil { return "", fmt.Errorf("message is nil") } slash := strings.LastIndex(any.TypeUrl, "/") if slash < 0 { return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) } return any.TypeUrl[slash+1:], nil } // MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. func MarshalAny(pb proto.Message) (*any.Any, error) { value, err := proto.Marshal(pb) if err != nil { return nil, err } return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil } // DynamicAny is a value that can be passed to UnmarshalAny to automatically // allocate a proto.Message for the type specified in a google.protobuf.Any // message. The allocated message is stored in the embedded proto.Message. // // Example: // // var x ptypes.DynamicAny // if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } // fmt.Printf("unmarshaled message: %v", x.Message) type DynamicAny struct { proto.Message } // Empty returns a new proto.Message of the type specified in a // google.protobuf.Any message. It returns an error if corresponding message // type isn't linked in. 
func Empty(any *any.Any) (proto.Message, error) { aname, err := AnyMessageName(any) if err != nil { return nil, err } t := proto.MessageType(aname) if t == nil { return nil, fmt.Errorf("any: message type %q isn't linked in", aname) } return reflect.New(t.Elem()).Interface().(proto.Message), nil } // UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any // message and places the decoded result in pb. It returns an error if type of // contents of Any message does not match type of pb message. // // pb can be a proto.Message, or a *DynamicAny. func UnmarshalAny(any *any.Any, pb proto.Message) error { if d, ok := pb.(*DynamicAny); ok { if d.Message == nil { var err error d.Message, err = Empty(any) if err != nil { return err } } return UnmarshalAny(any, d.Message) } aname, err := AnyMessageName(any) if err != nil { return err } mname := proto.MessageName(pb) if aname != mname { return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) } return proto.Unmarshal(any.Value, pb) } // Is returns true if any value contains a given message type. func Is(any *any.Any, pb proto.Message) bool { aname, err := AnyMessageName(any) if err != nil { return false } return aname == proto.MessageName(pb) } any/000077500000000000000000000000001324746544700331235ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypesany.pb.go000066400000000000000000000134321324746544700346440ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypes/any// Code generated by protoc-gen-go. // source: github.com/golang/protobuf/ptypes/any/any.proto // DO NOT EDIT! /* Package any is a generated protocol buffer package. It is generated from these files: github.com/golang/protobuf/ptypes/any/any.proto It has these top-level messages: Any */ package any import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // `Any` contains an arbitrary serialized protocol buffer message along with a // URL that describes the type of the serialized message. // // Protobuf library provides support to pack/unpack Any values in the form // of utility functions or additional generated methods of the Any type. // // Example 1: Pack and unpack a message in C++. // // Foo foo = ...; // Any any; // any.PackFrom(foo); // ... // if (any.UnpackTo(&foo)) { // ... // } // // Example 2: Pack and unpack a message in Java. // // Foo foo = ...; // Any any = Any.pack(foo); // ... // if (any.is(Foo.class)) { // foo = any.unpack(Foo.class); // } // // Example 3: Pack and unpack a message in Python. // // foo = Foo(...) // any = Any() // any.Pack(foo) // ... // if any.Is(Foo.DESCRIPTOR): // any.Unpack(foo) // ... // // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' // in the type URL, for example "foo.bar.com/x/y.z" will yield type // name "y.z". 
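// Editor's sketch (not part of the generated upstream file): the ptypes helpers defined
// above (MarshalAny, UnmarshalAny, DynamicAny, Is, AnyMessageName) are the Go counterpart
// of the pack/unpack examples in this comment. Assumes only the vendored ptypes and
// ptypes/duration packages.

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 5})
	if err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	name, _ := ptypes.AnyMessageName(a)
	fmt.Println(name, ptypes.Is(a, &durpb.Duration{})) // google.protobuf.Duration true

	// Unpack into a known type...
	out := &durpb.Duration{}
	if err := ptypes.UnmarshalAny(a, out); err != nil {
		panic(err)
	}
	fmt.Println(out.Seconds) // 5

	// ...or let DynamicAny allocate the concrete type from the registry.
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", dyn.Message) // *duration.Duration
}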
// // // JSON // ==== // The JSON representation of an `Any` value uses the regular // representation of the deserialized, embedded message, with an // additional field `@type` which contains the type URL. Example: // // package google.profile; // message Person { // string first_name = 1; // string last_name = 2; // } // // { // "@type": "type.googleapis.com/google.profile.Person", // "firstName": , // "lastName": // } // // If the embedded message type is well-known and has a custom JSON // representation, that representation will be embedded adding a field // `value` which holds the custom JSON in addition to the `@type` // field. Example (for message [google.protobuf.Duration][]): // // { // "@type": "type.googleapis.com/google.protobuf.Duration", // "value": "1.212s" // } // type Any struct { // A URL/resource name whose content describes the type of the // serialized protocol buffer message. // // For URLs which use the scheme `http`, `https`, or no scheme, the // following restrictions and interpretations apply: // // * If no scheme is provided, `https` is assumed. // * The last segment of the URL's path must represent the fully // qualified name of the type (as in `path/google.protobuf.Duration`). // The name should be in a canonical form (e.g., leading "." is // not accepted). // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the // URL, or have them precompiled into a binary to avoid any // lookup. Therefore, binary compatibility needs to be preserved // on changes to types. (Use versioned type names to manage // breaking changes.) // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. // TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. 
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } func (m *Any) Reset() { *m = Any{} } func (m *Any) String() string { return proto.CompactTextString(m) } func (*Any) ProtoMessage() {} func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (*Any) XXX_WellKnownType() string { return "Any" } func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 187 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, } any.proto000066400000000000000000000122411324746544700347770ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypes/any// Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option go_package = "github.com/golang/protobuf/ptypes/any"; option java_package = "com.google.protobuf"; option java_outer_classname = "AnyProto"; option java_multiple_files = true; option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; // `Any` contains an arbitrary serialized protocol buffer message along with a // URL that describes the type of the serialized message. // // Protobuf library provides support to pack/unpack Any values in the form // of utility functions or additional generated methods of the Any type. // // Example 1: Pack and unpack a message in C++. // // Foo foo = ...; // Any any; // any.PackFrom(foo); // ... // if (any.UnpackTo(&foo)) { // ... // } // // Example 2: Pack and unpack a message in Java. // // Foo foo = ...; // Any any = Any.pack(foo); // ... // if (any.is(Foo.class)) { // foo = any.unpack(Foo.class); // } // // Example 3: Pack and unpack a message in Python. // // foo = Foo(...) // any = Any() // any.Pack(foo) // ... // if any.Is(Foo.DESCRIPTOR): // any.Unpack(foo) // ... // // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' // in the type URL, for example "foo.bar.com/x/y.z" will yield type // name "y.z". // // // JSON // ==== // The JSON representation of an `Any` value uses the regular // representation of the deserialized, embedded message, with an // additional field `@type` which contains the type URL. Example: // // package google.profile; // message Person { // string first_name = 1; // string last_name = 2; // } // // { // "@type": "type.googleapis.com/google.profile.Person", // "firstName": , // "lastName": // } // // If the embedded message type is well-known and has a custom JSON // representation, that representation will be embedded adding a field // `value` which holds the custom JSON in addition to the `@type` // field. Example (for message [google.protobuf.Duration][]): // // { // "@type": "type.googleapis.com/google.protobuf.Duration", // "value": "1.212s" // } // message Any { // A URL/resource name whose content describes the type of the // serialized protocol buffer message. // // For URLs which use the scheme `http`, `https`, or no scheme, the // following restrictions and interpretations apply: // // * If no scheme is provided, `https` is assumed. // * The last segment of the URL's path must represent the fully // qualified name of the type (as in `path/google.protobuf.Duration`). // The name should be in a canonical form (e.g., leading "." is // not accepted). // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the // URL, or have them precompiled into a binary to avoid any // lookup. 
Therefore, binary compatibility needs to be preserved // on changes to types. (Use versioned type names to manage // breaking changes.) // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. // string type_url = 1; // Must be a valid serialized protocol buffer of the above specified type. bytes value = 2; } doc.go000066400000000000000000000033361324746544700334350ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypes// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2016 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /* Package ptypes contains code for interacting with well-known types. */ package ptypes duration.go000066400000000000000000000074431324746544700345200ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypes// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2016 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package ptypes // This file implements conversions between google.protobuf.Duration // and time.Duration. import ( "errors" "fmt" "time" durpb "github.com/golang/protobuf/ptypes/duration" ) const ( // Range of a durpb.Duration in seconds, as specified in // google/protobuf/duration.proto. This is about 10,000 years in seconds. maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) minSeconds = -maxSeconds ) // validateDuration determines whether the durpb.Duration is valid according to the // definition in google/protobuf/duration.proto. A valid durpb.Duration // may still be too large to fit into a time.Duration (the range of durpb.Duration // is about 10,000 years, and the range of time.Duration is about 290). func validateDuration(d *durpb.Duration) error { if d == nil { return errors.New("duration: nil Duration") } if d.Seconds < minSeconds || d.Seconds > maxSeconds { return fmt.Errorf("duration: %v: seconds out of range", d) } if d.Nanos <= -1e9 || d.Nanos >= 1e9 { return fmt.Errorf("duration: %v: nanos out of range", d) } // Seconds and Nanos must have the same sign, unless d.Nanos is zero. if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) } return nil } // Duration converts a durpb.Duration to a time.Duration. Duration // returns an error if the durpb.Duration is invalid or is too large to be // represented in a time.Duration. func Duration(p *durpb.Duration) (time.Duration, error) { if err := validateDuration(p); err != nil { return 0, err } d := time.Duration(p.Seconds) * time.Second if int64(d/time.Second) != p.Seconds { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } if p.Nanos != 0 { d += time.Duration(p.Nanos) if (d < 0) != (p.Nanos < 0) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) } } return d, nil } // DurationProto converts a time.Duration to a durpb.Duration. func DurationProto(d time.Duration) *durpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 nanos -= secs * 1e9 return &durpb.Duration{ Seconds: secs, Nanos: int32(nanos), } } duration/000077500000000000000000000000001324746544700341615ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypesduration.pb.go000066400000000000000000000131271324746544700367410ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypes/duration// Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto /* Package duration is a generated protocol buffer package. 
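// Editor's sketch (not part of the generated upstream file): duration.go above converts
// between durpb.Duration and time.Duration; a quick round trip, assuming only the vendored
// ptypes and ptypes/duration packages.

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	p := ptypes.DurationProto(90 * time.Second) // &durpb.Duration{Seconds: 90}
	d, err := ptypes.Duration(p)
	fmt.Println(d, err) // 1m30s <nil>

	// validateDuration rejects mismatched signs and out-of-range values.
	_, err = ptypes.Duration(&durpb.Duration{Seconds: 1, Nanos: -1})
	fmt.Println(err != nil) // true
}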
It is generated from these files: google/protobuf/duration.proto It has these top-level messages: Duration */ package duration import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // A Duration represents a signed, fixed-length span of time represented // as a count of seconds and fractions of seconds at nanosecond // resolution. It is independent of any calendar and concepts like "day" // or "month". It is related to Timestamp in that the difference between // two Timestamp values is a Duration and it can be added or subtracted // from a Timestamp. Range is approximately +-10,000 years. // // # Examples // // Example 1: Compute Duration from two Timestamps in pseudo code. // // Timestamp start = ...; // Timestamp end = ...; // Duration duration = ...; // // duration.seconds = end.seconds - start.seconds; // duration.nanos = end.nanos - start.nanos; // // if (duration.seconds < 0 && duration.nanos > 0) { // duration.seconds += 1; // duration.nanos -= 1000000000; // } else if (durations.seconds > 0 && duration.nanos < 0) { // duration.seconds -= 1; // duration.nanos += 1000000000; // } // // Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. // // Timestamp start = ...; // Duration duration = ...; // Timestamp end = ...; // // end.seconds = start.seconds + duration.seconds; // end.nanos = start.nanos + duration.nanos; // // if (end.nanos < 0) { // end.seconds -= 1; // end.nanos += 1000000000; // } else if (end.nanos >= 1000000000) { // end.seconds += 1; // end.nanos -= 1000000000; // } // // Example 3: Compute Duration from datetime.timedelta in Python. // // td = datetime.timedelta(days=3, minutes=10) // duration = Duration() // duration.FromTimedelta(td) // // # JSON Mapping // // In JSON format, the Duration type is encoded as a string rather than an // object, where the string ends in the suffix "s" (indicating seconds) and // is preceded by the number of seconds, with nanoseconds expressed as // fractional seconds. For example, 3 seconds with 0 nanoseconds should be // encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 // microsecond should be expressed in JSON format as "3.000001s". // // type Duration struct { // Signed seconds of the span of time. Must be from -315,576,000,000 // to +315,576,000,000 inclusive. Note: these bounds are computed from: // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` // Signed fractions of a second at nanosecond resolution of the span // of time. Durations less than one second are represented with a 0 // `seconds` field and a positive or negative `nanos` field. For durations // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. 
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` } func (m *Duration) Reset() { *m = Duration{} } func (m *Duration) String() string { return proto.CompactTextString(m) } func (*Duration) ProtoMessage() {} func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (*Duration) XXX_WellKnownType() string { return "Duration" } func (m *Duration) GetSeconds() int64 { if m != nil { return m.Seconds } return 0 } func (m *Duration) GetNanos() int32 { if m != nil { return m.Nanos } return 0 } func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 190 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, } duration.proto000066400000000000000000000114321324746544700370740ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypes/duration// Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option cc_enable_arenas = true; option go_package = "github.com/golang/protobuf/ptypes/duration"; option java_package = "com.google.protobuf"; option java_outer_classname = "DurationProto"; option java_multiple_files = true; option objc_class_prefix = "GPB"; // A Duration represents a signed, fixed-length span of time represented // as a count of seconds and fractions of seconds at nanosecond // resolution. It is independent of any calendar and concepts like "day" // or "month". It is related to Timestamp in that the difference between // two Timestamp values is a Duration and it can be added or subtracted // from a Timestamp. Range is approximately +-10,000 years. // // # Examples // // Example 1: Compute Duration from two Timestamps in pseudo code. // // Timestamp start = ...; // Timestamp end = ...; // Duration duration = ...; // // duration.seconds = end.seconds - start.seconds; // duration.nanos = end.nanos - start.nanos; // // if (duration.seconds < 0 && duration.nanos > 0) { // duration.seconds += 1; // duration.nanos -= 1000000000; // } else if (durations.seconds > 0 && duration.nanos < 0) { // duration.seconds -= 1; // duration.nanos += 1000000000; // } // // Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. // // Timestamp start = ...; // Duration duration = ...; // Timestamp end = ...; // // end.seconds = start.seconds + duration.seconds; // end.nanos = start.nanos + duration.nanos; // // if (end.nanos < 0) { // end.seconds -= 1; // end.nanos += 1000000000; // } else if (end.nanos >= 1000000000) { // end.seconds += 1; // end.nanos -= 1000000000; // } // // Example 3: Compute Duration from datetime.timedelta in Python. // // td = datetime.timedelta(days=3, minutes=10) // duration = Duration() // duration.FromTimedelta(td) // // # JSON Mapping // // In JSON format, the Duration type is encoded as a string rather than an // object, where the string ends in the suffix "s" (indicating seconds) and // is preceded by the number of seconds, with nanoseconds expressed as // fractional seconds. For example, 3 seconds with 0 nanoseconds should be // encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 // microsecond should be expressed in JSON format as "3.000001s". // // message Duration { // Signed seconds of the span of time. Must be from -315,576,000,000 // to +315,576,000,000 inclusive. Note: these bounds are computed from: // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years int64 seconds = 1; // Signed fractions of a second at nanosecond resolution of the span // of time. Durations less than one second are represented with a 0 // `seconds` field and a positive or negative `nanos` field. For durations // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. 
Must be from -999,999,999 // to +999,999,999 inclusive. int32 nanos = 2; } regen.sh000077500000000000000000000022261324746544700337750ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypes#!/bin/bash -e # # This script fetches and rebuilds the "well-known types" protocol buffers. # To run this you will need protoc and goprotobuf installed; # see https://github.com/golang/protobuf for instructions. # You also need Go and Git installed. PKG=github.com/golang/protobuf/ptypes UPSTREAM=https://github.com/google/protobuf UPSTREAM_SUBDIR=src/google/protobuf PROTO_FILES=(any duration empty struct timestamp wrappers) function die() { echo 1>&2 $* exit 1 } # Sanity check that the right tools are accessible. for tool in go git protoc protoc-gen-go; do q=$(which $tool) || die "didn't find $tool" echo 1>&2 "$tool: $q" done tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) trap 'rm -rf $tmpdir' EXIT echo -n 1>&2 "finding package dir... " pkgdir=$(go list -f '{{.Dir}}' $PKG) echo 1>&2 $pkgdir base=$(echo $pkgdir | sed "s,/$PKG\$,,") echo 1>&2 "base: $base" cd "$base" echo 1>&2 "fetching latest protos... " git clone -q $UPSTREAM $tmpdir for file in ${PROTO_FILES[@]}; do echo 1>&2 "* $file" protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file done echo 1>&2 "All OK" timestamp.go000066400000000000000000000114601324746544700346700ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypes// Go support for Protocol Buffers - Google's data interchange format // // Copyright 2016 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package ptypes // This file implements operations on google.protobuf.Timestamp. import ( "errors" "fmt" "time" tspb "github.com/golang/protobuf/ptypes/timestamp" ) const ( // Seconds field of the earliest valid Timestamp. // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). 
minValidSeconds = -62135596800 // Seconds field just after the latest valid Timestamp. // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). maxValidSeconds = 253402300800 ) // validateTimestamp determines whether a Timestamp is valid. // A valid timestamp represents a time in the range // [0001-01-01, 10000-01-01) and has a Nanos field // in the range [0, 1e9). // // If the Timestamp is valid, validateTimestamp returns nil. // Otherwise, it returns an error that describes // the problem. // // Every valid Timestamp can be represented by a time.Time, but the converse is not true. func validateTimestamp(ts *tspb.Timestamp) error { if ts == nil { return errors.New("timestamp: nil Timestamp") } if ts.Seconds < minValidSeconds { return fmt.Errorf("timestamp: %v before 0001-01-01", ts) } if ts.Seconds >= maxValidSeconds { return fmt.Errorf("timestamp: %v after 10000-01-01", ts) } if ts.Nanos < 0 || ts.Nanos >= 1e9 { return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) } return nil } // Timestamp converts a google.protobuf.Timestamp proto to a time.Time. // It returns an error if the argument is invalid. // // Unlike most Go functions, if Timestamp returns an error, the first return value // is not the zero time.Time. Instead, it is the value obtained from the // time.Unix function when passed the contents of the Timestamp, in the UTC // locale. This may or may not be a meaningful time; many invalid Timestamps // do map to valid time.Times. // // A nil Timestamp returns an error. The first return value in that case is // undefined. func Timestamp(ts *tspb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. var t time.Time if ts == nil { t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp } else { t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() } return t, validateTimestamp(ts) } // TimestampNow returns a google.protobuf.Timestamp for the current time. func TimestampNow() *tspb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { panic("ptypes: time.Now() out of Timestamp range") } return ts } // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. func TimestampProto(t time.Time) (*tspb.Timestamp, error) { seconds := t.Unix() nanos := int32(t.Sub(time.Unix(seconds, 0))) ts := &tspb.Timestamp{ Seconds: seconds, Nanos: nanos, } if err := validateTimestamp(ts); err != nil { return nil, err } return ts, nil } // TimestampString returns the RFC 3339 string for valid Timestamps. For invalid // Timestamps, it returns an error message in parentheses. func TimestampString(ts *tspb.Timestamp) string { t, err := Timestamp(ts) if err != nil { return fmt.Sprintf("(%v)", err) } return t.Format(time.RFC3339Nano) } timestamp/000077500000000000000000000000001324746544700343375ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypestimestamp.pb.go000066400000000000000000000124541324746544700372770ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypes/timestamp// Code generated by protoc-gen-go. // source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto // DO NOT EDIT! /* Package timestamp is a generated protocol buffer package. 
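The package-level helpers shown above (TimestampProto, Timestamp, TimestampNow and TimestampString) convert between time.Time and the google.protobuf.Timestamp message, running validateTimestamp on the way. A minimal round-trip sketch, assuming only the vendored github.com/golang/protobuf/ptypes import path laid out above; the standalone example program itself is illustrative and not part of the vendored tree:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> google.protobuf.Timestamp; TimestampProto validates the result.
	ts, err := ptypes.TimestampProto(time.Now())
	if err != nil {
		// Only times outside [0001-01-01, 10000-01-01) fail validation.
		log.Fatal(err)
	}

	// google.protobuf.Timestamp -> time.Time, returned in UTC.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}

	// TimestampString renders valid Timestamps as RFC 3339.
	fmt.Println(t, ptypes.TimestampString(ts))
}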
It is generated from these files: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto It has these top-level messages: Timestamp */ package timestamp import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // A Timestamp represents a point in time independent of any time zone // or calendar, represented as seconds and fractions of seconds at // nanosecond resolution in UTC Epoch time. It is encoded using the // Proleptic Gregorian Calendar which extends the Gregorian calendar // backwards to year one. It is encoded assuming all minutes are 60 // seconds long, i.e. leap seconds are "smeared" so that no leap second // table is needed for interpretation. Range is from // 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. // By restricting to that range, we ensure that we can convert to // and from RFC 3339 date strings. // See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). // // Example 1: Compute Timestamp from POSIX `time()`. // // Timestamp timestamp; // timestamp.set_seconds(time(NULL)); // timestamp.set_nanos(0); // // Example 2: Compute Timestamp from POSIX `gettimeofday()`. // // struct timeval tv; // gettimeofday(&tv, NULL); // // Timestamp timestamp; // timestamp.set_seconds(tv.tv_sec); // timestamp.set_nanos(tv.tv_usec * 1000); // // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. // // FILETIME ft; // GetSystemTimeAsFileTime(&ft); // UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; // // // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z // // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. // Timestamp timestamp; // timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); // timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); // // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. // // long millis = System.currentTimeMillis(); // // Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) // .setNanos((int) ((millis % 1000) * 1000000)).build(); // // // Example 5: Compute Timestamp from current time in Python. // // now = time.time() // seconds = int(now) // nanos = int((now - seconds) * 10**9) // timestamp = Timestamp(seconds=seconds, nanos=nanos) // // type Timestamp struct { // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` // Non-negative fractions of a second at nanosecond resolution. Negative // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. 
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` } func (m *Timestamp) Reset() { *m = Timestamp{} } func (m *Timestamp) String() string { return proto.CompactTextString(m) } func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 194 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27, 0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03, 0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92, 0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, 0x00, 0x00, } timestamp.proto000066400000000000000000000111141324746544700374250ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/github.com/golang/protobuf/ptypes/timestamp// Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option cc_enable_arenas = true; option go_package = "github.com/golang/protobuf/ptypes/timestamp"; option java_package = "com.google.protobuf"; option java_outer_classname = "TimestampProto"; option java_multiple_files = true; option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; // A Timestamp represents a point in time independent of any time zone // or calendar, represented as seconds and fractions of seconds at // nanosecond resolution in UTC Epoch time. It is encoded using the // Proleptic Gregorian Calendar which extends the Gregorian calendar // backwards to year one. It is encoded assuming all minutes are 60 // seconds long, i.e. leap seconds are "smeared" so that no leap second // table is needed for interpretation. Range is from // 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. // By restricting to that range, we ensure that we can convert to // and from RFC 3339 date strings. // See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). // // Example 1: Compute Timestamp from POSIX `time()`. // // Timestamp timestamp; // timestamp.set_seconds(time(NULL)); // timestamp.set_nanos(0); // // Example 2: Compute Timestamp from POSIX `gettimeofday()`. // // struct timeval tv; // gettimeofday(&tv, NULL); // // Timestamp timestamp; // timestamp.set_seconds(tv.tv_sec); // timestamp.set_nanos(tv.tv_usec * 1000); // // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. // // FILETIME ft; // GetSystemTimeAsFileTime(&ft); // UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; // // // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z // // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. // Timestamp timestamp; // timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); // timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); // // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. // // long millis = System.currentTimeMillis(); // // Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) // .setNanos((int) ((millis % 1000) * 1000000)).build(); // // // Example 5: Compute Timestamp from current time in Python. // // now = time.time() // seconds = int(now) // nanos = int((now - seconds) * 10**9) // timestamp = Timestamp(seconds=seconds, nanos=nanos) // // message Timestamp { // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. int64 seconds = 1; // Non-negative fractions of a second at nanosecond resolution. Negative // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. 
int32 nanos = 2; } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/000077500000000000000000000000001324746544700257405ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/000077500000000000000000000000001324746544700277675ustar00rootroot00000000000000gitaly-proto/000077500000000000000000000000001324746544700323425ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-orgLICENSE000066400000000000000000000020731324746544700333510ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-protoThe MIT License (MIT) Copyright (c) 2016-2017 GitLab B.V. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. go/000077500000000000000000000000001324746544700327475ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-protoREADME.md000066400000000000000000000002341324746544700342250ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go# Auto-generated Go gRPC bindings for gitaly This Go package is used both by the Gitaly server itself and by Go Gitaly clients (such as gitlab-workhorse). VERSION000066400000000000000000000000071324746544700340140ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go0.75.0 blob.pb.go000066400000000000000000000524621324746544700346250ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: blob.proto /* Package gitaly is a generated protocol buffer package. 
It is generated from these files: blob.proto commit.proto conflicts.proto deprecated-services.proto diff.proto namespace.proto notifications.proto operations.proto ref.proto remote.proto repository-service.proto shared.proto smarthttp.proto ssh.proto wiki.proto It has these top-level messages: GetBlobRequest GetBlobResponse GetBlobsRequest GetBlobsResponse LFSPointer GetLFSPointersRequest GetLFSPointersResponse CommitStatsRequest CommitStatsResponse CommitIsAncestorRequest CommitIsAncestorResponse TreeEntryRequest TreeEntryResponse CommitsBetweenRequest CommitsBetweenResponse CountCommitsRequest CountCommitsResponse TreeEntry GetTreeEntriesRequest GetTreeEntriesResponse ListFilesRequest ListFilesResponse FindCommitRequest FindCommitResponse ListCommitsByOidRequest ListCommitsByOidResponse FindAllCommitsRequest FindAllCommitsResponse FindCommitsRequest FindCommitsResponse CommitLanguagesRequest CommitLanguagesResponse RawBlameRequest RawBlameResponse LastCommitForPathRequest LastCommitForPathResponse CommitsByMessageRequest CommitsByMessageResponse FilterShasWithSignaturesRequest FilterShasWithSignaturesResponse ExtractCommitSignatureRequest ExtractCommitSignatureResponse ListConflictFilesRequest ConflictFileHeader ConflictFile ListConflictFilesResponse ResolveConflictsRequestHeader ResolveConflictsRequest ResolveConflictsResponse CommitDiffRequest CommitDiffResponse CommitDeltaRequest CommitDelta CommitDeltaResponse CommitPatchRequest CommitPatchResponse RawDiffRequest RawDiffResponse RawPatchRequest RawPatchResponse AddNamespaceRequest RemoveNamespaceRequest RenameNamespaceRequest NamespaceExistsRequest NamespaceExistsResponse AddNamespaceResponse RemoveNamespaceResponse RenameNamespaceResponse PostReceiveRequest PostReceiveResponse UserCreateBranchRequest UserCreateBranchResponse UserDeleteBranchRequest UserDeleteBranchResponse UserDeleteTagRequest UserDeleteTagResponse UserCreateTagRequest UserCreateTagResponse UserMergeBranchRequest UserMergeBranchResponse OperationBranchUpdate UserFFBranchRequest UserFFBranchResponse UserCherryPickRequest UserCherryPickResponse UserRevertRequest UserRevertResponse UserCommitFilesActionHeader UserCommitFilesAction UserCommitFilesRequestHeader UserCommitFilesRequest UserCommitFilesResponse UserRebaseRequest UserRebaseResponse FindDefaultBranchNameRequest FindDefaultBranchNameResponse FindAllBranchNamesRequest FindAllBranchNamesResponse FindAllTagNamesRequest FindAllTagNamesResponse FindRefNameRequest FindRefNameResponse FindLocalBranchesRequest FindLocalBranchesResponse FindLocalBranchResponse FindLocalBranchCommitAuthor FindAllBranchesRequest FindAllBranchesResponse FindAllTagsRequest FindAllTagsResponse RefExistsRequest RefExistsResponse CreateBranchRequest CreateBranchResponse DeleteBranchRequest DeleteBranchResponse FindBranchRequest FindBranchResponse DeleteRefsRequest DeleteRefsResponse ListBranchNamesContainingCommitRequest ListBranchNamesContainingCommitResponse ListTagNamesContainingCommitRequest ListTagNamesContainingCommitResponse AddRemoteRequest AddRemoteResponse RemoveRemoteRequest RemoveRemoteResponse FetchInternalRemoteRequest FetchInternalRemoteResponse UpdateRemoteMirrorRequest UpdateRemoteMirrorResponse RepositoryExistsRequest RepositoryExistsResponse RepositoryIsEmptyRequest RepositoryIsEmptyResponse RepackIncrementalRequest RepackIncrementalResponse RepackFullRequest RepackFullResponse GarbageCollectRequest GarbageCollectResponse RepositorySizeRequest RepositorySizeResponse ApplyGitattributesRequest ApplyGitattributesResponse 
FetchRemoteRequest FetchRemoteResponse CreateRepositoryRequest CreateRepositoryResponse GetArchiveRequest GetArchiveResponse HasLocalBranchesRequest HasLocalBranchesResponse FetchSourceBranchRequest FetchSourceBranchResponse FsckRequest FsckResponse WriteRefRequest WriteRefResponse FindMergeBaseRequest FindMergeBaseResponse CreateForkRequest CreateForkResponse IsRebaseInProgressRequest IsRebaseInProgressResponse CreateRepositoryFromURLRequest CreateRepositoryFromURLResponse Repository GitCommit CommitAuthor ExitStatus Branch Tag User InfoRefsRequest InfoRefsResponse PostUploadPackRequest PostUploadPackResponse PostReceivePackRequest PostReceivePackResponse SSHUploadPackRequest SSHUploadPackResponse SSHReceivePackRequest SSHReceivePackResponse WikiCommitDetails WikiPageVersion WikiPage WikiGetPageVersionsRequest WikiGetPageVersionsResponse WikiWritePageRequest WikiWritePageResponse WikiUpdatePageRequest WikiUpdatePageResponse WikiDeletePageRequest WikiDeletePageResponse WikiFindPageRequest WikiFindPageResponse WikiFindFileRequest WikiFindFileResponse WikiGetAllPagesRequest WikiGetAllPagesResponse */ package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type GetBlobRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // Object ID (SHA1) of the blob we want to get Oid string `protobuf:"bytes,2,opt,name=oid" json:"oid,omitempty"` // Maximum number of bytes we want to receive. Use '-1' to get the full blob no matter how big. Limit int64 `protobuf:"varint,3,opt,name=limit" json:"limit,omitempty"` } func (m *GetBlobRequest) Reset() { *m = GetBlobRequest{} } func (m *GetBlobRequest) String() string { return proto.CompactTextString(m) } func (*GetBlobRequest) ProtoMessage() {} func (*GetBlobRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *GetBlobRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *GetBlobRequest) GetOid() string { if m != nil { return m.Oid } return "" } func (m *GetBlobRequest) GetLimit() int64 { if m != nil { return m.Limit } return 0 } type GetBlobResponse struct { // Blob size; present only in first response message Size int64 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` // Chunk of blob data Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // Object ID of the actual blob returned. Empty if no blob was found. 
Oid string `protobuf:"bytes,3,opt,name=oid" json:"oid,omitempty"` } func (m *GetBlobResponse) Reset() { *m = GetBlobResponse{} } func (m *GetBlobResponse) String() string { return proto.CompactTextString(m) } func (*GetBlobResponse) ProtoMessage() {} func (*GetBlobResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *GetBlobResponse) GetSize() int64 { if m != nil { return m.Size } return 0 } func (m *GetBlobResponse) GetData() []byte { if m != nil { return m.Data } return nil } func (m *GetBlobResponse) GetOid() string { if m != nil { return m.Oid } return "" } type GetBlobsRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // Object IDs (SHA1) of the blobs we want to get Oids []string `protobuf:"bytes,2,rep,name=oids" json:"oids,omitempty"` // Maximum number of bytes we want to receive. Use '-1' to get the full blobs no matter how big. Limit int64 `protobuf:"varint,3,opt,name=limit" json:"limit,omitempty"` } func (m *GetBlobsRequest) Reset() { *m = GetBlobsRequest{} } func (m *GetBlobsRequest) String() string { return proto.CompactTextString(m) } func (*GetBlobsRequest) ProtoMessage() {} func (*GetBlobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func (m *GetBlobsRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *GetBlobsRequest) GetOids() []string { if m != nil { return m.Oids } return nil } func (m *GetBlobsRequest) GetLimit() int64 { if m != nil { return m.Limit } return 0 } type GetBlobsResponse struct { // Blob size; present only on the first message per blob Size int64 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` // Chunk of blob data Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // Object ID of the current blob. Only present on the first message per blob. Empty if no blob was found. 
Oid string `protobuf:"bytes,3,opt,name=oid" json:"oid,omitempty"` } func (m *GetBlobsResponse) Reset() { *m = GetBlobsResponse{} } func (m *GetBlobsResponse) String() string { return proto.CompactTextString(m) } func (*GetBlobsResponse) ProtoMessage() {} func (*GetBlobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func (m *GetBlobsResponse) GetSize() int64 { if m != nil { return m.Size } return 0 } func (m *GetBlobsResponse) GetData() []byte { if m != nil { return m.Data } return nil } func (m *GetBlobsResponse) GetOid() string { if m != nil { return m.Oid } return "" } type LFSPointer struct { Size int64 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` Oid string `protobuf:"bytes,3,opt,name=oid" json:"oid,omitempty"` } func (m *LFSPointer) Reset() { *m = LFSPointer{} } func (m *LFSPointer) String() string { return proto.CompactTextString(m) } func (*LFSPointer) ProtoMessage() {} func (*LFSPointer) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *LFSPointer) GetSize() int64 { if m != nil { return m.Size } return 0 } func (m *LFSPointer) GetData() []byte { if m != nil { return m.Data } return nil } func (m *LFSPointer) GetOid() string { if m != nil { return m.Oid } return "" } type GetLFSPointersRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` BlobIds []string `protobuf:"bytes,2,rep,name=blob_ids,json=blobIds" json:"blob_ids,omitempty"` } func (m *GetLFSPointersRequest) Reset() { *m = GetLFSPointersRequest{} } func (m *GetLFSPointersRequest) String() string { return proto.CompactTextString(m) } func (*GetLFSPointersRequest) ProtoMessage() {} func (*GetLFSPointersRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *GetLFSPointersRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *GetLFSPointersRequest) GetBlobIds() []string { if m != nil { return m.BlobIds } return nil } type GetLFSPointersResponse struct { LfsPointers []*LFSPointer `protobuf:"bytes,1,rep,name=lfs_pointers,json=lfsPointers" json:"lfs_pointers,omitempty"` } func (m *GetLFSPointersResponse) Reset() { *m = GetLFSPointersResponse{} } func (m *GetLFSPointersResponse) String() string { return proto.CompactTextString(m) } func (*GetLFSPointersResponse) ProtoMessage() {} func (*GetLFSPointersResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *GetLFSPointersResponse) GetLfsPointers() []*LFSPointer { if m != nil { return m.LfsPointers } return nil } func init() { proto.RegisterType((*GetBlobRequest)(nil), "gitaly.GetBlobRequest") proto.RegisterType((*GetBlobResponse)(nil), "gitaly.GetBlobResponse") proto.RegisterType((*GetBlobsRequest)(nil), "gitaly.GetBlobsRequest") proto.RegisterType((*GetBlobsResponse)(nil), "gitaly.GetBlobsResponse") proto.RegisterType((*LFSPointer)(nil), "gitaly.LFSPointer") proto.RegisterType((*GetLFSPointersRequest)(nil), "gitaly.GetLFSPointersRequest") proto.RegisterType((*GetLFSPointersResponse)(nil), "gitaly.GetLFSPointersResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. 
const _ = grpc.SupportPackageIsVersion4 // Client API for BlobService service type BlobServiceClient interface { // GetBlob returns the contents of a blob object referenced by its object // ID. We use a stream to return a chunked arbitrarily large binary // response GetBlob(ctx context.Context, in *GetBlobRequest, opts ...grpc.CallOption) (BlobService_GetBlobClient, error) // GetBlobsBySHA returns the contents of a blob objects referenced by their object // ID. We use a stream to return a chunked arbitrarily large binary response. // The blobs are sent in a continous stream, the caller is responsible for spliting // them up into multiple blobs by their object IDs. GetBlobs(ctx context.Context, in *GetBlobsRequest, opts ...grpc.CallOption) (BlobService_GetBlobsClient, error) GetLFSPointers(ctx context.Context, in *GetLFSPointersRequest, opts ...grpc.CallOption) (BlobService_GetLFSPointersClient, error) } type blobServiceClient struct { cc *grpc.ClientConn } func NewBlobServiceClient(cc *grpc.ClientConn) BlobServiceClient { return &blobServiceClient{cc} } func (c *blobServiceClient) GetBlob(ctx context.Context, in *GetBlobRequest, opts ...grpc.CallOption) (BlobService_GetBlobClient, error) { stream, err := grpc.NewClientStream(ctx, &_BlobService_serviceDesc.Streams[0], c.cc, "/gitaly.BlobService/GetBlob", opts...) if err != nil { return nil, err } x := &blobServiceGetBlobClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type BlobService_GetBlobClient interface { Recv() (*GetBlobResponse, error) grpc.ClientStream } type blobServiceGetBlobClient struct { grpc.ClientStream } func (x *blobServiceGetBlobClient) Recv() (*GetBlobResponse, error) { m := new(GetBlobResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *blobServiceClient) GetBlobs(ctx context.Context, in *GetBlobsRequest, opts ...grpc.CallOption) (BlobService_GetBlobsClient, error) { stream, err := grpc.NewClientStream(ctx, &_BlobService_serviceDesc.Streams[1], c.cc, "/gitaly.BlobService/GetBlobs", opts...) if err != nil { return nil, err } x := &blobServiceGetBlobsClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type BlobService_GetBlobsClient interface { Recv() (*GetBlobsResponse, error) grpc.ClientStream } type blobServiceGetBlobsClient struct { grpc.ClientStream } func (x *blobServiceGetBlobsClient) Recv() (*GetBlobsResponse, error) { m := new(GetBlobsResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *blobServiceClient) GetLFSPointers(ctx context.Context, in *GetLFSPointersRequest, opts ...grpc.CallOption) (BlobService_GetLFSPointersClient, error) { stream, err := grpc.NewClientStream(ctx, &_BlobService_serviceDesc.Streams[2], c.cc, "/gitaly.BlobService/GetLFSPointers", opts...) 
if err != nil { return nil, err } x := &blobServiceGetLFSPointersClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type BlobService_GetLFSPointersClient interface { Recv() (*GetLFSPointersResponse, error) grpc.ClientStream } type blobServiceGetLFSPointersClient struct { grpc.ClientStream } func (x *blobServiceGetLFSPointersClient) Recv() (*GetLFSPointersResponse, error) { m := new(GetLFSPointersResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for BlobService service type BlobServiceServer interface { // GetBlob returns the contents of a blob object referenced by its object // ID. We use a stream to return a chunked arbitrarily large binary // response GetBlob(*GetBlobRequest, BlobService_GetBlobServer) error // GetBlobsBySHA returns the contents of a blob objects referenced by their object // ID. We use a stream to return a chunked arbitrarily large binary response. // The blobs are sent in a continous stream, the caller is responsible for spliting // them up into multiple blobs by their object IDs. GetBlobs(*GetBlobsRequest, BlobService_GetBlobsServer) error GetLFSPointers(*GetLFSPointersRequest, BlobService_GetLFSPointersServer) error } func RegisterBlobServiceServer(s *grpc.Server, srv BlobServiceServer) { s.RegisterService(&_BlobService_serviceDesc, srv) } func _BlobService_GetBlob_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(GetBlobRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(BlobServiceServer).GetBlob(m, &blobServiceGetBlobServer{stream}) } type BlobService_GetBlobServer interface { Send(*GetBlobResponse) error grpc.ServerStream } type blobServiceGetBlobServer struct { grpc.ServerStream } func (x *blobServiceGetBlobServer) Send(m *GetBlobResponse) error { return x.ServerStream.SendMsg(m) } func _BlobService_GetBlobs_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(GetBlobsRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(BlobServiceServer).GetBlobs(m, &blobServiceGetBlobsServer{stream}) } type BlobService_GetBlobsServer interface { Send(*GetBlobsResponse) error grpc.ServerStream } type blobServiceGetBlobsServer struct { grpc.ServerStream } func (x *blobServiceGetBlobsServer) Send(m *GetBlobsResponse) error { return x.ServerStream.SendMsg(m) } func _BlobService_GetLFSPointers_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(GetLFSPointersRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(BlobServiceServer).GetLFSPointers(m, &blobServiceGetLFSPointersServer{stream}) } type BlobService_GetLFSPointersServer interface { Send(*GetLFSPointersResponse) error grpc.ServerStream } type blobServiceGetLFSPointersServer struct { grpc.ServerStream } func (x *blobServiceGetLFSPointersServer) Send(m *GetLFSPointersResponse) error { return x.ServerStream.SendMsg(m) } var _BlobService_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.BlobService", HandlerType: (*BlobServiceServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "GetBlob", Handler: _BlobService_GetBlob_Handler, ServerStreams: true, }, { StreamName: "GetBlobs", Handler: _BlobService_GetBlobs_Handler, ServerStreams: true, }, { StreamName: "GetLFSPointers", Handler: _BlobService_GetLFSPointers_Handler, ServerStreams: true, }, }, Metadata: "blob.proto", } func init() { 
proto.RegisterFile("blob.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 353 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xcd, 0x4e, 0xe3, 0x30, 0x10, 0x5e, 0xd7, 0xdd, 0xfe, 0x4c, 0xab, 0xdd, 0x6a, 0xb4, 0x5b, 0x42, 0x24, 0x50, 0x94, 0x53, 0x4e, 0x15, 0x2a, 0xe2, 0x8a, 0x04, 0x87, 0x56, 0x88, 0x4a, 0x20, 0xf7, 0x01, 0xaa, 0x84, 0xb8, 0x60, 0xc9, 0xd4, 0x21, 0x36, 0x48, 0xe5, 0x7d, 0x79, 0x0f, 0x14, 0xa7, 0xf9, 0xa1, 0x55, 0x4f, 0xb9, 0x8d, 0x67, 0xe6, 0xfb, 0xc9, 0x17, 0x1b, 0x20, 0x92, 0x2a, 0x9a, 0x24, 0xa9, 0x32, 0x0a, 0x3b, 0xcf, 0xc2, 0x84, 0x72, 0xeb, 0x0e, 0xf5, 0x4b, 0x98, 0xf2, 0x38, 0xef, 0xfa, 0x12, 0xfe, 0xcc, 0xb9, 0xb9, 0x95, 0x2a, 0x62, 0xfc, 0xed, 0x9d, 0x6b, 0x83, 0x53, 0x80, 0x94, 0x27, 0x4a, 0x0b, 0xa3, 0xd2, 0xad, 0x43, 0x3c, 0x12, 0x0c, 0xa6, 0x38, 0xc9, 0xc1, 0x13, 0x56, 0x4e, 0x58, 0x6d, 0x0b, 0x47, 0x40, 0x95, 0x88, 0x9d, 0x96, 0x47, 0x82, 0x3e, 0xcb, 0x4a, 0xfc, 0x07, 0xbf, 0xa5, 0x78, 0x15, 0xc6, 0xa1, 0x1e, 0x09, 0x28, 0xcb, 0x0f, 0xfe, 0x3d, 0xfc, 0x2d, 0xd5, 0x74, 0xa2, 0x36, 0x9a, 0x23, 0x42, 0x5b, 0x8b, 0x4f, 0x6e, 0x85, 0x28, 0xb3, 0x75, 0xd6, 0x8b, 0x43, 0x13, 0x5a, 0xbe, 0x21, 0xb3, 0x75, 0x21, 0x41, 0x4b, 0x09, 0x5f, 0x95, 0x64, 0xba, 0x89, 0x77, 0x84, 0xb6, 0x12, 0xb1, 0x76, 0x5a, 0x1e, 0x0d, 0xfa, 0xcc, 0xd6, 0x47, 0xdc, 0x2f, 0x60, 0x54, 0x09, 0x36, 0xb6, 0x3f, 0x03, 0x58, 0xcc, 0x96, 0x8f, 0x4a, 0x6c, 0x0c, 0x4f, 0x1b, 0xf0, 0xac, 0xe1, 0xff, 0x9c, 0x9b, 0x8a, 0xaa, 0x51, 0x18, 0xa7, 0xd0, 0xcb, 0xae, 0xcc, 0xaa, 0x0a, 0xa4, 0x9b, 0x9d, 0xef, 0x62, 0xed, 0x3f, 0xc0, 0x78, 0x5f, 0x67, 0x97, 0xc1, 0x15, 0x0c, 0xe5, 0x5a, 0xaf, 0x92, 0x5d, 0xdf, 0x21, 0x1e, 0xad, 0x4b, 0x55, 0x10, 0x36, 0x90, 0x6b, 0x5d, 0xc0, 0xa7, 0x5f, 0x04, 0x06, 0x59, 0x98, 0x4b, 0x9e, 0x7e, 0x88, 0x27, 0x8e, 0xd7, 0xd0, 0xdd, 0xc5, 0x8b, 0xe3, 0x02, 0xfb, 0xf3, 0x6e, 0xba, 0x27, 0x07, 0xfd, 0xdc, 0x82, 0xff, 0xeb, 0x82, 0xe0, 0x0d, 0xf4, 0x8a, 0xdf, 0x83, 0xfb, 0x8b, 0x45, 0x28, 0xae, 0x73, 0x38, 0xa8, 0x51, 0x2c, 0xed, 0x6b, 0xa8, 0x7d, 0x23, 0x9e, 0xd5, 0xf6, 0x0f, 0x33, 0x76, 0xcf, 0x8f, 0x8d, 0x2b, 0xd2, 0xa8, 0x63, 0x5f, 0xda, 0xe5, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0xeb, 0x6b, 0x28, 0x11, 0x8d, 0x03, 0x00, 0x00, } commit.pb.go000066400000000000000000002330041324746544700351700ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: commit.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type TreeEntryResponse_ObjectType int32 const ( TreeEntryResponse_COMMIT TreeEntryResponse_ObjectType = 0 TreeEntryResponse_BLOB TreeEntryResponse_ObjectType = 1 TreeEntryResponse_TREE TreeEntryResponse_ObjectType = 2 TreeEntryResponse_TAG TreeEntryResponse_ObjectType = 3 ) var TreeEntryResponse_ObjectType_name = map[int32]string{ 0: "COMMIT", 1: "BLOB", 2: "TREE", 3: "TAG", } var TreeEntryResponse_ObjectType_value = map[string]int32{ "COMMIT": 0, "BLOB": 1, "TREE": 2, "TAG": 3, } func (x TreeEntryResponse_ObjectType) String() string { return proto.EnumName(TreeEntryResponse_ObjectType_name, int32(x)) } func (TreeEntryResponse_ObjectType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{5, 0} } type TreeEntry_EntryType int32 const ( TreeEntry_BLOB TreeEntry_EntryType = 0 TreeEntry_TREE TreeEntry_EntryType = 1 TreeEntry_COMMIT TreeEntry_EntryType = 3 ) var TreeEntry_EntryType_name = map[int32]string{ 0: "BLOB", 1: "TREE", 3: "COMMIT", } var TreeEntry_EntryType_value = map[string]int32{ "BLOB": 0, "TREE": 1, "COMMIT": 3, } func (x TreeEntry_EntryType) String() string { return proto.EnumName(TreeEntry_EntryType_name, int32(x)) } func (TreeEntry_EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{10, 0} } type FindAllCommitsRequest_Order int32 const ( FindAllCommitsRequest_NONE FindAllCommitsRequest_Order = 0 FindAllCommitsRequest_TOPO FindAllCommitsRequest_Order = 1 FindAllCommitsRequest_DATE FindAllCommitsRequest_Order = 2 ) var FindAllCommitsRequest_Order_name = map[int32]string{ 0: "NONE", 1: "TOPO", 2: "DATE", } var FindAllCommitsRequest_Order_value = map[string]int32{ "NONE": 0, "TOPO": 1, "DATE": 2, } func (x FindAllCommitsRequest_Order) String() string { return proto.EnumName(FindAllCommitsRequest_Order_name, int32(x)) } func (FindAllCommitsRequest_Order) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{19, 0} } type CommitStatsRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` } func (m *CommitStatsRequest) Reset() { *m = CommitStatsRequest{} } func (m *CommitStatsRequest) String() string { return proto.CompactTextString(m) } func (*CommitStatsRequest) ProtoMessage() {} func (*CommitStatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } func (m *CommitStatsRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *CommitStatsRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } type CommitStatsResponse struct { // OID is the commit. 
Empty means not found Oid string `protobuf:"bytes,1,opt,name=oid" json:"oid,omitempty"` Additions int32 `protobuf:"varint,2,opt,name=additions" json:"additions,omitempty"` Deletions int32 `protobuf:"varint,3,opt,name=deletions" json:"deletions,omitempty"` } func (m *CommitStatsResponse) Reset() { *m = CommitStatsResponse{} } func (m *CommitStatsResponse) String() string { return proto.CompactTextString(m) } func (*CommitStatsResponse) ProtoMessage() {} func (*CommitStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } func (m *CommitStatsResponse) GetOid() string { if m != nil { return m.Oid } return "" } func (m *CommitStatsResponse) GetAdditions() int32 { if m != nil { return m.Additions } return 0 } func (m *CommitStatsResponse) GetDeletions() int32 { if m != nil { return m.Deletions } return 0 } type CommitIsAncestorRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` AncestorId string `protobuf:"bytes,2,opt,name=ancestor_id,json=ancestorId" json:"ancestor_id,omitempty"` ChildId string `protobuf:"bytes,3,opt,name=child_id,json=childId" json:"child_id,omitempty"` } func (m *CommitIsAncestorRequest) Reset() { *m = CommitIsAncestorRequest{} } func (m *CommitIsAncestorRequest) String() string { return proto.CompactTextString(m) } func (*CommitIsAncestorRequest) ProtoMessage() {} func (*CommitIsAncestorRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } func (m *CommitIsAncestorRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *CommitIsAncestorRequest) GetAncestorId() string { if m != nil { return m.AncestorId } return "" } func (m *CommitIsAncestorRequest) GetChildId() string { if m != nil { return m.ChildId } return "" } type CommitIsAncestorResponse struct { Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` } func (m *CommitIsAncestorResponse) Reset() { *m = CommitIsAncestorResponse{} } func (m *CommitIsAncestorResponse) String() string { return proto.CompactTextString(m) } func (*CommitIsAncestorResponse) ProtoMessage() {} func (*CommitIsAncestorResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } func (m *CommitIsAncestorResponse) GetValue() bool { if m != nil { return m.Value } return false } type TreeEntryRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // commit ID or refname Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` // entry path relative to repository root Path []byte `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` Limit int64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` } func (m *TreeEntryRequest) Reset() { *m = TreeEntryRequest{} } func (m *TreeEntryRequest) String() string { return proto.CompactTextString(m) } func (*TreeEntryRequest) ProtoMessage() {} func (*TreeEntryRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } func (m *TreeEntryRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *TreeEntryRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } func (m *TreeEntryRequest) GetPath() []byte { if m != nil { return m.Path } return nil } func (m *TreeEntryRequest) GetLimit() int64 { if m != nil { return m.Limit } return 0 } type TreeEntryResponse struct { Type TreeEntryResponse_ObjectType 
`protobuf:"varint,1,opt,name=type,enum=gitaly.TreeEntryResponse_ObjectType" json:"type,omitempty"` // SHA1 object ID Oid string `protobuf:"bytes,2,opt,name=oid" json:"oid,omitempty"` Size int64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"` // file mode Mode int32 `protobuf:"varint,4,opt,name=mode" json:"mode,omitempty"` // raw object contents Data []byte `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` } func (m *TreeEntryResponse) Reset() { *m = TreeEntryResponse{} } func (m *TreeEntryResponse) String() string { return proto.CompactTextString(m) } func (*TreeEntryResponse) ProtoMessage() {} func (*TreeEntryResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } func (m *TreeEntryResponse) GetType() TreeEntryResponse_ObjectType { if m != nil { return m.Type } return TreeEntryResponse_COMMIT } func (m *TreeEntryResponse) GetOid() string { if m != nil { return m.Oid } return "" } func (m *TreeEntryResponse) GetSize() int64 { if m != nil { return m.Size } return 0 } func (m *TreeEntryResponse) GetMode() int32 { if m != nil { return m.Mode } return 0 } func (m *TreeEntryResponse) GetData() []byte { if m != nil { return m.Data } return nil } type CommitsBetweenRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` From []byte `protobuf:"bytes,2,opt,name=from,proto3" json:"from,omitempty"` To []byte `protobuf:"bytes,3,opt,name=to,proto3" json:"to,omitempty"` } func (m *CommitsBetweenRequest) Reset() { *m = CommitsBetweenRequest{} } func (m *CommitsBetweenRequest) String() string { return proto.CompactTextString(m) } func (*CommitsBetweenRequest) ProtoMessage() {} func (*CommitsBetweenRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } func (m *CommitsBetweenRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *CommitsBetweenRequest) GetFrom() []byte { if m != nil { return m.From } return nil } func (m *CommitsBetweenRequest) GetTo() []byte { if m != nil { return m.To } return nil } type CommitsBetweenResponse struct { Commits []*GitCommit `protobuf:"bytes,1,rep,name=commits" json:"commits,omitempty"` } func (m *CommitsBetweenResponse) Reset() { *m = CommitsBetweenResponse{} } func (m *CommitsBetweenResponse) String() string { return proto.CompactTextString(m) } func (*CommitsBetweenResponse) ProtoMessage() {} func (*CommitsBetweenResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } func (m *CommitsBetweenResponse) GetCommits() []*GitCommit { if m != nil { return m.Commits } return nil } type CountCommitsRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` After *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=after" json:"after,omitempty"` Before *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=before" json:"before,omitempty"` Path []byte `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` MaxCount int32 `protobuf:"varint,6,opt,name=max_count,json=maxCount" json:"max_count,omitempty"` } func (m *CountCommitsRequest) Reset() { *m = CountCommitsRequest{} } func (m *CountCommitsRequest) String() string { return proto.CompactTextString(m) } func (*CountCommitsRequest) ProtoMessage() {} func (*CountCommitsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } func (m *CountCommitsRequest) GetRepository() *Repository { if m != nil 
{ return m.Repository } return nil } func (m *CountCommitsRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } func (m *CountCommitsRequest) GetAfter() *google_protobuf.Timestamp { if m != nil { return m.After } return nil } func (m *CountCommitsRequest) GetBefore() *google_protobuf.Timestamp { if m != nil { return m.Before } return nil } func (m *CountCommitsRequest) GetPath() []byte { if m != nil { return m.Path } return nil } func (m *CountCommitsRequest) GetMaxCount() int32 { if m != nil { return m.MaxCount } return 0 } type CountCommitsResponse struct { Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` } func (m *CountCommitsResponse) Reset() { *m = CountCommitsResponse{} } func (m *CountCommitsResponse) String() string { return proto.CompactTextString(m) } func (*CountCommitsResponse) ProtoMessage() {} func (*CountCommitsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } func (m *CountCommitsResponse) GetCount() int32 { if m != nil { return m.Count } return 0 } type TreeEntry struct { // OID of the object this tree entry points to Oid string `protobuf:"bytes,1,opt,name=oid" json:"oid,omitempty"` // OID of the tree attached to commit_oid RootOid string `protobuf:"bytes,2,opt,name=root_oid,json=rootOid" json:"root_oid,omitempty"` // Path relative to repository root Path []byte `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` Type TreeEntry_EntryType `protobuf:"varint,4,opt,name=type,enum=gitaly.TreeEntry_EntryType" json:"type,omitempty"` // File mode e.g. 0644 Mode int32 `protobuf:"varint,5,opt,name=mode" json:"mode,omitempty"` // The commit object via which this entry was retrieved CommitOid string `protobuf:"bytes,6,opt,name=commit_oid,json=commitOid" json:"commit_oid,omitempty"` // Relative path of the first subdir that doesn't have only one directory descendant FlatPath []byte `protobuf:"bytes,7,opt,name=flat_path,json=flatPath,proto3" json:"flat_path,omitempty"` } func (m *TreeEntry) Reset() { *m = TreeEntry{} } func (m *TreeEntry) String() string { return proto.CompactTextString(m) } func (*TreeEntry) ProtoMessage() {} func (*TreeEntry) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } func (m *TreeEntry) GetOid() string { if m != nil { return m.Oid } return "" } func (m *TreeEntry) GetRootOid() string { if m != nil { return m.RootOid } return "" } func (m *TreeEntry) GetPath() []byte { if m != nil { return m.Path } return nil } func (m *TreeEntry) GetType() TreeEntry_EntryType { if m != nil { return m.Type } return TreeEntry_BLOB } func (m *TreeEntry) GetMode() int32 { if m != nil { return m.Mode } return 0 } func (m *TreeEntry) GetCommitOid() string { if m != nil { return m.CommitOid } return "" } func (m *TreeEntry) GetFlatPath() []byte { if m != nil { return m.FlatPath } return nil } type GetTreeEntriesRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` Path []byte `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` } func (m *GetTreeEntriesRequest) Reset() { *m = GetTreeEntriesRequest{} } func (m *GetTreeEntriesRequest) String() string { return proto.CompactTextString(m) } func (*GetTreeEntriesRequest) ProtoMessage() {} func (*GetTreeEntriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } func (m *GetTreeEntriesRequest) GetRepository() *Repository { if m != nil { return m.Repository } return 
nil } func (m *GetTreeEntriesRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } func (m *GetTreeEntriesRequest) GetPath() []byte { if m != nil { return m.Path } return nil } type GetTreeEntriesResponse struct { Entries []*TreeEntry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` } func (m *GetTreeEntriesResponse) Reset() { *m = GetTreeEntriesResponse{} } func (m *GetTreeEntriesResponse) String() string { return proto.CompactTextString(m) } func (*GetTreeEntriesResponse) ProtoMessage() {} func (*GetTreeEntriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } func (m *GetTreeEntriesResponse) GetEntries() []*TreeEntry { if m != nil { return m.Entries } return nil } type ListFilesRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` } func (m *ListFilesRequest) Reset() { *m = ListFilesRequest{} } func (m *ListFilesRequest) String() string { return proto.CompactTextString(m) } func (*ListFilesRequest) ProtoMessage() {} func (*ListFilesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } func (m *ListFilesRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *ListFilesRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } // A single 'page' of the paginated response type ListFilesResponse struct { // Remember to force encoding utf-8 on the client side Paths [][]byte `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` } func (m *ListFilesResponse) Reset() { *m = ListFilesResponse{} } func (m *ListFilesResponse) String() string { return proto.CompactTextString(m) } func (*ListFilesResponse) ProtoMessage() {} func (*ListFilesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } func (m *ListFilesResponse) GetPaths() [][]byte { if m != nil { return m.Paths } return nil } type FindCommitRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` } func (m *FindCommitRequest) Reset() { *m = FindCommitRequest{} } func (m *FindCommitRequest) String() string { return proto.CompactTextString(m) } func (*FindCommitRequest) ProtoMessage() {} func (*FindCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } func (m *FindCommitRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *FindCommitRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } type FindCommitResponse struct { // commit is nil when the commit was not found Commit *GitCommit `protobuf:"bytes,1,opt,name=commit" json:"commit,omitempty"` } func (m *FindCommitResponse) Reset() { *m = FindCommitResponse{} } func (m *FindCommitResponse) String() string { return proto.CompactTextString(m) } func (*FindCommitResponse) ProtoMessage() {} func (*FindCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} } func (m *FindCommitResponse) GetCommit() *GitCommit { if m != nil { return m.Commit } return nil } type ListCommitsByOidRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Oid []string `protobuf:"bytes,2,rep,name=oid" json:"oid,omitempty"` } func (m *ListCommitsByOidRequest) Reset() { *m = 
ListCommitsByOidRequest{} } func (m *ListCommitsByOidRequest) String() string { return proto.CompactTextString(m) } func (*ListCommitsByOidRequest) ProtoMessage() {} func (*ListCommitsByOidRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{17} } func (m *ListCommitsByOidRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *ListCommitsByOidRequest) GetOid() []string { if m != nil { return m.Oid } return nil } type ListCommitsByOidResponse struct { Commits []*GitCommit `protobuf:"bytes,1,rep,name=commits" json:"commits,omitempty"` } func (m *ListCommitsByOidResponse) Reset() { *m = ListCommitsByOidResponse{} } func (m *ListCommitsByOidResponse) String() string { return proto.CompactTextString(m) } func (*ListCommitsByOidResponse) ProtoMessage() {} func (*ListCommitsByOidResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{18} } func (m *ListCommitsByOidResponse) GetCommits() []*GitCommit { if m != nil { return m.Commits } return nil } type FindAllCommitsRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // When nil, return all commits reachable by any branch in the repo Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` MaxCount int32 `protobuf:"varint,3,opt,name=max_count,json=maxCount" json:"max_count,omitempty"` Skip int32 `protobuf:"varint,4,opt,name=skip" json:"skip,omitempty"` Order FindAllCommitsRequest_Order `protobuf:"varint,5,opt,name=order,enum=gitaly.FindAllCommitsRequest_Order" json:"order,omitempty"` } func (m *FindAllCommitsRequest) Reset() { *m = FindAllCommitsRequest{} } func (m *FindAllCommitsRequest) String() string { return proto.CompactTextString(m) } func (*FindAllCommitsRequest) ProtoMessage() {} func (*FindAllCommitsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{19} } func (m *FindAllCommitsRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *FindAllCommitsRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } func (m *FindAllCommitsRequest) GetMaxCount() int32 { if m != nil { return m.MaxCount } return 0 } func (m *FindAllCommitsRequest) GetSkip() int32 { if m != nil { return m.Skip } return 0 } func (m *FindAllCommitsRequest) GetOrder() FindAllCommitsRequest_Order { if m != nil { return m.Order } return FindAllCommitsRequest_NONE } // A single 'page' of the result set type FindAllCommitsResponse struct { Commits []*GitCommit `protobuf:"bytes,1,rep,name=commits" json:"commits,omitempty"` } func (m *FindAllCommitsResponse) Reset() { *m = FindAllCommitsResponse{} } func (m *FindAllCommitsResponse) String() string { return proto.CompactTextString(m) } func (*FindAllCommitsResponse) ProtoMessage() {} func (*FindAllCommitsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{20} } func (m *FindAllCommitsResponse) GetCommits() []*GitCommit { if m != nil { return m.Commits } return nil } type FindCommitsRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` Limit int32 `protobuf:"varint,3,opt,name=limit" json:"limit,omitempty"` Offset int32 `protobuf:"varint,4,opt,name=offset" json:"offset,omitempty"` Paths [][]byte `protobuf:"bytes,5,rep,name=paths,proto3" json:"paths,omitempty"` Follow bool `protobuf:"varint,6,opt,name=follow" json:"follow,omitempty"` 
SkipMerges bool `protobuf:"varint,7,opt,name=skip_merges,json=skipMerges" json:"skip_merges,omitempty"` DisableWalk bool `protobuf:"varint,8,opt,name=disable_walk,json=disableWalk" json:"disable_walk,omitempty"` After *google_protobuf.Timestamp `protobuf:"bytes,9,opt,name=after" json:"after,omitempty"` Before *google_protobuf.Timestamp `protobuf:"bytes,10,opt,name=before" json:"before,omitempty"` } func (m *FindCommitsRequest) Reset() { *m = FindCommitsRequest{} } func (m *FindCommitsRequest) String() string { return proto.CompactTextString(m) } func (*FindCommitsRequest) ProtoMessage() {} func (*FindCommitsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{21} } func (m *FindCommitsRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *FindCommitsRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } func (m *FindCommitsRequest) GetLimit() int32 { if m != nil { return m.Limit } return 0 } func (m *FindCommitsRequest) GetOffset() int32 { if m != nil { return m.Offset } return 0 } func (m *FindCommitsRequest) GetPaths() [][]byte { if m != nil { return m.Paths } return nil } func (m *FindCommitsRequest) GetFollow() bool { if m != nil { return m.Follow } return false } func (m *FindCommitsRequest) GetSkipMerges() bool { if m != nil { return m.SkipMerges } return false } func (m *FindCommitsRequest) GetDisableWalk() bool { if m != nil { return m.DisableWalk } return false } func (m *FindCommitsRequest) GetAfter() *google_protobuf.Timestamp { if m != nil { return m.After } return nil } func (m *FindCommitsRequest) GetBefore() *google_protobuf.Timestamp { if m != nil { return m.Before } return nil } // A single 'page' of the result set type FindCommitsResponse struct { Commits []*GitCommit `protobuf:"bytes,1,rep,name=commits" json:"commits,omitempty"` } func (m *FindCommitsResponse) Reset() { *m = FindCommitsResponse{} } func (m *FindCommitsResponse) String() string { return proto.CompactTextString(m) } func (*FindCommitsResponse) ProtoMessage() {} func (*FindCommitsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{22} } func (m *FindCommitsResponse) GetCommits() []*GitCommit { if m != nil { return m.Commits } return nil } type CommitLanguagesRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` } func (m *CommitLanguagesRequest) Reset() { *m = CommitLanguagesRequest{} } func (m *CommitLanguagesRequest) String() string { return proto.CompactTextString(m) } func (*CommitLanguagesRequest) ProtoMessage() {} func (*CommitLanguagesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{23} } func (m *CommitLanguagesRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *CommitLanguagesRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } type CommitLanguagesResponse struct { Languages []*CommitLanguagesResponse_Language `protobuf:"bytes,1,rep,name=languages" json:"languages,omitempty"` } func (m *CommitLanguagesResponse) Reset() { *m = CommitLanguagesResponse{} } func (m *CommitLanguagesResponse) String() string { return proto.CompactTextString(m) } func (*CommitLanguagesResponse) ProtoMessage() {} func (*CommitLanguagesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{24} } func (m *CommitLanguagesResponse) GetLanguages() 
[]*CommitLanguagesResponse_Language { if m != nil { return m.Languages } return nil } type CommitLanguagesResponse_Language struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Share float32 `protobuf:"fixed32,2,opt,name=share" json:"share,omitempty"` Color string `protobuf:"bytes,3,opt,name=color" json:"color,omitempty"` } func (m *CommitLanguagesResponse_Language) Reset() { *m = CommitLanguagesResponse_Language{} } func (m *CommitLanguagesResponse_Language) String() string { return proto.CompactTextString(m) } func (*CommitLanguagesResponse_Language) ProtoMessage() {} func (*CommitLanguagesResponse_Language) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{24, 0} } func (m *CommitLanguagesResponse_Language) GetName() string { if m != nil { return m.Name } return "" } func (m *CommitLanguagesResponse_Language) GetShare() float32 { if m != nil { return m.Share } return 0 } func (m *CommitLanguagesResponse_Language) GetColor() string { if m != nil { return m.Color } return "" } type RawBlameRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` Path []byte `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` } func (m *RawBlameRequest) Reset() { *m = RawBlameRequest{} } func (m *RawBlameRequest) String() string { return proto.CompactTextString(m) } func (*RawBlameRequest) ProtoMessage() {} func (*RawBlameRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{25} } func (m *RawBlameRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *RawBlameRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } func (m *RawBlameRequest) GetPath() []byte { if m != nil { return m.Path } return nil } type RawBlameResponse struct { Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` } func (m *RawBlameResponse) Reset() { *m = RawBlameResponse{} } func (m *RawBlameResponse) String() string { return proto.CompactTextString(m) } func (*RawBlameResponse) ProtoMessage() {} func (*RawBlameResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{26} } func (m *RawBlameResponse) GetData() []byte { if m != nil { return m.Data } return nil } type LastCommitForPathRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` Path []byte `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` } func (m *LastCommitForPathRequest) Reset() { *m = LastCommitForPathRequest{} } func (m *LastCommitForPathRequest) String() string { return proto.CompactTextString(m) } func (*LastCommitForPathRequest) ProtoMessage() {} func (*LastCommitForPathRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{27} } func (m *LastCommitForPathRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *LastCommitForPathRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } func (m *LastCommitForPathRequest) GetPath() []byte { if m != nil { return m.Path } return nil } type LastCommitForPathResponse struct { // commit is nil when the commit was not found Commit *GitCommit `protobuf:"bytes,1,opt,name=commit" json:"commit,omitempty"` } func (m *LastCommitForPathResponse) Reset() { *m = LastCommitForPathResponse{} } func (m 
*LastCommitForPathResponse) String() string { return proto.CompactTextString(m) } func (*LastCommitForPathResponse) ProtoMessage() {} func (*LastCommitForPathResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{28} } func (m *LastCommitForPathResponse) GetCommit() *GitCommit { if m != nil { return m.Commit } return nil } type CommitsByMessageRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` Offset int32 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"` Limit int32 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` Path []byte `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` Query string `protobuf:"bytes,6,opt,name=query" json:"query,omitempty"` } func (m *CommitsByMessageRequest) Reset() { *m = CommitsByMessageRequest{} } func (m *CommitsByMessageRequest) String() string { return proto.CompactTextString(m) } func (*CommitsByMessageRequest) ProtoMessage() {} func (*CommitsByMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{29} } func (m *CommitsByMessageRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *CommitsByMessageRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } func (m *CommitsByMessageRequest) GetOffset() int32 { if m != nil { return m.Offset } return 0 } func (m *CommitsByMessageRequest) GetLimit() int32 { if m != nil { return m.Limit } return 0 } func (m *CommitsByMessageRequest) GetPath() []byte { if m != nil { return m.Path } return nil } func (m *CommitsByMessageRequest) GetQuery() string { if m != nil { return m.Query } return "" } // One 'page' of the paginated response of CommitsByMessage type CommitsByMessageResponse struct { Commits []*GitCommit `protobuf:"bytes,1,rep,name=commits" json:"commits,omitempty"` } func (m *CommitsByMessageResponse) Reset() { *m = CommitsByMessageResponse{} } func (m *CommitsByMessageResponse) String() string { return proto.CompactTextString(m) } func (*CommitsByMessageResponse) ProtoMessage() {} func (*CommitsByMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{30} } func (m *CommitsByMessageResponse) GetCommits() []*GitCommit { if m != nil { return m.Commits } return nil } type FilterShasWithSignaturesRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Shas [][]byte `protobuf:"bytes,2,rep,name=shas,proto3" json:"shas,omitempty"` } func (m *FilterShasWithSignaturesRequest) Reset() { *m = FilterShasWithSignaturesRequest{} } func (m *FilterShasWithSignaturesRequest) String() string { return proto.CompactTextString(m) } func (*FilterShasWithSignaturesRequest) ProtoMessage() {} func (*FilterShasWithSignaturesRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{31} } func (m *FilterShasWithSignaturesRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *FilterShasWithSignaturesRequest) GetShas() [][]byte { if m != nil { return m.Shas } return nil } type FilterShasWithSignaturesResponse struct { Shas [][]byte `protobuf:"bytes,1,rep,name=shas,proto3" json:"shas,omitempty"` } func (m *FilterShasWithSignaturesResponse) Reset() { *m = FilterShasWithSignaturesResponse{} } func (m *FilterShasWithSignaturesResponse) String() string { return proto.CompactTextString(m) } func (*FilterShasWithSignaturesResponse) 
ProtoMessage() {} func (*FilterShasWithSignaturesResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{32} } func (m *FilterShasWithSignaturesResponse) GetShas() [][]byte { if m != nil { return m.Shas } return nil } type ExtractCommitSignatureRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` CommitId string `protobuf:"bytes,2,opt,name=commit_id,json=commitId" json:"commit_id,omitempty"` } func (m *ExtractCommitSignatureRequest) Reset() { *m = ExtractCommitSignatureRequest{} } func (m *ExtractCommitSignatureRequest) String() string { return proto.CompactTextString(m) } func (*ExtractCommitSignatureRequest) ProtoMessage() {} func (*ExtractCommitSignatureRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{33} } func (m *ExtractCommitSignatureRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *ExtractCommitSignatureRequest) GetCommitId() string { if m != nil { return m.CommitId } return "" } // Either of the 'signature' and 'signed_text' fields may be present. It // is up to the caller to stitch them together. type ExtractCommitSignatureResponse struct { Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` SignedText []byte `protobuf:"bytes,2,opt,name=signed_text,json=signedText,proto3" json:"signed_text,omitempty"` } func (m *ExtractCommitSignatureResponse) Reset() { *m = ExtractCommitSignatureResponse{} } func (m *ExtractCommitSignatureResponse) String() string { return proto.CompactTextString(m) } func (*ExtractCommitSignatureResponse) ProtoMessage() {} func (*ExtractCommitSignatureResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{34} } func (m *ExtractCommitSignatureResponse) GetSignature() []byte { if m != nil { return m.Signature } return nil } func (m *ExtractCommitSignatureResponse) GetSignedText() []byte { if m != nil { return m.SignedText } return nil } func init() { proto.RegisterType((*CommitStatsRequest)(nil), "gitaly.CommitStatsRequest") proto.RegisterType((*CommitStatsResponse)(nil), "gitaly.CommitStatsResponse") proto.RegisterType((*CommitIsAncestorRequest)(nil), "gitaly.CommitIsAncestorRequest") proto.RegisterType((*CommitIsAncestorResponse)(nil), "gitaly.CommitIsAncestorResponse") proto.RegisterType((*TreeEntryRequest)(nil), "gitaly.TreeEntryRequest") proto.RegisterType((*TreeEntryResponse)(nil), "gitaly.TreeEntryResponse") proto.RegisterType((*CommitsBetweenRequest)(nil), "gitaly.CommitsBetweenRequest") proto.RegisterType((*CommitsBetweenResponse)(nil), "gitaly.CommitsBetweenResponse") proto.RegisterType((*CountCommitsRequest)(nil), "gitaly.CountCommitsRequest") proto.RegisterType((*CountCommitsResponse)(nil), "gitaly.CountCommitsResponse") proto.RegisterType((*TreeEntry)(nil), "gitaly.TreeEntry") proto.RegisterType((*GetTreeEntriesRequest)(nil), "gitaly.GetTreeEntriesRequest") proto.RegisterType((*GetTreeEntriesResponse)(nil), "gitaly.GetTreeEntriesResponse") proto.RegisterType((*ListFilesRequest)(nil), "gitaly.ListFilesRequest") proto.RegisterType((*ListFilesResponse)(nil), "gitaly.ListFilesResponse") proto.RegisterType((*FindCommitRequest)(nil), "gitaly.FindCommitRequest") proto.RegisterType((*FindCommitResponse)(nil), "gitaly.FindCommitResponse") proto.RegisterType((*ListCommitsByOidRequest)(nil), "gitaly.ListCommitsByOidRequest") proto.RegisterType((*ListCommitsByOidResponse)(nil), "gitaly.ListCommitsByOidResponse") proto.RegisterType((*FindAllCommitsRequest)(nil), 
"gitaly.FindAllCommitsRequest") proto.RegisterType((*FindAllCommitsResponse)(nil), "gitaly.FindAllCommitsResponse") proto.RegisterType((*FindCommitsRequest)(nil), "gitaly.FindCommitsRequest") proto.RegisterType((*FindCommitsResponse)(nil), "gitaly.FindCommitsResponse") proto.RegisterType((*CommitLanguagesRequest)(nil), "gitaly.CommitLanguagesRequest") proto.RegisterType((*CommitLanguagesResponse)(nil), "gitaly.CommitLanguagesResponse") proto.RegisterType((*CommitLanguagesResponse_Language)(nil), "gitaly.CommitLanguagesResponse.Language") proto.RegisterType((*RawBlameRequest)(nil), "gitaly.RawBlameRequest") proto.RegisterType((*RawBlameResponse)(nil), "gitaly.RawBlameResponse") proto.RegisterType((*LastCommitForPathRequest)(nil), "gitaly.LastCommitForPathRequest") proto.RegisterType((*LastCommitForPathResponse)(nil), "gitaly.LastCommitForPathResponse") proto.RegisterType((*CommitsByMessageRequest)(nil), "gitaly.CommitsByMessageRequest") proto.RegisterType((*CommitsByMessageResponse)(nil), "gitaly.CommitsByMessageResponse") proto.RegisterType((*FilterShasWithSignaturesRequest)(nil), "gitaly.FilterShasWithSignaturesRequest") proto.RegisterType((*FilterShasWithSignaturesResponse)(nil), "gitaly.FilterShasWithSignaturesResponse") proto.RegisterType((*ExtractCommitSignatureRequest)(nil), "gitaly.ExtractCommitSignatureRequest") proto.RegisterType((*ExtractCommitSignatureResponse)(nil), "gitaly.ExtractCommitSignatureResponse") proto.RegisterEnum("gitaly.TreeEntryResponse_ObjectType", TreeEntryResponse_ObjectType_name, TreeEntryResponse_ObjectType_value) proto.RegisterEnum("gitaly.TreeEntry_EntryType", TreeEntry_EntryType_name, TreeEntry_EntryType_value) proto.RegisterEnum("gitaly.FindAllCommitsRequest_Order", FindAllCommitsRequest_Order_name, FindAllCommitsRequest_Order_value) } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. 
const _ = grpc.SupportPackageIsVersion4 // Client API for CommitService service type CommitServiceClient interface { CommitIsAncestor(ctx context.Context, in *CommitIsAncestorRequest, opts ...grpc.CallOption) (*CommitIsAncestorResponse, error) TreeEntry(ctx context.Context, in *TreeEntryRequest, opts ...grpc.CallOption) (CommitService_TreeEntryClient, error) CommitsBetween(ctx context.Context, in *CommitsBetweenRequest, opts ...grpc.CallOption) (CommitService_CommitsBetweenClient, error) CountCommits(ctx context.Context, in *CountCommitsRequest, opts ...grpc.CallOption) (*CountCommitsResponse, error) GetTreeEntries(ctx context.Context, in *GetTreeEntriesRequest, opts ...grpc.CallOption) (CommitService_GetTreeEntriesClient, error) ListFiles(ctx context.Context, in *ListFilesRequest, opts ...grpc.CallOption) (CommitService_ListFilesClient, error) FindCommit(ctx context.Context, in *FindCommitRequest, opts ...grpc.CallOption) (*FindCommitResponse, error) CommitStats(ctx context.Context, in *CommitStatsRequest, opts ...grpc.CallOption) (*CommitStatsResponse, error) // Use a stream to paginate the result set FindAllCommits(ctx context.Context, in *FindAllCommitsRequest, opts ...grpc.CallOption) (CommitService_FindAllCommitsClient, error) FindCommits(ctx context.Context, in *FindCommitsRequest, opts ...grpc.CallOption) (CommitService_FindCommitsClient, error) CommitLanguages(ctx context.Context, in *CommitLanguagesRequest, opts ...grpc.CallOption) (*CommitLanguagesResponse, error) RawBlame(ctx context.Context, in *RawBlameRequest, opts ...grpc.CallOption) (CommitService_RawBlameClient, error) LastCommitForPath(ctx context.Context, in *LastCommitForPathRequest, opts ...grpc.CallOption) (*LastCommitForPathResponse, error) CommitsByMessage(ctx context.Context, in *CommitsByMessageRequest, opts ...grpc.CallOption) (CommitService_CommitsByMessageClient, error) ListCommitsByOid(ctx context.Context, in *ListCommitsByOidRequest, opts ...grpc.CallOption) (CommitService_ListCommitsByOidClient, error) FilterShasWithSignatures(ctx context.Context, opts ...grpc.CallOption) (CommitService_FilterShasWithSignaturesClient, error) // ExtractCommitSignature returns a stream because the signed text may be // arbitrarily large and signature verification is impossible without the // full text. ExtractCommitSignature(ctx context.Context, in *ExtractCommitSignatureRequest, opts ...grpc.CallOption) (CommitService_ExtractCommitSignatureClient, error) } type commitServiceClient struct { cc *grpc.ClientConn } func NewCommitServiceClient(cc *grpc.ClientConn) CommitServiceClient { return &commitServiceClient{cc} } func (c *commitServiceClient) CommitIsAncestor(ctx context.Context, in *CommitIsAncestorRequest, opts ...grpc.CallOption) (*CommitIsAncestorResponse, error) { out := new(CommitIsAncestorResponse) err := grpc.Invoke(ctx, "/gitaly.CommitService/CommitIsAncestor", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *commitServiceClient) TreeEntry(ctx context.Context, in *TreeEntryRequest, opts ...grpc.CallOption) (CommitService_TreeEntryClient, error) { stream, err := grpc.NewClientStream(ctx, &_CommitService_serviceDesc.Streams[0], c.cc, "/gitaly.CommitService/TreeEntry", opts...) 
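	// Note (illustrative annotation, not protoc output): TreeEntry is a
	// server-streaming RPC; the caller reads TreeEntryResponse messages from
	// the returned client with Recv() until it returns io.EOF.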
if err != nil { return nil, err } x := &commitServiceTreeEntryClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type CommitService_TreeEntryClient interface { Recv() (*TreeEntryResponse, error) grpc.ClientStream } type commitServiceTreeEntryClient struct { grpc.ClientStream } func (x *commitServiceTreeEntryClient) Recv() (*TreeEntryResponse, error) { m := new(TreeEntryResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *commitServiceClient) CommitsBetween(ctx context.Context, in *CommitsBetweenRequest, opts ...grpc.CallOption) (CommitService_CommitsBetweenClient, error) { stream, err := grpc.NewClientStream(ctx, &_CommitService_serviceDesc.Streams[1], c.cc, "/gitaly.CommitService/CommitsBetween", opts...) if err != nil { return nil, err } x := &commitServiceCommitsBetweenClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type CommitService_CommitsBetweenClient interface { Recv() (*CommitsBetweenResponse, error) grpc.ClientStream } type commitServiceCommitsBetweenClient struct { grpc.ClientStream } func (x *commitServiceCommitsBetweenClient) Recv() (*CommitsBetweenResponse, error) { m := new(CommitsBetweenResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *commitServiceClient) CountCommits(ctx context.Context, in *CountCommitsRequest, opts ...grpc.CallOption) (*CountCommitsResponse, error) { out := new(CountCommitsResponse) err := grpc.Invoke(ctx, "/gitaly.CommitService/CountCommits", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *commitServiceClient) GetTreeEntries(ctx context.Context, in *GetTreeEntriesRequest, opts ...grpc.CallOption) (CommitService_GetTreeEntriesClient, error) { stream, err := grpc.NewClientStream(ctx, &_CommitService_serviceDesc.Streams[2], c.cc, "/gitaly.CommitService/GetTreeEntries", opts...) if err != nil { return nil, err } x := &commitServiceGetTreeEntriesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type CommitService_GetTreeEntriesClient interface { Recv() (*GetTreeEntriesResponse, error) grpc.ClientStream } type commitServiceGetTreeEntriesClient struct { grpc.ClientStream } func (x *commitServiceGetTreeEntriesClient) Recv() (*GetTreeEntriesResponse, error) { m := new(GetTreeEntriesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *commitServiceClient) ListFiles(ctx context.Context, in *ListFilesRequest, opts ...grpc.CallOption) (CommitService_ListFilesClient, error) { stream, err := grpc.NewClientStream(ctx, &_CommitService_serviceDesc.Streams[3], c.cc, "/gitaly.CommitService/ListFiles", opts...) 
if err != nil { return nil, err } x := &commitServiceListFilesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type CommitService_ListFilesClient interface { Recv() (*ListFilesResponse, error) grpc.ClientStream } type commitServiceListFilesClient struct { grpc.ClientStream } func (x *commitServiceListFilesClient) Recv() (*ListFilesResponse, error) { m := new(ListFilesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *commitServiceClient) FindCommit(ctx context.Context, in *FindCommitRequest, opts ...grpc.CallOption) (*FindCommitResponse, error) { out := new(FindCommitResponse) err := grpc.Invoke(ctx, "/gitaly.CommitService/FindCommit", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *commitServiceClient) CommitStats(ctx context.Context, in *CommitStatsRequest, opts ...grpc.CallOption) (*CommitStatsResponse, error) { out := new(CommitStatsResponse) err := grpc.Invoke(ctx, "/gitaly.CommitService/CommitStats", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *commitServiceClient) FindAllCommits(ctx context.Context, in *FindAllCommitsRequest, opts ...grpc.CallOption) (CommitService_FindAllCommitsClient, error) { stream, err := grpc.NewClientStream(ctx, &_CommitService_serviceDesc.Streams[4], c.cc, "/gitaly.CommitService/FindAllCommits", opts...) if err != nil { return nil, err } x := &commitServiceFindAllCommitsClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type CommitService_FindAllCommitsClient interface { Recv() (*FindAllCommitsResponse, error) grpc.ClientStream } type commitServiceFindAllCommitsClient struct { grpc.ClientStream } func (x *commitServiceFindAllCommitsClient) Recv() (*FindAllCommitsResponse, error) { m := new(FindAllCommitsResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *commitServiceClient) FindCommits(ctx context.Context, in *FindCommitsRequest, opts ...grpc.CallOption) (CommitService_FindCommitsClient, error) { stream, err := grpc.NewClientStream(ctx, &_CommitService_serviceDesc.Streams[5], c.cc, "/gitaly.CommitService/FindCommits", opts...) if err != nil { return nil, err } x := &commitServiceFindCommitsClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type CommitService_FindCommitsClient interface { Recv() (*FindCommitsResponse, error) grpc.ClientStream } type commitServiceFindCommitsClient struct { grpc.ClientStream } func (x *commitServiceFindCommitsClient) Recv() (*FindCommitsResponse, error) { m := new(FindCommitsResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *commitServiceClient) CommitLanguages(ctx context.Context, in *CommitLanguagesRequest, opts ...grpc.CallOption) (*CommitLanguagesResponse, error) { out := new(CommitLanguagesResponse) err := grpc.Invoke(ctx, "/gitaly.CommitService/CommitLanguages", in, out, c.cc, opts...) 
if err != nil { return nil, err } return out, nil } func (c *commitServiceClient) RawBlame(ctx context.Context, in *RawBlameRequest, opts ...grpc.CallOption) (CommitService_RawBlameClient, error) { stream, err := grpc.NewClientStream(ctx, &_CommitService_serviceDesc.Streams[6], c.cc, "/gitaly.CommitService/RawBlame", opts...) if err != nil { return nil, err } x := &commitServiceRawBlameClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type CommitService_RawBlameClient interface { Recv() (*RawBlameResponse, error) grpc.ClientStream } type commitServiceRawBlameClient struct { grpc.ClientStream } func (x *commitServiceRawBlameClient) Recv() (*RawBlameResponse, error) { m := new(RawBlameResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *commitServiceClient) LastCommitForPath(ctx context.Context, in *LastCommitForPathRequest, opts ...grpc.CallOption) (*LastCommitForPathResponse, error) { out := new(LastCommitForPathResponse) err := grpc.Invoke(ctx, "/gitaly.CommitService/LastCommitForPath", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *commitServiceClient) CommitsByMessage(ctx context.Context, in *CommitsByMessageRequest, opts ...grpc.CallOption) (CommitService_CommitsByMessageClient, error) { stream, err := grpc.NewClientStream(ctx, &_CommitService_serviceDesc.Streams[7], c.cc, "/gitaly.CommitService/CommitsByMessage", opts...) if err != nil { return nil, err } x := &commitServiceCommitsByMessageClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type CommitService_CommitsByMessageClient interface { Recv() (*CommitsByMessageResponse, error) grpc.ClientStream } type commitServiceCommitsByMessageClient struct { grpc.ClientStream } func (x *commitServiceCommitsByMessageClient) Recv() (*CommitsByMessageResponse, error) { m := new(CommitsByMessageResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *commitServiceClient) ListCommitsByOid(ctx context.Context, in *ListCommitsByOidRequest, opts ...grpc.CallOption) (CommitService_ListCommitsByOidClient, error) { stream, err := grpc.NewClientStream(ctx, &_CommitService_serviceDesc.Streams[8], c.cc, "/gitaly.CommitService/ListCommitsByOid", opts...) if err != nil { return nil, err } x := &commitServiceListCommitsByOidClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type CommitService_ListCommitsByOidClient interface { Recv() (*ListCommitsByOidResponse, error) grpc.ClientStream } type commitServiceListCommitsByOidClient struct { grpc.ClientStream } func (x *commitServiceListCommitsByOidClient) Recv() (*ListCommitsByOidResponse, error) { m := new(ListCommitsByOidResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *commitServiceClient) FilterShasWithSignatures(ctx context.Context, opts ...grpc.CallOption) (CommitService_FilterShasWithSignaturesClient, error) { stream, err := grpc.NewClientStream(ctx, &_CommitService_serviceDesc.Streams[9], c.cc, "/gitaly.CommitService/FilterShasWithSignatures", opts...) 
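	// Note (illustrative annotation, not protoc output): FilterShasWithSignatures
	// is a bidirectional stream, so no initial request message is sent here.
	// The caller writes FilterShasWithSignaturesRequest messages with Send(),
	// calls CloseSend() when finished, and reads FilterShasWithSignaturesResponse
	// messages with Recv() until it returns io.EOF.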
if err != nil { return nil, err } x := &commitServiceFilterShasWithSignaturesClient{stream} return x, nil } type CommitService_FilterShasWithSignaturesClient interface { Send(*FilterShasWithSignaturesRequest) error Recv() (*FilterShasWithSignaturesResponse, error) grpc.ClientStream } type commitServiceFilterShasWithSignaturesClient struct { grpc.ClientStream } func (x *commitServiceFilterShasWithSignaturesClient) Send(m *FilterShasWithSignaturesRequest) error { return x.ClientStream.SendMsg(m) } func (x *commitServiceFilterShasWithSignaturesClient) Recv() (*FilterShasWithSignaturesResponse, error) { m := new(FilterShasWithSignaturesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *commitServiceClient) ExtractCommitSignature(ctx context.Context, in *ExtractCommitSignatureRequest, opts ...grpc.CallOption) (CommitService_ExtractCommitSignatureClient, error) { stream, err := grpc.NewClientStream(ctx, &_CommitService_serviceDesc.Streams[10], c.cc, "/gitaly.CommitService/ExtractCommitSignature", opts...) if err != nil { return nil, err } x := &commitServiceExtractCommitSignatureClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type CommitService_ExtractCommitSignatureClient interface { Recv() (*ExtractCommitSignatureResponse, error) grpc.ClientStream } type commitServiceExtractCommitSignatureClient struct { grpc.ClientStream } func (x *commitServiceExtractCommitSignatureClient) Recv() (*ExtractCommitSignatureResponse, error) { m := new(ExtractCommitSignatureResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for CommitService service type CommitServiceServer interface { CommitIsAncestor(context.Context, *CommitIsAncestorRequest) (*CommitIsAncestorResponse, error) TreeEntry(*TreeEntryRequest, CommitService_TreeEntryServer) error CommitsBetween(*CommitsBetweenRequest, CommitService_CommitsBetweenServer) error CountCommits(context.Context, *CountCommitsRequest) (*CountCommitsResponse, error) GetTreeEntries(*GetTreeEntriesRequest, CommitService_GetTreeEntriesServer) error ListFiles(*ListFilesRequest, CommitService_ListFilesServer) error FindCommit(context.Context, *FindCommitRequest) (*FindCommitResponse, error) CommitStats(context.Context, *CommitStatsRequest) (*CommitStatsResponse, error) // Use a stream to paginate the result set FindAllCommits(*FindAllCommitsRequest, CommitService_FindAllCommitsServer) error FindCommits(*FindCommitsRequest, CommitService_FindCommitsServer) error CommitLanguages(context.Context, *CommitLanguagesRequest) (*CommitLanguagesResponse, error) RawBlame(*RawBlameRequest, CommitService_RawBlameServer) error LastCommitForPath(context.Context, *LastCommitForPathRequest) (*LastCommitForPathResponse, error) CommitsByMessage(*CommitsByMessageRequest, CommitService_CommitsByMessageServer) error ListCommitsByOid(*ListCommitsByOidRequest, CommitService_ListCommitsByOidServer) error FilterShasWithSignatures(CommitService_FilterShasWithSignaturesServer) error // ExtractCommitSignature returns a stream because the signed text may be // arbitrarily large and signature verification is impossible without the // full text. 
ExtractCommitSignature(*ExtractCommitSignatureRequest, CommitService_ExtractCommitSignatureServer) error } func RegisterCommitServiceServer(s *grpc.Server, srv CommitServiceServer) { s.RegisterService(&_CommitService_serviceDesc, srv) } func _CommitService_CommitIsAncestor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CommitIsAncestorRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CommitServiceServer).CommitIsAncestor(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.CommitService/CommitIsAncestor", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CommitServiceServer).CommitIsAncestor(ctx, req.(*CommitIsAncestorRequest)) } return interceptor(ctx, in, info, handler) } func _CommitService_TreeEntry_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(TreeEntryRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(CommitServiceServer).TreeEntry(m, &commitServiceTreeEntryServer{stream}) } type CommitService_TreeEntryServer interface { Send(*TreeEntryResponse) error grpc.ServerStream } type commitServiceTreeEntryServer struct { grpc.ServerStream } func (x *commitServiceTreeEntryServer) Send(m *TreeEntryResponse) error { return x.ServerStream.SendMsg(m) } func _CommitService_CommitsBetween_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(CommitsBetweenRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(CommitServiceServer).CommitsBetween(m, &commitServiceCommitsBetweenServer{stream}) } type CommitService_CommitsBetweenServer interface { Send(*CommitsBetweenResponse) error grpc.ServerStream } type commitServiceCommitsBetweenServer struct { grpc.ServerStream } func (x *commitServiceCommitsBetweenServer) Send(m *CommitsBetweenResponse) error { return x.ServerStream.SendMsg(m) } func _CommitService_CountCommits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CountCommitsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CommitServiceServer).CountCommits(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.CommitService/CountCommits", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CommitServiceServer).CountCommits(ctx, req.(*CountCommitsRequest)) } return interceptor(ctx, in, info, handler) } func _CommitService_GetTreeEntries_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(GetTreeEntriesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(CommitServiceServer).GetTreeEntries(m, &commitServiceGetTreeEntriesServer{stream}) } type CommitService_GetTreeEntriesServer interface { Send(*GetTreeEntriesResponse) error grpc.ServerStream } type commitServiceGetTreeEntriesServer struct { grpc.ServerStream } func (x *commitServiceGetTreeEntriesServer) Send(m *GetTreeEntriesResponse) error { return x.ServerStream.SendMsg(m) } func _CommitService_ListFiles_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(ListFilesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(CommitServiceServer).ListFiles(m, &commitServiceListFilesServer{stream}) } type CommitService_ListFilesServer interface { Send(*ListFilesResponse) error grpc.ServerStream 
} type commitServiceListFilesServer struct { grpc.ServerStream } func (x *commitServiceListFilesServer) Send(m *ListFilesResponse) error { return x.ServerStream.SendMsg(m) } func _CommitService_FindCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FindCommitRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CommitServiceServer).FindCommit(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.CommitService/FindCommit", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CommitServiceServer).FindCommit(ctx, req.(*FindCommitRequest)) } return interceptor(ctx, in, info, handler) } func _CommitService_CommitStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CommitStatsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CommitServiceServer).CommitStats(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.CommitService/CommitStats", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CommitServiceServer).CommitStats(ctx, req.(*CommitStatsRequest)) } return interceptor(ctx, in, info, handler) } func _CommitService_FindAllCommits_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(FindAllCommitsRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(CommitServiceServer).FindAllCommits(m, &commitServiceFindAllCommitsServer{stream}) } type CommitService_FindAllCommitsServer interface { Send(*FindAllCommitsResponse) error grpc.ServerStream } type commitServiceFindAllCommitsServer struct { grpc.ServerStream } func (x *commitServiceFindAllCommitsServer) Send(m *FindAllCommitsResponse) error { return x.ServerStream.SendMsg(m) } func _CommitService_FindCommits_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(FindCommitsRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(CommitServiceServer).FindCommits(m, &commitServiceFindCommitsServer{stream}) } type CommitService_FindCommitsServer interface { Send(*FindCommitsResponse) error grpc.ServerStream } type commitServiceFindCommitsServer struct { grpc.ServerStream } func (x *commitServiceFindCommitsServer) Send(m *FindCommitsResponse) error { return x.ServerStream.SendMsg(m) } func _CommitService_CommitLanguages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CommitLanguagesRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CommitServiceServer).CommitLanguages(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.CommitService/CommitLanguages", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CommitServiceServer).CommitLanguages(ctx, req.(*CommitLanguagesRequest)) } return interceptor(ctx, in, info, handler) } func _CommitService_RawBlame_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(RawBlameRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(CommitServiceServer).RawBlame(m, &commitServiceRawBlameServer{stream}) } type CommitService_RawBlameServer interface { Send(*RawBlameResponse) error grpc.ServerStream } type 
commitServiceRawBlameServer struct { grpc.ServerStream } func (x *commitServiceRawBlameServer) Send(m *RawBlameResponse) error { return x.ServerStream.SendMsg(m) } func _CommitService_LastCommitForPath_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(LastCommitForPathRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CommitServiceServer).LastCommitForPath(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.CommitService/LastCommitForPath", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CommitServiceServer).LastCommitForPath(ctx, req.(*LastCommitForPathRequest)) } return interceptor(ctx, in, info, handler) } func _CommitService_CommitsByMessage_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(CommitsByMessageRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(CommitServiceServer).CommitsByMessage(m, &commitServiceCommitsByMessageServer{stream}) } type CommitService_CommitsByMessageServer interface { Send(*CommitsByMessageResponse) error grpc.ServerStream } type commitServiceCommitsByMessageServer struct { grpc.ServerStream } func (x *commitServiceCommitsByMessageServer) Send(m *CommitsByMessageResponse) error { return x.ServerStream.SendMsg(m) } func _CommitService_ListCommitsByOid_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(ListCommitsByOidRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(CommitServiceServer).ListCommitsByOid(m, &commitServiceListCommitsByOidServer{stream}) } type CommitService_ListCommitsByOidServer interface { Send(*ListCommitsByOidResponse) error grpc.ServerStream } type commitServiceListCommitsByOidServer struct { grpc.ServerStream } func (x *commitServiceListCommitsByOidServer) Send(m *ListCommitsByOidResponse) error { return x.ServerStream.SendMsg(m) } func _CommitService_FilterShasWithSignatures_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(CommitServiceServer).FilterShasWithSignatures(&commitServiceFilterShasWithSignaturesServer{stream}) } type CommitService_FilterShasWithSignaturesServer interface { Send(*FilterShasWithSignaturesResponse) error Recv() (*FilterShasWithSignaturesRequest, error) grpc.ServerStream } type commitServiceFilterShasWithSignaturesServer struct { grpc.ServerStream } func (x *commitServiceFilterShasWithSignaturesServer) Send(m *FilterShasWithSignaturesResponse) error { return x.ServerStream.SendMsg(m) } func (x *commitServiceFilterShasWithSignaturesServer) Recv() (*FilterShasWithSignaturesRequest, error) { m := new(FilterShasWithSignaturesRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _CommitService_ExtractCommitSignature_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(ExtractCommitSignatureRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(CommitServiceServer).ExtractCommitSignature(m, &commitServiceExtractCommitSignatureServer{stream}) } type CommitService_ExtractCommitSignatureServer interface { Send(*ExtractCommitSignatureResponse) error grpc.ServerStream } type commitServiceExtractCommitSignatureServer struct { grpc.ServerStream } func (x *commitServiceExtractCommitSignatureServer) Send(m *ExtractCommitSignatureResponse) error { return x.ServerStream.SendMsg(m) } var _CommitService_serviceDesc = grpc.ServiceDesc{ 
ServiceName: "gitaly.CommitService", HandlerType: (*CommitServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "CommitIsAncestor", Handler: _CommitService_CommitIsAncestor_Handler, }, { MethodName: "CountCommits", Handler: _CommitService_CountCommits_Handler, }, { MethodName: "FindCommit", Handler: _CommitService_FindCommit_Handler, }, { MethodName: "CommitStats", Handler: _CommitService_CommitStats_Handler, }, { MethodName: "CommitLanguages", Handler: _CommitService_CommitLanguages_Handler, }, { MethodName: "LastCommitForPath", Handler: _CommitService_LastCommitForPath_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "TreeEntry", Handler: _CommitService_TreeEntry_Handler, ServerStreams: true, }, { StreamName: "CommitsBetween", Handler: _CommitService_CommitsBetween_Handler, ServerStreams: true, }, { StreamName: "GetTreeEntries", Handler: _CommitService_GetTreeEntries_Handler, ServerStreams: true, }, { StreamName: "ListFiles", Handler: _CommitService_ListFiles_Handler, ServerStreams: true, }, { StreamName: "FindAllCommits", Handler: _CommitService_FindAllCommits_Handler, ServerStreams: true, }, { StreamName: "FindCommits", Handler: _CommitService_FindCommits_Handler, ServerStreams: true, }, { StreamName: "RawBlame", Handler: _CommitService_RawBlame_Handler, ServerStreams: true, }, { StreamName: "CommitsByMessage", Handler: _CommitService_CommitsByMessage_Handler, ServerStreams: true, }, { StreamName: "ListCommitsByOid", Handler: _CommitService_ListCommitsByOid_Handler, ServerStreams: true, }, { StreamName: "FilterShasWithSignatures", Handler: _CommitService_FilterShasWithSignatures_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "ExtractCommitSignature", Handler: _CommitService_ExtractCommitSignature_Handler, ServerStreams: true, }, }, Metadata: "commit.proto", } func init() { proto.RegisterFile("commit.proto", fileDescriptor1) } var fileDescriptor1 = []byte{ // 1559 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x6e, 0x1a, 0x47, 0x14, 0xf6, 0x82, 0xc1, 0x70, 0xa0, 0x0e, 0x9e, 0xfc, 0xe1, 0x75, 0x12, 0x3b, 0xdb, 0x26, 0x25, 0x4a, 0x45, 0x2c, 0xaa, 0x56, 0xed, 0x55, 0x65, 0x27, 0xd8, 0x75, 0x6a, 0x87, 0x68, 0x8d, 0x14, 0xa5, 0x37, 0x68, 0xcc, 0x0e, 0x30, 0xf5, 0xc2, 0x92, 0xdd, 0x21, 0x36, 0xad, 0xd4, 0xfb, 0x4a, 0x7d, 0x92, 0x4a, 0x7d, 0x88, 0xbe, 0x42, 0x1f, 0xa3, 0x8f, 0x10, 0xf5, 0xa2, 0x9a, 0x9f, 0xdd, 0x59, 0x60, 0x71, 0x1a, 0x5b, 0xe4, 0x06, 0xed, 0x9c, 0x99, 0x39, 0xe7, 0x3b, 0x67, 0xce, 0x2f, 0x50, 0x6c, 0x7b, 0xfd, 0x3e, 0x65, 0xd5, 0xa1, 0xef, 0x31, 0x0f, 0x65, 0xbb, 0x94, 0x61, 0x77, 0x6c, 0x16, 0x83, 0x1e, 0xf6, 0x89, 0x23, 0xa9, 0xe6, 0x66, 0xd7, 0xf3, 0xba, 0x2e, 0x79, 0x22, 0x56, 0x27, 0xa3, 0xce, 0x13, 0x46, 0xfb, 0x24, 0x60, 0xb8, 0x3f, 0x94, 0x07, 0x2c, 0x07, 0xd0, 0x53, 0xc1, 0xe6, 0x98, 0x61, 0x16, 0xd8, 0xe4, 0xcd, 0x88, 0x04, 0x0c, 0xd5, 0x00, 0x7c, 0x32, 0xf4, 0x02, 0xca, 0x3c, 0x7f, 0x5c, 0x36, 0xb6, 0x8c, 0x4a, 0xa1, 0x86, 0xaa, 0x52, 0x42, 0xd5, 0x8e, 0x76, 0xec, 0xd8, 0x29, 0x64, 0x42, 0xce, 0x27, 0x6f, 0x69, 0x40, 0xbd, 0x41, 0x39, 0xb5, 0x65, 0x54, 0x8a, 0x76, 0xb4, 0xb6, 0xda, 0x70, 0x7d, 0x42, 0x4a, 0x30, 0xf4, 0x06, 0x01, 0x41, 0x25, 0x48, 0x7b, 0xd4, 0x11, 0xfc, 0xf3, 0x36, 0xff, 0x44, 0x77, 0x20, 0x8f, 0x1d, 0x87, 0x32, 0xea, 0x0d, 0x02, 0xc1, 0x25, 0x63, 0x6b, 0x02, 0xdf, 0x75, 0x88, 0x4b, 0xe4, 0x6e, 0x5a, 0xee, 0x46, 0x04, 0xeb, 0x37, 0x03, 0x6e, 0x4b, 0x29, 0x07, 0xc1, 0xce, 0xa0, 0x4d, 0x02, 0xe6, 0xf9, 0x57, 0x51, 0x68, 0x13, 0x0a, 0x58, 0xb1, 
0x69, 0x51, 0x47, 0xa0, 0xc9, 0xdb, 0x10, 0x92, 0x0e, 0x1c, 0xb4, 0x0e, 0xb9, 0x76, 0x8f, 0xba, 0x0e, 0xdf, 0x4d, 0x8b, 0xdd, 0x15, 0xb1, 0x3e, 0x70, 0xac, 0x6d, 0x28, 0xcf, 0x42, 0x51, 0x5a, 0xdf, 0x80, 0xcc, 0x5b, 0xec, 0x8e, 0x88, 0x80, 0x91, 0xb3, 0xe5, 0xc2, 0xfa, 0xdd, 0x80, 0x52, 0xd3, 0x27, 0xa4, 0x3e, 0x60, 0xfe, 0x78, 0x41, 0xef, 0x80, 0x10, 0x2c, 0x0f, 0x31, 0xeb, 0x09, 0xb4, 0x45, 0x5b, 0x7c, 0x73, 0x38, 0x2e, 0xed, 0x53, 0x56, 0x5e, 0xde, 0x32, 0x2a, 0x69, 0x5b, 0x2e, 0xac, 0xbf, 0x0d, 0x58, 0x8b, 0xc1, 0x51, 0xd0, 0xbf, 0x81, 0x65, 0x36, 0x1e, 0x4a, 0xe4, 0xab, 0xb5, 0xcf, 0x42, 0x24, 0x33, 0x07, 0xab, 0x8d, 0x93, 0x9f, 0x48, 0x9b, 0x35, 0xc7, 0x43, 0x62, 0x8b, 0x1b, 0xe1, 0x53, 0xa7, 0xf4, 0x53, 0x23, 0x58, 0x0e, 0xe8, 0xcf, 0x44, 0x60, 0x49, 0xdb, 0xe2, 0x9b, 0xd3, 0xfa, 0x9e, 0x43, 0x04, 0x94, 0x8c, 0x2d, 0xbe, 0x39, 0xcd, 0xc1, 0x0c, 0x97, 0x33, 0x12, 0x33, 0xff, 0xb6, 0xbe, 0x02, 0xd0, 0x12, 0x10, 0x40, 0xf6, 0x69, 0xe3, 0xe8, 0xe8, 0xa0, 0x59, 0x5a, 0x42, 0x39, 0x58, 0xde, 0x3d, 0x6c, 0xec, 0x96, 0x0c, 0xfe, 0xd5, 0xb4, 0xeb, 0xf5, 0x52, 0x0a, 0xad, 0x40, 0xba, 0xb9, 0xb3, 0x5f, 0x4a, 0x5b, 0x1e, 0xdc, 0x94, 0xaf, 0x12, 0xec, 0x12, 0x76, 0x46, 0xc8, 0xe0, 0x2a, 0x76, 0x46, 0xb0, 0xdc, 0xf1, 0xbd, 0xbe, 0xb2, 0xb1, 0xf8, 0x46, 0xab, 0x90, 0x62, 0x9e, 0xb2, 0x6e, 0x8a, 0x79, 0x56, 0x1d, 0x6e, 0x4d, 0x0b, 0x54, 0x96, 0x7c, 0x0c, 0x2b, 0x32, 0x7c, 0x83, 0xb2, 0xb1, 0x95, 0xae, 0x14, 0x6a, 0x6b, 0xa1, 0xb8, 0x7d, 0xca, 0xe4, 0x1d, 0x3b, 0x3c, 0x61, 0xfd, 0x6b, 0xf0, 0xf8, 0x19, 0x0d, 0xd4, 0xc6, 0xa2, 0xc2, 0x14, 0x6d, 0x43, 0x06, 0x77, 0x18, 0xf1, 0x85, 0x06, 0x85, 0x9a, 0x59, 0x95, 0xd9, 0xa3, 0x1a, 0x66, 0x8f, 0x6a, 0x33, 0xcc, 0x1e, 0xb6, 0x3c, 0x88, 0x6a, 0x90, 0x3d, 0x21, 0x1d, 0xcf, 0x97, 0x4f, 0x76, 0xf1, 0x15, 0x75, 0x32, 0x72, 0xc2, 0x4c, 0xcc, 0x09, 0x37, 0x20, 0xdf, 0xc7, 0xe7, 0xad, 0x36, 0x57, 0xb2, 0x9c, 0x15, 0xaf, 0x9f, 0xeb, 0xe3, 0x73, 0xa1, 0xb4, 0xf5, 0x05, 0xdc, 0x98, 0xd4, 0x5e, 0x07, 0x92, 0xbc, 0x60, 0x88, 0x0b, 0x72, 0x61, 0xbd, 0x33, 0x20, 0x1f, 0x39, 0x64, 0x42, 0x8a, 0x59, 0x87, 0x9c, 0xef, 0x79, 0xac, 0xa5, 0xdd, 0x71, 0x85, 0xaf, 0x1b, 0xd2, 0x25, 0x67, 0xc2, 0xe3, 0x89, 0x72, 0xf9, 0x65, 0xe1, 0xf2, 0x1b, 0x33, 0x2e, 0x5f, 0x15, 0xbf, 0x31, 0x4f, 0x0f, 0x7d, 0x38, 0x13, 0xf3, 0xe1, 0xbb, 0x00, 0xf2, 0x2d, 0x85, 0xd4, 0xac, 0x90, 0x9a, 0x97, 0x14, 0x2e, 0x77, 0x03, 0xf2, 0x1d, 0x17, 0xb3, 0x96, 0x10, 0xbe, 0x22, 0x1f, 0x85, 0x13, 0x5e, 0x62, 0xd6, 0xb3, 0x1e, 0x43, 0x3e, 0x12, 0x11, 0xb9, 0xf7, 0x52, 0xe4, 0xde, 0x46, 0xcc, 0xfd, 0xd3, 0xd6, 0x2f, 0x70, 0x73, 0x9f, 0xb0, 0x10, 0x1c, 0x25, 0xc1, 0x47, 0xcc, 0x24, 0xdc, 0xdb, 0xa7, 0x85, 0x6b, 0x6f, 0x27, 0x92, 0x34, 0xed, 0xed, 0x3a, 0x75, 0x84, 0x27, 0xac, 0x13, 0x28, 0x1d, 0xd2, 0x80, 0xed, 0x51, 0x77, 0x61, 0xf0, 0xad, 0x47, 0xb0, 0x16, 0x93, 0xa1, 0xfd, 0x89, 0xeb, 0x21, 0x31, 0x16, 0x6d, 0xb9, 0xb0, 0xda, 0xb0, 0xb6, 0x47, 0x07, 0x8e, 0x8a, 0xc9, 0x05, 0xe1, 0xf9, 0x0e, 0x50, 0x5c, 0x88, 0x02, 0xf4, 0x08, 0xb2, 0xd2, 0x49, 0x94, 0x84, 0x84, 0x1c, 0xa1, 0x0e, 0x58, 0x2d, 0xb8, 0xcd, 0x15, 0x0a, 0xb3, 0xcd, 0xb8, 0x41, 0x9d, 0xab, 0x60, 0x8d, 0xd2, 0x75, 0x5a, 0x85, 0x8d, 0xb5, 0x0f, 0xe5, 0x59, 0x01, 0x97, 0x49, 0x66, 0xef, 0x0c, 0xb8, 0xc9, 0x75, 0xdd, 0x71, 0xdd, 0x05, 0xa7, 0xb3, 0x89, 0xa4, 0x92, 0x9e, 0x4c, 0x2a, 0xa2, 0xfc, 0x9c, 0xd2, 0x61, 0x58, 0x6a, 0xf8, 0x37, 0xfa, 0x16, 0x32, 0x9e, 0xef, 0x10, 0x5f, 0xc4, 0xee, 0x6a, 0xed, 0xd3, 0x50, 0x76, 0x22, 0xdc, 0x6a, 0x83, 0x1f, 0xb5, 0xe5, 0x0d, 0xeb, 0x01, 0x64, 0xc4, 0x9a, 0xc7, 0xe5, 0x8b, 0xc6, 0x8b, 0xba, 0x8a, 0xd0, 0xc6, 0xcb, 
0x86, 0x2c, 0x45, 0xcf, 0x76, 0x9a, 0xf5, 0x52, 0x8a, 0x87, 0xc8, 0x34, 0xb3, 0xcb, 0xd8, 0xf0, 0x9f, 0x54, 0xdc, 0x5f, 0x16, 0x66, 0xc0, 0xa8, 0x35, 0x90, 0xc6, 0x93, 0x0b, 0x74, 0x0b, 0xb2, 0x5e, 0xa7, 0x13, 0x10, 0xa6, 0x6c, 0xa7, 0x56, 0x3a, 0x7c, 0x32, 0xb1, 0xf0, 0xe1, 0xa7, 0x3b, 0x9e, 0xeb, 0x7a, 0x67, 0x22, 0xed, 0xe5, 0x6c, 0xb5, 0xe2, 0xdd, 0x15, 0xb7, 0x79, 0xab, 0x4f, 0xfc, 0x2e, 0x09, 0x44, 0xd6, 0xcb, 0xd9, 0xc0, 0x49, 0x47, 0x82, 0x82, 0xee, 0x43, 0xd1, 0xa1, 0x01, 0x3e, 0x71, 0x49, 0xeb, 0x0c, 0xbb, 0xa7, 0xe5, 0x9c, 0x38, 0x51, 0x50, 0xb4, 0x57, 0xd8, 0x3d, 0xd5, 0xf5, 0x2a, 0xff, 0xe1, 0xf5, 0x0a, 0xfe, 0x6f, 0xbd, 0xb2, 0x76, 0xe1, 0xfa, 0x84, 0xad, 0x2f, 0xf3, 0x60, 0xbd, 0xb0, 0x11, 0x38, 0xc4, 0x83, 0xee, 0x08, 0x77, 0x17, 0x97, 0xd9, 0xfe, 0x8c, 0xba, 0xe0, 0x98, 0x28, 0x05, 0x79, 0x0f, 0xf2, 0x6e, 0x48, 0x54, 0xa0, 0x2b, 0xa1, 0xa8, 0x39, 0x77, 0xaa, 0x21, 0xc5, 0xd6, 0x57, 0xcd, 0xe7, 0x90, 0x0b, 0xc9, 0x3c, 0x8e, 0x06, 0xb8, 0x4f, 0x54, 0x85, 0x15, 0xdf, 0xdc, 0x13, 0xc4, 0x14, 0x22, 0xc0, 0xa5, 0x6c, 0xb9, 0x90, 0xe5, 0xda, 0xf5, 0x7c, 0xd5, 0x2b, 0xcb, 0x85, 0x35, 0x82, 0x6b, 0x36, 0x3e, 0xdb, 0x75, 0x71, 0x9f, 0x7c, 0xcc, 0x5a, 0xf5, 0x10, 0x4a, 0x5a, 0xac, 0x32, 0x4f, 0xd8, 0x69, 0x1a, 0xb1, 0x4e, 0xf3, 0x57, 0x28, 0x1f, 0xe2, 0x30, 0xed, 0xed, 0x79, 0x3e, 0x2f, 0xc9, 0x1f, 0x13, 0xe7, 0x1e, 0xac, 0x27, 0xc8, 0xff, 0xf0, 0xfa, 0xf0, 0x57, 0xe4, 0x16, 0xc1, 0xee, 0xf8, 0x88, 0x04, 0x01, 0x7f, 0xd2, 0x05, 0xe9, 0xa1, 0x13, 0x44, 0x7a, 0x3a, 0x41, 0xe8, 0x49, 0x23, 0x4a, 0x27, 0x49, 0xed, 0xe0, 0x0d, 0xc8, 0xbc, 0x19, 0x11, 0x7f, 0xac, 0x5a, 0x25, 0xb9, 0xe0, 0x25, 0x68, 0x56, 0x85, 0xcb, 0x44, 0x23, 0x85, 0xcd, 0x3d, 0xea, 0x32, 0xe2, 0x1f, 0xf7, 0x70, 0xf0, 0x8a, 0xb2, 0xde, 0x31, 0xed, 0x0e, 0x30, 0x1b, 0xf9, 0x57, 0x0b, 0x4b, 0x5e, 0x52, 0x7a, 0x38, 0x10, 0x55, 0xb3, 0x68, 0x8b, 0x6f, 0xeb, 0x6b, 0xd8, 0x9a, 0x2f, 0x4a, 0xfb, 0x9d, 0xb8, 0x67, 0xc4, 0xee, 0x0d, 0xe1, 0x6e, 0xfd, 0x9c, 0xf9, 0xb8, 0xad, 0xc0, 0x47, 0xd7, 0xae, 0x02, 0x70, 0x03, 0x54, 0xd3, 0xa9, 0xe7, 0xd9, 0x9c, 0x24, 0x1c, 0x38, 0x56, 0x0b, 0xee, 0xcd, 0x93, 0xa8, 0x70, 0xde, 0x81, 0x7c, 0x10, 0x12, 0x55, 0x90, 0x68, 0x82, 0x48, 0xe8, 0xb4, 0x3b, 0x20, 0x4e, 0x8b, 0x91, 0x73, 0xa6, 0x9c, 0x02, 0x24, 0xa9, 0x49, 0xce, 0x59, 0xed, 0x8f, 0x02, 0x7c, 0xa2, 0x58, 0x13, 0xff, 0x2d, 0x6d, 0x13, 0xf4, 0x0a, 0x4a, 0xd3, 0x53, 0x32, 0xda, 0x9c, 0x4c, 0x48, 0x33, 0xa3, 0xbc, 0xb9, 0x35, 0xff, 0x80, 0xc4, 0x69, 0x2d, 0xa1, 0x67, 0xf1, 0x11, 0xa0, 0x9c, 0x30, 0xa6, 0x4a, 0x56, 0xeb, 0x73, 0x07, 0x58, 0x6b, 0x69, 0xdb, 0x40, 0xc7, 0xb0, 0x3a, 0x39, 0xbd, 0xa1, 0xbb, 0x93, 0xb2, 0xa7, 0xc6, 0x48, 0xf3, 0xde, 0xbc, 0xed, 0x18, 0xd3, 0x1f, 0xa0, 0x18, 0x1f, 0x66, 0xd0, 0x86, 0xbe, 0x33, 0x33, 0xe0, 0x99, 0x77, 0x92, 0x37, 0x23, 0x3d, 0x8f, 0x61, 0x75, 0xb2, 0xe3, 0xd6, 0x08, 0x13, 0xc7, 0x00, 0x8d, 0x30, 0xb9, 0x51, 0x17, 0x08, 0x9f, 0x41, 0x3e, 0xea, 0x8d, 0xb5, 0xf1, 0xa6, 0x5b, 0x72, 0x6d, 0xbc, 0x99, 0x46, 0x5a, 0x70, 0xa9, 0x03, 0xe8, 0xaa, 0x89, 0xd6, 0xe3, 0xad, 0xd4, 0x44, 0x2b, 0x6d, 0x9a, 0x49, 0x5b, 0x91, 0x86, 0xdf, 0x43, 0x21, 0xf6, 0xcf, 0x11, 0x32, 0x27, 0x2d, 0x1c, 0xff, 0xd3, 0xca, 0xdc, 0x48, 0xdc, 0x8b, 0xdb, 0x6a, 0xb2, 0xf5, 0xd2, 0xb6, 0x4a, 0xec, 0xef, 0xb4, 0xad, 0x92, 0x3b, 0x36, 0xa1, 0xe5, 0x73, 0x28, 0xc4, 0x7a, 0x03, 0x94, 0xa0, 0xcb, 0x2c, 0xbc, 0x84, 0x66, 0x42, 0xf0, 0x6a, 0xc2, 0xb5, 0xa9, 0x22, 0x8c, 0xee, 0xcd, 0xad, 0xce, 0x92, 0xe7, 0xe6, 0x7b, 0xaa, 0xb7, 0xb5, 0x84, 0x76, 0x20, 0x17, 0x16, 0x3a, 0x74, 0x3b, 0xca, 0x0f, 0x93, 0x15, 0xd7, 0x2c, 0xcf, 0x6e, 0xc4, 0x80, 
0xfd, 0x08, 0x6b, 0x33, 0x35, 0x08, 0x45, 0x61, 0x38, 0xaf, 0x3c, 0x9a, 0xf7, 0x2f, 0x38, 0x11, 0xc1, 0x7b, 0x1d, 0xa6, 0x00, 0x9d, 0xd3, 0xa7, 0x53, 0xc0, 0x4c, 0xc1, 0x9a, 0x4e, 0x01, 0xb3, 0xe5, 0x40, 0xc0, 0x7e, 0x2d, 0xe7, 0xc8, 0xf8, 0xc4, 0xa2, 0x59, 0xcf, 0x19, 0x96, 0x34, 0xeb, 0x79, 0xc3, 0x8e, 0x60, 0x1d, 0x40, 0x79, 0x5e, 0x56, 0x47, 0x9f, 0xeb, 0x77, 0xbe, 0xb0, 0xc4, 0x98, 0x95, 0xf7, 0x1f, 0x0c, 0x45, 0x56, 0x8c, 0x6d, 0x03, 0x9d, 0xc2, 0xad, 0xe4, 0x04, 0x8d, 0x1e, 0x84, 0x9c, 0x2e, 0x2c, 0x19, 0xe6, 0xc3, 0xf7, 0x1d, 0xd3, 0x1a, 0x9e, 0x64, 0x45, 0x43, 0xfc, 0xe5, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc0, 0x6e, 0x68, 0x46, 0x65, 0x16, 0x00, 0x00, } conflicts.pb.go000066400000000000000000000571341324746544700356740ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: conflicts.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type ListConflictFilesRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` OurCommitOid string `protobuf:"bytes,2,opt,name=our_commit_oid,json=ourCommitOid" json:"our_commit_oid,omitempty"` TheirCommitOid string `protobuf:"bytes,3,opt,name=their_commit_oid,json=theirCommitOid" json:"their_commit_oid,omitempty"` } func (m *ListConflictFilesRequest) Reset() { *m = ListConflictFilesRequest{} } func (m *ListConflictFilesRequest) String() string { return proto.CompactTextString(m) } func (*ListConflictFilesRequest) ProtoMessage() {} func (*ListConflictFilesRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } func (m *ListConflictFilesRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *ListConflictFilesRequest) GetOurCommitOid() string { if m != nil { return m.OurCommitOid } return "" } func (m *ListConflictFilesRequest) GetTheirCommitOid() string { if m != nil { return m.TheirCommitOid } return "" } type ConflictFileHeader struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` CommitOid string `protobuf:"bytes,2,opt,name=commit_oid,json=commitOid" json:"commit_oid,omitempty"` TheirPath []byte `protobuf:"bytes,3,opt,name=their_path,json=theirPath,proto3" json:"their_path,omitempty"` OurPath []byte `protobuf:"bytes,4,opt,name=our_path,json=ourPath,proto3" json:"our_path,omitempty"` OurMode int32 `protobuf:"varint,5,opt,name=our_mode,json=ourMode" json:"our_mode,omitempty"` } func (m *ConflictFileHeader) Reset() { *m = ConflictFileHeader{} } func (m *ConflictFileHeader) String() string { return proto.CompactTextString(m) } func (*ConflictFileHeader) ProtoMessage() {} func (*ConflictFileHeader) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } func (m *ConflictFileHeader) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *ConflictFileHeader) GetCommitOid() string { if m != nil { return m.CommitOid } return "" } func (m *ConflictFileHeader) GetTheirPath() []byte { if m != nil { return m.TheirPath } return nil } func (m *ConflictFileHeader) GetOurPath() []byte { if m != nil { return m.OurPath } return nil } func (m *ConflictFileHeader) GetOurMode() 
int32 { if m != nil { return m.OurMode } return 0 } type ConflictFile struct { // Types that are valid to be assigned to ConflictFilePayload: // *ConflictFile_Header // *ConflictFile_Content ConflictFilePayload isConflictFile_ConflictFilePayload `protobuf_oneof:"conflict_file_payload"` } func (m *ConflictFile) Reset() { *m = ConflictFile{} } func (m *ConflictFile) String() string { return proto.CompactTextString(m) } func (*ConflictFile) ProtoMessage() {} func (*ConflictFile) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } type isConflictFile_ConflictFilePayload interface { isConflictFile_ConflictFilePayload() } type ConflictFile_Header struct { Header *ConflictFileHeader `protobuf:"bytes,1,opt,name=header,oneof"` } type ConflictFile_Content struct { Content []byte `protobuf:"bytes,2,opt,name=content,proto3,oneof"` } func (*ConflictFile_Header) isConflictFile_ConflictFilePayload() {} func (*ConflictFile_Content) isConflictFile_ConflictFilePayload() {} func (m *ConflictFile) GetConflictFilePayload() isConflictFile_ConflictFilePayload { if m != nil { return m.ConflictFilePayload } return nil } func (m *ConflictFile) GetHeader() *ConflictFileHeader { if x, ok := m.GetConflictFilePayload().(*ConflictFile_Header); ok { return x.Header } return nil } func (m *ConflictFile) GetContent() []byte { if x, ok := m.GetConflictFilePayload().(*ConflictFile_Content); ok { return x.Content } return nil } // XXX_OneofFuncs is for the internal use of the proto package. func (*ConflictFile) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _ConflictFile_OneofMarshaler, _ConflictFile_OneofUnmarshaler, _ConflictFile_OneofSizer, []interface{}{ (*ConflictFile_Header)(nil), (*ConflictFile_Content)(nil), } } func _ConflictFile_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { m := msg.(*ConflictFile) // conflict_file_payload switch x := m.ConflictFilePayload.(type) { case *ConflictFile_Header: b.EncodeVarint(1<<3 | proto.WireBytes) if err := b.EncodeMessage(x.Header); err != nil { return err } case *ConflictFile_Content: b.EncodeVarint(2<<3 | proto.WireBytes) b.EncodeRawBytes(x.Content) case nil: default: return fmt.Errorf("ConflictFile.ConflictFilePayload has unexpected type %T", x) } return nil } func _ConflictFile_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { m := msg.(*ConflictFile) switch tag { case 1: // conflict_file_payload.header if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } msg := new(ConflictFileHeader) err := b.DecodeMessage(msg) m.ConflictFilePayload = &ConflictFile_Header{msg} return true, err case 2: // conflict_file_payload.content if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } x, err := b.DecodeRawBytes(true) m.ConflictFilePayload = &ConflictFile_Content{x} return true, err default: return false, nil } } func _ConflictFile_OneofSizer(msg proto.Message) (n int) { m := msg.(*ConflictFile) // conflict_file_payload switch x := m.ConflictFilePayload.(type) { case *ConflictFile_Header: s := proto.Size(x.Header) n += proto.SizeVarint(1<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *ConflictFile_Content: n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.Content))) n += len(x.Content) case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } return n } type 
ListConflictFilesResponse struct { Files []*ConflictFile `protobuf:"bytes,1,rep,name=files" json:"files,omitempty"` } func (m *ListConflictFilesResponse) Reset() { *m = ListConflictFilesResponse{} } func (m *ListConflictFilesResponse) String() string { return proto.CompactTextString(m) } func (*ListConflictFilesResponse) ProtoMessage() {} func (*ListConflictFilesResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } func (m *ListConflictFilesResponse) GetFiles() []*ConflictFile { if m != nil { return m.Files } return nil } type ResolveConflictsRequestHeader struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` OurCommitOid string `protobuf:"bytes,2,opt,name=our_commit_oid,json=ourCommitOid" json:"our_commit_oid,omitempty"` TargetRepository *Repository `protobuf:"bytes,3,opt,name=target_repository,json=targetRepository" json:"target_repository,omitempty"` TheirCommitOid string `protobuf:"bytes,4,opt,name=their_commit_oid,json=theirCommitOid" json:"their_commit_oid,omitempty"` SourceBranch []byte `protobuf:"bytes,5,opt,name=source_branch,json=sourceBranch,proto3" json:"source_branch,omitempty"` TargetBranch []byte `protobuf:"bytes,6,opt,name=target_branch,json=targetBranch,proto3" json:"target_branch,omitempty"` CommitMessage []byte `protobuf:"bytes,7,opt,name=commit_message,json=commitMessage,proto3" json:"commit_message,omitempty"` User *User `protobuf:"bytes,8,opt,name=user" json:"user,omitempty"` } func (m *ResolveConflictsRequestHeader) Reset() { *m = ResolveConflictsRequestHeader{} } func (m *ResolveConflictsRequestHeader) String() string { return proto.CompactTextString(m) } func (*ResolveConflictsRequestHeader) ProtoMessage() {} func (*ResolveConflictsRequestHeader) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } func (m *ResolveConflictsRequestHeader) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *ResolveConflictsRequestHeader) GetOurCommitOid() string { if m != nil { return m.OurCommitOid } return "" } func (m *ResolveConflictsRequestHeader) GetTargetRepository() *Repository { if m != nil { return m.TargetRepository } return nil } func (m *ResolveConflictsRequestHeader) GetTheirCommitOid() string { if m != nil { return m.TheirCommitOid } return "" } func (m *ResolveConflictsRequestHeader) GetSourceBranch() []byte { if m != nil { return m.SourceBranch } return nil } func (m *ResolveConflictsRequestHeader) GetTargetBranch() []byte { if m != nil { return m.TargetBranch } return nil } func (m *ResolveConflictsRequestHeader) GetCommitMessage() []byte { if m != nil { return m.CommitMessage } return nil } func (m *ResolveConflictsRequestHeader) GetUser() *User { if m != nil { return m.User } return nil } type ResolveConflictsRequest struct { // Types that are valid to be assigned to ResolveConflictsRequestPayload: // *ResolveConflictsRequest_Header // *ResolveConflictsRequest_FilesJson ResolveConflictsRequestPayload isResolveConflictsRequest_ResolveConflictsRequestPayload `protobuf_oneof:"resolve_conflicts_request_payload"` } func (m *ResolveConflictsRequest) Reset() { *m = ResolveConflictsRequest{} } func (m *ResolveConflictsRequest) String() string { return proto.CompactTextString(m) } func (*ResolveConflictsRequest) ProtoMessage() {} func (*ResolveConflictsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } type isResolveConflictsRequest_ResolveConflictsRequestPayload interface { isResolveConflictsRequest_ResolveConflictsRequestPayload() 
} type ResolveConflictsRequest_Header struct { Header *ResolveConflictsRequestHeader `protobuf:"bytes,1,opt,name=header,oneof"` } type ResolveConflictsRequest_FilesJson struct { FilesJson []byte `protobuf:"bytes,2,opt,name=files_json,json=filesJson,proto3,oneof"` } func (*ResolveConflictsRequest_Header) isResolveConflictsRequest_ResolveConflictsRequestPayload() {} func (*ResolveConflictsRequest_FilesJson) isResolveConflictsRequest_ResolveConflictsRequestPayload() {} func (m *ResolveConflictsRequest) GetResolveConflictsRequestPayload() isResolveConflictsRequest_ResolveConflictsRequestPayload { if m != nil { return m.ResolveConflictsRequestPayload } return nil } func (m *ResolveConflictsRequest) GetHeader() *ResolveConflictsRequestHeader { if x, ok := m.GetResolveConflictsRequestPayload().(*ResolveConflictsRequest_Header); ok { return x.Header } return nil } func (m *ResolveConflictsRequest) GetFilesJson() []byte { if x, ok := m.GetResolveConflictsRequestPayload().(*ResolveConflictsRequest_FilesJson); ok { return x.FilesJson } return nil } // XXX_OneofFuncs is for the internal use of the proto package. func (*ResolveConflictsRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _ResolveConflictsRequest_OneofMarshaler, _ResolveConflictsRequest_OneofUnmarshaler, _ResolveConflictsRequest_OneofSizer, []interface{}{ (*ResolveConflictsRequest_Header)(nil), (*ResolveConflictsRequest_FilesJson)(nil), } } func _ResolveConflictsRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { m := msg.(*ResolveConflictsRequest) // resolve_conflicts_request_payload switch x := m.ResolveConflictsRequestPayload.(type) { case *ResolveConflictsRequest_Header: b.EncodeVarint(1<<3 | proto.WireBytes) if err := b.EncodeMessage(x.Header); err != nil { return err } case *ResolveConflictsRequest_FilesJson: b.EncodeVarint(2<<3 | proto.WireBytes) b.EncodeRawBytes(x.FilesJson) case nil: default: return fmt.Errorf("ResolveConflictsRequest.ResolveConflictsRequestPayload has unexpected type %T", x) } return nil } func _ResolveConflictsRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { m := msg.(*ResolveConflictsRequest) switch tag { case 1: // resolve_conflicts_request_payload.header if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } msg := new(ResolveConflictsRequestHeader) err := b.DecodeMessage(msg) m.ResolveConflictsRequestPayload = &ResolveConflictsRequest_Header{msg} return true, err case 2: // resolve_conflicts_request_payload.files_json if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } x, err := b.DecodeRawBytes(true) m.ResolveConflictsRequestPayload = &ResolveConflictsRequest_FilesJson{x} return true, err default: return false, nil } } func _ResolveConflictsRequest_OneofSizer(msg proto.Message) (n int) { m := msg.(*ResolveConflictsRequest) // resolve_conflicts_request_payload switch x := m.ResolveConflictsRequestPayload.(type) { case *ResolveConflictsRequest_Header: s := proto.Size(x.Header) n += proto.SizeVarint(1<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *ResolveConflictsRequest_FilesJson: n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.FilesJson))) n += len(x.FilesJson) case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } return n } type ResolveConflictsResponse struct { ResolutionError 
string `protobuf:"bytes,1,opt,name=resolution_error,json=resolutionError" json:"resolution_error,omitempty"` } func (m *ResolveConflictsResponse) Reset() { *m = ResolveConflictsResponse{} } func (m *ResolveConflictsResponse) String() string { return proto.CompactTextString(m) } func (*ResolveConflictsResponse) ProtoMessage() {} func (*ResolveConflictsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } func (m *ResolveConflictsResponse) GetResolutionError() string { if m != nil { return m.ResolutionError } return "" } func init() { proto.RegisterType((*ListConflictFilesRequest)(nil), "gitaly.ListConflictFilesRequest") proto.RegisterType((*ConflictFileHeader)(nil), "gitaly.ConflictFileHeader") proto.RegisterType((*ConflictFile)(nil), "gitaly.ConflictFile") proto.RegisterType((*ListConflictFilesResponse)(nil), "gitaly.ListConflictFilesResponse") proto.RegisterType((*ResolveConflictsRequestHeader)(nil), "gitaly.ResolveConflictsRequestHeader") proto.RegisterType((*ResolveConflictsRequest)(nil), "gitaly.ResolveConflictsRequest") proto.RegisterType((*ResolveConflictsResponse)(nil), "gitaly.ResolveConflictsResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for ConflictsService service type ConflictsServiceClient interface { ListConflictFiles(ctx context.Context, in *ListConflictFilesRequest, opts ...grpc.CallOption) (ConflictsService_ListConflictFilesClient, error) ResolveConflicts(ctx context.Context, opts ...grpc.CallOption) (ConflictsService_ResolveConflictsClient, error) } type conflictsServiceClient struct { cc *grpc.ClientConn } func NewConflictsServiceClient(cc *grpc.ClientConn) ConflictsServiceClient { return &conflictsServiceClient{cc} } func (c *conflictsServiceClient) ListConflictFiles(ctx context.Context, in *ListConflictFilesRequest, opts ...grpc.CallOption) (ConflictsService_ListConflictFilesClient, error) { stream, err := grpc.NewClientStream(ctx, &_ConflictsService_serviceDesc.Streams[0], c.cc, "/gitaly.ConflictsService/ListConflictFiles", opts...) if err != nil { return nil, err } x := &conflictsServiceListConflictFilesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type ConflictsService_ListConflictFilesClient interface { Recv() (*ListConflictFilesResponse, error) grpc.ClientStream } type conflictsServiceListConflictFilesClient struct { grpc.ClientStream } func (x *conflictsServiceListConflictFilesClient) Recv() (*ListConflictFilesResponse, error) { m := new(ListConflictFilesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *conflictsServiceClient) ResolveConflicts(ctx context.Context, opts ...grpc.CallOption) (ConflictsService_ResolveConflictsClient, error) { stream, err := grpc.NewClientStream(ctx, &_ConflictsService_serviceDesc.Streams[1], c.cc, "/gitaly.ConflictsService/ResolveConflicts", opts...) 
if err != nil { return nil, err } x := &conflictsServiceResolveConflictsClient{stream} return x, nil } type ConflictsService_ResolveConflictsClient interface { Send(*ResolveConflictsRequest) error CloseAndRecv() (*ResolveConflictsResponse, error) grpc.ClientStream } type conflictsServiceResolveConflictsClient struct { grpc.ClientStream } func (x *conflictsServiceResolveConflictsClient) Send(m *ResolveConflictsRequest) error { return x.ClientStream.SendMsg(m) } func (x *conflictsServiceResolveConflictsClient) CloseAndRecv() (*ResolveConflictsResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(ResolveConflictsResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for ConflictsService service type ConflictsServiceServer interface { ListConflictFiles(*ListConflictFilesRequest, ConflictsService_ListConflictFilesServer) error ResolveConflicts(ConflictsService_ResolveConflictsServer) error } func RegisterConflictsServiceServer(s *grpc.Server, srv ConflictsServiceServer) { s.RegisterService(&_ConflictsService_serviceDesc, srv) } func _ConflictsService_ListConflictFiles_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(ListConflictFilesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(ConflictsServiceServer).ListConflictFiles(m, &conflictsServiceListConflictFilesServer{stream}) } type ConflictsService_ListConflictFilesServer interface { Send(*ListConflictFilesResponse) error grpc.ServerStream } type conflictsServiceListConflictFilesServer struct { grpc.ServerStream } func (x *conflictsServiceListConflictFilesServer) Send(m *ListConflictFilesResponse) error { return x.ServerStream.SendMsg(m) } func _ConflictsService_ResolveConflicts_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(ConflictsServiceServer).ResolveConflicts(&conflictsServiceResolveConflictsServer{stream}) } type ConflictsService_ResolveConflictsServer interface { SendAndClose(*ResolveConflictsResponse) error Recv() (*ResolveConflictsRequest, error) grpc.ServerStream } type conflictsServiceResolveConflictsServer struct { grpc.ServerStream } func (x *conflictsServiceResolveConflictsServer) SendAndClose(m *ResolveConflictsResponse) error { return x.ServerStream.SendMsg(m) } func (x *conflictsServiceResolveConflictsServer) Recv() (*ResolveConflictsRequest, error) { m := new(ResolveConflictsRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _ConflictsService_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.ConflictsService", HandlerType: (*ConflictsServiceServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "ListConflictFiles", Handler: _ConflictsService_ListConflictFiles_Handler, ServerStreams: true, }, { StreamName: "ResolveConflicts", Handler: _ConflictsService_ResolveConflicts_Handler, ClientStreams: true, }, }, Metadata: "conflicts.proto", } func init() { proto.RegisterFile("conflicts.proto", fileDescriptor2) } var fileDescriptor2 = []byte{ // 575 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xd1, 0x6a, 0x13, 0x41, 0x14, 0x86, 0xbb, 0x6d, 0x93, 0x34, 0xa7, 0xdb, 0x34, 0x1d, 0x94, 0x6e, 0x03, 0xa1, 0xdb, 0xad, 0x85, 0xd5, 0x8b, 0x20, 0xd1, 0xfb, 0x42, 0x4a, 0x35, 0x88, 0x45, 0x19, 0xf1, 0x42, 0x10, 0x96, 0xed, 0xee, 0x69, 0x76, 0x64, 0xb3, 0x13, 0x67, 0x66, 0x0b, 0x79, 0x19, 0xf1, 0x41, 0x7c, 0x03, 0x1f, 0xc8, 0x5b, 0xc9, 0xcc, 
0xee, 0x26, 0x6d, 0x92, 0x2a, 0x7a, 0xfb, 0x9f, 0x8f, 0x73, 0xfe, 0x33, 0xe7, 0x67, 0x60, 0x3f, 0xe2, 0xd9, 0x4d, 0xca, 0x22, 0x25, 0x7b, 0x13, 0xc1, 0x15, 0x27, 0xf5, 0x11, 0x53, 0x61, 0x3a, 0xed, 0xd8, 0x32, 0x09, 0x05, 0xc6, 0x46, 0xf5, 0xbe, 0x59, 0xe0, 0xbc, 0x65, 0x52, 0x5d, 0x14, 0xf4, 0x2b, 0x96, 0xa2, 0xa4, 0xf8, 0x35, 0x47, 0xa9, 0x48, 0x1f, 0x40, 0xe0, 0x84, 0x4b, 0xa6, 0xb8, 0x98, 0x3a, 0x96, 0x6b, 0xf9, 0xbb, 0x7d, 0xd2, 0x33, 0x7d, 0x7a, 0xb4, 0xaa, 0xd0, 0x05, 0x8a, 0x3c, 0x81, 0x16, 0xcf, 0x45, 0x10, 0xf1, 0xf1, 0x98, 0xa9, 0x80, 0xb3, 0xd8, 0xd9, 0x74, 0x2d, 0xbf, 0x49, 0x6d, 0x9e, 0x8b, 0x0b, 0x2d, 0xbe, 0x63, 0x31, 0xf1, 0xa1, 0xad, 0x12, 0x64, 0x77, 0xb8, 0x2d, 0xcd, 0xb5, 0xb4, 0x5e, 0x91, 0xde, 0x0f, 0x0b, 0xc8, 0xa2, 0xb9, 0x21, 0x86, 0x31, 0x8a, 0x7f, 0xb2, 0xd6, 0x05, 0x58, 0xb2, 0xd5, 0x8c, 0x2a, 0x4f, 0x5d, 0x00, 0xe3, 0x69, 0x12, 0xaa, 0x44, 0xbb, 0xb1, 0x69, 0x53, 0x2b, 0xef, 0x43, 0x95, 0x90, 0x23, 0xd8, 0x99, 0x2d, 0xa6, 0x8b, 0xdb, 0xba, 0xd8, 0xe0, 0xf9, 0x9d, 0xd2, 0x98, 0xc7, 0xe8, 0xd4, 0x5c, 0xcb, 0xaf, 0xe9, 0xd2, 0x15, 0x8f, 0xd1, 0x9b, 0x82, 0xbd, 0xe8, 0x9e, 0xbc, 0x84, 0x7a, 0xa2, 0x37, 0x28, 0x3c, 0x77, 0x4a, 0xcf, 0xcb, 0x3b, 0x0e, 0x37, 0x68, 0xc1, 0x92, 0x0e, 0x34, 0x22, 0x9e, 0x29, 0xcc, 0x94, 0xb6, 0x6d, 0x0f, 0x37, 0x68, 0x29, 0x0c, 0x0e, 0xe1, 0x71, 0x79, 0xea, 0xe0, 0x86, 0xa5, 0x18, 0x4c, 0xc2, 0x69, 0xca, 0xc3, 0xd8, 0x7b, 0x0d, 0x47, 0x2b, 0x2e, 0x2b, 0x27, 0x3c, 0x93, 0x48, 0x9e, 0x41, 0x6d, 0x06, 0x4b, 0xc7, 0x72, 0xb7, 0xfc, 0xdd, 0xfe, 0xa3, 0x55, 0x36, 0xa8, 0x41, 0xbc, 0x5f, 0x9b, 0xd0, 0xa5, 0x28, 0x79, 0x7a, 0x8b, 0x65, 0xb9, 0x8c, 0xc8, 0x7f, 0x5c, 0xe3, 0xef, 0x82, 0x72, 0x0e, 0x07, 0x2a, 0x14, 0x23, 0x54, 0xc1, 0xc2, 0x80, 0xad, 0xb5, 0x03, 0xda, 0x06, 0x9e, 0x2b, 0x2b, 0x93, 0xb6, 0xbd, 0x2a, 0x69, 0xe4, 0x14, 0xf6, 0x24, 0xcf, 0x45, 0x84, 0xc1, 0xb5, 0x08, 0xb3, 0x28, 0xd1, 0xa7, 0xb4, 0xa9, 0x6d, 0xc4, 0x81, 0xd6, 0x66, 0x50, 0xe1, 0xa7, 0x80, 0xea, 0x06, 0x32, 0x62, 0x01, 0x9d, 0x41, 0xab, 0x98, 0x36, 0x46, 0x29, 0xc3, 0x11, 0x3a, 0x0d, 0x4d, 0xed, 0x19, 0xf5, 0xca, 0x88, 0xc4, 0x85, 0xed, 0x5c, 0xa2, 0x70, 0x76, 0xf4, 0x3a, 0x76, 0xb9, 0xce, 0x47, 0x89, 0x82, 0xea, 0x8a, 0xf7, 0xdd, 0x82, 0xc3, 0x35, 0x2f, 0x4f, 0xce, 0xef, 0x25, 0xe9, 0x6c, 0xfe, 0x1c, 0x0f, 0x9c, 0x6a, 0x21, 0x54, 0xc7, 0x00, 0xfa, 0xbe, 0xc1, 0x17, 0xc9, 0xb3, 0x2a, 0x57, 0x4d, 0xad, 0xbd, 0x91, 0x3c, 0x1b, 0x9c, 0xc2, 0x89, 0x30, 0xbd, 0x82, 0xea, 0x33, 0x09, 0x84, 0xe9, 0x56, 0xa5, 0xec, 0x12, 0x9c, 0xe5, 0x81, 0x45, 0xc8, 0x9e, 0x42, 0x5b, 0x37, 0xc8, 0x15, 0xe3, 0x59, 0x80, 0x42, 0x70, 0x63, 0xb6, 0x49, 0xf7, 0xe7, 0xfa, 0xe5, 0x4c, 0xee, 0xff, 0xb4, 0xa0, 0x5d, 0x35, 0xf8, 0x80, 0xe2, 0x96, 0x45, 0x48, 0x3e, 0xc3, 0xc1, 0x52, 0x82, 0x89, 0x5b, 0xee, 0xb9, 0xee, 0xdb, 0xea, 0x9c, 0x3c, 0x40, 0x18, 0x67, 0xde, 0xc6, 0x73, 0x8b, 0x7c, 0x82, 0xf6, 0x7d, 0xe7, 0xe4, 0xf8, 0x0f, 0x8f, 0xd8, 0x71, 0xd7, 0x03, 0x65, 0x6b, 0xdf, 0xba, 0xae, 0xeb, 0xcf, 0xf5, 0xc5, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2c, 0xb1, 0x16, 0xeb, 0x85, 0x05, 0x00, 0x00, } deprecated-services.pb.go000066400000000000000000001051561324746544700376270ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. 
// source: deprecated-services.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for Commit service type CommitClient interface { CommitIsAncestor(ctx context.Context, in *CommitIsAncestorRequest, opts ...grpc.CallOption) (*CommitIsAncestorResponse, error) TreeEntry(ctx context.Context, in *TreeEntryRequest, opts ...grpc.CallOption) (Commit_TreeEntryClient, error) } type commitClient struct { cc *grpc.ClientConn } func NewCommitClient(cc *grpc.ClientConn) CommitClient { return &commitClient{cc} } func (c *commitClient) CommitIsAncestor(ctx context.Context, in *CommitIsAncestorRequest, opts ...grpc.CallOption) (*CommitIsAncestorResponse, error) { out := new(CommitIsAncestorResponse) err := grpc.Invoke(ctx, "/gitaly.Commit/CommitIsAncestor", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *commitClient) TreeEntry(ctx context.Context, in *TreeEntryRequest, opts ...grpc.CallOption) (Commit_TreeEntryClient, error) { stream, err := grpc.NewClientStream(ctx, &_Commit_serviceDesc.Streams[0], c.cc, "/gitaly.Commit/TreeEntry", opts...) if err != nil { return nil, err } x := &commitTreeEntryClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type Commit_TreeEntryClient interface { Recv() (*TreeEntryResponse, error) grpc.ClientStream } type commitTreeEntryClient struct { grpc.ClientStream } func (x *commitTreeEntryClient) Recv() (*TreeEntryResponse, error) { m := new(TreeEntryResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for Commit service type CommitServer interface { CommitIsAncestor(context.Context, *CommitIsAncestorRequest) (*CommitIsAncestorResponse, error) TreeEntry(*TreeEntryRequest, Commit_TreeEntryServer) error } func RegisterCommitServer(s *grpc.Server, srv CommitServer) { s.RegisterService(&_Commit_serviceDesc, srv) } func _Commit_CommitIsAncestor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CommitIsAncestorRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CommitServer).CommitIsAncestor(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.Commit/CommitIsAncestor", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CommitServer).CommitIsAncestor(ctx, req.(*CommitIsAncestorRequest)) } return interceptor(ctx, in, info, handler) } func _Commit_TreeEntry_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(TreeEntryRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(CommitServer).TreeEntry(m, &commitTreeEntryServer{stream}) } type Commit_TreeEntryServer interface { Send(*TreeEntryResponse) error grpc.ServerStream } type commitTreeEntryServer struct { grpc.ServerStream } func (x 
*commitTreeEntryServer) Send(m *TreeEntryResponse) error { return x.ServerStream.SendMsg(m) } var _Commit_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.Commit", HandlerType: (*CommitServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "CommitIsAncestor", Handler: _Commit_CommitIsAncestor_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "TreeEntry", Handler: _Commit_TreeEntry_Handler, ServerStreams: true, }, }, Metadata: "deprecated-services.proto", } // Client API for Diff service type DiffClient interface { // Returns stream of CommitDiffResponse with patches chunked over messages CommitDiff(ctx context.Context, in *CommitDiffRequest, opts ...grpc.CallOption) (Diff_CommitDiffClient, error) // Return a stream so we can divide the response in chunks of deltas CommitDelta(ctx context.Context, in *CommitDeltaRequest, opts ...grpc.CallOption) (Diff_CommitDeltaClient, error) } type diffClient struct { cc *grpc.ClientConn } func NewDiffClient(cc *grpc.ClientConn) DiffClient { return &diffClient{cc} } func (c *diffClient) CommitDiff(ctx context.Context, in *CommitDiffRequest, opts ...grpc.CallOption) (Diff_CommitDiffClient, error) { stream, err := grpc.NewClientStream(ctx, &_Diff_serviceDesc.Streams[0], c.cc, "/gitaly.Diff/CommitDiff", opts...) if err != nil { return nil, err } x := &diffCommitDiffClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type Diff_CommitDiffClient interface { Recv() (*CommitDiffResponse, error) grpc.ClientStream } type diffCommitDiffClient struct { grpc.ClientStream } func (x *diffCommitDiffClient) Recv() (*CommitDiffResponse, error) { m := new(CommitDiffResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *diffClient) CommitDelta(ctx context.Context, in *CommitDeltaRequest, opts ...grpc.CallOption) (Diff_CommitDeltaClient, error) { stream, err := grpc.NewClientStream(ctx, &_Diff_serviceDesc.Streams[1], c.cc, "/gitaly.Diff/CommitDelta", opts...) 
if err != nil { return nil, err } x := &diffCommitDeltaClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type Diff_CommitDeltaClient interface { Recv() (*CommitDeltaResponse, error) grpc.ClientStream } type diffCommitDeltaClient struct { grpc.ClientStream } func (x *diffCommitDeltaClient) Recv() (*CommitDeltaResponse, error) { m := new(CommitDeltaResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for Diff service type DiffServer interface { // Returns stream of CommitDiffResponse with patches chunked over messages CommitDiff(*CommitDiffRequest, Diff_CommitDiffServer) error // Return a stream so we can divide the response in chunks of deltas CommitDelta(*CommitDeltaRequest, Diff_CommitDeltaServer) error } func RegisterDiffServer(s *grpc.Server, srv DiffServer) { s.RegisterService(&_Diff_serviceDesc, srv) } func _Diff_CommitDiff_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(CommitDiffRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(DiffServer).CommitDiff(m, &diffCommitDiffServer{stream}) } type Diff_CommitDiffServer interface { Send(*CommitDiffResponse) error grpc.ServerStream } type diffCommitDiffServer struct { grpc.ServerStream } func (x *diffCommitDiffServer) Send(m *CommitDiffResponse) error { return x.ServerStream.SendMsg(m) } func _Diff_CommitDelta_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(CommitDeltaRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(DiffServer).CommitDelta(m, &diffCommitDeltaServer{stream}) } type Diff_CommitDeltaServer interface { Send(*CommitDeltaResponse) error grpc.ServerStream } type diffCommitDeltaServer struct { grpc.ServerStream } func (x *diffCommitDeltaServer) Send(m *CommitDeltaResponse) error { return x.ServerStream.SendMsg(m) } var _Diff_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.Diff", HandlerType: (*DiffServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "CommitDiff", Handler: _Diff_CommitDiff_Handler, ServerStreams: true, }, { StreamName: "CommitDelta", Handler: _Diff_CommitDelta_Handler, ServerStreams: true, }, }, Metadata: "deprecated-services.proto", } // Client API for Notifications service type NotificationsClient interface { PostReceive(ctx context.Context, in *PostReceiveRequest, opts ...grpc.CallOption) (*PostReceiveResponse, error) } type notificationsClient struct { cc *grpc.ClientConn } func NewNotificationsClient(cc *grpc.ClientConn) NotificationsClient { return &notificationsClient{cc} } func (c *notificationsClient) PostReceive(ctx context.Context, in *PostReceiveRequest, opts ...grpc.CallOption) (*PostReceiveResponse, error) { out := new(PostReceiveResponse) err := grpc.Invoke(ctx, "/gitaly.Notifications/PostReceive", in, out, c.cc, opts...) 
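// Illustrative usage sketch (hand-written, not protoc output): PostReceive is the
// Notifications service's only method, a unary RPC; grpc.Invoke above performs the
// blocking request/response round trip on "/gitaly.Notifications/PostReceive".
// With hypothetical `conn` and `req`:
//
//	client := gitaly.NewNotificationsClient(conn)
//	resp, err := client.PostReceive(ctx, req)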
if err != nil { return nil, err } return out, nil } // Server API for Notifications service type NotificationsServer interface { PostReceive(context.Context, *PostReceiveRequest) (*PostReceiveResponse, error) } func RegisterNotificationsServer(s *grpc.Server, srv NotificationsServer) { s.RegisterService(&_Notifications_serviceDesc, srv) } func _Notifications_PostReceive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PostReceiveRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(NotificationsServer).PostReceive(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.Notifications/PostReceive", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(NotificationsServer).PostReceive(ctx, req.(*PostReceiveRequest)) } return interceptor(ctx, in, info, handler) } var _Notifications_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.Notifications", HandlerType: (*NotificationsServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "PostReceive", Handler: _Notifications_PostReceive_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "deprecated-services.proto", } // Client API for Ref service type RefClient interface { FindDefaultBranchName(ctx context.Context, in *FindDefaultBranchNameRequest, opts ...grpc.CallOption) (*FindDefaultBranchNameResponse, error) FindAllBranchNames(ctx context.Context, in *FindAllBranchNamesRequest, opts ...grpc.CallOption) (Ref_FindAllBranchNamesClient, error) FindAllTagNames(ctx context.Context, in *FindAllTagNamesRequest, opts ...grpc.CallOption) (Ref_FindAllTagNamesClient, error) // Find a Ref matching the given constraints. Response may be empty. FindRefName(ctx context.Context, in *FindRefNameRequest, opts ...grpc.CallOption) (*FindRefNameResponse, error) // Return a stream so we can divide the response in chunks of branches FindLocalBranches(ctx context.Context, in *FindLocalBranchesRequest, opts ...grpc.CallOption) (Ref_FindLocalBranchesClient, error) } type refClient struct { cc *grpc.ClientConn } func NewRefClient(cc *grpc.ClientConn) RefClient { return &refClient{cc} } func (c *refClient) FindDefaultBranchName(ctx context.Context, in *FindDefaultBranchNameRequest, opts ...grpc.CallOption) (*FindDefaultBranchNameResponse, error) { out := new(FindDefaultBranchNameResponse) err := grpc.Invoke(ctx, "/gitaly.Ref/FindDefaultBranchName", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *refClient) FindAllBranchNames(ctx context.Context, in *FindAllBranchNamesRequest, opts ...grpc.CallOption) (Ref_FindAllBranchNamesClient, error) { stream, err := grpc.NewClientStream(ctx, &_Ref_serviceDesc.Streams[0], c.cc, "/gitaly.Ref/FindAllBranchNames", opts...) 
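// Illustrative usage sketch (hand-written, not protoc output): FindAllBranchNames is
// server-streaming, so callers read from the returned stream until io.EOF. `conn` and
// `req` are hypothetical placeholders, and the io import is assumed on the caller's side:
//
//	refClient := gitaly.NewRefClient(conn)
//	stream, err := refClient.FindAllBranchNames(ctx, req)
//	if err != nil { /* handle error */ }
//	for {
//		resp, err := stream.Recv()
//		if err == io.EOF { break }
//		if err != nil { /* handle error */ }
//		_ = resp // consume this chunk of branch names
//	}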
if err != nil { return nil, err } x := &refFindAllBranchNamesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type Ref_FindAllBranchNamesClient interface { Recv() (*FindAllBranchNamesResponse, error) grpc.ClientStream } type refFindAllBranchNamesClient struct { grpc.ClientStream } func (x *refFindAllBranchNamesClient) Recv() (*FindAllBranchNamesResponse, error) { m := new(FindAllBranchNamesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *refClient) FindAllTagNames(ctx context.Context, in *FindAllTagNamesRequest, opts ...grpc.CallOption) (Ref_FindAllTagNamesClient, error) { stream, err := grpc.NewClientStream(ctx, &_Ref_serviceDesc.Streams[1], c.cc, "/gitaly.Ref/FindAllTagNames", opts...) if err != nil { return nil, err } x := &refFindAllTagNamesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type Ref_FindAllTagNamesClient interface { Recv() (*FindAllTagNamesResponse, error) grpc.ClientStream } type refFindAllTagNamesClient struct { grpc.ClientStream } func (x *refFindAllTagNamesClient) Recv() (*FindAllTagNamesResponse, error) { m := new(FindAllTagNamesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *refClient) FindRefName(ctx context.Context, in *FindRefNameRequest, opts ...grpc.CallOption) (*FindRefNameResponse, error) { out := new(FindRefNameResponse) err := grpc.Invoke(ctx, "/gitaly.Ref/FindRefName", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *refClient) FindLocalBranches(ctx context.Context, in *FindLocalBranchesRequest, opts ...grpc.CallOption) (Ref_FindLocalBranchesClient, error) { stream, err := grpc.NewClientStream(ctx, &_Ref_serviceDesc.Streams[2], c.cc, "/gitaly.Ref/FindLocalBranches", opts...) if err != nil { return nil, err } x := &refFindLocalBranchesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type Ref_FindLocalBranchesClient interface { Recv() (*FindLocalBranchesResponse, error) grpc.ClientStream } type refFindLocalBranchesClient struct { grpc.ClientStream } func (x *refFindLocalBranchesClient) Recv() (*FindLocalBranchesResponse, error) { m := new(FindLocalBranchesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for Ref service type RefServer interface { FindDefaultBranchName(context.Context, *FindDefaultBranchNameRequest) (*FindDefaultBranchNameResponse, error) FindAllBranchNames(*FindAllBranchNamesRequest, Ref_FindAllBranchNamesServer) error FindAllTagNames(*FindAllTagNamesRequest, Ref_FindAllTagNamesServer) error // Find a Ref matching the given constraints. Response may be empty. 
FindRefName(context.Context, *FindRefNameRequest) (*FindRefNameResponse, error) // Return a stream so we can divide the response in chunks of branches FindLocalBranches(*FindLocalBranchesRequest, Ref_FindLocalBranchesServer) error } func RegisterRefServer(s *grpc.Server, srv RefServer) { s.RegisterService(&_Ref_serviceDesc, srv) } func _Ref_FindDefaultBranchName_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FindDefaultBranchNameRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RefServer).FindDefaultBranchName(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.Ref/FindDefaultBranchName", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RefServer).FindDefaultBranchName(ctx, req.(*FindDefaultBranchNameRequest)) } return interceptor(ctx, in, info, handler) } func _Ref_FindAllBranchNames_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(FindAllBranchNamesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(RefServer).FindAllBranchNames(m, &refFindAllBranchNamesServer{stream}) } type Ref_FindAllBranchNamesServer interface { Send(*FindAllBranchNamesResponse) error grpc.ServerStream } type refFindAllBranchNamesServer struct { grpc.ServerStream } func (x *refFindAllBranchNamesServer) Send(m *FindAllBranchNamesResponse) error { return x.ServerStream.SendMsg(m) } func _Ref_FindAllTagNames_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(FindAllTagNamesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(RefServer).FindAllTagNames(m, &refFindAllTagNamesServer{stream}) } type Ref_FindAllTagNamesServer interface { Send(*FindAllTagNamesResponse) error grpc.ServerStream } type refFindAllTagNamesServer struct { grpc.ServerStream } func (x *refFindAllTagNamesServer) Send(m *FindAllTagNamesResponse) error { return x.ServerStream.SendMsg(m) } func _Ref_FindRefName_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FindRefNameRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RefServer).FindRefName(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.Ref/FindRefName", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RefServer).FindRefName(ctx, req.(*FindRefNameRequest)) } return interceptor(ctx, in, info, handler) } func _Ref_FindLocalBranches_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(FindLocalBranchesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(RefServer).FindLocalBranches(m, &refFindLocalBranchesServer{stream}) } type Ref_FindLocalBranchesServer interface { Send(*FindLocalBranchesResponse) error grpc.ServerStream } type refFindLocalBranchesServer struct { grpc.ServerStream } func (x *refFindLocalBranchesServer) Send(m *FindLocalBranchesResponse) error { return x.ServerStream.SendMsg(m) } var _Ref_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.Ref", HandlerType: (*RefServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "FindDefaultBranchName", Handler: _Ref_FindDefaultBranchName_Handler, }, { MethodName: "FindRefName", Handler: _Ref_FindRefName_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "FindAllBranchNames", Handler: 
_Ref_FindAllBranchNames_Handler, ServerStreams: true, }, { StreamName: "FindAllTagNames", Handler: _Ref_FindAllTagNames_Handler, ServerStreams: true, }, { StreamName: "FindLocalBranches", Handler: _Ref_FindLocalBranches_Handler, ServerStreams: true, }, }, Metadata: "deprecated-services.proto", } // Client API for SmartHTTP service type SmartHTTPClient interface { // The response body for GET /info/refs?service=git-upload-pack InfoRefsUploadPack(ctx context.Context, in *InfoRefsRequest, opts ...grpc.CallOption) (SmartHTTP_InfoRefsUploadPackClient, error) // The response body for GET /info/refs?service=git-receive-pack InfoRefsReceivePack(ctx context.Context, in *InfoRefsRequest, opts ...grpc.CallOption) (SmartHTTP_InfoRefsReceivePackClient, error) // Request and response body for POST /upload-pack PostUploadPack(ctx context.Context, opts ...grpc.CallOption) (SmartHTTP_PostUploadPackClient, error) // Request and response body for POST /receive-pack PostReceivePack(ctx context.Context, opts ...grpc.CallOption) (SmartHTTP_PostReceivePackClient, error) } type smartHTTPClient struct { cc *grpc.ClientConn } func NewSmartHTTPClient(cc *grpc.ClientConn) SmartHTTPClient { return &smartHTTPClient{cc} } func (c *smartHTTPClient) InfoRefsUploadPack(ctx context.Context, in *InfoRefsRequest, opts ...grpc.CallOption) (SmartHTTP_InfoRefsUploadPackClient, error) { stream, err := grpc.NewClientStream(ctx, &_SmartHTTP_serviceDesc.Streams[0], c.cc, "/gitaly.SmartHTTP/InfoRefsUploadPack", opts...) if err != nil { return nil, err } x := &smartHTTPInfoRefsUploadPackClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type SmartHTTP_InfoRefsUploadPackClient interface { Recv() (*InfoRefsResponse, error) grpc.ClientStream } type smartHTTPInfoRefsUploadPackClient struct { grpc.ClientStream } func (x *smartHTTPInfoRefsUploadPackClient) Recv() (*InfoRefsResponse, error) { m := new(InfoRefsResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *smartHTTPClient) InfoRefsReceivePack(ctx context.Context, in *InfoRefsRequest, opts ...grpc.CallOption) (SmartHTTP_InfoRefsReceivePackClient, error) { stream, err := grpc.NewClientStream(ctx, &_SmartHTTP_serviceDesc.Streams[1], c.cc, "/gitaly.SmartHTTP/InfoRefsReceivePack", opts...) if err != nil { return nil, err } x := &smartHTTPInfoRefsReceivePackClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type SmartHTTP_InfoRefsReceivePackClient interface { Recv() (*InfoRefsResponse, error) grpc.ClientStream } type smartHTTPInfoRefsReceivePackClient struct { grpc.ClientStream } func (x *smartHTTPInfoRefsReceivePackClient) Recv() (*InfoRefsResponse, error) { m := new(InfoRefsResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *smartHTTPClient) PostUploadPack(ctx context.Context, opts ...grpc.CallOption) (SmartHTTP_PostUploadPackClient, error) { stream, err := grpc.NewClientStream(ctx, &_SmartHTTP_serviceDesc.Streams[2], c.cc, "/gitaly.SmartHTTP/PostUploadPack", opts...) 
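// Illustrative usage sketch (hand-written, not protoc output): PostUploadPack is a
// bidirectional stream; the generated client only opens it here, and the caller drives
// it by sending request chunks, calling CloseSend once the request body is written, and
// reading response chunks until io.EOF. All caller-side names below are hypothetical:
//
//	stream, err := smartHTTPClient.PostUploadPack(ctx)
//	if err != nil { /* handle error */ }
//	if err := stream.Send(firstRequest); err != nil { /* handle error */ }
//	// ...send any remaining request chunks, then:
//	if err := stream.CloseSend(); err != nil { /* handle error */ }
//	for {
//		resp, err := stream.Recv()
//		if err == io.EOF { break }
//		if err != nil { /* handle error */ }
//		_ = resp // relay this chunk of the upload-pack response
//	}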
if err != nil { return nil, err } x := &smartHTTPPostUploadPackClient{stream} return x, nil } type SmartHTTP_PostUploadPackClient interface { Send(*PostUploadPackRequest) error Recv() (*PostUploadPackResponse, error) grpc.ClientStream } type smartHTTPPostUploadPackClient struct { grpc.ClientStream } func (x *smartHTTPPostUploadPackClient) Send(m *PostUploadPackRequest) error { return x.ClientStream.SendMsg(m) } func (x *smartHTTPPostUploadPackClient) Recv() (*PostUploadPackResponse, error) { m := new(PostUploadPackResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *smartHTTPClient) PostReceivePack(ctx context.Context, opts ...grpc.CallOption) (SmartHTTP_PostReceivePackClient, error) { stream, err := grpc.NewClientStream(ctx, &_SmartHTTP_serviceDesc.Streams[3], c.cc, "/gitaly.SmartHTTP/PostReceivePack", opts...) if err != nil { return nil, err } x := &smartHTTPPostReceivePackClient{stream} return x, nil } type SmartHTTP_PostReceivePackClient interface { Send(*PostReceivePackRequest) error Recv() (*PostReceivePackResponse, error) grpc.ClientStream } type smartHTTPPostReceivePackClient struct { grpc.ClientStream } func (x *smartHTTPPostReceivePackClient) Send(m *PostReceivePackRequest) error { return x.ClientStream.SendMsg(m) } func (x *smartHTTPPostReceivePackClient) Recv() (*PostReceivePackResponse, error) { m := new(PostReceivePackResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for SmartHTTP service type SmartHTTPServer interface { // The response body for GET /info/refs?service=git-upload-pack InfoRefsUploadPack(*InfoRefsRequest, SmartHTTP_InfoRefsUploadPackServer) error // The response body for GET /info/refs?service=git-receive-pack InfoRefsReceivePack(*InfoRefsRequest, SmartHTTP_InfoRefsReceivePackServer) error // Request and response body for POST /upload-pack PostUploadPack(SmartHTTP_PostUploadPackServer) error // Request and response body for POST /receive-pack PostReceivePack(SmartHTTP_PostReceivePackServer) error } func RegisterSmartHTTPServer(s *grpc.Server, srv SmartHTTPServer) { s.RegisterService(&_SmartHTTP_serviceDesc, srv) } func _SmartHTTP_InfoRefsUploadPack_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(InfoRefsRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(SmartHTTPServer).InfoRefsUploadPack(m, &smartHTTPInfoRefsUploadPackServer{stream}) } type SmartHTTP_InfoRefsUploadPackServer interface { Send(*InfoRefsResponse) error grpc.ServerStream } type smartHTTPInfoRefsUploadPackServer struct { grpc.ServerStream } func (x *smartHTTPInfoRefsUploadPackServer) Send(m *InfoRefsResponse) error { return x.ServerStream.SendMsg(m) } func _SmartHTTP_InfoRefsReceivePack_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(InfoRefsRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(SmartHTTPServer).InfoRefsReceivePack(m, &smartHTTPInfoRefsReceivePackServer{stream}) } type SmartHTTP_InfoRefsReceivePackServer interface { Send(*InfoRefsResponse) error grpc.ServerStream } type smartHTTPInfoRefsReceivePackServer struct { grpc.ServerStream } func (x *smartHTTPInfoRefsReceivePackServer) Send(m *InfoRefsResponse) error { return x.ServerStream.SendMsg(m) } func _SmartHTTP_PostUploadPack_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(SmartHTTPServer).PostUploadPack(&smartHTTPPostUploadPackServer{stream}) } type SmartHTTP_PostUploadPackServer interface { 
Send(*PostUploadPackResponse) error Recv() (*PostUploadPackRequest, error) grpc.ServerStream } type smartHTTPPostUploadPackServer struct { grpc.ServerStream } func (x *smartHTTPPostUploadPackServer) Send(m *PostUploadPackResponse) error { return x.ServerStream.SendMsg(m) } func (x *smartHTTPPostUploadPackServer) Recv() (*PostUploadPackRequest, error) { m := new(PostUploadPackRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _SmartHTTP_PostReceivePack_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(SmartHTTPServer).PostReceivePack(&smartHTTPPostReceivePackServer{stream}) } type SmartHTTP_PostReceivePackServer interface { Send(*PostReceivePackResponse) error Recv() (*PostReceivePackRequest, error) grpc.ServerStream } type smartHTTPPostReceivePackServer struct { grpc.ServerStream } func (x *smartHTTPPostReceivePackServer) Send(m *PostReceivePackResponse) error { return x.ServerStream.SendMsg(m) } func (x *smartHTTPPostReceivePackServer) Recv() (*PostReceivePackRequest, error) { m := new(PostReceivePackRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _SmartHTTP_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.SmartHTTP", HandlerType: (*SmartHTTPServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "InfoRefsUploadPack", Handler: _SmartHTTP_InfoRefsUploadPack_Handler, ServerStreams: true, }, { StreamName: "InfoRefsReceivePack", Handler: _SmartHTTP_InfoRefsReceivePack_Handler, ServerStreams: true, }, { StreamName: "PostUploadPack", Handler: _SmartHTTP_PostUploadPack_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "PostReceivePack", Handler: _SmartHTTP_PostReceivePack_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "deprecated-services.proto", } // Client API for SSH service type SSHClient interface { // To forward 'git upload-pack' to Gitaly for SSH sessions SSHUploadPack(ctx context.Context, opts ...grpc.CallOption) (SSH_SSHUploadPackClient, error) // To forward 'git receive-pack' to Gitaly for SSH sessions SSHReceivePack(ctx context.Context, opts ...grpc.CallOption) (SSH_SSHReceivePackClient, error) } type sSHClient struct { cc *grpc.ClientConn } func NewSSHClient(cc *grpc.ClientConn) SSHClient { return &sSHClient{cc} } func (c *sSHClient) SSHUploadPack(ctx context.Context, opts ...grpc.CallOption) (SSH_SSHUploadPackClient, error) { stream, err := grpc.NewClientStream(ctx, &_SSH_serviceDesc.Streams[0], c.cc, "/gitaly.SSH/SSHUploadPack", opts...) if err != nil { return nil, err } x := &sSHSSHUploadPackClient{stream} return x, nil } type SSH_SSHUploadPackClient interface { Send(*SSHUploadPackRequest) error Recv() (*SSHUploadPackResponse, error) grpc.ClientStream } type sSHSSHUploadPackClient struct { grpc.ClientStream } func (x *sSHSSHUploadPackClient) Send(m *SSHUploadPackRequest) error { return x.ClientStream.SendMsg(m) } func (x *sSHSSHUploadPackClient) Recv() (*SSHUploadPackResponse, error) { m := new(SSHUploadPackResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *sSHClient) SSHReceivePack(ctx context.Context, opts ...grpc.CallOption) (SSH_SSHReceivePackClient, error) { stream, err := grpc.NewClientStream(ctx, &_SSH_serviceDesc.Streams[1], c.cc, "/gitaly.SSH/SSHReceivePack", opts...) 
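// Context note (hand-written, not protoc output): per the interface comments above,
// SSHUploadPack and SSHReceivePack let an SSH front end such as gitlab-shell forward a
// 'git upload-pack' or 'git receive-pack' session to Gitaly over a bidirectional stream;
// roughly, data read from the SSH connection is passed to Send and payloads returned by
// Recv are written back to the SSH client. The request and response message layouts are
// defined elsewhere in this vendored package, not in this file. A minimal sketch with a
// hypothetical `sshClient` value:
//
//	stream, err := sshClient.SSHReceivePack(ctx)
//	// then interleave stream.Send(...) and stream.Recv() to proxy the git session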
if err != nil { return nil, err } x := &sSHSSHReceivePackClient{stream} return x, nil } type SSH_SSHReceivePackClient interface { Send(*SSHReceivePackRequest) error Recv() (*SSHReceivePackResponse, error) grpc.ClientStream } type sSHSSHReceivePackClient struct { grpc.ClientStream } func (x *sSHSSHReceivePackClient) Send(m *SSHReceivePackRequest) error { return x.ClientStream.SendMsg(m) } func (x *sSHSSHReceivePackClient) Recv() (*SSHReceivePackResponse, error) { m := new(SSHReceivePackResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for SSH service type SSHServer interface { // To forward 'git upload-pack' to Gitaly for SSH sessions SSHUploadPack(SSH_SSHUploadPackServer) error // To forward 'git receive-pack' to Gitaly for SSH sessions SSHReceivePack(SSH_SSHReceivePackServer) error } func RegisterSSHServer(s *grpc.Server, srv SSHServer) { s.RegisterService(&_SSH_serviceDesc, srv) } func _SSH_SSHUploadPack_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(SSHServer).SSHUploadPack(&sSHSSHUploadPackServer{stream}) } type SSH_SSHUploadPackServer interface { Send(*SSHUploadPackResponse) error Recv() (*SSHUploadPackRequest, error) grpc.ServerStream } type sSHSSHUploadPackServer struct { grpc.ServerStream } func (x *sSHSSHUploadPackServer) Send(m *SSHUploadPackResponse) error { return x.ServerStream.SendMsg(m) } func (x *sSHSSHUploadPackServer) Recv() (*SSHUploadPackRequest, error) { m := new(SSHUploadPackRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _SSH_SSHReceivePack_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(SSHServer).SSHReceivePack(&sSHSSHReceivePackServer{stream}) } type SSH_SSHReceivePackServer interface { Send(*SSHReceivePackResponse) error Recv() (*SSHReceivePackRequest, error) grpc.ServerStream } type sSHSSHReceivePackServer struct { grpc.ServerStream } func (x *sSHSSHReceivePackServer) Send(m *SSHReceivePackResponse) error { return x.ServerStream.SendMsg(m) } func (x *sSHSSHReceivePackServer) Recv() (*SSHReceivePackRequest, error) { m := new(SSHReceivePackRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _SSH_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.SSH", HandlerType: (*SSHServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "SSHUploadPack", Handler: _SSH_SSHUploadPack_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "SSHReceivePack", Handler: _SSH_SSHReceivePack_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "deprecated-services.proto", } func init() { proto.RegisterFile("deprecated-services.proto", fileDescriptor3) } var fileDescriptor3 = []byte{ // 534 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0x5d, 0x6e, 0xd3, 0x40, 0x10, 0x80, 0x31, 0x41, 0x91, 0x32, 0xa1, 0x0d, 0x6c, 0x85, 0x68, 0x0c, 0x4d, 0xda, 0x0a, 0x24, 0x5e, 0x88, 0xaa, 0x70, 0x82, 0x42, 0x00, 0x17, 0xaa, 0x2a, 0xb2, 0x13, 0x7e, 0x24, 0x24, 0xb4, 0x38, 0xb3, 0x8d, 0x85, 0xe3, 0x0d, 0xde, 0x6d, 0xa5, 0x9e, 0x85, 0x03, 0xf0, 0xca, 0x49, 0x78, 0xe5, 0x3a, 0xc8, 0x3f, 0x13, 0xaf, 0x13, 0xbb, 0x3c, 0xf4, 0x6d, 0x33, 0xdf, 0xcc, 0xb7, 0x93, 0xf1, 0xd8, 0xd0, 0x9d, 0xe1, 0x32, 0x46, 0x9f, 0x6b, 0x9c, 0x3d, 0x57, 0x18, 0x5f, 0x06, 0x3e, 0xaa, 0xc1, 0x32, 0x96, 0x5a, 0xb2, 0xe6, 0x79, 0xa0, 0x79, 0x78, 0x65, 0xdf, 0xf5, 0xe5, 0x62, 0x11, 0xe8, 0x2c, 0x6a, 0xc3, 0x2c, 0x10, 0x22, 
0x3f, 0xef, 0x44, 0x52, 0x07, 0x22, 0xf0, 0xb9, 0x0e, 0x64, 0x94, 0x97, 0xd9, 0xad, 0x18, 0x89, 0x77, 0xd4, 0x82, 0xc7, 0x7a, 0xae, 0xf5, 0x92, 0x98, 0x52, 0xf3, 0xec, 0x38, 0xfc, 0x65, 0x41, 0xf3, 0x55, 0x2a, 0x66, 0x1f, 0xe1, 0x5e, 0x76, 0x3a, 0x51, 0xc7, 0x91, 0x8f, 0x4a, 0xcb, 0x98, 0xf5, 0x07, 0xd9, 0xed, 0x83, 0x75, 0xe2, 0xe2, 0x8f, 0x0b, 0x54, 0xda, 0xde, 0xaf, 0x4f, 0x50, 0x4b, 0x19, 0x29, 0x3c, 0xbc, 0xc5, 0x46, 0xd0, 0x9a, 0xc4, 0x88, 0xaf, 0x23, 0x1d, 0x5f, 0xb1, 0x5d, 0x2a, 0x58, 0x85, 0x48, 0xd5, 0xad, 0x20, 0xe4, 0x38, 0xb2, 0x86, 0x3f, 0x2d, 0xb8, 0x33, 0x0a, 0x84, 0x60, 0x6f, 0x01, 0xb2, 0xcb, 0xd2, 0x5f, 0xdd, 0x72, 0x03, 0x49, 0x8c, 0x84, 0x76, 0x15, 0x2a, 0x8c, 0xec, 0x1d, 0xb4, 0x73, 0x82, 0xa1, 0xe6, 0x6c, 0x3d, 0x3d, 0x09, 0x92, 0xea, 0x51, 0x25, 0x33, 0xba, 0xfb, 0x0c, 0x5b, 0x67, 0xe6, 0x53, 0x60, 0x0e, 0xb4, 0xc7, 0x52, 0x69, 0x17, 0x7d, 0x0c, 0x2e, 0xb1, 0x90, 0x1b, 0xc1, 0x0d, 0x79, 0x89, 0x91, 0x7c, 0xf8, 0xa7, 0x01, 0x0d, 0x17, 0x05, 0x13, 0xf0, 0xe0, 0x4d, 0x10, 0xcd, 0x46, 0x28, 0xf8, 0x45, 0xa8, 0x5f, 0xc6, 0x3c, 0xf2, 0xe7, 0x67, 0x7c, 0x81, 0xec, 0x09, 0xd5, 0x57, 0x62, 0xba, 0xe5, 0xe9, 0x7f, 0xb2, 0x56, 0x8f, 0xeb, 0x2b, 0xb0, 0x24, 0xe5, 0x38, 0x0c, 0x0b, 0xac, 0xd8, 0x81, 0x59, 0x5e, 0x66, 0x74, 0xc3, 0xe1, 0x75, 0x29, 0xc6, 0xdc, 0x3f, 0x40, 0x27, 0xcf, 0x98, 0xf0, 0xf3, 0xcc, 0xde, 0x5b, 0x2b, 0x25, 0x40, 0xea, 0x7e, 0x2d, 0x37, 0xbc, 0x0e, 0xb4, 0x13, 0xec, 0xa2, 0x48, 0xc7, 0x62, 0x9b, 0x35, 0x79, 0x70, 0x63, 0xe4, 0x25, 0xb6, 0x1a, 0xc1, 0x17, 0xb8, 0x9f, 0x80, 0x53, 0xe9, 0xf3, 0xfc, 0x5f, 0xa0, 0x62, 0xfb, 0x66, 0x4d, 0x09, 0x91, 0xf5, 0xe0, 0x9a, 0x0c, 0x63, 0x57, 0xfe, 0xde, 0x86, 0x96, 0x97, 0xbc, 0x92, 0xce, 0x64, 0x32, 0x66, 0xef, 0x81, 0x9d, 0x44, 0x42, 0xba, 0x28, 0xd4, 0x74, 0x19, 0x4a, 0x3e, 0x1b, 0x73, 0xff, 0x3b, 0x7b, 0x48, 0x2a, 0x62, 0x74, 0xc7, 0xee, 0x26, 0x30, 0x46, 0x70, 0x0a, 0x3b, 0x45, 0x3c, 0x5d, 0xa4, 0x9b, 0xd8, 0xa6, 0xb0, 0x9d, 0xac, 0xa4, 0xd1, 0xd6, 0x9e, 0xb9, 0xaa, 0x45, 0x9c, 0x74, 0xbd, 0x3a, 0x4c, 0xd2, 0x67, 0xd6, 0x91, 0xc5, 0x3e, 0x41, 0xc7, 0xd8, 0xf4, 0xd4, 0xdb, 0xab, 0x78, 0x05, 0x4c, 0x71, 0xbf, 0x96, 0x9b, 0xe6, 0xe1, 0x6f, 0x0b, 0x1a, 0x9e, 0xe7, 0x30, 0x17, 0xb6, 0x3c, 0xcf, 0x31, 0xfa, 0x7e, 0x4c, 0xf5, 0xa5, 0x30, 0xd9, 0xf7, 0x6a, 0x68, 0xa9, 0xeb, 0x29, 0x6c, 0x7b, 0x9e, 0x63, 0x36, 0x6d, 0x96, 0x55, 0xf4, 0xdc, 0xab, 0xc3, 0xa6, 0xf6, 0x5b, 0x33, 0xfd, 0x0e, 0xbf, 0xf8, 0x17, 0x00, 0x00, 0xff, 0xff, 0x79, 0x83, 0xb6, 0xb0, 0x02, 0x06, 0x00, 0x00, } diff.pb.go000066400000000000000000000721741324746544700346210ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: diff.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type CommitDiffRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` LeftCommitId string `protobuf:"bytes,2,opt,name=left_commit_id,json=leftCommitId" json:"left_commit_id,omitempty"` RightCommitId string `protobuf:"bytes,3,opt,name=right_commit_id,json=rightCommitId" json:"right_commit_id,omitempty"` IgnoreWhitespaceChange bool `protobuf:"varint,4,opt,name=ignore_whitespace_change,json=ignoreWhitespaceChange" json:"ignore_whitespace_change,omitempty"` Paths [][]byte `protobuf:"bytes,5,rep,name=paths,proto3" json:"paths,omitempty"` CollapseDiffs bool `protobuf:"varint,6,opt,name=collapse_diffs,json=collapseDiffs" json:"collapse_diffs,omitempty"` EnforceLimits bool `protobuf:"varint,7,opt,name=enforce_limits,json=enforceLimits" json:"enforce_limits,omitempty"` MaxFiles int32 `protobuf:"varint,8,opt,name=max_files,json=maxFiles" json:"max_files,omitempty"` MaxLines int32 `protobuf:"varint,9,opt,name=max_lines,json=maxLines" json:"max_lines,omitempty"` MaxBytes int32 `protobuf:"varint,10,opt,name=max_bytes,json=maxBytes" json:"max_bytes,omitempty"` SafeMaxFiles int32 `protobuf:"varint,11,opt,name=safe_max_files,json=safeMaxFiles" json:"safe_max_files,omitempty"` SafeMaxLines int32 `protobuf:"varint,12,opt,name=safe_max_lines,json=safeMaxLines" json:"safe_max_lines,omitempty"` SafeMaxBytes int32 `protobuf:"varint,13,opt,name=safe_max_bytes,json=safeMaxBytes" json:"safe_max_bytes,omitempty"` } func (m *CommitDiffRequest) Reset() { *m = CommitDiffRequest{} } func (m *CommitDiffRequest) String() string { return proto.CompactTextString(m) } func (*CommitDiffRequest) ProtoMessage() {} func (*CommitDiffRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } func (m *CommitDiffRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *CommitDiffRequest) GetLeftCommitId() string { if m != nil { return m.LeftCommitId } return "" } func (m *CommitDiffRequest) GetRightCommitId() string { if m != nil { return m.RightCommitId } return "" } func (m *CommitDiffRequest) GetIgnoreWhitespaceChange() bool { if m != nil { return m.IgnoreWhitespaceChange } return false } func (m *CommitDiffRequest) GetPaths() [][]byte { if m != nil { return m.Paths } return nil } func (m *CommitDiffRequest) GetCollapseDiffs() bool { if m != nil { return m.CollapseDiffs } return false } func (m *CommitDiffRequest) GetEnforceLimits() bool { if m != nil { return m.EnforceLimits } return false } func (m *CommitDiffRequest) GetMaxFiles() int32 { if m != nil { return m.MaxFiles } return 0 } func (m *CommitDiffRequest) GetMaxLines() int32 { if m != nil { return m.MaxLines } return 0 } func (m *CommitDiffRequest) GetMaxBytes() int32 { if m != nil { return m.MaxBytes } return 0 } func (m *CommitDiffRequest) GetSafeMaxFiles() int32 { if m != nil { return m.SafeMaxFiles } return 0 } func (m *CommitDiffRequest) GetSafeMaxLines() int32 { if m != nil { return m.SafeMaxLines } return 0 } func (m *CommitDiffRequest) GetSafeMaxBytes() int32 { if m != nil { return m.SafeMaxBytes } return 0 } // A CommitDiffResponse corresponds to a single changed file in a commit. 
type CommitDiffResponse struct { FromPath []byte `protobuf:"bytes,1,opt,name=from_path,json=fromPath,proto3" json:"from_path,omitempty"` ToPath []byte `protobuf:"bytes,2,opt,name=to_path,json=toPath,proto3" json:"to_path,omitempty"` // Blob ID as returned via `git diff --full-index` FromId string `protobuf:"bytes,3,opt,name=from_id,json=fromId" json:"from_id,omitempty"` ToId string `protobuf:"bytes,4,opt,name=to_id,json=toId" json:"to_id,omitempty"` OldMode int32 `protobuf:"varint,5,opt,name=old_mode,json=oldMode" json:"old_mode,omitempty"` NewMode int32 `protobuf:"varint,6,opt,name=new_mode,json=newMode" json:"new_mode,omitempty"` Binary bool `protobuf:"varint,7,opt,name=binary" json:"binary,omitempty"` RawPatchData []byte `protobuf:"bytes,9,opt,name=raw_patch_data,json=rawPatchData,proto3" json:"raw_patch_data,omitempty"` EndOfPatch bool `protobuf:"varint,10,opt,name=end_of_patch,json=endOfPatch" json:"end_of_patch,omitempty"` // Indicates the diff file at which we overflow according to the limitations sent, // in which case only this attribute will be set. OverflowMarker bool `protobuf:"varint,11,opt,name=overflow_marker,json=overflowMarker" json:"overflow_marker,omitempty"` Collapsed bool `protobuf:"varint,12,opt,name=collapsed" json:"collapsed,omitempty"` } func (m *CommitDiffResponse) Reset() { *m = CommitDiffResponse{} } func (m *CommitDiffResponse) String() string { return proto.CompactTextString(m) } func (*CommitDiffResponse) ProtoMessage() {} func (*CommitDiffResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } func (m *CommitDiffResponse) GetFromPath() []byte { if m != nil { return m.FromPath } return nil } func (m *CommitDiffResponse) GetToPath() []byte { if m != nil { return m.ToPath } return nil } func (m *CommitDiffResponse) GetFromId() string { if m != nil { return m.FromId } return "" } func (m *CommitDiffResponse) GetToId() string { if m != nil { return m.ToId } return "" } func (m *CommitDiffResponse) GetOldMode() int32 { if m != nil { return m.OldMode } return 0 } func (m *CommitDiffResponse) GetNewMode() int32 { if m != nil { return m.NewMode } return 0 } func (m *CommitDiffResponse) GetBinary() bool { if m != nil { return m.Binary } return false } func (m *CommitDiffResponse) GetRawPatchData() []byte { if m != nil { return m.RawPatchData } return nil } func (m *CommitDiffResponse) GetEndOfPatch() bool { if m != nil { return m.EndOfPatch } return false } func (m *CommitDiffResponse) GetOverflowMarker() bool { if m != nil { return m.OverflowMarker } return false } func (m *CommitDiffResponse) GetCollapsed() bool { if m != nil { return m.Collapsed } return false } type CommitDeltaRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` LeftCommitId string `protobuf:"bytes,2,opt,name=left_commit_id,json=leftCommitId" json:"left_commit_id,omitempty"` RightCommitId string `protobuf:"bytes,3,opt,name=right_commit_id,json=rightCommitId" json:"right_commit_id,omitempty"` Paths [][]byte `protobuf:"bytes,4,rep,name=paths,proto3" json:"paths,omitempty"` } func (m *CommitDeltaRequest) Reset() { *m = CommitDeltaRequest{} } func (m *CommitDeltaRequest) String() string { return proto.CompactTextString(m) } func (*CommitDeltaRequest) ProtoMessage() {} func (*CommitDeltaRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } func (m *CommitDeltaRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *CommitDeltaRequest) GetLeftCommitId() string { if m 
!= nil { return m.LeftCommitId } return "" } func (m *CommitDeltaRequest) GetRightCommitId() string { if m != nil { return m.RightCommitId } return "" } func (m *CommitDeltaRequest) GetPaths() [][]byte { if m != nil { return m.Paths } return nil } type CommitDelta struct { FromPath []byte `protobuf:"bytes,1,opt,name=from_path,json=fromPath,proto3" json:"from_path,omitempty"` ToPath []byte `protobuf:"bytes,2,opt,name=to_path,json=toPath,proto3" json:"to_path,omitempty"` // Blob ID as returned via `git diff --full-index` FromId string `protobuf:"bytes,3,opt,name=from_id,json=fromId" json:"from_id,omitempty"` ToId string `protobuf:"bytes,4,opt,name=to_id,json=toId" json:"to_id,omitempty"` OldMode int32 `protobuf:"varint,5,opt,name=old_mode,json=oldMode" json:"old_mode,omitempty"` NewMode int32 `protobuf:"varint,6,opt,name=new_mode,json=newMode" json:"new_mode,omitempty"` } func (m *CommitDelta) Reset() { *m = CommitDelta{} } func (m *CommitDelta) String() string { return proto.CompactTextString(m) } func (*CommitDelta) ProtoMessage() {} func (*CommitDelta) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } func (m *CommitDelta) GetFromPath() []byte { if m != nil { return m.FromPath } return nil } func (m *CommitDelta) GetToPath() []byte { if m != nil { return m.ToPath } return nil } func (m *CommitDelta) GetFromId() string { if m != nil { return m.FromId } return "" } func (m *CommitDelta) GetToId() string { if m != nil { return m.ToId } return "" } func (m *CommitDelta) GetOldMode() int32 { if m != nil { return m.OldMode } return 0 } func (m *CommitDelta) GetNewMode() int32 { if m != nil { return m.NewMode } return 0 } type CommitDeltaResponse struct { Deltas []*CommitDelta `protobuf:"bytes,1,rep,name=deltas" json:"deltas,omitempty"` } func (m *CommitDeltaResponse) Reset() { *m = CommitDeltaResponse{} } func (m *CommitDeltaResponse) String() string { return proto.CompactTextString(m) } func (*CommitDeltaResponse) ProtoMessage() {} func (*CommitDeltaResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} } func (m *CommitDeltaResponse) GetDeltas() []*CommitDelta { if m != nil { return m.Deltas } return nil } type CommitPatchRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` } func (m *CommitPatchRequest) Reset() { *m = CommitPatchRequest{} } func (m *CommitPatchRequest) String() string { return proto.CompactTextString(m) } func (*CommitPatchRequest) ProtoMessage() {} func (*CommitPatchRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{5} } func (m *CommitPatchRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *CommitPatchRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } type CommitPatchResponse struct { Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` } func (m *CommitPatchResponse) Reset() { *m = CommitPatchResponse{} } func (m *CommitPatchResponse) String() string { return proto.CompactTextString(m) } func (*CommitPatchResponse) ProtoMessage() {} func (*CommitPatchResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{6} } func (m *CommitPatchResponse) GetData() []byte { if m != nil { return m.Data } return nil } type RawDiffRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` LeftCommitId string 
`protobuf:"bytes,2,opt,name=left_commit_id,json=leftCommitId" json:"left_commit_id,omitempty"` RightCommitId string `protobuf:"bytes,3,opt,name=right_commit_id,json=rightCommitId" json:"right_commit_id,omitempty"` } func (m *RawDiffRequest) Reset() { *m = RawDiffRequest{} } func (m *RawDiffRequest) String() string { return proto.CompactTextString(m) } func (*RawDiffRequest) ProtoMessage() {} func (*RawDiffRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{7} } func (m *RawDiffRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *RawDiffRequest) GetLeftCommitId() string { if m != nil { return m.LeftCommitId } return "" } func (m *RawDiffRequest) GetRightCommitId() string { if m != nil { return m.RightCommitId } return "" } type RawDiffResponse struct { Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` } func (m *RawDiffResponse) Reset() { *m = RawDiffResponse{} } func (m *RawDiffResponse) String() string { return proto.CompactTextString(m) } func (*RawDiffResponse) ProtoMessage() {} func (*RawDiffResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{8} } func (m *RawDiffResponse) GetData() []byte { if m != nil { return m.Data } return nil } type RawPatchRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` LeftCommitId string `protobuf:"bytes,2,opt,name=left_commit_id,json=leftCommitId" json:"left_commit_id,omitempty"` RightCommitId string `protobuf:"bytes,3,opt,name=right_commit_id,json=rightCommitId" json:"right_commit_id,omitempty"` } func (m *RawPatchRequest) Reset() { *m = RawPatchRequest{} } func (m *RawPatchRequest) String() string { return proto.CompactTextString(m) } func (*RawPatchRequest) ProtoMessage() {} func (*RawPatchRequest) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{9} } func (m *RawPatchRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *RawPatchRequest) GetLeftCommitId() string { if m != nil { return m.LeftCommitId } return "" } func (m *RawPatchRequest) GetRightCommitId() string { if m != nil { return m.RightCommitId } return "" } type RawPatchResponse struct { Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` } func (m *RawPatchResponse) Reset() { *m = RawPatchResponse{} } func (m *RawPatchResponse) String() string { return proto.CompactTextString(m) } func (*RawPatchResponse) ProtoMessage() {} func (*RawPatchResponse) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{10} } func (m *RawPatchResponse) GetData() []byte { if m != nil { return m.Data } return nil } func init() { proto.RegisterType((*CommitDiffRequest)(nil), "gitaly.CommitDiffRequest") proto.RegisterType((*CommitDiffResponse)(nil), "gitaly.CommitDiffResponse") proto.RegisterType((*CommitDeltaRequest)(nil), "gitaly.CommitDeltaRequest") proto.RegisterType((*CommitDelta)(nil), "gitaly.CommitDelta") proto.RegisterType((*CommitDeltaResponse)(nil), "gitaly.CommitDeltaResponse") proto.RegisterType((*CommitPatchRequest)(nil), "gitaly.CommitPatchRequest") proto.RegisterType((*CommitPatchResponse)(nil), "gitaly.CommitPatchResponse") proto.RegisterType((*RawDiffRequest)(nil), "gitaly.RawDiffRequest") proto.RegisterType((*RawDiffResponse)(nil), "gitaly.RawDiffResponse") proto.RegisterType((*RawPatchRequest)(nil), "gitaly.RawPatchRequest") proto.RegisterType((*RawPatchResponse)(nil), "gitaly.RawPatchResponse") } // Reference imports to suppress errors if they are 
not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for DiffService service type DiffServiceClient interface { // Returns stream of CommitDiffResponse with patches chunked over messages CommitDiff(ctx context.Context, in *CommitDiffRequest, opts ...grpc.CallOption) (DiffService_CommitDiffClient, error) // Return a stream so we can divide the response in chunks of deltas CommitDelta(ctx context.Context, in *CommitDeltaRequest, opts ...grpc.CallOption) (DiffService_CommitDeltaClient, error) CommitPatch(ctx context.Context, in *CommitPatchRequest, opts ...grpc.CallOption) (DiffService_CommitPatchClient, error) RawDiff(ctx context.Context, in *RawDiffRequest, opts ...grpc.CallOption) (DiffService_RawDiffClient, error) RawPatch(ctx context.Context, in *RawPatchRequest, opts ...grpc.CallOption) (DiffService_RawPatchClient, error) } type diffServiceClient struct { cc *grpc.ClientConn } func NewDiffServiceClient(cc *grpc.ClientConn) DiffServiceClient { return &diffServiceClient{cc} } func (c *diffServiceClient) CommitDiff(ctx context.Context, in *CommitDiffRequest, opts ...grpc.CallOption) (DiffService_CommitDiffClient, error) { stream, err := grpc.NewClientStream(ctx, &_DiffService_serviceDesc.Streams[0], c.cc, "/gitaly.DiffService/CommitDiff", opts...) if err != nil { return nil, err } x := &diffServiceCommitDiffClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type DiffService_CommitDiffClient interface { Recv() (*CommitDiffResponse, error) grpc.ClientStream } type diffServiceCommitDiffClient struct { grpc.ClientStream } func (x *diffServiceCommitDiffClient) Recv() (*CommitDiffResponse, error) { m := new(CommitDiffResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *diffServiceClient) CommitDelta(ctx context.Context, in *CommitDeltaRequest, opts ...grpc.CallOption) (DiffService_CommitDeltaClient, error) { stream, err := grpc.NewClientStream(ctx, &_DiffService_serviceDesc.Streams[1], c.cc, "/gitaly.DiffService/CommitDelta", opts...) if err != nil { return nil, err } x := &diffServiceCommitDeltaClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type DiffService_CommitDeltaClient interface { Recv() (*CommitDeltaResponse, error) grpc.ClientStream } type diffServiceCommitDeltaClient struct { grpc.ClientStream } func (x *diffServiceCommitDeltaClient) Recv() (*CommitDeltaResponse, error) { m := new(CommitDeltaResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *diffServiceClient) CommitPatch(ctx context.Context, in *CommitPatchRequest, opts ...grpc.CallOption) (DiffService_CommitPatchClient, error) { stream, err := grpc.NewClientStream(ctx, &_DiffService_serviceDesc.Streams[2], c.cc, "/gitaly.DiffService/CommitPatch", opts...) 
if err != nil { return nil, err } x := &diffServiceCommitPatchClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type DiffService_CommitPatchClient interface { Recv() (*CommitPatchResponse, error) grpc.ClientStream } type diffServiceCommitPatchClient struct { grpc.ClientStream } func (x *diffServiceCommitPatchClient) Recv() (*CommitPatchResponse, error) { m := new(CommitPatchResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *diffServiceClient) RawDiff(ctx context.Context, in *RawDiffRequest, opts ...grpc.CallOption) (DiffService_RawDiffClient, error) { stream, err := grpc.NewClientStream(ctx, &_DiffService_serviceDesc.Streams[3], c.cc, "/gitaly.DiffService/RawDiff", opts...) if err != nil { return nil, err } x := &diffServiceRawDiffClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type DiffService_RawDiffClient interface { Recv() (*RawDiffResponse, error) grpc.ClientStream } type diffServiceRawDiffClient struct { grpc.ClientStream } func (x *diffServiceRawDiffClient) Recv() (*RawDiffResponse, error) { m := new(RawDiffResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *diffServiceClient) RawPatch(ctx context.Context, in *RawPatchRequest, opts ...grpc.CallOption) (DiffService_RawPatchClient, error) { stream, err := grpc.NewClientStream(ctx, &_DiffService_serviceDesc.Streams[4], c.cc, "/gitaly.DiffService/RawPatch", opts...) if err != nil { return nil, err } x := &diffServiceRawPatchClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type DiffService_RawPatchClient interface { Recv() (*RawPatchResponse, error) grpc.ClientStream } type diffServiceRawPatchClient struct { grpc.ClientStream } func (x *diffServiceRawPatchClient) Recv() (*RawPatchResponse, error) { m := new(RawPatchResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for DiffService service type DiffServiceServer interface { // Returns stream of CommitDiffResponse with patches chunked over messages CommitDiff(*CommitDiffRequest, DiffService_CommitDiffServer) error // Return a stream so we can divide the response in chunks of deltas CommitDelta(*CommitDeltaRequest, DiffService_CommitDeltaServer) error CommitPatch(*CommitPatchRequest, DiffService_CommitPatchServer) error RawDiff(*RawDiffRequest, DiffService_RawDiffServer) error RawPatch(*RawPatchRequest, DiffService_RawPatchServer) error } func RegisterDiffServiceServer(s *grpc.Server, srv DiffServiceServer) { s.RegisterService(&_DiffService_serviceDesc, srv) } func _DiffService_CommitDiff_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(CommitDiffRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(DiffServiceServer).CommitDiff(m, &diffServiceCommitDiffServer{stream}) } type DiffService_CommitDiffServer interface { Send(*CommitDiffResponse) error grpc.ServerStream } type diffServiceCommitDiffServer struct { grpc.ServerStream } func (x *diffServiceCommitDiffServer) Send(m *CommitDiffResponse) error { return x.ServerStream.SendMsg(m) } func _DiffService_CommitDelta_Handler(srv interface{}, stream grpc.ServerStream) error { m := 
new(CommitDeltaRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(DiffServiceServer).CommitDelta(m, &diffServiceCommitDeltaServer{stream}) } type DiffService_CommitDeltaServer interface { Send(*CommitDeltaResponse) error grpc.ServerStream } type diffServiceCommitDeltaServer struct { grpc.ServerStream } func (x *diffServiceCommitDeltaServer) Send(m *CommitDeltaResponse) error { return x.ServerStream.SendMsg(m) } func _DiffService_CommitPatch_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(CommitPatchRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(DiffServiceServer).CommitPatch(m, &diffServiceCommitPatchServer{stream}) } type DiffService_CommitPatchServer interface { Send(*CommitPatchResponse) error grpc.ServerStream } type diffServiceCommitPatchServer struct { grpc.ServerStream } func (x *diffServiceCommitPatchServer) Send(m *CommitPatchResponse) error { return x.ServerStream.SendMsg(m) } func _DiffService_RawDiff_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(RawDiffRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(DiffServiceServer).RawDiff(m, &diffServiceRawDiffServer{stream}) } type DiffService_RawDiffServer interface { Send(*RawDiffResponse) error grpc.ServerStream } type diffServiceRawDiffServer struct { grpc.ServerStream } func (x *diffServiceRawDiffServer) Send(m *RawDiffResponse) error { return x.ServerStream.SendMsg(m) } func _DiffService_RawPatch_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(RawPatchRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(DiffServiceServer).RawPatch(m, &diffServiceRawPatchServer{stream}) } type DiffService_RawPatchServer interface { Send(*RawPatchResponse) error grpc.ServerStream } type diffServiceRawPatchServer struct { grpc.ServerStream } func (x *diffServiceRawPatchServer) Send(m *RawPatchResponse) error { return x.ServerStream.SendMsg(m) } var _DiffService_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.DiffService", HandlerType: (*DiffServiceServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "CommitDiff", Handler: _DiffService_CommitDiff_Handler, ServerStreams: true, }, { StreamName: "CommitDelta", Handler: _DiffService_CommitDelta_Handler, ServerStreams: true, }, { StreamName: "CommitPatch", Handler: _DiffService_CommitPatch_Handler, ServerStreams: true, }, { StreamName: "RawDiff", Handler: _DiffService_RawDiff_Handler, ServerStreams: true, }, { StreamName: "RawPatch", Handler: _DiffService_RawPatch_Handler, ServerStreams: true, }, }, Metadata: "diff.proto", } func init() { proto.RegisterFile("diff.proto", fileDescriptor4) } var fileDescriptor4 = []byte{ // 753 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0x4d, 0x6f, 0xdb, 0x46, 0x10, 0x2d, 0xf5, 0x41, 0x51, 0x23, 0x5a, 0x76, 0xd7, 0x85, 0x4d, 0xcb, 0x3d, 0x08, 0x44, 0xed, 0xaa, 0x28, 0x60, 0x14, 0xea, 0xa5, 0xa7, 0x02, 0xb5, 0x8d, 0x16, 0x36, 0x6c, 0xd4, 0x60, 0x0e, 0x39, 0x12, 0x6b, 0xed, 0x52, 0x5a, 0x84, 0xe4, 0x2a, 0xbb, 0x1b, 0xcb, 0xfa, 0x1b, 0xc9, 0x8f, 0xc8, 0x25, 0xff, 0x28, 0xbf, 0x22, 0xf7, 0x1c, 0x82, 0xdd, 0x25, 0x29, 0xca, 0x56, 0x72, 0x71, 0x0e, 0xbe, 0x69, 0xdf, 0x7b, 0x9c, 0x19, 0xbe, 0x37, 0x4b, 0x08, 0x80, 0xb0, 0x24, 0x39, 0x99, 0x0b, 0xae, 0x38, 0x72, 0xa7, 0x4c, 0xe1, 0x74, 0x39, 0xf0, 0xe5, 0x0c, 0x0b, 0x4a, 0x2c, 0x1a, 0x7e, 0x6e, 0xc2, 0x8f, 0x67, 0x3c, 0xcb, 0x98, 0x3a, 0x67, 0x49, 0x12, 0xd1, 0xd7, 
0x6f, 0xa8, 0x54, 0x68, 0x0c, 0x20, 0xe8, 0x9c, 0x4b, 0xa6, 0xb8, 0x58, 0x06, 0xce, 0xd0, 0x19, 0xf5, 0xc6, 0xe8, 0xc4, 0x16, 0x38, 0x89, 0x2a, 0x26, 0xaa, 0xa9, 0xd0, 0x2f, 0xd0, 0x4f, 0x69, 0xa2, 0xe2, 0x89, 0xa9, 0x16, 0x33, 0x12, 0x34, 0x86, 0xce, 0xa8, 0x1b, 0xf9, 0x1a, 0xb5, 0x2d, 0x2e, 0x08, 0x3a, 0x86, 0x6d, 0xc1, 0xa6, 0xb3, 0xba, 0xac, 0x69, 0x64, 0x5b, 0x06, 0xae, 0x74, 0x7f, 0x41, 0xc0, 0xa6, 0x39, 0x17, 0x34, 0x5e, 0xcc, 0x98, 0xa2, 0x72, 0x8e, 0x27, 0x34, 0x9e, 0xcc, 0x70, 0x3e, 0xa5, 0x41, 0x6b, 0xe8, 0x8c, 0xbc, 0x68, 0xcf, 0xf2, 0x2f, 0x2b, 0xfa, 0xcc, 0xb0, 0xe8, 0x27, 0x68, 0xcf, 0xb1, 0x9a, 0xc9, 0xa0, 0x3d, 0x6c, 0x8e, 0xfc, 0xc8, 0x1e, 0xd0, 0x11, 0xf4, 0x27, 0x3c, 0x4d, 0xf1, 0x5c, 0xd2, 0x58, 0x9b, 0x22, 0x03, 0xd7, 0x54, 0xd9, 0x2a, 0x51, 0xfd, 0xfa, 0x46, 0x46, 0xf3, 0x84, 0x8b, 0x09, 0x8d, 0x53, 0x96, 0x31, 0x25, 0x83, 0x8e, 0x95, 0x15, 0xe8, 0x95, 0x01, 0xd1, 0x21, 0x74, 0x33, 0x7c, 0x1f, 0x27, 0x2c, 0xa5, 0x32, 0xf0, 0x86, 0xce, 0xa8, 0x1d, 0x79, 0x19, 0xbe, 0xff, 0x57, 0x9f, 0x4b, 0x32, 0x65, 0x39, 0x95, 0x41, 0xb7, 0x22, 0xaf, 0xf4, 0xb9, 0x24, 0x6f, 0x97, 0x8a, 0xca, 0x00, 0x2a, 0xf2, 0x54, 0x9f, 0xb5, 0x85, 0x12, 0x27, 0x34, 0x5e, 0xd5, 0xee, 0x19, 0x85, 0xaf, 0xd1, 0xeb, 0xb2, 0x7e, 0x5d, 0x65, 0x9b, 0xf8, 0x6b, 0x2a, 0xdb, 0xa8, 0xae, 0xb2, 0xdd, 0xb6, 0xd6, 0x54, 0xa6, 0x63, 0xf8, 0xb1, 0x01, 0xa8, 0x1e, 0xbf, 0x9c, 0xf3, 0x5c, 0x52, 0x3d, 0x65, 0x22, 0x78, 0x16, 0x6b, 0xef, 0x4c, 0xfc, 0x7e, 0xe4, 0x69, 0xe0, 0x06, 0xab, 0x19, 0xda, 0x87, 0x8e, 0xe2, 0x96, 0x6a, 0x18, 0xca, 0x55, 0xbc, 0x24, 0xcc, 0x53, 0x55, 0xa6, 0xae, 0x3e, 0x5e, 0x10, 0xb4, 0x0b, 0x6d, 0xc5, 0x35, 0xdc, 0x32, 0x70, 0x4b, 0xf1, 0x0b, 0x82, 0x0e, 0xc0, 0xe3, 0x29, 0x89, 0x33, 0x4e, 0x68, 0xd0, 0x36, 0xa3, 0x75, 0x78, 0x4a, 0xae, 0x39, 0xa1, 0x9a, 0xca, 0xe9, 0xc2, 0x52, 0xae, 0xa5, 0x72, 0xba, 0x30, 0xd4, 0x1e, 0xb8, 0xb7, 0x2c, 0xc7, 0x62, 0x59, 0x04, 0x53, 0x9c, 0xf4, 0xeb, 0x0a, 0xbc, 0xd0, 0x53, 0x4d, 0x66, 0x31, 0xc1, 0x0a, 0x1b, 0xe7, 0xfd, 0xc8, 0x17, 0x78, 0x71, 0xa3, 0xc1, 0x73, 0xac, 0x30, 0x1a, 0x82, 0x4f, 0x73, 0x12, 0xf3, 0xc4, 0x0a, 0x4d, 0x00, 0x5e, 0x04, 0x34, 0x27, 0xff, 0x27, 0x46, 0x85, 0x7e, 0x85, 0x6d, 0x7e, 0x47, 0x45, 0x92, 0xf2, 0x45, 0x9c, 0x61, 0xf1, 0x8a, 0x0a, 0x93, 0x81, 0x17, 0xf5, 0x4b, 0xf8, 0xda, 0xa0, 0xe8, 0x67, 0xe8, 0x96, 0xab, 0x43, 0x4c, 0x00, 0x5e, 0xb4, 0x02, 0x2e, 0x5b, 0x9e, 0xb7, 0xd3, 0x0d, 0x3f, 0x38, 0x95, 0xbb, 0x34, 0x55, 0xf8, 0xf9, 0xdc, 0xae, 0xea, 0x8e, 0xb4, 0x6a, 0x77, 0x24, 0x7c, 0xef, 0x40, 0xaf, 0x36, 0xee, 0xf3, 0xdd, 0x82, 0xf0, 0x14, 0x76, 0xd7, 0x7c, 0x2d, 0xd6, 0xf6, 0x77, 0x70, 0x89, 0x06, 0x64, 0xe0, 0x0c, 0x9b, 0xa3, 0xde, 0x78, 0xb7, 0x34, 0xb5, 0x2e, 0x2e, 0x24, 0x21, 0x29, 0xb3, 0x31, 0xc1, 0x3f, 0x25, 0x9b, 0x01, 0x78, 0x82, 0xde, 0x31, 0xc9, 0x78, 0x5e, 0x78, 0x51, 0x9d, 0xc3, 0xdf, 0xca, 0x49, 0x8b, 0x2e, 0xc5, 0xa4, 0x08, 0x5a, 0x66, 0x49, 0xad, 0xab, 0xe6, 0x77, 0xf8, 0xd6, 0x81, 0x7e, 0x84, 0x17, 0xcf, 0xea, 0x3b, 0x1c, 0x1e, 0xc1, 0x76, 0x35, 0xd3, 0x37, 0x66, 0x7f, 0xe7, 0x18, 0xdd, 0x93, 0xad, 0xfc, 0xbe, 0xc3, 0x1f, 0xc3, 0xce, 0x6a, 0xa8, 0xaf, 0x4f, 0x3f, 0xfe, 0xd4, 0x80, 0x9e, 0x7e, 0xc5, 0x17, 0x54, 0xdc, 0xb1, 0x09, 0x45, 0xff, 0x01, 0xac, 0x3e, 0x8a, 0xe8, 0xe0, 0xc1, 0x16, 0xad, 0xf2, 0x19, 0x0c, 0x36, 0x51, 0xb6, 0x51, 0xf8, 0xc3, 0x1f, 0x0e, 0xba, 0x5c, 0xbf, 0x50, 0x83, 0x4d, 0xfb, 0x58, 0x94, 0x3a, 0xdc, 0xc8, 0x6d, 0xaa, 0x65, 0x3f, 0x54, 0x0f, 0x6a, 0xd5, 0x9d, 0x7f, 0x58, 0x6b, 0xcd, 0x00, 0x53, 0xeb, 0x6f, 0xe8, 0x14, 0xa9, 0xa2, 0xbd, 0x2a, 0x91, 0xb5, 0xd5, 0x1b, 0xec, 0x3f, 0xc2, 0x6b, 
0xcf, 0xff, 0x03, 0x5e, 0x69, 0x2c, 0xaa, 0x0b, 0xd7, 0xa6, 0x08, 0x1e, 0x13, 0xab, 0x12, 0xb7, 0xae, 0xf9, 0xff, 0xf1, 0xe7, 0x97, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x7c, 0x0d, 0x4f, 0xa3, 0x08, 0x00, 0x00, } namespace.pb.go000066400000000000000000000342011324746544700356320ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: namespace.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type AddNamespaceRequest struct { StorageName string `protobuf:"bytes,1,opt,name=storage_name,json=storageName" json:"storage_name,omitempty"` Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` } func (m *AddNamespaceRequest) Reset() { *m = AddNamespaceRequest{} } func (m *AddNamespaceRequest) String() string { return proto.CompactTextString(m) } func (*AddNamespaceRequest) ProtoMessage() {} func (*AddNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } func (m *AddNamespaceRequest) GetStorageName() string { if m != nil { return m.StorageName } return "" } func (m *AddNamespaceRequest) GetName() string { if m != nil { return m.Name } return "" } type RemoveNamespaceRequest struct { StorageName string `protobuf:"bytes,1,opt,name=storage_name,json=storageName" json:"storage_name,omitempty"` Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` } func (m *RemoveNamespaceRequest) Reset() { *m = RemoveNamespaceRequest{} } func (m *RemoveNamespaceRequest) String() string { return proto.CompactTextString(m) } func (*RemoveNamespaceRequest) ProtoMessage() {} func (*RemoveNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1} } func (m *RemoveNamespaceRequest) GetStorageName() string { if m != nil { return m.StorageName } return "" } func (m *RemoveNamespaceRequest) GetName() string { if m != nil { return m.Name } return "" } type RenameNamespaceRequest struct { StorageName string `protobuf:"bytes,1,opt,name=storage_name,json=storageName" json:"storage_name,omitempty"` From string `protobuf:"bytes,2,opt,name=from" json:"from,omitempty"` To string `protobuf:"bytes,3,opt,name=to" json:"to,omitempty"` } func (m *RenameNamespaceRequest) Reset() { *m = RenameNamespaceRequest{} } func (m *RenameNamespaceRequest) String() string { return proto.CompactTextString(m) } func (*RenameNamespaceRequest) ProtoMessage() {} func (*RenameNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{2} } func (m *RenameNamespaceRequest) GetStorageName() string { if m != nil { return m.StorageName } return "" } func (m *RenameNamespaceRequest) GetFrom() string { if m != nil { return m.From } return "" } func (m *RenameNamespaceRequest) GetTo() string { if m != nil { return m.To } return "" } type NamespaceExistsRequest struct { StorageName string `protobuf:"bytes,1,opt,name=storage_name,json=storageName" json:"storage_name,omitempty"` Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` } func (m *NamespaceExistsRequest) Reset() { *m = NamespaceExistsRequest{} } func (m *NamespaceExistsRequest) String() string { return proto.CompactTextString(m) } func (*NamespaceExistsRequest) ProtoMessage() {} func 
(*NamespaceExistsRequest) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{3} } func (m *NamespaceExistsRequest) GetStorageName() string { if m != nil { return m.StorageName } return "" } func (m *NamespaceExistsRequest) GetName() string { if m != nil { return m.Name } return "" } type NamespaceExistsResponse struct { Exists bool `protobuf:"varint,1,opt,name=exists" json:"exists,omitempty"` } func (m *NamespaceExistsResponse) Reset() { *m = NamespaceExistsResponse{} } func (m *NamespaceExistsResponse) String() string { return proto.CompactTextString(m) } func (*NamespaceExistsResponse) ProtoMessage() {} func (*NamespaceExistsResponse) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{4} } func (m *NamespaceExistsResponse) GetExists() bool { if m != nil { return m.Exists } return false } type AddNamespaceResponse struct { } func (m *AddNamespaceResponse) Reset() { *m = AddNamespaceResponse{} } func (m *AddNamespaceResponse) String() string { return proto.CompactTextString(m) } func (*AddNamespaceResponse) ProtoMessage() {} func (*AddNamespaceResponse) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{5} } type RemoveNamespaceResponse struct { } func (m *RemoveNamespaceResponse) Reset() { *m = RemoveNamespaceResponse{} } func (m *RemoveNamespaceResponse) String() string { return proto.CompactTextString(m) } func (*RemoveNamespaceResponse) ProtoMessage() {} func (*RemoveNamespaceResponse) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{6} } type RenameNamespaceResponse struct { } func (m *RenameNamespaceResponse) Reset() { *m = RenameNamespaceResponse{} } func (m *RenameNamespaceResponse) String() string { return proto.CompactTextString(m) } func (*RenameNamespaceResponse) ProtoMessage() {} func (*RenameNamespaceResponse) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{7} } func init() { proto.RegisterType((*AddNamespaceRequest)(nil), "gitaly.AddNamespaceRequest") proto.RegisterType((*RemoveNamespaceRequest)(nil), "gitaly.RemoveNamespaceRequest") proto.RegisterType((*RenameNamespaceRequest)(nil), "gitaly.RenameNamespaceRequest") proto.RegisterType((*NamespaceExistsRequest)(nil), "gitaly.NamespaceExistsRequest") proto.RegisterType((*NamespaceExistsResponse)(nil), "gitaly.NamespaceExistsResponse") proto.RegisterType((*AddNamespaceResponse)(nil), "gitaly.AddNamespaceResponse") proto.RegisterType((*RemoveNamespaceResponse)(nil), "gitaly.RemoveNamespaceResponse") proto.RegisterType((*RenameNamespaceResponse)(nil), "gitaly.RenameNamespaceResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. 
const _ = grpc.SupportPackageIsVersion4 // Client API for NamespaceService service type NamespaceServiceClient interface { AddNamespace(ctx context.Context, in *AddNamespaceRequest, opts ...grpc.CallOption) (*AddNamespaceResponse, error) RemoveNamespace(ctx context.Context, in *RemoveNamespaceRequest, opts ...grpc.CallOption) (*RemoveNamespaceResponse, error) RenameNamespace(ctx context.Context, in *RenameNamespaceRequest, opts ...grpc.CallOption) (*RenameNamespaceResponse, error) NamespaceExists(ctx context.Context, in *NamespaceExistsRequest, opts ...grpc.CallOption) (*NamespaceExistsResponse, error) } type namespaceServiceClient struct { cc *grpc.ClientConn } func NewNamespaceServiceClient(cc *grpc.ClientConn) NamespaceServiceClient { return &namespaceServiceClient{cc} } func (c *namespaceServiceClient) AddNamespace(ctx context.Context, in *AddNamespaceRequest, opts ...grpc.CallOption) (*AddNamespaceResponse, error) { out := new(AddNamespaceResponse) err := grpc.Invoke(ctx, "/gitaly.NamespaceService/AddNamespace", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *namespaceServiceClient) RemoveNamespace(ctx context.Context, in *RemoveNamespaceRequest, opts ...grpc.CallOption) (*RemoveNamespaceResponse, error) { out := new(RemoveNamespaceResponse) err := grpc.Invoke(ctx, "/gitaly.NamespaceService/RemoveNamespace", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *namespaceServiceClient) RenameNamespace(ctx context.Context, in *RenameNamespaceRequest, opts ...grpc.CallOption) (*RenameNamespaceResponse, error) { out := new(RenameNamespaceResponse) err := grpc.Invoke(ctx, "/gitaly.NamespaceService/RenameNamespace", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *namespaceServiceClient) NamespaceExists(ctx context.Context, in *NamespaceExistsRequest, opts ...grpc.CallOption) (*NamespaceExistsResponse, error) { out := new(NamespaceExistsResponse) err := grpc.Invoke(ctx, "/gitaly.NamespaceService/NamespaceExists", in, out, c.cc, opts...) 
if err != nil { return nil, err } return out, nil } // Server API for NamespaceService service type NamespaceServiceServer interface { AddNamespace(context.Context, *AddNamespaceRequest) (*AddNamespaceResponse, error) RemoveNamespace(context.Context, *RemoveNamespaceRequest) (*RemoveNamespaceResponse, error) RenameNamespace(context.Context, *RenameNamespaceRequest) (*RenameNamespaceResponse, error) NamespaceExists(context.Context, *NamespaceExistsRequest) (*NamespaceExistsResponse, error) } func RegisterNamespaceServiceServer(s *grpc.Server, srv NamespaceServiceServer) { s.RegisterService(&_NamespaceService_serviceDesc, srv) } func _NamespaceService_AddNamespace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(AddNamespaceRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(NamespaceServiceServer).AddNamespace(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.NamespaceService/AddNamespace", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(NamespaceServiceServer).AddNamespace(ctx, req.(*AddNamespaceRequest)) } return interceptor(ctx, in, info, handler) } func _NamespaceService_RemoveNamespace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RemoveNamespaceRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(NamespaceServiceServer).RemoveNamespace(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.NamespaceService/RemoveNamespace", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(NamespaceServiceServer).RemoveNamespace(ctx, req.(*RemoveNamespaceRequest)) } return interceptor(ctx, in, info, handler) } func _NamespaceService_RenameNamespace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RenameNamespaceRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(NamespaceServiceServer).RenameNamespace(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.NamespaceService/RenameNamespace", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(NamespaceServiceServer).RenameNamespace(ctx, req.(*RenameNamespaceRequest)) } return interceptor(ctx, in, info, handler) } func _NamespaceService_NamespaceExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(NamespaceExistsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(NamespaceServiceServer).NamespaceExists(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.NamespaceService/NamespaceExists", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(NamespaceServiceServer).NamespaceExists(ctx, req.(*NamespaceExistsRequest)) } return interceptor(ctx, in, info, handler) } var _NamespaceService_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.NamespaceService", HandlerType: (*NamespaceServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "AddNamespace", Handler: _NamespaceService_AddNamespace_Handler, }, { MethodName: "RemoveNamespace", 
Handler: _NamespaceService_RemoveNamespace_Handler, }, { MethodName: "RenameNamespace", Handler: _NamespaceService_RenameNamespace_Handler, }, { MethodName: "NamespaceExists", Handler: _NamespaceService_NamespaceExists_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "namespace.proto", } func init() { proto.RegisterFile("namespace.proto", fileDescriptor5) } var fileDescriptor5 = []byte{ // 291 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xcf, 0x4b, 0xcc, 0x4d, 0x2d, 0x2e, 0x48, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4b, 0xcf, 0x2c, 0x49, 0xcc, 0xa9, 0x54, 0xf2, 0xe1, 0x12, 0x76, 0x4c, 0x49, 0xf1, 0x83, 0xc9, 0x06, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x29, 0x72, 0xf1, 0x14, 0x97, 0xe4, 0x17, 0x25, 0xa6, 0xa7, 0xc6, 0x83, 0x74, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x71, 0x43, 0xc5, 0x40, 0xca, 0x85, 0x84, 0xb8, 0x58, 0xc0, 0x52, 0x4c, 0x60, 0x29, 0x30, 0x5b, 0xc9, 0x9f, 0x4b, 0x2c, 0x28, 0x35, 0x37, 0xbf, 0x2c, 0x95, 0x5a, 0x06, 0xc6, 0x83, 0x0c, 0x04, 0xb1, 0xc8, 0x34, 0x30, 0xad, 0x28, 0x3f, 0x17, 0x66, 0x20, 0x88, 0x2d, 0xc4, 0xc7, 0xc5, 0x54, 0x92, 0x2f, 0xc1, 0x0c, 0x16, 0x61, 0x2a, 0xc9, 0x07, 0xb9, 0x18, 0x6e, 0xb4, 0x6b, 0x45, 0x66, 0x71, 0x49, 0x31, 0x85, 0x2e, 0x36, 0xe4, 0x12, 0xc7, 0x30, 0xb0, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0x48, 0x8c, 0x8b, 0x2d, 0x15, 0x2c, 0x02, 0x36, 0x8b, 0x23, 0x08, 0xca, 0x53, 0x12, 0xe3, 0x12, 0x41, 0x8d, 0x03, 0x88, 0x7a, 0x25, 0x49, 0x2e, 0x71, 0x8c, 0xd0, 0x44, 0x96, 0x42, 0x0b, 0x17, 0x88, 0x94, 0xd1, 0x43, 0x26, 0x2e, 0x01, 0xb8, 0x68, 0x70, 0x6a, 0x51, 0x59, 0x66, 0x72, 0xaa, 0x90, 0x37, 0x17, 0x0f, 0xb2, 0x15, 0x42, 0xd2, 0x7a, 0x90, 0xf8, 0xd7, 0xc3, 0x12, 0xf9, 0x52, 0x32, 0xd8, 0x25, 0xa1, 0x56, 0x33, 0x08, 0x85, 0x70, 0xf1, 0xa3, 0xb9, 0x4b, 0x48, 0x0e, 0xa6, 0x05, 0x7b, 0xf4, 0x4b, 0xc9, 0xe3, 0x94, 0x47, 0x35, 0x15, 0xc5, 0x4b, 0xc8, 0xa6, 0x62, 0x4b, 0x03, 0xc8, 0xa6, 0x62, 0x0d, 0x0b, 0x88, 0xa9, 0x68, 0xd1, 0x81, 0x30, 0x15, 0x7b, 0xc4, 0x23, 0x4c, 0xc5, 0x11, 0x8f, 0x4a, 0x0c, 0x49, 0x6c, 0xe0, 0x4c, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x36, 0x4b, 0x73, 0x57, 0x03, 0x00, 0x00, } notifications.pb.go000066400000000000000000000117241324746544700365540ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: notifications.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type PostReceiveRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` } func (m *PostReceiveRequest) Reset() { *m = PostReceiveRequest{} } func (m *PostReceiveRequest) String() string { return proto.CompactTextString(m) } func (*PostReceiveRequest) ProtoMessage() {} func (*PostReceiveRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } func (m *PostReceiveRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } type PostReceiveResponse struct { } func (m *PostReceiveResponse) Reset() { *m = PostReceiveResponse{} } func (m *PostReceiveResponse) String() string { return proto.CompactTextString(m) } func (*PostReceiveResponse) ProtoMessage() {} func (*PostReceiveResponse) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} } func init() { proto.RegisterType((*PostReceiveRequest)(nil), "gitaly.PostReceiveRequest") proto.RegisterType((*PostReceiveResponse)(nil), "gitaly.PostReceiveResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for NotificationService service type NotificationServiceClient interface { PostReceive(ctx context.Context, in *PostReceiveRequest, opts ...grpc.CallOption) (*PostReceiveResponse, error) } type notificationServiceClient struct { cc *grpc.ClientConn } func NewNotificationServiceClient(cc *grpc.ClientConn) NotificationServiceClient { return &notificationServiceClient{cc} } func (c *notificationServiceClient) PostReceive(ctx context.Context, in *PostReceiveRequest, opts ...grpc.CallOption) (*PostReceiveResponse, error) { out := new(PostReceiveResponse) err := grpc.Invoke(ctx, "/gitaly.NotificationService/PostReceive", in, out, c.cc, opts...)
if err != nil { return nil, err } return out, nil } // Server API for NotificationService service type NotificationServiceServer interface { PostReceive(context.Context, *PostReceiveRequest) (*PostReceiveResponse, error) } func RegisterNotificationServiceServer(s *grpc.Server, srv NotificationServiceServer) { s.RegisterService(&_NotificationService_serviceDesc, srv) } func _NotificationService_PostReceive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PostReceiveRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(NotificationServiceServer).PostReceive(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.NotificationService/PostReceive", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(NotificationServiceServer).PostReceive(ctx, req.(*PostReceiveRequest)) } return interceptor(ctx, in, info, handler) } var _NotificationService_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.NotificationService", HandlerType: (*NotificationServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "PostReceive", Handler: _NotificationService_PostReceive_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "notifications.proto", } func init() { proto.RegisterFile("notifications.proto", fileDescriptor6) } var fileDescriptor6 = []byte{ // 170 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xcb, 0x2f, 0xc9, 0x4c, 0xcb, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4b, 0xcf, 0x2c, 0x49, 0xcc, 0xa9, 0x94, 0xe2, 0x29, 0xce, 0x48, 0x2c, 0x4a, 0x4d, 0x81, 0x88, 0x2a, 0x79, 0x70, 0x09, 0x05, 0xe4, 0x17, 0x97, 0x04, 0xa5, 0x26, 0xa7, 0x66, 0x96, 0xa5, 0x06, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x19, 0x71, 0x71, 0x15, 0xa5, 0x16, 0xe4, 0x17, 0x67, 0x96, 0xe4, 0x17, 0x55, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0x09, 0xe9, 0x41, 0x0c, 0xd0, 0x0b, 0x82, 0xcb, 0x04, 0x21, 0xa9, 0x52, 0x12, 0xe5, 0x12, 0x46, 0x31, 0xa9, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0xd5, 0x28, 0x9e, 0x4b, 0xd8, 0x0f, 0xc9, 0x35, 0xc1, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x42, 0x1e, 0x5c, 0xdc, 0x48, 0xaa, 0x85, 0xa4, 0x60, 0x86, 0x63, 0x3a, 0x46, 0x4a, 0x1a, 0xab, 0x1c, 0xc4, 0x78, 0x25, 0x86, 0x24, 0x36, 0xb0, 0x47, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x98, 0xea, 0xcc, 0xff, 0xf5, 0x00, 0x00, 0x00, } operations.pb.go000066400000000000000000001772561324746544700361030ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: operations.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type UserCommitFilesActionHeader_ActionType int32 const ( UserCommitFilesActionHeader_CREATE UserCommitFilesActionHeader_ActionType = 0 UserCommitFilesActionHeader_CREATE_DIR UserCommitFilesActionHeader_ActionType = 1 UserCommitFilesActionHeader_UPDATE UserCommitFilesActionHeader_ActionType = 2 UserCommitFilesActionHeader_MOVE UserCommitFilesActionHeader_ActionType = 3 UserCommitFilesActionHeader_DELETE UserCommitFilesActionHeader_ActionType = 4 ) var UserCommitFilesActionHeader_ActionType_name = map[int32]string{ 0: "CREATE", 1: "CREATE_DIR", 2: "UPDATE", 3: "MOVE", 4: "DELETE", } var UserCommitFilesActionHeader_ActionType_value = map[string]int32{ "CREATE": 0, "CREATE_DIR": 1, "UPDATE": 2, "MOVE": 3, "DELETE": 4, } func (x UserCommitFilesActionHeader_ActionType) String() string { return proto.EnumName(UserCommitFilesActionHeader_ActionType_name, int32(x)) } func (UserCommitFilesActionHeader_ActionType) EnumDescriptor() ([]byte, []int) { return fileDescriptor7, []int{17, 0} } type UserCreateBranchRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` BranchName []byte `protobuf:"bytes,2,opt,name=branch_name,json=branchName,proto3" json:"branch_name,omitempty"` User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` StartPoint []byte `protobuf:"bytes,4,opt,name=start_point,json=startPoint,proto3" json:"start_point,omitempty"` } func (m *UserCreateBranchRequest) Reset() { *m = UserCreateBranchRequest{} } func (m *UserCreateBranchRequest) String() string { return proto.CompactTextString(m) } func (*UserCreateBranchRequest) ProtoMessage() {} func (*UserCreateBranchRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0} } func (m *UserCreateBranchRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *UserCreateBranchRequest) GetBranchName() []byte { if m != nil { return m.BranchName } return nil } func (m *UserCreateBranchRequest) GetUser() *User { if m != nil { return m.User } return nil } func (m *UserCreateBranchRequest) GetStartPoint() []byte { if m != nil { return m.StartPoint } return nil } type UserCreateBranchResponse struct { Branch *Branch `protobuf:"bytes,1,opt,name=branch" json:"branch,omitempty"` // Error returned by the pre-receive hook. 
If no error was thrown, // it's the empty string ("") PreReceiveError string `protobuf:"bytes,2,opt,name=pre_receive_error,json=preReceiveError" json:"pre_receive_error,omitempty"` } func (m *UserCreateBranchResponse) Reset() { *m = UserCreateBranchResponse{} } func (m *UserCreateBranchResponse) String() string { return proto.CompactTextString(m) } func (*UserCreateBranchResponse) ProtoMessage() {} func (*UserCreateBranchResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{1} } func (m *UserCreateBranchResponse) GetBranch() *Branch { if m != nil { return m.Branch } return nil } func (m *UserCreateBranchResponse) GetPreReceiveError() string { if m != nil { return m.PreReceiveError } return "" } type UserDeleteBranchRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` BranchName []byte `protobuf:"bytes,2,opt,name=branch_name,json=branchName,proto3" json:"branch_name,omitempty"` User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` } func (m *UserDeleteBranchRequest) Reset() { *m = UserDeleteBranchRequest{} } func (m *UserDeleteBranchRequest) String() string { return proto.CompactTextString(m) } func (*UserDeleteBranchRequest) ProtoMessage() {} func (*UserDeleteBranchRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{2} } func (m *UserDeleteBranchRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *UserDeleteBranchRequest) GetBranchName() []byte { if m != nil { return m.BranchName } return nil } func (m *UserDeleteBranchRequest) GetUser() *User { if m != nil { return m.User } return nil } type UserDeleteBranchResponse struct { PreReceiveError string `protobuf:"bytes,1,opt,name=pre_receive_error,json=preReceiveError" json:"pre_receive_error,omitempty"` } func (m *UserDeleteBranchResponse) Reset() { *m = UserDeleteBranchResponse{} } func (m *UserDeleteBranchResponse) String() string { return proto.CompactTextString(m) } func (*UserDeleteBranchResponse) ProtoMessage() {} func (*UserDeleteBranchResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{3} } func (m *UserDeleteBranchResponse) GetPreReceiveError() string { if m != nil { return m.PreReceiveError } return "" } type UserDeleteTagRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` TagName []byte `protobuf:"bytes,2,opt,name=tag_name,json=tagName,proto3" json:"tag_name,omitempty"` User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` } func (m *UserDeleteTagRequest) Reset() { *m = UserDeleteTagRequest{} } func (m *UserDeleteTagRequest) String() string { return proto.CompactTextString(m) } func (*UserDeleteTagRequest) ProtoMessage() {} func (*UserDeleteTagRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{4} } func (m *UserDeleteTagRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *UserDeleteTagRequest) GetTagName() []byte { if m != nil { return m.TagName } return nil } func (m *UserDeleteTagRequest) GetUser() *User { if m != nil { return m.User } return nil } type UserDeleteTagResponse struct { PreReceiveError string `protobuf:"bytes,1,opt,name=pre_receive_error,json=preReceiveError" json:"pre_receive_error,omitempty"` } func (m *UserDeleteTagResponse) Reset() { *m = UserDeleteTagResponse{} } func (m *UserDeleteTagResponse) String() string { return proto.CompactTextString(m) } func (*UserDeleteTagResponse) ProtoMessage() {} func 
(*UserDeleteTagResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{5} } func (m *UserDeleteTagResponse) GetPreReceiveError() string { if m != nil { return m.PreReceiveError } return "" } type UserCreateTagRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` TagName []byte `protobuf:"bytes,2,opt,name=tag_name,json=tagName,proto3" json:"tag_name,omitempty"` User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` TargetRevision []byte `protobuf:"bytes,4,opt,name=target_revision,json=targetRevision,proto3" json:"target_revision,omitempty"` Message []byte `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` } func (m *UserCreateTagRequest) Reset() { *m = UserCreateTagRequest{} } func (m *UserCreateTagRequest) String() string { return proto.CompactTextString(m) } func (*UserCreateTagRequest) ProtoMessage() {} func (*UserCreateTagRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{6} } func (m *UserCreateTagRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *UserCreateTagRequest) GetTagName() []byte { if m != nil { return m.TagName } return nil } func (m *UserCreateTagRequest) GetUser() *User { if m != nil { return m.User } return nil } func (m *UserCreateTagRequest) GetTargetRevision() []byte { if m != nil { return m.TargetRevision } return nil } func (m *UserCreateTagRequest) GetMessage() []byte { if m != nil { return m.Message } return nil } type UserCreateTagResponse struct { Tag *Tag `protobuf:"bytes,1,opt,name=tag" json:"tag,omitempty"` Exists bool `protobuf:"varint,2,opt,name=exists" json:"exists,omitempty"` PreReceiveError string `protobuf:"bytes,3,opt,name=pre_receive_error,json=preReceiveError" json:"pre_receive_error,omitempty"` } func (m *UserCreateTagResponse) Reset() { *m = UserCreateTagResponse{} } func (m *UserCreateTagResponse) String() string { return proto.CompactTextString(m) } func (*UserCreateTagResponse) ProtoMessage() {} func (*UserCreateTagResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{7} } func (m *UserCreateTagResponse) GetTag() *Tag { if m != nil { return m.Tag } return nil } func (m *UserCreateTagResponse) GetExists() bool { if m != nil { return m.Exists } return false } func (m *UserCreateTagResponse) GetPreReceiveError() string { if m != nil { return m.PreReceiveError } return "" } type UserMergeBranchRequest struct { // First message Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` User *User `protobuf:"bytes,2,opt,name=user" json:"user,omitempty"` CommitId string `protobuf:"bytes,3,opt,name=commit_id,json=commitId" json:"commit_id,omitempty"` Branch []byte `protobuf:"bytes,4,opt,name=branch,proto3" json:"branch,omitempty"` Message []byte `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` // Second message // Tell the server to apply the merge to the branch Apply bool `protobuf:"varint,6,opt,name=apply" json:"apply,omitempty"` } func (m *UserMergeBranchRequest) Reset() { *m = UserMergeBranchRequest{} } func (m *UserMergeBranchRequest) String() string { return proto.CompactTextString(m) } func (*UserMergeBranchRequest) ProtoMessage() {} func (*UserMergeBranchRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{8} } func (m *UserMergeBranchRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *UserMergeBranchRequest) GetUser() *User { if m != nil { return 
m.User } return nil } func (m *UserMergeBranchRequest) GetCommitId() string { if m != nil { return m.CommitId } return "" } func (m *UserMergeBranchRequest) GetBranch() []byte { if m != nil { return m.Branch } return nil } func (m *UserMergeBranchRequest) GetMessage() []byte { if m != nil { return m.Message } return nil } func (m *UserMergeBranchRequest) GetApply() bool { if m != nil { return m.Apply } return false } type UserMergeBranchResponse struct { // First message // The merge commit the branch will be updated to. The caller can still abort the merge. CommitId string `protobuf:"bytes,1,opt,name=commit_id,json=commitId" json:"commit_id,omitempty"` // Second message // If set, the merge has been applied to the branch. BranchUpdate *OperationBranchUpdate `protobuf:"bytes,3,opt,name=branch_update,json=branchUpdate" json:"branch_update,omitempty"` } func (m *UserMergeBranchResponse) Reset() { *m = UserMergeBranchResponse{} } func (m *UserMergeBranchResponse) String() string { return proto.CompactTextString(m) } func (*UserMergeBranchResponse) ProtoMessage() {} func (*UserMergeBranchResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{9} } func (m *UserMergeBranchResponse) GetCommitId() string { if m != nil { return m.CommitId } return "" } func (m *UserMergeBranchResponse) GetBranchUpdate() *OperationBranchUpdate { if m != nil { return m.BranchUpdate } return nil } type OperationBranchUpdate struct { // If this string is non-empty the branch has been updated. CommitId string `protobuf:"bytes,1,opt,name=commit_id,json=commitId" json:"commit_id,omitempty"` // Used for cache invalidation in GitLab RepoCreated bool `protobuf:"varint,2,opt,name=repo_created,json=repoCreated" json:"repo_created,omitempty"` // Used for cache invalidation in GitLab BranchCreated bool `protobuf:"varint,3,opt,name=branch_created,json=branchCreated" json:"branch_created,omitempty"` } func (m *OperationBranchUpdate) Reset() { *m = OperationBranchUpdate{} } func (m *OperationBranchUpdate) String() string { return proto.CompactTextString(m) } func (*OperationBranchUpdate) ProtoMessage() {} func (*OperationBranchUpdate) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{10} } func (m *OperationBranchUpdate) GetCommitId() string { if m != nil { return m.CommitId } return "" } func (m *OperationBranchUpdate) GetRepoCreated() bool { if m != nil { return m.RepoCreated } return false } func (m *OperationBranchUpdate) GetBranchCreated() bool { if m != nil { return m.BranchCreated } return false } type UserFFBranchRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` User *User `protobuf:"bytes,2,opt,name=user" json:"user,omitempty"` CommitId string `protobuf:"bytes,3,opt,name=commit_id,json=commitId" json:"commit_id,omitempty"` Branch []byte `protobuf:"bytes,4,opt,name=branch,proto3" json:"branch,omitempty"` } func (m *UserFFBranchRequest) Reset() { *m = UserFFBranchRequest{} } func (m *UserFFBranchRequest) String() string { return proto.CompactTextString(m) } func (*UserFFBranchRequest) ProtoMessage() {} func (*UserFFBranchRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{11} } func (m *UserFFBranchRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *UserFFBranchRequest) GetUser() *User { if m != nil { return m.User } return nil } func (m *UserFFBranchRequest) GetCommitId() string { if m != nil { return m.CommitId } return "" } func (m *UserFFBranchRequest) GetBranch() []byte { 
if m != nil { return m.Branch } return nil } type UserFFBranchResponse struct { BranchUpdate *OperationBranchUpdate `protobuf:"bytes,1,opt,name=branch_update,json=branchUpdate" json:"branch_update,omitempty"` PreReceiveError string `protobuf:"bytes,2,opt,name=pre_receive_error,json=preReceiveError" json:"pre_receive_error,omitempty"` } func (m *UserFFBranchResponse) Reset() { *m = UserFFBranchResponse{} } func (m *UserFFBranchResponse) String() string { return proto.CompactTextString(m) } func (*UserFFBranchResponse) ProtoMessage() {} func (*UserFFBranchResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{12} } func (m *UserFFBranchResponse) GetBranchUpdate() *OperationBranchUpdate { if m != nil { return m.BranchUpdate } return nil } func (m *UserFFBranchResponse) GetPreReceiveError() string { if m != nil { return m.PreReceiveError } return "" } type UserCherryPickRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` User *User `protobuf:"bytes,2,opt,name=user" json:"user,omitempty"` Commit *GitCommit `protobuf:"bytes,3,opt,name=commit" json:"commit,omitempty"` BranchName []byte `protobuf:"bytes,4,opt,name=branch_name,json=branchName,proto3" json:"branch_name,omitempty"` Message []byte `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` StartBranchName []byte `protobuf:"bytes,6,opt,name=start_branch_name,json=startBranchName,proto3" json:"start_branch_name,omitempty"` StartRepository *Repository `protobuf:"bytes,7,opt,name=start_repository,json=startRepository" json:"start_repository,omitempty"` } func (m *UserCherryPickRequest) Reset() { *m = UserCherryPickRequest{} } func (m *UserCherryPickRequest) String() string { return proto.CompactTextString(m) } func (*UserCherryPickRequest) ProtoMessage() {} func (*UserCherryPickRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{13} } func (m *UserCherryPickRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *UserCherryPickRequest) GetUser() *User { if m != nil { return m.User } return nil } func (m *UserCherryPickRequest) GetCommit() *GitCommit { if m != nil { return m.Commit } return nil } func (m *UserCherryPickRequest) GetBranchName() []byte { if m != nil { return m.BranchName } return nil } func (m *UserCherryPickRequest) GetMessage() []byte { if m != nil { return m.Message } return nil } func (m *UserCherryPickRequest) GetStartBranchName() []byte { if m != nil { return m.StartBranchName } return nil } func (m *UserCherryPickRequest) GetStartRepository() *Repository { if m != nil { return m.StartRepository } return nil } type UserCherryPickResponse struct { BranchUpdate *OperationBranchUpdate `protobuf:"bytes,1,opt,name=branch_update,json=branchUpdate" json:"branch_update,omitempty"` CreateTreeError string `protobuf:"bytes,2,opt,name=create_tree_error,json=createTreeError" json:"create_tree_error,omitempty"` CommitError string `protobuf:"bytes,3,opt,name=commit_error,json=commitError" json:"commit_error,omitempty"` PreReceiveError string `protobuf:"bytes,4,opt,name=pre_receive_error,json=preReceiveError" json:"pre_receive_error,omitempty"` } func (m *UserCherryPickResponse) Reset() { *m = UserCherryPickResponse{} } func (m *UserCherryPickResponse) String() string { return proto.CompactTextString(m) } func (*UserCherryPickResponse) ProtoMessage() {} func (*UserCherryPickResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{14} } func (m *UserCherryPickResponse) 
GetBranchUpdate() *OperationBranchUpdate { if m != nil { return m.BranchUpdate } return nil } func (m *UserCherryPickResponse) GetCreateTreeError() string { if m != nil { return m.CreateTreeError } return "" } func (m *UserCherryPickResponse) GetCommitError() string { if m != nil { return m.CommitError } return "" } func (m *UserCherryPickResponse) GetPreReceiveError() string { if m != nil { return m.PreReceiveError } return "" } type UserRevertRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` User *User `protobuf:"bytes,2,opt,name=user" json:"user,omitempty"` Commit *GitCommit `protobuf:"bytes,3,opt,name=commit" json:"commit,omitempty"` BranchName []byte `protobuf:"bytes,4,opt,name=branch_name,json=branchName,proto3" json:"branch_name,omitempty"` Message []byte `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` StartBranchName []byte `protobuf:"bytes,6,opt,name=start_branch_name,json=startBranchName,proto3" json:"start_branch_name,omitempty"` StartRepository *Repository `protobuf:"bytes,7,opt,name=start_repository,json=startRepository" json:"start_repository,omitempty"` } func (m *UserRevertRequest) Reset() { *m = UserRevertRequest{} } func (m *UserRevertRequest) String() string { return proto.CompactTextString(m) } func (*UserRevertRequest) ProtoMessage() {} func (*UserRevertRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{15} } func (m *UserRevertRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *UserRevertRequest) GetUser() *User { if m != nil { return m.User } return nil } func (m *UserRevertRequest) GetCommit() *GitCommit { if m != nil { return m.Commit } return nil } func (m *UserRevertRequest) GetBranchName() []byte { if m != nil { return m.BranchName } return nil } func (m *UserRevertRequest) GetMessage() []byte { if m != nil { return m.Message } return nil } func (m *UserRevertRequest) GetStartBranchName() []byte { if m != nil { return m.StartBranchName } return nil } func (m *UserRevertRequest) GetStartRepository() *Repository { if m != nil { return m.StartRepository } return nil } type UserRevertResponse struct { BranchUpdate *OperationBranchUpdate `protobuf:"bytes,1,opt,name=branch_update,json=branchUpdate" json:"branch_update,omitempty"` CreateTreeError string `protobuf:"bytes,2,opt,name=create_tree_error,json=createTreeError" json:"create_tree_error,omitempty"` CommitError string `protobuf:"bytes,3,opt,name=commit_error,json=commitError" json:"commit_error,omitempty"` PreReceiveError string `protobuf:"bytes,4,opt,name=pre_receive_error,json=preReceiveError" json:"pre_receive_error,omitempty"` } func (m *UserRevertResponse) Reset() { *m = UserRevertResponse{} } func (m *UserRevertResponse) String() string { return proto.CompactTextString(m) } func (*UserRevertResponse) ProtoMessage() {} func (*UserRevertResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{16} } func (m *UserRevertResponse) GetBranchUpdate() *OperationBranchUpdate { if m != nil { return m.BranchUpdate } return nil } func (m *UserRevertResponse) GetCreateTreeError() string { if m != nil { return m.CreateTreeError } return "" } func (m *UserRevertResponse) GetCommitError() string { if m != nil { return m.CommitError } return "" } func (m *UserRevertResponse) GetPreReceiveError() string { if m != nil { return m.PreReceiveError } return "" } type UserCommitFilesActionHeader struct { Action UserCommitFilesActionHeader_ActionType 
`protobuf:"varint,1,opt,name=action,enum=gitaly.UserCommitFilesActionHeader_ActionType" json:"action,omitempty"` FilePath []byte `protobuf:"bytes,2,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"` PreviousPath []byte `protobuf:"bytes,3,opt,name=previous_path,json=previousPath,proto3" json:"previous_path,omitempty"` Base64Content bool `protobuf:"varint,4,opt,name=base64_content,json=base64Content" json:"base64_content,omitempty"` } func (m *UserCommitFilesActionHeader) Reset() { *m = UserCommitFilesActionHeader{} } func (m *UserCommitFilesActionHeader) String() string { return proto.CompactTextString(m) } func (*UserCommitFilesActionHeader) ProtoMessage() {} func (*UserCommitFilesActionHeader) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{17} } func (m *UserCommitFilesActionHeader) GetAction() UserCommitFilesActionHeader_ActionType { if m != nil { return m.Action } return UserCommitFilesActionHeader_CREATE } func (m *UserCommitFilesActionHeader) GetFilePath() []byte { if m != nil { return m.FilePath } return nil } func (m *UserCommitFilesActionHeader) GetPreviousPath() []byte { if m != nil { return m.PreviousPath } return nil } func (m *UserCommitFilesActionHeader) GetBase64Content() bool { if m != nil { return m.Base64Content } return false } type UserCommitFilesAction struct { // Types that are valid to be assigned to UserCommitFilesActionPayload: // *UserCommitFilesAction_Header // *UserCommitFilesAction_Content UserCommitFilesActionPayload isUserCommitFilesAction_UserCommitFilesActionPayload `protobuf_oneof:"user_commit_files_action_payload"` } func (m *UserCommitFilesAction) Reset() { *m = UserCommitFilesAction{} } func (m *UserCommitFilesAction) String() string { return proto.CompactTextString(m) } func (*UserCommitFilesAction) ProtoMessage() {} func (*UserCommitFilesAction) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{18} } type isUserCommitFilesAction_UserCommitFilesActionPayload interface { isUserCommitFilesAction_UserCommitFilesActionPayload() } type UserCommitFilesAction_Header struct { Header *UserCommitFilesActionHeader `protobuf:"bytes,1,opt,name=header,oneof"` } type UserCommitFilesAction_Content struct { Content []byte `protobuf:"bytes,2,opt,name=content,proto3,oneof"` } func (*UserCommitFilesAction_Header) isUserCommitFilesAction_UserCommitFilesActionPayload() {} func (*UserCommitFilesAction_Content) isUserCommitFilesAction_UserCommitFilesActionPayload() {} func (m *UserCommitFilesAction) GetUserCommitFilesActionPayload() isUserCommitFilesAction_UserCommitFilesActionPayload { if m != nil { return m.UserCommitFilesActionPayload } return nil } func (m *UserCommitFilesAction) GetHeader() *UserCommitFilesActionHeader { if x, ok := m.GetUserCommitFilesActionPayload().(*UserCommitFilesAction_Header); ok { return x.Header } return nil } func (m *UserCommitFilesAction) GetContent() []byte { if x, ok := m.GetUserCommitFilesActionPayload().(*UserCommitFilesAction_Content); ok { return x.Content } return nil } // XXX_OneofFuncs is for the internal use of the proto package. 
func (*UserCommitFilesAction) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _UserCommitFilesAction_OneofMarshaler, _UserCommitFilesAction_OneofUnmarshaler, _UserCommitFilesAction_OneofSizer, []interface{}{ (*UserCommitFilesAction_Header)(nil), (*UserCommitFilesAction_Content)(nil), } } func _UserCommitFilesAction_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { m := msg.(*UserCommitFilesAction) // user_commit_files_action_payload switch x := m.UserCommitFilesActionPayload.(type) { case *UserCommitFilesAction_Header: b.EncodeVarint(1<<3 | proto.WireBytes) if err := b.EncodeMessage(x.Header); err != nil { return err } case *UserCommitFilesAction_Content: b.EncodeVarint(2<<3 | proto.WireBytes) b.EncodeRawBytes(x.Content) case nil: default: return fmt.Errorf("UserCommitFilesAction.UserCommitFilesActionPayload has unexpected type %T", x) } return nil } func _UserCommitFilesAction_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { m := msg.(*UserCommitFilesAction) switch tag { case 1: // user_commit_files_action_payload.header if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } msg := new(UserCommitFilesActionHeader) err := b.DecodeMessage(msg) m.UserCommitFilesActionPayload = &UserCommitFilesAction_Header{msg} return true, err case 2: // user_commit_files_action_payload.content if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } x, err := b.DecodeRawBytes(true) m.UserCommitFilesActionPayload = &UserCommitFilesAction_Content{x} return true, err default: return false, nil } } func _UserCommitFilesAction_OneofSizer(msg proto.Message) (n int) { m := msg.(*UserCommitFilesAction) // user_commit_files_action_payload switch x := m.UserCommitFilesActionPayload.(type) { case *UserCommitFilesAction_Header: s := proto.Size(x.Header) n += proto.SizeVarint(1<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *UserCommitFilesAction_Content: n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.Content))) n += len(x.Content) case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } return n } type UserCommitFilesRequestHeader struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` User *User `protobuf:"bytes,2,opt,name=user" json:"user,omitempty"` BranchName []byte `protobuf:"bytes,3,opt,name=branch_name,json=branchName,proto3" json:"branch_name,omitempty"` CommitMessage []byte `protobuf:"bytes,4,opt,name=commit_message,json=commitMessage,proto3" json:"commit_message,omitempty"` CommitAuthorName []byte `protobuf:"bytes,5,opt,name=commit_author_name,json=commitAuthorName,proto3" json:"commit_author_name,omitempty"` CommitAuthorEmail []byte `protobuf:"bytes,6,opt,name=commit_author_email,json=commitAuthorEmail,proto3" json:"commit_author_email,omitempty"` StartBranchName []byte `protobuf:"bytes,7,opt,name=start_branch_name,json=startBranchName,proto3" json:"start_branch_name,omitempty"` StartRepository *Repository `protobuf:"bytes,8,opt,name=start_repository,json=startRepository" json:"start_repository,omitempty"` } func (m *UserCommitFilesRequestHeader) Reset() { *m = UserCommitFilesRequestHeader{} } func (m *UserCommitFilesRequestHeader) String() string { return proto.CompactTextString(m) } func (*UserCommitFilesRequestHeader) ProtoMessage() {} func 
(*UserCommitFilesRequestHeader) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{19} } func (m *UserCommitFilesRequestHeader) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *UserCommitFilesRequestHeader) GetUser() *User { if m != nil { return m.User } return nil } func (m *UserCommitFilesRequestHeader) GetBranchName() []byte { if m != nil { return m.BranchName } return nil } func (m *UserCommitFilesRequestHeader) GetCommitMessage() []byte { if m != nil { return m.CommitMessage } return nil } func (m *UserCommitFilesRequestHeader) GetCommitAuthorName() []byte { if m != nil { return m.CommitAuthorName } return nil } func (m *UserCommitFilesRequestHeader) GetCommitAuthorEmail() []byte { if m != nil { return m.CommitAuthorEmail } return nil } func (m *UserCommitFilesRequestHeader) GetStartBranchName() []byte { if m != nil { return m.StartBranchName } return nil } func (m *UserCommitFilesRequestHeader) GetStartRepository() *Repository { if m != nil { return m.StartRepository } return nil } type UserCommitFilesRequest struct { // Types that are valid to be assigned to UserCommitFilesRequestPayload: // *UserCommitFilesRequest_Header // *UserCommitFilesRequest_Action UserCommitFilesRequestPayload isUserCommitFilesRequest_UserCommitFilesRequestPayload `protobuf_oneof:"user_commit_files_request_payload"` } func (m *UserCommitFilesRequest) Reset() { *m = UserCommitFilesRequest{} } func (m *UserCommitFilesRequest) String() string { return proto.CompactTextString(m) } func (*UserCommitFilesRequest) ProtoMessage() {} func (*UserCommitFilesRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{20} } type isUserCommitFilesRequest_UserCommitFilesRequestPayload interface { isUserCommitFilesRequest_UserCommitFilesRequestPayload() } type UserCommitFilesRequest_Header struct { Header *UserCommitFilesRequestHeader `protobuf:"bytes,1,opt,name=header,oneof"` } type UserCommitFilesRequest_Action struct { Action *UserCommitFilesAction `protobuf:"bytes,2,opt,name=action,oneof"` } func (*UserCommitFilesRequest_Header) isUserCommitFilesRequest_UserCommitFilesRequestPayload() {} func (*UserCommitFilesRequest_Action) isUserCommitFilesRequest_UserCommitFilesRequestPayload() {} func (m *UserCommitFilesRequest) GetUserCommitFilesRequestPayload() isUserCommitFilesRequest_UserCommitFilesRequestPayload { if m != nil { return m.UserCommitFilesRequestPayload } return nil } func (m *UserCommitFilesRequest) GetHeader() *UserCommitFilesRequestHeader { if x, ok := m.GetUserCommitFilesRequestPayload().(*UserCommitFilesRequest_Header); ok { return x.Header } return nil } func (m *UserCommitFilesRequest) GetAction() *UserCommitFilesAction { if x, ok := m.GetUserCommitFilesRequestPayload().(*UserCommitFilesRequest_Action); ok { return x.Action } return nil } // XXX_OneofFuncs is for the internal use of the proto package. 
func (*UserCommitFilesRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _UserCommitFilesRequest_OneofMarshaler, _UserCommitFilesRequest_OneofUnmarshaler, _UserCommitFilesRequest_OneofSizer, []interface{}{ (*UserCommitFilesRequest_Header)(nil), (*UserCommitFilesRequest_Action)(nil), } } func _UserCommitFilesRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { m := msg.(*UserCommitFilesRequest) // user_commit_files_request_payload switch x := m.UserCommitFilesRequestPayload.(type) { case *UserCommitFilesRequest_Header: b.EncodeVarint(1<<3 | proto.WireBytes) if err := b.EncodeMessage(x.Header); err != nil { return err } case *UserCommitFilesRequest_Action: b.EncodeVarint(2<<3 | proto.WireBytes) if err := b.EncodeMessage(x.Action); err != nil { return err } case nil: default: return fmt.Errorf("UserCommitFilesRequest.UserCommitFilesRequestPayload has unexpected type %T", x) } return nil } func _UserCommitFilesRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { m := msg.(*UserCommitFilesRequest) switch tag { case 1: // user_commit_files_request_payload.header if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } msg := new(UserCommitFilesRequestHeader) err := b.DecodeMessage(msg) m.UserCommitFilesRequestPayload = &UserCommitFilesRequest_Header{msg} return true, err case 2: // user_commit_files_request_payload.action if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } msg := new(UserCommitFilesAction) err := b.DecodeMessage(msg) m.UserCommitFilesRequestPayload = &UserCommitFilesRequest_Action{msg} return true, err default: return false, nil } } func _UserCommitFilesRequest_OneofSizer(msg proto.Message) (n int) { m := msg.(*UserCommitFilesRequest) // user_commit_files_request_payload switch x := m.UserCommitFilesRequestPayload.(type) { case *UserCommitFilesRequest_Header: s := proto.Size(x.Header) n += proto.SizeVarint(1<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *UserCommitFilesRequest_Action: s := proto.Size(x.Action) n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } return n } type UserCommitFilesResponse struct { BranchUpdate *OperationBranchUpdate `protobuf:"bytes,1,opt,name=branch_update,json=branchUpdate" json:"branch_update,omitempty"` IndexError string `protobuf:"bytes,2,opt,name=index_error,json=indexError" json:"index_error,omitempty"` PreReceiveError string `protobuf:"bytes,3,opt,name=pre_receive_error,json=preReceiveError" json:"pre_receive_error,omitempty"` } func (m *UserCommitFilesResponse) Reset() { *m = UserCommitFilesResponse{} } func (m *UserCommitFilesResponse) String() string { return proto.CompactTextString(m) } func (*UserCommitFilesResponse) ProtoMessage() {} func (*UserCommitFilesResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{21} } func (m *UserCommitFilesResponse) GetBranchUpdate() *OperationBranchUpdate { if m != nil { return m.BranchUpdate } return nil } func (m *UserCommitFilesResponse) GetIndexError() string { if m != nil { return m.IndexError } return "" } func (m *UserCommitFilesResponse) GetPreReceiveError() string { if m != nil { return m.PreReceiveError } return "" } type UserRebaseRequest struct { Repository *Repository 
`protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` User *User `protobuf:"bytes,2,opt,name=user" json:"user,omitempty"` RebaseId string `protobuf:"bytes,3,opt,name=rebase_id,json=rebaseId" json:"rebase_id,omitempty"` Branch []byte `protobuf:"bytes,4,opt,name=branch,proto3" json:"branch,omitempty"` BranchSha string `protobuf:"bytes,5,opt,name=branch_sha,json=branchSha" json:"branch_sha,omitempty"` RemoteRepository *Repository `protobuf:"bytes,6,opt,name=remote_repository,json=remoteRepository" json:"remote_repository,omitempty"` RemoteBranch []byte `protobuf:"bytes,7,opt,name=remote_branch,json=remoteBranch,proto3" json:"remote_branch,omitempty"` } func (m *UserRebaseRequest) Reset() { *m = UserRebaseRequest{} } func (m *UserRebaseRequest) String() string { return proto.CompactTextString(m) } func (*UserRebaseRequest) ProtoMessage() {} func (*UserRebaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{22} } func (m *UserRebaseRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *UserRebaseRequest) GetUser() *User { if m != nil { return m.User } return nil } func (m *UserRebaseRequest) GetRebaseId() string { if m != nil { return m.RebaseId } return "" } func (m *UserRebaseRequest) GetBranch() []byte { if m != nil { return m.Branch } return nil } func (m *UserRebaseRequest) GetBranchSha() string { if m != nil { return m.BranchSha } return "" } func (m *UserRebaseRequest) GetRemoteRepository() *Repository { if m != nil { return m.RemoteRepository } return nil } func (m *UserRebaseRequest) GetRemoteBranch() []byte { if m != nil { return m.RemoteBranch } return nil } type UserRebaseResponse struct { RebaseSha string `protobuf:"bytes,1,opt,name=rebase_sha,json=rebaseSha" json:"rebase_sha,omitempty"` PreReceiveError string `protobuf:"bytes,2,opt,name=pre_receive_error,json=preReceiveError" json:"pre_receive_error,omitempty"` GitError string `protobuf:"bytes,3,opt,name=git_error,json=gitError" json:"git_error,omitempty"` } func (m *UserRebaseResponse) Reset() { *m = UserRebaseResponse{} } func (m *UserRebaseResponse) String() string { return proto.CompactTextString(m) } func (*UserRebaseResponse) ProtoMessage() {} func (*UserRebaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{23} } func (m *UserRebaseResponse) GetRebaseSha() string { if m != nil { return m.RebaseSha } return "" } func (m *UserRebaseResponse) GetPreReceiveError() string { if m != nil { return m.PreReceiveError } return "" } func (m *UserRebaseResponse) GetGitError() string { if m != nil { return m.GitError } return "" } func init() { proto.RegisterType((*UserCreateBranchRequest)(nil), "gitaly.UserCreateBranchRequest") proto.RegisterType((*UserCreateBranchResponse)(nil), "gitaly.UserCreateBranchResponse") proto.RegisterType((*UserDeleteBranchRequest)(nil), "gitaly.UserDeleteBranchRequest") proto.RegisterType((*UserDeleteBranchResponse)(nil), "gitaly.UserDeleteBranchResponse") proto.RegisterType((*UserDeleteTagRequest)(nil), "gitaly.UserDeleteTagRequest") proto.RegisterType((*UserDeleteTagResponse)(nil), "gitaly.UserDeleteTagResponse") proto.RegisterType((*UserCreateTagRequest)(nil), "gitaly.UserCreateTagRequest") proto.RegisterType((*UserCreateTagResponse)(nil), "gitaly.UserCreateTagResponse") proto.RegisterType((*UserMergeBranchRequest)(nil), "gitaly.UserMergeBranchRequest") proto.RegisterType((*UserMergeBranchResponse)(nil), "gitaly.UserMergeBranchResponse") proto.RegisterType((*OperationBranchUpdate)(nil), 
"gitaly.OperationBranchUpdate") proto.RegisterType((*UserFFBranchRequest)(nil), "gitaly.UserFFBranchRequest") proto.RegisterType((*UserFFBranchResponse)(nil), "gitaly.UserFFBranchResponse") proto.RegisterType((*UserCherryPickRequest)(nil), "gitaly.UserCherryPickRequest") proto.RegisterType((*UserCherryPickResponse)(nil), "gitaly.UserCherryPickResponse") proto.RegisterType((*UserRevertRequest)(nil), "gitaly.UserRevertRequest") proto.RegisterType((*UserRevertResponse)(nil), "gitaly.UserRevertResponse") proto.RegisterType((*UserCommitFilesActionHeader)(nil), "gitaly.UserCommitFilesActionHeader") proto.RegisterType((*UserCommitFilesAction)(nil), "gitaly.UserCommitFilesAction") proto.RegisterType((*UserCommitFilesRequestHeader)(nil), "gitaly.UserCommitFilesRequestHeader") proto.RegisterType((*UserCommitFilesRequest)(nil), "gitaly.UserCommitFilesRequest") proto.RegisterType((*UserCommitFilesResponse)(nil), "gitaly.UserCommitFilesResponse") proto.RegisterType((*UserRebaseRequest)(nil), "gitaly.UserRebaseRequest") proto.RegisterType((*UserRebaseResponse)(nil), "gitaly.UserRebaseResponse") proto.RegisterEnum("gitaly.UserCommitFilesActionHeader_ActionType", UserCommitFilesActionHeader_ActionType_name, UserCommitFilesActionHeader_ActionType_value) } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for OperationService service type OperationServiceClient interface { UserCreateBranch(ctx context.Context, in *UserCreateBranchRequest, opts ...grpc.CallOption) (*UserCreateBranchResponse, error) UserDeleteBranch(ctx context.Context, in *UserDeleteBranchRequest, opts ...grpc.CallOption) (*UserDeleteBranchResponse, error) UserCreateTag(ctx context.Context, in *UserCreateTagRequest, opts ...grpc.CallOption) (*UserCreateTagResponse, error) UserDeleteTag(ctx context.Context, in *UserDeleteTagRequest, opts ...grpc.CallOption) (*UserDeleteTagResponse, error) UserMergeBranch(ctx context.Context, opts ...grpc.CallOption) (OperationService_UserMergeBranchClient, error) UserFFBranch(ctx context.Context, in *UserFFBranchRequest, opts ...grpc.CallOption) (*UserFFBranchResponse, error) UserCherryPick(ctx context.Context, in *UserCherryPickRequest, opts ...grpc.CallOption) (*UserCherryPickResponse, error) UserRevert(ctx context.Context, in *UserRevertRequest, opts ...grpc.CallOption) (*UserRevertResponse, error) UserCommitFiles(ctx context.Context, opts ...grpc.CallOption) (OperationService_UserCommitFilesClient, error) UserRebase(ctx context.Context, in *UserRebaseRequest, opts ...grpc.CallOption) (*UserRebaseResponse, error) } type operationServiceClient struct { cc *grpc.ClientConn } func NewOperationServiceClient(cc *grpc.ClientConn) OperationServiceClient { return &operationServiceClient{cc} } func (c *operationServiceClient) UserCreateBranch(ctx context.Context, in *UserCreateBranchRequest, opts ...grpc.CallOption) (*UserCreateBranchResponse, error) { out := new(UserCreateBranchResponse) err := grpc.Invoke(ctx, "/gitaly.OperationService/UserCreateBranch", in, out, c.cc, opts...) 
if err != nil { return nil, err } return out, nil } func (c *operationServiceClient) UserDeleteBranch(ctx context.Context, in *UserDeleteBranchRequest, opts ...grpc.CallOption) (*UserDeleteBranchResponse, error) { out := new(UserDeleteBranchResponse) err := grpc.Invoke(ctx, "/gitaly.OperationService/UserDeleteBranch", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *operationServiceClient) UserCreateTag(ctx context.Context, in *UserCreateTagRequest, opts ...grpc.CallOption) (*UserCreateTagResponse, error) { out := new(UserCreateTagResponse) err := grpc.Invoke(ctx, "/gitaly.OperationService/UserCreateTag", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *operationServiceClient) UserDeleteTag(ctx context.Context, in *UserDeleteTagRequest, opts ...grpc.CallOption) (*UserDeleteTagResponse, error) { out := new(UserDeleteTagResponse) err := grpc.Invoke(ctx, "/gitaly.OperationService/UserDeleteTag", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *operationServiceClient) UserMergeBranch(ctx context.Context, opts ...grpc.CallOption) (OperationService_UserMergeBranchClient, error) { stream, err := grpc.NewClientStream(ctx, &_OperationService_serviceDesc.Streams[0], c.cc, "/gitaly.OperationService/UserMergeBranch", opts...) if err != nil { return nil, err } x := &operationServiceUserMergeBranchClient{stream} return x, nil } type OperationService_UserMergeBranchClient interface { Send(*UserMergeBranchRequest) error Recv() (*UserMergeBranchResponse, error) grpc.ClientStream } type operationServiceUserMergeBranchClient struct { grpc.ClientStream } func (x *operationServiceUserMergeBranchClient) Send(m *UserMergeBranchRequest) error { return x.ClientStream.SendMsg(m) } func (x *operationServiceUserMergeBranchClient) Recv() (*UserMergeBranchResponse, error) { m := new(UserMergeBranchResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *operationServiceClient) UserFFBranch(ctx context.Context, in *UserFFBranchRequest, opts ...grpc.CallOption) (*UserFFBranchResponse, error) { out := new(UserFFBranchResponse) err := grpc.Invoke(ctx, "/gitaly.OperationService/UserFFBranch", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *operationServiceClient) UserCherryPick(ctx context.Context, in *UserCherryPickRequest, opts ...grpc.CallOption) (*UserCherryPickResponse, error) { out := new(UserCherryPickResponse) err := grpc.Invoke(ctx, "/gitaly.OperationService/UserCherryPick", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *operationServiceClient) UserRevert(ctx context.Context, in *UserRevertRequest, opts ...grpc.CallOption) (*UserRevertResponse, error) { out := new(UserRevertResponse) err := grpc.Invoke(ctx, "/gitaly.OperationService/UserRevert", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *operationServiceClient) UserCommitFiles(ctx context.Context, opts ...grpc.CallOption) (OperationService_UserCommitFilesClient, error) { stream, err := grpc.NewClientStream(ctx, &_OperationService_serviceDesc.Streams[1], c.cc, "/gitaly.OperationService/UserCommitFiles", opts...) 
if err != nil { return nil, err } x := &operationServiceUserCommitFilesClient{stream} return x, nil } type OperationService_UserCommitFilesClient interface { Send(*UserCommitFilesRequest) error CloseAndRecv() (*UserCommitFilesResponse, error) grpc.ClientStream } type operationServiceUserCommitFilesClient struct { grpc.ClientStream } func (x *operationServiceUserCommitFilesClient) Send(m *UserCommitFilesRequest) error { return x.ClientStream.SendMsg(m) } func (x *operationServiceUserCommitFilesClient) CloseAndRecv() (*UserCommitFilesResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(UserCommitFilesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *operationServiceClient) UserRebase(ctx context.Context, in *UserRebaseRequest, opts ...grpc.CallOption) (*UserRebaseResponse, error) { out := new(UserRebaseResponse) err := grpc.Invoke(ctx, "/gitaly.OperationService/UserRebase", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } // Server API for OperationService service type OperationServiceServer interface { UserCreateBranch(context.Context, *UserCreateBranchRequest) (*UserCreateBranchResponse, error) UserDeleteBranch(context.Context, *UserDeleteBranchRequest) (*UserDeleteBranchResponse, error) UserCreateTag(context.Context, *UserCreateTagRequest) (*UserCreateTagResponse, error) UserDeleteTag(context.Context, *UserDeleteTagRequest) (*UserDeleteTagResponse, error) UserMergeBranch(OperationService_UserMergeBranchServer) error UserFFBranch(context.Context, *UserFFBranchRequest) (*UserFFBranchResponse, error) UserCherryPick(context.Context, *UserCherryPickRequest) (*UserCherryPickResponse, error) UserRevert(context.Context, *UserRevertRequest) (*UserRevertResponse, error) UserCommitFiles(OperationService_UserCommitFilesServer) error UserRebase(context.Context, *UserRebaseRequest) (*UserRebaseResponse, error) } func RegisterOperationServiceServer(s *grpc.Server, srv OperationServiceServer) { s.RegisterService(&_OperationService_serviceDesc, srv) } func _OperationService_UserCreateBranch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UserCreateBranchRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(OperationServiceServer).UserCreateBranch(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.OperationService/UserCreateBranch", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(OperationServiceServer).UserCreateBranch(ctx, req.(*UserCreateBranchRequest)) } return interceptor(ctx, in, info, handler) } func _OperationService_UserDeleteBranch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UserDeleteBranchRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(OperationServiceServer).UserDeleteBranch(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.OperationService/UserDeleteBranch", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(OperationServiceServer).UserDeleteBranch(ctx, req.(*UserDeleteBranchRequest)) } return interceptor(ctx, in, info, handler) } func _OperationService_UserCreateTag_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UserCreateTagRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(OperationServiceServer).UserCreateTag(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.OperationService/UserCreateTag", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(OperationServiceServer).UserCreateTag(ctx, req.(*UserCreateTagRequest)) } return interceptor(ctx, in, info, handler) } func _OperationService_UserDeleteTag_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UserDeleteTagRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(OperationServiceServer).UserDeleteTag(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.OperationService/UserDeleteTag", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(OperationServiceServer).UserDeleteTag(ctx, req.(*UserDeleteTagRequest)) } return interceptor(ctx, in, info, handler) } func _OperationService_UserMergeBranch_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(OperationServiceServer).UserMergeBranch(&operationServiceUserMergeBranchServer{stream}) } type OperationService_UserMergeBranchServer interface { Send(*UserMergeBranchResponse) error Recv() (*UserMergeBranchRequest, error) grpc.ServerStream } type operationServiceUserMergeBranchServer struct { grpc.ServerStream } func (x *operationServiceUserMergeBranchServer) Send(m *UserMergeBranchResponse) error { return x.ServerStream.SendMsg(m) } func (x *operationServiceUserMergeBranchServer) Recv() (*UserMergeBranchRequest, error) { m := new(UserMergeBranchRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _OperationService_UserFFBranch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UserFFBranchRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(OperationServiceServer).UserFFBranch(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.OperationService/UserFFBranch", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(OperationServiceServer).UserFFBranch(ctx, req.(*UserFFBranchRequest)) } return interceptor(ctx, in, info, handler) } func _OperationService_UserCherryPick_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UserCherryPickRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(OperationServiceServer).UserCherryPick(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.OperationService/UserCherryPick", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(OperationServiceServer).UserCherryPick(ctx, req.(*UserCherryPickRequest)) } return interceptor(ctx, in, info, handler) } func _OperationService_UserRevert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UserRevertRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return 
srv.(OperationServiceServer).UserRevert(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.OperationService/UserRevert", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(OperationServiceServer).UserRevert(ctx, req.(*UserRevertRequest)) } return interceptor(ctx, in, info, handler) } func _OperationService_UserCommitFiles_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(OperationServiceServer).UserCommitFiles(&operationServiceUserCommitFilesServer{stream}) } type OperationService_UserCommitFilesServer interface { SendAndClose(*UserCommitFilesResponse) error Recv() (*UserCommitFilesRequest, error) grpc.ServerStream } type operationServiceUserCommitFilesServer struct { grpc.ServerStream } func (x *operationServiceUserCommitFilesServer) SendAndClose(m *UserCommitFilesResponse) error { return x.ServerStream.SendMsg(m) } func (x *operationServiceUserCommitFilesServer) Recv() (*UserCommitFilesRequest, error) { m := new(UserCommitFilesRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _OperationService_UserRebase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UserRebaseRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(OperationServiceServer).UserRebase(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.OperationService/UserRebase", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(OperationServiceServer).UserRebase(ctx, req.(*UserRebaseRequest)) } return interceptor(ctx, in, info, handler) } var _OperationService_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.OperationService", HandlerType: (*OperationServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "UserCreateBranch", Handler: _OperationService_UserCreateBranch_Handler, }, { MethodName: "UserDeleteBranch", Handler: _OperationService_UserDeleteBranch_Handler, }, { MethodName: "UserCreateTag", Handler: _OperationService_UserCreateTag_Handler, }, { MethodName: "UserDeleteTag", Handler: _OperationService_UserDeleteTag_Handler, }, { MethodName: "UserFFBranch", Handler: _OperationService_UserFFBranch_Handler, }, { MethodName: "UserCherryPick", Handler: _OperationService_UserCherryPick_Handler, }, { MethodName: "UserRevert", Handler: _OperationService_UserRevert_Handler, }, { MethodName: "UserRebase", Handler: _OperationService_UserRebase_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "UserMergeBranch", Handler: _OperationService_UserMergeBranch_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "UserCommitFiles", Handler: _OperationService_UserCommitFiles_Handler, ClientStreams: true, }, }, Metadata: "operations.proto", } func init() { proto.RegisterFile("operations.proto", fileDescriptor7) } var fileDescriptor7 = []byte{ // 1357 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcd, 0x6f, 0x1b, 0x45, 0x14, 0xf7, 0xda, 0xee, 0xc6, 0x79, 0x76, 0x1c, 0x7b, 0xfa, 0x81, 0xeb, 0x36, 0x4d, 0xba, 0xa5, 0x50, 0x2a, 0x64, 0xa1, 0x80, 0xe0, 0x54, 0x50, 0x3e, 0x1c, 0xd2, 0x42, 0xda, 0xb0, 0x4d, 0x0a, 0xb7, 0xd5, 0xc4, 0x1e, 0xec, 0x15, 0xb6, 0x77, 0x99, 0x9d, 0x44, 0x35, 0x42, 0x88, 0x0b, 0x70, 0xe5, 0xc4, 0x81, 0x13, 0x12, 0x37, 0x6e, 0x5c, 0x38, 0x70, 0xe0, 0xc4, 0x89, 0x6b, 0x0f, 0xfc, 0x3b, 0x68, 0x66, 0xde, 
0xda, 0xbb, 0xeb, 0xdd, 0x28, 0x81, 0x44, 0x54, 0x88, 0xdb, 0xee, 0x7b, 0x6f, 0xde, 0xbc, 0xf7, 0x7b, 0x1f, 0xf3, 0x66, 0xa0, 0xe6, 0xf9, 0x8c, 0x53, 0xe1, 0x7a, 0xa3, 0xa0, 0xe5, 0x73, 0x4f, 0x78, 0xc4, 0xec, 0xb9, 0x82, 0x0e, 0xc6, 0xcd, 0x4a, 0xd0, 0xa7, 0x9c, 0x75, 0x35, 0xd5, 0xfa, 0xd9, 0x80, 0x17, 0xf6, 0x03, 0xc6, 0x37, 0x38, 0xa3, 0x82, 0xad, 0x73, 0x3a, 0xea, 0xf4, 0x6d, 0xf6, 0xe9, 0x21, 0x0b, 0x04, 0x59, 0x05, 0xe0, 0xcc, 0xf7, 0x02, 0x57, 0x78, 0x7c, 0xdc, 0x30, 0x56, 0x8c, 0x3b, 0xe5, 0x55, 0xd2, 0xd2, 0x6a, 0x5a, 0xf6, 0x84, 0x63, 0x47, 0xa4, 0xc8, 0x32, 0x94, 0x0f, 0x94, 0x12, 0x67, 0x44, 0x87, 0xac, 0x91, 0x5f, 0x31, 0xee, 0x54, 0x6c, 0xd0, 0xa4, 0x87, 0x74, 0xc8, 0xc8, 0x0a, 0x14, 0x0f, 0x03, 0xc6, 0x1b, 0x05, 0xa5, 0xae, 0x12, 0xaa, 0x93, 0x36, 0xd8, 0x8a, 0x23, 0x55, 0x04, 0x82, 0x72, 0xe1, 0xf8, 0x9e, 0x3b, 0x12, 0x8d, 0xa2, 0x56, 0xa1, 0x48, 0xbb, 0x92, 0x62, 0x8d, 0xa0, 0x31, 0x6b, 0x72, 0xe0, 0x7b, 0xa3, 0x80, 0x91, 0x97, 0xc0, 0xd4, 0x9b, 0xa1, 0xbd, 0xd5, 0x70, 0x03, 0x94, 0x43, 0x2e, 0xb9, 0x0b, 0x75, 0x9f, 0x33, 0x87, 0xb3, 0x0e, 0x73, 0x8f, 0x98, 0xc3, 0x38, 0xf7, 0xb8, 0xb2, 0x76, 0xde, 0x5e, 0xf4, 0x39, 0xb3, 0x35, 0xbd, 0x2d, 0xc9, 0xd6, 0xb7, 0x88, 0xd1, 0x26, 0x1b, 0xb0, 0xe7, 0x03, 0x23, 0x6b, 0x4b, 0x43, 0x10, 0xb7, 0x08, 0x21, 0x48, 0x75, 0xcd, 0x48, 0x77, 0xed, 0x1b, 0x03, 0x2e, 0x4d, 0x15, 0xed, 0xd1, 0xde, 0x3f, 0xf1, 0xeb, 0x2a, 0x94, 0x04, 0xed, 0x45, 0x9d, 0x9a, 0x13, 0xb4, 0x77, 0x42, 0x8f, 0x36, 0xe0, 0x72, 0xc2, 0x90, 0xbf, 0xe1, 0xce, 0x1f, 0xe8, 0x8e, 0x4e, 0x8d, 0x7f, 0xd1, 0x1d, 0xf2, 0x32, 0x2c, 0x0a, 0xca, 0x7b, 0x4c, 0x38, 0x9c, 0x1d, 0xb9, 0x81, 0xeb, 0x8d, 0x30, 0x91, 0xab, 0x9a, 0x6c, 0x23, 0x95, 0x34, 0x60, 0x6e, 0xc8, 0x82, 0x80, 0xf6, 0x58, 0xe3, 0x82, 0xde, 0x04, 0x7f, 0xad, 0xcf, 0x34, 0x22, 0x11, 0x5f, 0x10, 0x91, 0x25, 0x28, 0x08, 0xda, 0x43, 0x2f, 0xca, 0xe1, 0xe6, 0x52, 0x42, 0xd2, 0xc9, 0x15, 0x30, 0xd9, 0x53, 0x37, 0x10, 0x81, 0xb2, 0xba, 0x64, 0xe3, 0x5f, 0x3a, 0x90, 0x85, 0x74, 0x20, 0x9f, 0x19, 0x70, 0x45, 0x6e, 0xbe, 0xc3, 0x78, 0xef, 0x0c, 0x32, 0x3e, 0xc4, 0x2b, 0x9f, 0x89, 0xd7, 0x35, 0x98, 0xef, 0x78, 0xc3, 0xa1, 0x2b, 0x1c, 0xb7, 0x8b, 0x46, 0x95, 0x34, 0xe1, 0x7e, 0x57, 0x7a, 0x84, 0x45, 0xad, 0x31, 0x0c, 0x8b, 0x38, 0x13, 0x3b, 0x72, 0x09, 0x2e, 0x50, 0xdf, 0x1f, 0x8c, 0x1b, 0xa6, 0x82, 0x40, 0xff, 0x58, 0x5f, 0x62, 0x21, 0xc7, 0xbc, 0x42, 0x50, 0x63, 0x06, 0x18, 0x09, 0x03, 0xd6, 0x61, 0x01, 0x2b, 0xf6, 0xd0, 0xef, 0x52, 0xc1, 0x30, 0xf0, 0x4b, 0xa1, 0x23, 0x8f, 0xc2, 0x66, 0xab, 0x95, 0xee, 0x2b, 0x21, 0xbb, 0x72, 0x10, 0xf9, 0x7b, 0x50, 0x2c, 0xe5, 0x6b, 0x05, 0xeb, 0x0b, 0xb8, 0x9c, 0x2a, 0x7c, 0xfc, 0xfe, 0x37, 0xa1, 0x22, 0xd1, 0x74, 0x3a, 0x2a, 0x17, 0xba, 0x18, 0xd8, 0xb2, 0xa4, 0xe9, 0xf4, 0xe8, 0x92, 0xdb, 0x50, 0x45, 0x13, 0x43, 0xa1, 0x82, 0x12, 0x42, 0xc3, 0x51, 0xcc, 0xfa, 0xc1, 0x80, 0x8b, 0x12, 0x82, 0xad, 0xad, 0xe7, 0x35, 0xaa, 0xd6, 0xd7, 0x58, 0xc4, 0x53, 0x13, 0x31, 0x44, 0x33, 0x51, 0x30, 0x4e, 0x1d, 0x85, 0x53, 0xf5, 0xfd, 0xdf, 0xf2, 0x58, 0x81, 0x7d, 0xc6, 0xf9, 0x78, 0xd7, 0xed, 0x7c, 0x72, 0xbe, 0x68, 0xbd, 0x02, 0xa6, 0x06, 0x07, 0xd3, 0xab, 0x1e, 0xca, 0xbc, 0xeb, 0x8a, 0x0d, 0xc5, 0xb0, 0x51, 0x20, 0x79, 0x84, 0x14, 0x67, 0x8e, 0x90, 0xec, 0xd2, 0xb8, 0x0b, 0x75, 0x7d, 0xbc, 0x46, 0x15, 0x98, 0x4a, 0x66, 0x51, 0x31, 0xd6, 0xa7, 0x5a, 0xee, 0x41, 0x4d, 0xcb, 0x46, 0xbc, 0x9d, 0xcb, 0xf4, 0x56, 0x2f, 0x9f, 0x12, 0xac, 0x3f, 0xb1, 0x8b, 0x44, 0x01, 0x3c, 0xdb, 0x58, 0xea, 0x5c, 0x77, 0x04, 0x67, 0x89, 0x58, 0x6a, 0xc6, 0x1e, 0x67, 0x3a, 0x96, 0xb2, 0x82, 0x30, 0x13, 0xa3, 0x7d, 0xaf, 
0xac, 0x69, 0x5a, 0x24, 0x35, 0x35, 0x8a, 0xe9, 0xa9, 0xf1, 0x6b, 0x1e, 0xea, 0x2a, 0x72, 0xec, 0x88, 0x49, 0x97, 0xff, 0x4f, 0x8b, 0x53, 0xa4, 0xc5, 0x33, 0x03, 0x48, 0x14, 0xbc, 0xff, 0x46, 0x4a, 0x7c, 0x9f, 0x87, 0x6b, 0x2a, 0xd9, 0xd5, 0xfa, 0x2d, 0x77, 0xc0, 0x82, 0xb5, 0x8e, 0x34, 0x77, 0x9b, 0xd1, 0x2e, 0xe3, 0x64, 0x0b, 0x4c, 0xaa, 0xfe, 0x95, 0x5f, 0xd5, 0xd5, 0x56, 0x34, 0xd4, 0x19, 0x8b, 0x5a, 0xfa, 0x67, 0x6f, 0xec, 0x33, 0x1b, 0x57, 0xcb, 0x9e, 0xfa, 0xb1, 0x3b, 0x60, 0x8e, 0x4f, 0x45, 0x1f, 0xe7, 0x92, 0x92, 0x24, 0xec, 0x52, 0xd1, 0x27, 0xb7, 0x60, 0xc1, 0x97, 0x03, 0x87, 0x77, 0x18, 0x68, 0x81, 0x82, 0x12, 0xa8, 0x84, 0x44, 0x25, 0x24, 0x8f, 0x0a, 0x1a, 0xb0, 0x37, 0xdf, 0x70, 0x3a, 0xde, 0x48, 0x30, 0x9c, 0xb1, 0xe5, 0x51, 0xa1, 0xa8, 0x1b, 0x9a, 0x68, 0x3d, 0x00, 0x98, 0x6e, 0x4f, 0x00, 0xcc, 0x0d, 0xbb, 0xbd, 0xb6, 0xd7, 0xae, 0xe5, 0x48, 0x15, 0x40, 0x7f, 0x3b, 0x9b, 0xf7, 0xed, 0x9a, 0x21, 0x79, 0xfb, 0xbb, 0x9b, 0x92, 0x97, 0x27, 0x25, 0x28, 0xee, 0x3c, 0x7a, 0xd2, 0xae, 0x15, 0x24, 0x75, 0xb3, 0xfd, 0x7e, 0x7b, 0xaf, 0x5d, 0x2b, 0x5a, 0xdf, 0x19, 0xd8, 0x4a, 0x93, 0x7e, 0x92, 0x7b, 0x60, 0xf6, 0x95, 0xaf, 0x18, 0xee, 0x5b, 0x27, 0x80, 0x65, 0x3b, 0x67, 0xe3, 0x22, 0xd2, 0x84, 0xb9, 0xd0, 0x09, 0x85, 0xc5, 0x76, 0xce, 0x0e, 0x09, 0xeb, 0x16, 0xac, 0xc8, 0x02, 0x72, 0x30, 0xca, 0x12, 0xa4, 0xc0, 0xd1, 0x28, 0x3a, 0x3e, 0x1d, 0x0f, 0x3c, 0xda, 0xb5, 0xbe, 0x2a, 0xc0, 0xf5, 0xc4, 0x4e, 0x58, 0xcd, 0x18, 0xb6, 0xf3, 0xa9, 0xe9, 0x44, 0xa1, 0x16, 0x66, 0x0a, 0xf5, 0x36, 0x54, 0xd1, 0xec, 0xb0, 0x5e, 0x75, 0x31, 0x2f, 0x68, 0xea, 0x0e, 0x56, 0xed, 0xab, 0x40, 0x50, 0x8c, 0x1e, 0x8a, 0xbe, 0xc7, 0xb5, 0x3a, 0x5d, 0xda, 0x35, 0xcd, 0x59, 0x53, 0x0c, 0xa5, 0xb4, 0x05, 0x17, 0xe3, 0xd2, 0x6c, 0x48, 0xdd, 0x01, 0x56, 0x79, 0x3d, 0x2a, 0xde, 0x96, 0x8c, 0xf4, 0x9e, 0x30, 0x77, 0xf2, 0x9e, 0x50, 0x3a, 0x79, 0x4f, 0xf8, 0x25, 0x3c, 0x2a, 0x66, 0xe2, 0x40, 0xde, 0x4e, 0x64, 0xc8, 0x8b, 0x19, 0x19, 0x12, 0x8b, 0x5b, 0x24, 0x45, 0xde, 0x9a, 0x14, 0x5e, 0x3e, 0xde, 0x50, 0xd2, 0x33, 0x2c, 0x17, 0x56, 0xda, 0xfa, 0x2d, 0xb8, 0x39, 0x9b, 0x3f, 0x5c, 0xef, 0x32, 0x49, 0xa0, 0x9f, 0xc2, 0x0b, 0x74, 0xd4, 0x90, 0x33, 0xec, 0x68, 0xcb, 0x50, 0x76, 0x47, 0x5d, 0xf6, 0x34, 0xd6, 0xcb, 0x40, 0x91, 0x8e, 0xe9, 0x51, 0x19, 0x63, 0xfd, 0x8f, 0x93, 0x63, 0x4b, 0x96, 0xfa, 0xb9, 0xcf, 0x7e, 0x5c, 0x6d, 0x13, 0x99, 0xfd, 0x34, 0xe1, 0x98, 0x89, 0x7e, 0x09, 0xb0, 0x08, 0x9c, 0xa0, 0x4f, 0x55, 0x1e, 0xcf, 0xdb, 0xf3, 0x9a, 0xf2, 0xb8, 0x4f, 0xc9, 0x3b, 0x50, 0xe7, 0x6c, 0xe8, 0x09, 0x16, 0xcd, 0x32, 0x33, 0xd3, 0xe0, 0x9a, 0x16, 0x9e, 0x52, 0x64, 0x7f, 0x44, 0x05, 0xb8, 0xbd, 0xce, 0xe6, 0x8a, 0x26, 0xea, 0x30, 0x58, 0x9f, 0x87, 0xc7, 0x93, 0x06, 0x69, 0x72, 0xeb, 0x02, 0xf4, 0x47, 0x9a, 0xa6, 0x27, 0x74, 0xf4, 0x50, 0x9a, 0x76, 0x8a, 0xc1, 0x52, 0x42, 0xd3, 0x4b, 0x1c, 0x3b, 0xa5, 0x1e, 0x9e, 0x39, 0xab, 0xbf, 0x9b, 0x50, 0x9b, 0x24, 0xc6, 0x63, 0xc6, 0x8f, 0xdc, 0x0e, 0x23, 0x1f, 0x42, 0x2d, 0xf9, 0xe4, 0x41, 0x96, 0x63, 0x79, 0x3c, 0xfb, 0x7e, 0xd3, 0x5c, 0xc9, 0x16, 0xd0, 0x3e, 0x59, 0xb9, 0x50, 0x71, 0xf4, 0x21, 0x21, 0xae, 0x38, 0xe5, 0xd1, 0x23, 0xae, 0x38, 0xed, 0x0d, 0xc2, 0xca, 0x91, 0x87, 0xb0, 0x10, 0xbb, 0xbd, 0x92, 0xeb, 0xb3, 0xd6, 0x4c, 0x2f, 0xe8, 0xcd, 0xa5, 0x0c, 0x6e, 0x52, 0xdf, 0xe4, 0x7d, 0x20, 0xae, 0x2f, 0xf9, 0x7e, 0x11, 0xd7, 0x37, 0xf3, 0xa8, 0x60, 0xe5, 0xc8, 0x47, 0xb0, 0x98, 0xb8, 0x0a, 0x92, 0x1b, 0xd1, 0x35, 0xb3, 0x37, 0xdf, 0xe6, 0x72, 0x26, 0x3f, 0xd4, 0x7a, 0xc7, 0x78, 0xcd, 0x20, 0xef, 0x41, 0x25, 0x7a, 0x7d, 0x21, 0xd7, 0xa2, 0xcb, 0x12, 0xf7, 0xae, 0xe6, 0xf5, 
0x74, 0xe6, 0xc4, 0xcc, 0x0f, 0xa0, 0x1a, 0x9f, 0xa0, 0x49, 0x1c, 0xa9, 0xe4, 0xd5, 0xa4, 0x79, 0x23, 0x8b, 0x3d, 0x51, 0xd9, 0x06, 0x98, 0x4e, 0x5f, 0xe4, 0x6a, 0xac, 0x74, 0xa3, 0xe3, 0x6c, 0xb3, 0x99, 0xc6, 0x9a, 0xa8, 0x79, 0xa2, 0x01, 0x8c, 0xf4, 0xbd, 0x38, 0x80, 0xb3, 0x9d, 0x39, 0x0e, 0x60, 0x4a, 0xc3, 0x94, 0x00, 0x4e, 0xcd, 0x93, 0x95, 0x95, 0x34, 0x2f, 0xd2, 0xb6, 0x92, 0xe6, 0x45, 0x8b, 0xd5, 0xca, 0x1d, 0x98, 0xea, 0x7d, 0xf3, 0xf5, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x39, 0x0c, 0xa6, 0xa6, 0x09, 0x15, 0x00, 0x00, } ref.pb.go000066400000000000000000001631101324746544700344540ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: ref.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type FindLocalBranchesRequest_SortBy int32 const ( FindLocalBranchesRequest_NAME FindLocalBranchesRequest_SortBy = 0 FindLocalBranchesRequest_UPDATED_ASC FindLocalBranchesRequest_SortBy = 1 FindLocalBranchesRequest_UPDATED_DESC FindLocalBranchesRequest_SortBy = 2 ) var FindLocalBranchesRequest_SortBy_name = map[int32]string{ 0: "NAME", 1: "UPDATED_ASC", 2: "UPDATED_DESC", } var FindLocalBranchesRequest_SortBy_value = map[string]int32{ "NAME": 0, "UPDATED_ASC": 1, "UPDATED_DESC": 2, } func (x FindLocalBranchesRequest_SortBy) String() string { return proto.EnumName(FindLocalBranchesRequest_SortBy_name, int32(x)) } func (FindLocalBranchesRequest_SortBy) EnumDescriptor() ([]byte, []int) { return fileDescriptor8, []int{8, 0} } type CreateBranchResponse_Status int32 const ( CreateBranchResponse_OK CreateBranchResponse_Status = 0 CreateBranchResponse_ERR_EXISTS CreateBranchResponse_Status = 1 CreateBranchResponse_ERR_INVALID CreateBranchResponse_Status = 2 CreateBranchResponse_ERR_INVALID_START_POINT CreateBranchResponse_Status = 3 ) var CreateBranchResponse_Status_name = map[int32]string{ 0: "OK", 1: "ERR_EXISTS", 2: "ERR_INVALID", 3: "ERR_INVALID_START_POINT", } var CreateBranchResponse_Status_value = map[string]int32{ "OK": 0, "ERR_EXISTS": 1, "ERR_INVALID": 2, "ERR_INVALID_START_POINT": 3, } func (x CreateBranchResponse_Status) String() string { return proto.EnumName(CreateBranchResponse_Status_name, int32(x)) } func (CreateBranchResponse_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor8, []int{19, 0} } type FindDefaultBranchNameRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` } func (m *FindDefaultBranchNameRequest) Reset() { *m = FindDefaultBranchNameRequest{} } func (m *FindDefaultBranchNameRequest) String() string { return proto.CompactTextString(m) } func (*FindDefaultBranchNameRequest) ProtoMessage() {} func (*FindDefaultBranchNameRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{0} } func (m *FindDefaultBranchNameRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } type FindDefaultBranchNameResponse struct { Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } func (m *FindDefaultBranchNameResponse) Reset() { *m = FindDefaultBranchNameResponse{} } func (m 
*FindDefaultBranchNameResponse) String() string { return proto.CompactTextString(m) } func (*FindDefaultBranchNameResponse) ProtoMessage() {} func (*FindDefaultBranchNameResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{1} } func (m *FindDefaultBranchNameResponse) GetName() []byte { if m != nil { return m.Name } return nil } type FindAllBranchNamesRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` } func (m *FindAllBranchNamesRequest) Reset() { *m = FindAllBranchNamesRequest{} } func (m *FindAllBranchNamesRequest) String() string { return proto.CompactTextString(m) } func (*FindAllBranchNamesRequest) ProtoMessage() {} func (*FindAllBranchNamesRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{2} } func (m *FindAllBranchNamesRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } type FindAllBranchNamesResponse struct { Names [][]byte `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` } func (m *FindAllBranchNamesResponse) Reset() { *m = FindAllBranchNamesResponse{} } func (m *FindAllBranchNamesResponse) String() string { return proto.CompactTextString(m) } func (*FindAllBranchNamesResponse) ProtoMessage() {} func (*FindAllBranchNamesResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{3} } func (m *FindAllBranchNamesResponse) GetNames() [][]byte { if m != nil { return m.Names } return nil } type FindAllTagNamesRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` } func (m *FindAllTagNamesRequest) Reset() { *m = FindAllTagNamesRequest{} } func (m *FindAllTagNamesRequest) String() string { return proto.CompactTextString(m) } func (*FindAllTagNamesRequest) ProtoMessage() {} func (*FindAllTagNamesRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{4} } func (m *FindAllTagNamesRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } type FindAllTagNamesResponse struct { Names [][]byte `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` } func (m *FindAllTagNamesResponse) Reset() { *m = FindAllTagNamesResponse{} } func (m *FindAllTagNamesResponse) String() string { return proto.CompactTextString(m) } func (*FindAllTagNamesResponse) ProtoMessage() {} func (*FindAllTagNamesResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{5} } func (m *FindAllTagNamesResponse) GetNames() [][]byte { if m != nil { return m.Names } return nil } type FindRefNameRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // Require that the resulting ref contains this commit as an ancestor CommitId string `protobuf:"bytes,2,opt,name=commit_id,json=commitId" json:"commit_id,omitempty"` // Example prefix: "refs/heads/". Type bytes because that is the type of ref names. 
Prefix []byte `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` } func (m *FindRefNameRequest) Reset() { *m = FindRefNameRequest{} } func (m *FindRefNameRequest) String() string { return proto.CompactTextString(m) } func (*FindRefNameRequest) ProtoMessage() {} func (*FindRefNameRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{6} } func (m *FindRefNameRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *FindRefNameRequest) GetCommitId() string { if m != nil { return m.CommitId } return "" } func (m *FindRefNameRequest) GetPrefix() []byte { if m != nil { return m.Prefix } return nil } type FindRefNameResponse struct { // Example name: "refs/heads/master". Cannot assume UTF8, so the type is bytes. Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } func (m *FindRefNameResponse) Reset() { *m = FindRefNameResponse{} } func (m *FindRefNameResponse) String() string { return proto.CompactTextString(m) } func (*FindRefNameResponse) ProtoMessage() {} func (*FindRefNameResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{7} } func (m *FindRefNameResponse) GetName() []byte { if m != nil { return m.Name } return nil } type FindLocalBranchesRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` SortBy FindLocalBranchesRequest_SortBy `protobuf:"varint,2,opt,name=sort_by,json=sortBy,enum=gitaly.FindLocalBranchesRequest_SortBy" json:"sort_by,omitempty"` } func (m *FindLocalBranchesRequest) Reset() { *m = FindLocalBranchesRequest{} } func (m *FindLocalBranchesRequest) String() string { return proto.CompactTextString(m) } func (*FindLocalBranchesRequest) ProtoMessage() {} func (*FindLocalBranchesRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{8} } func (m *FindLocalBranchesRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *FindLocalBranchesRequest) GetSortBy() FindLocalBranchesRequest_SortBy { if m != nil { return m.SortBy } return FindLocalBranchesRequest_NAME } type FindLocalBranchesResponse struct { Branches []*FindLocalBranchResponse `protobuf:"bytes,1,rep,name=branches" json:"branches,omitempty"` } func (m *FindLocalBranchesResponse) Reset() { *m = FindLocalBranchesResponse{} } func (m *FindLocalBranchesResponse) String() string { return proto.CompactTextString(m) } func (*FindLocalBranchesResponse) ProtoMessage() {} func (*FindLocalBranchesResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{9} } func (m *FindLocalBranchesResponse) GetBranches() []*FindLocalBranchResponse { if m != nil { return m.Branches } return nil } type FindLocalBranchResponse struct { Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` CommitId string `protobuf:"bytes,2,opt,name=commit_id,json=commitId" json:"commit_id,omitempty"` CommitSubject []byte `protobuf:"bytes,3,opt,name=commit_subject,json=commitSubject,proto3" json:"commit_subject,omitempty"` CommitAuthor *FindLocalBranchCommitAuthor `protobuf:"bytes,4,opt,name=commit_author,json=commitAuthor" json:"commit_author,omitempty"` CommitCommitter *FindLocalBranchCommitAuthor `protobuf:"bytes,5,opt,name=commit_committer,json=commitCommitter" json:"commit_committer,omitempty"` } func (m *FindLocalBranchResponse) Reset() { *m = FindLocalBranchResponse{} } func (m *FindLocalBranchResponse) String() string { return proto.CompactTextString(m) } func (*FindLocalBranchResponse) ProtoMessage() {} 
func (*FindLocalBranchResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{10} } func (m *FindLocalBranchResponse) GetName() []byte { if m != nil { return m.Name } return nil } func (m *FindLocalBranchResponse) GetCommitId() string { if m != nil { return m.CommitId } return "" } func (m *FindLocalBranchResponse) GetCommitSubject() []byte { if m != nil { return m.CommitSubject } return nil } func (m *FindLocalBranchResponse) GetCommitAuthor() *FindLocalBranchCommitAuthor { if m != nil { return m.CommitAuthor } return nil } func (m *FindLocalBranchResponse) GetCommitCommitter() *FindLocalBranchCommitAuthor { if m != nil { return m.CommitCommitter } return nil } type FindLocalBranchCommitAuthor struct { Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Email []byte `protobuf:"bytes,2,opt,name=email,proto3" json:"email,omitempty"` Date *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=date" json:"date,omitempty"` } func (m *FindLocalBranchCommitAuthor) Reset() { *m = FindLocalBranchCommitAuthor{} } func (m *FindLocalBranchCommitAuthor) String() string { return proto.CompactTextString(m) } func (*FindLocalBranchCommitAuthor) ProtoMessage() {} func (*FindLocalBranchCommitAuthor) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{11} } func (m *FindLocalBranchCommitAuthor) GetName() []byte { if m != nil { return m.Name } return nil } func (m *FindLocalBranchCommitAuthor) GetEmail() []byte { if m != nil { return m.Email } return nil } func (m *FindLocalBranchCommitAuthor) GetDate() *google_protobuf.Timestamp { if m != nil { return m.Date } return nil } type FindAllBranchesRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // Only return branches that are merged into root ref MergedOnly bool `protobuf:"varint,2,opt,name=merged_only,json=mergedOnly" json:"merged_only,omitempty"` // If merged_only is true, this is a list of branches from which we // return those merged into the root ref MergedBranches [][]byte `protobuf:"bytes,3,rep,name=merged_branches,json=mergedBranches,proto3" json:"merged_branches,omitempty"` } func (m *FindAllBranchesRequest) Reset() { *m = FindAllBranchesRequest{} } func (m *FindAllBranchesRequest) String() string { return proto.CompactTextString(m) } func (*FindAllBranchesRequest) ProtoMessage() {} func (*FindAllBranchesRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{12} } func (m *FindAllBranchesRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *FindAllBranchesRequest) GetMergedOnly() bool { if m != nil { return m.MergedOnly } return false } func (m *FindAllBranchesRequest) GetMergedBranches() [][]byte { if m != nil { return m.MergedBranches } return nil } type FindAllBranchesResponse struct { Branches []*FindAllBranchesResponse_Branch `protobuf:"bytes,1,rep,name=branches" json:"branches,omitempty"` } func (m *FindAllBranchesResponse) Reset() { *m = FindAllBranchesResponse{} } func (m *FindAllBranchesResponse) String() string { return proto.CompactTextString(m) } func (*FindAllBranchesResponse) ProtoMessage() {} func (*FindAllBranchesResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{13} } func (m *FindAllBranchesResponse) GetBranches() []*FindAllBranchesResponse_Branch { if m != nil { return m.Branches } return nil } type FindAllBranchesResponse_Branch struct { Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Target *GitCommit 
`protobuf:"bytes,2,opt,name=target" json:"target,omitempty"` } func (m *FindAllBranchesResponse_Branch) Reset() { *m = FindAllBranchesResponse_Branch{} } func (m *FindAllBranchesResponse_Branch) String() string { return proto.CompactTextString(m) } func (*FindAllBranchesResponse_Branch) ProtoMessage() {} func (*FindAllBranchesResponse_Branch) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{13, 0} } func (m *FindAllBranchesResponse_Branch) GetName() []byte { if m != nil { return m.Name } return nil } func (m *FindAllBranchesResponse_Branch) GetTarget() *GitCommit { if m != nil { return m.Target } return nil } type FindAllTagsRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` } func (m *FindAllTagsRequest) Reset() { *m = FindAllTagsRequest{} } func (m *FindAllTagsRequest) String() string { return proto.CompactTextString(m) } func (*FindAllTagsRequest) ProtoMessage() {} func (*FindAllTagsRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{14} } func (m *FindAllTagsRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } type FindAllTagsResponse struct { Tags []*Tag `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` } func (m *FindAllTagsResponse) Reset() { *m = FindAllTagsResponse{} } func (m *FindAllTagsResponse) String() string { return proto.CompactTextString(m) } func (*FindAllTagsResponse) ProtoMessage() {} func (*FindAllTagsResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{15} } func (m *FindAllTagsResponse) GetTags() []*Tag { if m != nil { return m.Tags } return nil } type RefExistsRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // Any ref, e.g. 'refs/heads/master' or 'refs/tags/v1.0.1'. Must start with 'refs/'. 
Ref []byte `protobuf:"bytes,2,opt,name=ref,proto3" json:"ref,omitempty"` } func (m *RefExistsRequest) Reset() { *m = RefExistsRequest{} } func (m *RefExistsRequest) String() string { return proto.CompactTextString(m) } func (*RefExistsRequest) ProtoMessage() {} func (*RefExistsRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{16} } func (m *RefExistsRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *RefExistsRequest) GetRef() []byte { if m != nil { return m.Ref } return nil } type RefExistsResponse struct { Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` } func (m *RefExistsResponse) Reset() { *m = RefExistsResponse{} } func (m *RefExistsResponse) String() string { return proto.CompactTextString(m) } func (*RefExistsResponse) ProtoMessage() {} func (*RefExistsResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{17} } func (m *RefExistsResponse) GetValue() bool { if m != nil { return m.Value } return false } type CreateBranchRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Name []byte `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` StartPoint []byte `protobuf:"bytes,3,opt,name=start_point,json=startPoint,proto3" json:"start_point,omitempty"` } func (m *CreateBranchRequest) Reset() { *m = CreateBranchRequest{} } func (m *CreateBranchRequest) String() string { return proto.CompactTextString(m) } func (*CreateBranchRequest) ProtoMessage() {} func (*CreateBranchRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{18} } func (m *CreateBranchRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *CreateBranchRequest) GetName() []byte { if m != nil { return m.Name } return nil } func (m *CreateBranchRequest) GetStartPoint() []byte { if m != nil { return m.StartPoint } return nil } type CreateBranchResponse struct { Status CreateBranchResponse_Status `protobuf:"varint,1,opt,name=status,enum=gitaly.CreateBranchResponse_Status" json:"status,omitempty"` Branch *Branch `protobuf:"bytes,2,opt,name=branch" json:"branch,omitempty"` } func (m *CreateBranchResponse) Reset() { *m = CreateBranchResponse{} } func (m *CreateBranchResponse) String() string { return proto.CompactTextString(m) } func (*CreateBranchResponse) ProtoMessage() {} func (*CreateBranchResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{19} } func (m *CreateBranchResponse) GetStatus() CreateBranchResponse_Status { if m != nil { return m.Status } return CreateBranchResponse_OK } func (m *CreateBranchResponse) GetBranch() *Branch { if m != nil { return m.Branch } return nil } type DeleteBranchRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Name []byte `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` } func (m *DeleteBranchRequest) Reset() { *m = DeleteBranchRequest{} } func (m *DeleteBranchRequest) String() string { return proto.CompactTextString(m) } func (*DeleteBranchRequest) ProtoMessage() {} func (*DeleteBranchRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{20} } func (m *DeleteBranchRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *DeleteBranchRequest) GetName() []byte { if m != nil { return m.Name } return nil } // Not clear if we need to do status signaling; we can add fields later. 
type DeleteBranchResponse struct { } func (m *DeleteBranchResponse) Reset() { *m = DeleteBranchResponse{} } func (m *DeleteBranchResponse) String() string { return proto.CompactTextString(m) } func (*DeleteBranchResponse) ProtoMessage() {} func (*DeleteBranchResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{21} } type FindBranchRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // Name can be 'master' but also 'refs/heads/master' Name []byte `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` } func (m *FindBranchRequest) Reset() { *m = FindBranchRequest{} } func (m *FindBranchRequest) String() string { return proto.CompactTextString(m) } func (*FindBranchRequest) ProtoMessage() {} func (*FindBranchRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{22} } func (m *FindBranchRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *FindBranchRequest) GetName() []byte { if m != nil { return m.Name } return nil } type FindBranchResponse struct { Branch *Branch `protobuf:"bytes,1,opt,name=branch" json:"branch,omitempty"` } func (m *FindBranchResponse) Reset() { *m = FindBranchResponse{} } func (m *FindBranchResponse) String() string { return proto.CompactTextString(m) } func (*FindBranchResponse) ProtoMessage() {} func (*FindBranchResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{23} } func (m *FindBranchResponse) GetBranch() *Branch { if m != nil { return m.Branch } return nil } type DeleteRefsRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` ExceptWithPrefix [][]byte `protobuf:"bytes,2,rep,name=except_with_prefix,json=exceptWithPrefix,proto3" json:"except_with_prefix,omitempty"` } func (m *DeleteRefsRequest) Reset() { *m = DeleteRefsRequest{} } func (m *DeleteRefsRequest) String() string { return proto.CompactTextString(m) } func (*DeleteRefsRequest) ProtoMessage() {} func (*DeleteRefsRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{24} } func (m *DeleteRefsRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *DeleteRefsRequest) GetExceptWithPrefix() [][]byte { if m != nil { return m.ExceptWithPrefix } return nil } type DeleteRefsResponse struct { } func (m *DeleteRefsResponse) Reset() { *m = DeleteRefsResponse{} } func (m *DeleteRefsResponse) String() string { return proto.CompactTextString(m) } func (*DeleteRefsResponse) ProtoMessage() {} func (*DeleteRefsResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{25} } type ListBranchNamesContainingCommitRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` CommitId string `protobuf:"bytes,2,opt,name=commit_id,json=commitId" json:"commit_id,omitempty"` } func (m *ListBranchNamesContainingCommitRequest) Reset() { *m = ListBranchNamesContainingCommitRequest{} } func (m *ListBranchNamesContainingCommitRequest) String() string { return proto.CompactTextString(m) } func (*ListBranchNamesContainingCommitRequest) ProtoMessage() {} func (*ListBranchNamesContainingCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{26} } func (m *ListBranchNamesContainingCommitRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *ListBranchNamesContainingCommitRequest) GetCommitId() string { if m != nil { return m.CommitId } return "" } type 
ListBranchNamesContainingCommitResponse struct { BranchNames [][]byte `protobuf:"bytes,2,rep,name=branch_names,json=branchNames,proto3" json:"branch_names,omitempty"` } func (m *ListBranchNamesContainingCommitResponse) Reset() { *m = ListBranchNamesContainingCommitResponse{} } func (m *ListBranchNamesContainingCommitResponse) String() string { return proto.CompactTextString(m) } func (*ListBranchNamesContainingCommitResponse) ProtoMessage() {} func (*ListBranchNamesContainingCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{27} } func (m *ListBranchNamesContainingCommitResponse) GetBranchNames() [][]byte { if m != nil { return m.BranchNames } return nil } type ListTagNamesContainingCommitRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` CommitId string `protobuf:"bytes,2,opt,name=commit_id,json=commitId" json:"commit_id,omitempty"` } func (m *ListTagNamesContainingCommitRequest) Reset() { *m = ListTagNamesContainingCommitRequest{} } func (m *ListTagNamesContainingCommitRequest) String() string { return proto.CompactTextString(m) } func (*ListTagNamesContainingCommitRequest) ProtoMessage() {} func (*ListTagNamesContainingCommitRequest) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{28} } func (m *ListTagNamesContainingCommitRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *ListTagNamesContainingCommitRequest) GetCommitId() string { if m != nil { return m.CommitId } return "" } type ListTagNamesContainingCommitResponse struct { TagNames [][]byte `protobuf:"bytes,2,rep,name=tag_names,json=tagNames,proto3" json:"tag_names,omitempty"` } func (m *ListTagNamesContainingCommitResponse) Reset() { *m = ListTagNamesContainingCommitResponse{} } func (m *ListTagNamesContainingCommitResponse) String() string { return proto.CompactTextString(m) } func (*ListTagNamesContainingCommitResponse) ProtoMessage() {} func (*ListTagNamesContainingCommitResponse) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{29} } func (m *ListTagNamesContainingCommitResponse) GetTagNames() [][]byte { if m != nil { return m.TagNames } return nil } func init() { proto.RegisterType((*FindDefaultBranchNameRequest)(nil), "gitaly.FindDefaultBranchNameRequest") proto.RegisterType((*FindDefaultBranchNameResponse)(nil), "gitaly.FindDefaultBranchNameResponse") proto.RegisterType((*FindAllBranchNamesRequest)(nil), "gitaly.FindAllBranchNamesRequest") proto.RegisterType((*FindAllBranchNamesResponse)(nil), "gitaly.FindAllBranchNamesResponse") proto.RegisterType((*FindAllTagNamesRequest)(nil), "gitaly.FindAllTagNamesRequest") proto.RegisterType((*FindAllTagNamesResponse)(nil), "gitaly.FindAllTagNamesResponse") proto.RegisterType((*FindRefNameRequest)(nil), "gitaly.FindRefNameRequest") proto.RegisterType((*FindRefNameResponse)(nil), "gitaly.FindRefNameResponse") proto.RegisterType((*FindLocalBranchesRequest)(nil), "gitaly.FindLocalBranchesRequest") proto.RegisterType((*FindLocalBranchesResponse)(nil), "gitaly.FindLocalBranchesResponse") proto.RegisterType((*FindLocalBranchResponse)(nil), "gitaly.FindLocalBranchResponse") proto.RegisterType((*FindLocalBranchCommitAuthor)(nil), "gitaly.FindLocalBranchCommitAuthor") proto.RegisterType((*FindAllBranchesRequest)(nil), "gitaly.FindAllBranchesRequest") proto.RegisterType((*FindAllBranchesResponse)(nil), "gitaly.FindAllBranchesResponse") proto.RegisterType((*FindAllBranchesResponse_Branch)(nil), "gitaly.FindAllBranchesResponse.Branch") 
proto.RegisterType((*FindAllTagsRequest)(nil), "gitaly.FindAllTagsRequest") proto.RegisterType((*FindAllTagsResponse)(nil), "gitaly.FindAllTagsResponse") proto.RegisterType((*RefExistsRequest)(nil), "gitaly.RefExistsRequest") proto.RegisterType((*RefExistsResponse)(nil), "gitaly.RefExistsResponse") proto.RegisterType((*CreateBranchRequest)(nil), "gitaly.CreateBranchRequest") proto.RegisterType((*CreateBranchResponse)(nil), "gitaly.CreateBranchResponse") proto.RegisterType((*DeleteBranchRequest)(nil), "gitaly.DeleteBranchRequest") proto.RegisterType((*DeleteBranchResponse)(nil), "gitaly.DeleteBranchResponse") proto.RegisterType((*FindBranchRequest)(nil), "gitaly.FindBranchRequest") proto.RegisterType((*FindBranchResponse)(nil), "gitaly.FindBranchResponse") proto.RegisterType((*DeleteRefsRequest)(nil), "gitaly.DeleteRefsRequest") proto.RegisterType((*DeleteRefsResponse)(nil), "gitaly.DeleteRefsResponse") proto.RegisterType((*ListBranchNamesContainingCommitRequest)(nil), "gitaly.ListBranchNamesContainingCommitRequest") proto.RegisterType((*ListBranchNamesContainingCommitResponse)(nil), "gitaly.ListBranchNamesContainingCommitResponse") proto.RegisterType((*ListTagNamesContainingCommitRequest)(nil), "gitaly.ListTagNamesContainingCommitRequest") proto.RegisterType((*ListTagNamesContainingCommitResponse)(nil), "gitaly.ListTagNamesContainingCommitResponse") proto.RegisterEnum("gitaly.FindLocalBranchesRequest_SortBy", FindLocalBranchesRequest_SortBy_name, FindLocalBranchesRequest_SortBy_value) proto.RegisterEnum("gitaly.CreateBranchResponse_Status", CreateBranchResponse_Status_name, CreateBranchResponse_Status_value) } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for RefService service type RefServiceClient interface { FindDefaultBranchName(ctx context.Context, in *FindDefaultBranchNameRequest, opts ...grpc.CallOption) (*FindDefaultBranchNameResponse, error) FindAllBranchNames(ctx context.Context, in *FindAllBranchNamesRequest, opts ...grpc.CallOption) (RefService_FindAllBranchNamesClient, error) FindAllTagNames(ctx context.Context, in *FindAllTagNamesRequest, opts ...grpc.CallOption) (RefService_FindAllTagNamesClient, error) // Find a Ref matching the given constraints. Response may be empty. 
FindRefName(ctx context.Context, in *FindRefNameRequest, opts ...grpc.CallOption) (*FindRefNameResponse, error) // Return a stream so we can divide the response in chunks of branches FindLocalBranches(ctx context.Context, in *FindLocalBranchesRequest, opts ...grpc.CallOption) (RefService_FindLocalBranchesClient, error) FindAllBranches(ctx context.Context, in *FindAllBranchesRequest, opts ...grpc.CallOption) (RefService_FindAllBranchesClient, error) FindAllTags(ctx context.Context, in *FindAllTagsRequest, opts ...grpc.CallOption) (RefService_FindAllTagsClient, error) RefExists(ctx context.Context, in *RefExistsRequest, opts ...grpc.CallOption) (*RefExistsResponse, error) CreateBranch(ctx context.Context, in *CreateBranchRequest, opts ...grpc.CallOption) (*CreateBranchResponse, error) DeleteBranch(ctx context.Context, in *DeleteBranchRequest, opts ...grpc.CallOption) (*DeleteBranchResponse, error) FindBranch(ctx context.Context, in *FindBranchRequest, opts ...grpc.CallOption) (*FindBranchResponse, error) DeleteRefs(ctx context.Context, in *DeleteRefsRequest, opts ...grpc.CallOption) (*DeleteRefsResponse, error) ListBranchNamesContainingCommit(ctx context.Context, in *ListBranchNamesContainingCommitRequest, opts ...grpc.CallOption) (*ListBranchNamesContainingCommitResponse, error) ListTagNamesContainingCommit(ctx context.Context, in *ListTagNamesContainingCommitRequest, opts ...grpc.CallOption) (*ListTagNamesContainingCommitResponse, error) } type refServiceClient struct { cc *grpc.ClientConn } func NewRefServiceClient(cc *grpc.ClientConn) RefServiceClient { return &refServiceClient{cc} } func (c *refServiceClient) FindDefaultBranchName(ctx context.Context, in *FindDefaultBranchNameRequest, opts ...grpc.CallOption) (*FindDefaultBranchNameResponse, error) { out := new(FindDefaultBranchNameResponse) err := grpc.Invoke(ctx, "/gitaly.RefService/FindDefaultBranchName", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *refServiceClient) FindAllBranchNames(ctx context.Context, in *FindAllBranchNamesRequest, opts ...grpc.CallOption) (RefService_FindAllBranchNamesClient, error) { stream, err := grpc.NewClientStream(ctx, &_RefService_serviceDesc.Streams[0], c.cc, "/gitaly.RefService/FindAllBranchNames", opts...) if err != nil { return nil, err } x := &refServiceFindAllBranchNamesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type RefService_FindAllBranchNamesClient interface { Recv() (*FindAllBranchNamesResponse, error) grpc.ClientStream } type refServiceFindAllBranchNamesClient struct { grpc.ClientStream } func (x *refServiceFindAllBranchNamesClient) Recv() (*FindAllBranchNamesResponse, error) { m := new(FindAllBranchNamesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *refServiceClient) FindAllTagNames(ctx context.Context, in *FindAllTagNamesRequest, opts ...grpc.CallOption) (RefService_FindAllTagNamesClient, error) { stream, err := grpc.NewClientStream(ctx, &_RefService_serviceDesc.Streams[1], c.cc, "/gitaly.RefService/FindAllTagNames", opts...) 
if err != nil { return nil, err } x := &refServiceFindAllTagNamesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type RefService_FindAllTagNamesClient interface { Recv() (*FindAllTagNamesResponse, error) grpc.ClientStream } type refServiceFindAllTagNamesClient struct { grpc.ClientStream } func (x *refServiceFindAllTagNamesClient) Recv() (*FindAllTagNamesResponse, error) { m := new(FindAllTagNamesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *refServiceClient) FindRefName(ctx context.Context, in *FindRefNameRequest, opts ...grpc.CallOption) (*FindRefNameResponse, error) { out := new(FindRefNameResponse) err := grpc.Invoke(ctx, "/gitaly.RefService/FindRefName", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *refServiceClient) FindLocalBranches(ctx context.Context, in *FindLocalBranchesRequest, opts ...grpc.CallOption) (RefService_FindLocalBranchesClient, error) { stream, err := grpc.NewClientStream(ctx, &_RefService_serviceDesc.Streams[2], c.cc, "/gitaly.RefService/FindLocalBranches", opts...) if err != nil { return nil, err } x := &refServiceFindLocalBranchesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type RefService_FindLocalBranchesClient interface { Recv() (*FindLocalBranchesResponse, error) grpc.ClientStream } type refServiceFindLocalBranchesClient struct { grpc.ClientStream } func (x *refServiceFindLocalBranchesClient) Recv() (*FindLocalBranchesResponse, error) { m := new(FindLocalBranchesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *refServiceClient) FindAllBranches(ctx context.Context, in *FindAllBranchesRequest, opts ...grpc.CallOption) (RefService_FindAllBranchesClient, error) { stream, err := grpc.NewClientStream(ctx, &_RefService_serviceDesc.Streams[3], c.cc, "/gitaly.RefService/FindAllBranches", opts...) if err != nil { return nil, err } x := &refServiceFindAllBranchesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type RefService_FindAllBranchesClient interface { Recv() (*FindAllBranchesResponse, error) grpc.ClientStream } type refServiceFindAllBranchesClient struct { grpc.ClientStream } func (x *refServiceFindAllBranchesClient) Recv() (*FindAllBranchesResponse, error) { m := new(FindAllBranchesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *refServiceClient) FindAllTags(ctx context.Context, in *FindAllTagsRequest, opts ...grpc.CallOption) (RefService_FindAllTagsClient, error) { stream, err := grpc.NewClientStream(ctx, &_RefService_serviceDesc.Streams[4], c.cc, "/gitaly.RefService/FindAllTags", opts...) 
if err != nil { return nil, err } x := &refServiceFindAllTagsClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type RefService_FindAllTagsClient interface { Recv() (*FindAllTagsResponse, error) grpc.ClientStream } type refServiceFindAllTagsClient struct { grpc.ClientStream } func (x *refServiceFindAllTagsClient) Recv() (*FindAllTagsResponse, error) { m := new(FindAllTagsResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *refServiceClient) RefExists(ctx context.Context, in *RefExistsRequest, opts ...grpc.CallOption) (*RefExistsResponse, error) { out := new(RefExistsResponse) err := grpc.Invoke(ctx, "/gitaly.RefService/RefExists", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *refServiceClient) CreateBranch(ctx context.Context, in *CreateBranchRequest, opts ...grpc.CallOption) (*CreateBranchResponse, error) { out := new(CreateBranchResponse) err := grpc.Invoke(ctx, "/gitaly.RefService/CreateBranch", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *refServiceClient) DeleteBranch(ctx context.Context, in *DeleteBranchRequest, opts ...grpc.CallOption) (*DeleteBranchResponse, error) { out := new(DeleteBranchResponse) err := grpc.Invoke(ctx, "/gitaly.RefService/DeleteBranch", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *refServiceClient) FindBranch(ctx context.Context, in *FindBranchRequest, opts ...grpc.CallOption) (*FindBranchResponse, error) { out := new(FindBranchResponse) err := grpc.Invoke(ctx, "/gitaly.RefService/FindBranch", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *refServiceClient) DeleteRefs(ctx context.Context, in *DeleteRefsRequest, opts ...grpc.CallOption) (*DeleteRefsResponse, error) { out := new(DeleteRefsResponse) err := grpc.Invoke(ctx, "/gitaly.RefService/DeleteRefs", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *refServiceClient) ListBranchNamesContainingCommit(ctx context.Context, in *ListBranchNamesContainingCommitRequest, opts ...grpc.CallOption) (*ListBranchNamesContainingCommitResponse, error) { out := new(ListBranchNamesContainingCommitResponse) err := grpc.Invoke(ctx, "/gitaly.RefService/ListBranchNamesContainingCommit", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *refServiceClient) ListTagNamesContainingCommit(ctx context.Context, in *ListTagNamesContainingCommitRequest, opts ...grpc.CallOption) (*ListTagNamesContainingCommitResponse, error) { out := new(ListTagNamesContainingCommitResponse) err := grpc.Invoke(ctx, "/gitaly.RefService/ListTagNamesContainingCommit", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } // Server API for RefService service type RefServiceServer interface { FindDefaultBranchName(context.Context, *FindDefaultBranchNameRequest) (*FindDefaultBranchNameResponse, error) FindAllBranchNames(*FindAllBranchNamesRequest, RefService_FindAllBranchNamesServer) error FindAllTagNames(*FindAllTagNamesRequest, RefService_FindAllTagNamesServer) error // Find a Ref matching the given constraints. Response may be empty. 
FindRefName(context.Context, *FindRefNameRequest) (*FindRefNameResponse, error) // Return a stream so we can divide the response in chunks of branches FindLocalBranches(*FindLocalBranchesRequest, RefService_FindLocalBranchesServer) error FindAllBranches(*FindAllBranchesRequest, RefService_FindAllBranchesServer) error FindAllTags(*FindAllTagsRequest, RefService_FindAllTagsServer) error RefExists(context.Context, *RefExistsRequest) (*RefExistsResponse, error) CreateBranch(context.Context, *CreateBranchRequest) (*CreateBranchResponse, error) DeleteBranch(context.Context, *DeleteBranchRequest) (*DeleteBranchResponse, error) FindBranch(context.Context, *FindBranchRequest) (*FindBranchResponse, error) DeleteRefs(context.Context, *DeleteRefsRequest) (*DeleteRefsResponse, error) ListBranchNamesContainingCommit(context.Context, *ListBranchNamesContainingCommitRequest) (*ListBranchNamesContainingCommitResponse, error) ListTagNamesContainingCommit(context.Context, *ListTagNamesContainingCommitRequest) (*ListTagNamesContainingCommitResponse, error) } func RegisterRefServiceServer(s *grpc.Server, srv RefServiceServer) { s.RegisterService(&_RefService_serviceDesc, srv) } func _RefService_FindDefaultBranchName_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FindDefaultBranchNameRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RefServiceServer).FindDefaultBranchName(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RefService/FindDefaultBranchName", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RefServiceServer).FindDefaultBranchName(ctx, req.(*FindDefaultBranchNameRequest)) } return interceptor(ctx, in, info, handler) } func _RefService_FindAllBranchNames_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(FindAllBranchNamesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(RefServiceServer).FindAllBranchNames(m, &refServiceFindAllBranchNamesServer{stream}) } type RefService_FindAllBranchNamesServer interface { Send(*FindAllBranchNamesResponse) error grpc.ServerStream } type refServiceFindAllBranchNamesServer struct { grpc.ServerStream } func (x *refServiceFindAllBranchNamesServer) Send(m *FindAllBranchNamesResponse) error { return x.ServerStream.SendMsg(m) } func _RefService_FindAllTagNames_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(FindAllTagNamesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(RefServiceServer).FindAllTagNames(m, &refServiceFindAllTagNamesServer{stream}) } type RefService_FindAllTagNamesServer interface { Send(*FindAllTagNamesResponse) error grpc.ServerStream } type refServiceFindAllTagNamesServer struct { grpc.ServerStream } func (x *refServiceFindAllTagNamesServer) Send(m *FindAllTagNamesResponse) error { return x.ServerStream.SendMsg(m) } func _RefService_FindRefName_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FindRefNameRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RefServiceServer).FindRefName(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RefService/FindRefName", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RefServiceServer).FindRefName(ctx, 
req.(*FindRefNameRequest)) } return interceptor(ctx, in, info, handler) } func _RefService_FindLocalBranches_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(FindLocalBranchesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(RefServiceServer).FindLocalBranches(m, &refServiceFindLocalBranchesServer{stream}) } type RefService_FindLocalBranchesServer interface { Send(*FindLocalBranchesResponse) error grpc.ServerStream } type refServiceFindLocalBranchesServer struct { grpc.ServerStream } func (x *refServiceFindLocalBranchesServer) Send(m *FindLocalBranchesResponse) error { return x.ServerStream.SendMsg(m) } func _RefService_FindAllBranches_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(FindAllBranchesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(RefServiceServer).FindAllBranches(m, &refServiceFindAllBranchesServer{stream}) } type RefService_FindAllBranchesServer interface { Send(*FindAllBranchesResponse) error grpc.ServerStream } type refServiceFindAllBranchesServer struct { grpc.ServerStream } func (x *refServiceFindAllBranchesServer) Send(m *FindAllBranchesResponse) error { return x.ServerStream.SendMsg(m) } func _RefService_FindAllTags_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(FindAllTagsRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(RefServiceServer).FindAllTags(m, &refServiceFindAllTagsServer{stream}) } type RefService_FindAllTagsServer interface { Send(*FindAllTagsResponse) error grpc.ServerStream } type refServiceFindAllTagsServer struct { grpc.ServerStream } func (x *refServiceFindAllTagsServer) Send(m *FindAllTagsResponse) error { return x.ServerStream.SendMsg(m) } func _RefService_RefExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RefExistsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RefServiceServer).RefExists(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RefService/RefExists", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RefServiceServer).RefExists(ctx, req.(*RefExistsRequest)) } return interceptor(ctx, in, info, handler) } func _RefService_CreateBranch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateBranchRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RefServiceServer).CreateBranch(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RefService/CreateBranch", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RefServiceServer).CreateBranch(ctx, req.(*CreateBranchRequest)) } return interceptor(ctx, in, info, handler) } func _RefService_DeleteBranch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteBranchRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RefServiceServer).DeleteBranch(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RefService/DeleteBranch", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RefServiceServer).DeleteBranch(ctx, req.(*DeleteBranchRequest)) } return 
interceptor(ctx, in, info, handler) } func _RefService_FindBranch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FindBranchRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RefServiceServer).FindBranch(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RefService/FindBranch", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RefServiceServer).FindBranch(ctx, req.(*FindBranchRequest)) } return interceptor(ctx, in, info, handler) } func _RefService_DeleteRefs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DeleteRefsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RefServiceServer).DeleteRefs(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RefService/DeleteRefs", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RefServiceServer).DeleteRefs(ctx, req.(*DeleteRefsRequest)) } return interceptor(ctx, in, info, handler) } func _RefService_ListBranchNamesContainingCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListBranchNamesContainingCommitRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RefServiceServer).ListBranchNamesContainingCommit(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RefService/ListBranchNamesContainingCommit", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RefServiceServer).ListBranchNamesContainingCommit(ctx, req.(*ListBranchNamesContainingCommitRequest)) } return interceptor(ctx, in, info, handler) } func _RefService_ListTagNamesContainingCommit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListTagNamesContainingCommitRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RefServiceServer).ListTagNamesContainingCommit(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RefService/ListTagNamesContainingCommit", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RefServiceServer).ListTagNamesContainingCommit(ctx, req.(*ListTagNamesContainingCommitRequest)) } return interceptor(ctx, in, info, handler) } var _RefService_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.RefService", HandlerType: (*RefServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "FindDefaultBranchName", Handler: _RefService_FindDefaultBranchName_Handler, }, { MethodName: "FindRefName", Handler: _RefService_FindRefName_Handler, }, { MethodName: "RefExists", Handler: _RefService_RefExists_Handler, }, { MethodName: "CreateBranch", Handler: _RefService_CreateBranch_Handler, }, { MethodName: "DeleteBranch", Handler: _RefService_DeleteBranch_Handler, }, { MethodName: "FindBranch", Handler: _RefService_FindBranch_Handler, }, { MethodName: "DeleteRefs", Handler: _RefService_DeleteRefs_Handler, }, { MethodName: "ListBranchNamesContainingCommit", Handler: _RefService_ListBranchNamesContainingCommit_Handler, }, { MethodName: "ListTagNamesContainingCommit", 
Handler: _RefService_ListTagNamesContainingCommit_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "FindAllBranchNames", Handler: _RefService_FindAllBranchNames_Handler, ServerStreams: true, }, { StreamName: "FindAllTagNames", Handler: _RefService_FindAllTagNames_Handler, ServerStreams: true, }, { StreamName: "FindLocalBranches", Handler: _RefService_FindLocalBranches_Handler, ServerStreams: true, }, { StreamName: "FindAllBranches", Handler: _RefService_FindAllBranches_Handler, ServerStreams: true, }, { StreamName: "FindAllTags", Handler: _RefService_FindAllTags_Handler, ServerStreams: true, }, }, Metadata: "ref.proto", } func init() { proto.RegisterFile("ref.proto", fileDescriptor8) } var fileDescriptor8 = []byte{ // 1218 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x72, 0x22, 0x45, 0x14, 0xce, 0x10, 0x16, 0xe1, 0x80, 0x64, 0xd2, 0x89, 0x59, 0x32, 0xc4, 0x25, 0xe9, 0xdd, 0xcd, 0x4f, 0xb9, 0x45, 0x2c, 0xb6, 0xf4, 0x46, 0x2f, 0x24, 0x80, 0x1b, 0x76, 0x23, 0x49, 0x35, 0xb8, 0xa6, 0x4a, 0xad, 0xa9, 0x01, 0x1a, 0x32, 0x16, 0x30, 0x38, 0xd3, 0x64, 0x43, 0x59, 0x7a, 0xe5, 0x9d, 0xf7, 0x3e, 0x82, 0xaf, 0xe2, 0x85, 0x4f, 0xe2, 0x5b, 0x58, 0x74, 0xf7, 0xfc, 0x40, 0x06, 0x92, 0x12, 0xe3, 0x15, 0xcc, 0xe9, 0x73, 0xbe, 0xd3, 0xe7, 0xa7, 0xbf, 0x73, 0x20, 0x61, 0xd3, 0x4e, 0x7e, 0x68, 0x5b, 0xcc, 0x42, 0xb1, 0xae, 0xc9, 0x8c, 0xde, 0x58, 0x4b, 0x39, 0x57, 0x86, 0x4d, 0xdb, 0x42, 0xaa, 0xe5, 0xba, 0x96, 0xd5, 0xed, 0xd1, 0x63, 0xfe, 0xd5, 0x1c, 0x75, 0x8e, 0x99, 0xd9, 0xa7, 0x0e, 0x33, 0xfa, 0x43, 0xa1, 0x80, 0x09, 0xec, 0x7c, 0x69, 0x0e, 0xda, 0x65, 0xda, 0x31, 0x46, 0x3d, 0x76, 0x62, 0x1b, 0x83, 0xd6, 0x55, 0xcd, 0xe8, 0x53, 0x42, 0x7f, 0x1c, 0x51, 0x87, 0xa1, 0x02, 0x80, 0x4d, 0x87, 0x96, 0x63, 0x32, 0xcb, 0x1e, 0x67, 0x94, 0x5d, 0xe5, 0x30, 0x59, 0x40, 0x79, 0xe1, 0x2b, 0x4f, 0xbc, 0x13, 0x12, 0xd0, 0xc2, 0x2f, 0xe1, 0xc3, 0x39, 0x98, 0xce, 0xd0, 0x1a, 0x38, 0x14, 0x21, 0x88, 0x0e, 0x8c, 0x3e, 0xe5, 0x70, 0x29, 0xc2, 0xff, 0xe3, 0x73, 0xd8, 0x9e, 0x18, 0x15, 0x7b, 0x3d, 0xdf, 0xc0, 0x59, 0xe6, 0x16, 0x05, 0xd0, 0xc2, 0x00, 0xe5, 0x15, 0x36, 0xe1, 0xd1, 0xc4, 0xad, 0x93, 0x51, 0x76, 0x57, 0x0f, 0x53, 0x44, 0x7c, 0xe0, 0x33, 0xd8, 0x92, 0x36, 0x0d, 0xa3, 0xbb, 0xf4, 0x0d, 0x8e, 0xe1, 0xf1, 0x2d, 0xb4, 0x85, 0xee, 0x7f, 0x06, 0x34, 0x31, 0x20, 0xb4, 0xb3, 0x64, 0x09, 0x50, 0x16, 0x12, 0x2d, 0xab, 0xdf, 0x37, 0x99, 0x6e, 0xb6, 0x33, 0x91, 0x5d, 0xe5, 0x30, 0x41, 0xe2, 0x42, 0x50, 0x6d, 0xa3, 0x2d, 0x88, 0x0d, 0x6d, 0xda, 0x31, 0x6f, 0x32, 0xab, 0xbc, 0x00, 0xf2, 0x0b, 0x1f, 0xc1, 0xc6, 0x94, 0xfb, 0x05, 0xd5, 0xfa, 0x53, 0x81, 0xcc, 0x44, 0xf7, 0xcc, 0x6a, 0x19, 0x32, 0xbf, 0x4b, 0xe5, 0x0a, 0x7d, 0x01, 0xef, 0x39, 0x96, 0xcd, 0xf4, 0xe6, 0x98, 0x5f, 0x37, 0x5d, 0x38, 0x70, 0x0d, 0xe6, 0xb9, 0xc9, 0xd7, 0x2d, 0x9b, 0x9d, 0x8c, 0x49, 0xcc, 0xe1, 0xbf, 0xf8, 0x13, 0x88, 0x09, 0x09, 0x8a, 0x43, 0xb4, 0x56, 0xfc, 0xaa, 0xa2, 0xae, 0xa0, 0x35, 0x48, 0x7e, 0x7d, 0x51, 0x2e, 0x36, 0x2a, 0x65, 0xbd, 0x58, 0x2f, 0xa9, 0x0a, 0x52, 0x21, 0xe5, 0x0a, 0xca, 0x95, 0x7a, 0x49, 0x8d, 0xe0, 0x4b, 0xd1, 0x77, 0x33, 0x1e, 0x64, 0xe8, 0x9f, 0x41, 0xbc, 0x29, 0x65, 0xbc, 0x52, 0xc9, 0x42, 0x6e, 0xce, 0xb5, 0x5c, 0x13, 0xe2, 0x19, 0xe0, 0xdf, 0x22, 0xa2, 0xfe, 0x21, 0x5a, 0x61, 0x39, 0x5d, 0x5c, 0xb3, 0xe7, 0x90, 0x96, 0x87, 0xce, 0xa8, 0xf9, 0x03, 0x6d, 0x31, 0x59, 0xbb, 0xf7, 0x85, 0xb4, 0x2e, 0x84, 0xe8, 0x14, 0xa4, 0x40, 0x37, 0x46, 0xec, 0xca, 0xb2, 0x33, 0x51, 0x9e, 0xfd, 0xa7, 0x73, 0x6e, 0x5d, 0xe2, 0xba, 0x45, 0xae, 0x4a, 0x52, 0xad, 
0xc0, 0x17, 0xaa, 0x81, 0x2a, 0x91, 0xc4, 0x0f, 0xa3, 0x76, 0xe6, 0xd1, 0xfd, 0xc1, 0xd6, 0x84, 0x55, 0xc9, 0xb5, 0xc5, 0xef, 0x20, 0xbb, 0x40, 0x3f, 0x34, 0x21, 0x9b, 0xf0, 0x88, 0xf6, 0x0d, 0xb3, 0xc7, 0x93, 0x91, 0x22, 0xe2, 0x03, 0xe5, 0x21, 0xda, 0x36, 0x18, 0xe5, 0xf1, 0x27, 0x0b, 0x5a, 0x5e, 0x30, 0x5c, 0xde, 0x65, 0xb8, 0x7c, 0xc3, 0x65, 0x38, 0xc2, 0xf5, 0xf0, 0xef, 0x8a, 0xf7, 0xa8, 0xff, 0x8b, 0x46, 0xcd, 0x41, 0xb2, 0x4f, 0xed, 0x2e, 0x6d, 0xeb, 0xd6, 0xa0, 0x27, 0x9a, 0x35, 0x4e, 0x40, 0x88, 0xce, 0x07, 0xbd, 0x31, 0x3a, 0x80, 0x35, 0xa9, 0xe0, 0xb5, 0xce, 0x2a, 0x7f, 0xe4, 0x69, 0x21, 0x76, 0x2f, 0x81, 0xff, 0x50, 0x3c, 0x7e, 0xb8, 0xd5, 0x78, 0x27, 0xb7, 0x1a, 0x6f, 0x3f, 0x98, 0xf5, 0x10, 0x93, 0xbc, 0xec, 0x30, 0xcf, 0x4e, 0x7b, 0x05, 0x31, 0x21, 0x0b, 0x4d, 0xee, 0x11, 0xc4, 0x98, 0x61, 0x77, 0x29, 0xe3, 0x21, 0x24, 0x0b, 0xeb, 0x2e, 0xfe, 0x2b, 0xb7, 0x6a, 0x44, 0x2a, 0xe0, 0x53, 0x41, 0x4b, 0x82, 0xc7, 0x96, 0x62, 0xc4, 0x4f, 0x05, 0xc3, 0x78, 0x48, 0x32, 0xda, 0x1c, 0x44, 0x99, 0xd1, 0x75, 0x23, 0x4d, 0xba, 0x20, 0x0d, 0xa3, 0x4b, 0xf8, 0x01, 0xbe, 0x04, 0x95, 0xd0, 0x4e, 0xe5, 0xc6, 0x74, 0xd8, 0x52, 0xc5, 0x53, 0x61, 0xd5, 0xa6, 0x1d, 0xd9, 0x4f, 0x93, 0xbf, 0xf8, 0x08, 0xd6, 0x03, 0xc8, 0x3e, 0x3b, 0x5f, 0x1b, 0xbd, 0x91, 0x48, 0x58, 0x9c, 0x88, 0x0f, 0xfc, 0x0b, 0x6c, 0x94, 0x6c, 0x6a, 0x30, 0xea, 0xbe, 0xe5, 0x7f, 0x7f, 0x0f, 0xb7, 0x20, 0x91, 0x40, 0x41, 0x72, 0x90, 0x74, 0x98, 0x61, 0x33, 0x7d, 0x68, 0x99, 0x03, 0xf7, 0x79, 0x03, 0x17, 0x5d, 0x4c, 0x24, 0xf8, 0x2f, 0x05, 0x36, 0xa7, 0x2f, 0xe0, 0xb1, 0x54, 0xcc, 0x61, 0x06, 0x1b, 0x39, 0xdc, 0x7b, 0xda, 0x7f, 0xa0, 0x61, 0xda, 0xf9, 0x3a, 0x57, 0x25, 0xd2, 0x04, 0xed, 0x43, 0x4c, 0x74, 0x8c, 0xec, 0x83, 0xb4, 0x6b, 0x2c, 0xcd, 0xe4, 0x29, 0xae, 0x41, 0x4c, 0x58, 0xa2, 0x18, 0x44, 0xce, 0xdf, 0xa8, 0x2b, 0x28, 0x0d, 0x50, 0x21, 0x44, 0xaf, 0x5c, 0x56, 0xeb, 0x8d, 0xba, 0xaa, 0x4c, 0xc8, 0x76, 0xf2, 0x5d, 0xad, 0xbd, 0x2d, 0x9e, 0x55, 0xcb, 0x6a, 0x04, 0x65, 0xe1, 0x71, 0x40, 0xa0, 0xd7, 0x1b, 0x45, 0xd2, 0xd0, 0x2f, 0xce, 0xab, 0xb5, 0x86, 0xba, 0x8a, 0xbf, 0x87, 0x8d, 0x32, 0xed, 0xd1, 0x07, 0xca, 0x26, 0xde, 0x82, 0xcd, 0x69, 0x78, 0x11, 0x3d, 0xfe, 0x16, 0xd6, 0x27, 0x1d, 0xf8, 0x30, 0x4e, 0x3f, 0x17, 0x0f, 0x65, 0xa6, 0x3c, 0x7e, 0x86, 0x95, 0x85, 0x19, 0x1e, 0xc1, 0xba, 0xb8, 0x32, 0xa1, 0x9d, 0xa5, 0xba, 0xfc, 0x05, 0x20, 0x7a, 0xd3, 0xa2, 0x43, 0xa6, 0xbf, 0x33, 0xd9, 0x95, 0x2e, 0x67, 0x7d, 0x84, 0x93, 0x90, 0x2a, 0x4e, 0xbe, 0x31, 0xd9, 0xd5, 0x85, 0x98, 0xfa, 0x9b, 0x80, 0x82, 0x6e, 0x65, 0x9e, 0xc6, 0xb0, 0x7f, 0x66, 0x3a, 0x81, 0xe5, 0xcd, 0x29, 0x59, 0x03, 0x66, 0x98, 0x03, 0x73, 0xd0, 0x95, 0xf4, 0xf0, 0x40, 0xeb, 0x09, 0x26, 0x70, 0x70, 0xa7, 0x6b, 0x99, 0xda, 0x3d, 0x48, 0x89, 0xe4, 0xe9, 0x62, 0x9b, 0x12, 0x31, 0x26, 0x9b, 0xbe, 0xe9, 0xeb, 0x68, 0x5c, 0x51, 0x23, 0xf8, 0x1a, 0x9e, 0x4e, 0x30, 0xdd, 0x3d, 0xec, 0x7f, 0x8b, 0xa5, 0x0a, 0xcf, 0x16, 0xfb, 0x95, 0x81, 0x64, 0x21, 0xc1, 0x8c, 0xee, 0x54, 0x14, 0x71, 0x26, 0x8d, 0x44, 0x08, 0x85, 0xbf, 0x13, 0x00, 0x84, 0x76, 0xea, 0xd4, 0xbe, 0x36, 0x5b, 0x14, 0x75, 0xe0, 0x83, 0xd0, 0x25, 0x1b, 0x3d, 0x0b, 0x0e, 0x8a, 0x79, 0x7b, 0xbd, 0xf6, 0xfc, 0x0e, 0x2d, 0xd9, 0x06, 0x2b, 0x48, 0xf7, 0xc8, 0x3f, 0x50, 0x10, 0xb4, 0x17, 0x3a, 0x8d, 0x82, 0x1b, 0xb3, 0x86, 0x17, 0xa9, 0xb8, 0xf0, 0x1f, 0x2b, 0xe8, 0x2d, 0xac, 0xcd, 0x6c, 0xc9, 0xe8, 0xc9, 0x8c, 0xe9, 0xcc, 0x32, 0xae, 0xe5, 0xe6, 0x9e, 0x07, 0x70, 0x4f, 0x21, 0x19, 0xd8, 0x66, 0x91, 0x16, 0xb4, 0x99, 0xde, 0xb0, 0xb5, 0x6c, 0xe8, 0x99, 0x97, 0x82, 0xef, 0x04, 0x67, 0x4c, 0xad, 0x88, 0x68, 0xf7, 
0xae, 0xfd, 0x54, 0xdb, 0x5b, 0xa0, 0x11, 0x1a, 0xbf, 0x87, 0xfd, 0x64, 0xee, 0xac, 0x0f, 0x8f, 0x3f, 0x14, 0xf7, 0xb5, 0x88, 0x5f, 0xce, 0xda, 0xe9, 0xf8, 0xa7, 0x47, 0xf9, 0x74, 0xfc, 0x33, 0xc3, 0x99, 0x63, 0x9d, 0x40, 0xc2, 0x9b, 0x92, 0x28, 0xe3, 0x3f, 0x88, 0xe9, 0x91, 0xac, 0x6d, 0x87, 0x9c, 0x78, 0x59, 0x7c, 0x03, 0xa9, 0xe0, 0x3c, 0x42, 0xd9, 0xf0, 0x29, 0x25, 0x90, 0x76, 0x16, 0x8d, 0x30, 0x01, 0x16, 0xa4, 0x77, 0x1f, 0x2c, 0x64, 0xa6, 0xf8, 0x60, 0xa1, 0x13, 0x61, 0x05, 0x55, 0x00, 0x7c, 0xda, 0x46, 0xdb, 0xc1, 0x64, 0x4c, 0x03, 0x69, 0x61, 0x47, 0x41, 0x18, 0x9f, 0x48, 0x7d, 0x98, 0x5b, 0x9c, 0xee, 0xc3, 0x84, 0xf0, 0xee, 0x0a, 0xfa, 0x55, 0x81, 0xdc, 0x1d, 0xfc, 0x87, 0xf2, 0x2e, 0xc2, 0xfd, 0x38, 0x5a, 0x3b, 0xbe, 0xb7, 0xbe, 0x77, 0x8d, 0x9f, 0x60, 0x67, 0x11, 0x73, 0xa1, 0x8f, 0x82, 0x90, 0x77, 0xf0, 0xaa, 0xf6, 0xe2, 0x7e, 0xca, 0xae, 0xf3, 0x66, 0x8c, 0x6f, 0xf3, 0x2f, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x58, 0x18, 0xe8, 0x58, 0xe0, 0x10, 0x00, 0x00, } remote.pb.go000066400000000000000000000410311324746544700351700ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: remote.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type AddRemoteRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` Url string `protobuf:"bytes,3,opt,name=url" json:"url,omitempty"` // If any, the remote is configured as a mirror with those mappings MirrorRefmaps []string `protobuf:"bytes,5,rep,name=mirror_refmaps,json=mirrorRefmaps" json:"mirror_refmaps,omitempty"` } func (m *AddRemoteRequest) Reset() { *m = AddRemoteRequest{} } func (m *AddRemoteRequest) String() string { return proto.CompactTextString(m) } func (*AddRemoteRequest) ProtoMessage() {} func (*AddRemoteRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{0} } func (m *AddRemoteRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *AddRemoteRequest) GetName() string { if m != nil { return m.Name } return "" } func (m *AddRemoteRequest) GetUrl() string { if m != nil { return m.Url } return "" } func (m *AddRemoteRequest) GetMirrorRefmaps() []string { if m != nil { return m.MirrorRefmaps } return nil } type AddRemoteResponse struct { } func (m *AddRemoteResponse) Reset() { *m = AddRemoteResponse{} } func (m *AddRemoteResponse) String() string { return proto.CompactTextString(m) } func (*AddRemoteResponse) ProtoMessage() {} func (*AddRemoteResponse) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{1} } type RemoveRemoteRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` } func (m *RemoveRemoteRequest) Reset() { *m = RemoveRemoteRequest{} } func (m *RemoveRemoteRequest) String() string { return proto.CompactTextString(m) } func (*RemoveRemoteRequest) ProtoMessage() {} func (*RemoveRemoteRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{2} } func (m *RemoveRemoteRequest) GetRepository() *Repository { if m != 
nil { return m.Repository } return nil } func (m *RemoveRemoteRequest) GetName() string { if m != nil { return m.Name } return "" } type RemoveRemoteResponse struct { Result bool `protobuf:"varint,1,opt,name=result" json:"result,omitempty"` } func (m *RemoveRemoteResponse) Reset() { *m = RemoveRemoteResponse{} } func (m *RemoveRemoteResponse) String() string { return proto.CompactTextString(m) } func (*RemoveRemoteResponse) ProtoMessage() {} func (*RemoveRemoteResponse) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{3} } func (m *RemoveRemoteResponse) GetResult() bool { if m != nil { return m.Result } return false } type FetchInternalRemoteRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` RemoteRepository *Repository `protobuf:"bytes,2,opt,name=remote_repository,json=remoteRepository" json:"remote_repository,omitempty"` } func (m *FetchInternalRemoteRequest) Reset() { *m = FetchInternalRemoteRequest{} } func (m *FetchInternalRemoteRequest) String() string { return proto.CompactTextString(m) } func (*FetchInternalRemoteRequest) ProtoMessage() {} func (*FetchInternalRemoteRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{4} } func (m *FetchInternalRemoteRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *FetchInternalRemoteRequest) GetRemoteRepository() *Repository { if m != nil { return m.RemoteRepository } return nil } type FetchInternalRemoteResponse struct { Result bool `protobuf:"varint,1,opt,name=result" json:"result,omitempty"` } func (m *FetchInternalRemoteResponse) Reset() { *m = FetchInternalRemoteResponse{} } func (m *FetchInternalRemoteResponse) String() string { return proto.CompactTextString(m) } func (*FetchInternalRemoteResponse) ProtoMessage() {} func (*FetchInternalRemoteResponse) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{5} } func (m *FetchInternalRemoteResponse) GetResult() bool { if m != nil { return m.Result } return false } type UpdateRemoteMirrorRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` RefName string `protobuf:"bytes,2,opt,name=ref_name,json=refName" json:"ref_name,omitempty"` OnlyBranchesMatching [][]byte `protobuf:"bytes,3,rep,name=only_branches_matching,json=onlyBranchesMatching,proto3" json:"only_branches_matching,omitempty"` } func (m *UpdateRemoteMirrorRequest) Reset() { *m = UpdateRemoteMirrorRequest{} } func (m *UpdateRemoteMirrorRequest) String() string { return proto.CompactTextString(m) } func (*UpdateRemoteMirrorRequest) ProtoMessage() {} func (*UpdateRemoteMirrorRequest) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{6} } func (m *UpdateRemoteMirrorRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *UpdateRemoteMirrorRequest) GetRefName() string { if m != nil { return m.RefName } return "" } func (m *UpdateRemoteMirrorRequest) GetOnlyBranchesMatching() [][]byte { if m != nil { return m.OnlyBranchesMatching } return nil } type UpdateRemoteMirrorResponse struct { } func (m *UpdateRemoteMirrorResponse) Reset() { *m = UpdateRemoteMirrorResponse{} } func (m *UpdateRemoteMirrorResponse) String() string { return proto.CompactTextString(m) } func (*UpdateRemoteMirrorResponse) ProtoMessage() {} func (*UpdateRemoteMirrorResponse) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{7} } func init() { proto.RegisterType((*AddRemoteRequest)(nil), "gitaly.AddRemoteRequest") 
proto.RegisterType((*AddRemoteResponse)(nil), "gitaly.AddRemoteResponse") proto.RegisterType((*RemoveRemoteRequest)(nil), "gitaly.RemoveRemoteRequest") proto.RegisterType((*RemoveRemoteResponse)(nil), "gitaly.RemoveRemoteResponse") proto.RegisterType((*FetchInternalRemoteRequest)(nil), "gitaly.FetchInternalRemoteRequest") proto.RegisterType((*FetchInternalRemoteResponse)(nil), "gitaly.FetchInternalRemoteResponse") proto.RegisterType((*UpdateRemoteMirrorRequest)(nil), "gitaly.UpdateRemoteMirrorRequest") proto.RegisterType((*UpdateRemoteMirrorResponse)(nil), "gitaly.UpdateRemoteMirrorResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for RemoteService service type RemoteServiceClient interface { AddRemote(ctx context.Context, in *AddRemoteRequest, opts ...grpc.CallOption) (*AddRemoteResponse, error) FetchInternalRemote(ctx context.Context, in *FetchInternalRemoteRequest, opts ...grpc.CallOption) (*FetchInternalRemoteResponse, error) RemoveRemote(ctx context.Context, in *RemoveRemoteRequest, opts ...grpc.CallOption) (*RemoveRemoteResponse, error) UpdateRemoteMirror(ctx context.Context, opts ...grpc.CallOption) (RemoteService_UpdateRemoteMirrorClient, error) } type remoteServiceClient struct { cc *grpc.ClientConn } func NewRemoteServiceClient(cc *grpc.ClientConn) RemoteServiceClient { return &remoteServiceClient{cc} } func (c *remoteServiceClient) AddRemote(ctx context.Context, in *AddRemoteRequest, opts ...grpc.CallOption) (*AddRemoteResponse, error) { out := new(AddRemoteResponse) err := grpc.Invoke(ctx, "/gitaly.RemoteService/AddRemote", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *remoteServiceClient) FetchInternalRemote(ctx context.Context, in *FetchInternalRemoteRequest, opts ...grpc.CallOption) (*FetchInternalRemoteResponse, error) { out := new(FetchInternalRemoteResponse) err := grpc.Invoke(ctx, "/gitaly.RemoteService/FetchInternalRemote", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *remoteServiceClient) RemoveRemote(ctx context.Context, in *RemoveRemoteRequest, opts ...grpc.CallOption) (*RemoveRemoteResponse, error) { out := new(RemoveRemoteResponse) err := grpc.Invoke(ctx, "/gitaly.RemoteService/RemoveRemote", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *remoteServiceClient) UpdateRemoteMirror(ctx context.Context, opts ...grpc.CallOption) (RemoteService_UpdateRemoteMirrorClient, error) { stream, err := grpc.NewClientStream(ctx, &_RemoteService_serviceDesc.Streams[0], c.cc, "/gitaly.RemoteService/UpdateRemoteMirror", opts...) 
if err != nil { return nil, err } x := &remoteServiceUpdateRemoteMirrorClient{stream} return x, nil } type RemoteService_UpdateRemoteMirrorClient interface { Send(*UpdateRemoteMirrorRequest) error CloseAndRecv() (*UpdateRemoteMirrorResponse, error) grpc.ClientStream } type remoteServiceUpdateRemoteMirrorClient struct { grpc.ClientStream } func (x *remoteServiceUpdateRemoteMirrorClient) Send(m *UpdateRemoteMirrorRequest) error { return x.ClientStream.SendMsg(m) } func (x *remoteServiceUpdateRemoteMirrorClient) CloseAndRecv() (*UpdateRemoteMirrorResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(UpdateRemoteMirrorResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for RemoteService service type RemoteServiceServer interface { AddRemote(context.Context, *AddRemoteRequest) (*AddRemoteResponse, error) FetchInternalRemote(context.Context, *FetchInternalRemoteRequest) (*FetchInternalRemoteResponse, error) RemoveRemote(context.Context, *RemoveRemoteRequest) (*RemoveRemoteResponse, error) UpdateRemoteMirror(RemoteService_UpdateRemoteMirrorServer) error } func RegisterRemoteServiceServer(s *grpc.Server, srv RemoteServiceServer) { s.RegisterService(&_RemoteService_serviceDesc, srv) } func _RemoteService_AddRemote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(AddRemoteRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RemoteServiceServer).AddRemote(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RemoteService/AddRemote", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RemoteServiceServer).AddRemote(ctx, req.(*AddRemoteRequest)) } return interceptor(ctx, in, info, handler) } func _RemoteService_FetchInternalRemote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FetchInternalRemoteRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RemoteServiceServer).FetchInternalRemote(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RemoteService/FetchInternalRemote", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RemoteServiceServer).FetchInternalRemote(ctx, req.(*FetchInternalRemoteRequest)) } return interceptor(ctx, in, info, handler) } func _RemoteService_RemoveRemote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RemoveRemoteRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RemoteServiceServer).RemoveRemote(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RemoteService/RemoveRemote", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RemoteServiceServer).RemoveRemote(ctx, req.(*RemoveRemoteRequest)) } return interceptor(ctx, in, info, handler) } func _RemoteService_UpdateRemoteMirror_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(RemoteServiceServer).UpdateRemoteMirror(&remoteServiceUpdateRemoteMirrorServer{stream}) } type RemoteService_UpdateRemoteMirrorServer interface { SendAndClose(*UpdateRemoteMirrorResponse) error Recv() 
(*UpdateRemoteMirrorRequest, error) grpc.ServerStream } type remoteServiceUpdateRemoteMirrorServer struct { grpc.ServerStream } func (x *remoteServiceUpdateRemoteMirrorServer) SendAndClose(m *UpdateRemoteMirrorResponse) error { return x.ServerStream.SendMsg(m) } func (x *remoteServiceUpdateRemoteMirrorServer) Recv() (*UpdateRemoteMirrorRequest, error) { m := new(UpdateRemoteMirrorRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _RemoteService_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.RemoteService", HandlerType: (*RemoteServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "AddRemote", Handler: _RemoteService_AddRemote_Handler, }, { MethodName: "FetchInternalRemote", Handler: _RemoteService_FetchInternalRemote_Handler, }, { MethodName: "RemoveRemote", Handler: _RemoteService_RemoveRemote_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "UpdateRemoteMirror", Handler: _RemoteService_UpdateRemoteMirror_Handler, ClientStreams: true, }, }, Metadata: "remote.proto", } func init() { proto.RegisterFile("remote.proto", fileDescriptor9) } var fileDescriptor9 = []byte{ // 434 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xcd, 0x6e, 0xd3, 0x40, 0x10, 0xc6, 0x71, 0x1a, 0x92, 0x21, 0x45, 0xe9, 0xa6, 0xaa, 0x1c, 0xb7, 0x87, 0xb0, 0x08, 0xc9, 0xa7, 0x1c, 0x02, 0x9c, 0x11, 0x3d, 0x20, 0x01, 0x2a, 0x87, 0x45, 0x1c, 0x91, 0xd9, 0x26, 0x93, 0xc6, 0x92, 0xed, 0x35, 0xb3, 0x9b, 0x4a, 0x79, 0x0c, 0xde, 0x80, 0x23, 0xef, 0xc5, 0x8b, 0xa0, 0x78, 0xd7, 0xc6, 0x50, 0x27, 0x48, 0x20, 0x6e, 0xeb, 0xef, 0x9b, 0xbf, 0x6f, 0xe6, 0x93, 0x61, 0x48, 0x98, 0x29, 0x83, 0xb3, 0x82, 0x94, 0x51, 0xac, 0x77, 0x93, 0x18, 0x99, 0x6e, 0xc3, 0xa1, 0x5e, 0x4b, 0xc2, 0xa5, 0x45, 0xf9, 0x37, 0x0f, 0x46, 0x2f, 0x97, 0x4b, 0x51, 0x46, 0x0a, 0xfc, 0xbc, 0x41, 0x6d, 0xd8, 0x1c, 0x80, 0xb0, 0x50, 0x3a, 0x31, 0x8a, 0xb6, 0x81, 0x37, 0xf5, 0xa2, 0x07, 0x73, 0x36, 0xb3, 0xf9, 0x33, 0x51, 0x33, 0xa2, 0x11, 0xc5, 0x18, 0x74, 0x73, 0x99, 0x61, 0xd0, 0x99, 0x7a, 0xd1, 0x40, 0x94, 0x6f, 0x36, 0x02, 0x7f, 0x43, 0x69, 0xe0, 0x97, 0xd0, 0xee, 0xc9, 0x9e, 0xc0, 0xc3, 0x2c, 0x21, 0x52, 0x14, 0x13, 0xae, 0x32, 0x59, 0xe8, 0xe0, 0x68, 0xea, 0x47, 0x03, 0x71, 0x6c, 0x51, 0x61, 0xc1, 0x37, 0xdd, 0x7e, 0x77, 0x74, 0x54, 0x81, 0x2e, 0x94, 0x8f, 0xe1, 0xa4, 0x31, 0xa9, 0x2e, 0x54, 0xae, 0x91, 0x7f, 0x84, 0xf1, 0x0e, 0xb9, 0xc5, 0xff, 0xa2, 0x80, 0xcf, 0xe0, 0xf4, 0xd7, 0xf2, 0xb6, 0x2d, 0x3b, 0x83, 0x1e, 0xa1, 0xde, 0xa4, 0xa6, 0xac, 0xdd, 0x17, 0xee, 0x8b, 0x7f, 0xf1, 0x20, 0x7c, 0x85, 0x66, 0xb1, 0x7e, 0x9d, 0x1b, 0xa4, 0x5c, 0xa6, 0xff, 0x3e, 0xd6, 0x0b, 0x38, 0xb1, 0x77, 0x8c, 0x1b, 0xa9, 0x9d, 0xbd, 0xa9, 0x23, 0x72, 0x1d, 0x2b, 0x84, 0x3f, 0x87, 0xf3, 0xd6, 0x91, 0xfe, 0x20, 0xe5, 0xab, 0x07, 0x93, 0x0f, 0xc5, 0x52, 0x1a, 0xa7, 0xfd, 0xca, 0x5d, 0xe8, 0xef, 0x95, 0x4c, 0xa0, 0x4f, 0xb8, 0x8a, 0x1b, 0x4b, 0xbe, 0x4f, 0xb8, 0x7a, 0xb7, 0x73, 0xca, 0x33, 0x38, 0x53, 0x79, 0xba, 0x8d, 0xaf, 0x49, 0xe6, 0x8b, 0x35, 0xea, 0x38, 0x93, 0x66, 0xb1, 0x4e, 0xf2, 0x9b, 0xc0, 0x9f, 0xfa, 0xd1, 0x50, 0x9c, 0xee, 0xd8, 0x4b, 0x47, 0x5e, 0x39, 0x8e, 0x5f, 0x40, 0xd8, 0x36, 0xa1, 0x15, 0x36, 0xff, 0xde, 0x81, 0x63, 0x4b, 0xbc, 0x47, 0xba, 0x4d, 0x16, 0xc8, 0x2e, 0x61, 0x50, 0x3b, 0x88, 0x05, 0xd5, 0xb4, 0xbf, 0xdb, 0x3f, 0x9c, 0xb4, 0x30, 0xce, 0x6e, 0xf7, 0xd8, 0x27, 0x18, 0xb7, 0x6c, 0x93, 0xf1, 0x2a, 0x67, 0xff, 0xf5, 0xc3, 0xc7, 0x07, 0x63, 0xea, 0x0e, 0x6f, 0x61, 0xd8, 0xf4, 0x1c, 0x3b, 0xff, 0xb9, 0xd6, 0x3b, 0x46, 0x0f, 0x2f, 
0xda, 0xc9, 0xba, 0x58, 0x0c, 0xec, 0xee, 0x8a, 0xd8, 0xa3, 0x2a, 0x6b, 0xef, 0x81, 0x43, 0x7e, 0x28, 0xa4, 0x2a, 0x1f, 0x79, 0xd7, 0xbd, 0xf2, 0x3f, 0xf2, 0xf4, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4d, 0xc9, 0x74, 0x7e, 0x6d, 0x04, 0x00, 0x00, } repository-service.pb.go000066400000000000000000001725271324746544700375710ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: repository-service.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type GetArchiveRequest_Format int32 const ( GetArchiveRequest_ZIP GetArchiveRequest_Format = 0 GetArchiveRequest_TAR GetArchiveRequest_Format = 1 GetArchiveRequest_TAR_GZ GetArchiveRequest_Format = 2 GetArchiveRequest_TAR_BZ2 GetArchiveRequest_Format = 3 ) var GetArchiveRequest_Format_name = map[int32]string{ 0: "ZIP", 1: "TAR", 2: "TAR_GZ", 3: "TAR_BZ2", } var GetArchiveRequest_Format_value = map[string]int32{ "ZIP": 0, "TAR": 1, "TAR_GZ": 2, "TAR_BZ2": 3, } func (x GetArchiveRequest_Format) String() string { return proto.EnumName(GetArchiveRequest_Format_name, int32(x)) } func (GetArchiveRequest_Format) EnumDescriptor() ([]byte, []int) { return fileDescriptor10, []int{18, 0} } type RepositoryExistsRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` } func (m *RepositoryExistsRequest) Reset() { *m = RepositoryExistsRequest{} } func (m *RepositoryExistsRequest) String() string { return proto.CompactTextString(m) } func (*RepositoryExistsRequest) ProtoMessage() {} func (*RepositoryExistsRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{0} } func (m *RepositoryExistsRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } type RepositoryExistsResponse struct { Exists bool `protobuf:"varint,1,opt,name=exists" json:"exists,omitempty"` } func (m *RepositoryExistsResponse) Reset() { *m = RepositoryExistsResponse{} } func (m *RepositoryExistsResponse) String() string { return proto.CompactTextString(m) } func (*RepositoryExistsResponse) ProtoMessage() {} func (*RepositoryExistsResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{1} } func (m *RepositoryExistsResponse) GetExists() bool { if m != nil { return m.Exists } return false } type RepositoryIsEmptyRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` } func (m *RepositoryIsEmptyRequest) Reset() { *m = RepositoryIsEmptyRequest{} } func (m *RepositoryIsEmptyRequest) String() string { return proto.CompactTextString(m) } func (*RepositoryIsEmptyRequest) ProtoMessage() {} func (*RepositoryIsEmptyRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{2} } func (m *RepositoryIsEmptyRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } type RepositoryIsEmptyResponse struct { IsEmpty bool `protobuf:"varint,1,opt,name=is_empty,json=isEmpty" json:"is_empty,omitempty"` } func (m *RepositoryIsEmptyResponse) Reset() { *m = RepositoryIsEmptyResponse{} } func (m *RepositoryIsEmptyResponse) String() string { return proto.CompactTextString(m) } func (*RepositoryIsEmptyResponse) ProtoMessage() {} 
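// Illustrative sketch (not part of the generated gitaly-proto code): a minimal
// example of how a caller in this package might use the generated
// RepositoryServiceClient to ask Gitaly whether a repository is empty. The
// function name, the dial address and the use of a plaintext connection are
// assumptions for the example only; the *Repository value is supplied by the
// caller.
func exampleRepositoryIsEmpty(addr string, repo *Repository) (bool, error) {
	// Dial the Gitaly server; grpc.WithInsecure is used here purely for brevity.
	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		return false, err
	}
	defer conn.Close()

	// Issue the unary RepositoryIsEmpty RPC and read the boolean result.
	client := NewRepositoryServiceClient(conn)
	resp, err := client.RepositoryIsEmpty(context.Background(), &RepositoryIsEmptyRequest{Repository: repo})
	if err != nil {
		return false, err
	}
	return resp.GetIsEmpty(), nil
}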
func (*RepositoryIsEmptyResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{3} } func (m *RepositoryIsEmptyResponse) GetIsEmpty() bool { if m != nil { return m.IsEmpty } return false } type RepackIncrementalRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` } func (m *RepackIncrementalRequest) Reset() { *m = RepackIncrementalRequest{} } func (m *RepackIncrementalRequest) String() string { return proto.CompactTextString(m) } func (*RepackIncrementalRequest) ProtoMessage() {} func (*RepackIncrementalRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{4} } func (m *RepackIncrementalRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } type RepackIncrementalResponse struct { } func (m *RepackIncrementalResponse) Reset() { *m = RepackIncrementalResponse{} } func (m *RepackIncrementalResponse) String() string { return proto.CompactTextString(m) } func (*RepackIncrementalResponse) ProtoMessage() {} func (*RepackIncrementalResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{5} } type RepackFullRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` CreateBitmap bool `protobuf:"varint,2,opt,name=create_bitmap,json=createBitmap" json:"create_bitmap,omitempty"` } func (m *RepackFullRequest) Reset() { *m = RepackFullRequest{} } func (m *RepackFullRequest) String() string { return proto.CompactTextString(m) } func (*RepackFullRequest) ProtoMessage() {} func (*RepackFullRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{6} } func (m *RepackFullRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *RepackFullRequest) GetCreateBitmap() bool { if m != nil { return m.CreateBitmap } return false } type RepackFullResponse struct { } func (m *RepackFullResponse) Reset() { *m = RepackFullResponse{} } func (m *RepackFullResponse) String() string { return proto.CompactTextString(m) } func (*RepackFullResponse) ProtoMessage() {} func (*RepackFullResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{7} } type GarbageCollectRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` CreateBitmap bool `protobuf:"varint,2,opt,name=create_bitmap,json=createBitmap" json:"create_bitmap,omitempty"` } func (m *GarbageCollectRequest) Reset() { *m = GarbageCollectRequest{} } func (m *GarbageCollectRequest) String() string { return proto.CompactTextString(m) } func (*GarbageCollectRequest) ProtoMessage() {} func (*GarbageCollectRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{8} } func (m *GarbageCollectRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *GarbageCollectRequest) GetCreateBitmap() bool { if m != nil { return m.CreateBitmap } return false } type GarbageCollectResponse struct { } func (m *GarbageCollectResponse) Reset() { *m = GarbageCollectResponse{} } func (m *GarbageCollectResponse) String() string { return proto.CompactTextString(m) } func (*GarbageCollectResponse) ProtoMessage() {} func (*GarbageCollectResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{9} } type RepositorySizeRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` } func (m *RepositorySizeRequest) Reset() { *m = RepositorySizeRequest{} } func (m *RepositorySizeRequest) String() 
string { return proto.CompactTextString(m) } func (*RepositorySizeRequest) ProtoMessage() {} func (*RepositorySizeRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{10} } func (m *RepositorySizeRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } type RepositorySizeResponse struct { // Repository size in kilobytes Size int64 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` } func (m *RepositorySizeResponse) Reset() { *m = RepositorySizeResponse{} } func (m *RepositorySizeResponse) String() string { return proto.CompactTextString(m) } func (*RepositorySizeResponse) ProtoMessage() {} func (*RepositorySizeResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{11} } func (m *RepositorySizeResponse) GetSize() int64 { if m != nil { return m.Size } return 0 } type ApplyGitattributesRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Revision []byte `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` } func (m *ApplyGitattributesRequest) Reset() { *m = ApplyGitattributesRequest{} } func (m *ApplyGitattributesRequest) String() string { return proto.CompactTextString(m) } func (*ApplyGitattributesRequest) ProtoMessage() {} func (*ApplyGitattributesRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{12} } func (m *ApplyGitattributesRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *ApplyGitattributesRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } type ApplyGitattributesResponse struct { } func (m *ApplyGitattributesResponse) Reset() { *m = ApplyGitattributesResponse{} } func (m *ApplyGitattributesResponse) String() string { return proto.CompactTextString(m) } func (*ApplyGitattributesResponse) ProtoMessage() {} func (*ApplyGitattributesResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{13} } type FetchRemoteRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Remote string `protobuf:"bytes,2,opt,name=remote" json:"remote,omitempty"` Force bool `protobuf:"varint,3,opt,name=force" json:"force,omitempty"` NoTags bool `protobuf:"varint,4,opt,name=no_tags,json=noTags" json:"no_tags,omitempty"` Timeout int32 `protobuf:"varint,5,opt,name=timeout" json:"timeout,omitempty"` SshKey string `protobuf:"bytes,6,opt,name=ssh_key,json=sshKey" json:"ssh_key,omitempty"` KnownHosts string `protobuf:"bytes,7,opt,name=known_hosts,json=knownHosts" json:"known_hosts,omitempty"` } func (m *FetchRemoteRequest) Reset() { *m = FetchRemoteRequest{} } func (m *FetchRemoteRequest) String() string { return proto.CompactTextString(m) } func (*FetchRemoteRequest) ProtoMessage() {} func (*FetchRemoteRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{14} } func (m *FetchRemoteRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *FetchRemoteRequest) GetRemote() string { if m != nil { return m.Remote } return "" } func (m *FetchRemoteRequest) GetForce() bool { if m != nil { return m.Force } return false } func (m *FetchRemoteRequest) GetNoTags() bool { if m != nil { return m.NoTags } return false } func (m *FetchRemoteRequest) GetTimeout() int32 { if m != nil { return m.Timeout } return 0 } func (m *FetchRemoteRequest) GetSshKey() string { if m != nil { return m.SshKey } return "" } func (m *FetchRemoteRequest) GetKnownHosts() string { if m != nil { 
return m.KnownHosts } return "" } type FetchRemoteResponse struct { } func (m *FetchRemoteResponse) Reset() { *m = FetchRemoteResponse{} } func (m *FetchRemoteResponse) String() string { return proto.CompactTextString(m) } func (*FetchRemoteResponse) ProtoMessage() {} func (*FetchRemoteResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{15} } type CreateRepositoryRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` } func (m *CreateRepositoryRequest) Reset() { *m = CreateRepositoryRequest{} } func (m *CreateRepositoryRequest) String() string { return proto.CompactTextString(m) } func (*CreateRepositoryRequest) ProtoMessage() {} func (*CreateRepositoryRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{16} } func (m *CreateRepositoryRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } type CreateRepositoryResponse struct { } func (m *CreateRepositoryResponse) Reset() { *m = CreateRepositoryResponse{} } func (m *CreateRepositoryResponse) String() string { return proto.CompactTextString(m) } func (*CreateRepositoryResponse) ProtoMessage() {} func (*CreateRepositoryResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{17} } type GetArchiveRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` CommitId string `protobuf:"bytes,2,opt,name=commit_id,json=commitId" json:"commit_id,omitempty"` Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` Format GetArchiveRequest_Format `protobuf:"varint,4,opt,name=format,enum=gitaly.GetArchiveRequest_Format" json:"format,omitempty"` } func (m *GetArchiveRequest) Reset() { *m = GetArchiveRequest{} } func (m *GetArchiveRequest) String() string { return proto.CompactTextString(m) } func (*GetArchiveRequest) ProtoMessage() {} func (*GetArchiveRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{18} } func (m *GetArchiveRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *GetArchiveRequest) GetCommitId() string { if m != nil { return m.CommitId } return "" } func (m *GetArchiveRequest) GetPrefix() string { if m != nil { return m.Prefix } return "" } func (m *GetArchiveRequest) GetFormat() GetArchiveRequest_Format { if m != nil { return m.Format } return GetArchiveRequest_ZIP } type GetArchiveResponse struct { Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` } func (m *GetArchiveResponse) Reset() { *m = GetArchiveResponse{} } func (m *GetArchiveResponse) String() string { return proto.CompactTextString(m) } func (*GetArchiveResponse) ProtoMessage() {} func (*GetArchiveResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{19} } func (m *GetArchiveResponse) GetData() []byte { if m != nil { return m.Data } return nil } type HasLocalBranchesRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` } func (m *HasLocalBranchesRequest) Reset() { *m = HasLocalBranchesRequest{} } func (m *HasLocalBranchesRequest) String() string { return proto.CompactTextString(m) } func (*HasLocalBranchesRequest) ProtoMessage() {} func (*HasLocalBranchesRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{20} } func (m *HasLocalBranchesRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } type HasLocalBranchesResponse struct { Value bool 
`protobuf:"varint,1,opt,name=value" json:"value,omitempty"` } func (m *HasLocalBranchesResponse) Reset() { *m = HasLocalBranchesResponse{} } func (m *HasLocalBranchesResponse) String() string { return proto.CompactTextString(m) } func (*HasLocalBranchesResponse) ProtoMessage() {} func (*HasLocalBranchesResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{21} } func (m *HasLocalBranchesResponse) GetValue() bool { if m != nil { return m.Value } return false } type FetchSourceBranchRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` SourceRepository *Repository `protobuf:"bytes,2,opt,name=source_repository,json=sourceRepository" json:"source_repository,omitempty"` SourceBranch []byte `protobuf:"bytes,3,opt,name=source_branch,json=sourceBranch,proto3" json:"source_branch,omitempty"` TargetRef []byte `protobuf:"bytes,4,opt,name=target_ref,json=targetRef,proto3" json:"target_ref,omitempty"` } func (m *FetchSourceBranchRequest) Reset() { *m = FetchSourceBranchRequest{} } func (m *FetchSourceBranchRequest) String() string { return proto.CompactTextString(m) } func (*FetchSourceBranchRequest) ProtoMessage() {} func (*FetchSourceBranchRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{22} } func (m *FetchSourceBranchRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *FetchSourceBranchRequest) GetSourceRepository() *Repository { if m != nil { return m.SourceRepository } return nil } func (m *FetchSourceBranchRequest) GetSourceBranch() []byte { if m != nil { return m.SourceBranch } return nil } func (m *FetchSourceBranchRequest) GetTargetRef() []byte { if m != nil { return m.TargetRef } return nil } type FetchSourceBranchResponse struct { Result bool `protobuf:"varint,1,opt,name=result" json:"result,omitempty"` } func (m *FetchSourceBranchResponse) Reset() { *m = FetchSourceBranchResponse{} } func (m *FetchSourceBranchResponse) String() string { return proto.CompactTextString(m) } func (*FetchSourceBranchResponse) ProtoMessage() {} func (*FetchSourceBranchResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{23} } func (m *FetchSourceBranchResponse) GetResult() bool { if m != nil { return m.Result } return false } type FsckRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` } func (m *FsckRequest) Reset() { *m = FsckRequest{} } func (m *FsckRequest) String() string { return proto.CompactTextString(m) } func (*FsckRequest) ProtoMessage() {} func (*FsckRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{24} } func (m *FsckRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } type FsckResponse struct { Error []byte `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } func (m *FsckResponse) Reset() { *m = FsckResponse{} } func (m *FsckResponse) String() string { return proto.CompactTextString(m) } func (*FsckResponse) ProtoMessage() {} func (*FsckResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{25} } func (m *FsckResponse) GetError() []byte { if m != nil { return m.Error } return nil } type WriteRefRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Ref []byte `protobuf:"bytes,2,opt,name=ref,proto3" json:"ref,omitempty"` Revision []byte `protobuf:"bytes,3,opt,name=revision,proto3" json:"revision,omitempty"` OldRevision []byte 
`protobuf:"bytes,4,opt,name=old_revision,json=oldRevision,proto3" json:"old_revision,omitempty"` Force bool `protobuf:"varint,5,opt,name=force" json:"force,omitempty"` Shell bool `protobuf:"varint,6,opt,name=shell" json:"shell,omitempty"` } func (m *WriteRefRequest) Reset() { *m = WriteRefRequest{} } func (m *WriteRefRequest) String() string { return proto.CompactTextString(m) } func (*WriteRefRequest) ProtoMessage() {} func (*WriteRefRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{26} } func (m *WriteRefRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *WriteRefRequest) GetRef() []byte { if m != nil { return m.Ref } return nil } func (m *WriteRefRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } func (m *WriteRefRequest) GetOldRevision() []byte { if m != nil { return m.OldRevision } return nil } func (m *WriteRefRequest) GetForce() bool { if m != nil { return m.Force } return false } func (m *WriteRefRequest) GetShell() bool { if m != nil { return m.Shell } return false } type WriteRefResponse struct { Error []byte `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } func (m *WriteRefResponse) Reset() { *m = WriteRefResponse{} } func (m *WriteRefResponse) String() string { return proto.CompactTextString(m) } func (*WriteRefResponse) ProtoMessage() {} func (*WriteRefResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{27} } func (m *WriteRefResponse) GetError() []byte { if m != nil { return m.Error } return nil } type FindMergeBaseRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // We use a repeated field because rugged supports finding a base // for more than 2 revisions, so if we needed that in the future we don't // need to change the protocol. 
Revisions [][]byte `protobuf:"bytes,2,rep,name=revisions,proto3" json:"revisions,omitempty"` } func (m *FindMergeBaseRequest) Reset() { *m = FindMergeBaseRequest{} } func (m *FindMergeBaseRequest) String() string { return proto.CompactTextString(m) } func (*FindMergeBaseRequest) ProtoMessage() {} func (*FindMergeBaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{28} } func (m *FindMergeBaseRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *FindMergeBaseRequest) GetRevisions() [][]byte { if m != nil { return m.Revisions } return nil } type FindMergeBaseResponse struct { Base string `protobuf:"bytes,1,opt,name=base" json:"base,omitempty"` } func (m *FindMergeBaseResponse) Reset() { *m = FindMergeBaseResponse{} } func (m *FindMergeBaseResponse) String() string { return proto.CompactTextString(m) } func (*FindMergeBaseResponse) ProtoMessage() {} func (*FindMergeBaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{29} } func (m *FindMergeBaseResponse) GetBase() string { if m != nil { return m.Base } return "" } type CreateForkRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` SourceRepository *Repository `protobuf:"bytes,2,opt,name=source_repository,json=sourceRepository" json:"source_repository,omitempty"` } func (m *CreateForkRequest) Reset() { *m = CreateForkRequest{} } func (m *CreateForkRequest) String() string { return proto.CompactTextString(m) } func (*CreateForkRequest) ProtoMessage() {} func (*CreateForkRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{30} } func (m *CreateForkRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *CreateForkRequest) GetSourceRepository() *Repository { if m != nil { return m.SourceRepository } return nil } type CreateForkResponse struct { } func (m *CreateForkResponse) Reset() { *m = CreateForkResponse{} } func (m *CreateForkResponse) String() string { return proto.CompactTextString(m) } func (*CreateForkResponse) ProtoMessage() {} func (*CreateForkResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{31} } type IsRebaseInProgressRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` RebaseId string `protobuf:"bytes,2,opt,name=rebase_id,json=rebaseId" json:"rebase_id,omitempty"` } func (m *IsRebaseInProgressRequest) Reset() { *m = IsRebaseInProgressRequest{} } func (m *IsRebaseInProgressRequest) String() string { return proto.CompactTextString(m) } func (*IsRebaseInProgressRequest) ProtoMessage() {} func (*IsRebaseInProgressRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{32} } func (m *IsRebaseInProgressRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *IsRebaseInProgressRequest) GetRebaseId() string { if m != nil { return m.RebaseId } return "" } type IsRebaseInProgressResponse struct { InProgress bool `protobuf:"varint,1,opt,name=in_progress,json=inProgress" json:"in_progress,omitempty"` } func (m *IsRebaseInProgressResponse) Reset() { *m = IsRebaseInProgressResponse{} } func (m *IsRebaseInProgressResponse) String() string { return proto.CompactTextString(m) } func (*IsRebaseInProgressResponse) ProtoMessage() {} func (*IsRebaseInProgressResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{33} } func (m *IsRebaseInProgressResponse) GetInProgress() bool { if m != nil { return 
m.InProgress } return false } type CreateRepositoryFromURLRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` } func (m *CreateRepositoryFromURLRequest) Reset() { *m = CreateRepositoryFromURLRequest{} } func (m *CreateRepositoryFromURLRequest) String() string { return proto.CompactTextString(m) } func (*CreateRepositoryFromURLRequest) ProtoMessage() {} func (*CreateRepositoryFromURLRequest) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{34} } func (m *CreateRepositoryFromURLRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *CreateRepositoryFromURLRequest) GetUrl() string { if m != nil { return m.Url } return "" } type CreateRepositoryFromURLResponse struct { } func (m *CreateRepositoryFromURLResponse) Reset() { *m = CreateRepositoryFromURLResponse{} } func (m *CreateRepositoryFromURLResponse) String() string { return proto.CompactTextString(m) } func (*CreateRepositoryFromURLResponse) ProtoMessage() {} func (*CreateRepositoryFromURLResponse) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{35} } func init() { proto.RegisterType((*RepositoryExistsRequest)(nil), "gitaly.RepositoryExistsRequest") proto.RegisterType((*RepositoryExistsResponse)(nil), "gitaly.RepositoryExistsResponse") proto.RegisterType((*RepositoryIsEmptyRequest)(nil), "gitaly.RepositoryIsEmptyRequest") proto.RegisterType((*RepositoryIsEmptyResponse)(nil), "gitaly.RepositoryIsEmptyResponse") proto.RegisterType((*RepackIncrementalRequest)(nil), "gitaly.RepackIncrementalRequest") proto.RegisterType((*RepackIncrementalResponse)(nil), "gitaly.RepackIncrementalResponse") proto.RegisterType((*RepackFullRequest)(nil), "gitaly.RepackFullRequest") proto.RegisterType((*RepackFullResponse)(nil), "gitaly.RepackFullResponse") proto.RegisterType((*GarbageCollectRequest)(nil), "gitaly.GarbageCollectRequest") proto.RegisterType((*GarbageCollectResponse)(nil), "gitaly.GarbageCollectResponse") proto.RegisterType((*RepositorySizeRequest)(nil), "gitaly.RepositorySizeRequest") proto.RegisterType((*RepositorySizeResponse)(nil), "gitaly.RepositorySizeResponse") proto.RegisterType((*ApplyGitattributesRequest)(nil), "gitaly.ApplyGitattributesRequest") proto.RegisterType((*ApplyGitattributesResponse)(nil), "gitaly.ApplyGitattributesResponse") proto.RegisterType((*FetchRemoteRequest)(nil), "gitaly.FetchRemoteRequest") proto.RegisterType((*FetchRemoteResponse)(nil), "gitaly.FetchRemoteResponse") proto.RegisterType((*CreateRepositoryRequest)(nil), "gitaly.CreateRepositoryRequest") proto.RegisterType((*CreateRepositoryResponse)(nil), "gitaly.CreateRepositoryResponse") proto.RegisterType((*GetArchiveRequest)(nil), "gitaly.GetArchiveRequest") proto.RegisterType((*GetArchiveResponse)(nil), "gitaly.GetArchiveResponse") proto.RegisterType((*HasLocalBranchesRequest)(nil), "gitaly.HasLocalBranchesRequest") proto.RegisterType((*HasLocalBranchesResponse)(nil), "gitaly.HasLocalBranchesResponse") proto.RegisterType((*FetchSourceBranchRequest)(nil), "gitaly.FetchSourceBranchRequest") proto.RegisterType((*FetchSourceBranchResponse)(nil), "gitaly.FetchSourceBranchResponse") proto.RegisterType((*FsckRequest)(nil), "gitaly.FsckRequest") proto.RegisterType((*FsckResponse)(nil), "gitaly.FsckResponse") proto.RegisterType((*WriteRefRequest)(nil), "gitaly.WriteRefRequest") proto.RegisterType((*WriteRefResponse)(nil), "gitaly.WriteRefResponse") 
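// The remaining repository-service message types are registered below, and
// proto.RegisterEnum records the name/value maps for GetArchiveRequest_Format
// (ZIP, TAR, TAR_GZ, TAR_BZ2) so enum values can be mapped to and from their names.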
proto.RegisterType((*FindMergeBaseRequest)(nil), "gitaly.FindMergeBaseRequest") proto.RegisterType((*FindMergeBaseResponse)(nil), "gitaly.FindMergeBaseResponse") proto.RegisterType((*CreateForkRequest)(nil), "gitaly.CreateForkRequest") proto.RegisterType((*CreateForkResponse)(nil), "gitaly.CreateForkResponse") proto.RegisterType((*IsRebaseInProgressRequest)(nil), "gitaly.IsRebaseInProgressRequest") proto.RegisterType((*IsRebaseInProgressResponse)(nil), "gitaly.IsRebaseInProgressResponse") proto.RegisterType((*CreateRepositoryFromURLRequest)(nil), "gitaly.CreateRepositoryFromURLRequest") proto.RegisterType((*CreateRepositoryFromURLResponse)(nil), "gitaly.CreateRepositoryFromURLResponse") proto.RegisterEnum("gitaly.GetArchiveRequest_Format", GetArchiveRequest_Format_name, GetArchiveRequest_Format_value) } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for RepositoryService service type RepositoryServiceClient interface { RepositoryExists(ctx context.Context, in *RepositoryExistsRequest, opts ...grpc.CallOption) (*RepositoryExistsResponse, error) RepositoryIsEmpty(ctx context.Context, in *RepositoryIsEmptyRequest, opts ...grpc.CallOption) (*RepositoryIsEmptyResponse, error) RepackIncremental(ctx context.Context, in *RepackIncrementalRequest, opts ...grpc.CallOption) (*RepackIncrementalResponse, error) RepackFull(ctx context.Context, in *RepackFullRequest, opts ...grpc.CallOption) (*RepackFullResponse, error) GarbageCollect(ctx context.Context, in *GarbageCollectRequest, opts ...grpc.CallOption) (*GarbageCollectResponse, error) RepositorySize(ctx context.Context, in *RepositorySizeRequest, opts ...grpc.CallOption) (*RepositorySizeResponse, error) ApplyGitattributes(ctx context.Context, in *ApplyGitattributesRequest, opts ...grpc.CallOption) (*ApplyGitattributesResponse, error) FetchRemote(ctx context.Context, in *FetchRemoteRequest, opts ...grpc.CallOption) (*FetchRemoteResponse, error) CreateRepository(ctx context.Context, in *CreateRepositoryRequest, opts ...grpc.CallOption) (*CreateRepositoryResponse, error) GetArchive(ctx context.Context, in *GetArchiveRequest, opts ...grpc.CallOption) (RepositoryService_GetArchiveClient, error) HasLocalBranches(ctx context.Context, in *HasLocalBranchesRequest, opts ...grpc.CallOption) (*HasLocalBranchesResponse, error) FetchSourceBranch(ctx context.Context, in *FetchSourceBranchRequest, opts ...grpc.CallOption) (*FetchSourceBranchResponse, error) Fsck(ctx context.Context, in *FsckRequest, opts ...grpc.CallOption) (*FsckResponse, error) WriteRef(ctx context.Context, in *WriteRefRequest, opts ...grpc.CallOption) (*WriteRefResponse, error) FindMergeBase(ctx context.Context, in *FindMergeBaseRequest, opts ...grpc.CallOption) (*FindMergeBaseResponse, error) CreateFork(ctx context.Context, in *CreateForkRequest, opts ...grpc.CallOption) (*CreateForkResponse, error) IsRebaseInProgress(ctx context.Context, in *IsRebaseInProgressRequest, opts ...grpc.CallOption) (*IsRebaseInProgressResponse, error) CreateRepositoryFromURL(ctx context.Context, in *CreateRepositoryFromURLRequest, opts ...grpc.CallOption) (*CreateRepositoryFromURLResponse, error) } type repositoryServiceClient struct { cc *grpc.ClientConn } func NewRepositoryServiceClient(cc *grpc.ClientConn) RepositoryServiceClient { return 
&repositoryServiceClient{cc} } func (c *repositoryServiceClient) RepositoryExists(ctx context.Context, in *RepositoryExistsRequest, opts ...grpc.CallOption) (*RepositoryExistsResponse, error) { out := new(RepositoryExistsResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/RepositoryExists", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) RepositoryIsEmpty(ctx context.Context, in *RepositoryIsEmptyRequest, opts ...grpc.CallOption) (*RepositoryIsEmptyResponse, error) { out := new(RepositoryIsEmptyResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/RepositoryIsEmpty", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) RepackIncremental(ctx context.Context, in *RepackIncrementalRequest, opts ...grpc.CallOption) (*RepackIncrementalResponse, error) { out := new(RepackIncrementalResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/RepackIncremental", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) RepackFull(ctx context.Context, in *RepackFullRequest, opts ...grpc.CallOption) (*RepackFullResponse, error) { out := new(RepackFullResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/RepackFull", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) GarbageCollect(ctx context.Context, in *GarbageCollectRequest, opts ...grpc.CallOption) (*GarbageCollectResponse, error) { out := new(GarbageCollectResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/GarbageCollect", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) RepositorySize(ctx context.Context, in *RepositorySizeRequest, opts ...grpc.CallOption) (*RepositorySizeResponse, error) { out := new(RepositorySizeResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/RepositorySize", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) ApplyGitattributes(ctx context.Context, in *ApplyGitattributesRequest, opts ...grpc.CallOption) (*ApplyGitattributesResponse, error) { out := new(ApplyGitattributesResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/ApplyGitattributes", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) FetchRemote(ctx context.Context, in *FetchRemoteRequest, opts ...grpc.CallOption) (*FetchRemoteResponse, error) { out := new(FetchRemoteResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/FetchRemote", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) CreateRepository(ctx context.Context, in *CreateRepositoryRequest, opts ...grpc.CallOption) (*CreateRepositoryResponse, error) { out := new(CreateRepositoryResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/CreateRepository", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) GetArchive(ctx context.Context, in *GetArchiveRequest, opts ...grpc.CallOption) (RepositoryService_GetArchiveClient, error) { stream, err := grpc.NewClientStream(ctx, &_RepositoryService_serviceDesc.Streams[0], c.cc, "/gitaly.RepositoryService/GetArchive", opts...) 
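// GetArchive is a server-streaming RPC: the generated wrapper below sends the
// single GetArchiveRequest, closes the send side of the stream, and the caller
// then reads GetArchiveResponse chunks via Recv() until io.EOF, typically
// concatenating the Data fields to reassemble the archive.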
if err != nil { return nil, err } x := &repositoryServiceGetArchiveClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type RepositoryService_GetArchiveClient interface { Recv() (*GetArchiveResponse, error) grpc.ClientStream } type repositoryServiceGetArchiveClient struct { grpc.ClientStream } func (x *repositoryServiceGetArchiveClient) Recv() (*GetArchiveResponse, error) { m := new(GetArchiveResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *repositoryServiceClient) HasLocalBranches(ctx context.Context, in *HasLocalBranchesRequest, opts ...grpc.CallOption) (*HasLocalBranchesResponse, error) { out := new(HasLocalBranchesResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/HasLocalBranches", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) FetchSourceBranch(ctx context.Context, in *FetchSourceBranchRequest, opts ...grpc.CallOption) (*FetchSourceBranchResponse, error) { out := new(FetchSourceBranchResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/FetchSourceBranch", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) Fsck(ctx context.Context, in *FsckRequest, opts ...grpc.CallOption) (*FsckResponse, error) { out := new(FsckResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/Fsck", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) WriteRef(ctx context.Context, in *WriteRefRequest, opts ...grpc.CallOption) (*WriteRefResponse, error) { out := new(WriteRefResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/WriteRef", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) FindMergeBase(ctx context.Context, in *FindMergeBaseRequest, opts ...grpc.CallOption) (*FindMergeBaseResponse, error) { out := new(FindMergeBaseResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/FindMergeBase", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) CreateFork(ctx context.Context, in *CreateForkRequest, opts ...grpc.CallOption) (*CreateForkResponse, error) { out := new(CreateForkResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/CreateFork", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) IsRebaseInProgress(ctx context.Context, in *IsRebaseInProgressRequest, opts ...grpc.CallOption) (*IsRebaseInProgressResponse, error) { out := new(IsRebaseInProgressResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/IsRebaseInProgress", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *repositoryServiceClient) CreateRepositoryFromURL(ctx context.Context, in *CreateRepositoryFromURLRequest, opts ...grpc.CallOption) (*CreateRepositoryFromURLResponse, error) { out := new(CreateRepositoryFromURLResponse) err := grpc.Invoke(ctx, "/gitaly.RepositoryService/CreateRepositoryFromURL", in, out, c.cc, opts...) 
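// Like the other non-streaming methods above, this uses grpc.Invoke, which
// performs a blocking unary round trip and fills in the response message on
// success.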
if err != nil { return nil, err } return out, nil } // Server API for RepositoryService service type RepositoryServiceServer interface { RepositoryExists(context.Context, *RepositoryExistsRequest) (*RepositoryExistsResponse, error) RepositoryIsEmpty(context.Context, *RepositoryIsEmptyRequest) (*RepositoryIsEmptyResponse, error) RepackIncremental(context.Context, *RepackIncrementalRequest) (*RepackIncrementalResponse, error) RepackFull(context.Context, *RepackFullRequest) (*RepackFullResponse, error) GarbageCollect(context.Context, *GarbageCollectRequest) (*GarbageCollectResponse, error) RepositorySize(context.Context, *RepositorySizeRequest) (*RepositorySizeResponse, error) ApplyGitattributes(context.Context, *ApplyGitattributesRequest) (*ApplyGitattributesResponse, error) FetchRemote(context.Context, *FetchRemoteRequest) (*FetchRemoteResponse, error) CreateRepository(context.Context, *CreateRepositoryRequest) (*CreateRepositoryResponse, error) GetArchive(*GetArchiveRequest, RepositoryService_GetArchiveServer) error HasLocalBranches(context.Context, *HasLocalBranchesRequest) (*HasLocalBranchesResponse, error) FetchSourceBranch(context.Context, *FetchSourceBranchRequest) (*FetchSourceBranchResponse, error) Fsck(context.Context, *FsckRequest) (*FsckResponse, error) WriteRef(context.Context, *WriteRefRequest) (*WriteRefResponse, error) FindMergeBase(context.Context, *FindMergeBaseRequest) (*FindMergeBaseResponse, error) CreateFork(context.Context, *CreateForkRequest) (*CreateForkResponse, error) IsRebaseInProgress(context.Context, *IsRebaseInProgressRequest) (*IsRebaseInProgressResponse, error) CreateRepositoryFromURL(context.Context, *CreateRepositoryFromURLRequest) (*CreateRepositoryFromURLResponse, error) } func RegisterRepositoryServiceServer(s *grpc.Server, srv RepositoryServiceServer) { s.RegisterService(&_RepositoryService_serviceDesc, srv) } func _RepositoryService_RepositoryExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RepositoryExistsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).RepositoryExists(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/RepositoryExists", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).RepositoryExists(ctx, req.(*RepositoryExistsRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_RepositoryIsEmpty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RepositoryIsEmptyRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).RepositoryIsEmpty(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/RepositoryIsEmpty", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).RepositoryIsEmpty(ctx, req.(*RepositoryIsEmptyRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_RepackIncremental_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RepackIncrementalRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return 
srv.(RepositoryServiceServer).RepackIncremental(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/RepackIncremental", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).RepackIncremental(ctx, req.(*RepackIncrementalRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_RepackFull_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RepackFullRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).RepackFull(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/RepackFull", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).RepackFull(ctx, req.(*RepackFullRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_GarbageCollect_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GarbageCollectRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).GarbageCollect(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/GarbageCollect", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).GarbageCollect(ctx, req.(*GarbageCollectRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_RepositorySize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RepositorySizeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).RepositorySize(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/RepositorySize", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).RepositorySize(ctx, req.(*RepositorySizeRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_ApplyGitattributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ApplyGitattributesRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).ApplyGitattributes(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/ApplyGitattributes", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).ApplyGitattributes(ctx, req.(*ApplyGitattributesRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_FetchRemote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FetchRemoteRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).FetchRemote(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/FetchRemote", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return 
srv.(RepositoryServiceServer).FetchRemote(ctx, req.(*FetchRemoteRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_CreateRepository_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateRepositoryRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).CreateRepository(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/CreateRepository", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).CreateRepository(ctx, req.(*CreateRepositoryRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_GetArchive_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(GetArchiveRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(RepositoryServiceServer).GetArchive(m, &repositoryServiceGetArchiveServer{stream}) } type RepositoryService_GetArchiveServer interface { Send(*GetArchiveResponse) error grpc.ServerStream } type repositoryServiceGetArchiveServer struct { grpc.ServerStream } func (x *repositoryServiceGetArchiveServer) Send(m *GetArchiveResponse) error { return x.ServerStream.SendMsg(m) } func _RepositoryService_HasLocalBranches_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(HasLocalBranchesRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).HasLocalBranches(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/HasLocalBranches", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).HasLocalBranches(ctx, req.(*HasLocalBranchesRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_FetchSourceBranch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FetchSourceBranchRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).FetchSourceBranch(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/FetchSourceBranch", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).FetchSourceBranch(ctx, req.(*FetchSourceBranchRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_Fsck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FsckRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).Fsck(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/Fsck", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).Fsck(ctx, req.(*FsckRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_WriteRef_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(WriteRefRequest) if err := dec(in); err != nil { return nil, err 
} if interceptor == nil { return srv.(RepositoryServiceServer).WriteRef(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/WriteRef", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).WriteRef(ctx, req.(*WriteRefRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_FindMergeBase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FindMergeBaseRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).FindMergeBase(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/FindMergeBase", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).FindMergeBase(ctx, req.(*FindMergeBaseRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_CreateFork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateForkRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).CreateFork(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/CreateFork", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).CreateFork(ctx, req.(*CreateForkRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_IsRebaseInProgress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(IsRebaseInProgressRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).IsRebaseInProgress(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/IsRebaseInProgress", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).IsRebaseInProgress(ctx, req.(*IsRebaseInProgressRequest)) } return interceptor(ctx, in, info, handler) } func _RepositoryService_CreateRepositoryFromURL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateRepositoryFromURLRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RepositoryServiceServer).CreateRepositoryFromURL(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.RepositoryService/CreateRepositoryFromURL", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RepositoryServiceServer).CreateRepositoryFromURL(ctx, req.(*CreateRepositoryFromURLRequest)) } return interceptor(ctx, in, info, handler) } var _RepositoryService_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.RepositoryService", HandlerType: (*RepositoryServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "RepositoryExists", Handler: _RepositoryService_RepositoryExists_Handler, }, { MethodName: "RepositoryIsEmpty", Handler: _RepositoryService_RepositoryIsEmpty_Handler, }, { MethodName: "RepackIncremental", Handler: _RepositoryService_RepackIncremental_Handler, }, { MethodName: "RepackFull", Handler: 
_RepositoryService_RepackFull_Handler, }, { MethodName: "GarbageCollect", Handler: _RepositoryService_GarbageCollect_Handler, }, { MethodName: "RepositorySize", Handler: _RepositoryService_RepositorySize_Handler, }, { MethodName: "ApplyGitattributes", Handler: _RepositoryService_ApplyGitattributes_Handler, }, { MethodName: "FetchRemote", Handler: _RepositoryService_FetchRemote_Handler, }, { MethodName: "CreateRepository", Handler: _RepositoryService_CreateRepository_Handler, }, { MethodName: "HasLocalBranches", Handler: _RepositoryService_HasLocalBranches_Handler, }, { MethodName: "FetchSourceBranch", Handler: _RepositoryService_FetchSourceBranch_Handler, }, { MethodName: "Fsck", Handler: _RepositoryService_Fsck_Handler, }, { MethodName: "WriteRef", Handler: _RepositoryService_WriteRef_Handler, }, { MethodName: "FindMergeBase", Handler: _RepositoryService_FindMergeBase_Handler, }, { MethodName: "CreateFork", Handler: _RepositoryService_CreateFork_Handler, }, { MethodName: "IsRebaseInProgress", Handler: _RepositoryService_IsRebaseInProgress_Handler, }, { MethodName: "CreateRepositoryFromURL", Handler: _RepositoryService_CreateRepositoryFromURL_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "GetArchive", Handler: _RepositoryService_GetArchive_Handler, ServerStreams: true, }, }, Metadata: "repository-service.proto", } func init() { proto.RegisterFile("repository-service.proto", fileDescriptor10) } var fileDescriptor10 = []byte{ // 1250 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x58, 0xdd, 0x6e, 0xdb, 0x36, 0x14, 0xb6, 0xe3, 0xc6, 0x49, 0x8e, 0xdd, 0xce, 0x61, 0x9d, 0x46, 0x56, 0x7f, 0x92, 0x72, 0xc3, 0x16, 0x60, 0x5b, 0x50, 0x38, 0xc0, 0xb0, 0x9b, 0xa1, 0x48, 0x8a, 0x3a, 0x35, 0xfa, 0x83, 0x4e, 0xed, 0x50, 0x20, 0xc0, 0x60, 0xc8, 0x32, 0x6d, 0x0b, 0x96, 0x45, 0x8f, 0xa4, 0xb3, 0xba, 0x4f, 0xb0, 0xe7, 0xda, 0x2b, 0xec, 0x11, 0x76, 0xd9, 0x97, 0x18, 0x44, 0xd2, 0xa2, 0x64, 0x49, 0xd9, 0x85, 0x30, 0xec, 0x4e, 0x3c, 0x24, 0xbf, 0x73, 0x78, 0x7e, 0x3f, 0x1b, 0x2c, 0x46, 0x16, 0x94, 0xfb, 0x82, 0xb2, 0xd5, 0xf7, 0x9c, 0xb0, 0x6b, 0xdf, 0x23, 0xa7, 0x0b, 0x46, 0x05, 0x45, 0xf5, 0x89, 0x2f, 0xdc, 0x60, 0x65, 0x37, 0xf9, 0xd4, 0x65, 0x64, 0xa4, 0xa4, 0xf8, 0x35, 0x1c, 0x3a, 0xf1, 0x8d, 0xe7, 0x1f, 0x7d, 0x2e, 0xb8, 0x43, 0x7e, 0x5b, 0x12, 0x2e, 0x50, 0x17, 0xc0, 0x80, 0x59, 0xd5, 0xe3, 0xea, 0x49, 0xa3, 0x8b, 0x4e, 0x15, 0xca, 0xa9, 0xb9, 0xe4, 0x24, 0x4e, 0xe1, 0x2e, 0x58, 0x59, 0x38, 0xbe, 0xa0, 0x21, 0x27, 0xe8, 0x1e, 0xd4, 0x89, 0x94, 0x48, 0xac, 0x5d, 0x47, 0xaf, 0xf0, 0x9b, 0xe4, 0x9d, 0x3e, 0x7f, 0x3e, 0x5f, 0x88, 0x55, 0x19, 0x1b, 0x7e, 0x80, 0x4e, 0x0e, 0x9e, 0x36, 0xa2, 0x03, 0xbb, 0x3e, 0x1f, 0x90, 0x48, 0xa6, 0xcd, 0xd8, 0xf1, 0xd5, 0x11, 0x6d, 0x87, 0xeb, 0xcd, 0xfa, 0xa1, 0xc7, 0xc8, 0x9c, 0x84, 0xc2, 0x0d, 0xca, 0xd8, 0x71, 0x5f, 0xda, 0xb1, 0x89, 0xa7, 0xec, 0xc0, 0x01, 0xec, 0xab, 0xcd, 0xde, 0x32, 0x28, 0xa3, 0x05, 0x7d, 0x09, 0xb7, 0x3d, 0x46, 0x5c, 0x41, 0x06, 0x43, 0x5f, 0xcc, 0xdd, 0x85, 0xb5, 0x25, 0x5f, 0xd5, 0x54, 0xc2, 0x0b, 0x29, 0xc3, 0x6d, 0x40, 0x49, 0x6d, 0xda, 0x86, 0x05, 0x1c, 0x5c, 0xba, 0x6c, 0xe8, 0x4e, 0xc8, 0x33, 0x1a, 0x04, 0xc4, 0x13, 0xff, 0xb9, 0x1d, 0x16, 0xdc, 0xdb, 0xd4, 0xa8, 0x6d, 0x79, 0x09, 0x07, 0x06, 0xf8, 0x9d, 0xff, 0x89, 0x94, 0xf1, 0xfc, 0x77, 0x70, 0x6f, 0x13, 0x4c, 0x87, 0x1f, 0xc1, 0x2d, 0xee, 0x7f, 0x22, 0x12, 0xa7, 0xe6, 0xc8, 0x6f, 0x3c, 0x83, 0xce, 0xf9, 0x62, 0x11, 0xac, 0x2e, 0x7d, 0xe1, 0x0a, 0xc1, 0xfc, 0xe1, 0x52, 0x90, 0x32, 0x45, 0x80, 0x6c, 0xd8, 0x65, 0xe4, 
0xda, 0xe7, 0x3e, 0x0d, 0xa5, 0x17, 0x9a, 0x4e, 0xbc, 0xc6, 0x0f, 0xc0, 0xce, 0x53, 0xa6, 0xbd, 0xf0, 0x77, 0x15, 0x50, 0x8f, 0x08, 0x6f, 0xea, 0x90, 0x39, 0x15, 0x65, 0x7c, 0x10, 0x55, 0x1b, 0x93, 0x20, 0xd2, 0x84, 0x3d, 0x47, 0xaf, 0x50, 0x1b, 0xb6, 0xc7, 0x94, 0x79, 0xc4, 0xaa, 0xc9, 0xf8, 0xa8, 0x05, 0x3a, 0x84, 0x9d, 0x90, 0x0e, 0x84, 0x3b, 0xe1, 0xd6, 0x2d, 0x55, 0x9c, 0x21, 0x7d, 0xef, 0x4e, 0x38, 0xb2, 0x60, 0x47, 0xf8, 0x73, 0x42, 0x97, 0xc2, 0xda, 0x3e, 0xae, 0x9e, 0x6c, 0x3b, 0xeb, 0x65, 0x74, 0x85, 0xf3, 0xe9, 0x60, 0x46, 0x56, 0x56, 0x5d, 0x69, 0xe0, 0x7c, 0xfa, 0x92, 0xac, 0xd0, 0x11, 0x34, 0x66, 0x21, 0xfd, 0x3d, 0x1c, 0x4c, 0x69, 0x54, 0xec, 0x3b, 0x72, 0x13, 0xa4, 0xe8, 0x45, 0x24, 0xc1, 0x07, 0x70, 0x37, 0xf5, 0x48, 0xfd, 0xf8, 0xd7, 0x70, 0xf8, 0x4c, 0x26, 0x4b, 0xe2, 0x45, 0x25, 0x92, 0xc0, 0x06, 0x2b, 0x0b, 0xa7, 0x55, 0x7d, 0xae, 0xc2, 0xfe, 0x25, 0x11, 0xe7, 0xcc, 0x9b, 0xfa, 0xd7, 0xa5, 0xdc, 0x7c, 0x1f, 0xf6, 0x3c, 0x3a, 0x9f, 0xfb, 0x62, 0xe0, 0x8f, 0xb4, 0xa7, 0x77, 0x95, 0xa0, 0x3f, 0x8a, 0x62, 0xb0, 0x60, 0x64, 0xec, 0x7f, 0x94, 0xce, 0xde, 0x73, 0xf4, 0x0a, 0xfd, 0x08, 0xf5, 0x31, 0x65, 0x73, 0x57, 0x48, 0x67, 0xdf, 0xe9, 0x1e, 0xaf, 0x95, 0x64, 0x6c, 0x3a, 0xed, 0xc9, 0x73, 0x8e, 0x3e, 0x8f, 0xcf, 0xa0, 0xae, 0x24, 0x68, 0x07, 0x6a, 0x57, 0xfd, 0xb7, 0xad, 0x4a, 0xf4, 0xf1, 0xfe, 0xdc, 0x69, 0x55, 0x11, 0x40, 0xfd, 0xfd, 0xb9, 0x33, 0xb8, 0xbc, 0x6a, 0x6d, 0xa1, 0x06, 0xec, 0x44, 0xdf, 0x17, 0x57, 0xdd, 0x56, 0x0d, 0x9f, 0x00, 0x4a, 0x02, 0x9b, 0x52, 0x18, 0xb9, 0xc2, 0x95, 0xef, 0x6c, 0x3a, 0xf2, 0x3b, 0x0a, 0xc1, 0x0b, 0x97, 0xbf, 0xa2, 0x9e, 0x1b, 0x5c, 0x30, 0x37, 0xf4, 0xa6, 0xa5, 0x0a, 0x01, 0x3f, 0x01, 0x2b, 0x0b, 0xa7, 0xd5, 0xb7, 0x61, 0xfb, 0xda, 0x0d, 0x96, 0x44, 0x77, 0x61, 0xb5, 0xc0, 0x7f, 0x55, 0xc1, 0x92, 0xb9, 0xf1, 0x8e, 0x2e, 0x99, 0x47, 0xd4, 0xad, 0x32, 0xf1, 0x79, 0x0a, 0xfb, 0x5c, 0x42, 0x0d, 0x12, 0x57, 0xb7, 0x0a, 0xaf, 0xb6, 0xd4, 0x61, 0x27, 0xd5, 0xd7, 0x34, 0xc0, 0x50, 0x1a, 0x23, 0x43, 0xd9, 0x74, 0x9a, 0x3c, 0x61, 0x20, 0x7a, 0x08, 0x20, 0x5c, 0x36, 0x21, 0x62, 0xc0, 0xc8, 0x58, 0x06, 0xb5, 0xe9, 0xec, 0x29, 0x89, 0x43, 0xc6, 0xf8, 0x0c, 0x3a, 0x39, 0x8f, 0x32, 0x63, 0x91, 0x11, 0xbe, 0x0c, 0xc4, 0x7a, 0x2c, 0xaa, 0x15, 0x3e, 0x87, 0x46, 0x8f, 0x7b, 0xb3, 0x32, 0xfe, 0xff, 0x0a, 0x9a, 0x0a, 0xc2, 0xf8, 0x9c, 0x30, 0x46, 0x99, 0x8e, 0xb9, 0x5a, 0xe0, 0x3f, 0xab, 0xf0, 0xc5, 0x07, 0xe6, 0x47, 0x85, 0x32, 0x2e, 0xe3, 0xea, 0x16, 0xd4, 0xa2, 0xd7, 0xab, 0x8e, 0x17, 0x7d, 0xa6, 0x1a, 0x61, 0x2d, 0xdd, 0x08, 0xd1, 0x63, 0x68, 0xd2, 0x60, 0x34, 0x88, 0xf7, 0x95, 0xd3, 0x1a, 0x34, 0x18, 0x39, 0xeb, 0x23, 0x71, 0xab, 0xda, 0x4e, 0xb6, 0xaa, 0x36, 0x6c, 0xf3, 0x29, 0x09, 0x02, 0xd9, 0x75, 0x76, 0x1d, 0xb5, 0xc0, 0x27, 0xd0, 0x32, 0x6f, 0xb8, 0xf1, 0xb9, 0x53, 0x68, 0xf7, 0xfc, 0x70, 0xf4, 0x9a, 0xb0, 0x09, 0xb9, 0x70, 0x79, 0xa9, 0xea, 0x7f, 0x00, 0x7b, 0xeb, 0x07, 0x70, 0x6b, 0xeb, 0xb8, 0x16, 0x85, 0x3d, 0x16, 0xe0, 0x6f, 0xe1, 0x60, 0x43, 0x93, 0x29, 0xbd, 0xa1, 0xcb, 0x55, 0xea, 0xef, 0x39, 0xf2, 0x1b, 0xff, 0x51, 0x85, 0x7d, 0xd5, 0xaf, 0x7a, 0x94, 0xcd, 0xfe, 0xcf, 0x94, 0x8f, 0xd8, 0x42, 0xd2, 0x92, 0x98, 0xb1, 0x74, 0xfa, 0xdc, 0x21, 0x91, 0xb1, 0xfd, 0xf0, 0x2d, 0xa3, 0x13, 0x46, 0x38, 0x2f, 0xd9, 0x3a, 0x99, 0x84, 0x4b, 0xb4, 0x4e, 0x25, 0xe8, 0x8f, 0xf0, 0x4f, 0x60, 0xe7, 0x69, 0xd3, 0x0e, 0x3c, 0x82, 0x86, 0x1f, 0x0e, 0x16, 0x5a, 0xac, 0x0b, 0x07, 0xfc, 0xf8, 0x20, 0x1e, 0xc3, 0xa3, 0xcd, 0xe6, 0xdf, 0x63, 0x74, 0xfe, 0x8b, 0xf3, 0xaa, 0x64, 0x86, 0x2f, 0x59, 0xa0, 0x6d, 0x8d, 0x3e, 0xf1, 0x63, 0x38, 0x2a, 0xd4, 0xa3, 0x6c, 
0xed, 0x7e, 0x6e, 0x48, 0xaa, 0xb7, 0x66, 0x23, 0x8a, 0x93, 0xa3, 0x0f, 0xd0, 0xda, 0x24, 0xca, 0xe8, 0x28, 0xab, 0x3e, 0xc5, 0xc8, 0xed, 0xe3, 0xe2, 0x03, 0x3a, 0x48, 0x15, 0x74, 0x95, 0xd4, 0xa6, 0xd9, 0x2f, 0xca, 0xb9, 0x98, 0x26, 0xda, 0xf6, 0xe3, 0x1b, 0x4e, 0x6c, 0x60, 0xa7, 0x19, 0x6d, 0x0a, 0x3b, 0x97, 0x3c, 0xa7, 0xb0, 0x0b, 0xe8, 0x70, 0x05, 0x3d, 0x07, 0x30, 0x14, 0x15, 0x75, 0xd2, 0x57, 0x12, 0x24, 0xd9, 0xb6, 0xf3, 0xb6, 0x62, 0x98, 0x9f, 0xe1, 0x4e, 0x9a, 0x61, 0xa2, 0x87, 0xf1, 0x70, 0xcd, 0xe3, 0xba, 0xf6, 0xa3, 0xa2, 0xed, 0x24, 0x64, 0x9a, 0x4d, 0x1a, 0xc8, 0x5c, 0xca, 0x6a, 0x20, 0xf3, 0x49, 0x28, 0xae, 0xa0, 0x5f, 0x01, 0x65, 0x59, 0x20, 0x8a, 0xfd, 0x54, 0x48, 0x47, 0x6d, 0x7c, 0xd3, 0x91, 0x18, 0xfe, 0x05, 0x34, 0x12, 0x04, 0x0b, 0xc5, 0x1e, 0xcb, 0x52, 0x4b, 0xfb, 0x7e, 0xee, 0x5e, 0x8c, 0xf4, 0x01, 0x5a, 0x9b, 0xf9, 0x6d, 0xd2, 0xb4, 0x80, 0xad, 0x99, 0x34, 0x2d, 0xe4, 0x5f, 0x15, 0x74, 0x09, 0x60, 0x38, 0x89, 0x09, 0x77, 0x86, 0x00, 0x99, 0x70, 0x67, 0x29, 0x0c, 0xae, 0x3c, 0xa9, 0x46, 0x16, 0x6e, 0x72, 0x0c, 0x63, 0x61, 0x01, 0x99, 0x31, 0x16, 0x16, 0xd1, 0x13, 0x95, 0xec, 0x99, 0xa1, 0x6d, 0x92, 0xbd, 0x88, 0xa4, 0x98, 0x64, 0x2f, 0x9c, 0xf8, 0xb8, 0x82, 0xce, 0xe0, 0x56, 0x34, 0x98, 0xd1, 0xdd, 0xf8, 0xb0, 0x99, 0xf4, 0x76, 0x3b, 0x2d, 0x8c, 0x2f, 0x3d, 0x85, 0xdd, 0xf5, 0x88, 0x43, 0x87, 0xeb, 0x33, 0x1b, 0x83, 0xdb, 0xb6, 0xb2, 0x1b, 0x31, 0xc0, 0x1b, 0xb8, 0x9d, 0x9a, 0x47, 0xe8, 0x41, 0xac, 0x29, 0x67, 0x20, 0xda, 0x0f, 0x0b, 0x76, 0x93, 0x25, 0x6b, 0xe6, 0x84, 0x89, 0x61, 0x66, 0x8a, 0x99, 0x18, 0xe6, 0x8c, 0x15, 0x59, 0x0c, 0xd9, 0x56, 0x6f, 0x8a, 0xa1, 0x70, 0xe8, 0x98, 0x62, 0x28, 0x9e, 0x14, 0xb8, 0x82, 0x82, 0xec, 0xcf, 0x0a, 0xdd, 0xa2, 0xd1, 0xd7, 0x45, 0x89, 0x9a, 0x9e, 0x15, 0xf6, 0x37, 0xff, 0x7a, 0x6e, 0xad, 0x6d, 0x58, 0x97, 0x7f, 0xab, 0x9c, 0xfd, 0x13, 0x00, 0x00, 0xff, 0xff, 0xef, 0x87, 0x20, 0x75, 0x88, 0x11, 0x00, 0x00, } shared.pb.go000066400000000000000000000267731324746544700351630ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: shared.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type Repository struct { StorageName string `protobuf:"bytes,2,opt,name=storage_name,json=storageName" json:"storage_name,omitempty"` RelativePath string `protobuf:"bytes,3,opt,name=relative_path,json=relativePath" json:"relative_path,omitempty"` // Sets the GIT_OBJECT_DIRECTORY envvar on git commands to the value of this field. // It influences the object storage directory the SHA1 directories are created underneath. GitObjectDirectory string `protobuf:"bytes,4,opt,name=git_object_directory,json=gitObjectDirectory" json:"git_object_directory,omitempty"` // Sets the GIT_ALTERNATE_OBJECT_DIRECTORIES envvar on git commands to the values of this field. // It influences the list of Git object directories which can be used to search for Git objects. GitAlternateObjectDirectories []string `protobuf:"bytes,5,rep,name=git_alternate_object_directories,json=gitAlternateObjectDirectories" json:"git_alternate_object_directories,omitempty"` // Used in callbacks to GitLab so that it knows what repository the event is // associated with. 
May be left empty on RPC's that do not perform callbacks. GlRepository string `protobuf:"bytes,6,opt,name=gl_repository,json=glRepository" json:"gl_repository,omitempty"` } func (m *Repository) Reset() { *m = Repository{} } func (m *Repository) String() string { return proto.CompactTextString(m) } func (*Repository) ProtoMessage() {} func (*Repository) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{0} } func (m *Repository) GetStorageName() string { if m != nil { return m.StorageName } return "" } func (m *Repository) GetRelativePath() string { if m != nil { return m.RelativePath } return "" } func (m *Repository) GetGitObjectDirectory() string { if m != nil { return m.GitObjectDirectory } return "" } func (m *Repository) GetGitAlternateObjectDirectories() []string { if m != nil { return m.GitAlternateObjectDirectories } return nil } func (m *Repository) GetGlRepository() string { if m != nil { return m.GlRepository } return "" } // Corresponds to Gitlab::Git::Commit type GitCommit struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` Subject []byte `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` Author *CommitAuthor `protobuf:"bytes,4,opt,name=author" json:"author,omitempty"` Committer *CommitAuthor `protobuf:"bytes,5,opt,name=committer" json:"committer,omitempty"` ParentIds []string `protobuf:"bytes,6,rep,name=parent_ids,json=parentIds" json:"parent_ids,omitempty"` } func (m *GitCommit) Reset() { *m = GitCommit{} } func (m *GitCommit) String() string { return proto.CompactTextString(m) } func (*GitCommit) ProtoMessage() {} func (*GitCommit) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{1} } func (m *GitCommit) GetId() string { if m != nil { return m.Id } return "" } func (m *GitCommit) GetSubject() []byte { if m != nil { return m.Subject } return nil } func (m *GitCommit) GetBody() []byte { if m != nil { return m.Body } return nil } func (m *GitCommit) GetAuthor() *CommitAuthor { if m != nil { return m.Author } return nil } func (m *GitCommit) GetCommitter() *CommitAuthor { if m != nil { return m.Committer } return nil } func (m *GitCommit) GetParentIds() []string { if m != nil { return m.ParentIds } return nil } type CommitAuthor struct { Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Email []byte `protobuf:"bytes,2,opt,name=email,proto3" json:"email,omitempty"` Date *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=date" json:"date,omitempty"` } func (m *CommitAuthor) Reset() { *m = CommitAuthor{} } func (m *CommitAuthor) String() string { return proto.CompactTextString(m) } func (*CommitAuthor) ProtoMessage() {} func (*CommitAuthor) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{2} } func (m *CommitAuthor) GetName() []byte { if m != nil { return m.Name } return nil } func (m *CommitAuthor) GetEmail() []byte { if m != nil { return m.Email } return nil } func (m *CommitAuthor) GetDate() *google_protobuf.Timestamp { if m != nil { return m.Date } return nil } type ExitStatus struct { Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` } func (m *ExitStatus) Reset() { *m = ExitStatus{} } func (m *ExitStatus) String() string { return proto.CompactTextString(m) } func (*ExitStatus) ProtoMessage() {} func (*ExitStatus) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{3} } func (m *ExitStatus) GetValue() int32 { if m != nil { return m.Value } return 0 } // Corresponds 
to Gitlab::Git::Branch type Branch struct { Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` TargetCommit *GitCommit `protobuf:"bytes,2,opt,name=target_commit,json=targetCommit" json:"target_commit,omitempty"` } func (m *Branch) Reset() { *m = Branch{} } func (m *Branch) String() string { return proto.CompactTextString(m) } func (*Branch) ProtoMessage() {} func (*Branch) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{4} } func (m *Branch) GetName() []byte { if m != nil { return m.Name } return nil } func (m *Branch) GetTargetCommit() *GitCommit { if m != nil { return m.TargetCommit } return nil } type Tag struct { Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` TargetCommit *GitCommit `protobuf:"bytes,3,opt,name=target_commit,json=targetCommit" json:"target_commit,omitempty"` Message []byte `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` } func (m *Tag) Reset() { *m = Tag{} } func (m *Tag) String() string { return proto.CompactTextString(m) } func (*Tag) ProtoMessage() {} func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{5} } func (m *Tag) GetName() []byte { if m != nil { return m.Name } return nil } func (m *Tag) GetId() string { if m != nil { return m.Id } return "" } func (m *Tag) GetTargetCommit() *GitCommit { if m != nil { return m.TargetCommit } return nil } func (m *Tag) GetMessage() []byte { if m != nil { return m.Message } return nil } type User struct { GlId string `protobuf:"bytes,1,opt,name=gl_id,json=glId" json:"gl_id,omitempty"` Name []byte `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` Email []byte `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` GlUsername string `protobuf:"bytes,4,opt,name=gl_username,json=glUsername" json:"gl_username,omitempty"` } func (m *User) Reset() { *m = User{} } func (m *User) String() string { return proto.CompactTextString(m) } func (*User) ProtoMessage() {} func (*User) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{6} } func (m *User) GetGlId() string { if m != nil { return m.GlId } return "" } func (m *User) GetName() []byte { if m != nil { return m.Name } return nil } func (m *User) GetEmail() []byte { if m != nil { return m.Email } return nil } func (m *User) GetGlUsername() string { if m != nil { return m.GlUsername } return "" } func init() { proto.RegisterType((*Repository)(nil), "gitaly.Repository") proto.RegisterType((*GitCommit)(nil), "gitaly.GitCommit") proto.RegisterType((*CommitAuthor)(nil), "gitaly.CommitAuthor") proto.RegisterType((*ExitStatus)(nil), "gitaly.ExitStatus") proto.RegisterType((*Branch)(nil), "gitaly.Branch") proto.RegisterType((*Tag)(nil), "gitaly.Tag") proto.RegisterType((*User)(nil), "gitaly.User") } func init() { proto.RegisterFile("shared.proto", fileDescriptor11) } var fileDescriptor11 = []byte{ // 518 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x6f, 0xd3, 0x40, 0x10, 0x55, 0x1c, 0xc7, 0x90, 0x89, 0x8b, 0x60, 0xc9, 0xc1, 0xaa, 0x54, 0x35, 0x98, 0x4b, 0x0f, 0xc8, 0x45, 0x41, 0xe2, 0x5e, 0x3e, 0x54, 0x95, 0x03, 0xa0, 0x25, 0x3d, 0x5b, 0x9b, 0x78, 0xba, 0x5e, 0xb4, 0xce, 0x46, 0xbb, 0xe3, 0x8a, 0x88, 0x0b, 0xbf, 0x8f, 0x5f, 0x85, 0xbc, 0x1b, 0xa7, 0x05, 0x22, 0xc4, 0xcd, 0x33, 0xfb, 0x66, 0xe6, 0xbd, 0x79, 0x63, 0x48, 0x5d, 0x2d, 0x2c, 0x56, 0xc5, 0xc6, 0x1a, 0x32, 0x2c, 0x91, 0x8a, 0x84, 0xde, 0x1e, 0x9f, 0x4a, 0x63, 
0xa4, 0xc6, 0x73, 0x9f, 0x5d, 0xb6, 0x37, 0xe7, 0xa4, 0x1a, 0x74, 0x24, 0x9a, 0x4d, 0x00, 0xe6, 0x3f, 0x22, 0x00, 0x8e, 0x1b, 0xe3, 0x14, 0x19, 0xbb, 0x65, 0xcf, 0x20, 0x75, 0x64, 0xac, 0x90, 0x58, 0xae, 0x45, 0x83, 0x59, 0x34, 0x1b, 0x9c, 0x8d, 0xf9, 0x64, 0x97, 0xfb, 0x28, 0x1a, 0x64, 0xcf, 0xe1, 0xc8, 0xa2, 0x16, 0xa4, 0x6e, 0xb1, 0xdc, 0x08, 0xaa, 0xb3, 0xa1, 0xc7, 0xa4, 0x7d, 0xf2, 0xb3, 0xa0, 0x9a, 0xbd, 0x84, 0xa9, 0x54, 0x54, 0x9a, 0xe5, 0x57, 0x5c, 0x51, 0x59, 0x29, 0x8b, 0xab, 0xae, 0x7f, 0x16, 0x7b, 0x2c, 0x93, 0x8a, 0x3e, 0xf9, 0xa7, 0x77, 0xfd, 0x0b, 0xbb, 0x84, 0x59, 0x57, 0x21, 0x34, 0xa1, 0x5d, 0x0b, 0xc2, 0x3f, 0x6b, 0x15, 0xba, 0x6c, 0x34, 0x1b, 0x9e, 0x8d, 0xf9, 0x89, 0x54, 0x74, 0xd1, 0xc3, 0x7e, 0x6f, 0xa3, 0xd0, 0x75, 0xfc, 0xa4, 0x2e, 0xed, 0x5e, 0x53, 0x96, 0x04, 0x7e, 0x52, 0xdf, 0xe9, 0xfc, 0x10, 0x3f, 0x1c, 0x3c, 0x8e, 0x78, 0xdc, 0xf1, 0xcf, 0x7f, 0x0e, 0x60, 0x7c, 0xa9, 0xe8, 0xad, 0x69, 0x1a, 0x45, 0xec, 0x11, 0x44, 0xaa, 0xca, 0x06, 0xbe, 0x26, 0x52, 0x15, 0xcb, 0xe0, 0x81, 0x6b, 0xfd, 0x10, 0xbf, 0x8c, 0x94, 0xf7, 0x21, 0x63, 0x10, 0x2f, 0x4d, 0xb5, 0xf5, 0xfa, 0x53, 0xee, 0xbf, 0xd9, 0x0b, 0x48, 0x44, 0x4b, 0xb5, 0xb1, 0x5e, 0xe9, 0x64, 0x3e, 0x2d, 0x82, 0x11, 0x45, 0xe8, 0x7e, 0xe1, 0xdf, 0xf8, 0x0e, 0xc3, 0xe6, 0x30, 0x5e, 0xf9, 0x3c, 0xa1, 0xcd, 0x46, 0xff, 0x28, 0xb8, 0x83, 0xb1, 0x13, 0x80, 0x8d, 0xb0, 0xb8, 0xa6, 0x52, 0x55, 0x2e, 0x4b, 0xfc, 0x46, 0xc6, 0x21, 0x73, 0x55, 0xb9, 0xbc, 0x86, 0xf4, 0x7e, 0x65, 0x47, 0xd2, 0x1b, 0x39, 0x08, 0x24, 0xbb, 0x6f, 0x36, 0x85, 0x11, 0x36, 0x42, 0xe9, 0x9d, 0xa0, 0x10, 0xb0, 0x02, 0xe2, 0x4a, 0x10, 0x7a, 0x39, 0x93, 0xf9, 0x71, 0x11, 0x2e, 0xa7, 0xe8, 0x2f, 0xa7, 0x58, 0xf4, 0x97, 0xc3, 0x3d, 0x2e, 0xcf, 0x01, 0xde, 0x7f, 0x53, 0xf4, 0x85, 0x04, 0xb5, 0xae, 0xeb, 0x79, 0x2b, 0x74, 0x1b, 0x06, 0x8d, 0x78, 0x08, 0xf2, 0x05, 0x24, 0x6f, 0xac, 0x58, 0xaf, 0xea, 0x83, 0x3c, 0x5e, 0xc3, 0x11, 0x09, 0x2b, 0x91, 0xca, 0x20, 0xcf, 0xf3, 0x99, 0xcc, 0x9f, 0xf4, 0x2b, 0xd8, 0x9b, 0xc2, 0xd3, 0x80, 0x0b, 0x51, 0xfe, 0x1d, 0x86, 0x0b, 0x21, 0x0f, 0xb6, 0x0c, 0xee, 0x45, 0x7b, 0xf7, 0xfe, 0x1a, 0x31, 0xfc, 0xaf, 0x11, 0x9d, 0xeb, 0x0d, 0x3a, 0x27, 0x24, 0x7a, 0x23, 0x53, 0xde, 0x87, 0xf9, 0x0d, 0xc4, 0xd7, 0x0e, 0x2d, 0x7b, 0x0a, 0x23, 0xa9, 0xcb, 0xfd, 0xa9, 0xc4, 0x52, 0x5f, 0x55, 0x7b, 0x4a, 0xd1, 0xa1, 0x6d, 0x0f, 0xef, 0x6f, 0xfb, 0x14, 0x26, 0x52, 0x97, 0xad, 0xeb, 0xae, 0xb8, 0xc1, 0xdd, 0x7f, 0x01, 0x52, 0x5f, 0xef, 0x32, 0xcb, 0xc4, 0x2f, 0xfe, 0xd5, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x45, 0x47, 0x8b, 0xbe, 0xd8, 0x03, 0x00, 0x00, } smarthttp.pb.go000066400000000000000000000440761324746544700357370ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: smarthttp.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. 
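// Illustrative sketch, not part of the generated bindings: one way a caller
// might drain the server-side stream returned by InfoRefsUploadPack (the
// response body for GET /info/refs?service=git-upload-pack) and reassemble
// the raw advertisement. The helper name and the error handling are
// assumptions made for this example; only types declared in this package are
// used. Recv returns io.EOF once the server closes the stream, so callers
// treat that error as a normal end of data.
func collectInfoRefs(stream SmartHTTPService_InfoRefsUploadPackClient) ([]byte, error) {
	var advertisement []byte
	for {
		resp, err := stream.Recv()
		if err != nil {
			// err is io.EOF on a clean end of stream; anything else is a
			// transport or RPC failure. The bytes gathered so far are
			// returned either way, leaving the decision to the caller.
			return advertisement, err
		}
		// Each InfoRefsResponse carries one chunk of the raw payload.
		advertisement = append(advertisement, resp.GetData()...)
	}
}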
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type InfoRefsRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // Parameters to use with git -c (key=value pairs) GitConfigOptions []string `protobuf:"bytes,2,rep,name=git_config_options,json=gitConfigOptions" json:"git_config_options,omitempty"` } func (m *InfoRefsRequest) Reset() { *m = InfoRefsRequest{} } func (m *InfoRefsRequest) String() string { return proto.CompactTextString(m) } func (*InfoRefsRequest) ProtoMessage() {} func (*InfoRefsRequest) Descriptor() ([]byte, []int) { return fileDescriptor12, []int{0} } func (m *InfoRefsRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *InfoRefsRequest) GetGitConfigOptions() []string { if m != nil { return m.GitConfigOptions } return nil } type InfoRefsResponse struct { Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` } func (m *InfoRefsResponse) Reset() { *m = InfoRefsResponse{} } func (m *InfoRefsResponse) String() string { return proto.CompactTextString(m) } func (*InfoRefsResponse) ProtoMessage() {} func (*InfoRefsResponse) Descriptor() ([]byte, []int) { return fileDescriptor12, []int{1} } func (m *InfoRefsResponse) GetData() []byte { if m != nil { return m.Data } return nil } type PostUploadPackRequest struct { // repository should only be present in the first message of the stream Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // Raw data to be copied to stdin of 'git upload-pack' Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // Parameters to use with git -c (key=value pairs) GitConfigOptions []string `protobuf:"bytes,3,rep,name=git_config_options,json=gitConfigOptions" json:"git_config_options,omitempty"` } func (m *PostUploadPackRequest) Reset() { *m = PostUploadPackRequest{} } func (m *PostUploadPackRequest) String() string { return proto.CompactTextString(m) } func (*PostUploadPackRequest) ProtoMessage() {} func (*PostUploadPackRequest) Descriptor() ([]byte, []int) { return fileDescriptor12, []int{2} } func (m *PostUploadPackRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *PostUploadPackRequest) GetData() []byte { if m != nil { return m.Data } return nil } func (m *PostUploadPackRequest) GetGitConfigOptions() []string { if m != nil { return m.GitConfigOptions } return nil } type PostUploadPackResponse struct { // Raw data from stdout of 'git upload-pack' Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` } func (m *PostUploadPackResponse) Reset() { *m = PostUploadPackResponse{} } func (m *PostUploadPackResponse) String() string { return proto.CompactTextString(m) } func (*PostUploadPackResponse) ProtoMessage() {} func (*PostUploadPackResponse) Descriptor() ([]byte, []int) { return fileDescriptor12, []int{3} } func (m *PostUploadPackResponse) GetData() []byte { if m != nil { return m.Data } return nil } type PostReceivePackRequest struct { // repository should only be present in the first message of the stream Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // Raw data to be copied to stdin of 'git receive-pack' Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // gl_id, gl_repository, and gl_username become env variables, used by the Git {pre,post}-receive // hooks. They should only be present in the first message of the stream. 
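// Inside those hooks the same values surface as the GL_ID, GL_REPOSITORY and
// GL_USERNAME environment variables (compare the equivalent fields on
// SSHReceivePackRequest in ssh.pb.go, which names the variables explicitly).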
GlId string `protobuf:"bytes,3,opt,name=gl_id,json=glId" json:"gl_id,omitempty"` GlRepository string `protobuf:"bytes,4,opt,name=gl_repository,json=glRepository" json:"gl_repository,omitempty"` GlUsername string `protobuf:"bytes,5,opt,name=gl_username,json=glUsername" json:"gl_username,omitempty"` } func (m *PostReceivePackRequest) Reset() { *m = PostReceivePackRequest{} } func (m *PostReceivePackRequest) String() string { return proto.CompactTextString(m) } func (*PostReceivePackRequest) ProtoMessage() {} func (*PostReceivePackRequest) Descriptor() ([]byte, []int) { return fileDescriptor12, []int{4} } func (m *PostReceivePackRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *PostReceivePackRequest) GetData() []byte { if m != nil { return m.Data } return nil } func (m *PostReceivePackRequest) GetGlId() string { if m != nil { return m.GlId } return "" } func (m *PostReceivePackRequest) GetGlRepository() string { if m != nil { return m.GlRepository } return "" } func (m *PostReceivePackRequest) GetGlUsername() string { if m != nil { return m.GlUsername } return "" } type PostReceivePackResponse struct { // Raw data from stdout of 'git receive-pack' Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` } func (m *PostReceivePackResponse) Reset() { *m = PostReceivePackResponse{} } func (m *PostReceivePackResponse) String() string { return proto.CompactTextString(m) } func (*PostReceivePackResponse) ProtoMessage() {} func (*PostReceivePackResponse) Descriptor() ([]byte, []int) { return fileDescriptor12, []int{5} } func (m *PostReceivePackResponse) GetData() []byte { if m != nil { return m.Data } return nil } func init() { proto.RegisterType((*InfoRefsRequest)(nil), "gitaly.InfoRefsRequest") proto.RegisterType((*InfoRefsResponse)(nil), "gitaly.InfoRefsResponse") proto.RegisterType((*PostUploadPackRequest)(nil), "gitaly.PostUploadPackRequest") proto.RegisterType((*PostUploadPackResponse)(nil), "gitaly.PostUploadPackResponse") proto.RegisterType((*PostReceivePackRequest)(nil), "gitaly.PostReceivePackRequest") proto.RegisterType((*PostReceivePackResponse)(nil), "gitaly.PostReceivePackResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. 
const _ = grpc.SupportPackageIsVersion4 // Client API for SmartHTTPService service type SmartHTTPServiceClient interface { // The response body for GET /info/refs?service=git-upload-pack InfoRefsUploadPack(ctx context.Context, in *InfoRefsRequest, opts ...grpc.CallOption) (SmartHTTPService_InfoRefsUploadPackClient, error) // The response body for GET /info/refs?service=git-receive-pack InfoRefsReceivePack(ctx context.Context, in *InfoRefsRequest, opts ...grpc.CallOption) (SmartHTTPService_InfoRefsReceivePackClient, error) // Request and response body for POST /upload-pack PostUploadPack(ctx context.Context, opts ...grpc.CallOption) (SmartHTTPService_PostUploadPackClient, error) // Request and response body for POST /receive-pack PostReceivePack(ctx context.Context, opts ...grpc.CallOption) (SmartHTTPService_PostReceivePackClient, error) } type smartHTTPServiceClient struct { cc *grpc.ClientConn } func NewSmartHTTPServiceClient(cc *grpc.ClientConn) SmartHTTPServiceClient { return &smartHTTPServiceClient{cc} } func (c *smartHTTPServiceClient) InfoRefsUploadPack(ctx context.Context, in *InfoRefsRequest, opts ...grpc.CallOption) (SmartHTTPService_InfoRefsUploadPackClient, error) { stream, err := grpc.NewClientStream(ctx, &_SmartHTTPService_serviceDesc.Streams[0], c.cc, "/gitaly.SmartHTTPService/InfoRefsUploadPack", opts...) if err != nil { return nil, err } x := &smartHTTPServiceInfoRefsUploadPackClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type SmartHTTPService_InfoRefsUploadPackClient interface { Recv() (*InfoRefsResponse, error) grpc.ClientStream } type smartHTTPServiceInfoRefsUploadPackClient struct { grpc.ClientStream } func (x *smartHTTPServiceInfoRefsUploadPackClient) Recv() (*InfoRefsResponse, error) { m := new(InfoRefsResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *smartHTTPServiceClient) InfoRefsReceivePack(ctx context.Context, in *InfoRefsRequest, opts ...grpc.CallOption) (SmartHTTPService_InfoRefsReceivePackClient, error) { stream, err := grpc.NewClientStream(ctx, &_SmartHTTPService_serviceDesc.Streams[1], c.cc, "/gitaly.SmartHTTPService/InfoRefsReceivePack", opts...) if err != nil { return nil, err } x := &smartHTTPServiceInfoRefsReceivePackClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type SmartHTTPService_InfoRefsReceivePackClient interface { Recv() (*InfoRefsResponse, error) grpc.ClientStream } type smartHTTPServiceInfoRefsReceivePackClient struct { grpc.ClientStream } func (x *smartHTTPServiceInfoRefsReceivePackClient) Recv() (*InfoRefsResponse, error) { m := new(InfoRefsResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *smartHTTPServiceClient) PostUploadPack(ctx context.Context, opts ...grpc.CallOption) (SmartHTTPService_PostUploadPackClient, error) { stream, err := grpc.NewClientStream(ctx, &_SmartHTTPService_serviceDesc.Streams[2], c.cc, "/gitaly.SmartHTTPService/PostUploadPack", opts...) 
if err != nil { return nil, err } x := &smartHTTPServicePostUploadPackClient{stream} return x, nil } type SmartHTTPService_PostUploadPackClient interface { Send(*PostUploadPackRequest) error Recv() (*PostUploadPackResponse, error) grpc.ClientStream } type smartHTTPServicePostUploadPackClient struct { grpc.ClientStream } func (x *smartHTTPServicePostUploadPackClient) Send(m *PostUploadPackRequest) error { return x.ClientStream.SendMsg(m) } func (x *smartHTTPServicePostUploadPackClient) Recv() (*PostUploadPackResponse, error) { m := new(PostUploadPackResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *smartHTTPServiceClient) PostReceivePack(ctx context.Context, opts ...grpc.CallOption) (SmartHTTPService_PostReceivePackClient, error) { stream, err := grpc.NewClientStream(ctx, &_SmartHTTPService_serviceDesc.Streams[3], c.cc, "/gitaly.SmartHTTPService/PostReceivePack", opts...) if err != nil { return nil, err } x := &smartHTTPServicePostReceivePackClient{stream} return x, nil } type SmartHTTPService_PostReceivePackClient interface { Send(*PostReceivePackRequest) error Recv() (*PostReceivePackResponse, error) grpc.ClientStream } type smartHTTPServicePostReceivePackClient struct { grpc.ClientStream } func (x *smartHTTPServicePostReceivePackClient) Send(m *PostReceivePackRequest) error { return x.ClientStream.SendMsg(m) } func (x *smartHTTPServicePostReceivePackClient) Recv() (*PostReceivePackResponse, error) { m := new(PostReceivePackResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for SmartHTTPService service type SmartHTTPServiceServer interface { // The response body for GET /info/refs?service=git-upload-pack InfoRefsUploadPack(*InfoRefsRequest, SmartHTTPService_InfoRefsUploadPackServer) error // The response body for GET /info/refs?service=git-receive-pack InfoRefsReceivePack(*InfoRefsRequest, SmartHTTPService_InfoRefsReceivePackServer) error // Request and response body for POST /upload-pack PostUploadPack(SmartHTTPService_PostUploadPackServer) error // Request and response body for POST /receive-pack PostReceivePack(SmartHTTPService_PostReceivePackServer) error } func RegisterSmartHTTPServiceServer(s *grpc.Server, srv SmartHTTPServiceServer) { s.RegisterService(&_SmartHTTPService_serviceDesc, srv) } func _SmartHTTPService_InfoRefsUploadPack_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(InfoRefsRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(SmartHTTPServiceServer).InfoRefsUploadPack(m, &smartHTTPServiceInfoRefsUploadPackServer{stream}) } type SmartHTTPService_InfoRefsUploadPackServer interface { Send(*InfoRefsResponse) error grpc.ServerStream } type smartHTTPServiceInfoRefsUploadPackServer struct { grpc.ServerStream } func (x *smartHTTPServiceInfoRefsUploadPackServer) Send(m *InfoRefsResponse) error { return x.ServerStream.SendMsg(m) } func _SmartHTTPService_InfoRefsReceivePack_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(InfoRefsRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(SmartHTTPServiceServer).InfoRefsReceivePack(m, &smartHTTPServiceInfoRefsReceivePackServer{stream}) } type SmartHTTPService_InfoRefsReceivePackServer interface { Send(*InfoRefsResponse) error grpc.ServerStream } type smartHTTPServiceInfoRefsReceivePackServer struct { grpc.ServerStream } func (x *smartHTTPServiceInfoRefsReceivePackServer) Send(m *InfoRefsResponse) error { return x.ServerStream.SendMsg(m) } func 
_SmartHTTPService_PostUploadPack_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(SmartHTTPServiceServer).PostUploadPack(&smartHTTPServicePostUploadPackServer{stream}) } type SmartHTTPService_PostUploadPackServer interface { Send(*PostUploadPackResponse) error Recv() (*PostUploadPackRequest, error) grpc.ServerStream } type smartHTTPServicePostUploadPackServer struct { grpc.ServerStream } func (x *smartHTTPServicePostUploadPackServer) Send(m *PostUploadPackResponse) error { return x.ServerStream.SendMsg(m) } func (x *smartHTTPServicePostUploadPackServer) Recv() (*PostUploadPackRequest, error) { m := new(PostUploadPackRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _SmartHTTPService_PostReceivePack_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(SmartHTTPServiceServer).PostReceivePack(&smartHTTPServicePostReceivePackServer{stream}) } type SmartHTTPService_PostReceivePackServer interface { Send(*PostReceivePackResponse) error Recv() (*PostReceivePackRequest, error) grpc.ServerStream } type smartHTTPServicePostReceivePackServer struct { grpc.ServerStream } func (x *smartHTTPServicePostReceivePackServer) Send(m *PostReceivePackResponse) error { return x.ServerStream.SendMsg(m) } func (x *smartHTTPServicePostReceivePackServer) Recv() (*PostReceivePackRequest, error) { m := new(PostReceivePackRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _SmartHTTPService_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.SmartHTTPService", HandlerType: (*SmartHTTPServiceServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "InfoRefsUploadPack", Handler: _SmartHTTPService_InfoRefsUploadPack_Handler, ServerStreams: true, }, { StreamName: "InfoRefsReceivePack", Handler: _SmartHTTPService_InfoRefsReceivePack_Handler, ServerStreams: true, }, { StreamName: "PostUploadPack", Handler: _SmartHTTPService_PostUploadPack_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "PostReceivePack", Handler: _SmartHTTPService_PostReceivePack_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "smarthttp.proto", } func init() { proto.RegisterFile("smarthttp.proto", fileDescriptor12) } var fileDescriptor12 = []byte{ // 386 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x93, 0xdd, 0xee, 0xd2, 0x40, 0x10, 0xc5, 0x5d, 0xbe, 0x12, 0x06, 0x14, 0x32, 0x44, 0x69, 0x9a, 0x28, 0xa4, 0x26, 0xa6, 0x17, 0x48, 0x08, 0x3e, 0x82, 0x37, 0x12, 0x4d, 0x24, 0x0b, 0x24, 0xde, 0x35, 0x6b, 0xbb, 0x2c, 0x1b, 0x97, 0x6e, 0xed, 0x2e, 0x24, 0x3c, 0x84, 0x4f, 0xe4, 0xbb, 0xf8, 0x2c, 0xc6, 0xd6, 0x52, 0x3e, 0xac, 0x17, 0x9a, 0xff, 0x5d, 0x33, 0x67, 0xf6, 0x9c, 0xdf, 0xee, 0x4c, 0xa1, 0x67, 0xf6, 0x2c, 0xb5, 0x3b, 0x6b, 0x93, 0x69, 0x92, 0x6a, 0xab, 0xb1, 0x25, 0xa4, 0x65, 0xea, 0xe4, 0x76, 0xcd, 0x8e, 0xa5, 0x3c, 0xca, 0xab, 0x9e, 0x81, 0xde, 0x22, 0xde, 0x6a, 0xca, 0xb7, 0x86, 0xf2, 0xaf, 0x07, 0x6e, 0x2c, 0xce, 0x01, 0x52, 0x9e, 0x68, 0x23, 0xad, 0x4e, 0x4f, 0x0e, 0x19, 0x13, 0xbf, 0x33, 0xc7, 0x69, 0x7e, 0x7a, 0x4a, 0xcf, 0x0a, 0xbd, 0xe8, 0xc2, 0x09, 0xa0, 0x90, 0x36, 0x08, 0x75, 0xbc, 0x95, 0x22, 0xd0, 0x89, 0x95, 0x3a, 0x36, 0x4e, 0x6d, 0x5c, 0xf7, 0xdb, 0xb4, 0x2f, 0xa4, 0x7d, 0x9b, 0x09, 0x1f, 0xf3, 0xba, 0xf7, 0x0a, 0xfa, 0x65, 0xa8, 0x49, 0x74, 0x6c, 0x38, 0x22, 0x34, 0x22, 0x66, 0x59, 0x96, 0xd7, 0xa5, 0xd9, 0xb7, 0xf7, 0x8d, 0xc0, 0xd3, 0xa5, 0x36, 0x76, 0x93, 0x28, 0xcd, 0xa2, 0x25, 
0x0b, 0xbf, 0xfc, 0x0f, 0x63, 0x91, 0x50, 0x2b, 0x13, 0x2a, 0xb8, 0xeb, 0x15, 0xdc, 0x13, 0x78, 0x76, 0x8b, 0xf3, 0x17, 0xfa, 0xef, 0x24, 0x6f, 0xa7, 0x3c, 0xe4, 0xf2, 0xc8, 0x1f, 0x02, 0x7f, 0x00, 0x4d, 0xa1, 0x02, 0x19, 0x39, 0xf5, 0x31, 0xf1, 0xdb, 0xb4, 0x21, 0xd4, 0x22, 0xc2, 0x97, 0xf0, 0x58, 0xa8, 0xe0, 0xc2, 0xbf, 0x91, 0x89, 0x5d, 0xa1, 0x4a, 0x67, 0x1c, 0x41, 0x47, 0xa8, 0xe0, 0x60, 0x78, 0x1a, 0xb3, 0x3d, 0x77, 0x9a, 0x59, 0x0b, 0x08, 0xb5, 0xf9, 0x5d, 0xf1, 0x5e, 0xc3, 0xf0, 0x0e, 0xbe, 0xfa, 0xb2, 0xf3, 0x1f, 0x35, 0xe8, 0xaf, 0x7e, 0x6d, 0xdc, 0xbb, 0xf5, 0x7a, 0xb9, 0xe2, 0xe9, 0x51, 0x86, 0x1c, 0xdf, 0x03, 0x16, 0x73, 0x2e, 0xdf, 0x0c, 0x87, 0xc5, 0x45, 0x6f, 0x16, 0xcf, 0x75, 0xee, 0x85, 0x3c, 0xd1, 0x7b, 0x34, 0x23, 0xf8, 0x01, 0x06, 0x65, 0xfd, 0x0c, 0xf5, 0xaf, 0x6e, 0x1b, 0x78, 0x72, 0x3d, 0x4a, 0x7c, 0x5e, 0xf4, 0xff, 0x71, 0xe3, 0xdc, 0x17, 0x55, 0x72, 0x61, 0xea, 0x93, 0x19, 0xc1, 0x4f, 0xd0, 0xbb, 0x79, 0x35, 0xbc, 0x3a, 0x78, 0xbf, 0x0b, 0xee, 0xa8, 0x52, 0xbf, 0x74, 0xfe, 0xdc, 0xca, 0xfe, 0xd7, 0x37, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x85, 0x01, 0x06, 0xd8, 0x03, 0x00, 0x00, } ssh.pb.go000066400000000000000000000323211324746544700344740ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: ssh.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type SSHUploadPackRequest struct { // 'repository' must be present in the first message. Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // A chunk of raw data to be copied to 'git upload-pack' standard input Stdin []byte `protobuf:"bytes,2,opt,name=stdin,proto3" json:"stdin,omitempty"` // Parameters to use with git -c (key=value pairs) GitConfigOptions []string `protobuf:"bytes,4,rep,name=git_config_options,json=gitConfigOptions" json:"git_config_options,omitempty"` } func (m *SSHUploadPackRequest) Reset() { *m = SSHUploadPackRequest{} } func (m *SSHUploadPackRequest) String() string { return proto.CompactTextString(m) } func (*SSHUploadPackRequest) ProtoMessage() {} func (*SSHUploadPackRequest) Descriptor() ([]byte, []int) { return fileDescriptor13, []int{0} } func (m *SSHUploadPackRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *SSHUploadPackRequest) GetStdin() []byte { if m != nil { return m.Stdin } return nil } func (m *SSHUploadPackRequest) GetGitConfigOptions() []string { if m != nil { return m.GitConfigOptions } return nil } type SSHUploadPackResponse struct { // A chunk of raw data from 'git upload-pack' standard output Stdout []byte `protobuf:"bytes,1,opt,name=stdout,proto3" json:"stdout,omitempty"` // A chunk of raw data from 'git upload-pack' standard error Stderr []byte `protobuf:"bytes,2,opt,name=stderr,proto3" json:"stderr,omitempty"` // This field may be nil. This is intentional: only when the remote // command has finished can we return its exit status. 
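// In practice callers nil-check this field and read Value only from the final
// message; earlier messages in the stream carry stdout/stderr chunks without
// an exit status.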
ExitStatus *ExitStatus `protobuf:"bytes,3,opt,name=exit_status,json=exitStatus" json:"exit_status,omitempty"` } func (m *SSHUploadPackResponse) Reset() { *m = SSHUploadPackResponse{} } func (m *SSHUploadPackResponse) String() string { return proto.CompactTextString(m) } func (*SSHUploadPackResponse) ProtoMessage() {} func (*SSHUploadPackResponse) Descriptor() ([]byte, []int) { return fileDescriptor13, []int{1} } func (m *SSHUploadPackResponse) GetStdout() []byte { if m != nil { return m.Stdout } return nil } func (m *SSHUploadPackResponse) GetStderr() []byte { if m != nil { return m.Stderr } return nil } func (m *SSHUploadPackResponse) GetExitStatus() *ExitStatus { if m != nil { return m.ExitStatus } return nil } type SSHReceivePackRequest struct { // 'repository' must be present in the first message. Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` // A chunk of raw data to be copied to 'git upload-pack' standard input Stdin []byte `protobuf:"bytes,2,opt,name=stdin,proto3" json:"stdin,omitempty"` // Contents of GL_ID, GL_REPOSITORY, and GL_USERNAME environment variables // for 'git receive-pack' GlId string `protobuf:"bytes,3,opt,name=gl_id,json=glId" json:"gl_id,omitempty"` GlRepository string `protobuf:"bytes,4,opt,name=gl_repository,json=glRepository" json:"gl_repository,omitempty"` GlUsername string `protobuf:"bytes,5,opt,name=gl_username,json=glUsername" json:"gl_username,omitempty"` } func (m *SSHReceivePackRequest) Reset() { *m = SSHReceivePackRequest{} } func (m *SSHReceivePackRequest) String() string { return proto.CompactTextString(m) } func (*SSHReceivePackRequest) ProtoMessage() {} func (*SSHReceivePackRequest) Descriptor() ([]byte, []int) { return fileDescriptor13, []int{2} } func (m *SSHReceivePackRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *SSHReceivePackRequest) GetStdin() []byte { if m != nil { return m.Stdin } return nil } func (m *SSHReceivePackRequest) GetGlId() string { if m != nil { return m.GlId } return "" } func (m *SSHReceivePackRequest) GetGlRepository() string { if m != nil { return m.GlRepository } return "" } func (m *SSHReceivePackRequest) GetGlUsername() string { if m != nil { return m.GlUsername } return "" } type SSHReceivePackResponse struct { // A chunk of raw data from 'git receive-pack' standard output Stdout []byte `protobuf:"bytes,1,opt,name=stdout,proto3" json:"stdout,omitempty"` // A chunk of raw data from 'git receive-pack' standard error Stderr []byte `protobuf:"bytes,2,opt,name=stderr,proto3" json:"stderr,omitempty"` // This field may be nil. This is intentional: only when the remote // command has finished can we return its exit status. 
ExitStatus *ExitStatus `protobuf:"bytes,3,opt,name=exit_status,json=exitStatus" json:"exit_status,omitempty"` } func (m *SSHReceivePackResponse) Reset() { *m = SSHReceivePackResponse{} } func (m *SSHReceivePackResponse) String() string { return proto.CompactTextString(m) } func (*SSHReceivePackResponse) ProtoMessage() {} func (*SSHReceivePackResponse) Descriptor() ([]byte, []int) { return fileDescriptor13, []int{3} } func (m *SSHReceivePackResponse) GetStdout() []byte { if m != nil { return m.Stdout } return nil } func (m *SSHReceivePackResponse) GetStderr() []byte { if m != nil { return m.Stderr } return nil } func (m *SSHReceivePackResponse) GetExitStatus() *ExitStatus { if m != nil { return m.ExitStatus } return nil } func init() { proto.RegisterType((*SSHUploadPackRequest)(nil), "gitaly.SSHUploadPackRequest") proto.RegisterType((*SSHUploadPackResponse)(nil), "gitaly.SSHUploadPackResponse") proto.RegisterType((*SSHReceivePackRequest)(nil), "gitaly.SSHReceivePackRequest") proto.RegisterType((*SSHReceivePackResponse)(nil), "gitaly.SSHReceivePackResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for SSHService service type SSHServiceClient interface { // To forward 'git upload-pack' to Gitaly for SSH sessions SSHUploadPack(ctx context.Context, opts ...grpc.CallOption) (SSHService_SSHUploadPackClient, error) // To forward 'git receive-pack' to Gitaly for SSH sessions SSHReceivePack(ctx context.Context, opts ...grpc.CallOption) (SSHService_SSHReceivePackClient, error) } type sSHServiceClient struct { cc *grpc.ClientConn } func NewSSHServiceClient(cc *grpc.ClientConn) SSHServiceClient { return &sSHServiceClient{cc} } func (c *sSHServiceClient) SSHUploadPack(ctx context.Context, opts ...grpc.CallOption) (SSHService_SSHUploadPackClient, error) { stream, err := grpc.NewClientStream(ctx, &_SSHService_serviceDesc.Streams[0], c.cc, "/gitaly.SSHService/SSHUploadPack", opts...) if err != nil { return nil, err } x := &sSHServiceSSHUploadPackClient{stream} return x, nil } type SSHService_SSHUploadPackClient interface { Send(*SSHUploadPackRequest) error Recv() (*SSHUploadPackResponse, error) grpc.ClientStream } type sSHServiceSSHUploadPackClient struct { grpc.ClientStream } func (x *sSHServiceSSHUploadPackClient) Send(m *SSHUploadPackRequest) error { return x.ClientStream.SendMsg(m) } func (x *sSHServiceSSHUploadPackClient) Recv() (*SSHUploadPackResponse, error) { m := new(SSHUploadPackResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *sSHServiceClient) SSHReceivePack(ctx context.Context, opts ...grpc.CallOption) (SSHService_SSHReceivePackClient, error) { stream, err := grpc.NewClientStream(ctx, &_SSHService_serviceDesc.Streams[1], c.cc, "/gitaly.SSHService/SSHReceivePack", opts...) 
if err != nil { return nil, err } x := &sSHServiceSSHReceivePackClient{stream} return x, nil } type SSHService_SSHReceivePackClient interface { Send(*SSHReceivePackRequest) error Recv() (*SSHReceivePackResponse, error) grpc.ClientStream } type sSHServiceSSHReceivePackClient struct { grpc.ClientStream } func (x *sSHServiceSSHReceivePackClient) Send(m *SSHReceivePackRequest) error { return x.ClientStream.SendMsg(m) } func (x *sSHServiceSSHReceivePackClient) Recv() (*SSHReceivePackResponse, error) { m := new(SSHReceivePackResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for SSHService service type SSHServiceServer interface { // To forward 'git upload-pack' to Gitaly for SSH sessions SSHUploadPack(SSHService_SSHUploadPackServer) error // To forward 'git receive-pack' to Gitaly for SSH sessions SSHReceivePack(SSHService_SSHReceivePackServer) error } func RegisterSSHServiceServer(s *grpc.Server, srv SSHServiceServer) { s.RegisterService(&_SSHService_serviceDesc, srv) } func _SSHService_SSHUploadPack_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(SSHServiceServer).SSHUploadPack(&sSHServiceSSHUploadPackServer{stream}) } type SSHService_SSHUploadPackServer interface { Send(*SSHUploadPackResponse) error Recv() (*SSHUploadPackRequest, error) grpc.ServerStream } type sSHServiceSSHUploadPackServer struct { grpc.ServerStream } func (x *sSHServiceSSHUploadPackServer) Send(m *SSHUploadPackResponse) error { return x.ServerStream.SendMsg(m) } func (x *sSHServiceSSHUploadPackServer) Recv() (*SSHUploadPackRequest, error) { m := new(SSHUploadPackRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _SSHService_SSHReceivePack_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(SSHServiceServer).SSHReceivePack(&sSHServiceSSHReceivePackServer{stream}) } type SSHService_SSHReceivePackServer interface { Send(*SSHReceivePackResponse) error Recv() (*SSHReceivePackRequest, error) grpc.ServerStream } type sSHServiceSSHReceivePackServer struct { grpc.ServerStream } func (x *sSHServiceSSHReceivePackServer) Send(m *SSHReceivePackResponse) error { return x.ServerStream.SendMsg(m) } func (x *sSHServiceSSHReceivePackServer) Recv() (*SSHReceivePackRequest, error) { m := new(SSHReceivePackRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _SSHService_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.SSHService", HandlerType: (*SSHServiceServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "SSHUploadPack", Handler: _SSHService_SSHUploadPack_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "SSHReceivePack", Handler: _SSHService_SSHReceivePack_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "ssh.proto", } func init() { proto.RegisterFile("ssh.proto", fileDescriptor13) } var fileDescriptor13 = []byte{ // 377 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x53, 0xcd, 0xce, 0xd2, 0x40, 0x14, 0x75, 0xa4, 0x10, 0xb9, 0xf4, 0x33, 0x64, 0x04, 0xd2, 0x10, 0x7f, 0x48, 0xdd, 0x74, 0x61, 0x88, 0x81, 0x47, 0x30, 0x26, 0xe8, 0x46, 0x33, 0x0d, 0xeb, 0x66, 0x6c, 0xaf, 0xc3, 0xc4, 0xa1, 0x53, 0x67, 0xa6, 0x04, 0x12, 0x7d, 0x22, 0x1f, 0xc0, 0x8d, 0x0f, 0x67, 0x32, 0xad, 0x58, 0x50, 0x96, 0xba, 0xeb, 0x3d, 0xe7, 0xfe, 0x9c, 0x73, 0x6f, 0x07, 0x86, 0xd6, 0xee, 0x96, 0x95, 0xd1, 0x4e, 0xd3, 0x81, 0x90, 0x8e, 0xab, 0xd3, 0x3c, 
0xb4, 0x3b, 0x6e, 0xb0, 0x68, 0xd0, 0xf8, 0x1b, 0x81, 0x49, 0x9a, 0x6e, 0xb6, 0x95, 0xd2, 0xbc, 0x78, 0xcf, 0xf3, 0x4f, 0x0c, 0x3f, 0xd7, 0x68, 0x1d, 0x5d, 0x01, 0x18, 0xac, 0xb4, 0x95, 0x4e, 0x9b, 0x53, 0x44, 0x16, 0x24, 0x19, 0xad, 0xe8, 0xb2, 0xe9, 0xb1, 0x64, 0x67, 0x86, 0x75, 0xb2, 0xe8, 0x04, 0xfa, 0xd6, 0x15, 0xb2, 0x8c, 0xee, 0x2f, 0x48, 0x12, 0xb2, 0x26, 0xa0, 0x2f, 0x80, 0x0a, 0xe9, 0xb2, 0x5c, 0x97, 0x1f, 0xa5, 0xc8, 0x74, 0xe5, 0xa4, 0x2e, 0x6d, 0x14, 0x2c, 0x7a, 0xc9, 0x90, 0x8d, 0x85, 0x74, 0xaf, 0x3c, 0xf1, 0xae, 0xc1, 0xdf, 0x06, 0x0f, 0x7a, 0xe3, 0x80, 0x4d, 0x3b, 0x15, 0x15, 0x37, 0x7c, 0x8f, 0x0e, 0x8d, 0x8d, 0xbf, 0xc0, 0xf4, 0x4a, 0xac, 0xad, 0x74, 0x69, 0x91, 0xce, 0x60, 0x60, 0x5d, 0xa1, 0x6b, 0xe7, 0x95, 0x86, 0xac, 0x8d, 0x5a, 0x1c, 0x8d, 0x69, 0x25, 0xb5, 0x11, 0x5d, 0xc3, 0x08, 0x8f, 0xd2, 0x65, 0xd6, 0x71, 0x57, 0xdb, 0xa8, 0x77, 0x69, 0xef, 0xf5, 0x51, 0xba, 0xd4, 0x33, 0x0c, 0xf0, 0xfc, 0x1d, 0xff, 0x20, 0x7e, 0x3c, 0xc3, 0x1c, 0xe5, 0x01, 0xff, 0xcd, 0xb2, 0x1e, 0x41, 0x5f, 0xa8, 0x4c, 0x16, 0x5e, 0xd2, 0x90, 0x05, 0x42, 0xbd, 0x29, 0xe8, 0x73, 0xb8, 0x13, 0x2a, 0xeb, 0x4c, 0x08, 0x3c, 0x19, 0x0a, 0xf5, 0xbb, 0x37, 0x7d, 0x06, 0x23, 0xa1, 0xb2, 0xda, 0xa2, 0x29, 0xf9, 0x1e, 0xa3, 0xbe, 0x4f, 0x01, 0xa1, 0xb6, 0x2d, 0x12, 0x7f, 0x85, 0xd9, 0xb5, 0xfa, 0xff, 0xb8, 0xbd, 0xd5, 0x77, 0x02, 0x90, 0xa6, 0x9b, 0x14, 0xcd, 0x41, 0xe6, 0x48, 0x19, 0xdc, 0x5d, 0x9c, 0x92, 0x3e, 0xfe, 0x55, 0xff, 0xb7, 0xdf, 0x71, 0xfe, 0xe4, 0x06, 0xdb, 0x38, 0x88, 0xef, 0x25, 0xe4, 0x25, 0xa1, 0x5b, 0x78, 0x78, 0xe9, 0x90, 0x76, 0xcb, 0xfe, 0xbc, 0xdb, 0xfc, 0xe9, 0x2d, 0xba, 0xdb, 0xf6, 0xc3, 0xc0, 0x3f, 0x95, 0xf5, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1b, 0x65, 0x3d, 0xab, 0x4d, 0x03, 0x00, 0x00, } wiki.pb.go000066400000000000000000001124401324746544700346430ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly-proto/go// Code generated by protoc-gen-go. DO NOT EDIT. // source: wiki.proto package gitaly import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. 
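// Illustrative sketch, not part of the generated bindings: WikiWritePageRequest
// is streamed because the page content may be large, with repository, name,
// format and commit_details set only on the first message while content is
// carried in every message. The helper below shows one way to split a page
// into such a sequence; the helper name and the chunk size are assumptions for
// this example, not values mandated by the protocol.
func buildWikiWritePageRequests(repo *Repository, name []byte, format string, details *WikiCommitDetails, content []byte) []*WikiWritePageRequest {
	const chunkSize = 128 * 1024 // example size only, not prescribed anywhere
	var msgs []*WikiWritePageRequest
	for off := 0; off == 0 || off < len(content); off += chunkSize {
		end := off + chunkSize
		if end > len(content) {
			end = len(content)
		}
		msg := &WikiWritePageRequest{Content: content[off:end]}
		if off == 0 {
			// Metadata travels only in the first message of the stream.
			msg.Repository = repo
			msg.Name = name
			msg.Format = format
			msg.CommitDetails = details
		}
		msgs = append(msgs, msg)
	}
	return msgs
}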
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type WikiCommitDetails struct { Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Email []byte `protobuf:"bytes,2,opt,name=email,proto3" json:"email,omitempty"` Message []byte `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` } func (m *WikiCommitDetails) Reset() { *m = WikiCommitDetails{} } func (m *WikiCommitDetails) String() string { return proto.CompactTextString(m) } func (*WikiCommitDetails) ProtoMessage() {} func (*WikiCommitDetails) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{0} } func (m *WikiCommitDetails) GetName() []byte { if m != nil { return m.Name } return nil } func (m *WikiCommitDetails) GetEmail() []byte { if m != nil { return m.Email } return nil } func (m *WikiCommitDetails) GetMessage() []byte { if m != nil { return m.Message } return nil } type WikiPageVersion struct { Commit *GitCommit `protobuf:"bytes,1,opt,name=commit" json:"commit,omitempty"` Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` } func (m *WikiPageVersion) Reset() { *m = WikiPageVersion{} } func (m *WikiPageVersion) String() string { return proto.CompactTextString(m) } func (*WikiPageVersion) ProtoMessage() {} func (*WikiPageVersion) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{1} } func (m *WikiPageVersion) GetCommit() *GitCommit { if m != nil { return m.Commit } return nil } func (m *WikiPageVersion) GetFormat() string { if m != nil { return m.Format } return "" } type WikiPage struct { // These fields are only present in the first message of a WikiPage stream Version *WikiPageVersion `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` Title []byte `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` UrlPath string `protobuf:"bytes,4,opt,name=url_path,json=urlPath" json:"url_path,omitempty"` Path []byte `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` Name []byte `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` Historical bool `protobuf:"varint,7,opt,name=historical" json:"historical,omitempty"` // This field is present in all messages of a WikiPage stream RawData []byte `protobuf:"bytes,8,opt,name=raw_data,json=rawData,proto3" json:"raw_data,omitempty"` } func (m *WikiPage) Reset() { *m = WikiPage{} } func (m *WikiPage) String() string { return proto.CompactTextString(m) } func (*WikiPage) ProtoMessage() {} func (*WikiPage) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{2} } func (m *WikiPage) GetVersion() *WikiPageVersion { if m != nil { return m.Version } return nil } func (m *WikiPage) GetFormat() string { if m != nil { return m.Format } return "" } func (m *WikiPage) GetTitle() []byte { if m != nil { return m.Title } return nil } func (m *WikiPage) GetUrlPath() string { if m != nil { return m.UrlPath } return "" } func (m *WikiPage) GetPath() []byte { if m != nil { return m.Path } return nil } func (m *WikiPage) GetName() []byte { if m != nil { return m.Name } return nil } func (m *WikiPage) GetHistorical() bool { if m != nil { return m.Historical } return false } func (m *WikiPage) GetRawData() []byte { if m != nil { return m.RawData } return nil } type WikiGetPageVersionsRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` PagePath []byte `protobuf:"bytes,2,opt,name=page_path,json=pagePath,proto3" json:"page_path,omitempty"` Page int32 
`protobuf:"varint,3,opt,name=page" json:"page,omitempty"` PerPage int32 `protobuf:"varint,4,opt,name=per_page,json=perPage" json:"per_page,omitempty"` } func (m *WikiGetPageVersionsRequest) Reset() { *m = WikiGetPageVersionsRequest{} } func (m *WikiGetPageVersionsRequest) String() string { return proto.CompactTextString(m) } func (*WikiGetPageVersionsRequest) ProtoMessage() {} func (*WikiGetPageVersionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{3} } func (m *WikiGetPageVersionsRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *WikiGetPageVersionsRequest) GetPagePath() []byte { if m != nil { return m.PagePath } return nil } func (m *WikiGetPageVersionsRequest) GetPage() int32 { if m != nil { return m.Page } return 0 } func (m *WikiGetPageVersionsRequest) GetPerPage() int32 { if m != nil { return m.PerPage } return 0 } type WikiGetPageVersionsResponse struct { Versions []*WikiPageVersion `protobuf:"bytes,1,rep,name=versions" json:"versions,omitempty"` } func (m *WikiGetPageVersionsResponse) Reset() { *m = WikiGetPageVersionsResponse{} } func (m *WikiGetPageVersionsResponse) String() string { return proto.CompactTextString(m) } func (*WikiGetPageVersionsResponse) ProtoMessage() {} func (*WikiGetPageVersionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{4} } func (m *WikiGetPageVersionsResponse) GetVersions() []*WikiPageVersion { if m != nil { return m.Versions } return nil } // This message is sent in a stream because the 'content' field may be large. type WikiWritePageRequest struct { // These following fields are only present in the first message. Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Name []byte `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` Format string `protobuf:"bytes,3,opt,name=format" json:"format,omitempty"` CommitDetails *WikiCommitDetails `protobuf:"bytes,4,opt,name=commit_details,json=commitDetails" json:"commit_details,omitempty"` // This field is present in all messages. 
Content []byte `protobuf:"bytes,5,opt,name=content,proto3" json:"content,omitempty"` } func (m *WikiWritePageRequest) Reset() { *m = WikiWritePageRequest{} } func (m *WikiWritePageRequest) String() string { return proto.CompactTextString(m) } func (*WikiWritePageRequest) ProtoMessage() {} func (*WikiWritePageRequest) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{5} } func (m *WikiWritePageRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *WikiWritePageRequest) GetName() []byte { if m != nil { return m.Name } return nil } func (m *WikiWritePageRequest) GetFormat() string { if m != nil { return m.Format } return "" } func (m *WikiWritePageRequest) GetCommitDetails() *WikiCommitDetails { if m != nil { return m.CommitDetails } return nil } func (m *WikiWritePageRequest) GetContent() []byte { if m != nil { return m.Content } return nil } type WikiWritePageResponse struct { DuplicateError []byte `protobuf:"bytes,1,opt,name=duplicate_error,json=duplicateError,proto3" json:"duplicate_error,omitempty"` } func (m *WikiWritePageResponse) Reset() { *m = WikiWritePageResponse{} } func (m *WikiWritePageResponse) String() string { return proto.CompactTextString(m) } func (*WikiWritePageResponse) ProtoMessage() {} func (*WikiWritePageResponse) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{6} } func (m *WikiWritePageResponse) GetDuplicateError() []byte { if m != nil { return m.DuplicateError } return nil } type WikiUpdatePageRequest struct { // There fields are only present in the first message of the stream Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` PagePath []byte `protobuf:"bytes,2,opt,name=page_path,json=pagePath,proto3" json:"page_path,omitempty"` Title []byte `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` Format string `protobuf:"bytes,4,opt,name=format" json:"format,omitempty"` CommitDetails *WikiCommitDetails `protobuf:"bytes,5,opt,name=commit_details,json=commitDetails" json:"commit_details,omitempty"` // This field is present in all messages Content []byte `protobuf:"bytes,6,opt,name=content,proto3" json:"content,omitempty"` } func (m *WikiUpdatePageRequest) Reset() { *m = WikiUpdatePageRequest{} } func (m *WikiUpdatePageRequest) String() string { return proto.CompactTextString(m) } func (*WikiUpdatePageRequest) ProtoMessage() {} func (*WikiUpdatePageRequest) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{7} } func (m *WikiUpdatePageRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *WikiUpdatePageRequest) GetPagePath() []byte { if m != nil { return m.PagePath } return nil } func (m *WikiUpdatePageRequest) GetTitle() []byte { if m != nil { return m.Title } return nil } func (m *WikiUpdatePageRequest) GetFormat() string { if m != nil { return m.Format } return "" } func (m *WikiUpdatePageRequest) GetCommitDetails() *WikiCommitDetails { if m != nil { return m.CommitDetails } return nil } func (m *WikiUpdatePageRequest) GetContent() []byte { if m != nil { return m.Content } return nil } type WikiUpdatePageResponse struct { Error []byte `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } func (m *WikiUpdatePageResponse) Reset() { *m = WikiUpdatePageResponse{} } func (m *WikiUpdatePageResponse) String() string { return proto.CompactTextString(m) } func (*WikiUpdatePageResponse) ProtoMessage() {} func (*WikiUpdatePageResponse) Descriptor() ([]byte, []int) { return fileDescriptor14, 
[]int{8} } func (m *WikiUpdatePageResponse) GetError() []byte { if m != nil { return m.Error } return nil } type WikiDeletePageRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` PagePath []byte `protobuf:"bytes,2,opt,name=page_path,json=pagePath,proto3" json:"page_path,omitempty"` CommitDetails *WikiCommitDetails `protobuf:"bytes,3,opt,name=commit_details,json=commitDetails" json:"commit_details,omitempty"` } func (m *WikiDeletePageRequest) Reset() { *m = WikiDeletePageRequest{} } func (m *WikiDeletePageRequest) String() string { return proto.CompactTextString(m) } func (*WikiDeletePageRequest) ProtoMessage() {} func (*WikiDeletePageRequest) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{9} } func (m *WikiDeletePageRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *WikiDeletePageRequest) GetPagePath() []byte { if m != nil { return m.PagePath } return nil } func (m *WikiDeletePageRequest) GetCommitDetails() *WikiCommitDetails { if m != nil { return m.CommitDetails } return nil } type WikiDeletePageResponse struct { } func (m *WikiDeletePageResponse) Reset() { *m = WikiDeletePageResponse{} } func (m *WikiDeletePageResponse) String() string { return proto.CompactTextString(m) } func (*WikiDeletePageResponse) ProtoMessage() {} func (*WikiDeletePageResponse) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{10} } type WikiFindPageRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Title []byte `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` Revision []byte `protobuf:"bytes,3,opt,name=revision,proto3" json:"revision,omitempty"` Directory []byte `protobuf:"bytes,4,opt,name=directory,proto3" json:"directory,omitempty"` } func (m *WikiFindPageRequest) Reset() { *m = WikiFindPageRequest{} } func (m *WikiFindPageRequest) String() string { return proto.CompactTextString(m) } func (*WikiFindPageRequest) ProtoMessage() {} func (*WikiFindPageRequest) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{11} } func (m *WikiFindPageRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *WikiFindPageRequest) GetTitle() []byte { if m != nil { return m.Title } return nil } func (m *WikiFindPageRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } func (m *WikiFindPageRequest) GetDirectory() []byte { if m != nil { return m.Directory } return nil } // WikiFindPageResponse is a stream because we need multiple WikiPage // messages to send the raw_data field. 
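// Illustrative sketch, not part of the generated code: a reader of this stream
// would typically concatenate the RawData of each received page fragment until
// Recv returns io.EOF (from the standard io package). Assuming a
// WikiService_WikiFindPageClient named stream:
//
//	var raw []byte
//	for {
//		resp, err := stream.Recv()
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		if page := resp.GetPage(); page != nil {
//			raw = append(raw, page.GetRawData()...)
//		}
//	}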
type WikiFindPageResponse struct { Page *WikiPage `protobuf:"bytes,1,opt,name=page" json:"page,omitempty"` } func (m *WikiFindPageResponse) Reset() { *m = WikiFindPageResponse{} } func (m *WikiFindPageResponse) String() string { return proto.CompactTextString(m) } func (*WikiFindPageResponse) ProtoMessage() {} func (*WikiFindPageResponse) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{12} } func (m *WikiFindPageResponse) GetPage() *WikiPage { if m != nil { return m.Page } return nil } type WikiFindFileRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` Name []byte `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // Optional: revision Revision []byte `protobuf:"bytes,3,opt,name=revision,proto3" json:"revision,omitempty"` } func (m *WikiFindFileRequest) Reset() { *m = WikiFindFileRequest{} } func (m *WikiFindFileRequest) String() string { return proto.CompactTextString(m) } func (*WikiFindFileRequest) ProtoMessage() {} func (*WikiFindFileRequest) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{13} } func (m *WikiFindFileRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } func (m *WikiFindFileRequest) GetName() []byte { if m != nil { return m.Name } return nil } func (m *WikiFindFileRequest) GetRevision() []byte { if m != nil { return m.Revision } return nil } type WikiFindFileResponse struct { // If 'name' is empty, the file was not found. Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType" json:"mime_type,omitempty"` RawData []byte `protobuf:"bytes,3,opt,name=raw_data,json=rawData,proto3" json:"raw_data,omitempty"` Path []byte `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` } func (m *WikiFindFileResponse) Reset() { *m = WikiFindFileResponse{} } func (m *WikiFindFileResponse) String() string { return proto.CompactTextString(m) } func (*WikiFindFileResponse) ProtoMessage() {} func (*WikiFindFileResponse) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{14} } func (m *WikiFindFileResponse) GetName() []byte { if m != nil { return m.Name } return nil } func (m *WikiFindFileResponse) GetMimeType() string { if m != nil { return m.MimeType } return "" } func (m *WikiFindFileResponse) GetRawData() []byte { if m != nil { return m.RawData } return nil } func (m *WikiFindFileResponse) GetPath() []byte { if m != nil { return m.Path } return nil } type WikiGetAllPagesRequest struct { Repository *Repository `protobuf:"bytes,1,opt,name=repository" json:"repository,omitempty"` } func (m *WikiGetAllPagesRequest) Reset() { *m = WikiGetAllPagesRequest{} } func (m *WikiGetAllPagesRequest) String() string { return proto.CompactTextString(m) } func (*WikiGetAllPagesRequest) ProtoMessage() {} func (*WikiGetAllPagesRequest) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{15} } func (m *WikiGetAllPagesRequest) GetRepository() *Repository { if m != nil { return m.Repository } return nil } // The WikiGetAllPagesResponse stream is a concatenation of WikiPage streams type WikiGetAllPagesResponse struct { Page *WikiPage `protobuf:"bytes,1,opt,name=page" json:"page,omitempty"` // When end_of_page is true it signals a change of page for the next Response message (if any) EndOfPage bool `protobuf:"varint,2,opt,name=end_of_page,json=endOfPage" json:"end_of_page,omitempty"` } func (m *WikiGetAllPagesResponse) Reset() { *m = WikiGetAllPagesResponse{} } func 
(m *WikiGetAllPagesResponse) String() string { return proto.CompactTextString(m) } func (*WikiGetAllPagesResponse) ProtoMessage() {} func (*WikiGetAllPagesResponse) Descriptor() ([]byte, []int) { return fileDescriptor14, []int{16} } func (m *WikiGetAllPagesResponse) GetPage() *WikiPage { if m != nil { return m.Page } return nil } func (m *WikiGetAllPagesResponse) GetEndOfPage() bool { if m != nil { return m.EndOfPage } return false } func init() { proto.RegisterType((*WikiCommitDetails)(nil), "gitaly.WikiCommitDetails") proto.RegisterType((*WikiPageVersion)(nil), "gitaly.WikiPageVersion") proto.RegisterType((*WikiPage)(nil), "gitaly.WikiPage") proto.RegisterType((*WikiGetPageVersionsRequest)(nil), "gitaly.WikiGetPageVersionsRequest") proto.RegisterType((*WikiGetPageVersionsResponse)(nil), "gitaly.WikiGetPageVersionsResponse") proto.RegisterType((*WikiWritePageRequest)(nil), "gitaly.WikiWritePageRequest") proto.RegisterType((*WikiWritePageResponse)(nil), "gitaly.WikiWritePageResponse") proto.RegisterType((*WikiUpdatePageRequest)(nil), "gitaly.WikiUpdatePageRequest") proto.RegisterType((*WikiUpdatePageResponse)(nil), "gitaly.WikiUpdatePageResponse") proto.RegisterType((*WikiDeletePageRequest)(nil), "gitaly.WikiDeletePageRequest") proto.RegisterType((*WikiDeletePageResponse)(nil), "gitaly.WikiDeletePageResponse") proto.RegisterType((*WikiFindPageRequest)(nil), "gitaly.WikiFindPageRequest") proto.RegisterType((*WikiFindPageResponse)(nil), "gitaly.WikiFindPageResponse") proto.RegisterType((*WikiFindFileRequest)(nil), "gitaly.WikiFindFileRequest") proto.RegisterType((*WikiFindFileResponse)(nil), "gitaly.WikiFindFileResponse") proto.RegisterType((*WikiGetAllPagesRequest)(nil), "gitaly.WikiGetAllPagesRequest") proto.RegisterType((*WikiGetAllPagesResponse)(nil), "gitaly.WikiGetAllPagesResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for WikiService service type WikiServiceClient interface { WikiGetPageVersions(ctx context.Context, in *WikiGetPageVersionsRequest, opts ...grpc.CallOption) (WikiService_WikiGetPageVersionsClient, error) WikiWritePage(ctx context.Context, opts ...grpc.CallOption) (WikiService_WikiWritePageClient, error) WikiUpdatePage(ctx context.Context, opts ...grpc.CallOption) (WikiService_WikiUpdatePageClient, error) WikiDeletePage(ctx context.Context, in *WikiDeletePageRequest, opts ...grpc.CallOption) (*WikiDeletePageResponse, error) // WikiFindPage returns a stream because the page's raw_data field may be arbitrarily large. 
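// Illustrative sketch, not part of the generated code: a WikiServiceClient is
// obtained from a gRPC connection. The address, dial options, ctx, repository
// and title below are placeholders, not values taken from this repository:
//
//	conn, err := grpc.Dial("localhost:9999", grpc.WithInsecure())
//	if err != nil { /* handle error */ }
//	defer conn.Close()
//	client := NewWikiServiceClient(conn)
//	stream, err := client.WikiFindPage(ctx, &WikiFindPageRequest{Repository: repo, Title: []byte("home")})
//	// stream.Recv() is then called repeatedly, as for any server-streaming RPC.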
WikiFindPage(ctx context.Context, in *WikiFindPageRequest, opts ...grpc.CallOption) (WikiService_WikiFindPageClient, error) WikiFindFile(ctx context.Context, in *WikiFindFileRequest, opts ...grpc.CallOption) (WikiService_WikiFindFileClient, error) WikiGetAllPages(ctx context.Context, in *WikiGetAllPagesRequest, opts ...grpc.CallOption) (WikiService_WikiGetAllPagesClient, error) } type wikiServiceClient struct { cc *grpc.ClientConn } func NewWikiServiceClient(cc *grpc.ClientConn) WikiServiceClient { return &wikiServiceClient{cc} } func (c *wikiServiceClient) WikiGetPageVersions(ctx context.Context, in *WikiGetPageVersionsRequest, opts ...grpc.CallOption) (WikiService_WikiGetPageVersionsClient, error) { stream, err := grpc.NewClientStream(ctx, &_WikiService_serviceDesc.Streams[0], c.cc, "/gitaly.WikiService/WikiGetPageVersions", opts...) if err != nil { return nil, err } x := &wikiServiceWikiGetPageVersionsClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type WikiService_WikiGetPageVersionsClient interface { Recv() (*WikiGetPageVersionsResponse, error) grpc.ClientStream } type wikiServiceWikiGetPageVersionsClient struct { grpc.ClientStream } func (x *wikiServiceWikiGetPageVersionsClient) Recv() (*WikiGetPageVersionsResponse, error) { m := new(WikiGetPageVersionsResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *wikiServiceClient) WikiWritePage(ctx context.Context, opts ...grpc.CallOption) (WikiService_WikiWritePageClient, error) { stream, err := grpc.NewClientStream(ctx, &_WikiService_serviceDesc.Streams[1], c.cc, "/gitaly.WikiService/WikiWritePage", opts...) if err != nil { return nil, err } x := &wikiServiceWikiWritePageClient{stream} return x, nil } type WikiService_WikiWritePageClient interface { Send(*WikiWritePageRequest) error CloseAndRecv() (*WikiWritePageResponse, error) grpc.ClientStream } type wikiServiceWikiWritePageClient struct { grpc.ClientStream } func (x *wikiServiceWikiWritePageClient) Send(m *WikiWritePageRequest) error { return x.ClientStream.SendMsg(m) } func (x *wikiServiceWikiWritePageClient) CloseAndRecv() (*WikiWritePageResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(WikiWritePageResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *wikiServiceClient) WikiUpdatePage(ctx context.Context, opts ...grpc.CallOption) (WikiService_WikiUpdatePageClient, error) { stream, err := grpc.NewClientStream(ctx, &_WikiService_serviceDesc.Streams[2], c.cc, "/gitaly.WikiService/WikiUpdatePage", opts...) 
if err != nil { return nil, err } x := &wikiServiceWikiUpdatePageClient{stream} return x, nil } type WikiService_WikiUpdatePageClient interface { Send(*WikiUpdatePageRequest) error CloseAndRecv() (*WikiUpdatePageResponse, error) grpc.ClientStream } type wikiServiceWikiUpdatePageClient struct { grpc.ClientStream } func (x *wikiServiceWikiUpdatePageClient) Send(m *WikiUpdatePageRequest) error { return x.ClientStream.SendMsg(m) } func (x *wikiServiceWikiUpdatePageClient) CloseAndRecv() (*WikiUpdatePageResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(WikiUpdatePageResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *wikiServiceClient) WikiDeletePage(ctx context.Context, in *WikiDeletePageRequest, opts ...grpc.CallOption) (*WikiDeletePageResponse, error) { out := new(WikiDeletePageResponse) err := grpc.Invoke(ctx, "/gitaly.WikiService/WikiDeletePage", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *wikiServiceClient) WikiFindPage(ctx context.Context, in *WikiFindPageRequest, opts ...grpc.CallOption) (WikiService_WikiFindPageClient, error) { stream, err := grpc.NewClientStream(ctx, &_WikiService_serviceDesc.Streams[3], c.cc, "/gitaly.WikiService/WikiFindPage", opts...) if err != nil { return nil, err } x := &wikiServiceWikiFindPageClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type WikiService_WikiFindPageClient interface { Recv() (*WikiFindPageResponse, error) grpc.ClientStream } type wikiServiceWikiFindPageClient struct { grpc.ClientStream } func (x *wikiServiceWikiFindPageClient) Recv() (*WikiFindPageResponse, error) { m := new(WikiFindPageResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *wikiServiceClient) WikiFindFile(ctx context.Context, in *WikiFindFileRequest, opts ...grpc.CallOption) (WikiService_WikiFindFileClient, error) { stream, err := grpc.NewClientStream(ctx, &_WikiService_serviceDesc.Streams[4], c.cc, "/gitaly.WikiService/WikiFindFile", opts...) if err != nil { return nil, err } x := &wikiServiceWikiFindFileClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type WikiService_WikiFindFileClient interface { Recv() (*WikiFindFileResponse, error) grpc.ClientStream } type wikiServiceWikiFindFileClient struct { grpc.ClientStream } func (x *wikiServiceWikiFindFileClient) Recv() (*WikiFindFileResponse, error) { m := new(WikiFindFileResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *wikiServiceClient) WikiGetAllPages(ctx context.Context, in *WikiGetAllPagesRequest, opts ...grpc.CallOption) (WikiService_WikiGetAllPagesClient, error) { stream, err := grpc.NewClientStream(ctx, &_WikiService_serviceDesc.Streams[5], c.cc, "/gitaly.WikiService/WikiGetAllPages", opts...) 
if err != nil { return nil, err } x := &wikiServiceWikiGetAllPagesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type WikiService_WikiGetAllPagesClient interface { Recv() (*WikiGetAllPagesResponse, error) grpc.ClientStream } type wikiServiceWikiGetAllPagesClient struct { grpc.ClientStream } func (x *wikiServiceWikiGetAllPagesClient) Recv() (*WikiGetAllPagesResponse, error) { m := new(WikiGetAllPagesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for WikiService service type WikiServiceServer interface { WikiGetPageVersions(*WikiGetPageVersionsRequest, WikiService_WikiGetPageVersionsServer) error WikiWritePage(WikiService_WikiWritePageServer) error WikiUpdatePage(WikiService_WikiUpdatePageServer) error WikiDeletePage(context.Context, *WikiDeletePageRequest) (*WikiDeletePageResponse, error) // WikiFindPage returns a stream because the page's raw_data field may be arbitrarily large. WikiFindPage(*WikiFindPageRequest, WikiService_WikiFindPageServer) error WikiFindFile(*WikiFindFileRequest, WikiService_WikiFindFileServer) error WikiGetAllPages(*WikiGetAllPagesRequest, WikiService_WikiGetAllPagesServer) error } func RegisterWikiServiceServer(s *grpc.Server, srv WikiServiceServer) { s.RegisterService(&_WikiService_serviceDesc, srv) } func _WikiService_WikiGetPageVersions_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(WikiGetPageVersionsRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(WikiServiceServer).WikiGetPageVersions(m, &wikiServiceWikiGetPageVersionsServer{stream}) } type WikiService_WikiGetPageVersionsServer interface { Send(*WikiGetPageVersionsResponse) error grpc.ServerStream } type wikiServiceWikiGetPageVersionsServer struct { grpc.ServerStream } func (x *wikiServiceWikiGetPageVersionsServer) Send(m *WikiGetPageVersionsResponse) error { return x.ServerStream.SendMsg(m) } func _WikiService_WikiWritePage_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(WikiServiceServer).WikiWritePage(&wikiServiceWikiWritePageServer{stream}) } type WikiService_WikiWritePageServer interface { SendAndClose(*WikiWritePageResponse) error Recv() (*WikiWritePageRequest, error) grpc.ServerStream } type wikiServiceWikiWritePageServer struct { grpc.ServerStream } func (x *wikiServiceWikiWritePageServer) SendAndClose(m *WikiWritePageResponse) error { return x.ServerStream.SendMsg(m) } func (x *wikiServiceWikiWritePageServer) Recv() (*WikiWritePageRequest, error) { m := new(WikiWritePageRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _WikiService_WikiUpdatePage_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(WikiServiceServer).WikiUpdatePage(&wikiServiceWikiUpdatePageServer{stream}) } type WikiService_WikiUpdatePageServer interface { SendAndClose(*WikiUpdatePageResponse) error Recv() (*WikiUpdatePageRequest, error) grpc.ServerStream } type wikiServiceWikiUpdatePageServer struct { grpc.ServerStream } func (x *wikiServiceWikiUpdatePageServer) SendAndClose(m *WikiUpdatePageResponse) error { return x.ServerStream.SendMsg(m) } func (x *wikiServiceWikiUpdatePageServer) Recv() (*WikiUpdatePageRequest, error) { m := new(WikiUpdatePageRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _WikiService_WikiDeletePage_Handler(srv interface{}, ctx context.Context, 
dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(WikiDeletePageRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(WikiServiceServer).WikiDeletePage(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/gitaly.WikiService/WikiDeletePage", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WikiServiceServer).WikiDeletePage(ctx, req.(*WikiDeletePageRequest)) } return interceptor(ctx, in, info, handler) } func _WikiService_WikiFindPage_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(WikiFindPageRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(WikiServiceServer).WikiFindPage(m, &wikiServiceWikiFindPageServer{stream}) } type WikiService_WikiFindPageServer interface { Send(*WikiFindPageResponse) error grpc.ServerStream } type wikiServiceWikiFindPageServer struct { grpc.ServerStream } func (x *wikiServiceWikiFindPageServer) Send(m *WikiFindPageResponse) error { return x.ServerStream.SendMsg(m) } func _WikiService_WikiFindFile_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(WikiFindFileRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(WikiServiceServer).WikiFindFile(m, &wikiServiceWikiFindFileServer{stream}) } type WikiService_WikiFindFileServer interface { Send(*WikiFindFileResponse) error grpc.ServerStream } type wikiServiceWikiFindFileServer struct { grpc.ServerStream } func (x *wikiServiceWikiFindFileServer) Send(m *WikiFindFileResponse) error { return x.ServerStream.SendMsg(m) } func _WikiService_WikiGetAllPages_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(WikiGetAllPagesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(WikiServiceServer).WikiGetAllPages(m, &wikiServiceWikiGetAllPagesServer{stream}) } type WikiService_WikiGetAllPagesServer interface { Send(*WikiGetAllPagesResponse) error grpc.ServerStream } type wikiServiceWikiGetAllPagesServer struct { grpc.ServerStream } func (x *wikiServiceWikiGetAllPagesServer) Send(m *WikiGetAllPagesResponse) error { return x.ServerStream.SendMsg(m) } var _WikiService_serviceDesc = grpc.ServiceDesc{ ServiceName: "gitaly.WikiService", HandlerType: (*WikiServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "WikiDeletePage", Handler: _WikiService_WikiDeletePage_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "WikiGetPageVersions", Handler: _WikiService_WikiGetPageVersions_Handler, ServerStreams: true, }, { StreamName: "WikiWritePage", Handler: _WikiService_WikiWritePage_Handler, ClientStreams: true, }, { StreamName: "WikiUpdatePage", Handler: _WikiService_WikiUpdatePage_Handler, ClientStreams: true, }, { StreamName: "WikiFindPage", Handler: _WikiService_WikiFindPage_Handler, ServerStreams: true, }, { StreamName: "WikiFindFile", Handler: _WikiService_WikiFindFile_Handler, ServerStreams: true, }, { StreamName: "WikiGetAllPages", Handler: _WikiService_WikiGetAllPages_Handler, ServerStreams: true, }, }, Metadata: "wiki.proto", } func init() { proto.RegisterFile("wiki.proto", fileDescriptor14) } var fileDescriptor14 = []byte{ // 846 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x72, 0xdc, 0x44, 0x10, 0x8e, 0xbc, 0xeb, 0x5d, 0x6d, 0xdb, 0x71, 0xc8, 0x10, 0x12, 0x45, 0x36, 0xc6, 0x35, 0x50, 0x85, 0xb9, 0xb8, 0xc0, 0xb9, 0x72, 0x08, 0x85, 0x89, 0x2f, 0x50, 0x18, 0x25, 0xc4, 0x47, 0xd5, 
0x64, 0xd5, 0xf6, 0x4e, 0x45, 0x7f, 0x8c, 0x66, 0xd7, 0xb5, 0x8f, 0x42, 0x15, 0x4f, 0xc0, 0xe3, 0xf0, 0x06, 0x1c, 0xb9, 0xf2, 0x04, 0xd4, 0xfc, 0x68, 0x35, 0xd2, 0xfe, 0x50, 0x61, 0xc9, 0x4d, 0xd3, 0xdd, 0xf3, 0x4d, 0x7f, 0x5f, 0x4f, 0xf7, 0x08, 0xe0, 0x8e, 0xbf, 0xe5, 0x67, 0xa5, 0x28, 0x64, 0x41, 0x06, 0xb7, 0x5c, 0xb2, 0x74, 0x1e, 0xee, 0x57, 0x13, 0x26, 0x30, 0x31, 0x56, 0x7a, 0x0d, 0x0f, 0xaf, 0xf9, 0x5b, 0xfe, 0x6d, 0x91, 0x65, 0x5c, 0x5e, 0xa0, 0x64, 0x3c, 0xad, 0x08, 0x81, 0x7e, 0xce, 0x32, 0x0c, 0xbc, 0x13, 0xef, 0x74, 0x3f, 0xd2, 0xdf, 0xe4, 0x11, 0xec, 0x62, 0xc6, 0x78, 0x1a, 0xec, 0x68, 0xa3, 0x59, 0x90, 0x00, 0x86, 0x19, 0x56, 0x15, 0xbb, 0xc5, 0xa0, 0xa7, 0xed, 0xf5, 0x92, 0xbe, 0x82, 0x07, 0x0a, 0xf8, 0x8a, 0xdd, 0xe2, 0x6b, 0x14, 0x15, 0x2f, 0x72, 0xf2, 0x05, 0x0c, 0xc6, 0xfa, 0x1c, 0x0d, 0xbc, 0x77, 0xfe, 0xf0, 0xcc, 0xa4, 0x74, 0x76, 0xc9, 0xa5, 0x49, 0x20, 0xb2, 0x01, 0xe4, 0x31, 0x0c, 0x6e, 0x0a, 0x91, 0x31, 0xa9, 0x8f, 0x1b, 0x45, 0x76, 0x45, 0xff, 0xf2, 0xc0, 0xaf, 0x61, 0xc9, 0x57, 0x30, 0x9c, 0x19, 0x68, 0x0b, 0xf8, 0xa4, 0x06, 0xec, 0x9c, 0x1c, 0xd5, 0x71, 0xeb, 0x70, 0x15, 0x3b, 0xc9, 0x65, 0x5a, 0xb3, 0x30, 0x0b, 0xf2, 0x14, 0xfc, 0xa9, 0x48, 0xe3, 0x92, 0xc9, 0x49, 0xd0, 0xd7, 0xf1, 0xc3, 0xa9, 0x48, 0xaf, 0x98, 0x9c, 0x28, 0x89, 0xb4, 0x79, 0xd7, 0x48, 0x54, 0x5a, 0x9b, 0x96, 0x6d, 0xe0, 0xc8, 0x76, 0x0c, 0x30, 0xe1, 0x95, 0x2c, 0x04, 0x1f, 0xb3, 0x34, 0x18, 0x9e, 0x78, 0xa7, 0x7e, 0xe4, 0x58, 0xd4, 0x11, 0x82, 0xdd, 0xc5, 0x09, 0x93, 0x2c, 0xf0, 0x8d, 0x82, 0x82, 0xdd, 0x5d, 0x30, 0xc9, 0xe8, 0x6f, 0x1e, 0x84, 0x8a, 0xc8, 0x25, 0x4a, 0x87, 0x4b, 0x15, 0xe1, 0x2f, 0x53, 0xac, 0x24, 0x39, 0x07, 0x10, 0x58, 0x16, 0x15, 0x97, 0x85, 0x98, 0x5b, 0x01, 0x48, 0x2d, 0x40, 0xb4, 0xf0, 0x44, 0x4e, 0x14, 0x39, 0x84, 0x51, 0xc9, 0x6e, 0xd1, 0x30, 0x32, 0x85, 0xf4, 0x95, 0xa1, 0xa1, 0x64, 0x0b, 0xb9, 0x1b, 0xe9, 0x6f, 0x95, 0x5e, 0x89, 0x22, 0xd6, 0xf6, 0xbe, 0xb6, 0x0f, 0x4b, 0x14, 0x2a, 0x1d, 0x1a, 0xc1, 0xe1, 0xca, 0xec, 0xaa, 0xb2, 0xc8, 0x2b, 0x24, 0xcf, 0xc0, 0xb7, 0xa2, 0x57, 0x81, 0x77, 0xd2, 0xdb, 0x54, 0x9d, 0x45, 0x20, 0xfd, 0xc3, 0x83, 0x47, 0xca, 0x7b, 0x2d, 0xb8, 0x44, 0x15, 0xb2, 0x0d, 0xd9, 0xba, 0x1c, 0x3b, 0x4e, 0x39, 0x9a, 0xfa, 0xf7, 0x5a, 0xf5, 0x7f, 0x0e, 0x07, 0xe6, 0xe6, 0xc5, 0x89, 0xe9, 0x01, 0xcd, 0x76, 0xef, 0xfc, 0xa9, 0x9b, 0x73, 0xab, 0x49, 0xa2, 0xfb, 0xe3, 0x56, 0xcf, 0x04, 0x30, 0x1c, 0x17, 0xb9, 0xc4, 0x5c, 0xda, 0x3b, 0x51, 0x2f, 0xe9, 0x73, 0xf8, 0xa8, 0xc3, 0xc9, 0x4a, 0xf4, 0x39, 0x3c, 0x48, 0xa6, 0x65, 0xca, 0xc7, 0x4c, 0x62, 0x8c, 0x42, 0x14, 0xc2, 0x76, 0xdc, 0xc1, 0xc2, 0xfc, 0x9d, 0xb2, 0xd2, 0xbf, 0x3d, 0x03, 0xf1, 0x73, 0x99, 0xb0, 0xed, 0x75, 0xd9, 0x78, 0x09, 0x56, 0x37, 0x42, 0x23, 0x5b, 0xff, 0x5f, 0x64, 0xdb, 0xfd, 0xef, 0xb2, 0x0d, 0xda, 0xb2, 0x9d, 0xc1, 0xe3, 0x2e, 0x67, 0xab, 0x9b, 0x1a, 0x45, 0x8e, 0x5a, 0x66, 0x41, 0x7f, 0xb7, 0x22, 0x5d, 0x60, 0x8a, 0xef, 0x59, 0xa4, 0x65, 0xda, 0xbd, 0x77, 0xa3, 0x4d, 0x03, 0x43, 0xce, 0xcd, 0xd5, 0x90, 0xa3, 0xbf, 0x7a, 0xf0, 0xa1, 0x72, 0xbd, 0xe0, 0x79, 0xb2, 0x2d, 0x89, 0x45, 0x31, 0x77, 0xdc, 0x62, 0x86, 0xe0, 0x0b, 0x9c, 0x71, 0x3d, 0x37, 0x4d, 0x95, 0x17, 0x6b, 0x72, 0x04, 0xa3, 0x84, 0x0b, 0x1c, 0xeb, 0x43, 0xfa, 0xda, 0xd9, 0x18, 0xe8, 0xd7, 0xa6, 0x3b, 0x9b, 0xd4, 0x6c, 0x41, 0x3e, 0xb3, 0x93, 0xc3, 0x64, 0xf5, 0x41, 0xb7, 0xcf, 0xcd, 0x2c, 0xa1, 0xf3, 0x86, 0xd8, 0x0b, 0x9e, 0xfe, 0xef, 0xad, 0xbd, 0x81, 0x16, 0x9d, 0x35, 0x89, 0x9b, 0xa3, 0x6d, 0xe2, 0xab, 0x1e, 0xba, 0x43, 0x18, 0x65, 0x3c, 0xc3, 0x58, 0xce, 0x4b, 0xb4, 0xaf, 0x84, 0xaf, 0x0c, 0xaf, 0xe6, 0x25, 0xb6, 
0xc6, 0x75, 0xaf, 0x35, 0xae, 0x17, 0x2f, 0x42, 0xbf, 0x79, 0x11, 0xe8, 0xf7, 0xa6, 0xcc, 0x97, 0x28, 0xbf, 0x49, 0x53, 0x25, 0xc5, 0x36, 0xd3, 0x9b, 0xc6, 0xf0, 0x64, 0x09, 0xed, 0x5d, 0x2a, 0x40, 0x8e, 0x61, 0x0f, 0xf3, 0x24, 0x2e, 0x6e, 0xcc, 0x40, 0xdf, 0xd1, 0xaf, 0xd1, 0x08, 0xf3, 0xe4, 0xc7, 0x1b, 0x15, 0x75, 0xfe, 0x67, 0x1f, 0xf6, 0xd4, 0x96, 0x97, 0x28, 0x66, 0x7c, 0x8c, 0xe4, 0x8d, 0xa9, 0x58, 0x67, 0xc4, 0x13, 0xea, 0xc2, 0xaf, 0x7e, 0x9d, 0xc2, 0x4f, 0x37, 0xc6, 0xd8, 0xbb, 0x7e, 0xef, 0x4b, 0x8f, 0x5c, 0xc1, 0xfd, 0xd6, 0x74, 0x24, 0x47, 0xee, 0xce, 0xee, 0x43, 0x10, 0x7e, 0xbc, 0xc6, 0x5b, 0x23, 0x9e, 0x7a, 0xe4, 0x25, 0x1c, 0xb4, 0x07, 0x07, 0x69, 0x6d, 0x5a, 0x1a, 0xa2, 0xe1, 0xf1, 0x3a, 0xb7, 0x03, 0xfa, 0x93, 0x01, 0x6d, 0x1a, 0xb6, 0x0d, 0xba, 0x34, 0x74, 0xda, 0xa0, 0x2b, 0xfa, 0xfc, 0x1e, 0xf9, 0x01, 0xf6, 0xdd, 0x6e, 0x22, 0x87, 0xee, 0x8e, 0x4e, 0xfb, 0x87, 0x47, 0xab, 0x9d, 0x8e, 0x90, 0x0e, 0x9c, 0xba, 0xe3, 0xcb, 0x70, 0x4e, 0xd3, 0x2d, 0xc3, 0xb9, 0x6d, 0xa1, 0xe1, 0x5e, 0x9b, 0xff, 0x37, 0xe7, 0xb2, 0x91, 0xe3, 0x4e, 0x4d, 0x3b, 0x77, 0x3a, 0xfc, 0x64, 0xad, 0xbf, 0xc1, 0x7d, 0x33, 0xd0, 0xff, 0x9d, 0xcf, 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xbb, 0xf3, 0xde, 0x9b, 0x0a, 0x00, 0x00, } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitaly/000077500000000000000000000000001324746544700312605ustar00rootroot00000000000000LICENSE000066400000000000000000000020731324746544700322100ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitalyThe MIT License (MIT) Copyright (c) 2016-2017 GitLab B.V. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. NOTICE000066400000000000000000004222211324746544700321100ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/gitlab.com/gitlab-org/gitalyThe following components are included in Gitaly: LICENSE - go Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. 
nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PATENTS - go Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly The MIT License (MIT) Copyright (c) 2016-2017 GitLab B.V. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/internal/middleware/panichandler Copyright (c) 2016 Masahiro Sano MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ COPYING - gitlab.com/gitlab-org/gitaly/vendor/github.com/BurntSushi/toml The MIT License (MIT) Copyright (c) 2013 TOML authors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/beorn7/perks Copyright (C) 2013 Blake Mizerany Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/certifi/gocertifi This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/davecgh/go-spew ISC License Copyright (c) 2012-2016 Dave Collins Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/getsentry/raven-go Copyright (c) 2013 Apollic Software, LLC. All rights reserved. Copyright (c) 2015 Functional Software, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Apollic Software, LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/golang/protobuf Go support for Protocol Buffers - Google's data interchange format Copyright 2010 The Go Authors. All rights reserved. 
https://github.com/golang/protobuf Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/grpc-ecosystem/go-grpc-middleware Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/grpc-ecosystem/go-grpc-prometheus Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/kelseyhightower/envconfig Copyright (c) 2013 Kelsey Hightower Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/matttproud/golang_protobuf_extensions Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NOTICE - gitlab.com/gitlab-org/gitaly/vendor/github.com/matttproud/golang_protobuf_extensions Copyright 2012 Matt T. Proud (matt.proud@gmail.com) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/pmezard/go-difflib Copyright (c) 2013, Patrick Mezard All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. The names of its contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/prometheus/client_golang Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NOTICE - gitlab.com/gitlab-org/gitaly/vendor/github.com/prometheus/client_golang Prometheus instrumentation library for Go applications Copyright 2012-2015 The Prometheus Authors This product includes software developed at SoundCloud Ltd. (http://soundcloud.com/). The following components are included in this product: perks - a fork of https://github.com/bmizerany/perks https://github.com/beorn7/perks Copyright 2013-2015 Blake Mizerany, Björn Rabenstein See https://github.com/beorn7/perks/blob/master/README.md for license details. Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. Support for streaming Protocol Buffer messages for the Go language (golang). 
https://github.com/matttproud/golang_protobuf_extensions Copyright 2013 Matt T. Proud Licensed under the Apache License, Version 2.0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/prometheus/client_model Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NOTICE - gitlab.com/gitlab-org/gitaly/vendor/github.com/prometheus/client_model Data model artifacts for Prometheus. Copyright 2012-2015 The Prometheus Authors This product includes software developed at SoundCloud Ltd. (http://soundcloud.com/). ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/prometheus/common Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NOTICE - gitlab.com/gitlab-org/gitaly/vendor/github.com/prometheus/common Common libraries shared by Prometheus Go components. Copyright 2015 The Prometheus Authors This product includes software developed at SoundCloud Ltd. (http://soundcloud.com/). ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/prometheus/procfs Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NOTICE - gitlab.com/gitlab-org/gitaly/vendor/github.com/prometheus/procfs procfs provides functions to retrieve system, kernel and process metrics from the pseudo-filesystem proc. Copyright 2014-2015 The Prometheus Authors This product includes software developed at SoundCloud Ltd. (http://soundcloud.com/). 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/sirupsen/logrus The MIT License (MIT) Copyright (c) 2014 Simon Eskildsen Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/github.com/stretchr/testify Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell Please consider promoting this project if you find it useful. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/gitlab.com/gitlab-org/gitaly-proto The MIT License (MIT) Copyright (c) 2016-2017 GitLab B.V. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/crypto Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PATENTS - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/crypto Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/net Copyright (c) 2009 The Go Authors. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PATENTS - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/net Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/sync Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
* Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PATENTS - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/sync Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/sys Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PATENTS - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/sys Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/text Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PATENTS - gitlab.com/gitlab-org/gitaly/vendor/golang.org/x/text Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/google.golang.org/genproto Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LICENSE - gitlab.com/gitlab-org/gitaly/vendor/google.golang.org/grpc Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

go/vendor/gitlab.com/gitlab-org/gitaly/auth/rpccredentials.go

package gitalyauth

import (
	"encoding/base64"

	"golang.org/x/net/context"
	"google.golang.org/grpc/credentials"
)

// RPCCredentials can be used with grpc.WithPerRPCCredentials to create a
// grpc.DialOption that inserts the supplied token for authentication
// with a Gitaly server.
func RPCCredentials(token string) credentials.PerRPCCredentials {
	return &rpcCredentials{token: base64.StdEncoding.EncodeToString([]byte(token))}
}

type rpcCredentials struct {
	token string
}

func (*rpcCredentials) RequireTransportSecurity() bool { return false }

func (rc *rpcCredentials) GetRequestMetadata(context.Context, ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + rc.token}, nil
}
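
Usage sketch (illustrative only, not part of the vendored tree): RPCCredentials implements credentials.PerRPCCredentials, so it is meant to be handed to grpc-go's WithPerRPCCredentials dial option. The server address and the GITALY_TOKEN environment variable below are assumptions made for the example.

package main

import (
	"log"
	"os"

	gitalyauth "gitlab.com/gitlab-org/gitaly/auth"
	"google.golang.org/grpc"
)

func main() {
	// Hypothetical token source, for illustration only.
	token := os.Getenv("GITALY_TOKEN")

	conn, err := grpc.Dial(
		"localhost:9999",    // assumed address
		grpc.WithInsecure(), // matches RequireTransportSecurity() == false above
		grpc.WithPerRPCCredentials(gitalyauth.RPCCredentials(token)),
	)
	if err != nil {
		log.Fatalf("dial gitaly: %v", err)
	}
	defer conn.Close()
}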

go/vendor/gitlab.com/gitlab-org/gitaly/client/dial.go

package client

import (
	"fmt"
	"net"
	"net/url"
	"strings"
	"time"

	"google.golang.org/grpc"
)

// DefaultDialOpts hold the default DialOptions for connection to Gitaly over UNIX-socket
var DefaultDialOpts = []grpc.DialOption{
	grpc.WithInsecure(),
}

// Dial gitaly
func Dial(rawAddress string, connOpts []grpc.DialOption) (*grpc.ClientConn, error) {
	network, addr, err := parseAddress(rawAddress)
	if err != nil {
		return nil, err
	}

	connOpts = append(connOpts,
		grpc.WithDialer(func(a string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout(network, a, timeout)
		}))

	conn, err := grpc.Dial(addr, connOpts...)
	if err != nil {
		return nil, err
	}

	return conn, nil
}

func parseAddress(rawAddress string) (network, addr string, err error) {
	// Parsing unix:// URL's with url.Parse does not give the result we want
	// so we do it manually.
	for _, prefix := range []string{"unix://", "unix:"} {
		if strings.HasPrefix(rawAddress, prefix) {
			return "unix", strings.TrimPrefix(rawAddress, prefix), nil
		}
	}

	u, err := url.Parse(rawAddress)
	if err != nil {
		return "", "", err
	}

	if u.Scheme != "tcp" {
		return "", "", fmt.Errorf("unknown scheme: %q", rawAddress)
	}
	if u.Host == "" {
		return "", "", fmt.Errorf("network tcp requires host: %q", rawAddress)
	}
	if u.Path != "" {
		return "", "", fmt.Errorf("network tcp should have no path: %q", rawAddress)
	}

	return "tcp", u.Host, nil
}

go/vendor/gitlab.com/gitlab-org/gitaly/client/receive_pack.go

package client

import (
	"io"

	"gitlab.com/gitlab-org/gitaly/streamio"

	pb "gitlab.com/gitlab-org/gitaly-proto/go"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// ReceivePack proxies an SSH git-receive-pack (git push) session to Gitaly
func ReceivePack(ctx context.Context, conn *grpc.ClientConn, stdin io.Reader, stdout, stderr io.Writer, req *pb.SSHReceivePackRequest) (int32, error) {
	ctx2, cancel := context.WithCancel(ctx)
	defer cancel()

	ssh := pb.NewSSHServiceClient(conn)
	stream, err := ssh.SSHReceivePack(ctx2)
	if err != nil {
		return 0, err
	}

	if err = stream.Send(req); err != nil {
		return 0, err
	}

	inWriter := streamio.NewWriter(func(p []byte) error {
		return stream.Send(&pb.SSHReceivePackRequest{Stdin: p})
	})

	return streamHandler(func() (stdoutStderrResponse, error) {
		return stream.Recv()
	}, func(errC chan error) {
		_, errRecv := io.Copy(inWriter, stdin)
		stream.CloseSend()
		errC <- errRecv
	}, stdout, stderr)
}

go/vendor/gitlab.com/gitlab-org/gitaly/client/std_stream.go

package client

import (
	"fmt"
	"io"

	pb "gitlab.com/gitlab-org/gitaly-proto/go"
)

type stdoutStderrResponse interface {
	GetExitStatus() *pb.ExitStatus
	GetStderr() []byte
	GetStdout() []byte
}

func streamHandler(recv func() (stdoutStderrResponse, error), send func(chan error), stdout, stderr io.Writer) (int32, error) {
	var (
		exitStatus int32
		err        error
		resp       stdoutStderrResponse
	)

	errC := make(chan error, 1)

	go send(errC)

	for {
		resp, err = recv()
		if err != nil {
			break
		}

		if resp.GetExitStatus() != nil {
			exitStatus = resp.GetExitStatus().GetValue()
		}

		if len(resp.GetStderr()) > 0 {
			if _, err = stderr.Write(resp.GetStderr()); err != nil {
				break
			}
		}

		if len(resp.GetStdout()) > 0 {
			if _, err = stdout.Write(resp.GetStdout()); err != nil {
				break
			}
		}
	}
	if err == io.EOF {
		err = nil
	}
	if err != nil {
		return exitStatus, err
	}

	select {
	case errSend := <-errC:
		if errSend != nil {
			// This should not happen
			errSend = fmt.Errorf("stdin send error: %v", errSend)
		}
		return exitStatus, errSend
	default:
		return exitStatus, nil
	}
}

go/vendor/gitlab.com/gitlab-org/gitaly/client/upload_pack.go

package client

import (
	"io"

	"gitlab.com/gitlab-org/gitaly/streamio"

	pb "gitlab.com/gitlab-org/gitaly-proto/go"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// UploadPack proxies an SSH git-upload-pack (git fetch) session to Gitaly
func UploadPack(ctx context.Context, conn *grpc.ClientConn, stdin io.Reader, stdout, stderr io.Writer, req *pb.SSHUploadPackRequest) (int32, error) {
	ctx2, cancel := context.WithCancel(ctx)
	defer cancel()

	ssh := pb.NewSSHServiceClient(conn)
	stream, err := ssh.SSHUploadPack(ctx2)
	if err != nil {
		return 0, err
	}

	if err = stream.Send(req); err != nil {
		return 0, err
	}

	inWriter := streamio.NewWriter(func(p []byte) error {
		return stream.Send(&pb.SSHUploadPackRequest{Stdin: p})
	})

	return streamHandler(func() (stdoutStderrResponse, error) {
		return stream.Recv()
	}, func(errC chan error) {
		_, errRecv := io.Copy(inWriter, stdin)
		stream.CloseSend()
		errC <- errRecv
	}, stdout, stderr)
}

go/vendor/gitlab.com/gitlab-org/gitaly/streamio/stream.go

// Package streamio contains wrappers intended for turning gRPC streams
// that send/receive messages with a []byte field into io.Writers and
// io.Readers.
//
package streamio

import (
	"io"
	"os"
	"strconv"
)

func init() {
	bufSize64, err := strconv.ParseInt(os.Getenv("GITALY_STREAMIO_WRITE_BUFFER_SIZE"), 10, 32)
	if err == nil && bufSize64 > 0 {
		WriteBufferSize = int(bufSize64)
	}
}

// NewReader turns receiver into an io.Reader. Errors from the receiver
// function are passed on unmodified. This means receiver should emit
// io.EOF when done.
func NewReader(receiver func() ([]byte, error)) io.Reader {
	return &receiveReader{receiver: receiver}
}

type receiveReader struct {
	receiver func() ([]byte, error)
	data     []byte
	err      error
}

func (rr *receiveReader) Read(p []byte) (int, error) {
	if len(rr.data) == 0 {
		rr.data, rr.err = rr.receiver()
	}

	n := copy(p, rr.data)
	rr.data = rr.data[n:]

	if len(rr.data) == 0 {
		return n, rr.err
	}

	return n, nil
}

// WriteTo implements io.WriterTo.
func (rr *receiveReader) WriteTo(w io.Writer) (int64, error) {
	var written int64

	// Deal with left-over state in rr.data and rr.err, if any
	if len(rr.data) > 0 {
		n, err := w.Write(rr.data)
		written += int64(n)
		if err != nil {
			return written, err
		}
	}
	if rr.err != nil {
		return written, rr.err
	}

	// Consume the response stream
	var errRead, errWrite error
	var n int
	var buf []byte
	for errWrite == nil && errRead != io.EOF {
		buf, errRead = rr.receiver()
		if errRead != nil && errRead != io.EOF {
			return written, errRead
		}

		if len(buf) > 0 {
			n, errWrite = w.Write(buf)
			written += int64(n)
		}
	}

	return written, errWrite
}

// NewWriter turns sender into an io.Writer. The sender callback will
// receive []byte arguments of length at most WriteBufferSize.
func NewWriter(sender func(p []byte) error) io.Writer {
	return &sendWriter{sender: sender}
}

// WriteBufferSize is the largest []byte that Write() will pass to its
// underlying send function. This value can be changed at runtime using
// the GITALY_STREAMIO_WRITE_BUFFER_SIZE environment variable.
var WriteBufferSize = 128 * 1024

type sendWriter struct {
	sender func([]byte) error
}

func (sw *sendWriter) Write(p []byte) (int, error) {
	var sent int

	for len(p) > 0 {
		chunkSize := len(p)
		if chunkSize > WriteBufferSize {
			chunkSize = WriteBufferSize
		}

		if err := sw.sender(p[:chunkSize]); err != nil {
			return sent, err
		}

		sent += chunkSize
		p = p[chunkSize:]
	}

	return sent, nil
}

// ReadFrom implements io.ReaderFrom.
func (sw *sendWriter) ReadFrom(r io.Reader) (int64, error) {
	var nRead int64
	buf := make([]byte, WriteBufferSize)

	var errRead, errSend error
	for errSend == nil && errRead != io.EOF {
		var n int

		n, errRead = r.Read(buf)
		nRead += int64(n)
		if errRead != nil && errRead != io.EOF {
			return nRead, errRead
		}

		if n > 0 {
			errSend = sw.sender(buf[:n])
		}
	}

	return nRead, errSend
}
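
Illustrative, self-contained sketch of the streamio helpers above: NewWriter chops writes into chunks of at most WriteBufferSize and passes each chunk to the supplied callback, while NewReader turns a receive callback into an io.Reader that ends when the callback returns io.EOF. The in-memory chunk slice below stands in for a gRPC stream and is an assumption made for the example.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"gitlab.com/gitlab-org/gitaly/streamio"
)

func main() {
	// Collect whatever the writer sends; in real use each chunk would be
	// wrapped in a gRPC message and sent on a stream.
	var chunks [][]byte
	w := streamio.NewWriter(func(p []byte) error {
		chunks = append(chunks, append([]byte(nil), p...))
		return nil
	})
	if _, err := io.Copy(w, strings.NewReader("hello gitaly")); err != nil {
		panic(err)
	}

	// Replay the chunks through a reader; returning io.EOF ends the stream.
	i := 0
	r := streamio.NewReader(func() ([]byte, error) {
		if i >= len(chunks) {
			return nil, io.EOF
		}
		p := chunks[i]
		i++
		return p, nil
	})

	out, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // prints "hello gitaly"
}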
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/PATENTS000066400000000000000000000024271324746544700300610ustar00rootroot00000000000000Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/context/000077500000000000000000000000001324746544700304775ustar00rootroot00000000000000context.go000066400000000000000000000140571324746544700324420ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/context// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package context defines the Context type, which carries deadlines, // cancelation signals, and other request-scoped values across API boundaries // and between processes. // // Incoming requests to a server should create a Context, and outgoing calls to // servers should accept a Context. The chain of function calls between must // propagate the Context, optionally replacing it with a modified copy created // using WithDeadline, WithTimeout, WithCancel, or WithValue. // // Programs that use Contexts should follow these rules to keep interfaces // consistent across packages and enable static analysis tools to check context // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context // explicitly to each function that needs it. The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... // } // // Do not pass a nil Context, even if a function permits it. 
Pass context.TODO // if you are unsure about which Context to use. // // Use context Values only for request-scoped data that transits processes and // APIs, not for passing optional parameters to functions. // // The same Context may be passed to functions running in different goroutines; // Contexts are safe for simultaneous use by multiple goroutines. // // See http://blog.golang.org/context for example code for a server that uses // Contexts. package context // import "golang.org/x/net/context" import "time" // A Context carries a deadline, a cancelation signal, and other values across // API boundaries. // // Context's methods may be called by multiple goroutines simultaneously. type Context interface { // Deadline returns the time when work done on behalf of this context // should be canceled. Deadline returns ok==false when no deadline is // set. Successive calls to Deadline return the same results. Deadline() (deadline time.Time, ok bool) // Done returns a channel that's closed when work done on behalf of this // context should be canceled. Done may return nil if this context can // never be canceled. Successive calls to Done return the same value. // // WithCancel arranges for Done to be closed when cancel is called; // WithDeadline arranges for Done to be closed when the deadline // expires; WithTimeout arranges for Done to be closed when the timeout // elapses. // // Done is provided for use in select statements: // // // Stream generates values with DoSomething and sends them to out // // until DoSomething returns an error or ctx.Done is closed. // func Stream(ctx context.Context, out chan<- Value) error { // for { // v, err := DoSomething(ctx) // if err != nil { // return err // } // select { // case <-ctx.Done(): // return ctx.Err() // case out <- v: // } // } // } // // See http://blog.golang.org/pipelines for more examples of how to use // a Done channel for cancelation. Done() <-chan struct{} // Err returns a non-nil error value after Done is closed. Err returns // Canceled if the context was canceled or DeadlineExceeded if the // context's deadline passed. No other values for Err are defined. // After Done is closed, successive calls to Err return the same value. Err() error // Value returns the value associated with this context for key, or nil // if no value is associated with key. Successive calls to Value with // the same key returns the same result. // // Use context values only for request-scoped data that transits // processes and API boundaries, not for passing optional parameters to // functions. // // A key identifies a specific value in a Context. Functions that wish // to store values in Context typically allocate a key in a global // variable then use that key as the argument to context.WithValue and // Context.Value. A key can be any type that supports equality; // packages should define keys as an unexported type to avoid // collisions. // // Packages that define a Context key should provide type-safe accessors // for the values stores using that key: // // // Package user defines a User type that's stored in Contexts. // package user // // import "golang.org/x/net/context" // // // User is the type of value stored in the Contexts. // type User struct {...} // // // key is an unexported type for keys defined in this package. // // This prevents collisions with keys defined in other packages. // type key int // // // userKey is the key for user.User values in Contexts. 
It is // // unexported; clients use user.NewContext and user.FromContext // // instead of using this key directly. // var userKey key = 0 // // // NewContext returns a new Context that carries value u. // func NewContext(ctx context.Context, u *User) context.Context { // return context.WithValue(ctx, userKey, u) // } // // // FromContext returns the User value stored in ctx, if any. // func FromContext(ctx context.Context) (*User, bool) { // u, ok := ctx.Value(userKey).(*User) // return u, ok // } Value(key interface{}) interface{} } // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, // initialization, and tests, and as the top-level Context for incoming // requests. func Background() Context { return background } // TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context // parameter). TODO is recognized by static analysis tools that determine // whether Contexts are propagated correctly in a program. func TODO() Context { return todo } // A CancelFunc tells an operation to abandon its work. // A CancelFunc does not wait for the work to stop. // After the first call, subsequent calls to a CancelFunc do nothing. type CancelFunc func() go17.go000066400000000000000000000054561324746544700315360ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/context// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build go1.7 package context import ( "context" // standard library's context, as of Go 1.7 "time" ) var ( todo = context.TODO() background = context.Background() ) // Canceled is the error returned by Context.Err when the context is canceled. var Canceled = context.Canceled // DeadlineExceeded is the error returned by Context.Err when the context's // deadline passes. var DeadlineExceeded = context.DeadlineExceeded // WithCancel returns a copy of parent with a new Done channel. The returned // context's Done channel is closed when the returned cancel function is called // or when the parent context's Done channel is closed, whichever happens first. // // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete. func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { ctx, f := context.WithCancel(parent) return ctx, CancelFunc(f) } // WithDeadline returns a copy of the parent context with the deadline adjusted // to be no later than d. If the parent's deadline is already earlier than d, // WithDeadline(parent, d) is semantically equivalent to parent. The returned // context's Done channel is closed when the deadline expires, when the returned // cancel function is called, or when the parent context's Done channel is // closed, whichever happens first. // // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete. func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { ctx, f := context.WithDeadline(parent, deadline) return ctx, CancelFunc(f) } // WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
// // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete: // // func slowOperationWithTimeout(ctx context.Context) (Result, error) { // ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) // defer cancel() // releases resources if slowOperation completes before timeout elapses // return slowOperation(ctx) // } func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return WithDeadline(parent, time.Now().Add(timeout)) } // WithValue returns a copy of parent in which the value associated with key is // val. // // Use context Values only for request-scoped data that transits processes and // APIs, not for passing optional parameters to functions. func WithValue(parent Context, key interface{}, val interface{}) Context { return context.WithValue(parent, key, val) } pre_go17.go000066400000000000000000000176731324746544700324100ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/context// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build !go1.7 package context import ( "errors" "fmt" "sync" "time" ) // An emptyCtx is never canceled, has no values, and has no deadline. It is not // struct{}, since vars of this type must have distinct addresses. type emptyCtx int func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { return } func (*emptyCtx) Done() <-chan struct{} { return nil } func (*emptyCtx) Err() error { return nil } func (*emptyCtx) Value(key interface{}) interface{} { return nil } func (e *emptyCtx) String() string { switch e { case background: return "context.Background" case todo: return "context.TODO" } return "unknown empty Context" } var ( background = new(emptyCtx) todo = new(emptyCtx) ) // Canceled is the error returned by Context.Err when the context is canceled. var Canceled = errors.New("context canceled") // DeadlineExceeded is the error returned by Context.Err when the context's // deadline passes. var DeadlineExceeded = errors.New("context deadline exceeded") // WithCancel returns a copy of parent with a new Done channel. The returned // context's Done channel is closed when the returned cancel function is called // or when the parent context's Done channel is closed, whichever happens first. // // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete. func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { c := newCancelCtx(parent) propagateCancel(parent, c) return c, func() { c.cancel(true, Canceled) } } // newCancelCtx returns an initialized cancelCtx. func newCancelCtx(parent Context) *cancelCtx { return &cancelCtx{ Context: parent, done: make(chan struct{}), } } // propagateCancel arranges for child to be canceled when parent is. 
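// Illustrative sketch, not part of the vendored file: the behaviour this
// helper provides, observed through the public API of this package. Canceling
// a parent context also closes the Done channel of every context derived from it.
//
//	parent, cancelParent := WithCancel(Background())
//	child, cancelChild := WithCancel(parent)
//	defer cancelChild()
//	cancelParent()
//	<-child.Done() // unblocks because cancelation propagated from the parent
//	_ = child.Err() // reports Canceled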
func propagateCancel(parent Context, child canceler) { if parent.Done() == nil { return // parent is never canceled } if p, ok := parentCancelCtx(parent); ok { p.mu.Lock() if p.err != nil { // parent has already been canceled child.cancel(false, p.err) } else { if p.children == nil { p.children = make(map[canceler]bool) } p.children[child] = true } p.mu.Unlock() } else { go func() { select { case <-parent.Done(): child.cancel(false, parent.Err()) case <-child.Done(): } }() } } // parentCancelCtx follows a chain of parent references until it finds a // *cancelCtx. This function understands how each of the concrete types in this // package represents its parent. func parentCancelCtx(parent Context) (*cancelCtx, bool) { for { switch c := parent.(type) { case *cancelCtx: return c, true case *timerCtx: return c.cancelCtx, true case *valueCtx: parent = c.Context default: return nil, false } } } // removeChild removes a context from its parent. func removeChild(parent Context, child canceler) { p, ok := parentCancelCtx(parent) if !ok { return } p.mu.Lock() if p.children != nil { delete(p.children, child) } p.mu.Unlock() } // A canceler is a context type that can be canceled directly. The // implementations are *cancelCtx and *timerCtx. type canceler interface { cancel(removeFromParent bool, err error) Done() <-chan struct{} } // A cancelCtx can be canceled. When canceled, it also cancels any children // that implement canceler. type cancelCtx struct { Context done chan struct{} // closed by the first cancel call. mu sync.Mutex children map[canceler]bool // set to nil by the first cancel call err error // set to non-nil by the first cancel call } func (c *cancelCtx) Done() <-chan struct{} { return c.done } func (c *cancelCtx) Err() error { c.mu.Lock() defer c.mu.Unlock() return c.err } func (c *cancelCtx) String() string { return fmt.Sprintf("%v.WithCancel", c.Context) } // cancel closes c.done, cancels each of c's children, and, if // removeFromParent is true, removes c from its parent's children. func (c *cancelCtx) cancel(removeFromParent bool, err error) { if err == nil { panic("context: internal error: missing cancel error") } c.mu.Lock() if c.err != nil { c.mu.Unlock() return // already canceled } c.err = err close(c.done) for child := range c.children { // NOTE: acquiring the child's lock while holding parent's lock. child.cancel(false, err) } c.children = nil c.mu.Unlock() if removeFromParent { removeChild(c.Context, c) } } // WithDeadline returns a copy of the parent context with the deadline adjusted // to be no later than d. If the parent's deadline is already earlier than d, // WithDeadline(parent, d) is semantically equivalent to parent. The returned // context's Done channel is closed when the deadline expires, when the returned // cancel function is called, or when the parent context's Done channel is // closed, whichever happens first. // // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete. func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { // The current deadline is already sooner than the new one. 
return WithCancel(parent) } c := &timerCtx{ cancelCtx: newCancelCtx(parent), deadline: deadline, } propagateCancel(parent, c) d := deadline.Sub(time.Now()) if d <= 0 { c.cancel(true, DeadlineExceeded) // deadline has already passed return c, func() { c.cancel(true, Canceled) } } c.mu.Lock() defer c.mu.Unlock() if c.err == nil { c.timer = time.AfterFunc(d, func() { c.cancel(true, DeadlineExceeded) }) } return c, func() { c.cancel(true, Canceled) } } // A timerCtx carries a timer and a deadline. It embeds a cancelCtx to // implement Done and Err. It implements cancel by stopping its timer then // delegating to cancelCtx.cancel. type timerCtx struct { *cancelCtx timer *time.Timer // Under cancelCtx.mu. deadline time.Time } func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { return c.deadline, true } func (c *timerCtx) String() string { return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) } func (c *timerCtx) cancel(removeFromParent bool, err error) { c.cancelCtx.cancel(false, err) if removeFromParent { // Remove this timerCtx from its parent cancelCtx's children. removeChild(c.cancelCtx.Context, c) } c.mu.Lock() if c.timer != nil { c.timer.Stop() c.timer = nil } c.mu.Unlock() } // WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). // // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete: // // func slowOperationWithTimeout(ctx context.Context) (Result, error) { // ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) // defer cancel() // releases resources if slowOperation completes before timeout elapses // return slowOperation(ctx) // } func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return WithDeadline(parent, time.Now().Add(timeout)) } // WithValue returns a copy of parent in which the value associated with key is // val. // // Use context Values only for request-scoped data that transits processes and // APIs, not for passing optional parameters to functions. func WithValue(parent Context, key interface{}, val interface{}) Context { return &valueCtx{parent, key, val} } // A valueCtx carries a key-value pair. It implements Value for that key and // delegates all other calls to the embedded Context. type valueCtx struct { Context key, val interface{} } func (c *valueCtx) String() string { return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) } func (c *valueCtx) Value(key interface{}) interface{} { if c.key == key { return c.val } return c.Context.Value(key) } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2/000077500000000000000000000000001324746544700300545ustar00rootroot00000000000000Dockerfile000066400000000000000000000027221324746544700317720ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2# # This Dockerfile builds a recent curl with HTTP/2 client support, using # a recent nghttp2 build. # # See the Makefile for how to tag it. If Docker and that image is found, the # Go tests use this curl binary for integration tests. 
# FROM ubuntu:trusty RUN apt-get update && \ apt-get upgrade -y && \ apt-get install -y git-core build-essential wget RUN apt-get install -y --no-install-recommends \ autotools-dev libtool pkg-config zlib1g-dev \ libcunit1-dev libssl-dev libxml2-dev libevent-dev \ automake autoconf # The list of packages nghttp2 recommends for h2load: RUN apt-get install -y --no-install-recommends make binutils \ autoconf automake autotools-dev \ libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ libev-dev libevent-dev libjansson-dev libjemalloc-dev \ cython python3.4-dev python-setuptools # Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: ENV NGHTTP2_VER 895da9a RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git WORKDIR /root/nghttp2 RUN git reset --hard $NGHTTP2_VER RUN autoreconf -i RUN automake RUN autoconf RUN ./configure RUN make RUN make install WORKDIR /root RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz RUN tar -zxvf curl-7.45.0.tar.gz WORKDIR /root/curl-7.45.0 RUN ./configure --with-ssl --with-nghttp2=/usr/local RUN make RUN make install RUN ldconfig CMD ["-h"] ENTRYPOINT ["/usr/local/bin/curl"] Makefile000066400000000000000000000000541324746544700314340ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2curlimage: docker build -t gohttp2/curl . gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2/README000066400000000000000000000011621324746544700307340ustar00rootroot00000000000000This is a work-in-progress HTTP/2 implementation for Go. It will eventually live in the Go standard library and won't require any changes to your code to use. It will just be automatic. Status: * The server support is pretty good. A few things are missing but are being worked on. * The client work has just started but shares a lot of code is coming along much quicker. Docs are at https://godoc.org/golang.org/x/net/http2 Demo test server at https://http2.golang.org/ Help & bug reports welcome! Contributing: https://golang.org/doc/contribute.html Bugs: https://golang.org/issue/new?title=x/net/http2:+ client_conn_pool.go000066400000000000000000000145701324746544700336570ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Transport code's client connection pooling. package http2 import ( "crypto/tls" "net/http" "sync" ) // ClientConnPool manages a pool of HTTP/2 client connections. type ClientConnPool interface { GetClientConn(req *http.Request, addr string) (*ClientConn, error) MarkDead(*ClientConn) } // clientConnPoolIdleCloser is the interface implemented by ClientConnPool // implementations which can close their idle connections. type clientConnPoolIdleCloser interface { ClientConnPool closeIdleConnections() } var ( _ clientConnPoolIdleCloser = (*clientConnPool)(nil) _ clientConnPoolIdleCloser = noDialClientConnPool{} ) // TODO: use singleflight for dialing and addConnCalls? type clientConnPool struct { t *Transport mu sync.Mutex // TODO: maybe switch to RWMutex // TODO: add support for sharing conns based on cert names // (e.g. 
share conn for googleapis.com and appspot.com) conns map[string][]*ClientConn // key is host:port dialing map[string]*dialCall // currently in-flight dials keys map[*ClientConn][]string addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls } func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { return p.getClientConn(req, addr, dialOnMiss) } const ( dialOnMiss = true noDialOnMiss = false ) func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { if isConnectionCloseRequest(req) && dialOnMiss { // It gets its own connection. const singleUse = true cc, err := p.t.dialClientConn(addr, singleUse) if err != nil { return nil, err } return cc, nil } p.mu.Lock() for _, cc := range p.conns[addr] { if cc.CanTakeNewRequest() { p.mu.Unlock() return cc, nil } } if !dialOnMiss { p.mu.Unlock() return nil, ErrNoCachedConn } call := p.getStartDialLocked(addr) p.mu.Unlock() <-call.done return call.res, call.err } // dialCall is an in-flight Transport dial call to a host. type dialCall struct { p *clientConnPool done chan struct{} // closed when done res *ClientConn // valid after done is closed err error // valid after done is closed } // requires p.mu is held. func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { if call, ok := p.dialing[addr]; ok { // A dial is already in-flight. Don't start another. return call } call := &dialCall{p: p, done: make(chan struct{})} if p.dialing == nil { p.dialing = make(map[string]*dialCall) } p.dialing[addr] = call go call.dial(addr) return call } // run in its own goroutine. func (c *dialCall) dial(addr string) { const singleUse = false // shared conn c.res, c.err = c.p.t.dialClientConn(addr, singleUse) close(c.done) c.p.mu.Lock() delete(c.p.dialing, addr) if c.err == nil { c.p.addConnLocked(addr, c.res) } c.p.mu.Unlock() } // addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't // already exist. It coalesces concurrent calls with the same key. // This is used by the http1 Transport code when it creates a new connection. Because // the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know // the protocol), it can get into a situation where it has multiple TLS connections. // This code decides which ones live or die. // The return value used is whether c was used. // c is never closed. 
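// Illustrative sketch, not part of the vendored file: the TLSNextProto hook
// installed by configureTransport (see configure_transport.go in this package)
// invokes it roughly like this after the http1 Transport finishes a TLS dial:
//
//	if used, err := connPool.addConnIfNeeded(addr, t2, tlsConn); err != nil {
//		// surface the error to the caller
//	} else if !used {
//		tlsConn.Close() // a concurrent dial already registered a conn for addr
//	}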
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { p.mu.Lock() for _, cc := range p.conns[key] { if cc.CanTakeNewRequest() { p.mu.Unlock() return false, nil } } call, dup := p.addConnCalls[key] if !dup { if p.addConnCalls == nil { p.addConnCalls = make(map[string]*addConnCall) } call = &addConnCall{ p: p, done: make(chan struct{}), } p.addConnCalls[key] = call go call.run(t, key, c) } p.mu.Unlock() <-call.done if call.err != nil { return false, call.err } return !dup, nil } type addConnCall struct { p *clientConnPool done chan struct{} // closed when done err error } func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { cc, err := t.NewClientConn(tc) p := c.p p.mu.Lock() if err != nil { c.err = err } else { p.addConnLocked(key, cc) } delete(p.addConnCalls, key) p.mu.Unlock() close(c.done) } func (p *clientConnPool) addConn(key string, cc *ClientConn) { p.mu.Lock() p.addConnLocked(key, cc) p.mu.Unlock() } // p.mu must be held func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { for _, v := range p.conns[key] { if v == cc { return } } if p.conns == nil { p.conns = make(map[string][]*ClientConn) } if p.keys == nil { p.keys = make(map[*ClientConn][]string) } p.conns[key] = append(p.conns[key], cc) p.keys[cc] = append(p.keys[cc], key) } func (p *clientConnPool) MarkDead(cc *ClientConn) { p.mu.Lock() defer p.mu.Unlock() for _, key := range p.keys[cc] { vv, ok := p.conns[key] if !ok { continue } newList := filterOutClientConn(vv, cc) if len(newList) > 0 { p.conns[key] = newList } else { delete(p.conns, key) } } delete(p.keys, cc) } func (p *clientConnPool) closeIdleConnections() { p.mu.Lock() defer p.mu.Unlock() // TODO: don't close a cc if it was just added to the pool // milliseconds ago and has never been used. There's currently // a small race window with the HTTP/1 Transport's integration // where it can add an idle conn just before using it, and // somebody else can concurrently call CloseIdleConns and // break some caller's RoundTrip. for _, vv := range p.conns { for _, cc := range vv { cc.closeIfIdle() } } } func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { out := in[:0] for _, v := range in { if v != exclude { out = append(out, v) } } // If we filtered it out, zero out the last item to prevent // the GC from seeing it. if len(in) != len(out) { in[len(in)-1] = nil } return out } // noDialClientConnPool is an implementation of http2.ClientConnPool // which never dials. We let the HTTP/1.1 client dial and use its TLS // connection instead. type noDialClientConnPool struct{ *clientConnPool } func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { return p.getClientConn(req, addr, noDialOnMiss) } configure_transport.go000066400000000000000000000043721324746544700344270ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
// +build go1.6 package http2 import ( "crypto/tls" "fmt" "net/http" ) func configureTransport(t1 *http.Transport) (*Transport, error) { connPool := new(clientConnPool) t2 := &Transport{ ConnPool: noDialClientConnPool{connPool}, t1: t1, } connPool.t = t2 if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { return nil, err } if t1.TLSClientConfig == nil { t1.TLSClientConfig = new(tls.Config) } if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) } if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { addr := authorityAddr("https", authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} } else if !used { // Turns out we don't need this c. // For example, two goroutines made requests to the same host // at the same time, both kicking off TCP dials. (since protocol // was unknown) go c.Close() } return t2 } if m := t1.TLSNextProto; len(m) == 0 { t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ "h2": upgradeFn, } } else { m["h2"] = upgradeFn } return t2, nil } // registerHTTPSProtocol calls Transport.RegisterProtocol but // convering panics into errors. func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("%v", e) } }() t.RegisterProtocol("https", rt) return nil } // noDialH2RoundTripper is a RoundTripper which only tries to complete the request // if there's already has a cached connection to the host. type noDialH2RoundTripper struct{ t *Transport } func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { res, err := rt.t.RoundTrip(req) if err == ErrNoCachedConn { return nil, http.ErrSkipAltProtocol } return res, err } databuffer.go000066400000000000000000000076671324746544700324470ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package http2 import ( "errors" "fmt" "sync" ) // Buffer chunks are allocated from a pool to reduce pressure on GC. // The maximum wasted space per dataBuffer is 2x the largest size class, // which happens when the dataBuffer has multiple chunks and there is // one unread byte in both the first and last chunks. We use a few size // classes to minimize overheads for servers that typically receive very // small request bodies. // // TODO: Benchmark to determine if the pools are necessary. 
The GC may have // improved enough that we can instead allocate chunks like this: // make([]byte, max(16<<10, expectedBytesRemaining)) var ( dataChunkSizeClasses = []int{ 1 << 10, 2 << 10, 4 << 10, 8 << 10, 16 << 10, } dataChunkPools = [...]sync.Pool{ {New: func() interface{} { return make([]byte, 1<<10) }}, {New: func() interface{} { return make([]byte, 2<<10) }}, {New: func() interface{} { return make([]byte, 4<<10) }}, {New: func() interface{} { return make([]byte, 8<<10) }}, {New: func() interface{} { return make([]byte, 16<<10) }}, } ) func getDataBufferChunk(size int64) []byte { i := 0 for ; i < len(dataChunkSizeClasses)-1; i++ { if size <= int64(dataChunkSizeClasses[i]) { break } } return dataChunkPools[i].Get().([]byte) } func putDataBufferChunk(p []byte) { for i, n := range dataChunkSizeClasses { if len(p) == n { dataChunkPools[i].Put(p) return } } panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } // dataBuffer is an io.ReadWriter backed by a list of data chunks. // Each dataBuffer is used to read DATA frames on a single stream. // The buffer is divided into chunks so the server can limit the // total memory used by a single connection without limiting the // request body size on any single stream. type dataBuffer struct { chunks [][]byte r int // next byte to read is chunks[0][r] w int // next byte to write is chunks[len(chunks)-1][w] size int // total buffered bytes expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) } var errReadEmpty = errors.New("read from empty dataBuffer") // Read copies bytes from the buffer into p. // It is an error to read when no data is available. func (b *dataBuffer) Read(p []byte) (int, error) { if b.size == 0 { return 0, errReadEmpty } var ntotal int for len(p) > 0 && b.size > 0 { readFrom := b.bytesFromFirstChunk() n := copy(p, readFrom) p = p[n:] ntotal += n b.r += n b.size -= n // If the first chunk has been consumed, advance to the next chunk. if b.r == len(b.chunks[0]) { putDataBufferChunk(b.chunks[0]) end := len(b.chunks) - 1 copy(b.chunks[:end], b.chunks[1:]) b.chunks[end] = nil b.chunks = b.chunks[:end] b.r = 0 } } return ntotal, nil } func (b *dataBuffer) bytesFromFirstChunk() []byte { if len(b.chunks) == 1 { return b.chunks[0][b.r:b.w] } return b.chunks[0][b.r:] } // Len returns the number of bytes of the unread portion of the buffer. func (b *dataBuffer) Len() int { return b.size } // Write appends p to the buffer. func (b *dataBuffer) Write(p []byte) (int, error) { ntotal := len(p) for len(p) > 0 { // If the last chunk is empty, allocate a new chunk. Try to allocate // enough to fully copy p plus any additional bytes we expect to // receive. However, this may allocate less than len(p). want := int64(len(p)) if b.expected > want { want = b.expected } chunk := b.lastChunkOrAlloc(want) n := copy(chunk[b.w:], p) p = p[n:] b.w += n b.size += n b.expected -= int64(n) } return ntotal, nil } func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { if len(b.chunks) != 0 { last := b.chunks[len(b.chunks)-1] if b.w < len(last) { return last } } chunk := getDataBufferChunk(want) b.chunks = append(b.chunks, chunk) b.w = 0 return chunk } errors.go000066400000000000000000000077201324746544700316460ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package http2 import ( "errors" "fmt" ) // An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. type ErrCode uint32 const ( ErrCodeNo ErrCode = 0x0 ErrCodeProtocol ErrCode = 0x1 ErrCodeInternal ErrCode = 0x2 ErrCodeFlowControl ErrCode = 0x3 ErrCodeSettingsTimeout ErrCode = 0x4 ErrCodeStreamClosed ErrCode = 0x5 ErrCodeFrameSize ErrCode = 0x6 ErrCodeRefusedStream ErrCode = 0x7 ErrCodeCancel ErrCode = 0x8 ErrCodeCompression ErrCode = 0x9 ErrCodeConnect ErrCode = 0xa ErrCodeEnhanceYourCalm ErrCode = 0xb ErrCodeInadequateSecurity ErrCode = 0xc ErrCodeHTTP11Required ErrCode = 0xd ) var errCodeName = map[ErrCode]string{ ErrCodeNo: "NO_ERROR", ErrCodeProtocol: "PROTOCOL_ERROR", ErrCodeInternal: "INTERNAL_ERROR", ErrCodeFlowControl: "FLOW_CONTROL_ERROR", ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", ErrCodeStreamClosed: "STREAM_CLOSED", ErrCodeFrameSize: "FRAME_SIZE_ERROR", ErrCodeRefusedStream: "REFUSED_STREAM", ErrCodeCancel: "CANCEL", ErrCodeCompression: "COMPRESSION_ERROR", ErrCodeConnect: "CONNECT_ERROR", ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", } func (e ErrCode) String() string { if s, ok := errCodeName[e]; ok { return s } return fmt.Sprintf("unknown error code 0x%x", uint32(e)) } // ConnectionError is an error that results in the termination of the // entire connection. type ConnectionError ErrCode func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } // StreamError is an error that only affects one stream within an // HTTP/2 connection. type StreamError struct { StreamID uint32 Code ErrCode Cause error // optional additional detail } func streamError(id uint32, code ErrCode) StreamError { return StreamError{StreamID: id, Code: code} } func (e StreamError) Error() string { if e.Cause != nil { return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) } return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) } // 6.9.1 The Flow Control Window // "If a sender receives a WINDOW_UPDATE that causes a flow control // window to exceed this maximum it MUST terminate either the stream // or the connection, as appropriate. For streams, [...]; for the // connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." type goAwayFlowError struct{} func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } // connErrorReason wraps a ConnectionError with an informative error about why it occurs. // Errors of this type are only returned by the frame parser functions // and converted into ConnectionError(ErrCodeProtocol). 
type connError struct { Code ErrCode Reason string } func (e connError) Error() string { return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) } type pseudoHeaderError string func (e pseudoHeaderError) Error() string { return fmt.Sprintf("invalid pseudo-header %q", string(e)) } type duplicatePseudoHeaderError string func (e duplicatePseudoHeaderError) Error() string { return fmt.Sprintf("duplicate pseudo-header %q", string(e)) } type headerFieldNameError string func (e headerFieldNameError) Error() string { return fmt.Sprintf("invalid header field name %q", string(e)) } type headerFieldValueError string func (e headerFieldValueError) Error() string { return fmt.Sprintf("invalid header field value %q", string(e)) } var ( errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") errPseudoAfterRegular = errors.New("pseudo header field after regular") ) flow.go000066400000000000000000000021241324746544700312720ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Flow control package http2 // flow is the flow control window's size. type flow struct { // n is the number of DATA bytes we're allowed to send. // A flow is kept both on a conn and a per-stream. n int32 // conn points to the shared connection-level flow that is // shared by all streams on that conn. It is nil for the flow // that's on the conn directly. conn *flow } func (f *flow) setConnFlow(cf *flow) { f.conn = cf } func (f *flow) available() int32 { n := f.n if f.conn != nil && f.conn.n < n { n = f.conn.n } return n } func (f *flow) take(n int32) { if n > f.available() { panic("internal error: took too much") } f.n -= n if f.conn != nil { f.conn.n -= n } } // add adds n bytes (positive or negative) to the flow control window. // It returns false if the sum would exceed 2^31-1. func (f *flow) add(n int32) bool { remain := (1<<31 - 1) - f.n if n > remain { return false } f.n += n return true } frame.go000066400000000000000000001276351324746544700314340ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package http2 import ( "bytes" "encoding/binary" "errors" "fmt" "io" "log" "strings" "sync" "golang.org/x/net/http2/hpack" "golang.org/x/net/lex/httplex" ) const frameHeaderLen = 9 var padZeros = make([]byte, 255) // zeros for padding // A FrameType is a registered frame type as defined in // http://http2.github.io/http2-spec/#rfc.section.11.2 type FrameType uint8 const ( FrameData FrameType = 0x0 FrameHeaders FrameType = 0x1 FramePriority FrameType = 0x2 FrameRSTStream FrameType = 0x3 FrameSettings FrameType = 0x4 FramePushPromise FrameType = 0x5 FramePing FrameType = 0x6 FrameGoAway FrameType = 0x7 FrameWindowUpdate FrameType = 0x8 FrameContinuation FrameType = 0x9 ) var frameName = map[FrameType]string{ FrameData: "DATA", FrameHeaders: "HEADERS", FramePriority: "PRIORITY", FrameRSTStream: "RST_STREAM", FrameSettings: "SETTINGS", FramePushPromise: "PUSH_PROMISE", FramePing: "PING", FrameGoAway: "GOAWAY", FrameWindowUpdate: "WINDOW_UPDATE", FrameContinuation: "CONTINUATION", } func (t FrameType) String() string { if s, ok := frameName[t]; ok { return s } return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) } // Flags is a bitmask of HTTP/2 flags. // The meaning of flags varies depending on the frame type. type Flags uint8 // Has reports whether f contains all (0 or more) flags in v. func (f Flags) Has(v Flags) bool { return (f & v) == v } // Frame-specific FrameHeader flag bits. const ( // Data Frame FlagDataEndStream Flags = 0x1 FlagDataPadded Flags = 0x8 // Headers Frame FlagHeadersEndStream Flags = 0x1 FlagHeadersEndHeaders Flags = 0x4 FlagHeadersPadded Flags = 0x8 FlagHeadersPriority Flags = 0x20 // Settings Frame FlagSettingsAck Flags = 0x1 // Ping Frame FlagPingAck Flags = 0x1 // Continuation Frame FlagContinuationEndHeaders Flags = 0x4 FlagPushPromiseEndHeaders Flags = 0x4 FlagPushPromisePadded Flags = 0x8 ) var flagName = map[FrameType]map[Flags]string{ FrameData: { FlagDataEndStream: "END_STREAM", FlagDataPadded: "PADDED", }, FrameHeaders: { FlagHeadersEndStream: "END_STREAM", FlagHeadersEndHeaders: "END_HEADERS", FlagHeadersPadded: "PADDED", FlagHeadersPriority: "PRIORITY", }, FrameSettings: { FlagSettingsAck: "ACK", }, FramePing: { FlagPingAck: "ACK", }, FrameContinuation: { FlagContinuationEndHeaders: "END_HEADERS", }, FramePushPromise: { FlagPushPromiseEndHeaders: "END_HEADERS", FlagPushPromisePadded: "PADDED", }, } // a frameParser parses a frame given its FrameHeader and payload // bytes. The length of payload will always equal fh.Length (which // might be 0). type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) var frameParsers = map[FrameType]frameParser{ FrameData: parseDataFrame, FrameHeaders: parseHeadersFrame, FramePriority: parsePriorityFrame, FrameRSTStream: parseRSTStreamFrame, FrameSettings: parseSettingsFrame, FramePushPromise: parsePushPromise, FramePing: parsePingFrame, FrameGoAway: parseGoAwayFrame, FrameWindowUpdate: parseWindowUpdateFrame, FrameContinuation: parseContinuationFrame, } func typeFrameParser(t FrameType) frameParser { if f := frameParsers[t]; f != nil { return f } return parseUnknownFrame } // A FrameHeader is the 9 byte header of all HTTP/2 frames. // // See http://http2.github.io/http2-spec/#FrameHeader type FrameHeader struct { valid bool // caller can access []byte fields in the Frame // Type is the 1 byte frame type. There are ten standard frame // types, but extension frame types may be written by WriteRawFrame // and will be returned by ReadFrame (as UnknownFrame). 
Type FrameType // Flags are the 1 byte of 8 potential bit flags per frame. // They are specific to the frame type. Flags Flags // Length is the length of the frame, not including the 9 byte header. // The maximum size is one byte less than 16MB (uint24), but only // frames up to 16KB are allowed without peer agreement. Length uint32 // StreamID is which stream this frame is for. Certain frames // are not stream-specific, in which case this field is 0. StreamID uint32 } // Header returns h. It exists so FrameHeaders can be embedded in other // specific frame types and implement the Frame interface. func (h FrameHeader) Header() FrameHeader { return h } func (h FrameHeader) String() string { var buf bytes.Buffer buf.WriteString("[FrameHeader ") h.writeDebug(&buf) buf.WriteByte(']') return buf.String() } func (h FrameHeader) writeDebug(buf *bytes.Buffer) { buf.WriteString(h.Type.String()) if h.Flags != 0 { buf.WriteString(" flags=") set := 0 for i := uint8(0); i < 8; i++ { if h.Flags&(1< 1 { buf.WriteByte('|') } name := flagName[h.Type][Flags(1<>24), byte(streamID>>16), byte(streamID>>8), byte(streamID)) } func (f *Framer) endWrite() error { // Now that we know the final size, fill in the FrameHeader in // the space previously reserved for it. Abuse append. length := len(f.wbuf) - frameHeaderLen if length >= (1 << 24) { return ErrFrameTooLarge } _ = append(f.wbuf[:0], byte(length>>16), byte(length>>8), byte(length)) if f.logWrites { f.logWrite() } n, err := f.w.Write(f.wbuf) if err == nil && n != len(f.wbuf) { err = io.ErrShortWrite } return err } func (f *Framer) logWrite() { if f.debugFramer == nil { f.debugFramerBuf = new(bytes.Buffer) f.debugFramer = NewFramer(nil, f.debugFramerBuf) f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below // Let us read anything, even if we accidentally wrote it // in the wrong order: f.debugFramer.AllowIllegalReads = true } f.debugFramerBuf.Write(f.wbuf) fr, err := f.debugFramer.ReadFrame() if err != nil { f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) return } f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) } func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } func (f *Framer) writeUint32(v uint32) { f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) } const ( minMaxFrameSize = 1 << 14 maxFrameSize = 1<<24 - 1 ) // SetReuseFrames allows the Framer to reuse Frames. // If called on a Framer, Frames returned by calls to ReadFrame are only // valid until the next call to ReadFrame. func (fr *Framer) SetReuseFrames() { if fr.frameCache != nil { return } fr.frameCache = &frameCache{} } type frameCache struct { dataFrame DataFrame } func (fc *frameCache) getDataFrame() *DataFrame { if fc == nil { return &DataFrame{} } return &fc.dataFrame } // NewFramer returns a Framer that writes frames to w and reads them from r. 
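// Illustrative sketch, not part of the vendored file: a minimal ReadFrame loop
// over a Framer built from an arbitrary io.ReadWriter (assumed to carry an
// HTTP/2 connection whose preface has already been exchanged).
//
//	fr := NewFramer(conn, conn)
//	for {
//		f, err := fr.ReadFrame()
//		if err != nil {
//			break // io.EOF, ErrFrameTooLarge, or a Connection/StreamError
//		}
//		switch f := f.(type) {
//		case *SettingsFrame:
//			if !f.IsAck() {
//				fr.WriteSettingsAck()
//			}
//		case *DataFrame:
//			_ = f.Data()
//		}
//	}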
func NewFramer(w io.Writer, r io.Reader) *Framer { fr := &Framer{ w: w, r: r, logReads: logFrameReads, logWrites: logFrameWrites, debugReadLoggerf: log.Printf, debugWriteLoggerf: log.Printf, } fr.getReadBuf = func(size uint32) []byte { if cap(fr.readBuf) >= int(size) { return fr.readBuf[:size] } fr.readBuf = make([]byte, size) return fr.readBuf } fr.SetMaxReadFrameSize(maxFrameSize) return fr } // SetMaxReadFrameSize sets the maximum size of a frame // that will be read by a subsequent call to ReadFrame. // It is the caller's responsibility to advertise this // limit with a SETTINGS frame. func (fr *Framer) SetMaxReadFrameSize(v uint32) { if v > maxFrameSize { v = maxFrameSize } fr.maxReadSize = v } // ErrorDetail returns a more detailed error of the last error // returned by Framer.ReadFrame. For instance, if ReadFrame // returns a StreamError with code PROTOCOL_ERROR, ErrorDetail // will say exactly what was invalid. ErrorDetail is not guaranteed // to return a non-nil value and like the rest of the http2 package, // its return value is not protected by an API compatibility promise. // ErrorDetail is reset after the next call to ReadFrame. func (fr *Framer) ErrorDetail() error { return fr.errDetail } // ErrFrameTooLarge is returned from Framer.ReadFrame when the peer // sends a frame that is larger than declared with SetMaxReadFrameSize. var ErrFrameTooLarge = errors.New("http2: frame too large") // terminalReadFrameError reports whether err is an unrecoverable // error from ReadFrame and no other frames should be read. func terminalReadFrameError(err error) bool { if _, ok := err.(StreamError); ok { return false } return err != nil } // ReadFrame reads a single frame. The returned Frame is only valid // until the next call to ReadFrame. // // If the frame is larger than previously set with SetMaxReadFrameSize, the // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. func (fr *Framer) ReadFrame() (Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { fr.lastFrame.invalidate() } fh, err := readFrameHeader(fr.headerBuf[:], fr.r) if err != nil { return nil, err } if fh.Length > fr.maxReadSize { return nil, ErrFrameTooLarge } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { return nil, err } f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) if err != nil { if ce, ok := err.(connError); ok { return nil, fr.connError(ce.Code, ce.Reason) } return nil, err } if err := fr.checkFrameOrder(f); err != nil { return nil, err } if fr.logReads { fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) } if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { return fr.readMetaFrame(f.(*HeadersFrame)) } return f, nil } // connError returns ConnectionError(code) but first // stashes away a public reason to the caller can optionally relay it // to the peer before hanging up on them. This might help others debug // their implementations. func (fr *Framer) connError(code ErrCode, reason string) error { fr.errDetail = errors.New(reason) return ConnectionError(code) } // checkFrameOrder reports an error if f is an invalid frame to return // next from ReadFrame. Mostly it checks whether HEADERS and // CONTINUATION frames are contiguous. 
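// For example (illustrative note, not part of the vendored file): after a
// HEADERS frame on stream 5 without the END_HEADERS flag, the only frame the
// reader accepts next is a CONTINUATION on stream 5; a DATA frame on stream 5,
// or a CONTINUATION on stream 7, is rejected as a PROTOCOL_ERROR connection error.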
func (fr *Framer) checkFrameOrder(f Frame) error { last := fr.lastFrame fr.lastFrame = f if fr.AllowIllegalReads { return nil } fh := f.Header() if fr.lastHeaderStream != 0 { if fh.Type != FrameContinuation { return fr.connError(ErrCodeProtocol, fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", fh.Type, fh.StreamID, last.Header().Type, fr.lastHeaderStream)) } if fh.StreamID != fr.lastHeaderStream { return fr.connError(ErrCodeProtocol, fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", fh.StreamID, fr.lastHeaderStream)) } } else if fh.Type == FrameContinuation { return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) } switch fh.Type { case FrameHeaders, FrameContinuation: if fh.Flags.Has(FlagHeadersEndHeaders) { fr.lastHeaderStream = 0 } else { fr.lastHeaderStream = fh.StreamID } } return nil } // A DataFrame conveys arbitrary, variable-length sequences of octets // associated with a stream. // See http://http2.github.io/http2-spec/#rfc.section.6.1 type DataFrame struct { FrameHeader data []byte } func (f *DataFrame) StreamEnded() bool { return f.FrameHeader.Flags.Has(FlagDataEndStream) } // Data returns the frame's data octets, not including any padding // size byte or padding suffix bytes. // The caller must not retain the returned memory past the next // call to ReadFrame. func (f *DataFrame) Data() []byte { f.checkValid() return f.data } func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { // DATA frames MUST be associated with a stream. If a // DATA frame is received whose stream identifier // field is 0x0, the recipient MUST respond with a // connection error (Section 5.4.1) of type // PROTOCOL_ERROR. return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} } f := fc.getDataFrame() f.FrameHeader = fh var padSize byte if fh.Flags.Has(FlagDataPadded) { var err error payload, padSize, err = readByte(payload) if err != nil { return nil, err } } if int(padSize) > len(payload) { // If the length of the padding is greater than the // length of the frame payload, the recipient MUST // treat this as a connection error. // Filed: https://github.com/http2/http2-spec/issues/610 return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} } f.data = payload[:len(payload)-int(padSize)] return f, nil } var ( errStreamID = errors.New("invalid stream ID") errDepStreamID = errors.New("invalid dependent stream ID") errPadLength = errors.New("pad length too large") errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") ) func validStreamIDOrZero(streamID uint32) bool { return streamID&(1<<31) == 0 } func validStreamID(streamID uint32) bool { return streamID != 0 && streamID&(1<<31) == 0 } // WriteData writes a DATA frame. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility not to violate the maximum frame size // and to not call other Write methods concurrently. func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { return f.WriteDataPadded(streamID, endStream, data, nil) } // WriteData writes a DATA frame with optional padding. // // If pad is nil, the padding bit is not sent. // The length of pad must not exceed 255 bytes. // The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. // // It will perform exactly one Write to the underlying Writer. 
// It is the caller's responsibility not to violate the maximum frame size // and to not call other Write methods concurrently. func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } if len(pad) > 0 { if len(pad) > 255 { return errPadLength } if !f.AllowIllegalWrites { for _, b := range pad { if b != 0 { // "Padding octets MUST be set to zero when sending." return errPadBytes } } } } var flags Flags if endStream { flags |= FlagDataEndStream } if pad != nil { flags |= FlagDataPadded } f.startWrite(FrameData, flags, streamID) if pad != nil { f.wbuf = append(f.wbuf, byte(len(pad))) } f.wbuf = append(f.wbuf, data...) f.wbuf = append(f.wbuf, pad...) return f.endWrite() } // A SettingsFrame conveys configuration parameters that affect how // endpoints communicate, such as preferences and constraints on peer // behavior. // // See http://http2.github.io/http2-spec/#SETTINGS type SettingsFrame struct { FrameHeader p []byte } func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { // When this (ACK 0x1) bit is set, the payload of the // SETTINGS frame MUST be empty. Receipt of a // SETTINGS frame with the ACK flag set and a length // field value other than 0 MUST be treated as a // connection error (Section 5.4.1) of type // FRAME_SIZE_ERROR. return nil, ConnectionError(ErrCodeFrameSize) } if fh.StreamID != 0 { // SETTINGS frames always apply to a connection, // never a single stream. The stream identifier for a // SETTINGS frame MUST be zero (0x0). If an endpoint // receives a SETTINGS frame whose stream identifier // field is anything other than 0x0, the endpoint MUST // respond with a connection error (Section 5.4.1) of // type PROTOCOL_ERROR. return nil, ConnectionError(ErrCodeProtocol) } if len(p)%6 != 0 { // Expecting even number of 6 byte settings. return nil, ConnectionError(ErrCodeFrameSize) } f := &SettingsFrame{FrameHeader: fh, p: p} if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { // Values above the maximum flow control window size of 2^31 - 1 MUST // be treated as a connection error (Section 5.4.1) of type // FLOW_CONTROL_ERROR. return nil, ConnectionError(ErrCodeFlowControl) } return f, nil } func (f *SettingsFrame) IsAck() bool { return f.FrameHeader.Flags.Has(FlagSettingsAck) } func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { f.checkValid() buf := f.p for len(buf) > 0 { settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) if settingID == s { return binary.BigEndian.Uint32(buf[2:6]), true } buf = buf[6:] } return 0, false } // ForeachSetting runs fn for each setting. // It stops and returns the first error. func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { f.checkValid() buf := f.p for len(buf) > 0 { if err := fn(Setting{ SettingID(binary.BigEndian.Uint16(buf[:2])), binary.BigEndian.Uint32(buf[2:6]), }); err != nil { return err } buf = buf[6:] } return nil } // WriteSettings writes a SETTINGS frame with zero or more settings // specified and the ACK bit not set. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. 
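// Illustrative sketch, not part of the vendored file: advertising an initial
// window size right after the connection preface. Setting and the Setting*
// identifiers are defined elsewhere in this package; the field names used
// below (ID, Val) are assumed from the positional construction in
// ForeachSetting above.
//
//	if err := fr.WriteSettings(
//		Setting{ID: SettingInitialWindowSize, Val: 1 << 20},
//	); err != nil {
//		// handle the write error
//	}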
func (f *Framer) WriteSettings(settings ...Setting) error { f.startWrite(FrameSettings, 0, 0) for _, s := range settings { f.writeUint16(uint16(s.ID)) f.writeUint32(s.Val) } return f.endWrite() } // WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *Framer) WriteSettingsAck() error { f.startWrite(FrameSettings, FlagSettingsAck, 0) return f.endWrite() } // A PingFrame is a mechanism for measuring a minimal round trip time // from the sender, as well as determining whether an idle connection // is still functional. // See http://http2.github.io/http2-spec/#rfc.section.6.7 type PingFrame struct { FrameHeader Data [8]byte } func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if len(payload) != 8 { return nil, ConnectionError(ErrCodeFrameSize) } if fh.StreamID != 0 { return nil, ConnectionError(ErrCodeProtocol) } f := &PingFrame{FrameHeader: fh} copy(f.Data[:], payload) return f, nil } func (f *Framer) WritePing(ack bool, data [8]byte) error { var flags Flags if ack { flags = FlagPingAck } f.startWrite(FramePing, flags, 0) f.writeBytes(data[:]) return f.endWrite() } // A GoAwayFrame informs the remote peer to stop creating streams on this connection. // See http://http2.github.io/http2-spec/#rfc.section.6.8 type GoAwayFrame struct { FrameHeader LastStreamID uint32 ErrCode ErrCode debugData []byte } // DebugData returns any debug data in the GOAWAY frame. Its contents // are not defined. // The caller must not retain the returned memory past the next // call to ReadFrame. func (f *GoAwayFrame) DebugData() []byte { f.checkValid() return f.debugData } func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID != 0 { return nil, ConnectionError(ErrCodeProtocol) } if len(p) < 8 { return nil, ConnectionError(ErrCodeFrameSize) } return &GoAwayFrame{ FrameHeader: fh, LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), debugData: p[8:], }, nil } func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { f.startWrite(FrameGoAway, 0, 0) f.writeUint32(maxStreamID & (1<<31 - 1)) f.writeUint32(uint32(code)) f.writeBytes(debugData) return f.endWrite() } // An UnknownFrame is the frame type returned when the frame type is unknown // or no specific frame type parser exists. type UnknownFrame struct { FrameHeader p []byte } // Payload returns the frame's payload (after the header). It is not // valid to call this method after a subsequent call to // Framer.ReadFrame, nor is it valid to retain the returned slice. // The memory is owned by the Framer and is invalidated when the next // frame is read. func (f *UnknownFrame) Payload() []byte { f.checkValid() return f.p } func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { return &UnknownFrame{fh, p}, nil } // A WindowUpdateFrame is used to implement flow control. 
// See http://http2.github.io/http2-spec/#rfc.section.6.9 type WindowUpdateFrame struct { FrameHeader Increment uint32 // never read with high bit set } func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit if inc == 0 { // A receiver MUST treat the receipt of a // WINDOW_UPDATE frame with an flow control window // increment of 0 as a stream error (Section 5.4.2) of // type PROTOCOL_ERROR; errors on the connection flow // control window MUST be treated as a connection // error (Section 5.4.1). if fh.StreamID == 0 { return nil, ConnectionError(ErrCodeProtocol) } return nil, streamError(fh.StreamID, ErrCodeProtocol) } return &WindowUpdateFrame{ FrameHeader: fh, Increment: inc, }, nil } // WriteWindowUpdate writes a WINDOW_UPDATE frame. // The increment value must be between 1 and 2,147,483,647, inclusive. // If the Stream ID is zero, the window update applies to the // connection as a whole. func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { return errors.New("illegal window increment value") } f.startWrite(FrameWindowUpdate, 0, streamID) f.writeUint32(incr) return f.endWrite() } // A HeadersFrame is used to open a stream and additionally carries a // header block fragment. type HeadersFrame struct { FrameHeader // Priority is set if FlagHeadersPriority is set in the FrameHeader. Priority PriorityParam headerFragBuf []byte // not owned } func (f *HeadersFrame) HeaderBlockFragment() []byte { f.checkValid() return f.headerFragBuf } func (f *HeadersFrame) HeadersEnded() bool { return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) } func (f *HeadersFrame) StreamEnded() bool { return f.FrameHeader.Flags.Has(FlagHeadersEndStream) } func (f *HeadersFrame) HasPriority() bool { return f.FrameHeader.Flags.Has(FlagHeadersPriority) } func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { hf := &HeadersFrame{ FrameHeader: fh, } if fh.StreamID == 0 { // HEADERS frames MUST be associated with a stream. If a HEADERS frame // is received whose stream identifier field is 0x0, the recipient MUST // respond with a connection error (Section 5.4.1) of type // PROTOCOL_ERROR. return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} } var padLength uint8 if fh.Flags.Has(FlagHeadersPadded) { if p, padLength, err = readByte(p); err != nil { return } } if fh.Flags.Has(FlagHeadersPriority) { var v uint32 p, v, err = readUint32(p) if err != nil { return nil, err } hf.Priority.StreamDep = v & 0x7fffffff hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set p, hf.Priority.Weight, err = readByte(p) if err != nil { return nil, err } } if len(p)-int(padLength) <= 0 { return nil, streamError(fh.StreamID, ErrCodeProtocol) } hf.headerFragBuf = p[:len(p)-int(padLength)] return hf, nil } // HeadersFrameParam are the parameters for writing a HEADERS frame. type HeadersFrameParam struct { // StreamID is the required Stream ID to initiate. StreamID uint32 // BlockFragment is part (or all) of a Header Block. BlockFragment []byte // EndStream indicates that the header block is the last that // the endpoint will send for the identified stream. 
Setting // this flag causes the stream to enter one of "half closed" // states. EndStream bool // EndHeaders indicates that this frame contains an entire // header block and is not followed by any // CONTINUATION frames. EndHeaders bool // PadLength is the optional number of bytes of zeros to add // to this frame. PadLength uint8 // Priority, if non-zero, includes stream priority information // in the HEADER frame. Priority PriorityParam } // WriteHeaders writes a single HEADERS frame. // // This is a low-level header writing method. Encoding headers and // splitting them into any necessary CONTINUATION frames is handled // elsewhere. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *Framer) WriteHeaders(p HeadersFrameParam) error { if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { return errStreamID } var flags Flags if p.PadLength != 0 { flags |= FlagHeadersPadded } if p.EndStream { flags |= FlagHeadersEndStream } if p.EndHeaders { flags |= FlagHeadersEndHeaders } if !p.Priority.IsZero() { flags |= FlagHeadersPriority } f.startWrite(FrameHeaders, flags, p.StreamID) if p.PadLength != 0 { f.writeByte(p.PadLength) } if !p.Priority.IsZero() { v := p.Priority.StreamDep if !validStreamIDOrZero(v) && !f.AllowIllegalWrites { return errDepStreamID } if p.Priority.Exclusive { v |= 1 << 31 } f.writeUint32(v) f.writeByte(p.Priority.Weight) } f.wbuf = append(f.wbuf, p.BlockFragment...) f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) return f.endWrite() } // A PriorityFrame specifies the sender-advised priority of a stream. // See http://http2.github.io/http2-spec/#rfc.section.6.3 type PriorityFrame struct { FrameHeader PriorityParam } // PriorityParam are the stream prioritzation parameters. type PriorityParam struct { // StreamDep is a 31-bit stream identifier for the // stream that this stream depends on. Zero means no // dependency. StreamDep uint32 // Exclusive is whether the dependency is exclusive. Exclusive bool // Weight is the stream's zero-indexed weight. It should be // set together with StreamDep, or neither should be set. Per // the spec, "Add one to the value to obtain a weight between // 1 and 256." Weight uint8 } func (p PriorityParam) IsZero() bool { return p == PriorityParam{} } func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} } if len(payload) != 5 { return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} } v := binary.BigEndian.Uint32(payload[:4]) streamID := v & 0x7fffffff // mask off high bit return &PriorityFrame{ FrameHeader: fh, PriorityParam: PriorityParam{ Weight: payload[4], StreamDep: streamID, Exclusive: streamID != v, // was high bit set? }, }, nil } // WritePriority writes a PRIORITY frame. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } if !validStreamIDOrZero(p.StreamDep) { return errDepStreamID } f.startWrite(FramePriority, 0, streamID) v := p.StreamDep if p.Exclusive { v |= 1 << 31 } f.writeUint32(v) f.writeByte(p.Weight) return f.endWrite() } // A RSTStreamFrame allows for abnormal termination of a stream. 
// See http://http2.github.io/http2-spec/#rfc.section.6.4 type RSTStreamFrame struct { FrameHeader ErrCode ErrCode } func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } if fh.StreamID == 0 { return nil, ConnectionError(ErrCodeProtocol) } return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil } // WriteRSTStream writes a RST_STREAM frame. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } f.startWrite(FrameRSTStream, 0, streamID) f.writeUint32(uint32(code)) return f.endWrite() } // A ContinuationFrame is used to continue a sequence of header block fragments. // See http://http2.github.io/http2-spec/#rfc.section.6.10 type ContinuationFrame struct { FrameHeader headerFragBuf []byte } func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID == 0 { return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} } return &ContinuationFrame{fh, p}, nil } func (f *ContinuationFrame) HeaderBlockFragment() []byte { f.checkValid() return f.headerFragBuf } func (f *ContinuationFrame) HeadersEnded() bool { return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) } // WriteContinuation writes a CONTINUATION frame. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } var flags Flags if endHeaders { flags |= FlagContinuationEndHeaders } f.startWrite(FrameContinuation, flags, streamID) f.wbuf = append(f.wbuf, headerBlockFragment...) return f.endWrite() } // A PushPromiseFrame is used to initiate a server stream. // See http://http2.github.io/http2-spec/#rfc.section.6.6 type PushPromiseFrame struct { FrameHeader PromiseID uint32 headerFragBuf []byte // not owned } func (f *PushPromiseFrame) HeaderBlockFragment() []byte { f.checkValid() return f.headerFragBuf } func (f *PushPromiseFrame) HeadersEnded() bool { return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) } func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { pp := &PushPromiseFrame{ FrameHeader: fh, } if pp.StreamID == 0 { // PUSH_PROMISE frames MUST be associated with an existing, // peer-initiated stream. The stream identifier of a // PUSH_PROMISE frame indicates the stream it is associated // with. If the stream identifier field specifies the value // 0x0, a recipient MUST respond with a connection error // (Section 5.4.1) of type PROTOCOL_ERROR. return nil, ConnectionError(ErrCodeProtocol) } // The PUSH_PROMISE frame includes optional padding. // Padding fields and flags are identical to those defined for DATA frames var padLength uint8 if fh.Flags.Has(FlagPushPromisePadded) { if p, padLength, err = readByte(p); err != nil { return } } p, pp.PromiseID, err = readUint32(p) if err != nil { return } pp.PromiseID = pp.PromiseID & (1<<31 - 1) if int(padLength) > len(p) { // like the DATA frame, error out if padding is longer than the body. 
return nil, ConnectionError(ErrCodeProtocol) } pp.headerFragBuf = p[:len(p)-int(padLength)] return pp, nil } // PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. type PushPromiseParam struct { // StreamID is the required Stream ID to initiate. StreamID uint32 // PromiseID is the required Stream ID which this // Push Promises PromiseID uint32 // BlockFragment is part (or all) of a Header Block. BlockFragment []byte // EndHeaders indicates that this frame contains an entire // header block and is not followed by any // CONTINUATION frames. EndHeaders bool // PadLength is the optional number of bytes of zeros to add // to this frame. PadLength uint8 } // WritePushPromise writes a single PushPromise Frame. // // As with Header Frames, This is the low level call for writing // individual frames. Continuation frames are handled elsewhere. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility to not call other Write methods concurrently. func (f *Framer) WritePushPromise(p PushPromiseParam) error { if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { return errStreamID } var flags Flags if p.PadLength != 0 { flags |= FlagPushPromisePadded } if p.EndHeaders { flags |= FlagPushPromiseEndHeaders } f.startWrite(FramePushPromise, flags, p.StreamID) if p.PadLength != 0 { f.writeByte(p.PadLength) } if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { return errStreamID } f.writeUint32(p.PromiseID) f.wbuf = append(f.wbuf, p.BlockFragment...) f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) return f.endWrite() } // WriteRawFrame writes a raw frame. This can be used to write // extension frames unknown to this package. func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { f.startWrite(t, flags, streamID) f.writeBytes(payload) return f.endWrite() } func readByte(p []byte) (remain []byte, b byte, err error) { if len(p) == 0 { return nil, 0, io.ErrUnexpectedEOF } return p[1:], p[0], nil } func readUint32(p []byte) (remain []byte, v uint32, err error) { if len(p) < 4 { return nil, 0, io.ErrUnexpectedEOF } return p[4:], binary.BigEndian.Uint32(p[:4]), nil } type streamEnder interface { StreamEnded() bool } type headersEnder interface { HeadersEnded() bool } type headersOrContinuation interface { headersEnder HeaderBlockFragment() []byte } // A MetaHeadersFrame is the representation of one HEADERS frame and // zero or more contiguous CONTINUATION frames and the decoding of // their HPACK-encoded contents. // // This type of frame does not appear on the wire and is only returned // by the Framer when Framer.ReadMetaHeaders is set. type MetaHeadersFrame struct { *HeadersFrame // Fields are the fields contained in the HEADERS and // CONTINUATION frames. The underlying slice is owned by the // Framer and must not be retained after the next call to // ReadFrame. // // Fields are guaranteed to be in the correct http2 order and // not have unknown pseudo header fields or invalid header // field names or values. Required pseudo header fields may be // missing, however. Use the MetaHeadersFrame.Pseudo accessor // method access pseudo headers. Fields []hpack.HeaderField // Truncated is whether the max header list size limit was hit // and Fields is incomplete. The hpack decoder state is still // valid, however. Truncated bool } // PseudoValue returns the given pseudo header field's value. // The provided pseudo field should not contain the leading colon. 
func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { for _, hf := range mh.Fields { if !hf.IsPseudo() { return "" } if hf.Name[1:] == pseudo { return hf.Value } } return "" } // RegularFields returns the regular (non-pseudo) header fields of mh. // The caller does not own the returned slice. func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { for i, hf := range mh.Fields { if !hf.IsPseudo() { return mh.Fields[i:] } } return nil } // PseudoFields returns the pseudo header fields of mh. // The caller does not own the returned slice. func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { for i, hf := range mh.Fields { if !hf.IsPseudo() { return mh.Fields[:i] } } return mh.Fields } func (mh *MetaHeadersFrame) checkPseudos() error { var isRequest, isResponse bool pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { case ":method", ":path", ":scheme", ":authority": isRequest = true case ":status": isResponse = true default: return pseudoHeaderError(hf.Name) } // Check for duplicates. // This would be a bad algorithm, but N is 4. // And this doesn't allocate. for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { return duplicatePseudoHeaderError(hf.Name) } } } if isRequest && isResponse { return errMixPseudoHeaderTypes } return nil } func (fr *Framer) maxHeaderStringLen() int { v := fr.maxHeaderListSize() if uint32(int(v)) == v { return int(v) } // They had a crazy big number for MaxHeaderBytes anyway, // so give them unlimited header lengths: return 0 } // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } mh := &MetaHeadersFrame{ HeadersFrame: hf, } var remainSize = fr.maxHeaderListSize() var sawRegular bool var invalid error // pseudo header field errors hdec := fr.ReadMetaHeaders hdec.SetEmitEnabled(true) hdec.SetMaxStringLength(fr.maxHeaderStringLen()) hdec.SetEmitFunc(func(hf hpack.HeaderField) { if VerboseLogs && fr.logReads { fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) } if !httplex.ValidHeaderFieldValue(hf.Value) { invalid = headerFieldValueError(hf.Value) } isPseudo := strings.HasPrefix(hf.Name, ":") if isPseudo { if sawRegular { invalid = errPseudoAfterRegular } } else { sawRegular = true if !validWireHeaderFieldName(hf.Name) { invalid = headerFieldNameError(hf.Name) } } if invalid != nil { hdec.SetEmitEnabled(false) return } size := hf.Size() if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true return } remainSize -= size mh.Fields = append(mh.Fields, hf) }) // Lose reference to MetaHeadersFrame: defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() if _, err := hdec.Write(frag); err != nil { return nil, ConnectionError(ErrCodeCompression) } if hc.HeadersEnded() { break } if f, err := fr.ReadFrame(); err != nil { return nil, err } else { hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder } } mh.HeadersFrame.headerFragBuf = nil mh.HeadersFrame.invalidate() if err := hdec.Close(); err != nil { return nil, ConnectionError(ErrCodeCompression) } if invalid != nil { fr.errDetail = invalid if VerboseLogs { log.Printf("http2: invalid header: %v", invalid) } return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} } if err := 
mh.checkPseudos(); err != nil { fr.errDetail = err if VerboseLogs { log.Printf("http2: invalid pseudo headers: %v", err) } return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} } return mh, nil } func summarizeFrame(f Frame) string { var buf bytes.Buffer f.Header().writeDebug(&buf) switch f := f.(type) { case *SettingsFrame: n := 0 f.ForeachSetting(func(s Setting) error { n++ if n == 1 { buf.WriteString(", settings:") } fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) return nil }) if n > 0 { buf.Truncate(buf.Len() - 1) // remove trailing comma } case *DataFrame: data := f.Data() const max = 256 if len(data) > max { data = data[:max] } fmt.Fprintf(&buf, " data=%q", data) if len(f.Data()) > max { fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) } case *WindowUpdateFrame: if f.StreamID == 0 { buf.WriteString(" (conn)") } fmt.Fprintf(&buf, " incr=%v", f.Increment) case *PingFrame: fmt.Fprintf(&buf, " ping=%q", f.Data[:]) case *GoAwayFrame: fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", f.LastStreamID, f.ErrCode, f.debugData) case *RSTStreamFrame: fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) } return buf.String() } go16.go000066400000000000000000000023441324746544700311030ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build go1.6 package http2 import ( "crypto/tls" "net/http" "time" ) func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { return t1.ExpectContinueTimeout } // isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. func isBadCipher(cipher uint16) bool { switch cipher { case tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, tls.TLS_RSA_WITH_AES_128_CBC_SHA, tls.TLS_RSA_WITH_AES_256_CBC_SHA, tls.TLS_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_AES_256_GCM_SHA384, tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: // Reject cipher suites from Appendix A. // "This list includes those cipher suites that do not // offer an ephemeral key exchange and those that are // based on the TLS null, stream or block cipher type" return true default: return false } } go17.go000066400000000000000000000050471324746544700311070ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
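// A minimal, self-contained sketch (not part of the vendored sources) of how the Framer API
// documented in frame.go above is typically used: frames are written with the Write* methods
// and read back with ReadFrame. The shared buffer, stream ID, setting value and payload are
// illustrative assumptions.
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf) // writer and reader share one in-memory buffer

	// Each Write* call performs exactly one Write to the underlying writer.
	if err := fr.WriteSettings(http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 14}); err != nil {
		log.Fatal(err)
	}
	if err := fr.WriteData(1, true, []byte("hello")); err != nil {
		log.Fatal(err)
	}

	// Read the frames back; a DataFrame's Data() must not be retained past the next ReadFrame.
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			break // io.EOF once the buffer is drained
		}
		fmt.Printf("%T stream=%d\n", f, f.Header().StreamID)
	}
}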
// +build go1.7 package http2 import ( "context" "net" "net/http" "net/http/httptrace" "time" ) type contextContext interface { context.Context } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { ctx, cancel = context.WithCancel(context.Background()) ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) if hs := opts.baseConfig(); hs != nil { ctx = context.WithValue(ctx, http.ServerContextKey, hs) } return } func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { return context.WithCancel(ctx) } func requestWithContext(req *http.Request, ctx contextContext) *http.Request { return req.WithContext(ctx) } type clientTrace httptrace.ClientTrace func reqContext(r *http.Request) context.Context { return r.Context() } func (t *Transport) idleConnTimeout() time.Duration { if t.t1 != nil { return t.t1.IdleConnTimeout } return 0 } func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } func traceGotConn(req *http.Request, cc *ClientConn) { trace := httptrace.ContextClientTrace(req.Context()) if trace == nil || trace.GotConn == nil { return } ci := httptrace.GotConnInfo{Conn: cc.tconn} cc.mu.Lock() ci.Reused = cc.nextStreamID > 1 ci.WasIdle = len(cc.streams) == 0 && ci.Reused if ci.WasIdle && !cc.lastActive.IsZero() { ci.IdleTime = time.Now().Sub(cc.lastActive) } cc.mu.Unlock() trace.GotConn(ci) } func traceWroteHeaders(trace *clientTrace) { if trace != nil && trace.WroteHeaders != nil { trace.WroteHeaders() } } func traceGot100Continue(trace *clientTrace) { if trace != nil && trace.Got100Continue != nil { trace.Got100Continue() } } func traceWait100Continue(trace *clientTrace) { if trace != nil && trace.Wait100Continue != nil { trace.Wait100Continue() } } func traceWroteRequest(trace *clientTrace, err error) { if trace != nil && trace.WroteRequest != nil { trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) } } func traceFirstResponseByte(trace *clientTrace) { if trace != nil && trace.GotFirstResponseByte != nil { trace.GotFirstResponseByte() } } func requestTrace(req *http.Request) *clientTrace { trace := httptrace.ContextClientTrace(req.Context()) return (*clientTrace)(trace) } // Ping sends a PING frame to the server and waits for the ack. func (cc *ClientConn) Ping(ctx context.Context) error { return cc.ping(ctx) } go17_not18.go000066400000000000000000000025311324746544700321330ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
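// A short sketch (not part of the vendored sources) of the net/http/httptrace hooks that the
// trace* helpers in go17.go above forward to; the request URL is a placeholder assumption.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	trace := &httptrace.ClientTrace{
		GotConn:              func(info httptrace.GotConnInfo) { fmt.Println("got conn, reused:", info.Reused) },
		WroteHeaders:         func() { fmt.Println("wrote request headers") },
		GotFirstResponseByte: func() { fmt.Println("got first response byte") },
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	resp.Body.Close()
	fmt.Println("status:", resp.Status)
}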
// +build go1.7,!go1.8 package http2 import "crypto/tls" // temporary copy of Go 1.7's private tls.Config.clone: func cloneTLSConfig(c *tls.Config) *tls.Config { return &tls.Config{ Rand: c.Rand, Time: c.Time, Certificates: c.Certificates, NameToCertificate: c.NameToCertificate, GetCertificate: c.GetCertificate, RootCAs: c.RootCAs, NextProtos: c.NextProtos, ServerName: c.ServerName, ClientAuth: c.ClientAuth, ClientCAs: c.ClientCAs, InsecureSkipVerify: c.InsecureSkipVerify, CipherSuites: c.CipherSuites, PreferServerCipherSuites: c.PreferServerCipherSuites, SessionTicketsDisabled: c.SessionTicketsDisabled, SessionTicketKey: c.SessionTicketKey, ClientSessionCache: c.ClientSessionCache, MinVersion: c.MinVersion, MaxVersion: c.MaxVersion, CurvePreferences: c.CurvePreferences, DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, Renegotiation: c.Renegotiation, } } go18.go000066400000000000000000000022731324746544700311060ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build go1.8 package http2 import ( "crypto/tls" "io" "net/http" ) func cloneTLSConfig(c *tls.Config) *tls.Config { c2 := c.Clone() c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264 return c2 } var _ http.Pusher = (*responseWriter)(nil) // Push implements http.Pusher. func (w *responseWriter) Push(target string, opts *http.PushOptions) error { internalOpts := pushOptions{} if opts != nil { internalOpts.Method = opts.Method internalOpts.Header = opts.Header } return w.push(target, internalOpts) } func configureServer18(h1 *http.Server, h2 *Server) error { if h2.IdleTimeout == 0 { if h1.IdleTimeout != 0 { h2.IdleTimeout = h1.IdleTimeout } else { h2.IdleTimeout = h1.ReadTimeout } } return nil } func shouldLogPanic(panicValue interface{}) bool { return panicValue != nil && panicValue != http.ErrAbortHandler } func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { return req.GetBody } func reqBodyIsNoBody(body io.ReadCloser) bool { return body == http.NoBody } gotrack.go000066400000000000000000000061301324746544700317560ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Defensive debug-only utility to track that functions run on the // goroutine that they're supposed to. 
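// A minimal sketch (not part of the vendored sources) of the technique gotrack.go uses below:
// the current goroutine ID is parsed out of the first line of runtime.Stack output, which has
// the form "goroutine 42 [running]:". The 64-byte buffer size is an illustrative assumption.
package main

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
)

func currentGoroutineID() uint64 {
	buf := make([]byte, 64)
	buf = buf[:runtime.Stack(buf, false)]
	buf = bytes.TrimPrefix(buf, []byte("goroutine "))
	i := bytes.IndexByte(buf, ' ')
	if i < 0 {
		return 0
	}
	n, err := strconv.ParseUint(string(buf[:i]), 10, 64)
	if err != nil {
		return 0
	}
	return n
}

func main() {
	fmt.Println("running on goroutine", currentGoroutineID())
}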
package http2 import ( "bytes" "errors" "fmt" "os" "runtime" "strconv" "sync" ) var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" type goroutineLock uint64 func newGoroutineLock() goroutineLock { if !DebugGoroutines { return 0 } return goroutineLock(curGoroutineID()) } func (g goroutineLock) check() { if !DebugGoroutines { return } if curGoroutineID() != uint64(g) { panic("running on the wrong goroutine") } } func (g goroutineLock) checkNotOn() { if !DebugGoroutines { return } if curGoroutineID() == uint64(g) { panic("running on the wrong goroutine") } } var goroutineSpace = []byte("goroutine ") func curGoroutineID() uint64 { bp := littleBuf.Get().(*[]byte) defer littleBuf.Put(bp) b := *bp b = b[:runtime.Stack(b, false)] // Parse the 4707 out of "goroutine 4707 [" b = bytes.TrimPrefix(b, goroutineSpace) i := bytes.IndexByte(b, ' ') if i < 0 { panic(fmt.Sprintf("No space found in %q", b)) } b = b[:i] n, err := parseUintBytes(b, 10, 64) if err != nil { panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) } return n } var littleBuf = sync.Pool{ New: func() interface{} { buf := make([]byte, 64) return &buf }, } // parseUintBytes is like strconv.ParseUint, but using a []byte. func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { var cutoff, maxVal uint64 if bitSize == 0 { bitSize = int(strconv.IntSize) } s0 := s switch { case len(s) < 1: err = strconv.ErrSyntax goto Error case 2 <= base && base <= 36: // valid base; nothing to do case base == 0: // Look for octal, hex prefix. switch { case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): base = 16 s = s[2:] if len(s) < 1 { err = strconv.ErrSyntax goto Error } case s[0] == '0': base = 8 default: base = 10 } default: err = errors.New("invalid base " + strconv.Itoa(base)) goto Error } n = 0 cutoff = cutoff64(base) maxVal = 1<= base { n = 0 err = strconv.ErrSyntax goto Error } if n >= cutoff { // n*base overflows n = 1<<64 - 1 err = strconv.ErrRange goto Error } n *= uint64(base) n1 := n + uint64(v) if n1 < n || n1 > maxVal { // n+v overflows n = 1<<64 - 1 err = strconv.ErrRange goto Error } n = n1 } return n, nil Error: return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} } // Return the first number n such that n*base >= 1<<64. func cutoff64(base int) uint64 { if base < 2 { return 0 } return (1<<64-1)/uint64(base) + 1 } headermap.go000066400000000000000000000026441324746544700322600ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
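// A compact round-trip sketch (not part of the vendored sources) for the hpack package whose
// encoder and decoder are defined in the files further below: an Encoder writes HPACK-encoded
// header fields to a buffer and a Decoder emits them back via its callback. The header names,
// values and the 4096-byte dynamic table size are illustrative assumptions.
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// WriteField performs a single Write to the underlying writer per field.
	fields := []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: "user-agent", Value: "sketch/0.1"},
	}
	for _, hf := range fields {
		if err := enc.WriteField(hf); err != nil {
			log.Fatal(err)
		}
	}

	// The decoder calls the emit function for every header field it parses.
	dec := hpack.NewDecoder(4096, func(hf hpack.HeaderField) {
		fmt.Printf("%s: %s\n", hf.Name, hf.Value)
	})
	if _, err := dec.Write(buf.Bytes()); err != nil {
		log.Fatal(err)
	}
	if err := dec.Close(); err != nil {
		log.Fatal(err)
	}
}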
package http2 import ( "net/http" "strings" ) var ( commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case ) func init() { for _, v := range []string{ "accept", "accept-charset", "accept-encoding", "accept-language", "accept-ranges", "age", "access-control-allow-origin", "allow", "authorization", "cache-control", "content-disposition", "content-encoding", "content-language", "content-length", "content-location", "content-range", "content-type", "cookie", "date", "etag", "expect", "expires", "from", "host", "if-match", "if-modified-since", "if-none-match", "if-unmodified-since", "last-modified", "link", "location", "max-forwards", "proxy-authenticate", "proxy-authorization", "range", "referer", "refresh", "retry-after", "server", "set-cookie", "strict-transport-security", "trailer", "transfer-encoding", "user-agent", "vary", "via", "www-authenticate", } { chk := http.CanonicalHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk } } func lowerHeader(v string) string { if s, ok := commonLowerHeader[v]; ok { return s } return strings.ToLower(v) } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2/hpack/000077500000000000000000000000001324746544700311425ustar00rootroot00000000000000encode.go000066400000000000000000000157341324746544700326610ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2/hpack// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package hpack import ( "io" ) const ( uint32Max = ^uint32(0) initialHeaderTableSize = 4096 ) type Encoder struct { dynTab dynamicTable // minSize is the minimum table size set by // SetMaxDynamicTableSize after the previous Header Table Size // Update. minSize uint32 // maxSizeLimit is the maximum table size this encoder // supports. This will protect the encoder from too large // size. maxSizeLimit uint32 // tableSizeUpdate indicates whether "Header Table Size // Update" is required. tableSizeUpdate bool w io.Writer buf []byte } // NewEncoder returns a new Encoder which performs HPACK encoding. An // encoded data is written to w. func NewEncoder(w io.Writer) *Encoder { e := &Encoder{ minSize: uint32Max, maxSizeLimit: initialHeaderTableSize, tableSizeUpdate: false, w: w, } e.dynTab.table.init() e.dynTab.setMaxSize(initialHeaderTableSize) return e } // WriteField encodes f into a single Write to e's underlying Writer. // This function may also produce bytes for "Header Table Size Update" // if necessary. If produced, it is done before encoding f. func (e *Encoder) WriteField(f HeaderField) error { e.buf = e.buf[:0] if e.tableSizeUpdate { e.tableSizeUpdate = false if e.minSize < e.dynTab.maxSize { e.buf = appendTableSize(e.buf, e.minSize) } e.minSize = uint32Max e.buf = appendTableSize(e.buf, e.dynTab.maxSize) } idx, nameValueMatch := e.searchTable(f) if nameValueMatch { e.buf = appendIndexed(e.buf, idx) } else { indexing := e.shouldIndex(f) if indexing { e.dynTab.add(f) } if idx == 0 { e.buf = appendNewName(e.buf, f, indexing) } else { e.buf = appendIndexedName(e.buf, f, idx, indexing) } } n, err := e.w.Write(e.buf) if err == nil && n != len(e.buf) { err = io.ErrShortWrite } return err } // searchTable searches f in both stable and dynamic header tables. // The static header table is searched first. 
Only when there is no // exact match for both name and value, the dynamic header table is // then searched. If there is no match, i is 0. If both name and value // match, i is the matched index and nameValueMatch becomes true. If // only name matches, i points to that index and nameValueMatch // becomes false. func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { i, nameValueMatch = staticTable.search(f) if nameValueMatch { return i, true } j, nameValueMatch := e.dynTab.table.search(f) if nameValueMatch || (i == 0 && j != 0) { return j + uint64(staticTable.len()), nameValueMatch } return i, false } // SetMaxDynamicTableSize changes the dynamic header table size to v. // The actual size is bounded by the value passed to // SetMaxDynamicTableSizeLimit. func (e *Encoder) SetMaxDynamicTableSize(v uint32) { if v > e.maxSizeLimit { v = e.maxSizeLimit } if v < e.minSize { e.minSize = v } e.tableSizeUpdate = true e.dynTab.setMaxSize(v) } // SetMaxDynamicTableSizeLimit changes the maximum value that can be // specified in SetMaxDynamicTableSize to v. By default, it is set to // 4096, which is the same size of the default dynamic header table // size described in HPACK specification. If the current maximum // dynamic header table size is strictly greater than v, "Header Table // Size Update" will be done in the next WriteField call and the // maximum dynamic header table size is truncated to v. func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { e.maxSizeLimit = v if e.dynTab.maxSize > v { e.tableSizeUpdate = true e.dynTab.setMaxSize(v) } } // shouldIndex reports whether f should be indexed. func (e *Encoder) shouldIndex(f HeaderField) bool { return !f.Sensitive && f.Size() <= e.dynTab.maxSize } // appendIndexed appends index i, as encoded in "Indexed Header Field" // representation, to dst and returns the extended buffer. func appendIndexed(dst []byte, i uint64) []byte { first := len(dst) dst = appendVarInt(dst, 7, i) dst[first] |= 0x80 return dst } // appendNewName appends f, as encoded in one of "Literal Header field // - New Name" representation variants, to dst and returns the // extended buffer. // // If f.Sensitive is true, "Never Indexed" representation is used. If // f.Sensitive is false and indexing is true, "Inremental Indexing" // representation is used. func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) dst = appendHpackString(dst, f.Name) return appendHpackString(dst, f.Value) } // appendIndexedName appends f and index i referring indexed name // entry, as encoded in one of "Literal Header field - Indexed Name" // representation variants, to dst and returns the extended buffer. // // If f.Sensitive is true, "Never Indexed" representation is used. If // f.Sensitive is false and indexing is true, "Incremental Indexing" // representation is used. func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { first := len(dst) var n byte if indexing { n = 6 } else { n = 4 } dst = appendVarInt(dst, n, i) dst[first] |= encodeTypeByte(indexing, f.Sensitive) return appendHpackString(dst, f.Value) } // appendTableSize appends v, as encoded in "Header Table Size Update" // representation, to dst and returns the extended buffer. 
func appendTableSize(dst []byte, v uint32) []byte { first := len(dst) dst = appendVarInt(dst, 5, uint64(v)) dst[first] |= 0x20 return dst } // appendVarInt appends i, as encoded in variable integer form using n // bit prefix, to dst and returns the extended buffer. // // See // http://http2.github.io/http2-spec/compression.html#integer.representation func appendVarInt(dst []byte, n byte, i uint64) []byte { k := uint64((1 << n) - 1) if i < k { return append(dst, byte(i)) } dst = append(dst, byte(k)) i -= k for ; i >= 128; i >>= 7 { dst = append(dst, byte(0x80|(i&0x7f))) } return append(dst, byte(i)) } // appendHpackString appends s, as encoded in "String Literal" // representation, to dst and returns the the extended buffer. // // s will be encoded in Huffman codes only when it produces strictly // shorter byte string. func appendHpackString(dst []byte, s string) []byte { huffmanLength := HuffmanEncodeLength(s) if huffmanLength < uint64(len(s)) { first := len(dst) dst = appendVarInt(dst, 7, huffmanLength) dst = AppendHuffmanString(dst, s) dst[first] |= 0x80 } else { dst = appendVarInt(dst, 7, uint64(len(s))) dst = append(dst, s...) } return dst } // encodeTypeByte returns type byte. If sensitive is true, type byte // for "Never Indexed" representation is returned. If sensitive is // false and indexing is true, type byte for "Incremental Indexing" // representation is returned. Otherwise, type byte for "Without // Indexing" is returned. func encodeTypeByte(indexing, sensitive bool) byte { if sensitive { return 0x10 } if indexing { return 0x40 } return 0 } hpack.go000066400000000000000000000335161324746544700325100ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2/hpack// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package hpack implements HPACK, a compression format for // efficiently representing HTTP header fields in the context of HTTP/2. // // See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 package hpack import ( "bytes" "errors" "fmt" ) // A DecodingError is something the spec defines as a decoding error. type DecodingError struct { Err error } func (de DecodingError) Error() string { return fmt.Sprintf("decoding error: %v", de.Err) } // An InvalidIndexError is returned when an encoder references a table // entry before the static table or after the end of the dynamic table. type InvalidIndexError int func (e InvalidIndexError) Error() string { return fmt.Sprintf("invalid indexed representation index %d", int(e)) } // A HeaderField is a name-value pair. Both the name and value are // treated as opaque sequences of octets. type HeaderField struct { Name, Value string // Sensitive means that this header field should never be // indexed. Sensitive bool } // IsPseudo reports whether the header field is an http2 pseudo header. // That is, it reports whether it starts with a colon. // It is not otherwise guaranteed to be a valid pseudo header field, // though. func (hf HeaderField) IsPseudo() bool { return len(hf.Name) != 0 && hf.Name[0] == ':' } func (hf HeaderField) String() string { var suffix string if hf.Sensitive { suffix = " (sensitive)" } return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) } // Size returns the size of an entry per RFC 7541 section 4.1. 
func (hf HeaderField) Size() uint32 { // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 // "The size of the dynamic table is the sum of the size of // its entries. The size of an entry is the sum of its name's // length in octets (as defined in Section 5.2), its value's // length in octets (see Section 5.2), plus 32. The size of // an entry is calculated using the length of the name and // value without any Huffman encoding applied." // This can overflow if somebody makes a large HeaderField // Name and/or Value by hand, but we don't care, because that // won't happen on the wire because the encoding doesn't allow // it. return uint32(len(hf.Name) + len(hf.Value) + 32) } // A Decoder is the decoding context for incremental processing of // header blocks. type Decoder struct { dynTab dynamicTable emit func(f HeaderField) emitEnabled bool // whether calls to emit are enabled maxStrLen int // 0 means unlimited // buf is the unparsed buffer. It's only written to // saveBuf if it was truncated in the middle of a header // block. Because it's usually not owned, we can only // process it under Write. buf []byte // not owned; only valid during Write // saveBuf is previous data passed to Write which we weren't able // to fully parse before. Unlike buf, we own this data. saveBuf bytes.Buffer } // NewDecoder returns a new decoder with the provided maximum dynamic // table size. The emitFunc will be called for each valid field // parsed, in the same goroutine as calls to Write, before Write returns. func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { d := &Decoder{ emit: emitFunc, emitEnabled: true, } d.dynTab.table.init() d.dynTab.allowedMaxSize = maxDynamicTableSize d.dynTab.setMaxSize(maxDynamicTableSize) return d } // ErrStringLength is returned by Decoder.Write when the max string length // (as configured by Decoder.SetMaxStringLength) would be violated. var ErrStringLength = errors.New("hpack: string too long") // SetMaxStringLength sets the maximum size of a HeaderField name or // value string. If a string exceeds this length (even after any // decompression), Write will return ErrStringLength. // A value of 0 means unlimited and is the default from NewDecoder. func (d *Decoder) SetMaxStringLength(n int) { d.maxStrLen = n } // SetEmitFunc changes the callback used when new header fields // are decoded. // It must be non-nil. It does not affect EmitEnabled. func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { d.emit = emitFunc } // SetEmitEnabled controls whether the emitFunc provided to NewDecoder // should be called. The default is true. // // This facility exists to let servers enforce MAX_HEADER_LIST_SIZE // while still decoding and keeping in-sync with decoder state, but // without doing unnecessary decompression or generating unnecessary // garbage for header fields past the limit. func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } // EmitEnabled reports whether calls to the emitFunc provided to NewDecoder // are currently enabled. The default is true. func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } // TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their // underlying buffers for garbage reasons. func (d *Decoder) SetMaxDynamicTableSize(v uint32) { d.dynTab.setMaxSize(v) } // SetAllowedMaxDynamicTableSize sets the upper bound that the encoded // stream (via dynamic table size updates) may set the maximum size // to. 
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { d.dynTab.allowedMaxSize = v } type dynamicTable struct { // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 table headerFieldTable size uint32 // in bytes maxSize uint32 // current maxSize allowedMaxSize uint32 // maxSize may go up to this, inclusive } func (dt *dynamicTable) setMaxSize(v uint32) { dt.maxSize = v dt.evict() } func (dt *dynamicTable) add(f HeaderField) { dt.table.addEntry(f) dt.size += f.Size() dt.evict() } // If we're too big, evict old stuff. func (dt *dynamicTable) evict() { var n int for dt.size > dt.maxSize && n < dt.table.len() { dt.size -= dt.table.ents[n].Size() n++ } dt.table.evictOldest(n) } func (d *Decoder) maxTableIndex() int { // This should never overflow. RFC 7540 Section 6.5.2 limits the size of // the dynamic table to 2^32 bytes, where each entry will occupy more than // one byte. Further, the staticTable has a fixed, small length. return d.dynTab.table.len() + staticTable.len() } func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { // See Section 2.3.3. if i == 0 { return } if i <= uint64(staticTable.len()) { return staticTable.ents[i-1], true } if i > uint64(d.maxTableIndex()) { return } // In the dynamic table, newer entries have lower indices. // However, dt.ents[0] is the oldest entry. Hence, dt.ents is // the reversed dynamic table. dt := d.dynTab.table return dt.ents[dt.len()-(int(i)-staticTable.len())], true } // Decode decodes an entire block. // // TODO: remove this method and make it incremental later? This is // easier for debugging now. func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { var hf []HeaderField saveFunc := d.emit defer func() { d.emit = saveFunc }() d.emit = func(f HeaderField) { hf = append(hf, f) } if _, err := d.Write(p); err != nil { return nil, err } if err := d.Close(); err != nil { return nil, err } return hf, nil } func (d *Decoder) Close() error { if d.saveBuf.Len() > 0 { d.saveBuf.Reset() return DecodingError{errors.New("truncated headers")} } return nil } func (d *Decoder) Write(p []byte) (n int, err error) { if len(p) == 0 { // Prevent state machine CPU attacks (making us redo // work up to the point of finding out we don't have // enough data) return } // Only copy the data if we have to. Optimistically assume // that p will contain a complete header block. if d.saveBuf.Len() == 0 { d.buf = p } else { d.saveBuf.Write(p) d.buf = d.saveBuf.Bytes() d.saveBuf.Reset() } for len(d.buf) > 0 { err = d.parseHeaderFieldRepr() if err == errNeedMore { // Extra paranoia, making sure saveBuf won't // get too large. All the varint and string // reading code earlier should already catch // overlong things and return ErrStringLength, // but keep this as a last resort. const varIntOverhead = 8 // conservative if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { return 0, ErrStringLength } d.saveBuf.Write(d.buf) return len(p), nil } if err != nil { break } } return len(p), err } // errNeedMore is an internal sentinel error value that means the // buffer is truncated and we need to read more data before we can // continue parsing. var errNeedMore = errors.New("need more data") type indexType int const ( indexedTrue indexType = iota indexedFalse indexedNever ) func (v indexType) indexed() bool { return v == indexedTrue } func (v indexType) sensitive() bool { return v == indexedNever } // returns errNeedMore if there isn't enough data available. // any other error is fatal. // consumes d.buf iff it returns nil. 
// precondition: must be called with len(d.buf) > 0 func (d *Decoder) parseHeaderFieldRepr() error { b := d.buf[0] switch { case b&128 != 0: // Indexed representation. // High bit set? // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 return d.parseFieldIndexed() case b&192 == 64: // 6.2.1 Literal Header Field with Incremental Indexing // 0b10xxxxxx: top two bits are 10 // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 return d.parseFieldLiteral(6, indexedTrue) case b&240 == 0: // 6.2.2 Literal Header Field without Indexing // 0b0000xxxx: top four bits are 0000 // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 return d.parseFieldLiteral(4, indexedFalse) case b&240 == 16: // 6.2.3 Literal Header Field never Indexed // 0b0001xxxx: top four bits are 0001 // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 return d.parseFieldLiteral(4, indexedNever) case b&224 == 32: // 6.3 Dynamic Table Size Update // Top three bits are '001'. // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 return d.parseDynamicTableSizeUpdate() } return DecodingError{errors.New("invalid encoding")} } // (same invariants and behavior as parseHeaderFieldRepr) func (d *Decoder) parseFieldIndexed() error { buf := d.buf idx, buf, err := readVarInt(7, buf) if err != nil { return err } hf, ok := d.at(idx) if !ok { return DecodingError{InvalidIndexError(idx)} } d.buf = buf return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) } // (same invariants and behavior as parseHeaderFieldRepr) func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { buf := d.buf nameIdx, buf, err := readVarInt(n, buf) if err != nil { return err } var hf HeaderField wantStr := d.emitEnabled || it.indexed() if nameIdx > 0 { ihf, ok := d.at(nameIdx) if !ok { return DecodingError{InvalidIndexError(nameIdx)} } hf.Name = ihf.Name } else { hf.Name, buf, err = d.readString(buf, wantStr) if err != nil { return err } } hf.Value, buf, err = d.readString(buf, wantStr) if err != nil { return err } d.buf = buf if it.indexed() { d.dynTab.add(hf) } hf.Sensitive = it.sensitive() return d.callEmit(hf) } func (d *Decoder) callEmit(hf HeaderField) error { if d.maxStrLen != 0 { if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { return ErrStringLength } } if d.emitEnabled { d.emit(hf) } return nil } // (same invariants and behavior as parseHeaderFieldRepr) func (d *Decoder) parseDynamicTableSizeUpdate() error { buf := d.buf size, buf, err := readVarInt(5, buf) if err != nil { return err } if size > uint64(d.dynTab.allowedMaxSize) { return DecodingError{errors.New("dynamic table size update too large")} } d.dynTab.setMaxSize(uint32(size)) d.buf = buf return nil } var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} // readVarInt reads an unsigned variable length integer off the // beginning of p. n is the parameter as described in // http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. // // n must always be between 1 and 8. // // The returned remain buffer is either a smaller suffix of p, or err != nil. // The error is errNeedMore if p doesn't contain a complete integer. 
func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { if n < 1 || n > 8 { panic("bad n") } if len(p) == 0 { return 0, p, errNeedMore } i = uint64(p[0]) if n < 8 { i &= (1 << uint64(n)) - 1 } if i < (1< 0 { b := p[0] p = p[1:] i += uint64(b&127) << m if b&128 == 0 { return i, p, nil } m += 7 if m >= 63 { // TODO: proper overflow check. making this up. return 0, origP, errVarintOverflow } } return 0, origP, errNeedMore } // readString decodes an hpack string from p. // // wantStr is whether s will be used. If false, decompression and // []byte->string garbage are skipped if s will be ignored // anyway. This does mean that huffman decoding errors for non-indexed // strings past the MAX_HEADER_LIST_SIZE are ignored, but the server // is returning an error anyway, and because they're not indexed, the error // won't affect the decoding state. func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { if len(p) == 0 { return "", p, errNeedMore } isHuff := p[0]&128 != 0 strLen, p, err := readVarInt(7, p) if err != nil { return "", p, err } if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { return "", nil, ErrStringLength } if uint64(len(p)) < strLen { return "", p, errNeedMore } if !isHuff { if wantStr { s = string(p[:strLen]) } return s, p[strLen:], nil } if wantStr { buf := bufPool.Get().(*bytes.Buffer) buf.Reset() // don't trust others defer bufPool.Put(buf) if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { buf.Reset() return "", nil, err } s = buf.String() buf.Reset() // be nice to GC } return s, p[strLen:], nil } huffman.go000066400000000000000000000120531324746544700330370ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2/hpack// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package hpack import ( "bytes" "errors" "io" "sync" ) var bufPool = sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, } // HuffmanDecode decodes the string in v and writes the expanded // result to w, returning the number of bytes written to w and the // Write call's return value. At most one Write call is made. func HuffmanDecode(w io.Writer, v []byte) (int, error) { buf := bufPool.Get().(*bytes.Buffer) buf.Reset() defer bufPool.Put(buf) if err := huffmanDecode(buf, 0, v); err != nil { return 0, err } return w.Write(buf.Bytes()) } // HuffmanDecodeToString decodes the string in v. func HuffmanDecodeToString(v []byte) (string, error) { buf := bufPool.Get().(*bytes.Buffer) buf.Reset() defer bufPool.Put(buf) if err := huffmanDecode(buf, 0, v); err != nil { return "", err } return buf.String(), nil } // ErrInvalidHuffman is returned for errors found decoding // Huffman-encoded strings. var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") // huffmanDecode decodes v to buf. // If maxLen is greater than 0, attempts to write more to buf than // maxLen bytes will return ErrStringLength. func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { n := rootHuffmanNode // cur is the bit buffer that has not been fed into n. // cbits is the number of low order bits in cur that are valid. // sbits is the number of bits of the symbol prefix being decoded. 
cur, cbits, sbits := uint(0), uint8(0), uint8(0) for _, b := range v { cur = cur<<8 | uint(b) cbits += 8 sbits += 8 for cbits >= 8 { idx := byte(cur >> (cbits - 8)) n = n.children[idx] if n == nil { return ErrInvalidHuffman } if n.children == nil { if maxLen != 0 && buf.Len() == maxLen { return ErrStringLength } buf.WriteByte(n.sym) cbits -= n.codeLen n = rootHuffmanNode sbits = cbits } else { cbits -= 8 } } } for cbits > 0 { n = n.children[byte(cur<<(8-cbits))] if n == nil { return ErrInvalidHuffman } if n.children != nil || n.codeLen > cbits { break } if maxLen != 0 && buf.Len() == maxLen { return ErrStringLength } buf.WriteByte(n.sym) cbits -= n.codeLen n = rootHuffmanNode sbits = cbits } if sbits > 7 { // Either there was an incomplete symbol, or overlong padding. // Both are decoding errors per RFC 7541 section 5.2. return ErrInvalidHuffman } if mask := uint(1< 8 { codeLen -= 8 i := uint8(code >> codeLen) if cur.children[i] == nil { cur.children[i] = newInternalNode() } cur = cur.children[i] } shift := 8 - codeLen start, end := int(uint8(code<> (nbits - rembits)) dst[len(dst)-1] |= t } return dst } // HuffmanEncodeLength returns the number of bytes required to encode // s in Huffman codes. The result is round up to byte boundary. func HuffmanEncodeLength(s string) uint64 { n := uint64(0) for i := 0; i < len(s); i++ { n += uint64(huffmanCodeLen[s[i]]) } return (n + 7) / 8 } // appendByteToHuffmanCode appends Huffman code for c to dst and // returns the extended buffer and the remaining bits in the last // element. The appending is not byte aligned and the remaining bits // in the last element of dst is given in rembits. func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { code := huffmanCodes[c] nbits := huffmanCodeLen[c] for { if rembits > nbits { t := uint8(code << (rembits - nbits)) dst[len(dst)-1] |= t rembits -= nbits break } t := uint8(code >> (nbits - rembits)) dst[len(dst)-1] |= t nbits -= rembits rembits = 8 if nbits == 0 { break } dst = append(dst, 0) } return dst, rembits } tables.go000066400000000000000000000240761324746544700326750ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2/hpack// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package hpack import ( "fmt" ) // headerFieldTable implements a list of HeaderFields. // This is used to implement the static and dynamic tables. type headerFieldTable struct { // For static tables, entries are never evicted. // // For dynamic tables, entries are evicted from ents[0] and added to the end. // Each entry has a unique id that starts at one and increments for each // entry that is added. This unique id is stable across evictions, meaning // it can be used as a pointer to a specific entry. As in hpack, unique ids // are 1-based. The unique id for ents[k] is k + evictCount + 1. // // Zero is not a valid unique id. // // evictCount should not overflow in any remotely practical situation. In // practice, we will have one dynamic table per HTTP/2 connection. If we // assume a very powerful server that handles 1M QPS per connection and each // request adds (then evicts) 100 entries from the table, it would still take // 2M years for evictCount to overflow. ents []HeaderField evictCount uint64 // byName maps a HeaderField name to the unique id of the newest entry with // the same name. See above for a definition of "unique id". 
byName map[string]uint64 // byNameValue maps a HeaderField name/value pair to the unique id of the newest // entry with the same name and value. See above for a definition of "unique id". byNameValue map[pairNameValue]uint64 } type pairNameValue struct { name, value string } func (t *headerFieldTable) init() { t.byName = make(map[string]uint64) t.byNameValue = make(map[pairNameValue]uint64) } // len reports the number of entries in the table. func (t *headerFieldTable) len() int { return len(t.ents) } // addEntry adds a new entry. func (t *headerFieldTable) addEntry(f HeaderField) { id := uint64(t.len()) + t.evictCount + 1 t.byName[f.Name] = id t.byNameValue[pairNameValue{f.Name, f.Value}] = id t.ents = append(t.ents, f) } // evictOldest evicts the n oldest entries in the table. func (t *headerFieldTable) evictOldest(n int) { if n > t.len() { panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) } for k := 0; k < n; k++ { f := t.ents[k] id := t.evictCount + uint64(k) + 1 if t.byName[f.Name] == id { delete(t.byName, f.Name) } if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { delete(t.byNameValue, p) } } copy(t.ents, t.ents[n:]) for k := t.len() - n; k < t.len(); k++ { t.ents[k] = HeaderField{} // so strings can be garbage collected } t.ents = t.ents[:t.len()-n] if t.evictCount+uint64(n) < t.evictCount { panic("evictCount overflow") } t.evictCount += uint64(n) } // search finds f in the table. If there is no match, i is 0. // If both name and value match, i is the matched index and nameValueMatch // becomes true. If only name matches, i points to that index and // nameValueMatch becomes false. // // The returned index is a 1-based HPACK index. For dynamic tables, HPACK says // that index 1 should be the newest entry, but t.ents[0] is the oldest entry, // meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic // table, the return value i actually refers to the entry t.ents[t.len()-i]. // // All tables are assumed to be a dynamic tables except for the global // staticTable pointer. // // See Section 2.3.3. func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { if !f.Sensitive { if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { return t.idToIndex(id), true } } if id := t.byName[f.Name]; id != 0 { return t.idToIndex(id), false } return 0, false } // idToIndex converts a unique id to an HPACK index. // See Section 2.3.3. 
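// For example, with evictCount == 3 and two entries in ents, the live
// unique ids are 4 (ents[0], the oldest) and 5 (ents[1], the newest).
// For a dynamic table, id 5 maps to HPACK index 1 and id 4 to index 2;
// for the static table, ids map directly to 1-based indices.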
func (t *headerFieldTable) idToIndex(id uint64) uint64 { if id <= t.evictCount { panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) } k := id - t.evictCount - 1 // convert id to an index t.ents[k] if t != staticTable { return uint64(t.len()) - k // dynamic table } return k + 1 } func pair(name, value string) HeaderField { return HeaderField{Name: name, Value: value} } // http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B var staticTable = newStaticTable() func newStaticTable() *headerFieldTable { t := &headerFieldTable{} t.init() t.addEntry(pair(":authority", "")) t.addEntry(pair(":method", "GET")) t.addEntry(pair(":method", "POST")) t.addEntry(pair(":path", "/")) t.addEntry(pair(":path", "/index.html")) t.addEntry(pair(":scheme", "http")) t.addEntry(pair(":scheme", "https")) t.addEntry(pair(":status", "200")) t.addEntry(pair(":status", "204")) t.addEntry(pair(":status", "206")) t.addEntry(pair(":status", "304")) t.addEntry(pair(":status", "400")) t.addEntry(pair(":status", "404")) t.addEntry(pair(":status", "500")) t.addEntry(pair("accept-charset", "")) t.addEntry(pair("accept-encoding", "gzip, deflate")) t.addEntry(pair("accept-language", "")) t.addEntry(pair("accept-ranges", "")) t.addEntry(pair("accept", "")) t.addEntry(pair("access-control-allow-origin", "")) t.addEntry(pair("age", "")) t.addEntry(pair("allow", "")) t.addEntry(pair("authorization", "")) t.addEntry(pair("cache-control", "")) t.addEntry(pair("content-disposition", "")) t.addEntry(pair("content-encoding", "")) t.addEntry(pair("content-language", "")) t.addEntry(pair("content-length", "")) t.addEntry(pair("content-location", "")) t.addEntry(pair("content-range", "")) t.addEntry(pair("content-type", "")) t.addEntry(pair("cookie", "")) t.addEntry(pair("date", "")) t.addEntry(pair("etag", "")) t.addEntry(pair("expect", "")) t.addEntry(pair("expires", "")) t.addEntry(pair("from", "")) t.addEntry(pair("host", "")) t.addEntry(pair("if-match", "")) t.addEntry(pair("if-modified-since", "")) t.addEntry(pair("if-none-match", "")) t.addEntry(pair("if-range", "")) t.addEntry(pair("if-unmodified-since", "")) t.addEntry(pair("last-modified", "")) t.addEntry(pair("link", "")) t.addEntry(pair("location", "")) t.addEntry(pair("max-forwards", "")) t.addEntry(pair("proxy-authenticate", "")) t.addEntry(pair("proxy-authorization", "")) t.addEntry(pair("range", "")) t.addEntry(pair("referer", "")) t.addEntry(pair("refresh", "")) t.addEntry(pair("retry-after", "")) t.addEntry(pair("server", "")) t.addEntry(pair("set-cookie", "")) t.addEntry(pair("strict-transport-security", "")) t.addEntry(pair("transfer-encoding", "")) t.addEntry(pair("user-agent", "")) t.addEntry(pair("vary", "")) t.addEntry(pair("via", "")) t.addEntry(pair("www-authenticate", "")) return t } var huffmanCodes = [256]uint32{ 0x1ff8, 0x7fffd8, 0xfffffe2, 0xfffffe3, 0xfffffe4, 0xfffffe5, 0xfffffe6, 0xfffffe7, 0xfffffe8, 0xffffea, 0x3ffffffc, 0xfffffe9, 0xfffffea, 0x3ffffffd, 0xfffffeb, 0xfffffec, 0xfffffed, 0xfffffee, 0xfffffef, 0xffffff0, 0xffffff1, 0xffffff2, 0x3ffffffe, 0xffffff3, 0xffffff4, 0xffffff5, 0xffffff6, 0xffffff7, 0xffffff8, 0xffffff9, 0xffffffa, 0xffffffb, 0x14, 0x3f8, 0x3f9, 0xffa, 0x1ff9, 0x15, 0xf8, 0x7fa, 0x3fa, 0x3fb, 0xf9, 0x7fb, 0xfa, 0x16, 0x17, 0x18, 0x0, 0x1, 0x2, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x5c, 0xfb, 0x7ffc, 0x20, 0xffb, 0x3fc, 0x1ffa, 0x21, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0xfc, 0x73, 0xfd, 
0x1ffb, 0x7fff0, 0x1ffc, 0x3ffc, 0x22, 0x7ffd, 0x3, 0x23, 0x4, 0x24, 0x5, 0x25, 0x26, 0x27, 0x6, 0x74, 0x75, 0x28, 0x29, 0x2a, 0x7, 0x2b, 0x76, 0x2c, 0x8, 0x9, 0x2d, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7ffe, 0x7fc, 0x3ffd, 0x1ffd, 0xffffffc, 0xfffe6, 0x3fffd2, 0xfffe7, 0xfffe8, 0x3fffd3, 0x3fffd4, 0x3fffd5, 0x7fffd9, 0x3fffd6, 0x7fffda, 0x7fffdb, 0x7fffdc, 0x7fffdd, 0x7fffde, 0xffffeb, 0x7fffdf, 0xffffec, 0xffffed, 0x3fffd7, 0x7fffe0, 0xffffee, 0x7fffe1, 0x7fffe2, 0x7fffe3, 0x7fffe4, 0x1fffdc, 0x3fffd8, 0x7fffe5, 0x3fffd9, 0x7fffe6, 0x7fffe7, 0xffffef, 0x3fffda, 0x1fffdd, 0xfffe9, 0x3fffdb, 0x3fffdc, 0x7fffe8, 0x7fffe9, 0x1fffde, 0x7fffea, 0x3fffdd, 0x3fffde, 0xfffff0, 0x1fffdf, 0x3fffdf, 0x7fffeb, 0x7fffec, 0x1fffe0, 0x1fffe1, 0x3fffe0, 0x1fffe2, 0x7fffed, 0x3fffe1, 0x7fffee, 0x7fffef, 0xfffea, 0x3fffe2, 0x3fffe3, 0x3fffe4, 0x7ffff0, 0x3fffe5, 0x3fffe6, 0x7ffff1, 0x3ffffe0, 0x3ffffe1, 0xfffeb, 0x7fff1, 0x3fffe7, 0x7ffff2, 0x3fffe8, 0x1ffffec, 0x3ffffe2, 0x3ffffe3, 0x3ffffe4, 0x7ffffde, 0x7ffffdf, 0x3ffffe5, 0xfffff1, 0x1ffffed, 0x7fff2, 0x1fffe3, 0x3ffffe6, 0x7ffffe0, 0x7ffffe1, 0x3ffffe7, 0x7ffffe2, 0xfffff2, 0x1fffe4, 0x1fffe5, 0x3ffffe8, 0x3ffffe9, 0xffffffd, 0x7ffffe3, 0x7ffffe4, 0x7ffffe5, 0xfffec, 0xfffff3, 0xfffed, 0x1fffe6, 0x3fffe9, 0x1fffe7, 0x1fffe8, 0x7ffff3, 0x3fffea, 0x3fffeb, 0x1ffffee, 0x1ffffef, 0xfffff4, 0xfffff5, 0x3ffffea, 0x7ffff4, 0x3ffffeb, 0x7ffffe6, 0x3ffffec, 0x3ffffed, 0x7ffffe7, 0x7ffffe8, 0x7ffffe9, 0x7ffffea, 0x7ffffeb, 0xffffffe, 0x7ffffec, 0x7ffffed, 0x7ffffee, 0x7ffffef, 0x7fffff0, 0x3ffffee, } var huffmanCodeLen = [256]uint8{ 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, } http2.go000066400000000000000000000230261324746544700313700ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package http2 implements the HTTP/2 protocol. // // This package is low-level and intended to be used directly by very // few people. Most users will use it indirectly through the automatic // use by the net/http package (from Go 1.6 and later). // For use in earlier Go versions see ConfigureServer. (Transport support // requires Go 1.6 or later) // // See https://http2.github.io/ for more information on HTTP/2. // // See https://http2.golang.org/ for a test server running this code. 
// package http2 // import "golang.org/x/net/http2" import ( "bufio" "crypto/tls" "errors" "fmt" "io" "net/http" "os" "sort" "strconv" "strings" "sync" "golang.org/x/net/lex/httplex" ) var ( VerboseLogs bool logFrameWrites bool logFrameReads bool inTests bool ) func init() { e := os.Getenv("GODEBUG") if strings.Contains(e, "http2debug=1") { VerboseLogs = true } if strings.Contains(e, "http2debug=2") { VerboseLogs = true logFrameWrites = true logFrameReads = true } } const ( // ClientPreface is the string that must be sent by new // connections from clients. ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" // SETTINGS_MAX_FRAME_SIZE default // http://http2.github.io/http2-spec/#rfc.section.6.5.2 initialMaxFrameSize = 16384 // NextProtoTLS is the NPN/ALPN protocol negotiated during // HTTP/2's TLS setup. NextProtoTLS = "h2" // http://http2.github.io/http2-spec/#SettingValues initialHeaderTableSize = 4096 initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size defaultMaxReadFrameSize = 1 << 20 ) var ( clientPreface = []byte(ClientPreface) ) type streamState int // HTTP/2 stream states. // // See http://tools.ietf.org/html/rfc7540#section-5.1. // // For simplicity, the server code merges "reserved (local)" into // "half-closed (remote)". This is one less state transition to track. // The only downside is that we send PUSH_PROMISEs slightly less // liberally than allowable. More discussion here: // https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html // // "reserved (remote)" is omitted since the client code does not // support server push. const ( stateIdle streamState = iota stateOpen stateHalfClosedLocal stateHalfClosedRemote stateClosed ) var stateName = [...]string{ stateIdle: "Idle", stateOpen: "Open", stateHalfClosedLocal: "HalfClosedLocal", stateHalfClosedRemote: "HalfClosedRemote", stateClosed: "Closed", } func (st streamState) String() string { return stateName[st] } // Setting is a setting parameter: which setting it is, and its value. type Setting struct { // ID is which setting is being set. // See http://http2.github.io/http2-spec/#SettingValues ID SettingID // Val is the value. Val uint32 } func (s Setting) String() string { return fmt.Sprintf("[%v = %d]", s.ID, s.Val) } // Valid reports whether the setting is valid. 
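// For example, Setting{ID: SettingMaxFrameSize, Val: 1 << 20} is valid,
// while a SETTINGS_MAX_FRAME_SIZE below 16384 or above 1<<24-1, an
// ENABLE_PUSH value other than 0 or 1, or an INITIAL_WINDOW_SIZE above
// 1<<31-1 all produce a ConnectionError.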
func (s Setting) Valid() error { // Limits and error codes from 6.5.2 Defined SETTINGS Parameters switch s.ID { case SettingEnablePush: if s.Val != 1 && s.Val != 0 { return ConnectionError(ErrCodeProtocol) } case SettingInitialWindowSize: if s.Val > 1<<31-1 { return ConnectionError(ErrCodeFlowControl) } case SettingMaxFrameSize: if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } } return nil } // A SettingID is an HTTP/2 setting as defined in // http://http2.github.io/http2-spec/#iana-settings type SettingID uint16 const ( SettingHeaderTableSize SettingID = 0x1 SettingEnablePush SettingID = 0x2 SettingMaxConcurrentStreams SettingID = 0x3 SettingInitialWindowSize SettingID = 0x4 SettingMaxFrameSize SettingID = 0x5 SettingMaxHeaderListSize SettingID = 0x6 ) var settingName = map[SettingID]string{ SettingHeaderTableSize: "HEADER_TABLE_SIZE", SettingEnablePush: "ENABLE_PUSH", SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", SettingMaxFrameSize: "MAX_FRAME_SIZE", SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", } func (s SettingID) String() string { if v, ok := settingName[s]; ok { return v } return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) } var ( errInvalidHeaderFieldName = errors.New("http2: invalid header field name") errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") ) // validWireHeaderFieldName reports whether v is a valid header field // name (key). See httplex.ValidHeaderName for the base rules. // // Further, http2 says: // "Just as in HTTP/1.x, header field names are strings of ASCII // characters that are compared in a case-insensitive // fashion. However, header field names MUST be converted to // lowercase prior to their encoding in HTTP/2. " func validWireHeaderFieldName(v string) bool { if len(v) == 0 { return false } for _, r := range v { if !httplex.IsTokenRune(r) { return false } if 'A' <= r && r <= 'Z' { return false } } return true } var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) func init() { for i := 100; i <= 999; i++ { if v := http.StatusText(i); v != "" { httpCodeStringCommon[i] = strconv.Itoa(i) } } } func httpCodeString(code int) string { if s, ok := httpCodeStringCommon[code]; ok { return s } return strconv.Itoa(code) } // from pkg io type stringWriter interface { WriteString(s string) (n int, err error) } // A gate lets two goroutines coordinate their activities. type gate chan struct{} func (g gate) Done() { g <- struct{}{} } func (g gate) Wait() { <-g } // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type closeWaiter chan struct{} // Init makes a closeWaiter usable. // It exists because so a closeWaiter value can be placed inside a // larger struct and have the Mutex and Cond's memory in the same // allocation. func (cw *closeWaiter) Init() { *cw = make(chan struct{}) } // Close marks the closeWaiter as closed and unblocks any waiters. func (cw closeWaiter) Close() { close(cw) } // Wait waits for the closeWaiter to become closed. func (cw closeWaiter) Wait() { <-cw } // bufferedWriter is a buffered writer that writes to w. // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. 
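// The first Write borrows a *bufio.Writer from bufWriterPool and Flush
// returns it, so an idle connection holds no write buffer at all; see
// Write and Flush below.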
type bufferedWriter struct { w io.Writer // immutable bw *bufio.Writer // non-nil when data is buffered } func newBufferedWriter(w io.Writer) *bufferedWriter { return &bufferedWriter{w: w} } // bufWriterPoolBufferSize is the size of bufio.Writer's // buffers created using bufWriterPool. // // TODO: pick a less arbitrary value? this is a bit under // (3 x typical 1500 byte MTU) at least. Other than that, // not much thought went into it. const bufWriterPoolBufferSize = 4 << 10 var bufWriterPool = sync.Pool{ New: func() interface{} { return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) }, } func (w *bufferedWriter) Available() int { if w.bw == nil { return bufWriterPoolBufferSize } return w.bw.Available() } func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) bw.Reset(w.w) w.bw = bw } return w.bw.Write(p) } func (w *bufferedWriter) Flush() error { bw := w.bw if bw == nil { return nil } err := bw.Flush() bw.Reset(nil) bufWriterPool.Put(bw) w.bw = nil return err } func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") } return uint32(v) } // bodyAllowedForStatus reports whether a given response status code // permits a body. See RFC 2616, section 4.4. func bodyAllowedForStatus(status int) bool { switch { case status >= 100 && status <= 199: return false case status == 204: return false case status == 304: return false } return true } type httpError struct { msg string timeout bool } func (e *httpError) Error() string { return e.msg } func (e *httpError) Timeout() bool { return e.timeout } func (e *httpError) Temporary() bool { return true } var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} type connectionStater interface { ConnectionState() tls.ConnectionState } var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} type sorter struct { v []string // owned by sorter } func (s *sorter) Len() int { return len(s.v) } func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } // Keys returns the sorted keys of h. // // The returned slice is only valid until s used again or returned to // its pool. func (s *sorter) Keys(h http.Header) []string { keys := s.v[:0] for k := range h { keys = append(keys, k) } s.v = keys sort.Sort(s) return keys } func (s *sorter) SortStrings(ss []string) { // Our sorter works on s.v, which sorter owns, so // stash it away while we sort the user's buffer. save := s.v s.v = ss sort.Sort(s) s.v = save } // validPseudoPath reports whether v is a valid :path pseudo-header // value. It must be either: // // *) a non-empty string starting with '/', but not with with "//", // *) the string '*', for OPTIONS requests. // // For now this is only used a quick check for deciding when to clean // up Opaque URLs before sending requests from the Transport. // See golang.org/issue/16847 func validPseudoPath(v string) bool { return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*" } not_go16.go000066400000000000000000000023471324746544700317660ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
// +build !go1.6 package http2 import ( "crypto/tls" "net/http" "time" ) func configureTransport(t1 *http.Transport) (*Transport, error) { return nil, errTransportVersion } func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { return 0 } // isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. func isBadCipher(cipher uint16) bool { switch cipher { case tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, tls.TLS_RSA_WITH_AES_128_CBC_SHA, tls.TLS_RSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: // Reject cipher suites from Appendix A. // "This list includes those cipher suites that do not // offer an ephemeral key exchange and those that are // based on the TLS null, stream or block cipher type" return true default: return false } } not_go17.go000066400000000000000000000046261324746544700317710ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build !go1.7 package http2 import ( "crypto/tls" "net" "net/http" "time" ) type contextContext interface { Done() <-chan struct{} Err() error } type fakeContext struct{} func (fakeContext) Done() <-chan struct{} { return nil } func (fakeContext) Err() error { panic("should not be called") } func reqContext(r *http.Request) fakeContext { return fakeContext{} } func setResponseUncompressed(res *http.Response) { // Nothing. 
} type clientTrace struct{} func requestTrace(*http.Request) *clientTrace { return nil } func traceGotConn(*http.Request, *ClientConn) {} func traceFirstResponseByte(*clientTrace) {} func traceWroteHeaders(*clientTrace) {} func traceWroteRequest(*clientTrace, error) {} func traceGot100Continue(trace *clientTrace) {} func traceWait100Continue(trace *clientTrace) {} func nop() {} func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { return nil, nop } func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { return ctx, nop } func requestWithContext(req *http.Request, ctx contextContext) *http.Request { return req } // temporary copy of Go 1.6's private tls.Config.clone: func cloneTLSConfig(c *tls.Config) *tls.Config { return &tls.Config{ Rand: c.Rand, Time: c.Time, Certificates: c.Certificates, NameToCertificate: c.NameToCertificate, GetCertificate: c.GetCertificate, RootCAs: c.RootCAs, NextProtos: c.NextProtos, ServerName: c.ServerName, ClientAuth: c.ClientAuth, ClientCAs: c.ClientCAs, InsecureSkipVerify: c.InsecureSkipVerify, CipherSuites: c.CipherSuites, PreferServerCipherSuites: c.PreferServerCipherSuites, SessionTicketsDisabled: c.SessionTicketsDisabled, SessionTicketKey: c.SessionTicketKey, ClientSessionCache: c.ClientSessionCache, MinVersion: c.MinVersion, MaxVersion: c.MaxVersion, CurvePreferences: c.CurvePreferences, } } func (cc *ClientConn) Ping(ctx contextContext) error { return cc.ping(ctx) } func (t *Transport) idleConnTimeout() time.Duration { return 0 } not_go18.go000066400000000000000000000010621324746544700317610ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build !go1.8 package http2 import ( "io" "net/http" ) func configureServer18(h1 *http.Server, h2 *Server) error { // No IdleTimeout to sync prior to Go 1.8. return nil } func shouldLogPanic(panicValue interface{}) bool { return panicValue != nil } func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { return nil } func reqBodyIsNoBody(io.ReadCloser) bool { return false } pipe.go000066400000000000000000000071471324746544700312720ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package http2 import ( "errors" "io" "sync" ) // pipe is a goroutine-safe io.Reader/io.Writer pair. It's like // io.Pipe except there are no PipeReader/PipeWriter halves, and the // underlying buffer is an interface. (io.Pipe is always unbuffered) type pipe struct { mu sync.Mutex c sync.Cond // c.L lazily initialized to &p.mu b pipeBuffer err error // read error once empty. non-nil means closed. breakErr error // immediate read error (caller doesn't see rest of b) donec chan struct{} // closed on error readFn func() // optional code to run in Read before error } type pipeBuffer interface { Len() int io.Writer io.Reader } func (p *pipe) Len() int { p.mu.Lock() defer p.mu.Unlock() return p.b.Len() } // Read waits until data is available and copies bytes // from the buffer into p. 
func (p *pipe) Read(d []byte) (n int, err error) { p.mu.Lock() defer p.mu.Unlock() if p.c.L == nil { p.c.L = &p.mu } for { if p.breakErr != nil { return 0, p.breakErr } if p.b.Len() > 0 { return p.b.Read(d) } if p.err != nil { if p.readFn != nil { p.readFn() // e.g. copy trailers p.readFn = nil // not sticky like p.err } return 0, p.err } p.c.Wait() } } var errClosedPipeWrite = errors.New("write on closed buffer") // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. func (p *pipe) Write(d []byte) (n int, err error) { p.mu.Lock() defer p.mu.Unlock() if p.c.L == nil { p.c.L = &p.mu } defer p.c.Signal() if p.err != nil { return 0, errClosedPipeWrite } return p.b.Write(d) } // CloseWithError causes the next Read (waking up a current blocked // Read if needed) to return the provided err after all data has been // read. // // The error must be non-nil. func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } // BreakWithError causes the next Read (waking up a current blocked // Read if needed) to return the provided err immediately, without // waiting for unread data. func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } // closeWithErrorAndCode is like CloseWithError but also sets some code to run // in the caller's goroutine before returning the error. func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } func (p *pipe) closeWithError(dst *error, err error, fn func()) { if err == nil { panic("err must be non-nil") } p.mu.Lock() defer p.mu.Unlock() if p.c.L == nil { p.c.L = &p.mu } defer p.c.Signal() if *dst != nil { // Already been done. return } p.readFn = fn *dst = err p.closeDoneLocked() } // requires p.mu be held. func (p *pipe) closeDoneLocked() { if p.donec == nil { return } // Close if unclosed. This isn't racy since we always // hold p.mu while closing. select { case <-p.donec: default: close(p.donec) } } // Err returns the error (if any) first set by BreakWithError or CloseWithError. func (p *pipe) Err() error { p.mu.Lock() defer p.mu.Unlock() if p.breakErr != nil { return p.breakErr } return p.err } // Done returns a channel which is closed if and when this pipe is closed // with CloseWithError. func (p *pipe) Done() <-chan struct{} { p.mu.Lock() defer p.mu.Unlock() if p.donec == nil { p.donec = make(chan struct{}) if p.err != nil || p.breakErr != nil { // Already hit an error. p.closeDoneLocked() } } return p.donec } server.go000066400000000000000000002501221324746544700316340ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // TODO: turn off the serve goroutine when idle, so // an idle conn only has the readFrames goroutine active. (which could // also be optimized probably to pin less memory in crypto/tls). This // would involve tracking when the serve goroutine is active (atomic // int32 read/CAS probably?) and starting it up when frames arrive, // and shutting it down when all handlers exit. the occasional PING // packets could use time.AfterFunc to call sc.wakeStartServeLoop() // (which is a no-op if already running) and then queue the PING write // as normal. 
The serve loop would then exit in most cases (if no // Handlers running) and not be woken up again until the PING packet // returns. // TODO (maybe): add a mechanism for Handlers to going into // half-closed-local mode (rw.(io.Closer) test?) but not exit their // handler, and continue to be able to read from the // Request.Body. This would be a somewhat semantic change from HTTP/1 // (or at least what we expose in net/http), so I'd probably want to // add it there too. For now, this package says that returning from // the Handler ServeHTTP function means you're both done reading and // done writing, without a way to stop just one or the other. package http2 import ( "bufio" "bytes" "crypto/tls" "errors" "fmt" "io" "log" "math" "net" "net/http" "net/textproto" "net/url" "os" "reflect" "runtime" "strconv" "strings" "sync" "time" "golang.org/x/net/http2/hpack" ) const ( prefaceTimeout = 10 * time.Second firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway handlerChunkWriteSize = 4 << 10 defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? ) var ( errClientDisconnected = errors.New("client disconnected") errClosedBody = errors.New("body closed by handler") errHandlerComplete = errors.New("http2: request body closed due to handler exiting") errStreamClosed = errors.New("http2: stream closed") ) var responseWriterStatePool = sync.Pool{ New: func() interface{} { rws := &responseWriterState{} rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) return rws }, } // Test hooks. var ( testHookOnConn func() testHookGetServerConn func(*serverConn) testHookOnPanicMu *sync.Mutex // nil except in tests testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) ) // Server is an HTTP/2 server. type Server struct { // MaxHandlers limits the number of http.Handler ServeHTTP goroutines // which may run at a time over all connections. // Negative or zero no limit. // TODO: implement MaxHandlers int // MaxConcurrentStreams optionally specifies the number of // concurrent streams that each client may have open at a // time. This is unrelated to the number of http.Handler goroutines // which may be active globally, which is MaxHandlers. // If zero, MaxConcurrentStreams defaults to at least 100, per // the HTTP/2 spec's recommendations. MaxConcurrentStreams uint32 // MaxReadFrameSize optionally specifies the largest frame // this server is willing to read. A valid value is between // 16k and 16M, inclusive. If zero or otherwise invalid, a // default value is used. MaxReadFrameSize uint32 // PermitProhibitedCipherSuites, if true, permits the use of // cipher suites prohibited by the HTTP/2 spec. PermitProhibitedCipherSuites bool // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. IdleTimeout time.Duration // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. // If the value is outside this range, a default value will be // used instead. MaxUploadBufferPerConnection int32 // MaxUploadBufferPerStream is the size of the initial flow control // window for each stream. The HTTP/2 spec does not allow this to // be larger than 2^32-1. If the value is zero or larger than the // maximum, a default value will be used instead. 
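	// (The fallback used here is 1 << 20 bytes; see
	// initialStreamRecvWindowSize.)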
MaxUploadBufferPerStream int32 // NewWriteScheduler constructs a write scheduler for a connection. // If nil, a default scheduler is chosen. NewWriteScheduler func() WriteScheduler } func (s *Server) initialConnRecvWindowSize() int32 { if s.MaxUploadBufferPerConnection > initialWindowSize { return s.MaxUploadBufferPerConnection } return 1 << 20 } func (s *Server) initialStreamRecvWindowSize() int32 { if s.MaxUploadBufferPerStream > 0 { return s.MaxUploadBufferPerStream } return 1 << 20 } func (s *Server) maxReadFrameSize() uint32 { if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { return v } return defaultMaxReadFrameSize } func (s *Server) maxConcurrentStreams() uint32 { if v := s.MaxConcurrentStreams; v > 0 { return v } return defaultMaxStreams } // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. // // ConfigureServer must be called before s begins serving. func ConfigureServer(s *http.Server, conf *Server) error { if s == nil { panic("nil *http.Server") } if conf == nil { conf = new(Server) } if err := configureServer18(s, conf); err != nil { return err } if s.TLSConfig == nil { s.TLSConfig = new(tls.Config) } else if s.TLSConfig.CipherSuites != nil { // If they already provided a CipherSuite list, return // an error if it has a bad order or is missing // ECDHE_RSA_WITH_AES_128_GCM_SHA256. const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 haveRequired := false sawBad := false for i, cs := range s.TLSConfig.CipherSuites { if cs == requiredCipher { haveRequired = true } if isBadCipher(cs) { sawBad = true } else if sawBad { return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) } } if !haveRequired { return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") } } // Note: not setting MinVersion to tls.VersionTLS12, // as we don't want to interfere with HTTP/1.1 traffic // on the user's server. We enforce TLS 1.2 later once // we accept a connection. Ideally this should be done // during next-proto selection, but using TLS <1.2 with // HTTP/2 is still the client's bug. s.TLSConfig.PreferServerCipherSuites = true haveNPN := false for _, p := range s.TLSConfig.NextProtos { if p == NextProtoTLS { haveNPN = true break } } if !haveNPN { s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) } if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { if testHookOnConn != nil { testHookOnConn() } conf.ServeConn(c, &ServeConnOpts{ Handler: h, BaseConfig: hs, }) } s.TLSNextProto[NextProtoTLS] = protoHandler return nil } // ServeConnOpts are options for the Server.ServeConn method. type ServeConnOpts struct { // BaseConfig optionally sets the base configuration // for values. If nil, defaults are used. BaseConfig *http.Server // Handler specifies which handler to use for processing // requests. If nil, BaseConfig.Handler is used. If BaseConfig // or BaseConfig.Handler is nil, http.DefaultServeMux is used. 
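	// A minimal sketch of driving ServeConn directly (ln is an assumed
	// net.Listener, h an assumed http.Handler; error handling omitted):
	//
	//	s := &Server{}
	//	c, _ := ln.Accept()
	//	go s.ServeConn(c, &ServeConnOpts{Handler: h})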
Handler http.Handler } func (o *ServeConnOpts) baseConfig() *http.Server { if o != nil && o.BaseConfig != nil { return o.BaseConfig } return new(http.Server) } func (o *ServeConnOpts) handler() http.Handler { if o != nil { if o.Handler != nil { return o.Handler } if o.BaseConfig != nil && o.BaseConfig.Handler != nil { return o.BaseConfig.Handler } } return http.DefaultServeMux } // ServeConn serves HTTP/2 requests on the provided connection and // blocks until the connection is no longer readable. // // ServeConn starts speaking HTTP/2 assuming that c has not had any // reads or writes. It writes its initial settings frame and expects // to be able to read the preface and settings frame from the // client. If c has a ConnectionState method like a *tls.Conn, the // ConnectionState is used to verify the TLS ciphersuite and to set // the Request.TLS field in Handlers. // // ServeConn does not support h2c by itself. Any h2c support must be // implemented in terms of providing a suitably-behaving net.Conn. // // The opts parameter is optional. If nil, default values are used. func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() sc := &serverConn{ srv: s, hs: opts.baseConfig(), conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), bw: newBufferedWriter(c), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), wantWriteFrameCh: make(chan FrameWriteRequest, 8), wantStartPushCh: make(chan startPushRequest, 8), wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" advMaxStreams: s.maxConcurrentStreams(), initialStreamSendWindowSize: initialWindowSize, maxFrameSize: initialMaxFrameSize, headerTableSize: initialHeaderTableSize, serveG: newGoroutineLock(), pushEnabled: true, } // The net/http package sets the write deadline from the // http.Server.WriteTimeout during the TLS handshake, but then // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. if sc.hs.WriteTimeout != 0 { sc.conn.SetWriteDeadline(time.Time{}) } if s.NewWriteScheduler != nil { sc.writeSched = s.NewWriteScheduler() } else { sc.writeSched = NewRandomWriteScheduler() } // These start at the RFC-specified defaults. If there is a higher // configured value for inflow, that will be updated when we send a // WINDOW_UPDATE shortly after sending SETTINGS. sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) fr := NewFramer(sc.bw, c) fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() fr.SetMaxReadFrameSize(s.maxReadFrameSize()) sc.framer = fr if tc, ok := c.(connectionStater); ok { sc.tlsState = new(tls.ConnectionState) *sc.tlsState = tc.ConnectionState() // 9.2 Use of TLS Features // An implementation of HTTP/2 over TLS MUST use TLS // 1.2 or higher with the restrictions on feature set // and cipher suite described in this section. Due to // implementation limitations, it might not be // possible to fail TLS negotiation. 
An endpoint MUST // immediately terminate an HTTP/2 connection that // does not meet the TLS requirements described in // this section with a connection error (Section // 5.4.1) of type INADEQUATE_SECURITY. if sc.tlsState.Version < tls.VersionTLS12 { sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") return } if sc.tlsState.ServerName == "" { // Client must use SNI, but we don't enforce that anymore, // since it was causing problems when connecting to bare IP // addresses during development. // // TODO: optionally enforce? Or enforce at the time we receive // a new request, and verify the the ServerName matches the :authority? // But that precludes proxy situations, perhaps. // // So for now, do nothing here again. } if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." // // We choose that. In my opinion, the spec is weak // here. It also says both parties must support at least // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no // excuses here. If we really must, we could allow an // "AllowInsecureWeakCiphers" option on the server later. // Let's see how it plays out first. sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) return } } if hook := testHookGetServerConn; hook != nil { hook(sc) } sc.serve() } func (sc *serverConn) rejectConn(err ErrCode, debug string) { sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) // ignoring errors. hanging up anyway. sc.framer.WriteGoAway(0, err, []byte(debug)) sc.bw.Flush() sc.conn.Close() } type serverConn struct { // Immutable: srv *Server hs *http.Server conn net.Conn bw *bufferedWriter // writing to conn handler http.Handler baseCtx contextContext framer *Framer doneServing chan struct{} // closed when serverConn.serve ends readFrameCh chan readFrameResult // written by serverConn.readFrames wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve wantStartPushCh chan startPushRequest // from handlers -> serve wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes bodyReadCh chan bodyReadMsg // from handlers -> serve testHookCh chan func(int) // code to run on the serve loop flow flow // conn-wide (not stream-specific) outbound flow control inflow flow // conn-wide inbound flow control tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() pushEnabled bool sawFirstSettings bool // got the initial SETTINGS frame after the preface needToSendSettingsAck bool unackedSettings int // how many SETTINGS have we sent without ACKs? 
clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client curClientStreams uint32 // number of open streams initiated by the client curPushedStreams uint32 // number of open streams initiated by server push maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes streams map[uint32]*stream initialStreamSendWindowSize int32 maxFrameSize int32 headerTableSize uint32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case writingFrame bool // started writing a frame (on serve goroutine or separate) writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh needsFrameFlush bool // last frame write wasn't a flush inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write goAwayCode ErrCode shutdownTimerCh <-chan time.Time // nil until used shutdownTimer *time.Timer // nil until used idleTimer *time.Timer // nil if unused idleTimerCh <-chan time.Time // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer hpackEncoder *hpack.Encoder } func (sc *serverConn) maxHeaderListSize() uint32 { n := sc.hs.MaxHeaderBytes if n <= 0 { n = http.DefaultMaxHeaderBytes } // http2's count is in a slightly different unit and includes 32 bytes per pair. // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. const perFieldOverhead = 32 // per http2 spec const typicalHeaders = 10 // conservative return uint32(n + typicalHeaders*perFieldOverhead) } func (sc *serverConn) curOpenStreams() uint32 { sc.serveG.check() return sc.curClientStreams + sc.curPushedStreams } // stream represents a stream. This is the minimal metadata needed by // the serve goroutine. Most of the actual stream state is owned by // the http.Handler's goroutine in the responseWriter. Because the // responseWriter's responseWriterState is recycled at the end of a // handler, this struct intentionally has no pointer to the // *responseWriter{,State} itself, as the Handler ending nils out the // responseWriter's state field. 
type stream struct { // immutable: sc *serverConn id uint32 body *pipe // non-nil if expecting DATA frames cw closeWaiter // closed wait stream transitions to closed state ctx contextContext cancelCtx func() // owned by serverConn's serve loop: bodyBytes int64 // body bytes seen so far declBodyBytes int64 // or -1 if undeclared flow flow // limits writing from Handler to client inflow flow // what the client is allowed to POST/etc to us parent *stream // or nil numTrailerValues int64 weight uint8 state streamState resetQueued bool // RST_STREAM queued for write; set by sc.resetStream gotTrailerHeader bool // HEADER frame for trailers was seen wroteHeaders bool // whether we wrote headers (not status 100) writeDeadline *time.Timer // nil if unused trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer } func (sc *serverConn) Framer() *Framer { return sc.framer } func (sc *serverConn) CloseConn() error { return sc.conn.Close() } func (sc *serverConn) Flush() error { return sc.bw.Flush() } func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { return sc.hpackEncoder, &sc.headerWriteBuf } func (sc *serverConn) state(streamID uint32) (streamState, *stream) { sc.serveG.check() // http://tools.ietf.org/html/rfc7540#section-5.1 if st, ok := sc.streams[streamID]; ok { return st.state, st } // "The first use of a new stream identifier implicitly closes all // streams in the "idle" state that might have been initiated by // that peer with a lower-valued stream identifier. For example, if // a client sends a HEADERS frame on stream 7 without ever sending a // frame on stream 5, then stream 5 transitions to the "closed" // state when the first frame for stream 7 is sent or received." if streamID%2 == 1 { if streamID <= sc.maxClientStreamID { return stateClosed, nil } } else { if streamID <= sc.maxPushPromiseID { return stateClosed, nil } } return stateIdle, nil } // setConnState calls the net/http ConnState hook for this connection, if configured. // Note that the net/http package does StateNew and StateClosed for us. // There is currently no plan for StateHijacked or hijacking HTTP/2 connections. func (sc *serverConn) setConnState(state http.ConnState) { if sc.hs.ConnState != nil { sc.hs.ConnState(sc.conn, state) } } func (sc *serverConn) vlogf(format string, args ...interface{}) { if VerboseLogs { sc.logf(format, args...) } } func (sc *serverConn) logf(format string, args ...interface{}) { if lg := sc.hs.ErrorLog; lg != nil { lg.Printf(format, args...) } else { log.Printf(format, args...) } } // errno returns v's underlying uintptr, else 0. // // TODO: remove this helper function once http2 can use build // tags. See comment in isClosedConnError. func errno(v error) uintptr { if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { return uintptr(rv.Uint()) } return 0 } // isClosedConnError reports whether err is an error from use of a closed // network connection. func isClosedConnError(err error) bool { if err == nil { return false } // TODO: remove this string search and be more like the Windows // case below. That might involve modifying the standard library // to return better error types. str := err.Error() if strings.Contains(str, "use of closed network connection") { return true } // TODO(bradfitz): x/tools/cmd/bundle doesn't really support // build tags, so I can't make an http2_windows.go file with // Windows-specific stuff. Fix that and move this, once we // have a way to bundle this into std's net/http somehow. 
if runtime.GOOS == "windows" { if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { const WSAECONNABORTED = 10053 const WSAECONNRESET = 10054 if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { return true } } } } return false } func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { if err == nil { return } if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { // Boring, expected errors. sc.vlogf(format, args...) } else { sc.logf(format, args...) } } func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() cv, ok := commonCanonHeader[v] if ok { return cv } cv, ok = sc.canonHeader[v] if ok { return cv } if sc.canonHeader == nil { sc.canonHeader = make(map[string]string) } cv = http.CanonicalHeaderKey(v) sc.canonHeader[v] = cv return cv } type readFrameResult struct { f Frame // valid until readMore is called err error // readMore should be called once the consumer no longer needs or // retains f. After readMore, f is invalid and more frames can be // read. readMore func() } // readFrames is the loop that reads incoming frames. // It takes care to only read one frame at a time, blocking until the // consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { gate := make(gate) gateDone := gate.Done for { f, err := sc.framer.ReadFrame() select { case sc.readFrameCh <- readFrameResult{f, err, gateDone}: case <-sc.doneServing: return } select { case <-gate: case <-sc.doneServing: return } if terminalReadFrameError(err) { return } } } // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. type frameWriteResult struct { wr FrameWriteRequest // what was written (or attempted) err error // result of the writeFrame call } // writeFrameAsync runs in its own goroutine and writes a single frame // and then reports when it's done. // At most one goroutine can be running writeFrameAsync at a time per // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { err := wr.write.writeFrame(sc) sc.wroteFrameCh <- frameWriteResult{wr, err} } func (sc *serverConn) closeAllStreamsOnConnClose() { sc.serveG.check() for _, st := range sc.streams { sc.closeStream(st, errClientDisconnected) } } func (sc *serverConn) stopShutdownTimer() { sc.serveG.check() if t := sc.shutdownTimer; t != nil { t.Stop() } } func (sc *serverConn) notePanic() { // Note: this is for serverConn.serve panicking, not http.Handler code. if testHookOnPanicMu != nil { testHookOnPanicMu.Lock() defer testHookOnPanicMu.Unlock() } if testHookOnPanic != nil { if e := recover(); e != nil { if testHookOnPanic(sc, e) { panic(e) } } } } func (sc *serverConn) serve() { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() defer sc.closeAllStreamsOnConnClose() defer sc.stopShutdownTimer() defer close(sc.doneServing) // unblocks handlers trying to send if VerboseLogs { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } sc.writeFrame(FrameWriteRequest{ write: writeSettings{ {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, }, }) sc.unackedSettings++ // Each connection starts with intialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
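	// With the defaults above that is 1<<20 - 65535 = 983041 extra
	// tokens, announced to the client as connection-level WINDOW_UPDATE
	// credit.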
if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } if err := sc.readPreface(); err != nil { sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) return } // Now that we've got the preface, get us out of the // "StateNew" state. We can't go directly to idle, though. // Active means we read some data and anticipate a request. We'll // do another Active when we get a HEADERS frame. sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout != 0 { sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout) defer sc.idleTimer.Stop() sc.idleTimerCh = sc.idleTimer.C } var gracefulShutdownCh <-chan struct{} if sc.hs != nil { gracefulShutdownCh = h1ServerShutdownChan(sc.hs) } go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := time.NewTimer(firstSettingsTimeout) loopNum := 0 for { loopNum++ select { case wr := <-sc.wantWriteFrameCh: if se, ok := wr.write.(StreamError); ok { sc.resetStream(se) break } sc.writeFrame(wr) case spr := <-sc.wantStartPushCh: sc.startPush(spr) case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: if !sc.processFrameFromReader(res) { return } res.readMore() if settingsTimer.C != nil { settingsTimer.Stop() settingsTimer.C = nil } case m := <-sc.bodyReadCh: sc.noteBodyRead(m.st, m.n) case <-settingsTimer.C: sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) return case <-gracefulShutdownCh: gracefulShutdownCh = nil sc.startGracefulShutdown() case <-sc.shutdownTimerCh: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return case <-sc.idleTimerCh: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) case fn := <-sc.testHookCh: fn(loopNum) } if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { return } } } // readPreface reads the ClientPreface greeting from the peer // or returns an error on timeout or an invalid greeting. func (sc *serverConn) readPreface() error { errc := make(chan error, 1) go func() { // Read the client preface buf := make([]byte, len(ClientPreface)) if _, err := io.ReadFull(sc.conn, buf); err != nil { errc <- err } else if !bytes.Equal(buf, clientPreface) { errc <- fmt.Errorf("bogus greeting %q", buf) } else { errc <- nil } }() timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { case <-timer.C: return errors.New("timeout waiting for client preface") case err := <-errc: if err == nil { if VerboseLogs { sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) } } return err } } var errChanPool = sync.Pool{ New: func() interface{} { return make(chan error, 1) }, } var writeDataPool = sync.Pool{ New: func() interface{} { return new(writeData) }, } // writeDataFromHandler writes DATA response frames from a handler on // the given stream. 
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { ch := errChanPool.Get().(chan error) writeArg := writeDataPool.Get().(*writeData) *writeArg = writeData{stream.id, data, endStream} err := sc.writeFrameFromHandler(FrameWriteRequest{ write: writeArg, stream: stream, done: ch, }) if err != nil { return err } var frameWriteDone bool // the frame write is done (successfully or not) select { case err = <-ch: frameWriteDone = true case <-sc.doneServing: return errClientDisconnected case <-stream.cw: // If both ch and stream.cw were ready (as might // happen on the final Write after an http.Handler // ends), prefer the write result. Otherwise this // might just be us successfully closing the stream. // The writeFrameAsync and serve goroutines guarantee // that the ch send will happen before the stream.cw // close. select { case err = <-ch: frameWriteDone = true default: return errStreamClosed } } errChanPool.Put(ch) if frameWriteDone { writeDataPool.Put(writeArg) } return err } // writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts // if the connection has gone away. // // This must not be run from the serve goroutine itself, else it might // deadlock writing to sc.wantWriteFrameCh (which is only mildly // buffered and is read by serve itself). If you're on the serve // goroutine, call writeFrame instead. func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { sc.serveG.checkNotOn() // NOT select { case sc.wantWriteFrameCh <- wr: return nil case <-sc.doneServing: // Serve loop is gone. // Client has closed their connection to the server. return errClientDisconnected } } // writeFrame schedules a frame to write and sends it if there's nothing // already being written. // // There is no pushback here (the serve goroutine never blocks). It's // the http.Handlers that block, waiting for their previous frames to // make it onto the wire // // If you're not on the serve goroutine, use writeFrameFromHandler instead. func (sc *serverConn) writeFrame(wr FrameWriteRequest) { sc.serveG.check() // If true, wr will not be written and wr.done will not be signaled. var ignoreWrite bool // We are not allowed to write frames on closed streams. RFC 7540 Section // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on // a closed stream." Our server never sends PRIORITY, so that exception // does not apply. // // The serverConn might close an open stream while the stream's handler // is still running. For example, the server might close a stream when it // receives bad data from the client. If this happens, the handler might // attempt to write a frame after the stream has been closed (since the // handler hasn't yet been notified of the close). In this case, we simply // ignore the frame. The handler will notice that the stream is closed when // it waits for the frame to be written. // // As an exception to this rule, we allow sending RST_STREAM after close. // This allows us to immediately reject new streams without tracking any // state for those streams (except for the queued RST_STREAM frame). This // may result in duplicate RST_STREAMs in some cases, but the client should // ignore those. if wr.StreamID() != 0 { _, isReset := wr.write.(StreamError) if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { ignoreWrite = true } } // Don't send a 100-continue response if we've already sent headers. // See golang.org/issue/14030. 
switch wr.write.(type) { case *writeResHeaders: wr.stream.wroteHeaders = true case write100ContinueHeadersFrame: if wr.stream.wroteHeaders { // We do not need to notify wr.done because this frame is // never written with wr.done != nil. if wr.done != nil { panic("wr.done != nil for write100ContinueHeadersFrame") } ignoreWrite = true } } if !ignoreWrite { sc.writeSched.Push(wr) } sc.scheduleFrameWrite() } // startFrameWrite starts a goroutine to write wr (in a separate // goroutine since that might block on the network), and updates the // serve goroutine's state about the world, updated from info in wr. func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { sc.serveG.check() if sc.writingFrame { panic("internal error: can only be writing one frame at a time") } st := wr.stream if st != nil { switch st.state { case stateHalfClosedLocal: switch wr.write.(type) { case StreamError, handlerPanicRST, writeWindowUpdate: // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE // in this state. (We never send PRIORITY from the server, so that is not checked.) default: panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) } case stateClosed: panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) } } if wpp, ok := wr.write.(*writePushPromise); ok { var err error wpp.promisedID, err = wpp.allocatePromisedID() if err != nil { sc.writingFrameAsync = false wr.replyToWriter(err) return } } sc.writingFrame = true sc.needsFrameFlush = true if wr.write.staysWithinBuffer(sc.bw.Available()) { sc.writingFrameAsync = false err := wr.write.writeFrame(sc) sc.wroteFrame(frameWriteResult{wr, err}) } else { sc.writingFrameAsync = true go sc.writeFrameAsync(wr) } } // errHandlerPanicked is the error given to any callers blocked in a read from // Request.Body when the main goroutine panics. Since most handlers read in the // the main ServeHTTP goroutine, this will show up rarely. var errHandlerPanicked = errors.New("http2: handler panicked") // wroteFrame is called on the serve goroutine with the result of // whatever happened on writeFrameAsync. func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.serveG.check() if !sc.writingFrame { panic("internal error: expected to be already writing a frame") } sc.writingFrame = false sc.writingFrameAsync = false wr := res.wr if writeEndsStream(wr.write) { st := wr.stream if st == nil { panic("internal error: expecting non-nil stream") } switch st.state { case stateOpen: // Here we would go to stateHalfClosedLocal in // theory, but since our handler is done and // the net/http package provides no mechanism // for closing a ResponseWriter while still // reading data (see possible TODO at top of // this file), we go into closed state here // anyway, after telling the peer we're // hanging up on them. We'll transition to // stateClosed after the RST_STREAM frame is // written. st.state = stateHalfClosedLocal sc.resetStream(streamError(st.id, ErrCodeCancel)) case stateHalfClosedRemote: sc.closeStream(st, errHandlerComplete) } } else { switch v := wr.write.(type) { case StreamError: // st may be unknown if the RST_STREAM was generated to reject bad input. if st, ok := sc.streams[v.StreamID]; ok { sc.closeStream(st, v) } case handlerPanicRST: sc.closeStream(wr.stream, errHandlerPanicked) } } // Reply (if requested) to unblock the ServeHTTP goroutine. wr.replyToWriter(res.err) sc.scheduleFrameWrite() } // scheduleFrameWrite tickles the frame writing scheduler. 
// // If a frame is already being written, nothing happens. This will be called again // when the frame is done being written. // // If a frame isn't being written we need to send one, the best frame // to send is selected, preferring first things that aren't // stream-specific (e.g. ACKing settings), and then finding the // highest priority stream. // // If a frame isn't being written and there's nothing else to send, we // flush the write buffer. func (sc *serverConn) scheduleFrameWrite() { sc.serveG.check() if sc.writingFrame || sc.inFrameScheduleLoop { return } sc.inFrameScheduleLoop = true for !sc.writingFrameAsync { if sc.needToSendGoAway { sc.needToSendGoAway = false sc.startFrameWrite(FrameWriteRequest{ write: &writeGoAway{ maxStreamID: sc.maxClientStreamID, code: sc.goAwayCode, }, }) continue } if sc.needToSendSettingsAck { sc.needToSendSettingsAck = false sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) continue } if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { if wr, ok := sc.writeSched.Pop(); ok { sc.startFrameWrite(wr) continue } } if sc.needsFrameFlush { sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) sc.needsFrameFlush = false // after startFrameWrite, since it sets this true continue } break } sc.inFrameScheduleLoop = false } // startGracefulShutdown sends a GOAWAY with ErrCodeNo to tell the // client we're gracefully shutting down. The connection isn't closed // until all current streams are done. func (sc *serverConn) startGracefulShutdown() { sc.goAwayIn(ErrCodeNo, 0) } func (sc *serverConn) goAway(code ErrCode) { sc.serveG.check() var forceCloseIn time.Duration if code != ErrCodeNo { forceCloseIn = 250 * time.Millisecond } else { // TODO: configurable forceCloseIn = 1 * time.Second } sc.goAwayIn(code, forceCloseIn) } func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { sc.serveG.check() if sc.inGoAway { return } if forceCloseIn != 0 { sc.shutDownIn(forceCloseIn) } sc.inGoAway = true sc.needToSendGoAway = true sc.goAwayCode = code sc.scheduleFrameWrite() } func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() sc.shutdownTimer = time.NewTimer(d) sc.shutdownTimerCh = sc.shutdownTimer.C } func (sc *serverConn) resetStream(se StreamError) { sc.serveG.check() sc.writeFrame(FrameWriteRequest{write: se}) if st, ok := sc.streams[se.StreamID]; ok { st.resetQueued = true } } // processFrameFromReader processes the serve loop's read from readFrameCh from the // frame-reading goroutine. // processFrameFromReader returns whether the connection should be kept open. func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.serveG.check() err := res.err if err != nil { if err == ErrFrameTooLarge { sc.goAway(ErrCodeFrameSize) return true // goAway will close the loop } clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) if clientGone { // TODO: could we also get into this state if // the peer does a half close // (e.g. CloseWrite) because they're done // sending frames but they're still wanting // our open replies? Investigate. // TODO: add CloseWrite to crypto/tls.Conn first // so we have a way to test this? I suppose // just for testing we could have a non-TLS mode. 
return false } } else { f := res.f if VerboseLogs { sc.vlogf("http2: server read frame %v", summarizeFrame(f)) } err = sc.processFrame(f) if err == nil { return true } } switch ev := err.(type) { case StreamError: sc.resetStream(ev) return true case goAwayFlowError: sc.goAway(ErrCodeFlowControl) return true case ConnectionError: sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown default: if res.err != nil { sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) } else { sc.logf("http2: server closing client connection: %v", err) } return false } } func (sc *serverConn) processFrame(f Frame) error { sc.serveG.check() // First frame received must be SETTINGS. if !sc.sawFirstSettings { if _, ok := f.(*SettingsFrame); !ok { return ConnectionError(ErrCodeProtocol) } sc.sawFirstSettings = true } switch f := f.(type) { case *SettingsFrame: return sc.processSettings(f) case *MetaHeadersFrame: return sc.processHeaders(f) case *WindowUpdateFrame: return sc.processWindowUpdate(f) case *PingFrame: return sc.processPing(f) case *DataFrame: return sc.processData(f) case *RSTStreamFrame: return sc.processResetStream(f) case *PriorityFrame: return sc.processPriority(f) case *GoAwayFrame: return sc.processGoAway(f) case *PushPromiseFrame: // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. return ConnectionError(ErrCodeProtocol) default: sc.vlogf("http2: server ignoring frame: %v", f.Header()) return nil } } func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil } if f.StreamID != 0 { // "PING frames are not associated with any individual // stream. If a PING frame is received with a stream // identifier field value other than 0x0, the recipient MUST // respond with a connection error (Section 5.4.1) of type // PROTOCOL_ERROR." return ConnectionError(ErrCodeProtocol) } if sc.inGoAway && sc.goAwayCode != ErrCodeNo { return nil } sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) return nil } func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { sc.serveG.check() switch { case f.StreamID != 0: // stream-level flow control state, st := sc.state(f.StreamID) if state == stateIdle { // Section 5.1: "Receiving any frame other than HEADERS // or PRIORITY on a stream in this state MUST be // treated as a connection error (Section 5.4.1) of // type PROTOCOL_ERROR." return ConnectionError(ErrCodeProtocol) } if st == nil { // "WINDOW_UPDATE can be sent by a peer that has sent a // frame bearing the END_STREAM flag. This means that a // receiver could receive a WINDOW_UPDATE frame on a "half // closed (remote)" or "closed" stream. A receiver MUST // NOT treat this as an error, see Section 5.1." return nil } if !st.flow.add(int32(f.Increment)) { return streamError(f.StreamID, ErrCodeFlowControl) } default: // connection-level flow control if !sc.flow.add(int32(f.Increment)) { return goAwayFlowError{} } } sc.scheduleFrameWrite() return nil } func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { sc.serveG.check() state, st := sc.state(f.StreamID) if state == stateIdle { // 6.4 "RST_STREAM frames MUST NOT be sent for a // stream in the "idle" state. 
If a RST_STREAM frame // identifying an idle stream is received, the // recipient MUST treat this as a connection error // (Section 5.4.1) of type PROTOCOL_ERROR. return ConnectionError(ErrCodeProtocol) } if st != nil { st.cancelCtx() sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) } return nil } func (sc *serverConn) closeStream(st *stream, err error) { sc.serveG.check() if st.state == stateIdle || st.state == stateClosed { panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) } st.state = stateClosed if st.writeDeadline != nil { st.writeDeadline.Stop() } if st.isPushed() { sc.curPushedStreams-- } else { sc.curClientStreams-- } delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout != 0 { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { sc.startGracefulShutdown() } } if p := st.body; p != nil { // Return any buffered unread bytes worth of conn-level flow control. // See golang.org/issue/16481 sc.sendWindowUpdate(nil, p.Len()) p.CloseWithError(err) } st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } func (sc *serverConn) processSettings(f *SettingsFrame) error { sc.serveG.check() if f.IsAck() { sc.unackedSettings-- if sc.unackedSettings < 0 { // Why is the peer ACKing settings we never sent? // The spec doesn't mention this case, but // hang up on them anyway. return ConnectionError(ErrCodeProtocol) } return nil } if err := f.ForeachSetting(sc.processSetting); err != nil { return err } sc.needToSendSettingsAck = true sc.scheduleFrameWrite() return nil } func (sc *serverConn) processSetting(s Setting) error { sc.serveG.check() if err := s.Valid(); err != nil { return err } if VerboseLogs { sc.vlogf("http2: server processing setting %v", s) } switch s.ID { case SettingHeaderTableSize: sc.headerTableSize = s.Val sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) case SettingEnablePush: sc.pushEnabled = s.Val != 0 case SettingMaxConcurrentStreams: sc.clientMaxStreams = s.Val case SettingInitialWindowSize: return sc.processSettingInitialWindowSize(s.Val) case SettingMaxFrameSize: sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST // ignore that setting." if VerboseLogs { sc.vlogf("http2: server ignoring unknown setting %v", s) } } return nil } func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { sc.serveG.check() // Note: val already validated to be within range by // processSetting's Valid call. // "A SETTINGS frame can alter the initial flow control window // size for all current streams. When the value of // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST // adjust the size of all stream flow control windows that it // maintains by the difference between the new value and the // old value." old := sc.initialStreamSendWindowSize sc.initialStreamSendWindowSize = int32(val) growth := int32(val) - old // may be negative for _, st := range sc.streams { if !st.flow.add(growth) { // 6.9.2 Initial Flow Control Window Size // "An endpoint MUST treat a change to // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow // control window to exceed the maximum size as a // connection error (Section 5.4.1) of type // FLOW_CONTROL_ERROR." 
return ConnectionError(ErrCodeFlowControl) } } return nil } func (sc *serverConn) processData(f *DataFrame) error { sc.serveG.check() if sc.inGoAway && sc.goAwayCode != ErrCodeNo { return nil } data := f.Data() // "If a DATA frame is received whose stream is not in "open" // or "half closed (local)" state, the recipient MUST respond // with a stream error (Section 5.4.2) of type STREAM_CLOSED." id := f.Header().StreamID state, st := sc.state(id) if id == 0 || state == stateIdle { // Section 5.1: "Receiving any frame other than HEADERS // or PRIORITY on a stream in this state MUST be // treated as a connection error (Section 5.4.1) of // type PROTOCOL_ERROR." return ConnectionError(ErrCodeProtocol) } if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { // This includes sending a RST_STREAM if the stream is // in stateHalfClosedLocal (which currently means that // the http.Handler returned, so it's done reading & // done writing). Try to stop the client from sending // more DATA. // But still enforce their connection-level flow control, // and return any flow control bytes since we're not going // to consume them. if sc.inflow.available() < int32(f.Length) { return streamError(id, ErrCodeFlowControl) } // Deduct the flow control from inflow, since we're // going to immediately add it back in // sendWindowUpdate, which also schedules sending the // frames. sc.inflow.take(int32(f.Length)) sc.sendWindowUpdate(nil, int(f.Length)) // conn-level if st != nil && st.resetQueued { // Already have a stream error in flight. Don't send another. return nil } return streamError(id, ErrCodeStreamClosed) } if st.body == nil { panic("internal error: should have a body in this state") } // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) return streamError(id, ErrCodeStreamClosed) } if f.Length > 0 { // Check whether the client has flow control quota. if st.inflow.available() < int32(f.Length) { return streamError(id, ErrCodeFlowControl) } st.inflow.take(int32(f.Length)) if len(data) > 0 { wrote, err := st.body.Write(data) if err != nil { return streamError(id, ErrCodeStreamClosed) } if wrote != len(data) { panic("internal error: bad Writer") } st.bodyBytes += int64(len(data)) } // Return any padded flow control now, since we won't // refund it later on body reads. if pad := int32(f.Length) - int32(len(data)); pad > 0 { sc.sendWindowUpdate32(nil, pad) sc.sendWindowUpdate32(st, pad) } } if f.StreamEnded() { st.endStream() } return nil } func (sc *serverConn) processGoAway(f *GoAwayFrame) error { sc.serveG.check() if f.ErrCode != ErrCodeNo { sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) } else { sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) } sc.startGracefulShutdown() // http://tools.ietf.org/html/rfc7540#section-6.8 // We should not create any new streams, which means we should disable push. sc.pushEnabled = false return nil } // isPushed reports whether the stream is server-initiated. func (st *stream) isPushed() bool { return st.id%2 == 0 } // endStream closes a Request.Body's pipe. It is called when a DATA // frame says a request body is over (or after trailers). 
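
// Illustrative sketch, not part of the upstream golang.org/x/net/http2
// source: processData above deducts the *entire* frame length (payload
// plus padding) from the receive window as soon as a DATA frame arrives,
// and immediately refunds whatever will not be delivered to the handler,
// e.g. the padding, or the whole frame when the stream is already closed.
// The sketchInflow / sketchOnData names below are hypothetical; they only
// mirror that bookkeeping, while the real server also schedules the
// corresponding WINDOW_UPDATE frames via sendWindowUpdate.
type sketchInflow struct {
	avail int32 // bytes the peer may still send
}

func (f *sketchInflow) take(n int32) bool {
	if n > f.avail {
		return false // peer overran the advertised window
	}
	f.avail -= n
	return true
}

// refund restores window credit; in the server this is the point where a
// WINDOW_UPDATE of n bytes would be queued.
func (f *sketchInflow) refund(n int32) {
	f.avail += n
}

// sketchOnData applies the rule from processData to a single frame: the
// whole frame length counts against flow control even if only dataLen
// bytes of payload are delivered, so the padding is handed back right away.
func sketchOnData(f *sketchInflow, frameLen, dataLen int32) error {
	if !f.take(frameLen) {
		return errors.New("flow control violated")
	}
	if pad := frameLen - dataLen; pad > 0 {
		f.refund(pad)
	}
	return nil
}
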
func (st *stream) endStream() { sc := st.sc sc.serveG.check() if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", st.declBodyBytes, st.bodyBytes)) } else { st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) st.body.CloseWithError(io.EOF) } st.state = stateHalfClosedRemote } // copyTrailersToHandlerRequest is run in the Handler's goroutine in // its Request.Body.Read just before it gets io.EOF. func (st *stream) copyTrailersToHandlerRequest() { for k, vv := range st.trailer { if _, ok := st.reqTrailer[k]; ok { // Only copy it over it was pre-declared. st.reqTrailer[k] = vv } } } // onWriteTimeout is run on its own goroutine (from time.AfterFunc) // when the stream's WriteTimeout has fired. func (st *stream) onWriteTimeout() { st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) } func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { sc.serveG.check() id := f.StreamID if sc.inGoAway { // Ignore. return nil } // http://tools.ietf.org/html/rfc7540#section-5.1.1 // Streams initiated by a client MUST use odd-numbered stream // identifiers. [...] An endpoint that receives an unexpected // stream identifier MUST respond with a connection error // (Section 5.4.1) of type PROTOCOL_ERROR. if id%2 != 1 { return ConnectionError(ErrCodeProtocol) } // A HEADERS frame can be used to create a new stream or // send a trailer for an open one. If we already have a stream // open, let it process its own HEADERS frame (trailers at this // point, if it's valid). if st := sc.streams[f.StreamID]; st != nil { if st.resetQueued { // We're sending RST_STREAM to close the stream, so don't bother // processing this frame. return nil } return st.processTrailerHeaders(f) } // [...] The identifier of a newly established stream MUST be // numerically greater than all streams that the initiating // endpoint has opened or reserved. [...] An endpoint that // receives an unexpected stream identifier MUST respond with // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. if id <= sc.maxClientStreamID { return ConnectionError(ErrCodeProtocol) } sc.maxClientStreamID = id if sc.idleTimer != nil { sc.idleTimer.Stop() } // http://tools.ietf.org/html/rfc7540#section-5.1.2 // [...] Endpoints MUST NOT exceed the limit set by their peer. An // endpoint that receives a HEADERS frame that causes their // advertised concurrent stream limit to be exceeded MUST treat // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR // or REFUSED_STREAM. if sc.curClientStreams+1 > sc.advMaxStreams { if sc.unackedSettings == 0 { // They should know better. return streamError(id, ErrCodeProtocol) } // Assume it's a network race, where they just haven't // received our last SETTINGS update. But actually // this can't happen yet, because we don't yet provide // a way for users to adjust server parameters at // runtime. 
return streamError(id, ErrCodeRefusedStream) } initialState := stateOpen if f.StreamEnded() { initialState = stateHalfClosedRemote } st := sc.newStream(id, 0, initialState) if f.HasPriority() { if err := checkPriority(f.StreamID, f.Priority); err != nil { return err } sc.writeSched.AdjustStream(st.id, f.Priority) } rw, req, err := sc.newWriterAndRequest(st, f) if err != nil { return err } st.reqTrailer = req.Trailer if st.reqTrailer != nil { st.trailer = make(http.Header) } st.body = req.Body.(*requestBody).pipe // may be nil st.declBodyBytes = req.ContentLength handler := sc.handler.ServeHTTP if f.Truncated { // Their header list was too long. Send a 431 error. handler = handleHeaderListTooLong } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { handler = new400Handler(err) } // The net/http package sets the read deadline from the // http.Server.ReadTimeout during the TLS handshake, but then // passes the connection off to us with the deadline already // set. Disarm it here after the request headers are read, // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout != 0 { sc.conn.SetReadDeadline(time.Time{}) } go sc.runHandler(rw, req, handler) return nil } func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { sc := st.sc sc.serveG.check() if st.gotTrailerHeader { return ConnectionError(ErrCodeProtocol) } st.gotTrailerHeader = true if !f.StreamEnded() { return streamError(st.id, ErrCodeProtocol) } if len(f.PseudoFields()) > 0 { return streamError(st.id, ErrCodeProtocol) } if st.trailer != nil { for _, hf := range f.RegularFields() { key := sc.canonicalHeader(hf.Name) if !ValidTrailerHeader(key) { // TODO: send more details to the peer somehow. But http2 has // no way to send debug data at a stream level. Discuss with // HTTP folk. return streamError(st.id, ErrCodeProtocol) } st.trailer[key] = append(st.trailer[key], hf.Value) } } st.endStream() return nil } func checkPriority(streamID uint32, p PriorityParam) error { if streamID == p.StreamDep { // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." // Section 5.3.3 says that a stream can depend on one of its dependencies, // so it's only self-dependencies that are forbidden. 
return streamError(streamID, ErrCodeProtocol) } return nil } func (sc *serverConn) processPriority(f *PriorityFrame) error { if sc.inGoAway { return nil } if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { return err } sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) return nil } func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { sc.serveG.check() if id == 0 { panic("internal error: cannot create stream with id 0") } ctx, cancelCtx := contextWithCancel(sc.baseCtx) st := &stream{ sc: sc, id: id, state: state, ctx: ctx, cancelCtx: cancelCtx, } st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) st.inflow.conn = &sc.inflow // link to conn-level counter st.inflow.add(sc.srv.initialStreamRecvWindowSize()) if sc.hs.WriteTimeout != 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) if st.isPushed() { sc.curPushedStreams++ } else { sc.curClientStreams++ } if sc.curOpenStreams() == 1 { sc.setConnState(http.StateActive) } return st } func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() rp := requestParam{ method: f.PseudoValue("method"), scheme: f.PseudoValue("scheme"), authority: f.PseudoValue("authority"), path: f.PseudoValue("path"), } isConnect := rp.method == "CONNECT" if isConnect { if rp.path != "" || rp.scheme != "" || rp.authority == "" { return nil, nil, streamError(f.StreamID, ErrCodeProtocol) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected // MUST be treated as a stream error (Section 5.4.2) // of type PROTOCOL_ERROR." // // 8.1.2.3 Request Pseudo-Header Fields // "All HTTP/2 requests MUST include exactly one valid // value for the :method, :scheme, and :path // pseudo-header fields" return nil, nil, streamError(f.StreamID, ErrCodeProtocol) } bodyOpen := !f.StreamEnded() if rp.method == "HEAD" && bodyOpen { // HEAD requests can't have bodies return nil, nil, streamError(f.StreamID, ErrCodeProtocol) } rp.header = make(http.Header) for _, hf := range f.RegularFields() { rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) } if rp.authority == "" { rp.authority = rp.header.Get("Host") } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) if err != nil { return nil, nil, err } if bodyOpen { if vv, ok := rp.header["Content-Length"]; ok { req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) } else { req.ContentLength = -1 } req.Body.(*requestBody).pipe = &pipe{ b: &dataBuffer{expected: req.ContentLength}, } } return rw, req, nil } type requestParam struct { method string scheme, authority, path string header http.Header } func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { sc.serveG.check() var tlsState *tls.ConnectionState // nil if not scheme https if rp.scheme == "https" { tlsState = sc.tlsState } needsContinue := rp.header.Get("Expect") == "100-continue" if needsContinue { rp.header.Del("Expect") } // Merge Cookie headers into one "; "-delimited value. 
if cookies := rp.header["Cookie"]; len(cookies) > 1 { rp.header.Set("Cookie", strings.Join(cookies, "; ")) } // Setup Trailers var trailer http.Header for _, v := range rp.header["Trailer"] { for _, key := range strings.Split(v, ",") { key = http.CanonicalHeaderKey(strings.TrimSpace(key)) switch key { case "Transfer-Encoding", "Trailer", "Content-Length": // Bogus. (copy of http1 rules) // Ignore. default: if trailer == nil { trailer = make(http.Header) } trailer[key] = nil } } } delete(rp.header, "Trailer") var url_ *url.URL var requestURI string if rp.method == "CONNECT" { url_ = &url.URL{Host: rp.authority} requestURI = rp.authority // mimic HTTP/1 server behavior } else { var err error url_, err = url.ParseRequestURI(rp.path) if err != nil { return nil, nil, streamError(st.id, ErrCodeProtocol) } requestURI = rp.path } body := &requestBody{ conn: sc, stream: st, needsContinue: needsContinue, } req := &http.Request{ Method: rp.method, URL: url_, RemoteAddr: sc.remoteAddrStr, Header: rp.header, RequestURI: requestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, Host: rp.authority, Body: body, Trailer: trailer, } req = requestWithContext(req, st.ctx) rws := responseWriterStatePool.Get().(*responseWriterState) bwSave := rws.bw *rws = responseWriterState{} // zero all the fields rws.conn = sc rws.bw = bwSave rws.bw.Reset(chunkWriter{rws}) rws.stream = st rws.req = req rws.body = body rw := &responseWriter{rws: rws} return rw, req, nil } // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { didPanic := true defer func() { rw.rws.stream.cancelCtx() if didPanic { e := recover() sc.writeFrameFromHandler(FrameWriteRequest{ write: handlerPanicRST{rw.rws.stream.id}, stream: rw.rws.stream, }) // Same as net/http: if shouldLogPanic(e) { const size = 64 << 10 buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) } return } rw.handlerDone() }() handler(rw, req) didPanic = false } func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { // 10.5.1 Limits on Header Block Size: // .. "A server that receives a larger header block than it is // willing to handle can send an HTTP 431 (Request Header Fields Too // Large) status code" const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ w.WriteHeader(statusRequestHeaderFieldsTooLarge) io.WriteString(w, "

<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") } // called from handler goroutines. // h may be nil. func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { sc.serveG.checkNotOn() // NOT on var errc chan error if headerData.h != nil { // If there's a header map (which we don't own), so we have to block on // waiting for this frame to be written, so an http.Flush mid-handler // writes out the correct value of keys, before a handler later potentially // mutates it. errc = errChanPool.Get().(chan error) } if err := sc.writeFrameFromHandler(FrameWriteRequest{ write: headerData, stream: st, done: errc, }); err != nil { return err } if errc != nil { select { case err := <-errc: errChanPool.Put(errc) return err case <-sc.doneServing: return errClientDisconnected case <-st.cw: return errStreamClosed } } return nil } // called from handler goroutines. func (sc *serverConn) write100ContinueHeaders(st *stream) { sc.writeFrameFromHandler(FrameWriteRequest{ write: write100ContinueHeadersFrame{st.id}, stream: st, }) } // A bodyReadMsg tells the server loop that the http.Handler read n // bytes of the DATA from the client on the given stream. type bodyReadMsg struct { st *stream n int } // called from handler goroutines. // Notes that the handler for the given stream ID read n bytes of its body // and schedules flow control tokens to be sent. func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { sc.serveG.checkNotOn() // NOT on if n > 0 { select { case sc.bodyReadCh <- bodyReadMsg{st, n}: case <-sc.doneServing: } } } func (sc *serverConn) noteBodyRead(st *stream, n int) { sc.serveG.check() sc.sendWindowUpdate(nil, n) // conn-level if st.state != stateHalfClosedRemote && st.state != stateClosed { // Don't send this WINDOW_UPDATE if the stream is closed // remotely. sc.sendWindowUpdate(st, n) } } // st may be nil for conn-level func (sc *serverConn) sendWindowUpdate(st *stream, n int) { sc.serveG.check() // "The legal range for the increment to the flow control // window is 1 to 2^31-1 (2,147,483,647) octets." // A Go Read call on 64-bit machines could in theory read // a larger Read than this. Very unlikely, but we handle it here // rather than elsewhere for now. const maxUint31 = 1<<31 - 1 for n >= maxUint31 { sc.sendWindowUpdate32(st, maxUint31) n -= maxUint31 } sc.sendWindowUpdate32(st, int32(n)) } // st may be nil for conn-level func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { sc.serveG.check() if n == 0 { return } if n < 0 { panic("negative update") } var streamID uint32 if st != nil { streamID = st.id } sc.writeFrame(FrameWriteRequest{ write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, stream: st, }) var ok bool if st == nil { ok = sc.inflow.add(n) } else { ok = st.inflow.add(n) } if !ok { panic("internal error; sent too many window updates without decrements?") } } // requestBody is the Handler's Request.Body type. // Read and Close may be called concurrently. 
type requestBody struct { stream *stream conn *serverConn closed bool // for use by Close only sawEOF bool // for use by Read only pipe *pipe // non-nil if we have a HTTP entity message body needsContinue bool // need to send a 100-continue } func (b *requestBody) Close() error { if b.pipe != nil && !b.closed { b.pipe.BreakWithError(errClosedBody) } b.closed = true return nil } func (b *requestBody) Read(p []byte) (n int, err error) { if b.needsContinue { b.needsContinue = false b.conn.write100ContinueHeaders(b.stream) } if b.pipe == nil || b.sawEOF { return 0, io.EOF } n, err = b.pipe.Read(p) if err == io.EOF { b.sawEOF = true } if b.conn == nil && inTests { return } b.conn.noteBodyReadFromHandler(b.stream, n, err) return } // responseWriter is the http.ResponseWriter implementation. It's // intentionally small (1 pointer wide) to minimize garbage. The // responseWriterState pointer inside is zeroed at the end of a // request (in handlerDone) and calls on the responseWriter thereafter // simply crash (caller's mistake), but the much larger responseWriterState // and buffers are reused between multiple requests. type responseWriter struct { rws *responseWriterState } // Optional http.ResponseWriter interfaces implemented. var ( _ http.CloseNotifier = (*responseWriter)(nil) _ http.Flusher = (*responseWriter)(nil) _ stringWriter = (*responseWriter)(nil) ) type responseWriterState struct { // immutable within a request: stream *stream req *http.Request body *requestBody // to close at end of request, if DATA frames didn't conn *serverConn // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} // mutated by http.Handler goroutine: handlerHeader http.Header // nil until called snapHeader http.Header // snapshot of handlerHeader at WriteHeader time trailers []string // set in writeChunk status int // status code passed to WriteHeader wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 closeNotifierMu sync.Mutex // guards closeNotifierCh closeNotifierCh chan bool // nil until first used } type chunkWriter struct{ rws *responseWriterState } func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } // declareTrailer is called for each Trailer header when the // response header is written. It notes that a header will need to be // written in the trailers at the end of the response. func (rws *responseWriterState) declareTrailer(k string) { k = http.CanonicalHeaderKey(k) if !ValidTrailerHeader(k) { // Forbidden by RFC 2616 14.40. rws.conn.logf("ignoring invalid trailer %q", k) return } if !strSliceContains(rws.trailers, k) { rws.trailers = append(rws.trailers, k) } } // writeChunk writes chunks from the bufio.Writer. But because // bufio.Writer may bypass its chunking, sometimes p may be // arbitrarily large. // // writeChunk is also responsible (on the first chunk) for sending the // HEADER response. 
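
// Illustrative sketch, not part of the upstream source: the responseWriter
// comment above describes the reuse scheme, a one-pointer wrapper around a
// large responseWriterState that is zeroed between requests while its
// bufio.Writer (and so its buffer) is kept. The sketch* names below are
// hypothetical and show that reset-but-keep-the-buffer pooling pattern in
// isolation.
type sketchPooledState struct {
	bw   *bufio.Writer     // reused across requests
	data map[string]string // per-request state, rebuilt each time
}

var sketchStatePool = sync.Pool{
	New: func() interface{} {
		return &sketchPooledState{bw: bufio.NewWriterSize(nil, 4<<10)}
	},
}

func sketchGetState(w io.Writer) *sketchPooledState {
	st := sketchStatePool.Get().(*sketchPooledState)
	bwSave := st.bw
	*st = sketchPooledState{} // zero every field...
	st.bw = bwSave            // ...but keep the buffered writer
	st.bw.Reset(w)            // point the kept buffer at the new destination
	st.data = make(map[string]string)
	return st
}

func sketchPutState(st *sketchPooledState) {
	sketchStatePool.Put(st)
}
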
func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if !rws.wroteHeader { rws.writeHeader(200) } isHeadResp := rws.req.Method == "HEAD" if !rws.sentHeader { rws.sentHeader = true var ctype, clen string if clen = rws.snapHeader.Get("Content-Length"); clen != "" { rws.snapHeader.Del("Content-Length") clen64, err := strconv.ParseInt(clen, 10, 64) if err == nil && clen64 >= 0 { rws.sentContentLen = clen64 } else { clen = "" } } if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { clen = strconv.Itoa(len(p)) } _, hasContentType := rws.snapHeader["Content-Type"] if !hasContentType && bodyAllowedForStatus(rws.status) { ctype = http.DetectContentType(p) } var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. date = time.Now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { foreachHeaderElement(v, rws.declareTrailer) } endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: rws.status, h: rws.snapHeader, endStream: endStream, contentType: ctype, contentLength: clen, date: date, }) if err != nil { return 0, err } if endStream { return 0, nil } } if isHeadResp { return len(p), nil } if len(p) == 0 && !rws.handlerDone { return 0, nil } if rws.handlerDone { rws.promoteUndeclaredTrailers() } endStream := rws.handlerDone && !rws.hasTrailers() if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { return 0, err } } if rws.handlerDone && rws.hasTrailers() { err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, h: rws.handlerHeader, trailers: rws.trailers, endStream: true, }) return len(p), err } return len(p), nil } // TrailerPrefix is a magic prefix for ResponseWriter.Header map keys // that, if present, signals that the map entry is actually for // the response trailers, and not the response headers. The prefix // is stripped after the ServeHTTP call finishes and the values are // sent in the trailers. // // This mechanism is intended only for trailers that are not known // prior to the headers being written. If the set of trailers is fixed // or known before the header is written, the normal Go trailers mechanism // is preferred: // https://golang.org/pkg/net/http/#ResponseWriter // https://golang.org/pkg/net/http/#example_ResponseWriter_trailers const TrailerPrefix = "Trailer:" // promoteUndeclaredTrailers permits http.Handlers to set trailers // after the header has already been flushed. Because the Go // ResponseWriter interface has no way to set Trailers (only the // Header), and because we didn't want to expand the ResponseWriter // interface, and because nobody used trailers, and because RFC 2616 // says you SHOULD (but not must) predeclare any trailers in the // header, the official ResponseWriter rules said trailers in Go must // be predeclared, and then we reuse the same ResponseWriter.Header() // map to mean both Headers and Trailers. When it's time to write the // Trailers, we pick out the fields of Headers that were declared as // trailers. That worked for a while, until we found the first major // user of Trailers in the wild: gRPC (using them only over http2), // and gRPC libraries permit setting trailers mid-stream without // predeclarnig them. So: change of plans. 
We still permit the old // way, but we also permit this hack: if a Header() key begins with // "Trailer:", the suffix of that key is a Trailer. Because ':' is an // invalid token byte anyway, there is no ambiguity. (And it's already // filtered out) It's mildly hacky, but not terrible. // // This method runs after the Handler is done and promotes any Header // fields to be trailers. func (rws *responseWriterState) promoteUndeclaredTrailers() { for k, vv := range rws.handlerHeader { if !strings.HasPrefix(k, TrailerPrefix) { continue } trailerKey := strings.TrimPrefix(k, TrailerPrefix) rws.declareTrailer(trailerKey) rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv } if len(rws.trailers) > 1 { sorter := sorterPool.Get().(*sorter) sorter.SortStrings(rws.trailers) sorterPool.Put(sorter) } } func (w *responseWriter) Flush() { rws := w.rws if rws == nil { panic("Header called after Handler finished") } if rws.bw.Buffered() > 0 { if err := rws.bw.Flush(); err != nil { // Ignore the error. The frame writer already knows. return } } else { // The bufio.Writer won't call chunkWriter.Write // (writeChunk with zero bytes, so we have to do it // ourselves to force the HTTP response header and/or // final DATA frame (with END_STREAM) to be sent. rws.writeChunk(nil) } } func (w *responseWriter) CloseNotify() <-chan bool { rws := w.rws if rws == nil { panic("CloseNotify called after Handler finished") } rws.closeNotifierMu.Lock() ch := rws.closeNotifierCh if ch == nil { ch = make(chan bool, 1) rws.closeNotifierCh = ch cw := rws.stream.cw go func() { cw.Wait() // wait for close ch <- true }() } rws.closeNotifierMu.Unlock() return ch } func (w *responseWriter) Header() http.Header { rws := w.rws if rws == nil { panic("Header called after Handler finished") } if rws.handlerHeader == nil { rws.handlerHeader = make(http.Header) } return rws.handlerHeader } func (w *responseWriter) WriteHeader(code int) { rws := w.rws if rws == nil { panic("WriteHeader called after Handler finished") } rws.writeHeader(code) } func (rws *responseWriterState) writeHeader(code int) { if !rws.wroteHeader { rws.wroteHeader = true rws.status = code if len(rws.handlerHeader) > 0 { rws.snapHeader = cloneHeader(rws.handlerHeader) } } } func cloneHeader(h http.Header) http.Header { h2 := make(http.Header, len(h)) for k, vv := range h { vv2 := make([]string, len(vv)) copy(vv2, vv) h2[k] = vv2 } return h2 } // The Life Of A Write is like this: // // * Handler calls w.Write or w.WriteString -> // * -> rws.bw (*bufio.Writer) -> // * (Handler migth call Flush) // * -> chunkWriter{rws} // * -> responseWriterState.writeChunk(p []byte) // * -> responseWriterState.writeChunk (most of the magic; see comment there) func (w *responseWriter) Write(p []byte) (n int, err error) { return w.write(len(p), p, "") } func (w *responseWriter) WriteString(s string) (n int, err error) { return w.write(len(s), nil, s) } // either dataB or dataS is non-zero. 
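
// Illustrative sketch, not part of the upstream source: a handler can use
// the TrailerPrefix convention documented above to emit a trailer whose
// name is not known until after the body has been written. The handler and
// header names below are hypothetical.
func sketchTrailerPrefixHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/plain")
	io.WriteString(w, "hello\n") // the response header is sent on this first write
	// Set after the fact; promoteUndeclaredTrailers strips the prefix and
	// sends this as the trailer "X-Sketch-Checksum" once the handler returns.
	w.Header().Set(TrailerPrefix+"X-Sketch-Checksum", "2cf24dba")
}
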
func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { rws := w.rws if rws == nil { panic("Write called after Handler finished") } if !rws.wroteHeader { w.WriteHeader(200) } if !bodyAllowedForStatus(rws.status) { return 0, http.ErrBodyNotAllowed } rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { // TODO: send a RST_STREAM return 0, errors.New("http2: handler wrote more than declared Content-Length") } if dataB != nil { return rws.bw.Write(dataB) } else { return rws.bw.WriteString(dataS) } } func (w *responseWriter) handlerDone() { rws := w.rws rws.handlerDone = true w.Flush() w.rws = nil responseWriterStatePool.Put(rws) } // Push errors. var ( ErrRecursivePush = errors.New("http2: recursive push not allowed") ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") ) // pushOptions is the internal version of http.PushOptions, which we // cannot include here because it's only defined in Go 1.8 and later. type pushOptions struct { Method string Header http.Header } func (w *responseWriter) push(target string, opts pushOptions) error { st := w.rws.stream sc := st.sc sc.serveG.checkNotOn() // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." // http://tools.ietf.org/html/rfc7540#section-6.6 if st.isPushed() { return ErrRecursivePush } // Default options. if opts.Method == "" { opts.Method = "GET" } if opts.Header == nil { opts.Header = http.Header{} } wantScheme := "http" if w.rws.req.TLS != nil { wantScheme = "https" } // Validate the request. u, err := url.Parse(target) if err != nil { return err } if u.Scheme == "" { if !strings.HasPrefix(target, "/") { return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) } u.Scheme = wantScheme u.Host = w.rws.req.Host } else { if u.Scheme != wantScheme { return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) } if u.Host == "" { return errors.New("URL must have a host") } } for k := range opts.Header { if strings.HasPrefix(k, ":") { return fmt.Errorf("promised request headers cannot include pseudo header %q", k) } // These headers are meaningful only if the request has a body, // but PUSH_PROMISE requests cannot have a body. // http://tools.ietf.org/html/rfc7540#section-8.2 // Also disallow Host, since the promised URL must be absolute. 
switch strings.ToLower(k) { case "content-length", "content-encoding", "trailer", "te", "expect", "host": return fmt.Errorf("promised request headers cannot include %q", k) } } if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { return err } // The RFC effectively limits promised requests to GET and HEAD: // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" // http://tools.ietf.org/html/rfc7540#section-8.2 if opts.Method != "GET" && opts.Method != "HEAD" { return fmt.Errorf("method %q must be GET or HEAD", opts.Method) } msg := startPushRequest{ parent: st, method: opts.Method, url: u, header: cloneHeader(opts.Header), done: errChanPool.Get().(chan error), } select { case <-sc.doneServing: return errClientDisconnected case <-st.cw: return errStreamClosed case sc.wantStartPushCh <- msg: } select { case <-sc.doneServing: return errClientDisconnected case <-st.cw: return errStreamClosed case err := <-msg.done: errChanPool.Put(msg.done) return err } } type startPushRequest struct { parent *stream method string url *url.URL header http.Header done chan error } func (sc *serverConn) startPush(msg startPushRequest) { sc.serveG.check() // http://tools.ietf.org/html/rfc7540#section-6.6. // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that // is in either the "open" or "half-closed (remote)" state. if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { // responseWriter.Push checks that the stream is peer-initiaed. msg.done <- errStreamClosed return } // http://tools.ietf.org/html/rfc7540#section-6.6. if !sc.pushEnabled { msg.done <- http.ErrNotSupported return } // PUSH_PROMISE frames must be sent in increasing order by stream ID, so // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE // is written. Once the ID is allocated, we start the request handler. allocatePromisedID := func() (uint32, error) { sc.serveG.check() // Check this again, just in case. Technically, we might have received // an updated SETTINGS by the time we got around to writing this frame. if !sc.pushEnabled { return 0, http.ErrNotSupported } // http://tools.ietf.org/html/rfc7540#section-6.5.2. if sc.curPushedStreams+1 > sc.clientMaxStreams { return 0, ErrPushLimitReached } // http://tools.ietf.org/html/rfc7540#section-5.1.1. // Streams initiated by the server MUST use even-numbered identifiers. // A server that is unable to establish a new stream identifier can send a GOAWAY // frame so that the client is forced to open a new connection for new streams. if sc.maxPushPromiseID+2 >= 1<<31 { sc.startGracefulShutdown() return 0, ErrPushLimitReached } sc.maxPushPromiseID += 2 promisedID := sc.maxPushPromiseID // http://tools.ietf.org/html/rfc7540#section-8.2. // Strictly speaking, the new stream should start in "reserved (local)", then // transition to "half closed (remote)" after sending the initial HEADERS, but // we start in "half closed (remote)" for simplicity. // See further comments at the definition of stateHalfClosedRemote. promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ method: msg.method, scheme: msg.url.Scheme, authority: msg.url.Host, path: msg.url.RequestURI(), header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE }) if err != nil { // Should not happen, since we've already validated msg.url. 
panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) } go sc.runHandler(rw, req, sc.handler.ServeHTTP) return promisedID, nil } sc.writeFrame(FrameWriteRequest{ write: &writePushPromise{ streamID: msg.parent.id, method: msg.method, url: msg.url, h: msg.header, allocatePromisedID: allocatePromisedID, }, stream: msg.parent, done: msg.done, }) } // foreachHeaderElement splits v according to the "#rule" construction // in RFC 2616 section 2.1 and calls fn for each non-empty element. func foreachHeaderElement(v string, fn func(string)) { v = textproto.TrimString(v) if v == "" { return } if !strings.Contains(v, ",") { fn(v) return } for _, f := range strings.Split(v, ",") { if f = textproto.TrimString(f); f != "" { fn(f) } } } // From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 var connHeaders = []string{ "Connection", "Keep-Alive", "Proxy-Connection", "Transfer-Encoding", "Upgrade", } // checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, // per RFC 7540 Section 8.1.2.2. // The returned error is reported to users. func checkValidHTTP2RequestHeaders(h http.Header) error { for _, k := range connHeaders { if _, ok := h[k]; ok { return fmt.Errorf("request header %q is not valid in HTTP/2", k) } } te := h["Te"] if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) } return nil } func new400Handler(err error) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) } } // ValidTrailerHeader reports whether name is a valid header field name to appear // in trailers. // See: http://tools.ietf.org/html/rfc7230#section-4.1.2 func ValidTrailerHeader(name string) bool { name = http.CanonicalHeaderKey(name) if strings.HasPrefix(name, "If-") || badTrailer[name] { return false } return true } var badTrailer = map[string]bool{ "Authorization": true, "Cache-Control": true, "Connection": true, "Content-Encoding": true, "Content-Length": true, "Content-Range": true, "Content-Type": true, "Expect": true, "Host": true, "Keep-Alive": true, "Max-Forwards": true, "Pragma": true, "Proxy-Authenticate": true, "Proxy-Authorization": true, "Proxy-Connection": true, "Range": true, "Realm": true, "Te": true, "Trailer": true, "Transfer-Encoding": true, "Www-Authenticate": true, } // h1ServerShutdownChan returns a channel that will be closed when the // provided *http.Server wants to shut down. // // This is a somewhat hacky way to get at http1 innards. It works // when the http2 code is bundled into the net/http package in the // standard library. The alternatives ended up making the cmd/go tool // depend on http Servers. This is the lightest option for now. // This is tested via the TestServeShutdown* tests in net/http. func h1ServerShutdownChan(hs *http.Server) <-chan struct{} { if fn := testh1ServerShutdownChan; fn != nil { return fn(hs) } var x interface{} = hs type I interface { getDoneChan() <-chan struct{} } if hs, ok := x.(I); ok { return hs.getDoneChan() } return nil } // optional test hook for h1ServerShutdownChan. var testh1ServerShutdownChan func(hs *http.Server) <-chan struct{} // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives // disabled. See comments on h1ServerShutdownChan above for why // the code is written this way. 
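
// Illustrative sketch, not part of the upstream source:
// checkValidHTTP2RequestHeaders above rejects any request that still
// carries one of the connection-specific fields listed in connHeaders, and
// it only allows "TE: trailers". A proxy or other forwarder therefore has
// to strip those fields before re-sending a request over HTTP/2; the
// helper name below is hypothetical.
func sketchStripConnHeaders(h http.Header) {
	for _, k := range connHeaders {
		h.Del(k)
	}
	// "TE" may only carry the value "trailers" (or be absent) in HTTP/2.
	if v := h.Get("Te"); v != "" && v != "trailers" {
		h.Del("Te")
	}
}
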
func h1ServerKeepAlivesDisabled(hs *http.Server) bool { var x interface{} = hs type I interface { doKeepAlives() bool } if hs, ok := x.(I); ok { return !hs.doKeepAlives() } return false } transport.go000066400000000000000000001637571324746544700324030ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Transport code. package http2 import ( "bufio" "bytes" "compress/gzip" "crypto/rand" "crypto/tls" "errors" "fmt" "io" "io/ioutil" "log" "math" "net" "net/http" "sort" "strconv" "strings" "sync" "time" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" "golang.org/x/net/lex/httplex" ) const ( // transportDefaultConnFlow is how many connection-level flow control // tokens we give the server at start-up, past the default 64k. transportDefaultConnFlow = 1 << 30 // transportDefaultStreamFlow is how many stream-level flow // control tokens we announce to the peer, and how many bytes // we buffer per stream. transportDefaultStreamFlow = 4 << 20 // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send // a stream-level WINDOW_UPDATE for at a time. transportDefaultStreamMinRefresh = 4 << 10 defaultUserAgent = "Go-http-client/2.0" ) // Transport is an HTTP/2 Transport. // // A Transport internally caches connections to servers. It is safe // for concurrent use by multiple goroutines. type Transport struct { // DialTLS specifies an optional dial function for creating // TLS connections for requests. // // If DialTLS is nil, tls.Dial is used. // // If the returned net.Conn has a ConnectionState method like tls.Conn, // it will be used to set http.Response.TLS. DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) // TLSClientConfig specifies the TLS configuration to use with // tls.Client. If nil, the default configuration is used. TLSClientConfig *tls.Config // ConnPool optionally specifies an alternate connection pool to use. // If nil, the default is used. ConnPool ClientConnPool // DisableCompression, if true, prevents the Transport from // requesting compression with an "Accept-Encoding: gzip" // request header when the Request contains no existing // Accept-Encoding value. If the Transport requests gzip on // its own and gets a gzipped response, it's transparently // decoded in the Response.Body. However, if the user // explicitly requested gzip it is not automatically // uncompressed. DisableCompression bool // AllowHTTP, if true, permits HTTP/2 requests using the insecure, // plain-text "http" scheme. Note that this does not enable h2c support. AllowHTTP bool // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to // send in the initial settings frame. It is how many bytes // of response headers are allow. Unlike the http2 spec, zero here // means to use a default limit (currently 10MB). If you actually // want to advertise an ulimited value to the peer, Transport // interprets the highest possible value here (0xffffffff or 1<<32-1) // to mean no limit. MaxHeaderListSize uint32 // t1, if non-nil, is the standard library Transport using // this transport. Its settings are used (but not its // RoundTrip method, etc). 
t1 *http.Transport connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool } func (t *Transport) maxHeaderListSize() uint32 { if t.MaxHeaderListSize == 0 { return 10 << 20 } if t.MaxHeaderListSize == 0xffffffff { return 0 } return t.MaxHeaderListSize } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It requires Go 1.6 or later and returns an error if the net/http package is too old // or if t1 has already been HTTP/2-enabled. func ConfigureTransport(t1 *http.Transport) error { _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go return err } func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef } func (t *Transport) initConnPool() { if t.ConnPool != nil { t.connPoolOrDef = t.ConnPool } else { t.connPoolOrDef = &clientConnPool{t: t} } } // ClientConn is the state of a single HTTP/2 client connection to an // HTTP/2 server. type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls singleUse bool // whether being used for a single http.Request // readLoop goroutine fields: readerDone chan struct{} // closed on error readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never idleTimer *time.Timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes flow flow // our conn-level flow control quota (cs.flow is per stream) inflow flow // peer's conn-level flow control closed bool wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received goAwayDebug string // goAway frame's debug data, retained as a string streams map[uint32]*clientStream // client-initiated nextStreamID uint32 pings map[[8]byte]chan struct{} // in flight ping data to notification channel bw *bufio.Writer br *bufio.Reader fr *Framer lastActive time.Time // Settings from peer: (also guarded by mu) maxFrameSize uint32 maxConcurrentStreams uint32 initialWindowSize uint32 hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder freeBuf [][]byte wmu sync.Mutex // held while writing; acquire AFTER mu if holding both werr error // first write error that has occurred } // clientStream is the state for a single HTTP/2 stream. One of these // is created for each Transport.RoundTrip call. 
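
// Illustrative sketch, not part of the upstream source: ConfigureTransport
// above is the usual way to bolt this HTTP/2 Transport onto an existing
// net/http client. The function name and timeout below are hypothetical.
func sketchNewHTTP2Client() (*http.Client, error) {
	t1 := &http.Transport{TLSClientConfig: &tls.Config{}}
	if err := ConfigureTransport(t1); err != nil {
		// Fails on Go < 1.6 or if t1 has already been HTTP/2-enabled.
		return nil, err
	}
	return &http.Client{Transport: t1, Timeout: 30 * time.Second}, nil
}
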
type clientStream struct { cc *ClientConn req *http.Request trace *clientTrace // or nil ID uint32 resc chan resAndError bufPipe pipe // buffered pipe with the flow-controlled response payload startedWrite bool // started request body write; guarded by cc.mu requestedGzip bool on100 func() // optional code to run if get a 100 continue response flow flow // guarded by cc.mu inflow flow // guarded by cc.mu bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read readErr error // sticky read error; owned by transportResponseBody.Read stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu peerReset chan struct{} // closed on peer reset resetErr error // populated before peerReset is closed done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu // owned by clientConnReadLoop: firstByte bool // got the first response byte pastHeaders bool // got first MetaHeadersFrame (actual headers) pastTrailers bool // got optional second MetaHeadersFrame (trailers) trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer } // awaitRequestCancel runs in its own goroutine and waits for the user // to cancel a RoundTrip request, its context to expire, or for the // request to be done (any way it might be removed from the cc.streams // map: peer reset, successful completion, TCP connection breakage, // etc) func (cs *clientStream) awaitRequestCancel(req *http.Request) { ctx := reqContext(req) if req.Cancel == nil && ctx.Done() == nil { return } select { case <-req.Cancel: cs.cancelStream() cs.bufPipe.CloseWithError(errRequestCanceled) case <-ctx.Done(): cs.cancelStream() cs.bufPipe.CloseWithError(ctx.Err()) case <-cs.done: } } func (cs *clientStream) cancelStream() { cs.cc.mu.Lock() didReset := cs.didReset cs.didReset = true cs.cc.mu.Unlock() if !didReset { cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) } } // checkResetOrDone reports any error sent in a RST_STREAM frame by the // server, or errStreamClosed if the stream is complete. func (cs *clientStream) checkResetOrDone() error { select { case <-cs.peerReset: return cs.resetErr case <-cs.done: return errStreamClosed default: return nil } } func (cs *clientStream) abortRequestBodyWrite(err error) { if err == nil { panic("nil error") } cc := cs.cc cc.mu.Lock() cs.stopReqBody = err cc.cond.Broadcast() cc.mu.Unlock() } type stickyErrWriter struct { w io.Writer err *error } func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } n, err = sew.w.Write(p) *sew.err = err return } var ErrNoCachedConn = errors.New("http2: no cached connection was available") // RoundTripOpt are options for the Transport.RoundTripOpt method. type RoundTripOpt struct { // OnlyCachedConn controls whether RoundTripOpt may // create a new TCP connection. If set true and // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. OnlyCachedConn bool } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { return t.RoundTripOpt(req, RoundTripOpt{}) } // authorityAddr returns a given authority (a host/IP, or host:port / ip:port) // and returns a host:port. The port 443 is added if needed. 
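
// Illustrative sketch, not part of the upstream source: stickyErrWriter
// above remembers the first write error so that every later write on the
// connection fails fast with that same error (cc.werr). The hypothetical
// helper below shows the wrapper in isolation around a bufio.Writer, the
// same way newClientConn later wires up cc.bw.
func sketchStickyWrites(w io.Writer) error {
	var werr error
	bw := bufio.NewWriter(stickyErrWriter{w: w, err: &werr})
	bw.WriteString("first write")
	bw.Flush() // a failure here is remembered in werr by the wrapper
	bw.WriteString("second write")
	bw.Flush() // if werr is already set, this reports the same error
	return werr
}
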
func authorityAddr(scheme string, authority string) (addr string) { host, port, err := net.SplitHostPort(authority) if err != nil { // authority didn't have a port port = "443" if scheme == "http" { port = "80" } host = authority } if a, err := idna.ToASCII(host); err == nil { host = a } // IPv6 address literal, without a port: if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { return host + ":" + port } return net.JoinHostPort(host, port) } // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { return nil, errors.New("http2: unsupported scheme") } addr := authorityAddr(req.URL.Scheme, req.URL.Host) for { cc, err := t.connPool().GetClientConn(req, addr) if err != nil { t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } traceGotConn(req, cc) res, err := cc.RoundTrip(req) if err != nil { if req, err = shouldRetryRequest(req, err); err == nil { continue } } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err } return res, nil } } // CloseIdleConnections closes any connections which were previously // connected from previous requests but are now sitting idle. // It does not interrupt any connections currently in use. func (t *Transport) CloseIdleConnections() { if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { cp.closeIdleConnections() } } var ( errClientConnClosed = errors.New("http2: client conn is closed") errClientConnUnusable = errors.New("http2: client conn not usable") errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written") ) // shouldRetryRequest is called by RoundTrip when a request fails to get // response headers. It is always called with a non-nil error. // It returns either a request to retry (either the same request, or a // modified clone), or an error if the request can't be replayed. func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { switch err { default: return nil, err case errClientConnUnusable, errClientConnGotGoAway: return req, nil case errClientConnGotGoAwayAfterSomeReqBody: // If the Body is nil (or http.NoBody), it's safe to reuse // this request and its Body. if req.Body == nil || reqBodyIsNoBody(req.Body) { return req, nil } // Otherwise we depend on the Request having its GetBody // func defined. 
getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody if getBody == nil { return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error") } body, err := getBody() if err != nil { return nil, err } newReq := *req newReq.Body = body return &newReq, nil } } func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err } tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) if err != nil { return nil, err } return t.newClientConn(tconn, singleUse) } func (t *Transport) newTLSConfig(host string) *tls.Config { cfg := new(tls.Config) if t.TLSClientConfig != nil { *cfg = *cloneTLSConfig(t.TLSClientConfig) } if !strSliceContains(cfg.NextProtos, NextProtoTLS) { cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) } if cfg.ServerName == "" { cfg.ServerName = host } return cfg } func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { if t.DialTLS != nil { return t.DialTLS } return t.dialTLSDefault } func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { cn, err := tls.Dial(network, addr, cfg) if err != nil { return nil, err } if err := cn.Handshake(); err != nil { return nil, err } if !cfg.InsecureSkipVerify { if err := cn.VerifyHostname(cfg.ServerName); err != nil { return nil, err } } state := cn.ConnectionState() if p := state.NegotiatedProtocol; p != NextProtoTLS { return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) } if !state.NegotiatedProtocolIsMutual { return nil, errors.New("http2: could not negotiate protocol mutually") } return cn, nil } // disableKeepAlives reports whether connections should be closed as // soon as possible after handling the first request. func (t *Transport) disableKeepAlives() bool { return t.t1 != nil && t.t1.DisableKeepAlives } func (t *Transport) expectContinueTimeout() time.Duration { if t.t1 == nil { return 0 } return transportExpectContinueTimeout(t.t1) } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, false) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, readerDone: make(chan struct{}), nextStreamID: 1, maxFrameSize: 16 << 10, // spec default initialWindowSize: 65535, // spec default maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. streams: make(map[uint32]*clientStream), singleUse: singleUse, wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) } cc.cond = sync.NewCond(&cc.mu) cc.flow.add(int32(initialWindowSize)) // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on // henc in response to SETTINGS frames? 
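// cc.henc encodes request header blocks into cc.hbuf; encodeHeaders and
// encodeTrailers reset and drain that buffer per request while holding cc.mu.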
cc.henc = hpack.NewEncoder(&cc.hbuf) if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state } initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, } if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) cc.inflow.add(transportDefaultConnFlow + initialWindowSize) cc.bw.Flush() if cc.werr != nil { return nil, cc.werr } go cc.readLoop() return cc, nil } func (cc *ClientConn) setGoAway(f *GoAwayFrame) { cc.mu.Lock() defer cc.mu.Unlock() old := cc.goAway cc.goAway = f // Merge the previous and current GoAway error frames. if cc.goAwayDebug == "" { cc.goAwayDebug = string(f.DebugData()) } if old != nil && old.ErrCode != ErrCodeNo { cc.goAway.ErrCode = old.ErrCode } last := f.LastStreamID for streamID, cs := range cc.streams { if streamID > last { select { case cs.resc <- resAndError{err: errClientConnGotGoAway}: default: } } } } func (cc *ClientConn) CanTakeNewRequest() bool { cc.mu.Lock() defer cc.mu.Unlock() return cc.canTakeNewRequestLocked() } func (cc *ClientConn) canTakeNewRequestLocked() bool { if cc.singleUse && cc.nextStreamID > 1 { return false } return cc.goAway == nil && !cc.closed && int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && cc.nextStreamID < math.MaxInt32 } // onIdleTimeout is called from a time.AfterFunc goroutine. It will // only be called when we're idle, but because we're coming from a new // goroutine, there could be a new request coming in at the same time, // so this simply calls the synchronized closeIfIdle to shut down this // connection. The timer could just call closeIfIdle, but this is more // clear. func (cc *ClientConn) onIdleTimeout() { cc.closeIfIdle() } func (cc *ClientConn) closeIfIdle() { cc.mu.Lock() if len(cc.streams) > 0 { cc.mu.Unlock() return } cc.closed = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() if VerboseLogs { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) } cc.tconn.Close() } const maxAllocFrameSize = 512 << 10 // frameBuffer returns a scratch buffer suitable for writing DATA frames. // They're capped at the min of the peer's max frame size or 512KB // (kinda arbitrarily), but definitely capped so we don't allocate 4GB // bufers. func (cc *ClientConn) frameScratchBuffer() []byte { cc.mu.Lock() size := cc.maxFrameSize if size > maxAllocFrameSize { size = maxAllocFrameSize } for i, buf := range cc.freeBuf { if len(buf) >= int(size) { cc.freeBuf[i] = nil cc.mu.Unlock() return buf[:size] } } cc.mu.Unlock() return make([]byte, size) } func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { cc.mu.Lock() defer cc.mu.Unlock() const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. if len(cc.freeBuf) < maxBufs { cc.freeBuf = append(cc.freeBuf, buf) return } for i, old := range cc.freeBuf { if old == nil { cc.freeBuf[i] = buf return } } // forget about it. } // errRequestCanceled is a copy of net/http's errRequestCanceled because it's not // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. 
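// (The two values are distinct pointers and never ==, but
// reflect.DeepEqual reports true because the underlying message strings
// match.)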
var errRequestCanceled = errors.New("net/http: request canceled") func commaSeparatedTrailers(req *http.Request) (string, error) { keys := make([]string, 0, len(req.Trailer)) for k := range req.Trailer { k = http.CanonicalHeaderKey(k) switch k { case "Transfer-Encoding", "Trailer", "Content-Length": return "", &badStringError{"invalid Trailer key", k} } keys = append(keys, k) } if len(keys) > 0 { sort.Strings(keys) return strings.Join(keys, ","), nil } return "", nil } func (cc *ClientConn) responseHeaderTimeout() time.Duration { if cc.t.t1 != nil { return cc.t.t1.ResponseHeaderTimeout } // No way to do this (yet?) with just an http2.Transport. Probably // no need. Request.Cancel this is the new way. We only need to support // this for compatibility with the old http.Transport fields when // we're doing transparent http2. return 0 } // checkConnHeaders checks whether req has any invalid connection-level headers. // per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. // Certain headers are special-cased as okay but not transmitted later. func checkConnHeaders(req *http.Request) error { if v := req.Header.Get("Upgrade"); v != "" { return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) } if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) } if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { return fmt.Errorf("http2: invalid Connection request header: %q", vv) } return nil } // actualContentLength returns a sanitized version of // req.ContentLength, where 0 actually means zero (not unknown) and -1 // means unknown. func actualContentLength(req *http.Request) int64 { if req.Body == nil { return 0 } if req.ContentLength != 0 { return req.ContentLength } return -1 } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { if err := checkConnHeaders(req); err != nil { return nil, err } if cc.idleTimer != nil { cc.idleTimer.Stop() } trailers, err := commaSeparatedTrailers(req) if err != nil { return nil, err } hasTrailers := trailers != "" cc.mu.Lock() cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { cc.mu.Unlock() return nil, errClientConnUnusable } body := req.Body hasBody := body != nil contentLen := actualContentLength(req) // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? var requestedGzip bool if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && req.Header.Get("Range") == "" && req.Method != "HEAD" { // Request gzip only, not deflate. Deflate is ambiguous and // not as universally supported anyway. // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 // // Note that we don't request this for HEAD requests, // due to a bug in nginx: // http://trac.nginx.org/nginx/ticket/358 // https://golang.org/issue/5522 // // We don't request gzip if the request is for a range, since // auto-decoding a portion of a gzipped document will just fail // anyway. 
See https://golang.org/issue/8923 requestedGzip = true } // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) if err != nil { cc.mu.Unlock() return nil, err } cs := cc.newStream() cs.req = req cs.trace = requestTrace(req) cs.requestedGzip = requestedGzip bodyWriter := cc.t.getBodyWriterState(cs, body) cs.on100 = bodyWriter.on100 cc.wmu.Lock() endStream := !hasBody && !hasTrailers werr := cc.writeHeaders(cs.ID, endStream, hdrs) cc.wmu.Unlock() traceWroteHeaders(cs.trace) cc.mu.Unlock() if werr != nil { if hasBody { req.Body.Close() // per RoundTripper contract bodyWriter.cancel() } cc.forgetStreamID(cs.ID) // Don't bother sending a RST_STREAM (our write already failed; // no need to keep writing) traceWroteRequest(cs.trace, werr) return nil, werr } var respHeaderTimer <-chan time.Time if hasBody { bodyWriter.scheduleBodyWrite() } else { traceWroteRequest(cs.trace, nil) if d := cc.responseHeaderTimeout(); d != 0 { timer := time.NewTimer(d) defer timer.Stop() respHeaderTimer = timer.C } } readLoopResCh := cs.resc bodyWritten := false ctx := reqContext(req) handleReadLoopResponse := func(re resAndError) (*http.Response, error) { res := re.res if re.err != nil || res.StatusCode > 299 { // On error or status code 3xx, 4xx, 5xx, etc abort any // ongoing write, assuming that the server doesn't care // about our request body. If the server replied with 1xx or // 2xx, however, then assume the server DOES potentially // want our body (e.g. full-duplex streaming: // golang.org/issue/13444). If it turns out the server // doesn't, they'll RST_STREAM us soon enough. This is a // heuristic to avoid adding knobs to Transport. Hopefully // we can keep it. bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWrite) } if re.err != nil { if re.err == errClientConnGotGoAway { cc.mu.Lock() if cs.startedWrite { re.err = errClientConnGotGoAwayAfterSomeReqBody } cc.mu.Unlock() } cc.forgetStreamID(cs.ID) return nil, re.err } res.Request = req res.TLS = cc.tlsState return res, nil } for { select { case re := <-readLoopResCh: return handleReadLoopResponse(re) case <-respHeaderTimer: cc.forgetStreamID(cs.ID) if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) } else { bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } return nil, errTimeout case <-ctx.Done(): cc.forgetStreamID(cs.ID) if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) } else { bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } return nil, ctx.Err() case <-req.Cancel: cc.forgetStreamID(cs.ID) if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) } else { bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } return nil, errRequestCanceled case <-cs.peerReset: // processResetStream already removed the // stream from the streams map; no need for // forgetStreamID. return nil, cs.resetErr case err := <-bodyWriter.resc: // Prefer the read loop's response, if available. Issue 16102. 
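// The body writer finished (err may be nil). Prefer a response that has
// already arrived even though it raced with the write; otherwise a write
// error fails the RoundTrip, and a successful write arms the
// response-header timer (if one is configured) below.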
select { case re := <-readLoopResCh: return handleReadLoopResponse(re) default: } if err != nil { return nil, err } bodyWritten = true if d := cc.responseHeaderTimeout(); d != 0 { timer := time.NewTimer(d) defer timer.Stop() respHeaderTimer = timer.C } } } } // requires cc.wmu be held func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { first := true // first frame written (HEADERS is first, then CONTINUATION) frameSize := int(cc.maxFrameSize) for len(hdrs) > 0 && cc.werr == nil { chunk := hdrs if len(chunk) > frameSize { chunk = chunk[:frameSize] } hdrs = hdrs[len(chunk):] endHeaders := len(hdrs) == 0 if first { cc.fr.WriteHeaders(HeadersFrameParam{ StreamID: streamID, BlockFragment: chunk, EndStream: endStream, EndHeaders: endHeaders, }) first = false } else { cc.fr.WriteContinuation(streamID, endHeaders, chunk) } } // TODO(bradfitz): this Flush could potentially block (as // could the WriteHeaders call(s) above), which means they // wouldn't respond to Request.Cancel being readable. That's // rare, but this should probably be in a goroutine. cc.bw.Flush() return cc.werr } // internal error values; they don't escape to callers var ( // abort request body write; don't send cancel errStopReqBodyWrite = errors.New("http2: aborting request body write") // abort request body write, but send stream reset of cancel. errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") ) func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { cc := cs.cc sentEnd := false // whether we sent the final DATA frame w/ END_STREAM buf := cc.frameScratchBuffer() defer cc.putFrameScratchBuffer(buf) defer func() { traceWroteRequest(cs.trace, err) // TODO: write h12Compare test showing whether // Request.Body is closed by the Transport, // and in multiple cases: server replies <=299 and >299 // while still writing request body cerr := bodyCloser.Close() if err == nil { err = cerr } }() req := cs.req hasTrailers := req.Trailer != nil var sawEOF bool for !sawEOF { n, err := body.Read(buf) if err == io.EOF { sawEOF = true err = nil } else if err != nil { return err } remain := buf[:n] for len(remain) > 0 && err == nil { var allowed int32 allowed, err = cs.awaitFlowControl(len(remain)) switch { case err == errStopReqBodyWrite: return err case err == errStopReqBodyWriteAndCancel: cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) return err case err != nil: return err } cc.wmu.Lock() data := remain[:allowed] remain = remain[allowed:] sentEnd = sawEOF && len(remain) == 0 && !hasTrailers err = cc.fr.WriteData(cs.ID, sentEnd, data) if err == nil { // TODO(bradfitz): this flush is for latency, not bandwidth. // Most requests won't need this. Make this opt-in or // opt-out? Use some heuristic on the body type? Nagel-like // timers? Based on 'n'? Only last chunk of this for loop, // unless flow control tokens are low? For now, always. // If we change this, see comment below. err = cc.bw.Flush() } cc.wmu.Unlock() } if err != nil { return err } } if sentEnd { // Already sent END_STREAM (which implies we have no // trailers) and flushed, because currently all // WriteData frames above get a flush. So we're done. return nil } var trls []byte if hasTrailers { cc.mu.Lock() defer cc.mu.Unlock() trls = cc.encodeTrailers(req) } cc.wmu.Lock() defer cc.wmu.Unlock() // Two ways to send END_STREAM: either with trailers, or // with an empty DATA frame. 
if len(trls) > 0 { err = cc.writeHeaders(cs.ID, true, trls) } else { err = cc.fr.WriteData(cs.ID, true, nil) } if ferr := cc.bw.Flush(); ferr != nil && err == nil { err = ferr } return err } // awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow // control tokens from the server. // It returns either the non-zero number of tokens taken or an error // if the stream is dead. func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { cc := cs.cc cc.mu.Lock() defer cc.mu.Unlock() for { if cc.closed { return 0, errClientConnClosed } if cs.stopReqBody != nil { return 0, cs.stopReqBody } if err := cs.checkResetOrDone(); err != nil { return 0, err } if a := cs.flow.available(); a > 0 { take := a if int(take) > maxBytes { take = int32(maxBytes) // can't truncate int; take is int32 } if take > int32(cc.maxFrameSize) { take = int32(cc.maxFrameSize) } cs.flow.take(take) return take, nil } cc.cond.Wait() } } type badStringError struct { what string str string } func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } // requires cc.mu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() host := req.Host if host == "" { host = req.URL.Host } host, err := httplex.PunycodeHostPort(host) if err != nil { return nil, err } var path string if req.Method != "CONNECT" { path = req.URL.RequestURI() if !validPseudoPath(path) { orig := path path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) if !validPseudoPath(path) { if req.URL.Opaque != "" { return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) } else { return nil, fmt.Errorf("invalid request :path %q", orig) } } } } // Check for any invalid headers and return an error before we // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) for k, vv := range req.Header { if !httplex.ValidHeaderFieldName(k) { return nil, fmt.Errorf("invalid HTTP header name %q", k) } for _, v := range vv { if !httplex.ValidHeaderFieldValue(v) { return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) } } } // 8.1.2.3 Request Pseudo-Header Fields // The :path pseudo-header field includes the path and query parts of the // target URI (the path-absolute production and optionally a '?' character // followed by the query production (see Sections 3.3 and 3.4 of // [RFC3986]). cc.writeHeader(":authority", host) cc.writeHeader(":method", req.Method) if req.Method != "CONNECT" { cc.writeHeader(":path", path) cc.writeHeader(":scheme", req.URL.Scheme) } if trailers != "" { cc.writeHeader("trailer", trailers) } var didUA bool for k, vv := range req.Header { lowKey := strings.ToLower(k) switch lowKey { case "host", "content-length": // Host is :authority, already sent. // Content-Length is automatic, set below. continue case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": // Per 8.1.2.2 Connection-Specific Header // Fields, don't send connection-specific // fields. We have already checked if any // are error-worthy so just ignore the rest. continue case "user-agent": // Match Go's http1 behavior: at most one // User-Agent. If set to nil or empty string, // then omit it. Otherwise if not mentioned, // include the default (below). 
didUA = true if len(vv) < 1 { continue } vv = vv[:1] if vv[0] == "" { continue } } for _, v := range vv { cc.writeHeader(lowKey, v) } } if shouldSendReqContentLength(req.Method, contentLength) { cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) } if addGzipHeader { cc.writeHeader("accept-encoding", "gzip") } if !didUA { cc.writeHeader("user-agent", defaultUserAgent) } return cc.hbuf.Bytes(), nil } // shouldSendReqContentLength reports whether the http2.Transport should send // a "content-length" request header. This logic is basically a copy of the net/http // transferWriter.shouldSendContentLength. // The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). // -1 means unknown. func shouldSendReqContentLength(method string, contentLength int64) bool { if contentLength > 0 { return true } if contentLength < 0 { return false } // For zero bodies, whether we send a content-length depends on the method. // It also kinda doesn't matter for http2 either way, with END_STREAM. switch method { case "POST", "PUT", "PATCH": return true default: return false } } // requires cc.mu be held. func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { cc.hbuf.Reset() for k, vv := range req.Trailer { // Transfer-Encoding, etc.. have already been filter at the // start of RoundTrip lowKey := strings.ToLower(k) for _, v := range vv { cc.writeHeader(lowKey, v) } } return cc.hbuf.Bytes() } func (cc *ClientConn) writeHeader(name, value string) { if VerboseLogs { log.Printf("http2: Transport encoding header %q = %q", name, value) } cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) } type resAndError struct { res *http.Response err error } // requires cc.mu be held. func (cc *ClientConn) newStream() *clientStream { cs := &clientStream{ cc: cc, ID: cc.nextStreamID, resc: make(chan resAndError, 1), peerReset: make(chan struct{}), done: make(chan struct{}), } cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) cs.inflow.add(transportDefaultStreamFlow) cs.inflow.setConnFlow(&cc.inflow) cc.nextStreamID += 2 cc.streams[cs.ID] = cs return cs } func (cc *ClientConn) forgetStreamID(id uint32) { cc.streamByID(id, true) } func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { cc.mu.Lock() defer cc.mu.Unlock() cs := cc.streams[id] if andRemove && cs != nil && !cc.closed { cc.lastActive = time.Now() delete(cc.streams, id) if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) } close(cs.done) cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl } return cs } // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. type clientConnReadLoop struct { cc *ClientConn activeRes map[uint32]*clientStream // keyed by streamID closeWhenIdle bool } // readLoop runs in its own goroutine and reads and dispatches frames. func (cc *ClientConn) readLoop() { rl := &clientConnReadLoop{ cc: cc, activeRes: make(map[uint32]*clientStream), } defer rl.cleanup() cc.readerErr = rl.run() if ce, ok := cc.readerErr.(ConnectionError); ok { cc.wmu.Lock() cc.fr.WriteGoAway(0, ErrCode(ce), nil) cc.wmu.Unlock() } } // GoAwayError is returned by the Transport when the server closes the // TCP connection after sending a GOAWAY frame. 
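// Callers that want to retry on another connection can detect it with a
// type assertion, e.g. (sketch, assuming t is an *http2.Transport):
//
//	_, err := t.RoundTrip(req)
//	if ga, ok := err.(http2.GoAwayError); ok {
//		log.Printf("server going away after stream %d: %v", ga.LastStreamID, ga.ErrCode)
//	}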
type GoAwayError struct { LastStreamID uint32 ErrCode ErrCode DebugData string } func (e GoAwayError) Error() string { return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", e.LastStreamID, e.ErrCode, e.DebugData) } func isEOFOrNetReadError(err error) bool { if err == io.EOF { return true } ne, ok := err.(*net.OpError) return ok && ne.Op == "read" } func (rl *clientConnReadLoop) cleanup() { cc := rl.cc defer cc.tconn.Close() defer cc.t.connPool().MarkDead(cc) defer close(cc.readerDone) if cc.idleTimer != nil { cc.idleTimer.Stop() } // Close any response bodies if the server closes prematurely. // TODO: also do this if we've written the headers but not // gotten a response yet. err := cc.readerErr cc.mu.Lock() if cc.goAway != nil && isEOFOrNetReadError(err) { err = GoAwayError{ LastStreamID: cc.goAway.LastStreamID, ErrCode: cc.goAway.ErrCode, DebugData: cc.goAwayDebug, } } else if err == io.EOF { err = io.ErrUnexpectedEOF } for _, cs := range rl.activeRes { cs.bufPipe.CloseWithError(err) } for _, cs := range cc.streams { select { case cs.resc <- resAndError{err: err}: default: } close(cs.done) } cc.closed = true cc.cond.Broadcast() cc.mu.Unlock() } func (rl *clientConnReadLoop) run() error { cc := rl.cc rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse gotReply := false // ever saw a HEADERS reply gotSettings := false for { f, err := cc.fr.ReadFrame() if err != nil { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { cs.cc.writeStreamReset(cs.ID, se.Code, err) if se.Cause == nil { se.Cause = cc.fr.errDetail } rl.endStreamError(cs, se) } continue } else if err != nil { return err } if VerboseLogs { cc.vlogf("http2: Transport received %s", summarizeFrame(f)) } if !gotSettings { if _, ok := f.(*SettingsFrame); !ok { cc.logf("protocol error: received %T before a SETTINGS frame", f) return ConnectionError(ErrCodeProtocol) } gotSettings = true } maybeIdle := false // whether frame might transition us to idle switch f := f.(type) { case *MetaHeadersFrame: err = rl.processHeaders(f) maybeIdle = true gotReply = true case *DataFrame: err = rl.processData(f) maybeIdle = true case *GoAwayFrame: err = rl.processGoAway(f) maybeIdle = true case *RSTStreamFrame: err = rl.processResetStream(f) maybeIdle = true case *SettingsFrame: err = rl.processSettings(f) case *PushPromiseFrame: err = rl.processPushPromise(f) case *WindowUpdateFrame: err = rl.processWindowUpdate(f) case *PingFrame: err = rl.processPing(f) default: cc.logf("Transport: unhandled response frame type %T", f) } if err != nil { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } return err } if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { cc.closeIfIdle() } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { cc := rl.cc cs := cc.streamByID(f.StreamID, f.StreamEnded()) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this // was just something we canceled, ignore it. return nil } if !cs.firstByte { if cs.trace != nil { // TODO(bradfitz): move first response byte earlier, // when we first read the 9 byte header, not waiting // until all the HEADERS+CONTINUATION frames have been // merged. This works for now. 
traceFirstResponseByte(cs.trace) } cs.firstByte = true } if !cs.pastHeaders { cs.pastHeaders = true } else { return rl.processTrailers(cs, f) } res, err := rl.handleResponse(cs, f) if err != nil { if _, ok := err.(ConnectionError); ok { return err } // Any other error type is a stream error. cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) cs.resc <- resAndError{err: err} return nil // return nil from process* funcs to keep conn alive } if res == nil { // (nil, nil) special case. See handleResponse docs. return nil } if res.Body != noBody { rl.activeRes[cs.ID] = cs } cs.resTrailer = &res.Trailer cs.resc <- resAndError{res: res} return nil } // may return error types nil, or ConnectionError. Any other error value // is a StreamError of type ErrCodeProtocol. The returned error in that case // is the detail. // // As a special case, handleResponse may return (nil, nil) to skip the // frame (currently only used for 100 expect continue). This special // case is going away after Issue 13851 is fixed. func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { if f.Truncated { return nil, errResponseHeaderListSize } status := f.PseudoValue("status") if status == "" { return nil, errors.New("missing status pseudo header") } statusCode, err := strconv.Atoi(status) if err != nil { return nil, errors.New("malformed non-numeric status pseudo header") } if statusCode == 100 { traceGot100Continue(cs.trace) if cs.on100 != nil { cs.on100() // forces any write delay timer to fire } cs.pastHeaders = false // do it all again return nil, nil } header := make(http.Header) res := &http.Response{ Proto: "HTTP/2.0", ProtoMajor: 2, Header: header, StatusCode: statusCode, Status: status + " " + http.StatusText(statusCode), } for _, hf := range f.RegularFields() { key := http.CanonicalHeaderKey(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { t = make(http.Header) res.Trailer = t } foreachHeaderElement(hf.Value, func(v string) { t[http.CanonicalHeaderKey(v)] = nil }) } else { header[key] = append(header[key], hf.Value) } } streamEnded := f.StreamEnded() isHead := cs.req.Method == "HEAD" if !streamEnded || isHead { res.ContentLength = -1 if clens := res.Header["Content-Length"]; len(clens) == 1 { if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { res.ContentLength = clen64 } else { // TODO: care? unlike http/1, it won't mess up our framing, so it's // more safe smuggling-wise to ignore. } } else if len(clens) > 1 { // TODO: care? unlike http/1, it won't mess up our framing, so it's // more safe smuggling-wise to ignore. } } if streamEnded || isHead { res.Body = noBody return res, nil } cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} cs.bytesRemain = res.ContentLength res.Body = transportResponseBody{cs} go cs.awaitRequestCancel(cs.req) if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { res.Header.Del("Content-Encoding") res.Header.Del("Content-Length") res.ContentLength = -1 res.Body = &gzipReader{body: res.Body} setResponseUncompressed(res) } return res, nil } func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { if cs.pastTrailers { // Too many HEADERS frames for this stream. return ConnectionError(ErrCodeProtocol) } cs.pastTrailers = true if !f.StreamEnded() { // We expect that any headers for trailers also // has END_STREAM. return ConnectionError(ErrCodeProtocol) } if len(f.PseudoFields()) > 0 { // No pseudo header fields are defined for trailers. 
// TODO: ConnectionError might be overly harsh? Check. return ConnectionError(ErrCodeProtocol) } trailer := make(http.Header) for _, hf := range f.RegularFields() { key := http.CanonicalHeaderKey(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer rl.endStream(cs) return nil } // transportResponseBody is the concrete type of Transport.RoundTrip's // Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. // On Close it sends RST_STREAM if EOF wasn't already seen. type transportResponseBody struct { cs *clientStream } func (b transportResponseBody) Read(p []byte) (n int, err error) { cs := b.cs cc := cs.cc if cs.readErr != nil { return 0, cs.readErr } n, err = b.cs.bufPipe.Read(p) if cs.bytesRemain != -1 { if int64(n) > cs.bytesRemain { n = int(cs.bytesRemain) if err == nil { err = errors.New("net/http: server replied with more than declared Content-Length; truncated") cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) } cs.readErr = err return int(cs.bytesRemain), err } cs.bytesRemain -= int64(n) if err == io.EOF && cs.bytesRemain > 0 { err = io.ErrUnexpectedEOF cs.readErr = err return n, err } } if n == 0 { // No flow control tokens to send back. return } cc.mu.Lock() defer cc.mu.Unlock() var connAdd, streamAdd int32 // Check the conn-level first, before the stream-level. if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { connAdd = transportDefaultConnFlow - v cc.inflow.add(connAdd) } if err == nil { // No need to refresh if the stream is over or failed. // Consider any buffered body data (read from the conn but not // consumed by the client) when computing flow control for this // stream. v := int(cs.inflow.available()) + cs.bufPipe.Len() if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { streamAdd = int32(transportDefaultStreamFlow - v) cs.inflow.add(streamAdd) } } if connAdd != 0 || streamAdd != 0 { cc.wmu.Lock() defer cc.wmu.Unlock() if connAdd != 0 { cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) } if streamAdd != 0 { cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) } cc.bw.Flush() } return } var errClosedResponseBody = errors.New("http2: response body closed") func (b transportResponseBody) Close() error { cs := b.cs cc := cs.cc serverSentStreamEnd := cs.bufPipe.Err() == io.EOF unread := cs.bufPipe.Len() if unread > 0 || !serverSentStreamEnd { cc.mu.Lock() cc.wmu.Lock() if !serverSentStreamEnd { cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) } // Return connection-level flow control. if unread > 0 { cc.inflow.add(int32(unread)) cc.fr.WriteWindowUpdate(0, uint32(unread)) } cc.bw.Flush() cc.wmu.Unlock() cc.mu.Unlock() } cs.bufPipe.BreakWithError(errClosedResponseBody) return nil } func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc cs := cc.streamByID(f.StreamID, f.StreamEnded()) data := f.Data() if cs == nil { cc.mu.Lock() neverSent := cc.nextStreamID cc.mu.Unlock() if f.StreamID >= neverSent { // We never asked for this. cc.logf("http2: Transport received unsolicited DATA frame; closing connection") return ConnectionError(ErrCodeProtocol) } // We probably did ask for this, but canceled. Just ignore it. // TODO: be stricter here? only silently ignore things which // we canceled, but not things which were closed normally // by the peer? Tough without accumulating too much state. 
// But at least return their flow control: if f.Length > 0 { cc.mu.Lock() cc.inflow.add(int32(f.Length)) cc.mu.Unlock() cc.wmu.Lock() cc.fr.WriteWindowUpdate(0, uint32(f.Length)) cc.bw.Flush() cc.wmu.Unlock() } return nil } if f.Length > 0 { if len(data) > 0 && cs.bufPipe.b == nil { // Data frame after it's already closed? cc.logf("http2: Transport received DATA frame for closed stream; closing connection") return ConnectionError(ErrCodeProtocol) } // Check connection-level flow control. cc.mu.Lock() if cs.inflow.available() >= int32(f.Length) { cs.inflow.take(int32(f.Length)) } else { cc.mu.Unlock() return ConnectionError(ErrCodeFlowControl) } // Return any padded flow control now, since we won't // refund it later on body reads. if pad := int32(f.Length) - int32(len(data)); pad > 0 { cs.inflow.add(pad) cc.inflow.add(pad) cc.wmu.Lock() cc.fr.WriteWindowUpdate(0, uint32(pad)) cc.fr.WriteWindowUpdate(cs.ID, uint32(pad)) cc.bw.Flush() cc.wmu.Unlock() } didReset := cs.didReset cc.mu.Unlock() if len(data) > 0 && !didReset { if _, err := cs.bufPipe.Write(data); err != nil { rl.endStreamError(cs, err) return err } } } if f.StreamEnded() { rl.endStream(cs) } return nil } var errInvalidTrailers = errors.New("http2: invalid trailers") func (rl *clientConnReadLoop) endStream(cs *clientStream) { // TODO: check that any declared content-length matches, like // server.go's (*stream).endStream method. rl.endStreamError(cs, nil) } func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { var code func() if err == nil { err = io.EOF code = cs.copyTrailers } cs.bufPipe.closeWithErrorAndCode(err, code) delete(rl.activeRes, cs.ID) if isConnectionCloseRequest(cs.req) { rl.closeWhenIdle = true } select { case cs.resc <- resAndError{err: err}: default: } } func (cs *clientStream) copyTrailers() { for k, vv := range cs.trailer { t := cs.resTrailer if *t == nil { *t = make(http.Header) } (*t)[k] = vv } } func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { cc := rl.cc cc.t.connPool().MarkDead(cc) if f.ErrCode != 0 { // TODO: deal with GOAWAY more. particularly the error code cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) } cc.setGoAway(f) return nil } func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { cc := rl.cc cc.mu.Lock() defer cc.mu.Unlock() if f.IsAck() { if cc.wantSettingsAck { cc.wantSettingsAck = false return nil } return ConnectionError(ErrCodeProtocol) } err := f.ForeachSetting(func(s Setting) error { switch s.ID { case SettingMaxFrameSize: cc.maxFrameSize = s.Val case SettingMaxConcurrentStreams: cc.maxConcurrentStreams = s.Val case SettingInitialWindowSize: // Values above the maximum flow-control // window size of 2^31-1 MUST be treated as a // connection error (Section 5.4.1) of type // FLOW_CONTROL_ERROR. if s.Val > math.MaxInt32 { return ConnectionError(ErrCodeFlowControl) } // Adjust flow control of currently-open // frames by the difference of the old initial // window size and this one. delta := int32(s.Val) - int32(cc.initialWindowSize) for _, cs := range cc.streams { cs.flow.add(delta) } cc.cond.Broadcast() cc.initialWindowSize = s.Val default: // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
cc.vlogf("Unhandled Setting: %v", s) } return nil }) if err != nil { return err } cc.wmu.Lock() defer cc.wmu.Unlock() cc.fr.WriteSettingsAck() cc.bw.Flush() return cc.werr } func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc cs := cc.streamByID(f.StreamID, false) if f.StreamID != 0 && cs == nil { return nil } cc.mu.Lock() defer cc.mu.Unlock() fl := &cc.flow if cs != nil { fl = &cs.flow } if !fl.add(int32(f.Increment)) { return ConnectionError(ErrCodeFlowControl) } cc.cond.Broadcast() return nil } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { cs := rl.cc.streamByID(f.StreamID, true) if cs == nil { // TODO: return error if server tries to RST_STEAM an idle stream return nil } select { case <-cs.peerReset: // Already reset. // This is the only goroutine // which closes this, so there // isn't a race. default: err := streamError(cs.ID, f.ErrCode) cs.resetErr = err close(cs.peerReset) cs.bufPipe.CloseWithError(err) cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl } delete(rl.activeRes, cs.ID) return nil } // Ping sends a PING frame to the server and waits for the ack. // Public implementation is in go17.go and not_go17.go func (cc *ClientConn) ping(ctx contextContext) error { c := make(chan struct{}) // Generate a random payload var p [8]byte for { if _, err := rand.Read(p[:]); err != nil { return err } cc.mu.Lock() // check for dup before insert if _, found := cc.pings[p]; !found { cc.pings[p] = c cc.mu.Unlock() break } cc.mu.Unlock() } cc.wmu.Lock() if err := cc.fr.WritePing(false, p); err != nil { cc.wmu.Unlock() return err } if err := cc.bw.Flush(); err != nil { cc.wmu.Unlock() return err } cc.wmu.Unlock() select { case <-c: return nil case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: // connection closed return cc.readerErr } } func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if f.IsAck() { cc := rl.cc cc.mu.Lock() defer cc.mu.Unlock() // If ack, notify listener if any if c, ok := cc.pings[f.Data]; ok { close(c) delete(cc.pings, f.Data) } return nil } cc := rl.cc cc.wmu.Lock() defer cc.wmu.Unlock() if err := cc.fr.WritePing(true, f.Data); err != nil { return err } return cc.bw.Flush() } func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { // We told the peer we don't want them. // Spec says: // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH // setting of the peer endpoint is set to 0. An endpoint that // has set this setting and has received acknowledgement MUST // treat the receipt of a PUSH_PROMISE frame as a connection // error (Section 5.4.1) of type PROTOCOL_ERROR." return ConnectionError(ErrCodeProtocol) } func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) cc.bw.Flush() cc.wmu.Unlock() } var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") ) func (cc *ClientConn) logf(format string, args ...interface{}) { cc.t.logf(format, args...) } func (cc *ClientConn) vlogf(format string, args ...interface{}) { cc.t.vlogf(format, args...) 
} func (t *Transport) vlogf(format string, args ...interface{}) { if VerboseLogs { t.logf(format, args...) } } func (t *Transport) logf(format string, args ...interface{}) { log.Printf(format, args...) } var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) func strSliceContains(ss []string, s string) bool { for _, v := range ss { if v == s { return true } } return false } type erringRoundTripper struct{ err error } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } // gzipReader wraps a response body so it can lazily // call gzip.NewReader on the first call to Read type gzipReader struct { body io.ReadCloser // underlying Response.Body zr *gzip.Reader // lazily-initialized gzip reader zerr error // sticky error } func (gz *gzipReader) Read(p []byte) (n int, err error) { if gz.zerr != nil { return 0, gz.zerr } if gz.zr == nil { gz.zr, err = gzip.NewReader(gz.body) if err != nil { gz.zerr = err return 0, err } } return gz.zr.Read(p) } func (gz *gzipReader) Close() error { return gz.body.Close() } type errorReader struct{ err error } func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } // bodyWriterState encapsulates various state around the Transport's writing // of the request body, particularly regarding doing delayed writes of the body // when the request contains "Expect: 100-continue". type bodyWriterState struct { cs *clientStream timer *time.Timer // if non-nil, we're doing a delayed write fnonce *sync.Once // to call fn with fn func() // the code to run in the goroutine, writing the body resc chan error // result of fn's execution delay time.Duration // how long we should delay a delayed write for } func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { s.cs = cs if body == nil { return } resc := make(chan error, 1) s.resc = resc s.fn = func() { cs.cc.mu.Lock() cs.startedWrite = true cs.cc.mu.Unlock() resc <- cs.writeRequestBody(body, cs.req.Body) } s.delay = t.expectContinueTimeout() if s.delay == 0 || !httplex.HeaderValuesContainsToken( cs.req.Header["Expect"], "100-continue") { return } s.fnonce = new(sync.Once) // Arm the timer with a very large duration, which we'll // intentionally lower later. It has to be large now because // we need a handle to it before writing the headers, but the // s.delay value is defined to not start until after the // request headers were written. const hugeDuration = 365 * 24 * time.Hour s.timer = time.AfterFunc(hugeDuration, func() { s.fnonce.Do(s.fn) }) return } func (s bodyWriterState) cancel() { if s.timer != nil { s.timer.Stop() } } func (s bodyWriterState) on100() { if s.timer == nil { // If we didn't do a delayed write, ignore the server's // bogus 100 continue response. return } s.timer.Stop() go func() { s.fnonce.Do(s.fn) }() } // scheduleBodyWrite starts writing the body, either immediately (in // the common case) or after the delay timeout. It should not be // called until after the headers have been written. func (s bodyWriterState) scheduleBodyWrite() { if s.timer == nil { // We're not doing a delayed write (see // getBodyWriterState), so just start the writing // goroutine immediately. go s.fn() return } traceWait100Continue(s.cs.trace) if s.timer.Stop() { s.timer.Reset(s.delay) } } // isConnectionCloseRequest reports whether req should use its own // connection for a single request and then close the connection. 
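// For example (illustrative):
//
//	req, _ := http.NewRequest("GET", "https://example.com/", nil)
//	req.Close = true                      // isConnectionCloseRequest(req) == true
//	req.Header.Set("Connection", "close") // also sufficient on its own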
func isConnectionCloseRequest(req *http.Request) bool { return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") } write.go000066400000000000000000000251211324746544700314570ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package http2 import ( "bytes" "fmt" "log" "net/http" "net/url" "time" "golang.org/x/net/http2/hpack" "golang.org/x/net/lex/httplex" ) // writeFramer is implemented by any type that is used to write frames. type writeFramer interface { writeFrame(writeContext) error // staysWithinBuffer reports whether this writer promises that // it will only write less than or equal to size bytes, and it // won't Flush the write context. staysWithinBuffer(size int) bool } // writeContext is the interface needed by the various frame writer // types below. All the writeFrame methods below are scheduled via the // frame writing scheduler (see writeScheduler in writesched.go). // // This interface is implemented by *serverConn. // // TODO: decide whether to a) use this in the client code (which didn't // end up using this yet, because it has a simpler design, not // currently implementing priorities), or b) delete this and // make the server code a bit more concrete. type writeContext interface { Framer() *Framer Flush() error CloseConn() error // HeaderEncoder returns an HPACK encoder that writes to the // returned buffer. HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) } // writeEndsStream reports whether w writes a frame that will transition // the stream to a half-closed local state. This returns false for RST_STREAM, // which closes the entire stream (not just the local half). func writeEndsStream(w writeFramer) bool { switch v := w.(type) { case *writeData: return v.endStream case *writeResHeaders: return v.endStream case nil: // This can only happen if the caller reuses w after it's // been intentionally nil'ed out to prevent use. Keep this // here to catch future refactoring breaking it. panic("writeEndsStream called on nil writeFramer") } return false } type flushFrameWriter struct{} func (flushFrameWriter) writeFrame(ctx writeContext) error { return ctx.Flush() } func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } type writeSettings []Setting func (s writeSettings) staysWithinBuffer(max int) bool { const settingSize = 6 // uint16 + uint32 return frameHeaderLen+settingSize*len(s) <= max } func (s writeSettings) writeFrame(ctx writeContext) error { return ctx.Framer().WriteSettings([]Setting(s)...) 
} type writeGoAway struct { maxStreamID uint32 code ErrCode } func (p *writeGoAway) writeFrame(ctx writeContext) error { err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) if p.code != 0 { ctx.Flush() // ignore error: we're hanging up on them anyway time.Sleep(50 * time.Millisecond) ctx.CloseConn() } return err } func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes type writeData struct { streamID uint32 p []byte endStream bool } func (w *writeData) String() string { return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) } func (w *writeData) writeFrame(ctx writeContext) error { return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) } func (w *writeData) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.p) <= max } // handlerPanicRST is the message sent from handler goroutines when // the handler panics. type handlerPanicRST struct { StreamID uint32 } func (hp handlerPanicRST) writeFrame(ctx writeContext) error { return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) } func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } func (se StreamError) writeFrame(ctx writeContext) error { return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) } func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { return ctx.Framer().WritePing(true, w.pf.Data) } func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } type writeSettingsAck struct{} func (writeSettingsAck) writeFrame(ctx writeContext) error { return ctx.Framer().WriteSettingsAck() } func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } // splitHeaderBlock splits headerBlock into fragments so that each fragment fits // in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true // for the first/last fragment, respectively. func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { // For now we're lazy and just pick the minimum MAX_FRAME_SIZE // that all peers must support (16KB). Later we could care // more and send larger frames if the peer advertised it, but // there's little point. Most headers are small anyway (so we // generally won't have CONTINUATION frames), and extra frames // only waste 9 bytes anyway. const maxFrameSize = 16384 first := true for len(headerBlock) > 0 { frag := headerBlock if len(frag) > maxFrameSize { frag = frag[:maxFrameSize] } headerBlock = headerBlock[len(frag):] if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { return err } first = false } return nil } // writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames // for HTTP response headers or trailers from a server handler. type writeResHeaders struct { streamID uint32 httpResCode int // 0 means no ":status" line h http.Header // may be nil trailers []string // if non-nil, which keys of h to write. nil means all. endStream bool date string contentType string contentLength string } func encKV(enc *hpack.Encoder, k, v string) { if VerboseLogs { log.Printf("http2: server encoding header %q = %q", k, v) } enc.WriteField(hpack.HeaderField{Name: k, Value: v}) } func (w *writeResHeaders) staysWithinBuffer(max int) bool { // TODO: this is a common one. 
It'd be nice to return true // here and get into the fast path if we could be clever and // calculate the size fast enough, or at least a conservative // uppper bound that usually fires. (Maybe if w.h and // w.trailers are nil, so we don't need to enumerate it.) // Otherwise I'm afraid that just calculating the length to // answer this question would be slower than the ~2µs benefit. return false } func (w *writeResHeaders) writeFrame(ctx writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() if w.httpResCode != 0 { encKV(enc, ":status", httpCodeString(w.httpResCode)) } encodeHeaders(enc, w.h, w.trailers) if w.contentType != "" { encKV(enc, "content-type", w.contentType) } if w.contentLength != "" { encKV(enc, "content-length", w.contentLength) } if w.date != "" { encKV(enc, "date", w.date) } headerBlock := buf.Bytes() if len(headerBlock) == 0 && w.trailers == nil { panic("unexpected empty hpack") } return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) } func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { if firstFrag { return ctx.Framer().WriteHeaders(HeadersFrameParam{ StreamID: w.streamID, BlockFragment: frag, EndStream: w.endStream, EndHeaders: lastFrag, }) } else { return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) } } // writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. type writePushPromise struct { streamID uint32 // pusher stream method string // for :method url *url.URL // for :scheme, :authority, :path h http.Header // Creates an ID for a pushed stream. This runs on serveG just before // the frame is written. The returned ID is copied to promisedID. allocatePromisedID func() (uint32, error) promisedID uint32 } func (w *writePushPromise) staysWithinBuffer(max int) bool { // TODO: see writeResHeaders.staysWithinBuffer return false } func (w *writePushPromise) writeFrame(ctx writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() encKV(enc, ":method", w.method) encKV(enc, ":scheme", w.url.Scheme) encKV(enc, ":authority", w.url.Host) encKV(enc, ":path", w.url.RequestURI()) encodeHeaders(enc, w.h, nil) headerBlock := buf.Bytes() if len(headerBlock) == 0 { panic("unexpected empty hpack") } return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) } func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { if firstFrag { return ctx.Framer().WritePushPromise(PushPromiseParam{ StreamID: w.streamID, PromiseID: w.promisedID, BlockFragment: frag, EndHeaders: lastFrag, }) } else { return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) } } type write100ContinueHeadersFrame struct { streamID uint32 } func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { enc, buf := ctx.HeaderEncoder() buf.Reset() encKV(enc, ":status", "100") return ctx.Framer().WriteHeaders(HeadersFrameParam{ StreamID: w.streamID, BlockFragment: buf.Bytes(), EndStream: false, EndHeaders: true, }) } func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { // Sloppy but conservative: return 9+2*(len(":status")+len("100")) <= max } type writeWindowUpdate struct { streamID uint32 // or 0 for conn-level n uint32 } func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) } // encodeHeaders encodes an http.Header. 
If keys is not nil, then (k, h[k]) // is encoded only only if k is in keys. func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { if keys == nil { sorter := sorterPool.Get().(*sorter) // Using defer here, since the returned keys from the // sorter.Keys method is only valid until the sorter // is returned: defer sorterPool.Put(sorter) keys = sorter.Keys(h) } for _, k := range keys { vv := h[k] k = lowerHeader(k) if !validWireHeaderFieldName(k) { // Skip it as backup paranoia. Per // golang.org/issue/14048, these should // already be rejected at a higher level. continue } isTE := k == "transfer-encoding" for _, v := range vv { if !httplex.ValidHeaderFieldValue(v) { // TODO: return an error? golang.org/issue/14048 // For now just omit it. continue } // TODO: more of "8.1.2.2 Connection-Specific Header Fields" if isTE && v != "trailers" { continue } encKV(enc, k, v) } } } writesched.go000066400000000000000000000163741324746544700325000ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package http2 import "fmt" // WriteScheduler is the interface implemented by HTTP/2 write schedulers. // Methods are never called concurrently. type WriteScheduler interface { // OpenStream opens a new stream in the write scheduler. // It is illegal to call this with streamID=0 or with a streamID that is // already open -- the call may panic. OpenStream(streamID uint32, options OpenStreamOptions) // CloseStream closes a stream in the write scheduler. Any frames queued on // this stream should be discarded. It is illegal to call this on a stream // that is not open -- the call may panic. CloseStream(streamID uint32) // AdjustStream adjusts the priority of the given stream. This may be called // on a stream that has not yet been opened or has been closed. Note that // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: // https://tools.ietf.org/html/rfc7540#section-5.1 AdjustStream(streamID uint32, priority PriorityParam) // Push queues a frame in the scheduler. In most cases, this will not be // called with wr.StreamID()!=0 unless that stream is currently open. The one // exception is RST_STREAM frames, which may be sent on idle or closed streams. Push(wr FrameWriteRequest) // Pop dequeues the next frame to write. Returns false if no frames can // be written. Frames with a given wr.StreamID() are Pop'd in the same // order they are Push'd. Pop() (wr FrameWriteRequest, ok bool) } // OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. type OpenStreamOptions struct { // PusherID is zero if the stream was initiated by the client. Otherwise, // PusherID names the stream that pushed the newly opened stream. PusherID uint32 } // FrameWriteRequest is a request to write a frame. type FrameWriteRequest struct { // write is the interface value that does the writing, once the // WriteScheduler has selected this frame to write. The write // functions are all defined in write.go. write writeFramer // stream is the stream on which this frame will be written. // nil for non-stream frames like PING and SETTINGS. stream *stream // done, if non-nil, must be a buffered channel with space for // 1 message and is sent the return value from write (or an // earlier error) when the frame has been written. 
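// A nil done means nobody is waiting for the result; replyToWriter is a
// no-op in that case.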
done chan error } // StreamID returns the id of the stream this frame will be written to. // 0 is used for non-stream frames such as PING and SETTINGS. func (wr FrameWriteRequest) StreamID() uint32 { if wr.stream == nil { if se, ok := wr.write.(StreamError); ok { // (*serverConn).resetStream doesn't set // stream because it doesn't necessarily have // one. So special case this type of write // message. return se.StreamID } return 0 } return wr.stream.id } // DataSize returns the number of flow control bytes that must be consumed // to write this entire frame. This is 0 for non-DATA frames. func (wr FrameWriteRequest) DataSize() int { if wd, ok := wr.write.(*writeData); ok { return len(wd.p) } return 0 } // Consume consumes min(n, available) bytes from this frame, where available // is the number of flow control bytes available on the stream. Consume returns // 0, 1, or 2 frames, where the integer return value gives the number of frames // returned. // // If flow control prevents consuming any bytes, this returns (_, _, 0). If // the entire frame was consumed, this returns (wr, _, 1). Otherwise, this // returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and // 'rest' contains the remaining bytes. The consumed bytes are deducted from the // underlying stream's flow control budget. func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { var empty FrameWriteRequest // Non-DATA frames are always consumed whole. wd, ok := wr.write.(*writeData) if !ok || len(wd.p) == 0 { return wr, empty, 1 } // Might need to split after applying limits. allowed := wr.stream.flow.available() if n < allowed { allowed = n } if wr.stream.sc.maxFrameSize < allowed { allowed = wr.stream.sc.maxFrameSize } if allowed <= 0 { return empty, empty, 0 } if len(wd.p) > int(allowed) { wr.stream.flow.take(allowed) consumed := FrameWriteRequest{ stream: wr.stream, write: &writeData{ streamID: wd.streamID, p: wd.p[:allowed], // Even if the original had endStream set, there // are bytes remaining because len(wd.p) > allowed, // so we know endStream is false. endStream: false, }, // Our caller is blocking on the final DATA frame, not // this intermediate frame, so no need to wait. done: nil, } rest := FrameWriteRequest{ stream: wr.stream, write: &writeData{ streamID: wd.streamID, p: wd.p[allowed:], endStream: wd.endStream, }, done: wr.done, } return consumed, rest, 2 } // The frame is consumed whole. // NB: This cast cannot overflow because allowed is <= math.MaxInt32. wr.stream.flow.take(int32(len(wd.p))) return wr, empty, 1 } // String is for debugging only. func (wr FrameWriteRequest) String() string { var des string if s, ok := wr.write.(fmt.Stringer); ok { des = s.String() } else { des = fmt.Sprintf("%T", wr.write) } return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) } // replyToWriter sends err to wr.done and panics if the send must block // This does nothing if wr.done is nil. func (wr *FrameWriteRequest) replyToWriter(err error) { if wr.done == nil { return } select { case wr.done <- err: default: panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) } wr.write = nil // prevent use (assume it's tainted after wr.done send) } // writeQueue is used by implementations of WriteScheduler. 
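Consume above is the write-side heart of HTTP/2 flow control: a queued DATA write is split against min(stream flow-control budget, peer max frame size), and only the first piece is written now. The following self-contained sketch shows just that splitting rule; splitDATA and its plain []byte arguments are illustrative stand-ins for the package's unexported FrameWriteRequest/writeData types, not part of the API.

package main

import "fmt"

// splitDATA mirrors the budget check in FrameWriteRequest.Consume: send
// min(budget, maxFrameSize) bytes now and keep the remainder queued.
func splitDATA(p []byte, budget, maxFrameSize int32) (now, rest []byte, ok bool) {
	allowed := budget
	if maxFrameSize < allowed {
		allowed = maxFrameSize
	}
	if allowed <= 0 {
		return nil, p, false // blocked on flow control
	}
	if len(p) > int(allowed) {
		return p[:allowed], p[allowed:], true // partial write; endStream stays with rest
	}
	return p, nil, true // consumed whole
}

func main() {
	body := make([]byte, 40000)
	now, rest, ok := splitDATA(body, 65535, 16384) // default-ish SETTINGS values
	fmt.Println(ok, len(now), len(rest))           // true 16384 23616
}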
type writeQueue struct { s []FrameWriteRequest } func (q *writeQueue) empty() bool { return len(q.s) == 0 } func (q *writeQueue) push(wr FrameWriteRequest) { q.s = append(q.s, wr) } func (q *writeQueue) shift() FrameWriteRequest { if len(q.s) == 0 { panic("invalid use of queue") } wr := q.s[0] // TODO: less copy-happy queue. copy(q.s, q.s[1:]) q.s[len(q.s)-1] = FrameWriteRequest{} q.s = q.s[:len(q.s)-1] return wr } // consume consumes up to n bytes from q.s[0]. If the frame is // entirely consumed, it is removed from the queue. If the frame // is partially consumed, the frame is kept with the consumed // bytes removed. Returns true iff any bytes were consumed. func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { if len(q.s) == 0 { return FrameWriteRequest{}, false } consumed, rest, numresult := q.s[0].Consume(n) switch numresult { case 0: return FrameWriteRequest{}, false case 1: q.shift() case 2: q.s[0] = rest } return consumed, true } type writeQueuePool []*writeQueue // put inserts an unused writeQueue into the pool. func (p *writeQueuePool) put(q *writeQueue) { for i := range q.s { q.s[i] = FrameWriteRequest{} } q.s = q.s[:0] *p = append(*p, q) } // get returns an empty writeQueue. func (p *writeQueuePool) get() *writeQueue { ln := len(*p) if ln == 0 { return new(writeQueue) } x := ln - 1 q := (*p)[x] (*p)[x] = nil *p = (*p)[:x] return q } writesched_priority.go000066400000000000000000000327261324746544700344400ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package http2 import ( "fmt" "math" "sort" ) // RFC 7540, Section 5.3.5: the default weight is 16. const priorityDefaultWeight = 15 // 16 = 15 + 1 // PriorityWriteSchedulerConfig configures a priorityWriteScheduler. type PriorityWriteSchedulerConfig struct { // MaxClosedNodesInTree controls the maximum number of closed streams to // retain in the priority tree. Setting this to zero saves a small amount // of memory at the cost of performance. // // See RFC 7540, Section 5.3.4: // "It is possible for a stream to become closed while prioritization // information ... is in transit. ... This potentially creates suboptimal // prioritization, since the stream could be given a priority that is // different from what is intended. To avoid these problems, an endpoint // SHOULD retain stream prioritization state for a period after streams // become closed. The longer state is retained, the lower the chance that // streams are assigned incorrect or default priority values." MaxClosedNodesInTree int // MaxIdleNodesInTree controls the maximum number of idle streams to // retain in the priority tree. Setting this to zero saves a small amount // of memory at the cost of performance. // // See RFC 7540, Section 5.3.4: // Similarly, streams that are in the "idle" state can be assigned // priority or become a parent of other streams. This allows for the // creation of a grouping node in the dependency tree, which enables // more flexible expressions of priority. Idle streams begin with a // default priority (Section 5.3.5). MaxIdleNodesInTree int // ThrottleOutOfOrderWrites enables write throttling to help ensure that // data is delivered in priority order. This works around a race where // stream B depends on stream A and both streams are about to call Write // to queue DATA frames. 
If B wins the race, a naive scheduler would eagerly // write as much data from B as possible, but this is suboptimal because A // is a higher-priority stream. With throttling enabled, we write a small // amount of data from B to minimize the amount of bandwidth that B can // steal from A. ThrottleOutOfOrderWrites bool } // NewPriorityWriteScheduler constructs a WriteScheduler that schedules // frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. // If cfg is nil, default options are used. func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { if cfg == nil { // For justification of these defaults, see: // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY cfg = &PriorityWriteSchedulerConfig{ MaxClosedNodesInTree: 10, MaxIdleNodesInTree: 10, ThrottleOutOfOrderWrites: false, } } ws := &priorityWriteScheduler{ nodes: make(map[uint32]*priorityNode), maxClosedNodesInTree: cfg.MaxClosedNodesInTree, maxIdleNodesInTree: cfg.MaxIdleNodesInTree, enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, } ws.nodes[0] = &ws.root if cfg.ThrottleOutOfOrderWrites { ws.writeThrottleLimit = 1024 } else { ws.writeThrottleLimit = math.MaxInt32 } return ws } type priorityNodeState int const ( priorityNodeOpen priorityNodeState = iota priorityNodeClosed priorityNodeIdle ) // priorityNode is a node in an HTTP/2 priority tree. // Each node is associated with a single stream ID. // See RFC 7540, Section 5.3. type priorityNode struct { q writeQueue // queue of pending frames to write id uint32 // id of the stream, or 0 for the root of the tree weight uint8 // the actual weight is weight+1, so the value is in [1,256] state priorityNodeState // open | closed | idle bytes int64 // number of bytes written by this node, or 0 if closed subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree // These links form the priority tree. parent *priorityNode kids *priorityNode // start of the kids list prev, next *priorityNode // doubly-linked list of siblings } func (n *priorityNode) setParent(parent *priorityNode) { if n == parent { panic("setParent to self") } if n.parent == parent { return } // Unlink from current parent. if parent := n.parent; parent != nil { if n.prev == nil { parent.kids = n.next } else { n.prev.next = n.next } if n.next != nil { n.next.prev = n.prev } } // Link to new parent. // If parent=nil, remove n from the tree. // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). n.parent = parent if parent == nil { n.next = nil n.prev = nil } else { n.next = parent.kids n.prev = nil if n.next != nil { n.next.prev = n } parent.kids = n } } func (n *priorityNode) addBytes(b int64) { n.bytes += b for ; n != nil; n = n.parent { n.subtreeBytes += b } } // walkReadyInOrder iterates over the tree in priority order, calling f for each node // with a non-empty write queue. When f returns true, this function returns true and the // walk halts. tmp is used as scratch space for sorting. // // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true // if any ancestor p of n is still open (ignoring the root node). func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { if !n.q.empty() && f(n, openParent) { return true } if n.kids == nil { return false } // Don't consider the root "open" when updating openParent since // we can't send data frames on the root stream (only control frames).
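For callers outside this file, the priority scheduler is reachable through the exported names defined here, NewPriorityWriteScheduler and PriorityWriteSchedulerConfig. The wiring sketch below is hedged: the http2.Server.NewWriteScheduler hook and http2.ConfigureServer are assumed from the rest of the vendored package, and the listen address, certificate paths and tree sizes are made up, not recommendations.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443"}

	h2 := &http2.Server{
		// Return a fresh scheduler per connection.
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(&http2.PriorityWriteSchedulerConfig{
				MaxClosedNodesInTree:     10,
				MaxIdleNodesInTree:       10,
				ThrottleOutOfOrderWrites: true,
			})
		},
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}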
if n.id != 0 { openParent = openParent || (n.state == priorityNodeOpen) } // Common case: only one kid or all kids have the same weight. // Some clients don't use weights; other clients (like web browsers) // use mostly-linear priority trees. w := n.kids.weight needSort := false for k := n.kids.next; k != nil; k = k.next { if k.weight != w { needSort = true break } } if !needSort { for k := n.kids; k != nil; k = k.next { if k.walkReadyInOrder(openParent, tmp, f) { return true } } return false } // Uncommon case: sort the child nodes. We remove the kids from the parent, // then re-insert after sorting so we can reuse tmp for future sort calls. *tmp = (*tmp)[:0] for n.kids != nil { *tmp = append(*tmp, n.kids) n.kids.setParent(nil) } sort.Sort(sortPriorityNodeSiblings(*tmp)) for i := len(*tmp) - 1; i >= 0; i-- { (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids } for k := n.kids; k != nil; k = k.next { if k.walkReadyInOrder(openParent, tmp, f) { return true } } return false } type sortPriorityNodeSiblings []*priorityNode func (z sortPriorityNodeSiblings) Len() int { return len(z) } func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } func (z sortPriorityNodeSiblings) Less(i, k int) bool { // Prefer the subtree that has sent fewer bytes relative to its weight. // See sections 5.3.2 and 5.3.4. wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) if bi == 0 && bk == 0 { return wi >= wk } if bk == 0 { return false } return bi/bk <= wi/wk } type priorityWriteScheduler struct { // root is the root of the priority tree, where root.id = 0. // The root queues control frames that are not associated with any stream. root priorityNode // nodes maps stream ids to priority tree nodes. nodes map[uint32]*priorityNode // maxID is the maximum stream id in nodes. maxID uint32 // lists of nodes that have been closed or are idle, but are kept in // the tree for improved prioritization. When the lengths exceed either // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. closedNodes, idleNodes []*priorityNode // From the config. maxClosedNodesInTree int maxIdleNodesInTree int writeThrottleLimit int32 enableWriteThrottle bool // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. tmp []*priorityNode // pool of empty queues for reuse. queuePool writeQueuePool } func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { // The stream may be currently idle but cannot be opened or closed. if curr := ws.nodes[streamID]; curr != nil { if curr.state != priorityNodeIdle { panic(fmt.Sprintf("stream %d already opened", streamID)) } curr.state = priorityNodeOpen return } // RFC 7540, Section 5.3.5: // "All streams are initially assigned a non-exclusive dependency on stream 0x0. // Pushed streams initially depend on their associated stream. In both cases, // streams are assigned a default weight of 16." 
parent := ws.nodes[options.PusherID] if parent == nil { parent = &ws.root } n := &priorityNode{ q: *ws.queuePool.get(), id: streamID, weight: priorityDefaultWeight, state: priorityNodeOpen, } n.setParent(parent) ws.nodes[streamID] = n if streamID > ws.maxID { ws.maxID = streamID } } func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { if streamID == 0 { panic("violation of WriteScheduler interface: cannot close stream 0") } if ws.nodes[streamID] == nil { panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) } if ws.nodes[streamID].state != priorityNodeOpen { panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) } n := ws.nodes[streamID] n.state = priorityNodeClosed n.addBytes(-n.bytes) q := n.q ws.queuePool.put(&q) n.q.s = nil if ws.maxClosedNodesInTree > 0 { ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) } else { ws.removeNode(n) } } func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { if streamID == 0 { panic("adjustPriority on root") } // If streamID does not exist, there are two cases: // - A closed stream that has been removed (this will have ID <= maxID) // - An idle stream that is being used for "grouping" (this will have ID > maxID) n := ws.nodes[streamID] if n == nil { if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { return } ws.maxID = streamID n = &priorityNode{ q: *ws.queuePool.get(), id: streamID, weight: priorityDefaultWeight, state: priorityNodeIdle, } n.setParent(&ws.root) ws.nodes[streamID] = n ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) } // Section 5.3.1: A dependency on a stream that is not currently in the tree // results in that stream being given a default priority (Section 5.3.5). parent := ws.nodes[priority.StreamDep] if parent == nil { n.setParent(&ws.root) n.weight = priorityDefaultWeight return } // Ignore if the client tries to make a node its own parent. if n == parent { return } // Section 5.3.3: // "If a stream is made dependent on one of its own dependencies, the // formerly dependent stream is first moved to be dependent on the // reprioritized stream's previous parent. The moved dependency retains // its weight." // // That is: if parent depends on n, move parent to depend on n.parent. for x := parent.parent; x != nil; x = x.parent { if x == n { parent.setParent(n.parent) break } } // Section 5.3.3: The exclusive flag causes the stream to become the sole // dependency of its parent stream, causing other dependencies to become // dependent on the exclusive stream. if priority.Exclusive { k := parent.kids for k != nil { next := k.next if k != n { k.setParent(n) } k = next } } n.setParent(parent) n.weight = priority.Weight } func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { var n *priorityNode if id := wr.StreamID(); id == 0 { n = &ws.root } else { n = ws.nodes[id] if n == nil { // id is an idle or closed stream. wr should not be a HEADERS or // DATA frame. However, wr can be a RST_STREAM. In this case, we // push wr onto the root, rather than creating a new priorityNode, // since RST_STREAM is tiny and the stream's priority is unknown // anyway. See issue #17919. 
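AdjustStream above reacts to PRIORITY frames carrying a PriorityParam. As a hedged sketch of what a peer puts on the wire, the snippet below writes such a frame with the package's framer; http2.NewFramer and (*Framer).WritePriority are assumed from frame.go in this same vendored package, and the stream IDs and weight are made up.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, nil) // write-only framer

	// Make stream 5 depend exclusively on stream 3 with wire weight 200
	// (the effective weight is value+1, matching the priorityNode comment above).
	err := fr.WritePriority(5, http2.PriorityParam{
		StreamDep: 3,
		Exclusive: true,
		Weight:    200,
	})
	fmt.Println(err, buf.Len()) // a PRIORITY frame is a 9-byte header plus 5 bytes of payload
}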
if wr.DataSize() > 0 { panic("add DATA on non-open stream") } n = &ws.root } } n.q.push(wr) } func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { limit := int32(math.MaxInt32) if openParent { limit = ws.writeThrottleLimit } wr, ok = n.q.consume(limit) if !ok { return false } n.addBytes(int64(wr.DataSize())) // If B depends on A and B continuously has data available but A // does not, gradually increase the throttling limit to allow B to // steal more and more bandwidth from A. if openParent { ws.writeThrottleLimit += 1024 if ws.writeThrottleLimit < 0 { ws.writeThrottleLimit = math.MaxInt32 } } else if ws.enableWriteThrottle { ws.writeThrottleLimit = 1024 } return true }) return wr, ok } func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { if maxSize == 0 { return } if len(*list) == maxSize { // Remove the oldest node, then shift left. ws.removeNode((*list)[0]) x := (*list)[1:] copy(*list, x) *list = (*list)[:len(x)] } *list = append(*list, n) } func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { for k := n.kids; k != nil; k = k.next { k.setParent(n.parent) } n.setParent(nil) delete(ws.nodes, n.id) } writesched_random.go000066400000000000000000000036151324746544700340320ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/http2// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package http2 import "math" // NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 // priorities. Control frames like SETTINGS and PING are written before DATA // frames, but if no control frames are queued and multiple streams have queued // HEADERS or DATA frames, Pop selects a ready stream arbitrarily. func NewRandomWriteScheduler() WriteScheduler { return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} } type randomWriteScheduler struct { // zero are frames not associated with a specific stream. zero writeQueue // sq contains the stream-specific queues, keyed by stream ID. // When a stream is idle or closed, it's deleted from the map. sq map[uint32]*writeQueue // pool of empty queues for reuse. queuePool writeQueuePool } func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { // no-op: idle streams are not tracked } func (ws *randomWriteScheduler) CloseStream(streamID uint32) { q, ok := ws.sq[streamID] if !ok { return } delete(ws.sq, streamID) ws.queuePool.put(q) } func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { // no-op: priorities are ignored } func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { id := wr.StreamID() if id == 0 { ws.zero.push(wr) return } q, ok := ws.sq[id] if !ok { q = ws.queuePool.get() ws.sq[id] = q } q.push(wr) } func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { // Control frames first. if !ws.zero.empty() { return ws.zero.shift(), true } // Iterate over all non-idle streams until finding one that can be consumed. 
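The random scheduler defined next is the exported alternative to the priority tree: one FIFO per stream, PRIORITY frames ignored. A minimal sketch that merely constructs it; wiring it into a server would go through the same assumed Server.NewWriteScheduler hook shown earlier.

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	ws := http2.NewRandomWriteScheduler()
	fmt.Printf("%T\n", ws) // prints the concrete (unexported) scheduler type
}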
for _, q := range ws.sq { if wr, ok := q.consume(math.MaxInt32); ok { return wr, true } } return FrameWriteRequest{}, false } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/idna/000077500000000000000000000000001324746544700277265ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/idna/idna.go000066400000000000000000000431741324746544700312010ustar00rootroot00000000000000// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to // deal with the transition from IDNA2003. // // IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC // 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. // UTS #46 is defined in http://www.unicode.org/reports/tr46. // See http://unicode.org/cldr/utility/idna.jsp for a visualization of the // differences between these two standards. package idna // import "golang.org/x/net/idna" import ( "fmt" "strings" "unicode/utf8" "golang.org/x/text/secure/bidirule" "golang.org/x/text/unicode/norm" ) // NOTE: Unlike common practice in Go APIs, the functions will return a // sanitized domain name in case of errors. Browsers sometimes use a partially // evaluated string as lookup. // TODO: the current error handling is, in my opinion, the least opinionated. // Other strategies are also viable, though: // Option 1) Return an empty string in case of error, but allow the user to // specify explicitly which errors to ignore. // Option 2) Return the partially evaluated string if it is itself a valid // string, otherwise return the empty string in case of error. // Option 3) Option 1 and 2. // Option 4) Always return an empty string for now and implement Option 1 as // needed, and document that the return string may not be empty in case of // error in the future. // I think Option 1 is best, but it is quite opinionated. // ToASCII is a wrapper for Punycode.ToASCII. func ToASCII(s string) (string, error) { return Punycode.process(s, true) } // ToUnicode is a wrapper for Punycode.ToUnicode. func ToUnicode(s string) (string, error) { return Punycode.process(s, false) } // An Option configures a Profile at creation time. type Option func(*options) // Transitional sets a Profile to use the Transitional mapping as defined in UTS // #46. This will cause, for example, "ß" to be mapped to "ss". Using the // transitional mapping provides a compromise between IDNA2003 and IDNA2008 // compatibility. It is used by most browsers when resolving domain names. This // option is only meaningful if combined with MapForLookup. func Transitional(transitional bool) Option { return func(o *options) { o.transitional = true } } // VerifyDNSLength sets whether a Profile should fail if any of the IDN parts // are longer than allowed by the RFC. func VerifyDNSLength(verify bool) Option { return func(o *options) { o.verifyDNSLength = verify } } // ValidateLabels sets whether to check the mandatory label validation criteria // as defined in Section 5.4 of RFC 5891. This includes testing for correct use // of hyphens ('-'), normalization, validity of runes, and the context rules. 
func ValidateLabels(enable bool) Option { return func(o *options) { // Don't override existing mappings, but set one that at least checks // normalization if it is not set. if o.mapping == nil && enable { o.mapping = normalize } o.trie = trie o.validateLabels = enable o.fromPuny = validateFromPunycode } } // StrictDomainName limits the set of permissable ASCII characters to those // allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the // hyphen). This is set by default for MapForLookup and ValidateForRegistration. // // This option is useful, for instance, for browsers that allow characters // outside this range, for example a '_' (U+005F LOW LINE). See // http://www.rfc-editor.org/std/std3.txt for more details This option // corresponds to the UseSTD3ASCIIRules option in UTS #46. func StrictDomainName(use bool) Option { return func(o *options) { o.trie = trie o.useSTD3Rules = use o.fromPuny = validateFromPunycode } } // NOTE: the following options pull in tables. The tables should not be linked // in as long as the options are not used. // BidiRule enables the Bidi rule as defined in RFC 5893. Any application // that relies on proper validation of labels should include this rule. func BidiRule() Option { return func(o *options) { o.bidirule = bidirule.ValidString } } // ValidateForRegistration sets validation options to verify that a given IDN is // properly formatted for registration as defined by Section 4 of RFC 5891. func ValidateForRegistration() Option { return func(o *options) { o.mapping = validateRegistration StrictDomainName(true)(o) ValidateLabels(true)(o) VerifyDNSLength(true)(o) BidiRule()(o) } } // MapForLookup sets validation and mapping options such that a given IDN is // transformed for domain name lookup according to the requirements set out in // Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, // RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option // to add this check. // // The mappings include normalization and mapping case, width and other // compatibility mappings. func MapForLookup() Option { return func(o *options) { o.mapping = validateAndMap StrictDomainName(true)(o) ValidateLabels(true)(o) } } type options struct { transitional bool useSTD3Rules bool validateLabels bool verifyDNSLength bool trie *idnaTrie // fromPuny calls validation rules when converting A-labels to U-labels. fromPuny func(p *Profile, s string) error // mapping implements a validation and mapping step as defined in RFC 5895 // or UTS 46, tailored to, for example, domain registration or lookup. mapping func(p *Profile, s string) (string, error) // bidirule, if specified, checks whether s conforms to the Bidi Rule // defined in RFC 5893. bidirule func(s string) bool } // A Profile defines the configuration of a IDNA mapper. type Profile struct { options } func apply(o *options, opts []Option) { for _, f := range opts { f(o) } } // New creates a new Profile. // // With no options, the returned Profile is the most permissive and equals the // Punycode Profile. Options can be passed to further restrict the Profile. The // MapForLookup and ValidateForRegistration options set a collection of options, // for lookup and registration purposes respectively, which can be tailored by // adding more fine-grained options, where later options override earlier // options. func New(o ...Option) *Profile { p := &Profile{} apply(&p.options, o) return p } // ToASCII converts a domain or domain label to its ASCII form. 
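The options and profiles above compose as in the short sketch below, which uses only names declared in this file (ToASCII, ToUnicode, New, MapForLookup, BidiRule, VerifyDNSLength, Lookup); the example domain is the one from the ToASCII documentation that follows, and the custom profile is only an approximation of the built-in Lookup profile.

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Package-level wrappers use the raw Punycode profile.
	a, err := idna.ToASCII("bücher.example.com")
	fmt.Println(a, err) // xn--bcher-kva.example.com <nil>

	// A custom profile built from the options above; MapForLookup plus
	// BidiRule roughly matches the built-in Lookup profile.
	p := idna.New(idna.MapForLookup(), idna.BidiRule(), idna.VerifyDNSLength(true))
	a, err = p.ToASCII("Bücher.example.com")
	fmt.Println(a, err)

	u, err := idna.Lookup.ToUnicode("xn--bcher-kva.example.com")
	fmt.Println(u, err) // bücher.example.com <nil>
}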
For example, // ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and // ToASCII("golang") is "golang". If an error is encountered it will return // an error and a (partially) processed result. func (p *Profile) ToASCII(s string) (string, error) { return p.process(s, true) } // ToUnicode converts a domain or domain label to its Unicode form. For example, // ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and // ToUnicode("golang") is "golang". If an error is encountered it will return // an error and a (partially) processed result. func (p *Profile) ToUnicode(s string) (string, error) { pp := *p pp.transitional = false return pp.process(s, false) } // String reports a string with a description of the profile for debugging // purposes. The string format may change with different versions. func (p *Profile) String() string { s := "" if p.transitional { s = "Transitional" } else { s = "NonTransitional" } if p.useSTD3Rules { s += ":UseSTD3Rules" } if p.validateLabels { s += ":ValidateLabels" } if p.verifyDNSLength { s += ":VerifyDNSLength" } return s } var ( // Punycode is a Profile that does raw punycode processing with a minimum // of validation. Punycode *Profile = punycode // Lookup is the recommended profile for looking up domain names, according // to Section 5 of RFC 5891. The exact configuration of this profile may // change over time. Lookup *Profile = lookup // Display is the recommended profile for displaying domain names. // The configuration of this profile may change over time. Display *Profile = display // Registration is the recommended profile for checking whether a given // IDN is valid for registration, according to Section 4 of RFC 5891. Registration *Profile = registration punycode = &Profile{} lookup = &Profile{options{ transitional: true, useSTD3Rules: true, validateLabels: true, trie: trie, fromPuny: validateFromPunycode, mapping: validateAndMap, bidirule: bidirule.ValidString, }} display = &Profile{options{ useSTD3Rules: true, validateLabels: true, trie: trie, fromPuny: validateFromPunycode, mapping: validateAndMap, bidirule: bidirule.ValidString, }} registration = &Profile{options{ useSTD3Rules: true, validateLabels: true, verifyDNSLength: true, trie: trie, fromPuny: validateFromPunycode, mapping: validateRegistration, bidirule: bidirule.ValidString, }} // TODO: profiles // Register: recommended for approving domain names: don't do any mappings // but rather reject on invalid input. Bundle or block deviation characters. ) type labelError struct{ label, code_ string } func (e labelError) code() string { return e.code_ } func (e labelError) Error() string { return fmt.Sprintf("idna: invalid label %q", e.label) } type runeError rune func (e runeError) code() string { return "P1" } func (e runeError) Error() string { return fmt.Sprintf("idna: disallowed rune %U", e) } // process implements the algorithm described in section 4 of UTS #46, // see http://www.unicode.org/reports/tr46. func (p *Profile) process(s string, toASCII bool) (string, error) { var err error if p.mapping != nil { s, err = p.mapping(p, s) } // Remove leading empty labels. for ; len(s) > 0 && s[0] == '.'; s = s[1:] { } // It seems like we should only create this error on ToASCII, but the // UTS 46 conformance tests suggests we should always check this. if err == nil && p.verifyDNSLength && s == "" { err = &labelError{s, "A4"} } labels := labelIter{orig: s} for ; !labels.done(); labels.next() { label := labels.label() if label == "" { // Empty labels are not okay. 
The label iterator skips the last // label if it is empty. if err == nil && p.verifyDNSLength { err = &labelError{s, "A4"} } continue } if strings.HasPrefix(label, acePrefix) { u, err2 := decode(label[len(acePrefix):]) if err2 != nil { if err == nil { err = err2 } // Spec says keep the old label. continue } labels.set(u) if err == nil && p.validateLabels { err = p.fromPuny(p, u) } if err == nil { // This should be called on NonTransitional, according to the // spec, but that currently does not have any effect. Use the // original profile to preserve options. err = p.validateLabel(u) } } else if err == nil { err = p.validateLabel(label) } } if toASCII { for labels.reset(); !labels.done(); labels.next() { label := labels.label() if !ascii(label) { a, err2 := encode(acePrefix, label) if err == nil { err = err2 } label = a labels.set(a) } n := len(label) if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { err = &labelError{label, "A4"} } } } s = labels.result() if toASCII && p.verifyDNSLength && err == nil { // Compute the length of the domain name minus the root label and its dot. n := len(s) if n > 0 && s[n-1] == '.' { n-- } if len(s) < 1 || n > 253 { err = &labelError{s, "A4"} } } return s, err } func normalize(p *Profile, s string) (string, error) { return norm.NFC.String(s), nil } func validateRegistration(p *Profile, s string) (string, error) { if !norm.NFC.IsNormalString(s) { return s, &labelError{s, "V1"} } var err error for i := 0; i < len(s); { v, sz := trie.lookupString(s[i:]) i += sz // Copy bytes not copied so far. switch p.simplify(info(v).category()) { // TODO: handle the NV8 defined in the Unicode idna data set to allow // for strict conformance to IDNA2008. case valid, deviation: case disallowed, mapped, unknown, ignored: if err == nil { r, _ := utf8.DecodeRuneInString(s[i:]) err = runeError(r) } } } return s, err } func validateAndMap(p *Profile, s string) (string, error) { var ( err error b []byte k int ) for i := 0; i < len(s); { v, sz := trie.lookupString(s[i:]) start := i i += sz // Copy bytes not copied so far. switch p.simplify(info(v).category()) { case valid: continue case disallowed: if err == nil { r, _ := utf8.DecodeRuneInString(s[i:]) err = runeError(r) } continue case mapped, deviation: b = append(b, s[k:start]...) b = info(v).appendMapping(b, s[start:i]) case ignored: b = append(b, s[k:start]...) // drop the rune case unknown: b = append(b, s[k:start]...) b = append(b, "\ufffd"...) } k = i } if k == 0 { // No changes so far. s = norm.NFC.String(s) } else { b = append(b, s[k:]...) if norm.NFC.QuickSpan(b) != len(b) { b = norm.NFC.Bytes(b) } // TODO: the punycode converters require strings as input. s = string(b) } return s, err } // A labelIter allows iterating over domain name labels. type labelIter struct { orig string slice []string curStart int curEnd int i int } func (l *labelIter) reset() { l.curStart = 0 l.curEnd = 0 l.i = 0 } func (l *labelIter) done() bool { return l.curStart >= len(l.orig) } func (l *labelIter) result() string { if l.slice != nil { return strings.Join(l.slice, ".") } return l.orig } func (l *labelIter) label() string { if l.slice != nil { return l.slice[l.i] } p := strings.IndexByte(l.orig[l.curStart:], '.') l.curEnd = l.curStart + p if p == -1 { l.curEnd = len(l.orig) } return l.orig[l.curStart:l.curEnd] } // next sets the value to the next label. It skips the last label if it is empty. 
func (l *labelIter) next() { l.i++ if l.slice != nil { if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { l.curStart = len(l.orig) } } else { l.curStart = l.curEnd + 1 if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { l.curStart = len(l.orig) } } } func (l *labelIter) set(s string) { if l.slice == nil { l.slice = strings.Split(l.orig, ".") } l.slice[l.i] = s } // acePrefix is the ASCII Compatible Encoding prefix. const acePrefix = "xn--" func (p *Profile) simplify(cat category) category { switch cat { case disallowedSTD3Mapped: if p.useSTD3Rules { cat = disallowed } else { cat = mapped } case disallowedSTD3Valid: if p.useSTD3Rules { cat = disallowed } else { cat = valid } case deviation: if !p.transitional { cat = valid } case validNV8, validXV8: // TODO: handle V2008 cat = valid } return cat } func validateFromPunycode(p *Profile, s string) error { if !norm.NFC.IsNormalString(s) { return &labelError{s, "V1"} } for i := 0; i < len(s); { v, sz := trie.lookupString(s[i:]) if c := p.simplify(info(v).category()); c != valid && c != deviation { return &labelError{s, "V6"} } i += sz } return nil } const ( zwnj = "\u200c" zwj = "\u200d" ) type joinState int8 const ( stateStart joinState = iota stateVirama stateBefore stateBeforeVirama stateAfter stateFAIL ) var joinStates = [][numJoinTypes]joinState{ stateStart: { joiningL: stateBefore, joiningD: stateBefore, joinZWNJ: stateFAIL, joinZWJ: stateFAIL, joinVirama: stateVirama, }, stateVirama: { joiningL: stateBefore, joiningD: stateBefore, }, stateBefore: { joiningL: stateBefore, joiningD: stateBefore, joiningT: stateBefore, joinZWNJ: stateAfter, joinZWJ: stateFAIL, joinVirama: stateBeforeVirama, }, stateBeforeVirama: { joiningL: stateBefore, joiningD: stateBefore, joiningT: stateBefore, }, stateAfter: { joiningL: stateFAIL, joiningD: stateBefore, joiningT: stateAfter, joiningR: stateStart, joinZWNJ: stateFAIL, joinZWJ: stateFAIL, joinVirama: stateAfter, // no-op as we can't accept joiners here }, stateFAIL: { 0: stateFAIL, joiningL: stateFAIL, joiningD: stateFAIL, joiningT: stateFAIL, joiningR: stateFAIL, joinZWNJ: stateFAIL, joinZWJ: stateFAIL, joinVirama: stateFAIL, }, } // validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are // already implicitly satisfied by the overall implementation. func (p *Profile) validateLabel(s string) error { if s == "" { if p.verifyDNSLength { return &labelError{s, "A4"} } return nil } if p.bidirule != nil && !p.bidirule(s) { return &labelError{s, "B"} } if !p.validateLabels { return nil } trie := p.trie // p.validateLabels is only set if trie is set. if len(s) > 4 && s[2] == '-' && s[3] == '-' { return &labelError{s, "V2"} } if s[0] == '-' || s[len(s)-1] == '-' { return &labelError{s, "V3"} } // TODO: merge the use of this in the trie. v, sz := trie.lookupString(s) x := info(v) if x.isModifier() { return &labelError{s, "V5"} } // Quickly return in the absence of zero-width (non) joiners. 
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { return nil } st := stateStart for i := 0; ; { jt := x.joinType() if s[i:i+sz] == zwj { jt = joinZWJ } else if s[i:i+sz] == zwnj { jt = joinZWNJ } st = joinStates[st][jt] if x.isViramaModifier() { st = joinStates[st][joinVirama] } if i += sz; i == len(s) { break } v, sz = trie.lookupString(s[i:]) x = info(v) } if st == stateFAIL || st == stateAfter { return &labelError{s, "C"} } return nil } func ascii(s string) bool { for i := 0; i < len(s); i++ { if s[i] >= utf8.RuneSelf { return false } } return true } punycode.go000066400000000000000000000104171324746544700320270ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/idna// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package idna // This file implements the Punycode algorithm from RFC 3492. import ( "math" "strings" "unicode/utf8" ) // These parameter values are specified in section 5. // // All computation is done with int32s, so that overflow behavior is identical // regardless of whether int is 32-bit or 64-bit. const ( base int32 = 36 damp int32 = 700 initialBias int32 = 72 initialN int32 = 128 skew int32 = 38 tmax int32 = 26 tmin int32 = 1 ) func punyError(s string) error { return &labelError{s, "A3"} } // decode decodes a string as specified in section 6.2. func decode(encoded string) (string, error) { if encoded == "" { return "", nil } pos := 1 + strings.LastIndex(encoded, "-") if pos == 1 { return "", punyError(encoded) } if pos == len(encoded) { return encoded[:len(encoded)-1], nil } output := make([]rune, 0, len(encoded)) if pos != 0 { for _, r := range encoded[:pos-1] { output = append(output, r) } } i, n, bias := int32(0), initialN, initialBias for pos < len(encoded) { oldI, w := i, int32(1) for k := base; ; k += base { if pos == len(encoded) { return "", punyError(encoded) } digit, ok := decodeDigit(encoded[pos]) if !ok { return "", punyError(encoded) } pos++ i += digit * w if i < 0 { return "", punyError(encoded) } t := k - bias if t < tmin { t = tmin } else if t > tmax { t = tmax } if digit < t { break } w *= base - t if w >= math.MaxInt32/base { return "", punyError(encoded) } } x := int32(len(output) + 1) bias = adapt(i-oldI, x, oldI == 0) n += i / x i %= x if n > utf8.MaxRune || len(output) >= 1024 { return "", punyError(encoded) } output = append(output, 0) copy(output[i+1:], output[i:]) output[i] = n i++ } return string(output), nil } // encode encodes a string as specified in section 6.3 and prepends prefix to // the result. // // The "while h < length(input)" line in the specification becomes "for // remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
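decode and encode below are unexported, but the Punycode profile declared in idna.go is a thin public wrapper around exactly this code path plus the acePrefix handling. A sketch of the raw ACE round-trip; the expected strings follow the bücher example used earlier in idna.go.

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// idna.Punycode does raw ACE processing with minimal validation.
	u, err := idna.Punycode.ToUnicode("xn--bcher-kva")
	fmt.Println(u, err) // bücher <nil>

	a, err := idna.Punycode.ToASCII("bücher")
	fmt.Println(a, err) // xn--bcher-kva <nil>
}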
func encode(prefix, s string) (string, error) { output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) copy(output, prefix) delta, n, bias := int32(0), initialN, initialBias b, remaining := int32(0), int32(0) for _, r := range s { if r < 0x80 { b++ output = append(output, byte(r)) } else { remaining++ } } h := b if b > 0 { output = append(output, '-') } for remaining != 0 { m := int32(0x7fffffff) for _, r := range s { if m > r && r >= n { m = r } } delta += (m - n) * (h + 1) if delta < 0 { return "", punyError(s) } n = m for _, r := range s { if r < n { delta++ if delta < 0 { return "", punyError(s) } continue } if r > n { continue } q := delta for k := base; ; k += base { t := k - bias if t < tmin { t = tmin } else if t > tmax { t = tmax } if q < t { break } output = append(output, encodeDigit(t+(q-t)%(base-t))) q = (q - t) / (base - t) } output = append(output, encodeDigit(q)) bias = adapt(delta, h+1, h == b) delta = 0 h++ remaining-- } delta++ n++ } return string(output), nil } func decodeDigit(x byte) (digit int32, ok bool) { switch { case '0' <= x && x <= '9': return int32(x - ('0' - 26)), true case 'A' <= x && x <= 'Z': return int32(x - 'A'), true case 'a' <= x && x <= 'z': return int32(x - 'a'), true } return 0, false } func encodeDigit(digit int32) byte { switch { case 0 <= digit && digit < 26: return byte(digit + 'a') case 26 <= digit && digit < 36: return byte(digit + ('0' - 26)) } panic("idna: internal error in punycode encoding") } // adapt is the bias adaptation function specified in section 6.1. func adapt(delta, numPoints int32, firstTime bool) int32 { if firstTime { delta /= damp } else { delta /= 2 } delta += delta / numPoints k := int32(0) for delta > ((base-tmin)*tmax)/2 { delta /= base - tmin k += base } return k + (base-tmin+1)*delta/(delta+skew) } tables.go000066400000000000000000010150611324746544700314540ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/idna// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. package idna // UnicodeVersion is the Unicode version from which the tables in this package are derived. const UnicodeVersion = "9.0.0" var mappings string = "" + // Size: 8176 bytes "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" 
+ "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + 
"\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" 
+ "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + "頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" var xorData string = "" + // Size: 4855 
bytes "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + 
"\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + 
"\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + "\x04\x03\x0c?\x05\x03\x0c" + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + "\x05\x22\x05\x03\x050\x1d" // lookup returns the trie value for the first UTF-8 encoding in s and // the width in bytes of this encoding. The size will be 0 if s does not // hold enough bytes to complete the encoding. len(s) must be greater than 0. func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { c0 := s[0] switch { case c0 < 0x80: // is ASCII return idnaValues[c0], 1 case c0 < 0xC2: return 0, 1 // Illegal UTF-8: not a starter, not ASCII. case c0 < 0xE0: // 2-byte UTF-8 if len(s) < 2 { return 0, 0 } i := idnaIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c1), 2 case c0 < 0xF0: // 3-byte UTF-8 if len(s) < 3 { return 0, 0 } i := idnaIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } o := uint32(i)<<6 + uint32(c1) i = idnaIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return 0, 2 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c2), 3 case c0 < 0xF8: // 4-byte UTF-8 if len(s) < 4 { return 0, 0 } i := idnaIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } o := uint32(i)<<6 + uint32(c1) i = idnaIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return 0, 2 // Illegal UTF-8: not a continuation byte. } o = uint32(i)<<6 + uint32(c2) i = idnaIndex[o] c3 := s[3] if c3 < 0x80 || 0xC0 <= c3 { return 0, 3 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c3), 4 } // Illegal rune return 0, 1 } // lookupUnsafe returns the trie value for the first UTF-8 encoding in s. // s must start with a full and valid UTF-8 encoded rune. func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { c0 := s[0] if c0 < 0x80 { // is ASCII return idnaValues[c0] } i := idnaIndex[c0] if c0 < 0xE0 { // 2-byte UTF-8 return t.lookupValue(uint32(i), s[1]) } i = idnaIndex[uint32(i)<<6+uint32(s[1])] if c0 < 0xF0 { // 3-byte UTF-8 return t.lookupValue(uint32(i), s[2]) } i = idnaIndex[uint32(i)<<6+uint32(s[2])] if c0 < 0xF8 { // 4-byte UTF-8 return t.lookupValue(uint32(i), s[3]) } return 0 } // lookupString returns the trie value for the first UTF-8 encoding in s and // the width in bytes of this encoding. The size will be 0 if s does not // hold enough bytes to complete the encoding. len(s) must be greater than 0. func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { c0 := s[0] switch { case c0 < 0x80: // is ASCII return idnaValues[c0], 1 case c0 < 0xC2: return 0, 1 // Illegal UTF-8: not a starter, not ASCII. case c0 < 0xE0: // 2-byte UTF-8 if len(s) < 2 { return 0, 0 } i := idnaIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. 
		}
		return t.lookupValue(uint32(i), c1), 2
	case c0 < 0xF0: // 3-byte UTF-8
		if len(s) < 3 {
			return 0, 0
		}
		i := idnaIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return 0, 1 // Illegal UTF-8: not a continuation byte.
		}
		o := uint32(i)<<6 + uint32(c1)
		i = idnaIndex[o]
		c2 := s[2]
		if c2 < 0x80 || 0xC0 <= c2 {
			return 0, 2 // Illegal UTF-8: not a continuation byte.
		}
		return t.lookupValue(uint32(i), c2), 3
	case c0 < 0xF8: // 4-byte UTF-8
		if len(s) < 4 {
			return 0, 0
		}
		i := idnaIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return 0, 1 // Illegal UTF-8: not a continuation byte.
		}
		o := uint32(i)<<6 + uint32(c1)
		i = idnaIndex[o]
		c2 := s[2]
		if c2 < 0x80 || 0xC0 <= c2 {
			return 0, 2 // Illegal UTF-8: not a continuation byte.
		}
		o = uint32(i)<<6 + uint32(c2)
		i = idnaIndex[o]
		c3 := s[3]
		if c3 < 0x80 || 0xC0 <= c3 {
			return 0, 3 // Illegal UTF-8: not a continuation byte.
		}
		return t.lookupValue(uint32(i), c3), 4
	}
	// Illegal rune
	return 0, 1
}

// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
// s must start with a full and valid UTF-8 encoded rune.
func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
	c0 := s[0]
	if c0 < 0x80 { // is ASCII
		return idnaValues[c0]
	}
	i := idnaIndex[c0]
	if c0 < 0xE0 { // 2-byte UTF-8
		return t.lookupValue(uint32(i), s[1])
	}
	i = idnaIndex[uint32(i)<<6+uint32(s[1])]
	if c0 < 0xF0 { // 3-byte UTF-8
		return t.lookupValue(uint32(i), s[2])
	}
	i = idnaIndex[uint32(i)<<6+uint32(s[2])]
	if c0 < 0xF8 { // 4-byte UTF-8
		return t.lookupValue(uint32(i), s[3])
	}
	return 0
}

// idnaTrie. Total size: 28496 bytes (27.83 KiB). Checksum: 43288b883596640e.
type idnaTrie struct{}

func newIdnaTrie(i int) *idnaTrie {
	return &idnaTrie{}
}

// lookupValue determines the type of block n and looks up the value for b.
func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
	switch {
	case n < 123:
		return uint16(idnaValues[n<<6+uint32(b)])
	default:
		n -= 123
		return uint16(idnaSparse.lookup(n, b))
	}
}

// idnaValues: 125 blocks, 8000 entries, 16000 bytes
// The third block is the zero block.
var idnaValues = [8000]uint16{ // Block 0x0, offset 0x0 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, // Block 0x1, offset 0x40 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, // Block 0x2, offset 0x80 // Block 0x3, offset 0xc0 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, // Block 0x4, offset 0x100 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008, 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 
0x12f: 0x0008, 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, // Block 0x5, offset 0x140 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, // Block 0x6, offset 0x180 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, // Block 0x7, offset 0x1c0 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, // Block 0x8, offset 0x200 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, 0x212: 0xe00d, 0x213: 0x0008, 0x214: 
0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, // Block 0x9, offset 0x240 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, // Block 0xa, offset 0x280 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x1308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, 0x286: 0x1308, 0x287: 0x1308, 0x288: 0x1308, 0x289: 0x1308, 0x28a: 0x1308, 0x28b: 0x1308, 0x28c: 0x1308, 0x28d: 0x1308, 0x28e: 0x1308, 0x28f: 0x13c0, 0x290: 0x1308, 0x291: 0x1308, 0x292: 0x1308, 0x293: 0x1308, 0x294: 0x1308, 0x295: 0x1308, 0x296: 0x1308, 0x297: 0x1308, 0x298: 0x1308, 0x299: 0x1308, 0x29a: 0x1308, 0x29b: 0x1308, 0x29c: 0x1308, 0x29d: 0x1308, 0x29e: 0x1308, 0x29f: 0x1308, 0x2a0: 0x1308, 0x2a1: 0x1308, 0x2a2: 0x1308, 0x2a3: 0x1308, 0x2a4: 0x1308, 0x2a5: 0x1308, 0x2a6: 0x1308, 0x2a7: 0x1308, 0x2a8: 0x1308, 0x2a9: 0x1308, 0x2aa: 0x1308, 0x2ab: 0x1308, 0x2ac: 0x1308, 0x2ad: 0x1308, 0x2ae: 0x1308, 0x2af: 0x1308, 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, // Block 0xb, offset 0x2c0 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, 
0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, // Block 0xc, offset 0x300 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, // Block 0xd, offset 0x340 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, // Block 0xe, offset 0x380 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x1308, 0x384: 0x1308, 0x385: 0x1308, 0x386: 0x1308, 0x387: 0x1308, 0x388: 0x1318, 0x389: 0x1318, 0x38a: 0xe00d, 0x38b: 0x0008, 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, // Block 0xf, offset 0x3c0 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d, 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 
0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, // Block 0x10, offset 0x400 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, // Block 0x11, offset 0x440 0x440: 0x0040, 0x441: 0x0040, 0x442: 0x0040, 0x443: 0x0040, 0x444: 0x0040, 0x445: 0x0040, 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0018, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0018, 0x44c: 0x0018, 0x44d: 0x0018, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x1308, 0x451: 0x1308, 0x452: 0x1308, 0x453: 0x1308, 0x454: 0x1308, 0x455: 0x1308, 0x456: 0x1308, 0x457: 0x1308, 0x458: 0x1308, 0x459: 0x1308, 0x45a: 0x1308, 0x45b: 0x0018, 0x45c: 0x0340, 0x45d: 0x0040, 0x45e: 0x0018, 0x45f: 0x0018, 0x460: 0x0208, 0x461: 0x0008, 0x462: 0x0408, 0x463: 0x0408, 0x464: 0x0408, 0x465: 0x0408, 0x466: 0x0208, 0x467: 0x0408, 0x468: 0x0208, 0x469: 0x0408, 0x46a: 0x0208, 0x46b: 0x0208, 0x46c: 0x0208, 0x46d: 0x0208, 0x46e: 0x0208, 0x46f: 0x0408, 0x470: 0x0408, 0x471: 0x0408, 0x472: 0x0408, 0x473: 0x0208, 0x474: 0x0208, 0x475: 0x0208, 0x476: 0x0208, 0x477: 0x0208, 0x478: 0x0208, 0x479: 0x0208, 0x47a: 0x0208, 0x47b: 0x0208, 0x47c: 0x0208, 0x47d: 0x0208, 0x47e: 0x0208, 0x47f: 0x0208, // Block 0x12, offset 0x480 0x480: 0x0408, 0x481: 0x0208, 0x482: 0x0208, 0x483: 0x0408, 0x484: 0x0408, 0x485: 0x0408, 0x486: 0x0408, 0x487: 0x0408, 0x488: 0x0408, 0x489: 0x0408, 0x48a: 0x0408, 0x48b: 0x0408, 0x48c: 0x0208, 0x48d: 0x0408, 0x48e: 0x0208, 0x48f: 0x0408, 0x490: 0x0208, 0x491: 0x0208, 0x492: 0x0408, 0x493: 0x0408, 0x494: 0x0018, 0x495: 0x0408, 0x496: 0x1308, 0x497: 0x1308, 0x498: 0x1308, 0x499: 0x1308, 0x49a: 0x1308, 0x49b: 0x1308, 0x49c: 0x1308, 0x49d: 0x0040, 0x49e: 0x0018, 0x49f: 0x1308, 0x4a0: 0x1308, 0x4a1: 0x1308, 0x4a2: 0x1308, 0x4a3: 0x1308, 0x4a4: 0x1308, 0x4a5: 0x0008, 0x4a6: 0x0008, 0x4a7: 0x1308, 0x4a8: 0x1308, 0x4a9: 0x0018, 0x4aa: 0x1308, 0x4ab: 0x1308, 0x4ac: 0x1308, 0x4ad: 0x1308, 0x4ae: 0x0408, 0x4af: 0x0408, 0x4b0: 0x0008, 0x4b1: 0x0008, 0x4b2: 0x0008, 0x4b3: 0x0008, 0x4b4: 0x0008, 0x4b5: 0x0008, 0x4b6: 0x0008, 0x4b7: 0x0008, 0x4b8: 0x0008, 0x4b9: 0x0008, 0x4ba: 0x0208, 0x4bb: 0x0208, 0x4bc: 0x0208, 0x4bd: 0x0008, 0x4be: 0x0008, 0x4bf: 0x0208, // Block 0x13, offset 0x4c0 0x4c0: 0x0018, 0x4c1: 0x0018, 0x4c2: 0x0018, 0x4c3: 0x0018, 0x4c4: 0x0018, 0x4c5: 0x0018, 0x4c6: 0x0018, 
0x4c7: 0x0018, 0x4c8: 0x0018, 0x4c9: 0x0018, 0x4ca: 0x0018, 0x4cb: 0x0018, 0x4cc: 0x0018, 0x4cd: 0x0018, 0x4ce: 0x0040, 0x4cf: 0x0340, 0x4d0: 0x0408, 0x4d1: 0x1308, 0x4d2: 0x0208, 0x4d3: 0x0208, 0x4d4: 0x0208, 0x4d5: 0x0408, 0x4d6: 0x0408, 0x4d7: 0x0408, 0x4d8: 0x0408, 0x4d9: 0x0408, 0x4da: 0x0208, 0x4db: 0x0208, 0x4dc: 0x0208, 0x4dd: 0x0208, 0x4de: 0x0408, 0x4df: 0x0208, 0x4e0: 0x0208, 0x4e1: 0x0208, 0x4e2: 0x0208, 0x4e3: 0x0208, 0x4e4: 0x0208, 0x4e5: 0x0208, 0x4e6: 0x0208, 0x4e7: 0x0208, 0x4e8: 0x0408, 0x4e9: 0x0208, 0x4ea: 0x0408, 0x4eb: 0x0208, 0x4ec: 0x0408, 0x4ed: 0x0208, 0x4ee: 0x0208, 0x4ef: 0x0408, 0x4f0: 0x1308, 0x4f1: 0x1308, 0x4f2: 0x1308, 0x4f3: 0x1308, 0x4f4: 0x1308, 0x4f5: 0x1308, 0x4f6: 0x1308, 0x4f7: 0x1308, 0x4f8: 0x1308, 0x4f9: 0x1308, 0x4fa: 0x1308, 0x4fb: 0x1308, 0x4fc: 0x1308, 0x4fd: 0x1308, 0x4fe: 0x1308, 0x4ff: 0x1308, // Block 0x14, offset 0x500 0x500: 0x1008, 0x501: 0x1308, 0x502: 0x1308, 0x503: 0x1308, 0x504: 0x1308, 0x505: 0x1308, 0x506: 0x1308, 0x507: 0x1308, 0x508: 0x1308, 0x509: 0x1008, 0x50a: 0x1008, 0x50b: 0x1008, 0x50c: 0x1008, 0x50d: 0x1b08, 0x50e: 0x1008, 0x50f: 0x1008, 0x510: 0x0008, 0x511: 0x1308, 0x512: 0x1308, 0x513: 0x1308, 0x514: 0x1308, 0x515: 0x1308, 0x516: 0x1308, 0x517: 0x1308, 0x518: 0x04c9, 0x519: 0x0501, 0x51a: 0x0539, 0x51b: 0x0571, 0x51c: 0x05a9, 0x51d: 0x05e1, 0x51e: 0x0619, 0x51f: 0x0651, 0x520: 0x0008, 0x521: 0x0008, 0x522: 0x1308, 0x523: 0x1308, 0x524: 0x0018, 0x525: 0x0018, 0x526: 0x0008, 0x527: 0x0008, 0x528: 0x0008, 0x529: 0x0008, 0x52a: 0x0008, 0x52b: 0x0008, 0x52c: 0x0008, 0x52d: 0x0008, 0x52e: 0x0008, 0x52f: 0x0008, 0x530: 0x0018, 0x531: 0x0008, 0x532: 0x0008, 0x533: 0x0008, 0x534: 0x0008, 0x535: 0x0008, 0x536: 0x0008, 0x537: 0x0008, 0x538: 0x0008, 0x539: 0x0008, 0x53a: 0x0008, 0x53b: 0x0008, 0x53c: 0x0008, 0x53d: 0x0008, 0x53e: 0x0008, 0x53f: 0x0008, // Block 0x15, offset 0x540 0x540: 0x0008, 0x541: 0x1308, 0x542: 0x1008, 0x543: 0x1008, 0x544: 0x0040, 0x545: 0x0008, 0x546: 0x0008, 0x547: 0x0008, 0x548: 0x0008, 0x549: 0x0008, 0x54a: 0x0008, 0x54b: 0x0008, 0x54c: 0x0008, 0x54d: 0x0040, 0x54e: 0x0040, 0x54f: 0x0008, 0x550: 0x0008, 0x551: 0x0040, 0x552: 0x0040, 0x553: 0x0008, 0x554: 0x0008, 0x555: 0x0008, 0x556: 0x0008, 0x557: 0x0008, 0x558: 0x0008, 0x559: 0x0008, 0x55a: 0x0008, 0x55b: 0x0008, 0x55c: 0x0008, 0x55d: 0x0008, 0x55e: 0x0008, 0x55f: 0x0008, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x0008, 0x563: 0x0008, 0x564: 0x0008, 0x565: 0x0008, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0040, 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008, 0x570: 0x0008, 0x571: 0x0040, 0x572: 0x0008, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0040, 0x57b: 0x0040, 0x57c: 0x1308, 0x57d: 0x0008, 0x57e: 0x1008, 0x57f: 0x1008, // Block 0x16, offset 0x580 0x580: 0x1008, 0x581: 0x1308, 0x582: 0x1308, 0x583: 0x1308, 0x584: 0x1308, 0x585: 0x0040, 0x586: 0x0040, 0x587: 0x1008, 0x588: 0x1008, 0x589: 0x0040, 0x58a: 0x0040, 0x58b: 0x1008, 0x58c: 0x1008, 0x58d: 0x1b08, 0x58e: 0x0008, 0x58f: 0x0040, 0x590: 0x0040, 0x591: 0x0040, 0x592: 0x0040, 0x593: 0x0040, 0x594: 0x0040, 0x595: 0x0040, 0x596: 0x0040, 0x597: 0x1008, 0x598: 0x0040, 0x599: 0x0040, 0x59a: 0x0040, 0x59b: 0x0040, 0x59c: 0x0689, 0x59d: 0x06c1, 0x59e: 0x0040, 0x59f: 0x06f9, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x1308, 0x5a3: 0x1308, 0x5a4: 0x0040, 0x5a5: 0x0040, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 
0x5ae: 0x0008, 0x5af: 0x0008, 0x5b0: 0x0008, 0x5b1: 0x0008, 0x5b2: 0x0018, 0x5b3: 0x0018, 0x5b4: 0x0018, 0x5b5: 0x0018, 0x5b6: 0x0018, 0x5b7: 0x0018, 0x5b8: 0x0018, 0x5b9: 0x0018, 0x5ba: 0x0018, 0x5bb: 0x0018, 0x5bc: 0x0040, 0x5bd: 0x0040, 0x5be: 0x0040, 0x5bf: 0x0040, // Block 0x17, offset 0x5c0 0x5c0: 0x0040, 0x5c1: 0x1308, 0x5c2: 0x1308, 0x5c3: 0x1008, 0x5c4: 0x0040, 0x5c5: 0x0008, 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0040, 0x5cc: 0x0040, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0731, 0x5f4: 0x0040, 0x5f5: 0x0008, 0x5f6: 0x0769, 0x5f7: 0x0040, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, 0x5fc: 0x1308, 0x5fd: 0x0040, 0x5fe: 0x1008, 0x5ff: 0x1008, // Block 0x18, offset 0x600 0x600: 0x1008, 0x601: 0x1308, 0x602: 0x1308, 0x603: 0x0040, 0x604: 0x0040, 0x605: 0x0040, 0x606: 0x0040, 0x607: 0x1308, 0x608: 0x1308, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x1308, 0x60c: 0x1308, 0x60d: 0x1b08, 0x60e: 0x0040, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x1308, 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x0040, 0x618: 0x0040, 0x619: 0x07a1, 0x61a: 0x07d9, 0x61b: 0x0811, 0x61c: 0x0008, 0x61d: 0x0040, 0x61e: 0x0849, 0x61f: 0x0040, 0x620: 0x0040, 0x621: 0x0040, 0x622: 0x0040, 0x623: 0x0040, 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, 0x630: 0x1308, 0x631: 0x1308, 0x632: 0x0008, 0x633: 0x0008, 0x634: 0x0008, 0x635: 0x1308, 0x636: 0x0040, 0x637: 0x0040, 0x638: 0x0040, 0x639: 0x0040, 0x63a: 0x0040, 0x63b: 0x0040, 0x63c: 0x0040, 0x63d: 0x0040, 0x63e: 0x0040, 0x63f: 0x0040, // Block 0x19, offset 0x640 0x640: 0x0040, 0x641: 0x1308, 0x642: 0x1308, 0x643: 0x1008, 0x644: 0x0040, 0x645: 0x0008, 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0008, 0x64c: 0x0008, 0x64d: 0x0008, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0008, 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0040, 0x675: 0x0008, 0x676: 0x0008, 0x677: 0x0008, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, 0x67c: 0x1308, 0x67d: 0x0008, 0x67e: 0x1008, 0x67f: 0x1008, // Block 0x1a, offset 0x680 0x680: 0x1008, 0x681: 0x1308, 0x682: 0x1308, 0x683: 0x1308, 0x684: 0x1308, 0x685: 0x1308, 0x686: 0x0040, 0x687: 0x1308, 0x688: 0x1308, 0x689: 0x1008, 0x68a: 0x0040, 0x68b: 0x1008, 0x68c: 0x1008, 0x68d: 0x1b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0008, 0x691: 0x0040, 0x692: 0x0040, 0x693: 
0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, 0x698: 0x0040, 0x699: 0x0040, 0x69a: 0x0040, 0x69b: 0x0040, 0x69c: 0x0040, 0x69d: 0x0040, 0x69e: 0x0040, 0x69f: 0x0040, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x1308, 0x6a3: 0x1308, 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, 0x6b0: 0x0018, 0x6b1: 0x0018, 0x6b2: 0x0040, 0x6b3: 0x0040, 0x6b4: 0x0040, 0x6b5: 0x0040, 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040, 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, // Block 0x1b, offset 0x6c0 0x6c0: 0x0040, 0x6c1: 0x1308, 0x6c2: 0x1008, 0x6c3: 0x1008, 0x6c4: 0x0040, 0x6c5: 0x0008, 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, 0x6cc: 0x0008, 0x6cd: 0x0040, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0040, 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, 0x6fc: 0x1308, 0x6fd: 0x0008, 0x6fe: 0x1008, 0x6ff: 0x1308, // Block 0x1c, offset 0x700 0x700: 0x1008, 0x701: 0x1308, 0x702: 0x1308, 0x703: 0x1308, 0x704: 0x1308, 0x705: 0x0040, 0x706: 0x0040, 0x707: 0x1008, 0x708: 0x1008, 0x709: 0x0040, 0x70a: 0x0040, 0x70b: 0x1008, 0x70c: 0x1008, 0x70d: 0x1b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0040, 0x711: 0x0040, 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x1308, 0x717: 0x1008, 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0881, 0x71d: 0x08b9, 0x71e: 0x0040, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x1308, 0x723: 0x1308, 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, 0x730: 0x0018, 0x731: 0x0008, 0x732: 0x0018, 0x733: 0x0018, 0x734: 0x0018, 0x735: 0x0018, 0x736: 0x0018, 0x737: 0x0018, 0x738: 0x0040, 0x739: 0x0040, 0x73a: 0x0040, 0x73b: 0x0040, 0x73c: 0x0040, 0x73d: 0x0040, 0x73e: 0x0040, 0x73f: 0x0040, // Block 0x1d, offset 0x740 0x740: 0x0040, 0x741: 0x0040, 0x742: 0x1308, 0x743: 0x0008, 0x744: 0x0040, 0x745: 0x0008, 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0040, 0x74c: 0x0040, 0x74d: 0x0040, 0x74e: 0x0008, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, 0x752: 0x0008, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0040, 0x757: 0x0040, 0x758: 0x0040, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0040, 0x75c: 0x0008, 0x75d: 0x0040, 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0040, 0x761: 0x0040, 0x762: 0x0040, 0x763: 0x0008, 0x764: 0x0008, 0x765: 0x0040, 0x766: 0x0040, 0x767: 0x0040, 0x768: 0x0008, 0x769: 0x0008, 0x76a: 0x0008, 0x76b: 0x0040, 0x76c: 0x0040, 0x76d: 0x0040, 0x76e: 0x0008, 0x76f: 0x0008, 0x770: 0x0008, 0x771: 0x0008, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0008, 0x775: 0x0008, 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 
0x0040, 0x77b: 0x0040, 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x1008, 0x77f: 0x1008, // Block 0x1e, offset 0x780 0x780: 0x1308, 0x781: 0x1008, 0x782: 0x1008, 0x783: 0x1008, 0x784: 0x1008, 0x785: 0x0040, 0x786: 0x1308, 0x787: 0x1308, 0x788: 0x1308, 0x789: 0x0040, 0x78a: 0x1308, 0x78b: 0x1308, 0x78c: 0x1308, 0x78d: 0x1b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x1308, 0x796: 0x1308, 0x797: 0x0040, 0x798: 0x0008, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0040, 0x79d: 0x0040, 0x79e: 0x0040, 0x79f: 0x0040, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x1308, 0x7a3: 0x1308, 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, 0x7b0: 0x0040, 0x7b1: 0x0040, 0x7b2: 0x0040, 0x7b3: 0x0040, 0x7b4: 0x0040, 0x7b5: 0x0040, 0x7b6: 0x0040, 0x7b7: 0x0040, 0x7b8: 0x0018, 0x7b9: 0x0018, 0x7ba: 0x0018, 0x7bb: 0x0018, 0x7bc: 0x0018, 0x7bd: 0x0018, 0x7be: 0x0018, 0x7bf: 0x0018, // Block 0x1f, offset 0x7c0 0x7c0: 0x0008, 0x7c1: 0x1308, 0x7c2: 0x1008, 0x7c3: 0x1008, 0x7c4: 0x0040, 0x7c5: 0x0008, 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0008, 0x7cc: 0x0008, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0008, 0x7d7: 0x0008, 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0008, 0x7dc: 0x0008, 0x7dd: 0x0008, 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x0008, 0x7e3: 0x0008, 0x7e4: 0x0008, 0x7e5: 0x0008, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0040, 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008, 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0040, 0x7f5: 0x0008, 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, 0x7fc: 0x1308, 0x7fd: 0x0008, 0x7fe: 0x1008, 0x7ff: 0x1308, // Block 0x20, offset 0x800 0x800: 0x1008, 0x801: 0x1008, 0x802: 0x1008, 0x803: 0x1008, 0x804: 0x1008, 0x805: 0x0040, 0x806: 0x1308, 0x807: 0x1008, 0x808: 0x1008, 0x809: 0x0040, 0x80a: 0x1008, 0x80b: 0x1008, 0x80c: 0x1308, 0x80d: 0x1b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x1008, 0x816: 0x1008, 0x817: 0x0040, 0x818: 0x0040, 0x819: 0x0040, 0x81a: 0x0040, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, 0x81e: 0x0008, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x1308, 0x823: 0x1308, 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008, 0x830: 0x0040, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0040, 0x839: 0x0040, 0x83a: 0x0040, 0x83b: 0x0040, 0x83c: 0x0040, 0x83d: 0x0040, 0x83e: 0x0040, 0x83f: 0x0040, // Block 0x21, offset 0x840 0x840: 0x1008, 0x841: 0x1308, 0x842: 0x1308, 0x843: 0x1308, 0x844: 0x1308, 0x845: 0x0040, 0x846: 0x1008, 0x847: 0x1008, 0x848: 0x1008, 0x849: 0x0040, 0x84a: 0x1008, 0x84b: 0x1008, 0x84c: 0x1008, 0x84d: 0x1b08, 0x84e: 0x0008, 0x84f: 0x0018, 0x850: 0x0040, 0x851: 0x0040, 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x1008, 0x858: 0x0018, 0x859: 0x0018, 0x85a: 0x0018, 0x85b: 0x0018, 0x85c: 0x0018, 0x85d: 0x0018, 0x85e: 0x0018, 0x85f: 0x0008, 
0x860: 0x0008, 0x861: 0x0008, 0x862: 0x1308, 0x863: 0x1308, 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008, 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, 0x870: 0x0018, 0x871: 0x0018, 0x872: 0x0018, 0x873: 0x0018, 0x874: 0x0018, 0x875: 0x0018, 0x876: 0x0018, 0x877: 0x0018, 0x878: 0x0018, 0x879: 0x0018, 0x87a: 0x0008, 0x87b: 0x0008, 0x87c: 0x0008, 0x87d: 0x0008, 0x87e: 0x0008, 0x87f: 0x0008, // Block 0x22, offset 0x880 0x880: 0x0040, 0x881: 0x0008, 0x882: 0x0008, 0x883: 0x0040, 0x884: 0x0008, 0x885: 0x0040, 0x886: 0x0040, 0x887: 0x0008, 0x888: 0x0008, 0x889: 0x0040, 0x88a: 0x0008, 0x88b: 0x0040, 0x88c: 0x0040, 0x88d: 0x0008, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x0008, 0x898: 0x0040, 0x899: 0x0008, 0x89a: 0x0008, 0x89b: 0x0008, 0x89c: 0x0008, 0x89d: 0x0008, 0x89e: 0x0008, 0x89f: 0x0008, 0x8a0: 0x0040, 0x8a1: 0x0008, 0x8a2: 0x0008, 0x8a3: 0x0008, 0x8a4: 0x0040, 0x8a5: 0x0008, 0x8a6: 0x0040, 0x8a7: 0x0008, 0x8a8: 0x0040, 0x8a9: 0x0040, 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0040, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, 0x8b0: 0x0008, 0x8b1: 0x1308, 0x8b2: 0x0008, 0x8b3: 0x0929, 0x8b4: 0x1308, 0x8b5: 0x1308, 0x8b6: 0x1308, 0x8b7: 0x1308, 0x8b8: 0x1308, 0x8b9: 0x1308, 0x8ba: 0x0040, 0x8bb: 0x1308, 0x8bc: 0x1308, 0x8bd: 0x0008, 0x8be: 0x0040, 0x8bf: 0x0040, // Block 0x23, offset 0x8c0 0x8c0: 0x0008, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x09d1, 0x8c4: 0x0008, 0x8c5: 0x0008, 0x8c6: 0x0008, 0x8c7: 0x0008, 0x8c8: 0x0040, 0x8c9: 0x0008, 0x8ca: 0x0008, 0x8cb: 0x0008, 0x8cc: 0x0008, 0x8cd: 0x0a09, 0x8ce: 0x0008, 0x8cf: 0x0008, 0x8d0: 0x0008, 0x8d1: 0x0008, 0x8d2: 0x0a41, 0x8d3: 0x0008, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0a79, 0x8d8: 0x0008, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0ab1, 0x8dd: 0x0008, 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008, 0x8e4: 0x0008, 0x8e5: 0x0008, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0ae9, 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0040, 0x8ee: 0x0040, 0x8ef: 0x0040, 0x8f0: 0x0040, 0x8f1: 0x1308, 0x8f2: 0x1308, 0x8f3: 0x0b21, 0x8f4: 0x1308, 0x8f5: 0x0b59, 0x8f6: 0x0b91, 0x8f7: 0x0bc9, 0x8f8: 0x0c19, 0x8f9: 0x0c51, 0x8fa: 0x1308, 0x8fb: 0x1308, 0x8fc: 0x1308, 0x8fd: 0x1308, 0x8fe: 0x1308, 0x8ff: 0x1008, // Block 0x24, offset 0x900 0x900: 0x1308, 0x901: 0x0ca1, 0x902: 0x1308, 0x903: 0x1308, 0x904: 0x1b08, 0x905: 0x0018, 0x906: 0x1308, 0x907: 0x1308, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008, 0x90c: 0x0008, 0x90d: 0x1308, 0x90e: 0x1308, 0x90f: 0x1308, 0x910: 0x1308, 0x911: 0x1308, 0x912: 0x1308, 0x913: 0x0cd9, 0x914: 0x1308, 0x915: 0x1308, 0x916: 0x1308, 0x917: 0x1308, 0x918: 0x0040, 0x919: 0x1308, 0x91a: 0x1308, 0x91b: 0x1308, 0x91c: 0x1308, 0x91d: 0x0d11, 0x91e: 0x1308, 0x91f: 0x1308, 0x920: 0x1308, 0x921: 0x1308, 0x922: 0x0d49, 0x923: 0x1308, 0x924: 0x1308, 0x925: 0x1308, 0x926: 0x1308, 0x927: 0x0d81, 0x928: 0x1308, 0x929: 0x1308, 0x92a: 0x1308, 0x92b: 0x1308, 0x92c: 0x0db9, 0x92d: 0x1308, 0x92e: 0x1308, 0x92f: 0x1308, 0x930: 0x1308, 0x931: 0x1308, 0x932: 0x1308, 0x933: 0x1308, 0x934: 0x1308, 0x935: 0x1308, 0x936: 0x1308, 0x937: 0x1308, 0x938: 0x1308, 0x939: 0x0df1, 0x93a: 0x1308, 0x93b: 0x1308, 0x93c: 0x1308, 0x93d: 0x0040, 0x93e: 0x0018, 0x93f: 0x0018, // Block 0x25, offset 0x940 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x0008, 0x944: 0x0008, 0x945: 
0x0008, 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, 0x94c: 0x0008, 0x94d: 0x0008, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, 0x952: 0x0008, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0008, 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0008, 0x95d: 0x0008, 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0008, 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0039, 0x96d: 0x0ed1, 0x96e: 0x0ee9, 0x96f: 0x0008, 0x970: 0x0ef9, 0x971: 0x0f09, 0x972: 0x0f19, 0x973: 0x0f31, 0x974: 0x0249, 0x975: 0x0f41, 0x976: 0x0259, 0x977: 0x0f51, 0x978: 0x0359, 0x979: 0x0f61, 0x97a: 0x0f71, 0x97b: 0x0008, 0x97c: 0x00d9, 0x97d: 0x0f81, 0x97e: 0x0f99, 0x97f: 0x0269, // Block 0x26, offset 0x980 0x980: 0x0fa9, 0x981: 0x0fb9, 0x982: 0x0279, 0x983: 0x0039, 0x984: 0x0fc9, 0x985: 0x0fe1, 0x986: 0x059d, 0x987: 0x0ee9, 0x988: 0x0ef9, 0x989: 0x0f09, 0x98a: 0x0ff9, 0x98b: 0x1011, 0x98c: 0x1029, 0x98d: 0x0f31, 0x98e: 0x0008, 0x98f: 0x0f51, 0x990: 0x0f61, 0x991: 0x1041, 0x992: 0x00d9, 0x993: 0x1059, 0x994: 0x05b5, 0x995: 0x05b5, 0x996: 0x0f99, 0x997: 0x0fa9, 0x998: 0x0fb9, 0x999: 0x059d, 0x99a: 0x1071, 0x99b: 0x1089, 0x99c: 0x05cd, 0x99d: 0x1099, 0x99e: 0x10b1, 0x99f: 0x10c9, 0x9a0: 0x10e1, 0x9a1: 0x10f9, 0x9a2: 0x0f41, 0x9a3: 0x0269, 0x9a4: 0x0fb9, 0x9a5: 0x1089, 0x9a6: 0x1099, 0x9a7: 0x10b1, 0x9a8: 0x1111, 0x9a9: 0x10e1, 0x9aa: 0x10f9, 0x9ab: 0x0008, 0x9ac: 0x0008, 0x9ad: 0x0008, 0x9ae: 0x0008, 0x9af: 0x0008, 0x9b0: 0x0008, 0x9b1: 0x0008, 0x9b2: 0x0008, 0x9b3: 0x0008, 0x9b4: 0x0008, 0x9b5: 0x0008, 0x9b6: 0x0008, 0x9b7: 0x0008, 0x9b8: 0x1129, 0x9b9: 0x0008, 0x9ba: 0x0008, 0x9bb: 0x0008, 0x9bc: 0x0008, 0x9bd: 0x0008, 0x9be: 0x0008, 0x9bf: 0x0008, // Block 0x27, offset 0x9c0 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x1141, 0x9dc: 0x1159, 0x9dd: 0x1169, 0x9de: 0x1181, 0x9df: 0x1029, 0x9e0: 0x1199, 0x9e1: 0x11a9, 0x9e2: 0x11c1, 0x9e3: 0x11d9, 0x9e4: 0x11f1, 0x9e5: 0x1209, 0x9e6: 0x1221, 0x9e7: 0x05e5, 0x9e8: 0x1239, 0x9e9: 0x1251, 0x9ea: 0xe17d, 0x9eb: 0x1269, 0x9ec: 0x1281, 0x9ed: 0x1299, 0x9ee: 0x12b1, 0x9ef: 0x12c9, 0x9f0: 0x12e1, 0x9f1: 0x12f9, 0x9f2: 0x1311, 0x9f3: 0x1329, 0x9f4: 0x1341, 0x9f5: 0x1359, 0x9f6: 0x1371, 0x9f7: 0x1389, 0x9f8: 0x05fd, 0x9f9: 0x13a1, 0x9fa: 0x13b9, 0x9fb: 0x13d1, 0x9fc: 0x13e1, 0x9fd: 0x13f9, 0x9fe: 0x1411, 0x9ff: 0x1429, // Block 0x28, offset 0xa00 0xa00: 0xe00d, 0xa01: 0x0008, 0xa02: 0xe00d, 0xa03: 0x0008, 0xa04: 0xe00d, 0xa05: 0x0008, 0xa06: 0xe00d, 0xa07: 0x0008, 0xa08: 0xe00d, 0xa09: 0x0008, 0xa0a: 0xe00d, 0xa0b: 0x0008, 0xa0c: 0xe00d, 0xa0d: 0x0008, 0xa0e: 0xe00d, 0xa0f: 0x0008, 0xa10: 0xe00d, 0xa11: 0x0008, 0xa12: 0xe00d, 0xa13: 0x0008, 0xa14: 0xe00d, 0xa15: 0x0008, 0xa16: 0xe00d, 0xa17: 0x0008, 0xa18: 0xe00d, 0xa19: 0x0008, 0xa1a: 0xe00d, 0xa1b: 0x0008, 0xa1c: 0xe00d, 0xa1d: 0x0008, 0xa1e: 0xe00d, 0xa1f: 0x0008, 0xa20: 0xe00d, 0xa21: 0x0008, 0xa22: 0xe00d, 0xa23: 0x0008, 0xa24: 0xe00d, 0xa25: 0x0008, 0xa26: 0xe00d, 0xa27: 0x0008, 0xa28: 0xe00d, 0xa29: 0x0008, 0xa2a: 0xe00d, 0xa2b: 0x0008, 0xa2c: 
0xe00d, 0xa2d: 0x0008, 0xa2e: 0xe00d, 0xa2f: 0x0008, 0xa30: 0xe00d, 0xa31: 0x0008, 0xa32: 0xe00d, 0xa33: 0x0008, 0xa34: 0xe00d, 0xa35: 0x0008, 0xa36: 0xe00d, 0xa37: 0x0008, 0xa38: 0xe00d, 0xa39: 0x0008, 0xa3a: 0xe00d, 0xa3b: 0x0008, 0xa3c: 0xe00d, 0xa3d: 0x0008, 0xa3e: 0xe00d, 0xa3f: 0x0008, // Block 0x29, offset 0xa40 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008, 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008, 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008, 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0615, 0xa5b: 0x0635, 0xa5c: 0x0008, 0xa5d: 0x0008, 0xa5e: 0x1441, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008, 0xa64: 0xe00d, 0xa65: 0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008, 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008, 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008, 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008, 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008, // Block 0x2a, offset 0xa80 0xa80: 0x0008, 0xa81: 0x0008, 0xa82: 0x0008, 0xa83: 0x0008, 0xa84: 0x0008, 0xa85: 0x0008, 0xa86: 0x0040, 0xa87: 0x0040, 0xa88: 0xe045, 0xa89: 0xe045, 0xa8a: 0xe045, 0xa8b: 0xe045, 0xa8c: 0xe045, 0xa8d: 0xe045, 0xa8e: 0x0040, 0xa8f: 0x0040, 0xa90: 0x0008, 0xa91: 0x0008, 0xa92: 0x0008, 0xa93: 0x0008, 0xa94: 0x0008, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008, 0xa98: 0x0040, 0xa99: 0xe045, 0xa9a: 0x0040, 0xa9b: 0xe045, 0xa9c: 0x0040, 0xa9d: 0xe045, 0xa9e: 0x0040, 0xa9f: 0xe045, 0xaa0: 0x0008, 0xaa1: 0x0008, 0xaa2: 0x0008, 0xaa3: 0x0008, 0xaa4: 0x0008, 0xaa5: 0x0008, 0xaa6: 0x0008, 0xaa7: 0x0008, 0xaa8: 0xe045, 0xaa9: 0xe045, 0xaaa: 0xe045, 0xaab: 0xe045, 0xaac: 0xe045, 0xaad: 0xe045, 0xaae: 0xe045, 0xaaf: 0xe045, 0xab0: 0x0008, 0xab1: 0x1459, 0xab2: 0x0008, 0xab3: 0x1471, 0xab4: 0x0008, 0xab5: 0x1489, 0xab6: 0x0008, 0xab7: 0x14a1, 0xab8: 0x0008, 0xab9: 0x14b9, 0xaba: 0x0008, 0xabb: 0x14d1, 0xabc: 0x0008, 0xabd: 0x14e9, 0xabe: 0x0040, 0xabf: 0x0040, // Block 0x2b, offset 0xac0 0xac0: 0x1501, 0xac1: 0x1531, 0xac2: 0x1561, 0xac3: 0x1591, 0xac4: 0x15c1, 0xac5: 0x15f1, 0xac6: 0x1621, 0xac7: 0x1651, 0xac8: 0x1501, 0xac9: 0x1531, 0xaca: 0x1561, 0xacb: 0x1591, 0xacc: 0x15c1, 0xacd: 0x15f1, 0xace: 0x1621, 0xacf: 0x1651, 0xad0: 0x1681, 0xad1: 0x16b1, 0xad2: 0x16e1, 0xad3: 0x1711, 0xad4: 0x1741, 0xad5: 0x1771, 0xad6: 0x17a1, 0xad7: 0x17d1, 0xad8: 0x1681, 0xad9: 0x16b1, 0xada: 0x16e1, 0xadb: 0x1711, 0xadc: 0x1741, 0xadd: 0x1771, 0xade: 0x17a1, 0xadf: 0x17d1, 0xae0: 0x1801, 0xae1: 0x1831, 0xae2: 0x1861, 0xae3: 0x1891, 0xae4: 0x18c1, 0xae5: 0x18f1, 0xae6: 0x1921, 0xae7: 0x1951, 0xae8: 0x1801, 0xae9: 0x1831, 0xaea: 0x1861, 0xaeb: 0x1891, 0xaec: 0x18c1, 0xaed: 0x18f1, 0xaee: 0x1921, 0xaef: 0x1951, 0xaf0: 0x0008, 0xaf1: 0x0008, 0xaf2: 0x1981, 0xaf3: 0x19b1, 0xaf4: 0x19d9, 0xaf5: 0x0040, 0xaf6: 0x0008, 0xaf7: 0x1a01, 0xaf8: 0xe045, 0xaf9: 0xe045, 0xafa: 0x064d, 0xafb: 0x1459, 0xafc: 0x19b1, 0xafd: 0x0666, 0xafe: 0x1a31, 0xaff: 0x0686, // Block 0x2c, offset 0xb00 0xb00: 0x06a6, 0xb01: 0x1a4a, 0xb02: 0x1a79, 0xb03: 0x1aa9, 0xb04: 0x1ad1, 0xb05: 0x0040, 0xb06: 0x0008, 0xb07: 0x1af9, 0xb08: 0x06c5, 0xb09: 0x1471, 0xb0a: 0x06dd, 0xb0b: 0x1489, 0xb0c: 0x1aa9, 0xb0d: 0x1b2a, 0xb0e: 0x1b5a, 0xb0f: 0x1b8a, 0xb10: 0x0008, 0xb11: 0x0008, 
0xb12: 0x0008, 0xb13: 0x1bb9, 0xb14: 0x0040, 0xb15: 0x0040, 0xb16: 0x0008, 0xb17: 0x0008, 0xb18: 0xe045, 0xb19: 0xe045, 0xb1a: 0x06f5, 0xb1b: 0x14a1, 0xb1c: 0x0040, 0xb1d: 0x1bd2, 0xb1e: 0x1c02, 0xb1f: 0x1c32, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x1c61, 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, 0xb2a: 0x070d, 0xb2b: 0x14d1, 0xb2c: 0xe04d, 0xb2d: 0x1c7a, 0xb2e: 0x03d2, 0xb2f: 0x1caa, 0xb30: 0x0040, 0xb31: 0x0040, 0xb32: 0x1cb9, 0xb33: 0x1ce9, 0xb34: 0x1d11, 0xb35: 0x0040, 0xb36: 0x0008, 0xb37: 0x1d39, 0xb38: 0x0725, 0xb39: 0x14b9, 0xb3a: 0x0515, 0xb3b: 0x14e9, 0xb3c: 0x1ce9, 0xb3d: 0x073e, 0xb3e: 0x075e, 0xb3f: 0x0040, // Block 0x2d, offset 0xb40 0xb40: 0x000a, 0xb41: 0x000a, 0xb42: 0x000a, 0xb43: 0x000a, 0xb44: 0x000a, 0xb45: 0x000a, 0xb46: 0x000a, 0xb47: 0x000a, 0xb48: 0x000a, 0xb49: 0x000a, 0xb4a: 0x000a, 0xb4b: 0x03c0, 0xb4c: 0x0003, 0xb4d: 0x0003, 0xb4e: 0x0340, 0xb4f: 0x0340, 0xb50: 0x0018, 0xb51: 0xe00d, 0xb52: 0x0018, 0xb53: 0x0018, 0xb54: 0x0018, 0xb55: 0x0018, 0xb56: 0x0018, 0xb57: 0x077e, 0xb58: 0x0018, 0xb59: 0x0018, 0xb5a: 0x0018, 0xb5b: 0x0018, 0xb5c: 0x0018, 0xb5d: 0x0018, 0xb5e: 0x0018, 0xb5f: 0x0018, 0xb60: 0x0018, 0xb61: 0x0018, 0xb62: 0x0018, 0xb63: 0x0018, 0xb64: 0x0040, 0xb65: 0x0040, 0xb66: 0x0040, 0xb67: 0x0018, 0xb68: 0x0040, 0xb69: 0x0040, 0xb6a: 0x0340, 0xb6b: 0x0340, 0xb6c: 0x0340, 0xb6d: 0x0340, 0xb6e: 0x0340, 0xb6f: 0x000a, 0xb70: 0x0018, 0xb71: 0x0018, 0xb72: 0x0018, 0xb73: 0x1d69, 0xb74: 0x1da1, 0xb75: 0x0018, 0xb76: 0x1df1, 0xb77: 0x1e29, 0xb78: 0x0018, 0xb79: 0x0018, 0xb7a: 0x0018, 0xb7b: 0x0018, 0xb7c: 0x1e7a, 0xb7d: 0x0018, 0xb7e: 0x079e, 0xb7f: 0x0018, // Block 0x2e, offset 0xb80 0xb80: 0x0018, 0xb81: 0x0018, 0xb82: 0x0018, 0xb83: 0x0018, 0xb84: 0x0018, 0xb85: 0x0018, 0xb86: 0x0018, 0xb87: 0x1e92, 0xb88: 0x1eaa, 0xb89: 0x1ec2, 0xb8a: 0x0018, 0xb8b: 0x0018, 0xb8c: 0x0018, 0xb8d: 0x0018, 0xb8e: 0x0018, 0xb8f: 0x0018, 0xb90: 0x0018, 0xb91: 0x0018, 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x1ed9, 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018, 0xb9e: 0x0018, 0xb9f: 0x000a, 0xba0: 0x03c0, 0xba1: 0x0340, 0xba2: 0x0340, 0xba3: 0x0340, 0xba4: 0x03c0, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0040, 0xba8: 0x0040, 0xba9: 0x0040, 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x0340, 0xbb0: 0x1f41, 0xbb1: 0x0f41, 0xbb2: 0x0040, 0xbb3: 0x0040, 0xbb4: 0x1f51, 0xbb5: 0x1f61, 0xbb6: 0x1f71, 0xbb7: 0x1f81, 0xbb8: 0x1f91, 0xbb9: 0x1fa1, 0xbba: 0x1fb2, 0xbbb: 0x07bd, 0xbbc: 0x1fc2, 0xbbd: 0x1fd2, 0xbbe: 0x1fe2, 0xbbf: 0x0f71, // Block 0x2f, offset 0xbc0 0xbc0: 0x1f41, 0xbc1: 0x00c9, 0xbc2: 0x0069, 0xbc3: 0x0079, 0xbc4: 0x1f51, 0xbc5: 0x1f61, 0xbc6: 0x1f71, 0xbc7: 0x1f81, 0xbc8: 0x1f91, 0xbc9: 0x1fa1, 0xbca: 0x1fb2, 0xbcb: 0x07d5, 0xbcc: 0x1fc2, 0xbcd: 0x1fd2, 0xbce: 0x1fe2, 0xbcf: 0x0040, 0xbd0: 0x0039, 0xbd1: 0x0f09, 0xbd2: 0x00d9, 0xbd3: 0x0369, 0xbd4: 0x0ff9, 0xbd5: 0x0249, 0xbd6: 0x0f51, 0xbd7: 0x0359, 0xbd8: 0x0f61, 0xbd9: 0x0f71, 0xbda: 0x0f99, 0xbdb: 0x01d9, 0xbdc: 0x0fa9, 0xbdd: 0x0040, 0xbde: 0x0040, 0xbdf: 0x0040, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, 0xbe4: 0x0018, 0xbe5: 0x0018, 0xbe6: 0x0018, 0xbe7: 0x0018, 0xbe8: 0x1ff1, 0xbe9: 0x0018, 0xbea: 0x0018, 0xbeb: 0x0018, 0xbec: 0x0018, 0xbed: 0x0018, 0xbee: 0x0018, 0xbef: 0x0018, 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x0018, 0xbf4: 0x0018, 0xbf5: 0x0018, 0xbf6: 0x0018, 0xbf7: 0x0018, 0xbf8: 0x0018, 
0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, 0xbfc: 0x0018, 0xbfd: 0x0018, 0xbfe: 0x0018, 0xbff: 0x0040, // Block 0x30, offset 0xc00 0xc00: 0x07ee, 0xc01: 0x080e, 0xc02: 0x1159, 0xc03: 0x082d, 0xc04: 0x0018, 0xc05: 0x084e, 0xc06: 0x086e, 0xc07: 0x1011, 0xc08: 0x0018, 0xc09: 0x088d, 0xc0a: 0x0f31, 0xc0b: 0x0249, 0xc0c: 0x0249, 0xc0d: 0x0249, 0xc0e: 0x0249, 0xc0f: 0x2009, 0xc10: 0x0f41, 0xc11: 0x0f41, 0xc12: 0x0359, 0xc13: 0x0359, 0xc14: 0x0018, 0xc15: 0x0f71, 0xc16: 0x2021, 0xc17: 0x0018, 0xc18: 0x0018, 0xc19: 0x0f99, 0xc1a: 0x2039, 0xc1b: 0x0269, 0xc1c: 0x0269, 0xc1d: 0x0269, 0xc1e: 0x0018, 0xc1f: 0x0018, 0xc20: 0x2049, 0xc21: 0x08ad, 0xc22: 0x2061, 0xc23: 0x0018, 0xc24: 0x13d1, 0xc25: 0x0018, 0xc26: 0x2079, 0xc27: 0x0018, 0xc28: 0x13d1, 0xc29: 0x0018, 0xc2a: 0x0f51, 0xc2b: 0x2091, 0xc2c: 0x0ee9, 0xc2d: 0x1159, 0xc2e: 0x0018, 0xc2f: 0x0f09, 0xc30: 0x0f09, 0xc31: 0x1199, 0xc32: 0x0040, 0xc33: 0x0f61, 0xc34: 0x00d9, 0xc35: 0x20a9, 0xc36: 0x20c1, 0xc37: 0x20d9, 0xc38: 0x20f1, 0xc39: 0x0f41, 0xc3a: 0x0018, 0xc3b: 0x08cd, 0xc3c: 0x2109, 0xc3d: 0x10b1, 0xc3e: 0x10b1, 0xc3f: 0x2109, // Block 0x31, offset 0xc40 0xc40: 0x08ed, 0xc41: 0x0018, 0xc42: 0x0018, 0xc43: 0x0018, 0xc44: 0x0018, 0xc45: 0x0ef9, 0xc46: 0x0ef9, 0xc47: 0x0f09, 0xc48: 0x0f41, 0xc49: 0x0259, 0xc4a: 0x0018, 0xc4b: 0x0018, 0xc4c: 0x0018, 0xc4d: 0x0018, 0xc4e: 0x0008, 0xc4f: 0x0018, 0xc50: 0x2121, 0xc51: 0x2151, 0xc52: 0x2181, 0xc53: 0x21b9, 0xc54: 0x21e9, 0xc55: 0x2219, 0xc56: 0x2249, 0xc57: 0x2279, 0xc58: 0x22a9, 0xc59: 0x22d9, 0xc5a: 0x2309, 0xc5b: 0x2339, 0xc5c: 0x2369, 0xc5d: 0x2399, 0xc5e: 0x23c9, 0xc5f: 0x23f9, 0xc60: 0x0f41, 0xc61: 0x2421, 0xc62: 0x0905, 0xc63: 0x2439, 0xc64: 0x1089, 0xc65: 0x2451, 0xc66: 0x0925, 0xc67: 0x2469, 0xc68: 0x2491, 0xc69: 0x0369, 0xc6a: 0x24a9, 0xc6b: 0x0945, 0xc6c: 0x0359, 0xc6d: 0x1159, 0xc6e: 0x0ef9, 0xc6f: 0x0f61, 0xc70: 0x0f41, 0xc71: 0x2421, 0xc72: 0x0965, 0xc73: 0x2439, 0xc74: 0x1089, 0xc75: 0x2451, 0xc76: 0x0985, 0xc77: 0x2469, 0xc78: 0x2491, 0xc79: 0x0369, 0xc7a: 0x24a9, 0xc7b: 0x09a5, 0xc7c: 0x0359, 0xc7d: 0x1159, 0xc7e: 0x0ef9, 0xc7f: 0x0f61, // Block 0x32, offset 0xc80 0xc80: 0x0018, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0018, 0xc86: 0x0018, 0xc87: 0x0018, 0xc88: 0x0018, 0xc89: 0x0018, 0xc8a: 0x0018, 0xc8b: 0x0040, 0xc8c: 0x0040, 0xc8d: 0x0040, 0xc8e: 0x0040, 0xc8f: 0x0040, 0xc90: 0x0040, 0xc91: 0x0040, 0xc92: 0x0040, 0xc93: 0x0040, 0xc94: 0x0040, 0xc95: 0x0040, 0xc96: 0x0040, 0xc97: 0x0040, 0xc98: 0x0040, 0xc99: 0x0040, 0xc9a: 0x0040, 0xc9b: 0x0040, 0xc9c: 0x0040, 0xc9d: 0x0040, 0xc9e: 0x0040, 0xc9f: 0x0040, 0xca0: 0x00c9, 0xca1: 0x0069, 0xca2: 0x0079, 0xca3: 0x1f51, 0xca4: 0x1f61, 0xca5: 0x1f71, 0xca6: 0x1f81, 0xca7: 0x1f91, 0xca8: 0x1fa1, 0xca9: 0x2601, 0xcaa: 0x2619, 0xcab: 0x2631, 0xcac: 0x2649, 0xcad: 0x2661, 0xcae: 0x2679, 0xcaf: 0x2691, 0xcb0: 0x26a9, 0xcb1: 0x26c1, 0xcb2: 0x26d9, 0xcb3: 0x26f1, 0xcb4: 0x0a06, 0xcb5: 0x0a26, 0xcb6: 0x0a46, 0xcb7: 0x0a66, 0xcb8: 0x0a86, 0xcb9: 0x0aa6, 0xcba: 0x0ac6, 0xcbb: 0x0ae6, 0xcbc: 0x0b06, 0xcbd: 0x270a, 0xcbe: 0x2732, 0xcbf: 0x275a, // Block 0x33, offset 0xcc0 0xcc0: 0x2782, 0xcc1: 0x27aa, 0xcc2: 0x27d2, 0xcc3: 0x27fa, 0xcc4: 0x2822, 0xcc5: 0x284a, 0xcc6: 0x2872, 0xcc7: 0x289a, 0xcc8: 0x0040, 0xcc9: 0x0040, 0xcca: 0x0040, 0xccb: 0x0040, 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040, 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040, 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0b26, 0xcdd: 0x0b46, 0xcde: 
0x0b66, 0xcdf: 0x0b86, 0xce0: 0x0ba6, 0xce1: 0x0bc6, 0xce2: 0x0be6, 0xce3: 0x0c06, 0xce4: 0x0c26, 0xce5: 0x0c46, 0xce6: 0x0c66, 0xce7: 0x0c86, 0xce8: 0x0ca6, 0xce9: 0x0cc6, 0xcea: 0x0ce6, 0xceb: 0x0d06, 0xcec: 0x0d26, 0xced: 0x0d46, 0xcee: 0x0d66, 0xcef: 0x0d86, 0xcf0: 0x0da6, 0xcf1: 0x0dc6, 0xcf2: 0x0de6, 0xcf3: 0x0e06, 0xcf4: 0x0e26, 0xcf5: 0x0e46, 0xcf6: 0x0039, 0xcf7: 0x0ee9, 0xcf8: 0x1159, 0xcf9: 0x0ef9, 0xcfa: 0x0f09, 0xcfb: 0x1199, 0xcfc: 0x0f31, 0xcfd: 0x0249, 0xcfe: 0x0f41, 0xcff: 0x0259, // Block 0x34, offset 0xd00 0xd00: 0x0f51, 0xd01: 0x0359, 0xd02: 0x0f61, 0xd03: 0x0f71, 0xd04: 0x00d9, 0xd05: 0x0f99, 0xd06: 0x2039, 0xd07: 0x0269, 0xd08: 0x01d9, 0xd09: 0x0fa9, 0xd0a: 0x0fb9, 0xd0b: 0x1089, 0xd0c: 0x0279, 0xd0d: 0x0369, 0xd0e: 0x0289, 0xd0f: 0x13d1, 0xd10: 0x0039, 0xd11: 0x0ee9, 0xd12: 0x1159, 0xd13: 0x0ef9, 0xd14: 0x0f09, 0xd15: 0x1199, 0xd16: 0x0f31, 0xd17: 0x0249, 0xd18: 0x0f41, 0xd19: 0x0259, 0xd1a: 0x0f51, 0xd1b: 0x0359, 0xd1c: 0x0f61, 0xd1d: 0x0f71, 0xd1e: 0x00d9, 0xd1f: 0x0f99, 0xd20: 0x2039, 0xd21: 0x0269, 0xd22: 0x01d9, 0xd23: 0x0fa9, 0xd24: 0x0fb9, 0xd25: 0x1089, 0xd26: 0x0279, 0xd27: 0x0369, 0xd28: 0x0289, 0xd29: 0x13d1, 0xd2a: 0x1f41, 0xd2b: 0x0018, 0xd2c: 0x0018, 0xd2d: 0x0018, 0xd2e: 0x0018, 0xd2f: 0x0018, 0xd30: 0x0018, 0xd31: 0x0018, 0xd32: 0x0018, 0xd33: 0x0018, 0xd34: 0x0018, 0xd35: 0x0018, 0xd36: 0x0018, 0xd37: 0x0018, 0xd38: 0x0018, 0xd39: 0x0018, 0xd3a: 0x0018, 0xd3b: 0x0018, 0xd3c: 0x0018, 0xd3d: 0x0018, 0xd3e: 0x0018, 0xd3f: 0x0018, // Block 0x35, offset 0xd40 0xd40: 0x0008, 0xd41: 0x0008, 0xd42: 0x0008, 0xd43: 0x0008, 0xd44: 0x0008, 0xd45: 0x0008, 0xd46: 0x0008, 0xd47: 0x0008, 0xd48: 0x0008, 0xd49: 0x0008, 0xd4a: 0x0008, 0xd4b: 0x0008, 0xd4c: 0x0008, 0xd4d: 0x0008, 0xd4e: 0x0008, 0xd4f: 0x0008, 0xd50: 0x0008, 0xd51: 0x0008, 0xd52: 0x0008, 0xd53: 0x0008, 0xd54: 0x0008, 0xd55: 0x0008, 0xd56: 0x0008, 0xd57: 0x0008, 0xd58: 0x0008, 0xd59: 0x0008, 0xd5a: 0x0008, 0xd5b: 0x0008, 0xd5c: 0x0008, 0xd5d: 0x0008, 0xd5e: 0x0008, 0xd5f: 0x0040, 0xd60: 0xe00d, 0xd61: 0x0008, 0xd62: 0x2971, 0xd63: 0x0ebd, 0xd64: 0x2989, 0xd65: 0x0008, 0xd66: 0x0008, 0xd67: 0xe07d, 0xd68: 0x0008, 0xd69: 0xe01d, 0xd6a: 0x0008, 0xd6b: 0xe03d, 0xd6c: 0x0008, 0xd6d: 0x0fe1, 0xd6e: 0x1281, 0xd6f: 0x0fc9, 0xd70: 0x1141, 0xd71: 0x0008, 0xd72: 0xe00d, 0xd73: 0x0008, 0xd74: 0x0008, 0xd75: 0xe01d, 0xd76: 0x0008, 0xd77: 0x0008, 0xd78: 0x0008, 0xd79: 0x0008, 0xd7a: 0x0008, 0xd7b: 0x0008, 0xd7c: 0x0259, 0xd7d: 0x1089, 0xd7e: 0x29a1, 0xd7f: 0x29b9, // Block 0x36, offset 0xd80 0xd80: 0xe00d, 0xd81: 0x0008, 0xd82: 0xe00d, 0xd83: 0x0008, 0xd84: 0xe00d, 0xd85: 0x0008, 0xd86: 0xe00d, 0xd87: 0x0008, 0xd88: 0xe00d, 0xd89: 0x0008, 0xd8a: 0xe00d, 0xd8b: 0x0008, 0xd8c: 0xe00d, 0xd8d: 0x0008, 0xd8e: 0xe00d, 0xd8f: 0x0008, 0xd90: 0xe00d, 0xd91: 0x0008, 0xd92: 0xe00d, 0xd93: 0x0008, 0xd94: 0xe00d, 0xd95: 0x0008, 0xd96: 0xe00d, 0xd97: 0x0008, 0xd98: 0xe00d, 0xd99: 0x0008, 0xd9a: 0xe00d, 0xd9b: 0x0008, 0xd9c: 0xe00d, 0xd9d: 0x0008, 0xd9e: 0xe00d, 0xd9f: 0x0008, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0xe00d, 0xda3: 0x0008, 0xda4: 0x0008, 0xda5: 0x0018, 0xda6: 0x0018, 0xda7: 0x0018, 0xda8: 0x0018, 0xda9: 0x0018, 0xdaa: 0x0018, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0xe01d, 0xdae: 0x0008, 0xdaf: 0x1308, 0xdb0: 0x1308, 0xdb1: 0x1308, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0040, 0xdb5: 0x0040, 0xdb6: 0x0040, 0xdb7: 0x0040, 0xdb8: 0x0040, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, // Block 0x37, offset 0xdc0 0xdc0: 0x26fd, 0xdc1: 0x271d, 0xdc2: 0x273d, 0xdc3: 0x275d, 
0xdc4: 0x277d, 0xdc5: 0x279d, 0xdc6: 0x27bd, 0xdc7: 0x27dd, 0xdc8: 0x27fd, 0xdc9: 0x281d, 0xdca: 0x283d, 0xdcb: 0x285d, 0xdcc: 0x287d, 0xdcd: 0x289d, 0xdce: 0x28bd, 0xdcf: 0x28dd, 0xdd0: 0x28fd, 0xdd1: 0x291d, 0xdd2: 0x293d, 0xdd3: 0x295d, 0xdd4: 0x297d, 0xdd5: 0x299d, 0xdd6: 0x0040, 0xdd7: 0x0040, 0xdd8: 0x0040, 0xdd9: 0x0040, 0xdda: 0x0040, 0xddb: 0x0040, 0xddc: 0x0040, 0xddd: 0x0040, 0xdde: 0x0040, 0xddf: 0x0040, 0xde0: 0x0040, 0xde1: 0x0040, 0xde2: 0x0040, 0xde3: 0x0040, 0xde4: 0x0040, 0xde5: 0x0040, 0xde6: 0x0040, 0xde7: 0x0040, 0xde8: 0x0040, 0xde9: 0x0040, 0xdea: 0x0040, 0xdeb: 0x0040, 0xdec: 0x0040, 0xded: 0x0040, 0xdee: 0x0040, 0xdef: 0x0040, 0xdf0: 0x0040, 0xdf1: 0x0040, 0xdf2: 0x0040, 0xdf3: 0x0040, 0xdf4: 0x0040, 0xdf5: 0x0040, 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0040, 0xdfa: 0x0040, 0xdfb: 0x0040, 0xdfc: 0x0040, 0xdfd: 0x0040, 0xdfe: 0x0040, 0xdff: 0x0040, // Block 0x38, offset 0xe00 0xe00: 0x000a, 0xe01: 0x0018, 0xe02: 0x29d1, 0xe03: 0x0018, 0xe04: 0x0018, 0xe05: 0x0008, 0xe06: 0x0008, 0xe07: 0x0008, 0xe08: 0x0018, 0xe09: 0x0018, 0xe0a: 0x0018, 0xe0b: 0x0018, 0xe0c: 0x0018, 0xe0d: 0x0018, 0xe0e: 0x0018, 0xe0f: 0x0018, 0xe10: 0x0018, 0xe11: 0x0018, 0xe12: 0x0018, 0xe13: 0x0018, 0xe14: 0x0018, 0xe15: 0x0018, 0xe16: 0x0018, 0xe17: 0x0018, 0xe18: 0x0018, 0xe19: 0x0018, 0xe1a: 0x0018, 0xe1b: 0x0018, 0xe1c: 0x0018, 0xe1d: 0x0018, 0xe1e: 0x0018, 0xe1f: 0x0018, 0xe20: 0x0018, 0xe21: 0x0018, 0xe22: 0x0018, 0xe23: 0x0018, 0xe24: 0x0018, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, 0xe2a: 0x1308, 0xe2b: 0x1308, 0xe2c: 0x1308, 0xe2d: 0x1308, 0xe2e: 0x1018, 0xe2f: 0x1018, 0xe30: 0x0018, 0xe31: 0x0018, 0xe32: 0x0018, 0xe33: 0x0018, 0xe34: 0x0018, 0xe35: 0x0018, 0xe36: 0xe125, 0xe37: 0x0018, 0xe38: 0x29bd, 0xe39: 0x29dd, 0xe3a: 0x29fd, 0xe3b: 0x0018, 0xe3c: 0x0008, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, // Block 0x39, offset 0xe40 0xe40: 0x2b3d, 0xe41: 0x2b5d, 0xe42: 0x2b7d, 0xe43: 0x2b9d, 0xe44: 0x2bbd, 0xe45: 0x2bdd, 0xe46: 0x2bdd, 0xe47: 0x2bdd, 0xe48: 0x2bfd, 0xe49: 0x2bfd, 0xe4a: 0x2bfd, 0xe4b: 0x2bfd, 0xe4c: 0x2c1d, 0xe4d: 0x2c1d, 0xe4e: 0x2c1d, 0xe4f: 0x2c3d, 0xe50: 0x2c5d, 0xe51: 0x2c5d, 0xe52: 0x2a7d, 0xe53: 0x2a7d, 0xe54: 0x2c5d, 0xe55: 0x2c5d, 0xe56: 0x2c7d, 0xe57: 0x2c7d, 0xe58: 0x2c5d, 0xe59: 0x2c5d, 0xe5a: 0x2a7d, 0xe5b: 0x2a7d, 0xe5c: 0x2c5d, 0xe5d: 0x2c5d, 0xe5e: 0x2c3d, 0xe5f: 0x2c3d, 0xe60: 0x2c9d, 0xe61: 0x2c9d, 0xe62: 0x2cbd, 0xe63: 0x2cbd, 0xe64: 0x0040, 0xe65: 0x2cdd, 0xe66: 0x2cfd, 0xe67: 0x2d1d, 0xe68: 0x2d1d, 0xe69: 0x2d3d, 0xe6a: 0x2d5d, 0xe6b: 0x2d7d, 0xe6c: 0x2d9d, 0xe6d: 0x2dbd, 0xe6e: 0x2ddd, 0xe6f: 0x2dfd, 0xe70: 0x2e1d, 0xe71: 0x2e3d, 0xe72: 0x2e3d, 0xe73: 0x2e5d, 0xe74: 0x2e7d, 0xe75: 0x2e7d, 0xe76: 0x2e9d, 0xe77: 0x2ebd, 0xe78: 0x2e5d, 0xe79: 0x2edd, 0xe7a: 0x2efd, 0xe7b: 0x2edd, 0xe7c: 0x2e5d, 0xe7d: 0x2f1d, 0xe7e: 0x2f3d, 0xe7f: 0x2f5d, // Block 0x3a, offset 0xe80 0xe80: 0x2f7d, 0xe81: 0x2f9d, 0xe82: 0x2cfd, 0xe83: 0x2cdd, 0xe84: 0x2fbd, 0xe85: 0x2fdd, 0xe86: 0x2ffd, 0xe87: 0x301d, 0xe88: 0x303d, 0xe89: 0x305d, 0xe8a: 0x307d, 0xe8b: 0x309d, 0xe8c: 0x30bd, 0xe8d: 0x30dd, 0xe8e: 0x30fd, 0xe8f: 0x0040, 0xe90: 0x0018, 0xe91: 0x0018, 0xe92: 0x311d, 0xe93: 0x313d, 0xe94: 0x315d, 0xe95: 0x317d, 0xe96: 0x319d, 0xe97: 0x31bd, 0xe98: 0x31dd, 0xe99: 0x31fd, 0xe9a: 0x321d, 0xe9b: 0x323d, 0xe9c: 0x315d, 0xe9d: 0x325d, 0xe9e: 0x327d, 0xe9f: 0x329d, 0xea0: 0x0008, 0xea1: 0x0008, 0xea2: 0x0008, 0xea3: 0x0008, 0xea4: 0x0008, 0xea5: 0x0008, 0xea6: 0x0008, 0xea7: 0x0008, 0xea8: 0x0008, 0xea9: 0x0008, 0xeaa: 0x0008, 
0xeab: 0x0008, 0xeac: 0x0008, 0xead: 0x0008, 0xeae: 0x0008, 0xeaf: 0x0008, 0xeb0: 0x0008, 0xeb1: 0x0008, 0xeb2: 0x0008, 0xeb3: 0x0008, 0xeb4: 0x0008, 0xeb5: 0x0008, 0xeb6: 0x0008, 0xeb7: 0x0008, 0xeb8: 0x0008, 0xeb9: 0x0008, 0xeba: 0x0008, 0xebb: 0x0040, 0xebc: 0x0040, 0xebd: 0x0040, 0xebe: 0x0040, 0xebf: 0x0040, // Block 0x3b, offset 0xec0 0xec0: 0x36a2, 0xec1: 0x36d2, 0xec2: 0x3702, 0xec3: 0x3732, 0xec4: 0x32bd, 0xec5: 0x32dd, 0xec6: 0x32fd, 0xec7: 0x331d, 0xec8: 0x0018, 0xec9: 0x0018, 0xeca: 0x0018, 0xecb: 0x0018, 0xecc: 0x0018, 0xecd: 0x0018, 0xece: 0x0018, 0xecf: 0x0018, 0xed0: 0x333d, 0xed1: 0x3761, 0xed2: 0x3779, 0xed3: 0x3791, 0xed4: 0x37a9, 0xed5: 0x37c1, 0xed6: 0x37d9, 0xed7: 0x37f1, 0xed8: 0x3809, 0xed9: 0x3821, 0xeda: 0x3839, 0xedb: 0x3851, 0xedc: 0x3869, 0xedd: 0x3881, 0xede: 0x3899, 0xedf: 0x38b1, 0xee0: 0x335d, 0xee1: 0x337d, 0xee2: 0x339d, 0xee3: 0x33bd, 0xee4: 0x33dd, 0xee5: 0x33dd, 0xee6: 0x33fd, 0xee7: 0x341d, 0xee8: 0x343d, 0xee9: 0x345d, 0xeea: 0x347d, 0xeeb: 0x349d, 0xeec: 0x34bd, 0xeed: 0x34dd, 0xeee: 0x34fd, 0xeef: 0x351d, 0xef0: 0x353d, 0xef1: 0x355d, 0xef2: 0x357d, 0xef3: 0x359d, 0xef4: 0x35bd, 0xef5: 0x35dd, 0xef6: 0x35fd, 0xef7: 0x361d, 0xef8: 0x363d, 0xef9: 0x365d, 0xefa: 0x367d, 0xefb: 0x369d, 0xefc: 0x38c9, 0xefd: 0x3901, 0xefe: 0x36bd, 0xeff: 0x0018, // Block 0x3c, offset 0xf00 0xf00: 0x36dd, 0xf01: 0x36fd, 0xf02: 0x371d, 0xf03: 0x373d, 0xf04: 0x375d, 0xf05: 0x377d, 0xf06: 0x379d, 0xf07: 0x37bd, 0xf08: 0x37dd, 0xf09: 0x37fd, 0xf0a: 0x381d, 0xf0b: 0x383d, 0xf0c: 0x385d, 0xf0d: 0x387d, 0xf0e: 0x389d, 0xf0f: 0x38bd, 0xf10: 0x38dd, 0xf11: 0x38fd, 0xf12: 0x391d, 0xf13: 0x393d, 0xf14: 0x395d, 0xf15: 0x397d, 0xf16: 0x399d, 0xf17: 0x39bd, 0xf18: 0x39dd, 0xf19: 0x39fd, 0xf1a: 0x3a1d, 0xf1b: 0x3a3d, 0xf1c: 0x3a5d, 0xf1d: 0x3a7d, 0xf1e: 0x3a9d, 0xf1f: 0x3abd, 0xf20: 0x3add, 0xf21: 0x3afd, 0xf22: 0x3b1d, 0xf23: 0x3b3d, 0xf24: 0x3b5d, 0xf25: 0x3b7d, 0xf26: 0x127d, 0xf27: 0x3b9d, 0xf28: 0x3bbd, 0xf29: 0x3bdd, 0xf2a: 0x3bfd, 0xf2b: 0x3c1d, 0xf2c: 0x3c3d, 0xf2d: 0x3c5d, 0xf2e: 0x239d, 0xf2f: 0x3c7d, 0xf30: 0x3c9d, 0xf31: 0x3939, 0xf32: 0x3951, 0xf33: 0x3969, 0xf34: 0x3981, 0xf35: 0x3999, 0xf36: 0x39b1, 0xf37: 0x39c9, 0xf38: 0x39e1, 0xf39: 0x39f9, 0xf3a: 0x3a11, 0xf3b: 0x3a29, 0xf3c: 0x3a41, 0xf3d: 0x3a59, 0xf3e: 0x3a71, 0xf3f: 0x3a89, // Block 0x3d, offset 0xf40 0xf40: 0x3aa1, 0xf41: 0x3ac9, 0xf42: 0x3af1, 0xf43: 0x3b19, 0xf44: 0x3b41, 0xf45: 0x3b69, 0xf46: 0x3b91, 0xf47: 0x3bb9, 0xf48: 0x3be1, 0xf49: 0x3c09, 0xf4a: 0x3c39, 0xf4b: 0x3c69, 0xf4c: 0x3c99, 0xf4d: 0x3cbd, 0xf4e: 0x3cb1, 0xf4f: 0x3cdd, 0xf50: 0x3cfd, 0xf51: 0x3d15, 0xf52: 0x3d2d, 0xf53: 0x3d45, 0xf54: 0x3d5d, 0xf55: 0x3d5d, 0xf56: 0x3d45, 0xf57: 0x3d75, 0xf58: 0x07bd, 0xf59: 0x3d8d, 0xf5a: 0x3da5, 0xf5b: 0x3dbd, 0xf5c: 0x3dd5, 0xf5d: 0x3ded, 0xf5e: 0x3e05, 0xf5f: 0x3e1d, 0xf60: 0x3e35, 0xf61: 0x3e4d, 0xf62: 0x3e65, 0xf63: 0x3e7d, 0xf64: 0x3e95, 0xf65: 0x3e95, 0xf66: 0x3ead, 0xf67: 0x3ead, 0xf68: 0x3ec5, 0xf69: 0x3ec5, 0xf6a: 0x3edd, 0xf6b: 0x3ef5, 0xf6c: 0x3f0d, 0xf6d: 0x3f25, 0xf6e: 0x3f3d, 0xf6f: 0x3f3d, 0xf70: 0x3f55, 0xf71: 0x3f55, 0xf72: 0x3f55, 0xf73: 0x3f6d, 0xf74: 0x3f85, 0xf75: 0x3f9d, 0xf76: 0x3fb5, 0xf77: 0x3f9d, 0xf78: 0x3fcd, 0xf79: 0x3fe5, 0xf7a: 0x3f6d, 0xf7b: 0x3ffd, 0xf7c: 0x4015, 0xf7d: 0x4015, 0xf7e: 0x4015, 0xf7f: 0x0040, // Block 0x3e, offset 0xf80 0xf80: 0x3cc9, 0xf81: 0x3d31, 0xf82: 0x3d99, 0xf83: 0x3e01, 0xf84: 0x3e51, 0xf85: 0x3eb9, 0xf86: 0x3f09, 0xf87: 0x3f59, 0xf88: 0x3fd9, 0xf89: 0x4041, 0xf8a: 0x4091, 0xf8b: 0x40e1, 0xf8c: 0x4131, 0xf8d: 0x4199, 0xf8e: 0x4201, 0xf8f: 0x4251, 0xf90: 
0x42a1, 0xf91: 0x42d9, 0xf92: 0x4329, 0xf93: 0x4391, 0xf94: 0x43f9, 0xf95: 0x4431, 0xf96: 0x44b1, 0xf97: 0x4549, 0xf98: 0x45c9, 0xf99: 0x4619, 0xf9a: 0x4699, 0xf9b: 0x4719, 0xf9c: 0x4781, 0xf9d: 0x47d1, 0xf9e: 0x4821, 0xf9f: 0x4871, 0xfa0: 0x48d9, 0xfa1: 0x4959, 0xfa2: 0x49c1, 0xfa3: 0x4a11, 0xfa4: 0x4a61, 0xfa5: 0x4ab1, 0xfa6: 0x4ae9, 0xfa7: 0x4b21, 0xfa8: 0x4b59, 0xfa9: 0x4b91, 0xfaa: 0x4be1, 0xfab: 0x4c31, 0xfac: 0x4cb1, 0xfad: 0x4d01, 0xfae: 0x4d69, 0xfaf: 0x4de9, 0xfb0: 0x4e39, 0xfb1: 0x4e71, 0xfb2: 0x4ea9, 0xfb3: 0x4f29, 0xfb4: 0x4f91, 0xfb5: 0x5011, 0xfb6: 0x5061, 0xfb7: 0x50e1, 0xfb8: 0x5119, 0xfb9: 0x5169, 0xfba: 0x51b9, 0xfbb: 0x5209, 0xfbc: 0x5259, 0xfbd: 0x52a9, 0xfbe: 0x5311, 0xfbf: 0x5361, // Block 0x3f, offset 0xfc0 0xfc0: 0x5399, 0xfc1: 0x53e9, 0xfc2: 0x5439, 0xfc3: 0x5489, 0xfc4: 0x54f1, 0xfc5: 0x5541, 0xfc6: 0x5591, 0xfc7: 0x55e1, 0xfc8: 0x5661, 0xfc9: 0x56c9, 0xfca: 0x5701, 0xfcb: 0x5781, 0xfcc: 0x57b9, 0xfcd: 0x5821, 0xfce: 0x5889, 0xfcf: 0x58d9, 0xfd0: 0x5929, 0xfd1: 0x5979, 0xfd2: 0x59e1, 0xfd3: 0x5a19, 0xfd4: 0x5a69, 0xfd5: 0x5ad1, 0xfd6: 0x5b09, 0xfd7: 0x5b89, 0xfd8: 0x5bd9, 0xfd9: 0x5c01, 0xfda: 0x5c29, 0xfdb: 0x5c51, 0xfdc: 0x5c79, 0xfdd: 0x5ca1, 0xfde: 0x5cc9, 0xfdf: 0x5cf1, 0xfe0: 0x5d19, 0xfe1: 0x5d41, 0xfe2: 0x5d69, 0xfe3: 0x5d99, 0xfe4: 0x5dc9, 0xfe5: 0x5df9, 0xfe6: 0x5e29, 0xfe7: 0x5e59, 0xfe8: 0x5e89, 0xfe9: 0x5eb9, 0xfea: 0x5ee9, 0xfeb: 0x5f19, 0xfec: 0x5f49, 0xfed: 0x5f79, 0xfee: 0x5fa9, 0xfef: 0x5fd9, 0xff0: 0x6009, 0xff1: 0x402d, 0xff2: 0x6039, 0xff3: 0x6051, 0xff4: 0x404d, 0xff5: 0x6069, 0xff6: 0x6081, 0xff7: 0x6099, 0xff8: 0x406d, 0xff9: 0x406d, 0xffa: 0x60b1, 0xffb: 0x60c9, 0xffc: 0x6101, 0xffd: 0x6139, 0xffe: 0x6171, 0xfff: 0x61a9, // Block 0x40, offset 0x1000 0x1000: 0x6211, 0x1001: 0x6229, 0x1002: 0x408d, 0x1003: 0x6241, 0x1004: 0x6259, 0x1005: 0x6271, 0x1006: 0x6289, 0x1007: 0x62a1, 0x1008: 0x40ad, 0x1009: 0x62b9, 0x100a: 0x62e1, 0x100b: 0x62f9, 0x100c: 0x40cd, 0x100d: 0x40cd, 0x100e: 0x6311, 0x100f: 0x6329, 0x1010: 0x6341, 0x1011: 0x40ed, 0x1012: 0x410d, 0x1013: 0x412d, 0x1014: 0x414d, 0x1015: 0x416d, 0x1016: 0x6359, 0x1017: 0x6371, 0x1018: 0x6389, 0x1019: 0x63a1, 0x101a: 0x63b9, 0x101b: 0x418d, 0x101c: 0x63d1, 0x101d: 0x63e9, 0x101e: 0x6401, 0x101f: 0x41ad, 0x1020: 0x41cd, 0x1021: 0x6419, 0x1022: 0x41ed, 0x1023: 0x420d, 0x1024: 0x422d, 0x1025: 0x6431, 0x1026: 0x424d, 0x1027: 0x6449, 0x1028: 0x6479, 0x1029: 0x6211, 0x102a: 0x426d, 0x102b: 0x428d, 0x102c: 0x42ad, 0x102d: 0x42cd, 0x102e: 0x64b1, 0x102f: 0x64f1, 0x1030: 0x6539, 0x1031: 0x6551, 0x1032: 0x42ed, 0x1033: 0x6569, 0x1034: 0x6581, 0x1035: 0x6599, 0x1036: 0x430d, 0x1037: 0x65b1, 0x1038: 0x65c9, 0x1039: 0x65b1, 0x103a: 0x65e1, 0x103b: 0x65f9, 0x103c: 0x432d, 0x103d: 0x6611, 0x103e: 0x6629, 0x103f: 0x6611, // Block 0x41, offset 0x1040 0x1040: 0x434d, 0x1041: 0x436d, 0x1042: 0x0040, 0x1043: 0x6641, 0x1044: 0x6659, 0x1045: 0x6671, 0x1046: 0x6689, 0x1047: 0x0040, 0x1048: 0x66c1, 0x1049: 0x66d9, 0x104a: 0x66f1, 0x104b: 0x6709, 0x104c: 0x6721, 0x104d: 0x6739, 0x104e: 0x6401, 0x104f: 0x6751, 0x1050: 0x6769, 0x1051: 0x6781, 0x1052: 0x438d, 0x1053: 0x6799, 0x1054: 0x6289, 0x1055: 0x43ad, 0x1056: 0x43cd, 0x1057: 0x67b1, 0x1058: 0x0040, 0x1059: 0x43ed, 0x105a: 0x67c9, 0x105b: 0x67e1, 0x105c: 0x67f9, 0x105d: 0x6811, 0x105e: 0x6829, 0x105f: 0x6859, 0x1060: 0x6889, 0x1061: 0x68b1, 0x1062: 0x68d9, 0x1063: 0x6901, 0x1064: 0x6929, 0x1065: 0x6951, 0x1066: 0x6979, 0x1067: 0x69a1, 0x1068: 0x69c9, 0x1069: 0x69f1, 0x106a: 0x6a21, 0x106b: 0x6a51, 0x106c: 0x6a81, 0x106d: 0x6ab1, 0x106e: 0x6ae1, 0x106f: 0x6b11, 
0x1070: 0x6b41, 0x1071: 0x6b71, 0x1072: 0x6ba1, 0x1073: 0x6bd1, 0x1074: 0x6c01, 0x1075: 0x6c31, 0x1076: 0x6c61, 0x1077: 0x6c91, 0x1078: 0x6cc1, 0x1079: 0x6cf1, 0x107a: 0x6d21, 0x107b: 0x6d51, 0x107c: 0x6d81, 0x107d: 0x6db1, 0x107e: 0x6de1, 0x107f: 0x440d, // Block 0x42, offset 0x1080 0x1080: 0xe00d, 0x1081: 0x0008, 0x1082: 0xe00d, 0x1083: 0x0008, 0x1084: 0xe00d, 0x1085: 0x0008, 0x1086: 0xe00d, 0x1087: 0x0008, 0x1088: 0xe00d, 0x1089: 0x0008, 0x108a: 0xe00d, 0x108b: 0x0008, 0x108c: 0xe00d, 0x108d: 0x0008, 0x108e: 0xe00d, 0x108f: 0x0008, 0x1090: 0xe00d, 0x1091: 0x0008, 0x1092: 0xe00d, 0x1093: 0x0008, 0x1094: 0xe00d, 0x1095: 0x0008, 0x1096: 0xe00d, 0x1097: 0x0008, 0x1098: 0xe00d, 0x1099: 0x0008, 0x109a: 0xe00d, 0x109b: 0x0008, 0x109c: 0xe00d, 0x109d: 0x0008, 0x109e: 0xe00d, 0x109f: 0x0008, 0x10a0: 0xe00d, 0x10a1: 0x0008, 0x10a2: 0xe00d, 0x10a3: 0x0008, 0x10a4: 0xe00d, 0x10a5: 0x0008, 0x10a6: 0xe00d, 0x10a7: 0x0008, 0x10a8: 0xe00d, 0x10a9: 0x0008, 0x10aa: 0xe00d, 0x10ab: 0x0008, 0x10ac: 0xe00d, 0x10ad: 0x0008, 0x10ae: 0x0008, 0x10af: 0x1308, 0x10b0: 0x1318, 0x10b1: 0x1318, 0x10b2: 0x1318, 0x10b3: 0x0018, 0x10b4: 0x1308, 0x10b5: 0x1308, 0x10b6: 0x1308, 0x10b7: 0x1308, 0x10b8: 0x1308, 0x10b9: 0x1308, 0x10ba: 0x1308, 0x10bb: 0x1308, 0x10bc: 0x1308, 0x10bd: 0x1308, 0x10be: 0x0018, 0x10bf: 0x0008, // Block 0x43, offset 0x10c0 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0x0ea1, 0x10dd: 0x6e11, 0x10de: 0x1308, 0x10df: 0x1308, 0x10e0: 0x0008, 0x10e1: 0x0008, 0x10e2: 0x0008, 0x10e3: 0x0008, 0x10e4: 0x0008, 0x10e5: 0x0008, 0x10e6: 0x0008, 0x10e7: 0x0008, 0x10e8: 0x0008, 0x10e9: 0x0008, 0x10ea: 0x0008, 0x10eb: 0x0008, 0x10ec: 0x0008, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x0008, 0x10f0: 0x0008, 0x10f1: 0x0008, 0x10f2: 0x0008, 0x10f3: 0x0008, 0x10f4: 0x0008, 0x10f5: 0x0008, 0x10f6: 0x0008, 0x10f7: 0x0008, 0x10f8: 0x0008, 0x10f9: 0x0008, 0x10fa: 0x0008, 0x10fb: 0x0008, 0x10fc: 0x0008, 0x10fd: 0x0008, 0x10fe: 0x0008, 0x10ff: 0x0008, // Block 0x44, offset 0x1100 0x1100: 0x0018, 0x1101: 0x0018, 0x1102: 0x0018, 0x1103: 0x0018, 0x1104: 0x0018, 0x1105: 0x0018, 0x1106: 0x0018, 0x1107: 0x0018, 0x1108: 0x0018, 0x1109: 0x0018, 0x110a: 0x0018, 0x110b: 0x0018, 0x110c: 0x0018, 0x110d: 0x0018, 0x110e: 0x0018, 0x110f: 0x0018, 0x1110: 0x0018, 0x1111: 0x0018, 0x1112: 0x0018, 0x1113: 0x0018, 0x1114: 0x0018, 0x1115: 0x0018, 0x1116: 0x0018, 0x1117: 0x0008, 0x1118: 0x0008, 0x1119: 0x0008, 0x111a: 0x0008, 0x111b: 0x0008, 0x111c: 0x0008, 0x111d: 0x0008, 0x111e: 0x0008, 0x111f: 0x0008, 0x1120: 0x0018, 0x1121: 0x0018, 0x1122: 0xe00d, 0x1123: 0x0008, 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0xe00d, 0x112f: 0x0008, 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0xe00d, 0x1133: 0x0008, 0x1134: 0xe00d, 0x1135: 0x0008, 0x1136: 0xe00d, 0x1137: 0x0008, 0x1138: 0xe00d, 0x1139: 0x0008, 0x113a: 0xe00d, 0x113b: 0x0008, 0x113c: 0xe00d, 0x113d: 0x0008, 0x113e: 0xe00d, 0x113f: 0x0008, // Block 0x45, offset 0x1140 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, 0x1146: 
0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0xe00d, 0x115d: 0x0008, 0x115e: 0xe00d, 0x115f: 0x0008, 0x1160: 0xe00d, 0x1161: 0x0008, 0x1162: 0xe00d, 0x1163: 0x0008, 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, 0x1170: 0xe0fd, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0xe01d, 0x117a: 0x0008, 0x117b: 0xe03d, 0x117c: 0x0008, 0x117d: 0x442d, 0x117e: 0xe00d, 0x117f: 0x0008, // Block 0x46, offset 0x1180 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0x0008, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0xe03d, 0x118c: 0x0008, 0x118d: 0x11d9, 0x118e: 0x0008, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0x0008, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, 0x11aa: 0x6e29, 0x11ab: 0x1029, 0x11ac: 0x11c1, 0x11ad: 0x6e41, 0x11ae: 0x1221, 0x11af: 0x0040, 0x11b0: 0x6e59, 0x11b1: 0x6e71, 0x11b2: 0x1239, 0x11b3: 0x444d, 0x11b4: 0xe00d, 0x11b5: 0x0008, 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0x0040, 0x11b9: 0x0040, 0x11ba: 0x0040, 0x11bb: 0x0040, 0x11bc: 0x0040, 0x11bd: 0x0040, 0x11be: 0x0040, 0x11bf: 0x0040, // Block 0x47, offset 0x11c0 0x11c0: 0x64d5, 0x11c1: 0x64f5, 0x11c2: 0x6515, 0x11c3: 0x6535, 0x11c4: 0x6555, 0x11c5: 0x6575, 0x11c6: 0x6595, 0x11c7: 0x65b5, 0x11c8: 0x65d5, 0x11c9: 0x65f5, 0x11ca: 0x6615, 0x11cb: 0x6635, 0x11cc: 0x6655, 0x11cd: 0x6675, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0x6695, 0x11d1: 0x0008, 0x11d2: 0x66b5, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x66d5, 0x11d6: 0x66f5, 0x11d7: 0x6715, 0x11d8: 0x6735, 0x11d9: 0x6755, 0x11da: 0x6775, 0x11db: 0x6795, 0x11dc: 0x67b5, 0x11dd: 0x67d5, 0x11de: 0x67f5, 0x11df: 0x0008, 0x11e0: 0x6815, 0x11e1: 0x0008, 0x11e2: 0x6835, 0x11e3: 0x0008, 0x11e4: 0x0008, 0x11e5: 0x6855, 0x11e6: 0x6875, 0x11e7: 0x0008, 0x11e8: 0x0008, 0x11e9: 0x0008, 0x11ea: 0x6895, 0x11eb: 0x68b5, 0x11ec: 0x68d5, 0x11ed: 0x68f5, 0x11ee: 0x6915, 0x11ef: 0x6935, 0x11f0: 0x6955, 0x11f1: 0x6975, 0x11f2: 0x6995, 0x11f3: 0x69b5, 0x11f4: 0x69d5, 0x11f5: 0x69f5, 0x11f6: 0x6a15, 0x11f7: 0x6a35, 0x11f8: 0x6a55, 0x11f9: 0x6a75, 0x11fa: 0x6a95, 0x11fb: 0x6ab5, 0x11fc: 0x6ad5, 0x11fd: 0x6af5, 0x11fe: 0x6b15, 0x11ff: 0x6b35, // Block 0x48, offset 0x1200 0x1200: 0x7a95, 0x1201: 0x7ab5, 0x1202: 0x7ad5, 0x1203: 0x7af5, 0x1204: 0x7b15, 0x1205: 0x7b35, 0x1206: 0x7b55, 0x1207: 0x7b75, 0x1208: 0x7b95, 0x1209: 0x7bb5, 0x120a: 0x7bd5, 0x120b: 0x7bf5, 0x120c: 0x7c15, 0x120d: 0x7c35, 0x120e: 0x7c55, 0x120f: 0x6ec9, 0x1210: 0x6ef1, 0x1211: 0x6f19, 0x1212: 0x7c75, 0x1213: 0x7c95, 0x1214: 0x7cb5, 0x1215: 0x6f41, 0x1216: 0x6f69, 0x1217: 0x6f91, 0x1218: 0x7cd5, 0x1219: 0x7cf5, 0x121a: 0x0040, 0x121b: 0x0040, 0x121c: 0x0040, 0x121d: 0x0040, 0x121e: 0x0040, 
0x121f: 0x0040, 0x1220: 0x0040, 0x1221: 0x0040, 0x1222: 0x0040, 0x1223: 0x0040, 0x1224: 0x0040, 0x1225: 0x0040, 0x1226: 0x0040, 0x1227: 0x0040, 0x1228: 0x0040, 0x1229: 0x0040, 0x122a: 0x0040, 0x122b: 0x0040, 0x122c: 0x0040, 0x122d: 0x0040, 0x122e: 0x0040, 0x122f: 0x0040, 0x1230: 0x0040, 0x1231: 0x0040, 0x1232: 0x0040, 0x1233: 0x0040, 0x1234: 0x0040, 0x1235: 0x0040, 0x1236: 0x0040, 0x1237: 0x0040, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, // Block 0x49, offset 0x1240 0x1240: 0x6fb9, 0x1241: 0x6fd1, 0x1242: 0x6fe9, 0x1243: 0x7d15, 0x1244: 0x7d35, 0x1245: 0x7001, 0x1246: 0x7001, 0x1247: 0x0040, 0x1248: 0x0040, 0x1249: 0x0040, 0x124a: 0x0040, 0x124b: 0x0040, 0x124c: 0x0040, 0x124d: 0x0040, 0x124e: 0x0040, 0x124f: 0x0040, 0x1250: 0x0040, 0x1251: 0x0040, 0x1252: 0x0040, 0x1253: 0x7019, 0x1254: 0x7041, 0x1255: 0x7069, 0x1256: 0x7091, 0x1257: 0x70b9, 0x1258: 0x0040, 0x1259: 0x0040, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x70e1, 0x125e: 0x1308, 0x125f: 0x7109, 0x1260: 0x7131, 0x1261: 0x20a9, 0x1262: 0x20f1, 0x1263: 0x7149, 0x1264: 0x7161, 0x1265: 0x7179, 0x1266: 0x7191, 0x1267: 0x71a9, 0x1268: 0x71c1, 0x1269: 0x1fb2, 0x126a: 0x71d9, 0x126b: 0x7201, 0x126c: 0x7229, 0x126d: 0x7261, 0x126e: 0x7299, 0x126f: 0x72c1, 0x1270: 0x72e9, 0x1271: 0x7311, 0x1272: 0x7339, 0x1273: 0x7361, 0x1274: 0x7389, 0x1275: 0x73b1, 0x1276: 0x73d9, 0x1277: 0x0040, 0x1278: 0x7401, 0x1279: 0x7429, 0x127a: 0x7451, 0x127b: 0x7479, 0x127c: 0x74a1, 0x127d: 0x0040, 0x127e: 0x74c9, 0x127f: 0x0040, // Block 0x4a, offset 0x1280 0x1280: 0x74f1, 0x1281: 0x7519, 0x1282: 0x0040, 0x1283: 0x7541, 0x1284: 0x7569, 0x1285: 0x0040, 0x1286: 0x7591, 0x1287: 0x75b9, 0x1288: 0x75e1, 0x1289: 0x7609, 0x128a: 0x7631, 0x128b: 0x7659, 0x128c: 0x7681, 0x128d: 0x76a9, 0x128e: 0x76d1, 0x128f: 0x76f9, 0x1290: 0x7721, 0x1291: 0x7721, 0x1292: 0x7739, 0x1293: 0x7739, 0x1294: 0x7739, 0x1295: 0x7739, 0x1296: 0x7751, 0x1297: 0x7751, 0x1298: 0x7751, 0x1299: 0x7751, 0x129a: 0x7769, 0x129b: 0x7769, 0x129c: 0x7769, 0x129d: 0x7769, 0x129e: 0x7781, 0x129f: 0x7781, 0x12a0: 0x7781, 0x12a1: 0x7781, 0x12a2: 0x7799, 0x12a3: 0x7799, 0x12a4: 0x7799, 0x12a5: 0x7799, 0x12a6: 0x77b1, 0x12a7: 0x77b1, 0x12a8: 0x77b1, 0x12a9: 0x77b1, 0x12aa: 0x77c9, 0x12ab: 0x77c9, 0x12ac: 0x77c9, 0x12ad: 0x77c9, 0x12ae: 0x77e1, 0x12af: 0x77e1, 0x12b0: 0x77e1, 0x12b1: 0x77e1, 0x12b2: 0x77f9, 0x12b3: 0x77f9, 0x12b4: 0x77f9, 0x12b5: 0x77f9, 0x12b6: 0x7811, 0x12b7: 0x7811, 0x12b8: 0x7811, 0x12b9: 0x7811, 0x12ba: 0x7829, 0x12bb: 0x7829, 0x12bc: 0x7829, 0x12bd: 0x7829, 0x12be: 0x7841, 0x12bf: 0x7841, // Block 0x4b, offset 0x12c0 0x12c0: 0x7841, 0x12c1: 0x7841, 0x12c2: 0x7859, 0x12c3: 0x7859, 0x12c4: 0x7871, 0x12c5: 0x7871, 0x12c6: 0x7889, 0x12c7: 0x7889, 0x12c8: 0x78a1, 0x12c9: 0x78a1, 0x12ca: 0x78b9, 0x12cb: 0x78b9, 0x12cc: 0x78d1, 0x12cd: 0x78d1, 0x12ce: 0x78e9, 0x12cf: 0x78e9, 0x12d0: 0x78e9, 0x12d1: 0x78e9, 0x12d2: 0x7901, 0x12d3: 0x7901, 0x12d4: 0x7901, 0x12d5: 0x7901, 0x12d6: 0x7919, 0x12d7: 0x7919, 0x12d8: 0x7919, 0x12d9: 0x7919, 0x12da: 0x7931, 0x12db: 0x7931, 0x12dc: 0x7931, 0x12dd: 0x7931, 0x12de: 0x7949, 0x12df: 0x7949, 0x12e0: 0x7961, 0x12e1: 0x7961, 0x12e2: 0x7961, 0x12e3: 0x7961, 0x12e4: 0x7979, 0x12e5: 0x7979, 0x12e6: 0x7991, 0x12e7: 0x7991, 0x12e8: 0x7991, 0x12e9: 0x7991, 0x12ea: 0x79a9, 0x12eb: 0x79a9, 0x12ec: 0x79a9, 0x12ed: 0x79a9, 0x12ee: 0x79c1, 0x12ef: 0x79c1, 0x12f0: 0x79d9, 0x12f1: 0x79d9, 0x12f2: 0x0018, 0x12f3: 0x0018, 0x12f4: 0x0018, 0x12f5: 0x0018, 0x12f6: 0x0018, 0x12f7: 
0x0018, 0x12f8: 0x0018, 0x12f9: 0x0018, 0x12fa: 0x0018, 0x12fb: 0x0018, 0x12fc: 0x0018, 0x12fd: 0x0018, 0x12fe: 0x0018, 0x12ff: 0x0018, // Block 0x4c, offset 0x1300 0x1300: 0x0018, 0x1301: 0x0018, 0x1302: 0x0040, 0x1303: 0x0040, 0x1304: 0x0040, 0x1305: 0x0040, 0x1306: 0x0040, 0x1307: 0x0040, 0x1308: 0x0040, 0x1309: 0x0040, 0x130a: 0x0040, 0x130b: 0x0040, 0x130c: 0x0040, 0x130d: 0x0040, 0x130e: 0x0040, 0x130f: 0x0040, 0x1310: 0x0040, 0x1311: 0x0040, 0x1312: 0x0040, 0x1313: 0x79f1, 0x1314: 0x79f1, 0x1315: 0x79f1, 0x1316: 0x79f1, 0x1317: 0x7a09, 0x1318: 0x7a09, 0x1319: 0x7a21, 0x131a: 0x7a21, 0x131b: 0x7a39, 0x131c: 0x7a39, 0x131d: 0x0479, 0x131e: 0x7a51, 0x131f: 0x7a51, 0x1320: 0x7a69, 0x1321: 0x7a69, 0x1322: 0x7a81, 0x1323: 0x7a81, 0x1324: 0x7a99, 0x1325: 0x7a99, 0x1326: 0x7a99, 0x1327: 0x7a99, 0x1328: 0x7ab1, 0x1329: 0x7ab1, 0x132a: 0x7ac9, 0x132b: 0x7ac9, 0x132c: 0x7af1, 0x132d: 0x7af1, 0x132e: 0x7b19, 0x132f: 0x7b19, 0x1330: 0x7b41, 0x1331: 0x7b41, 0x1332: 0x7b69, 0x1333: 0x7b69, 0x1334: 0x7b91, 0x1335: 0x7b91, 0x1336: 0x7bb9, 0x1337: 0x7bb9, 0x1338: 0x7bb9, 0x1339: 0x7be1, 0x133a: 0x7be1, 0x133b: 0x7be1, 0x133c: 0x7c09, 0x133d: 0x7c09, 0x133e: 0x7c09, 0x133f: 0x7c09, // Block 0x4d, offset 0x1340 0x1340: 0x85f9, 0x1341: 0x8621, 0x1342: 0x8649, 0x1343: 0x8671, 0x1344: 0x8699, 0x1345: 0x86c1, 0x1346: 0x86e9, 0x1347: 0x8711, 0x1348: 0x8739, 0x1349: 0x8761, 0x134a: 0x8789, 0x134b: 0x87b1, 0x134c: 0x87d9, 0x134d: 0x8801, 0x134e: 0x8829, 0x134f: 0x8851, 0x1350: 0x8879, 0x1351: 0x88a1, 0x1352: 0x88c9, 0x1353: 0x88f1, 0x1354: 0x8919, 0x1355: 0x8941, 0x1356: 0x8969, 0x1357: 0x8991, 0x1358: 0x89b9, 0x1359: 0x89e1, 0x135a: 0x8a09, 0x135b: 0x8a31, 0x135c: 0x8a59, 0x135d: 0x8a81, 0x135e: 0x8aaa, 0x135f: 0x8ada, 0x1360: 0x8b0a, 0x1361: 0x8b3a, 0x1362: 0x8b6a, 0x1363: 0x8b9a, 0x1364: 0x8bc9, 0x1365: 0x8bf1, 0x1366: 0x7c71, 0x1367: 0x8c19, 0x1368: 0x7be1, 0x1369: 0x7c99, 0x136a: 0x8c41, 0x136b: 0x8c69, 0x136c: 0x7d39, 0x136d: 0x8c91, 0x136e: 0x7d61, 0x136f: 0x7d89, 0x1370: 0x8cb9, 0x1371: 0x8ce1, 0x1372: 0x7e29, 0x1373: 0x8d09, 0x1374: 0x7e51, 0x1375: 0x7e79, 0x1376: 0x8d31, 0x1377: 0x8d59, 0x1378: 0x7ec9, 0x1379: 0x8d81, 0x137a: 0x7ef1, 0x137b: 0x7f19, 0x137c: 0x83a1, 0x137d: 0x83c9, 0x137e: 0x8441, 0x137f: 0x8469, // Block 0x4e, offset 0x1380 0x1380: 0x8491, 0x1381: 0x8531, 0x1382: 0x8559, 0x1383: 0x8581, 0x1384: 0x85a9, 0x1385: 0x8649, 0x1386: 0x8671, 0x1387: 0x8699, 0x1388: 0x8da9, 0x1389: 0x8739, 0x138a: 0x8dd1, 0x138b: 0x8df9, 0x138c: 0x8829, 0x138d: 0x8e21, 0x138e: 0x8851, 0x138f: 0x8879, 0x1390: 0x8a81, 0x1391: 0x8e49, 0x1392: 0x8e71, 0x1393: 0x89b9, 0x1394: 0x8e99, 0x1395: 0x89e1, 0x1396: 0x8a09, 0x1397: 0x7c21, 0x1398: 0x7c49, 0x1399: 0x8ec1, 0x139a: 0x7c71, 0x139b: 0x8ee9, 0x139c: 0x7cc1, 0x139d: 0x7ce9, 0x139e: 0x7d11, 0x139f: 0x7d39, 0x13a0: 0x8f11, 0x13a1: 0x7db1, 0x13a2: 0x7dd9, 0x13a3: 0x7e01, 0x13a4: 0x7e29, 0x13a5: 0x8f39, 0x13a6: 0x7ec9, 0x13a7: 0x7f41, 0x13a8: 0x7f69, 0x13a9: 0x7f91, 0x13aa: 0x7fb9, 0x13ab: 0x7fe1, 0x13ac: 0x8031, 0x13ad: 0x8059, 0x13ae: 0x8081, 0x13af: 0x80a9, 0x13b0: 0x80d1, 0x13b1: 0x80f9, 0x13b2: 0x8f61, 0x13b3: 0x8121, 0x13b4: 0x8149, 0x13b5: 0x8171, 0x13b6: 0x8199, 0x13b7: 0x81c1, 0x13b8: 0x81e9, 0x13b9: 0x8239, 0x13ba: 0x8261, 0x13bb: 0x8289, 0x13bc: 0x82b1, 0x13bd: 0x82d9, 0x13be: 0x8301, 0x13bf: 0x8329, // Block 0x4f, offset 0x13c0 0x13c0: 0x8351, 0x13c1: 0x8379, 0x13c2: 0x83f1, 0x13c3: 0x8419, 0x13c4: 0x84b9, 0x13c5: 0x84e1, 0x13c6: 0x8509, 0x13c7: 0x8531, 0x13c8: 0x8559, 0x13c9: 0x85d1, 0x13ca: 0x85f9, 0x13cb: 0x8621, 0x13cc: 0x8649, 0x13cd: 0x8f89, 
0x13ce: 0x86c1, 0x13cf: 0x86e9, 0x13d0: 0x8711, 0x13d1: 0x8739, 0x13d2: 0x87b1, 0x13d3: 0x87d9, 0x13d4: 0x8801, 0x13d5: 0x8829, 0x13d6: 0x8fb1, 0x13d7: 0x88a1, 0x13d8: 0x88c9, 0x13d9: 0x8fd9, 0x13da: 0x8941, 0x13db: 0x8969, 0x13dc: 0x8991, 0x13dd: 0x89b9, 0x13de: 0x9001, 0x13df: 0x7c71, 0x13e0: 0x8ee9, 0x13e1: 0x7d39, 0x13e2: 0x8f11, 0x13e3: 0x7e29, 0x13e4: 0x8f39, 0x13e5: 0x7ec9, 0x13e6: 0x9029, 0x13e7: 0x80d1, 0x13e8: 0x9051, 0x13e9: 0x9079, 0x13ea: 0x90a1, 0x13eb: 0x8531, 0x13ec: 0x8559, 0x13ed: 0x8649, 0x13ee: 0x8829, 0x13ef: 0x8fb1, 0x13f0: 0x89b9, 0x13f1: 0x9001, 0x13f2: 0x90c9, 0x13f3: 0x9101, 0x13f4: 0x9139, 0x13f5: 0x9171, 0x13f6: 0x9199, 0x13f7: 0x91c1, 0x13f8: 0x91e9, 0x13f9: 0x9211, 0x13fa: 0x9239, 0x13fb: 0x9261, 0x13fc: 0x9289, 0x13fd: 0x92b1, 0x13fe: 0x92d9, 0x13ff: 0x9301, // Block 0x50, offset 0x1400 0x1400: 0x9329, 0x1401: 0x9351, 0x1402: 0x9379, 0x1403: 0x93a1, 0x1404: 0x93c9, 0x1405: 0x93f1, 0x1406: 0x9419, 0x1407: 0x9441, 0x1408: 0x9469, 0x1409: 0x9491, 0x140a: 0x94b9, 0x140b: 0x94e1, 0x140c: 0x9079, 0x140d: 0x9509, 0x140e: 0x9531, 0x140f: 0x9559, 0x1410: 0x9581, 0x1411: 0x9171, 0x1412: 0x9199, 0x1413: 0x91c1, 0x1414: 0x91e9, 0x1415: 0x9211, 0x1416: 0x9239, 0x1417: 0x9261, 0x1418: 0x9289, 0x1419: 0x92b1, 0x141a: 0x92d9, 0x141b: 0x9301, 0x141c: 0x9329, 0x141d: 0x9351, 0x141e: 0x9379, 0x141f: 0x93a1, 0x1420: 0x93c9, 0x1421: 0x93f1, 0x1422: 0x9419, 0x1423: 0x9441, 0x1424: 0x9469, 0x1425: 0x9491, 0x1426: 0x94b9, 0x1427: 0x94e1, 0x1428: 0x9079, 0x1429: 0x9509, 0x142a: 0x9531, 0x142b: 0x9559, 0x142c: 0x9581, 0x142d: 0x9491, 0x142e: 0x94b9, 0x142f: 0x94e1, 0x1430: 0x9079, 0x1431: 0x9051, 0x1432: 0x90a1, 0x1433: 0x8211, 0x1434: 0x8059, 0x1435: 0x8081, 0x1436: 0x80a9, 0x1437: 0x9491, 0x1438: 0x94b9, 0x1439: 0x94e1, 0x143a: 0x8211, 0x143b: 0x8239, 0x143c: 0x95a9, 0x143d: 0x95a9, 0x143e: 0x0018, 0x143f: 0x0018, // Block 0x51, offset 0x1440 0x1440: 0x0040, 0x1441: 0x0040, 0x1442: 0x0040, 0x1443: 0x0040, 0x1444: 0x0040, 0x1445: 0x0040, 0x1446: 0x0040, 0x1447: 0x0040, 0x1448: 0x0040, 0x1449: 0x0040, 0x144a: 0x0040, 0x144b: 0x0040, 0x144c: 0x0040, 0x144d: 0x0040, 0x144e: 0x0040, 0x144f: 0x0040, 0x1450: 0x95d1, 0x1451: 0x9609, 0x1452: 0x9609, 0x1453: 0x9641, 0x1454: 0x9679, 0x1455: 0x96b1, 0x1456: 0x96e9, 0x1457: 0x9721, 0x1458: 0x9759, 0x1459: 0x9759, 0x145a: 0x9791, 0x145b: 0x97c9, 0x145c: 0x9801, 0x145d: 0x9839, 0x145e: 0x9871, 0x145f: 0x98a9, 0x1460: 0x98a9, 0x1461: 0x98e1, 0x1462: 0x9919, 0x1463: 0x9919, 0x1464: 0x9951, 0x1465: 0x9951, 0x1466: 0x9989, 0x1467: 0x99c1, 0x1468: 0x99c1, 0x1469: 0x99f9, 0x146a: 0x9a31, 0x146b: 0x9a31, 0x146c: 0x9a69, 0x146d: 0x9a69, 0x146e: 0x9aa1, 0x146f: 0x9ad9, 0x1470: 0x9ad9, 0x1471: 0x9b11, 0x1472: 0x9b11, 0x1473: 0x9b49, 0x1474: 0x9b81, 0x1475: 0x9bb9, 0x1476: 0x9bf1, 0x1477: 0x9bf1, 0x1478: 0x9c29, 0x1479: 0x9c61, 0x147a: 0x9c99, 0x147b: 0x9cd1, 0x147c: 0x9d09, 0x147d: 0x9d09, 0x147e: 0x9d41, 0x147f: 0x9d79, // Block 0x52, offset 0x1480 0x1480: 0xa949, 0x1481: 0xa981, 0x1482: 0xa9b9, 0x1483: 0xa8a1, 0x1484: 0x9bb9, 0x1485: 0x9989, 0x1486: 0xa9f1, 0x1487: 0xaa29, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040, 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x0040, 0x1491: 0x0040, 0x1492: 0x0040, 0x1493: 0x0040, 0x1494: 0x0040, 0x1495: 0x0040, 0x1496: 0x0040, 0x1497: 0x0040, 0x1498: 0x0040, 0x1499: 0x0040, 0x149a: 0x0040, 0x149b: 0x0040, 0x149c: 0x0040, 0x149d: 0x0040, 0x149e: 0x0040, 0x149f: 0x0040, 0x14a0: 0x0040, 0x14a1: 0x0040, 0x14a2: 0x0040, 0x14a3: 0x0040, 0x14a4: 0x0040, 0x14a5: 0x0040, 0x14a6: 
0x0040, 0x14a7: 0x0040, 0x14a8: 0x0040, 0x14a9: 0x0040, 0x14aa: 0x0040, 0x14ab: 0x0040, 0x14ac: 0x0040, 0x14ad: 0x0040, 0x14ae: 0x0040, 0x14af: 0x0040, 0x14b0: 0xaa61, 0x14b1: 0xaa99, 0x14b2: 0xaad1, 0x14b3: 0xab19, 0x14b4: 0xab61, 0x14b5: 0xaba9, 0x14b6: 0xabf1, 0x14b7: 0xac39, 0x14b8: 0xac81, 0x14b9: 0xacc9, 0x14ba: 0xad02, 0x14bb: 0xae12, 0x14bc: 0xae91, 0x14bd: 0x0018, 0x14be: 0x0040, 0x14bf: 0x0040, // Block 0x53, offset 0x14c0 0x14c0: 0x13c0, 0x14c1: 0x13c0, 0x14c2: 0x13c0, 0x14c3: 0x13c0, 0x14c4: 0x13c0, 0x14c5: 0x13c0, 0x14c6: 0x13c0, 0x14c7: 0x13c0, 0x14c8: 0x13c0, 0x14c9: 0x13c0, 0x14ca: 0x13c0, 0x14cb: 0x13c0, 0x14cc: 0x13c0, 0x14cd: 0x13c0, 0x14ce: 0x13c0, 0x14cf: 0x13c0, 0x14d0: 0xaeda, 0x14d1: 0x7d55, 0x14d2: 0x0040, 0x14d3: 0xaeea, 0x14d4: 0x03c2, 0x14d5: 0xaefa, 0x14d6: 0xaf0a, 0x14d7: 0x7d75, 0x14d8: 0x7d95, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040, 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x1308, 0x14e1: 0x1308, 0x14e2: 0x1308, 0x14e3: 0x1308, 0x14e4: 0x1308, 0x14e5: 0x1308, 0x14e6: 0x1308, 0x14e7: 0x1308, 0x14e8: 0x1308, 0x14e9: 0x1308, 0x14ea: 0x1308, 0x14eb: 0x1308, 0x14ec: 0x1308, 0x14ed: 0x1308, 0x14ee: 0x1308, 0x14ef: 0x1308, 0x14f0: 0x0040, 0x14f1: 0x7db5, 0x14f2: 0x7dd5, 0x14f3: 0xaf1a, 0x14f4: 0xaf1a, 0x14f5: 0x1fd2, 0x14f6: 0x1fe2, 0x14f7: 0xaf2a, 0x14f8: 0xaf3a, 0x14f9: 0x7df5, 0x14fa: 0x7e15, 0x14fb: 0x7e35, 0x14fc: 0x7df5, 0x14fd: 0x7e55, 0x14fe: 0x7e75, 0x14ff: 0x7e55, // Block 0x54, offset 0x1500 0x1500: 0x7e95, 0x1501: 0x7eb5, 0x1502: 0x7ed5, 0x1503: 0x7eb5, 0x1504: 0x7ef5, 0x1505: 0x0018, 0x1506: 0x0018, 0x1507: 0xaf4a, 0x1508: 0xaf5a, 0x1509: 0x7f16, 0x150a: 0x7f36, 0x150b: 0x7f56, 0x150c: 0x7f76, 0x150d: 0xaf1a, 0x150e: 0xaf1a, 0x150f: 0xaf1a, 0x1510: 0xaeda, 0x1511: 0x7f95, 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x03c2, 0x1515: 0xaeea, 0x1516: 0xaf0a, 0x1517: 0xaefa, 0x1518: 0x7fb5, 0x1519: 0x1fd2, 0x151a: 0x1fe2, 0x151b: 0xaf2a, 0x151c: 0xaf3a, 0x151d: 0x7e95, 0x151e: 0x7ef5, 0x151f: 0xaf6a, 0x1520: 0xaf7a, 0x1521: 0xaf8a, 0x1522: 0x1fb2, 0x1523: 0xaf99, 0x1524: 0xafaa, 0x1525: 0xafba, 0x1526: 0x1fc2, 0x1527: 0x0040, 0x1528: 0xafca, 0x1529: 0xafda, 0x152a: 0xafea, 0x152b: 0xaffa, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, 0x1530: 0x7fd6, 0x1531: 0xb009, 0x1532: 0x7ff6, 0x1533: 0x0008, 0x1534: 0x8016, 0x1535: 0x0040, 0x1536: 0x8036, 0x1537: 0xb031, 0x1538: 0x8056, 0x1539: 0xb059, 0x153a: 0x8076, 0x153b: 0xb081, 0x153c: 0x8096, 0x153d: 0xb0a9, 0x153e: 0x80b6, 0x153f: 0xb0d1, // Block 0x55, offset 0x1540 0x1540: 0xb0f9, 0x1541: 0xb111, 0x1542: 0xb111, 0x1543: 0xb129, 0x1544: 0xb129, 0x1545: 0xb141, 0x1546: 0xb141, 0x1547: 0xb159, 0x1548: 0xb159, 0x1549: 0xb171, 0x154a: 0xb171, 0x154b: 0xb171, 0x154c: 0xb171, 0x154d: 0xb189, 0x154e: 0xb189, 0x154f: 0xb1a1, 0x1550: 0xb1a1, 0x1551: 0xb1a1, 0x1552: 0xb1a1, 0x1553: 0xb1b9, 0x1554: 0xb1b9, 0x1555: 0xb1d1, 0x1556: 0xb1d1, 0x1557: 0xb1d1, 0x1558: 0xb1d1, 0x1559: 0xb1e9, 0x155a: 0xb1e9, 0x155b: 0xb1e9, 0x155c: 0xb1e9, 0x155d: 0xb201, 0x155e: 0xb201, 0x155f: 0xb201, 0x1560: 0xb201, 0x1561: 0xb219, 0x1562: 0xb219, 0x1563: 0xb219, 0x1564: 0xb219, 0x1565: 0xb231, 0x1566: 0xb231, 0x1567: 0xb231, 0x1568: 0xb231, 0x1569: 0xb249, 0x156a: 0xb249, 0x156b: 0xb261, 0x156c: 0xb261, 0x156d: 0xb279, 0x156e: 0xb279, 0x156f: 0xb291, 0x1570: 0xb291, 0x1571: 0xb2a9, 0x1572: 0xb2a9, 0x1573: 0xb2a9, 0x1574: 0xb2a9, 0x1575: 0xb2c1, 0x1576: 0xb2c1, 0x1577: 0xb2c1, 0x1578: 0xb2c1, 0x1579: 0xb2d9, 0x157a: 0xb2d9, 0x157b: 0xb2d9, 0x157c: 0xb2d9, 0x157d: 0xb2f1, 0x157e: 0xb2f1, 
0x157f: 0xb2f1, // Block 0x56, offset 0x1580 0x1580: 0xb2f1, 0x1581: 0xb309, 0x1582: 0xb309, 0x1583: 0xb309, 0x1584: 0xb309, 0x1585: 0xb321, 0x1586: 0xb321, 0x1587: 0xb321, 0x1588: 0xb321, 0x1589: 0xb339, 0x158a: 0xb339, 0x158b: 0xb339, 0x158c: 0xb339, 0x158d: 0xb351, 0x158e: 0xb351, 0x158f: 0xb351, 0x1590: 0xb351, 0x1591: 0xb369, 0x1592: 0xb369, 0x1593: 0xb369, 0x1594: 0xb369, 0x1595: 0xb381, 0x1596: 0xb381, 0x1597: 0xb381, 0x1598: 0xb381, 0x1599: 0xb399, 0x159a: 0xb399, 0x159b: 0xb399, 0x159c: 0xb399, 0x159d: 0xb3b1, 0x159e: 0xb3b1, 0x159f: 0xb3b1, 0x15a0: 0xb3b1, 0x15a1: 0xb3c9, 0x15a2: 0xb3c9, 0x15a3: 0xb3c9, 0x15a4: 0xb3c9, 0x15a5: 0xb3e1, 0x15a6: 0xb3e1, 0x15a7: 0xb3e1, 0x15a8: 0xb3e1, 0x15a9: 0xb3f9, 0x15aa: 0xb3f9, 0x15ab: 0xb3f9, 0x15ac: 0xb3f9, 0x15ad: 0xb411, 0x15ae: 0xb411, 0x15af: 0x7ab1, 0x15b0: 0x7ab1, 0x15b1: 0xb429, 0x15b2: 0xb429, 0x15b3: 0xb429, 0x15b4: 0xb429, 0x15b5: 0xb441, 0x15b6: 0xb441, 0x15b7: 0xb469, 0x15b8: 0xb469, 0x15b9: 0xb491, 0x15ba: 0xb491, 0x15bb: 0xb4b9, 0x15bc: 0xb4b9, 0x15bd: 0x0040, 0x15be: 0x0040, 0x15bf: 0x03c0, // Block 0x57, offset 0x15c0 0x15c0: 0x0040, 0x15c1: 0xaefa, 0x15c2: 0xb4e2, 0x15c3: 0xaf6a, 0x15c4: 0xafda, 0x15c5: 0xafea, 0x15c6: 0xaf7a, 0x15c7: 0xb4f2, 0x15c8: 0x1fd2, 0x15c9: 0x1fe2, 0x15ca: 0xaf8a, 0x15cb: 0x1fb2, 0x15cc: 0xaeda, 0x15cd: 0xaf99, 0x15ce: 0x29d1, 0x15cf: 0xb502, 0x15d0: 0x1f41, 0x15d1: 0x00c9, 0x15d2: 0x0069, 0x15d3: 0x0079, 0x15d4: 0x1f51, 0x15d5: 0x1f61, 0x15d6: 0x1f71, 0x15d7: 0x1f81, 0x15d8: 0x1f91, 0x15d9: 0x1fa1, 0x15da: 0xaeea, 0x15db: 0x03c2, 0x15dc: 0xafaa, 0x15dd: 0x1fc2, 0x15de: 0xafba, 0x15df: 0xaf0a, 0x15e0: 0xaffa, 0x15e1: 0x0039, 0x15e2: 0x0ee9, 0x15e3: 0x1159, 0x15e4: 0x0ef9, 0x15e5: 0x0f09, 0x15e6: 0x1199, 0x15e7: 0x0f31, 0x15e8: 0x0249, 0x15e9: 0x0f41, 0x15ea: 0x0259, 0x15eb: 0x0f51, 0x15ec: 0x0359, 0x15ed: 0x0f61, 0x15ee: 0x0f71, 0x15ef: 0x00d9, 0x15f0: 0x0f99, 0x15f1: 0x2039, 0x15f2: 0x0269, 0x15f3: 0x01d9, 0x15f4: 0x0fa9, 0x15f5: 0x0fb9, 0x15f6: 0x1089, 0x15f7: 0x0279, 0x15f8: 0x0369, 0x15f9: 0x0289, 0x15fa: 0x13d1, 0x15fb: 0xaf4a, 0x15fc: 0xafca, 0x15fd: 0xaf5a, 0x15fe: 0xb512, 0x15ff: 0xaf1a, // Block 0x58, offset 0x1600 0x1600: 0x1caa, 0x1601: 0x0039, 0x1602: 0x0ee9, 0x1603: 0x1159, 0x1604: 0x0ef9, 0x1605: 0x0f09, 0x1606: 0x1199, 0x1607: 0x0f31, 0x1608: 0x0249, 0x1609: 0x0f41, 0x160a: 0x0259, 0x160b: 0x0f51, 0x160c: 0x0359, 0x160d: 0x0f61, 0x160e: 0x0f71, 0x160f: 0x00d9, 0x1610: 0x0f99, 0x1611: 0x2039, 0x1612: 0x0269, 0x1613: 0x01d9, 0x1614: 0x0fa9, 0x1615: 0x0fb9, 0x1616: 0x1089, 0x1617: 0x0279, 0x1618: 0x0369, 0x1619: 0x0289, 0x161a: 0x13d1, 0x161b: 0xaf2a, 0x161c: 0xb522, 0x161d: 0xaf3a, 0x161e: 0xb532, 0x161f: 0x80d5, 0x1620: 0x80f5, 0x1621: 0x29d1, 0x1622: 0x8115, 0x1623: 0x8115, 0x1624: 0x8135, 0x1625: 0x8155, 0x1626: 0x8175, 0x1627: 0x8195, 0x1628: 0x81b5, 0x1629: 0x81d5, 0x162a: 0x81f5, 0x162b: 0x8215, 0x162c: 0x8235, 0x162d: 0x8255, 0x162e: 0x8275, 0x162f: 0x8295, 0x1630: 0x82b5, 0x1631: 0x82d5, 0x1632: 0x82f5, 0x1633: 0x8315, 0x1634: 0x8335, 0x1635: 0x8355, 0x1636: 0x8375, 0x1637: 0x8395, 0x1638: 0x83b5, 0x1639: 0x83d5, 0x163a: 0x83f5, 0x163b: 0x8415, 0x163c: 0x81b5, 0x163d: 0x8435, 0x163e: 0x8455, 0x163f: 0x8215, // Block 0x59, offset 0x1640 0x1640: 0x8475, 0x1641: 0x8495, 0x1642: 0x84b5, 0x1643: 0x84d5, 0x1644: 0x84f5, 0x1645: 0x8515, 0x1646: 0x8535, 0x1647: 0x8555, 0x1648: 0x84d5, 0x1649: 0x8575, 0x164a: 0x84d5, 0x164b: 0x8595, 0x164c: 0x8595, 0x164d: 0x85b5, 0x164e: 0x85b5, 0x164f: 0x85d5, 0x1650: 0x8515, 0x1651: 0x85f5, 0x1652: 0x8615, 0x1653: 0x85f5, 0x1654: 0x8635, 0x1655: 
0x8615, 0x1656: 0x8655, 0x1657: 0x8655, 0x1658: 0x8675, 0x1659: 0x8675, 0x165a: 0x8695, 0x165b: 0x8695, 0x165c: 0x8615, 0x165d: 0x8115, 0x165e: 0x86b5, 0x165f: 0x86d5, 0x1660: 0x0040, 0x1661: 0x86f5, 0x1662: 0x8715, 0x1663: 0x8735, 0x1664: 0x8755, 0x1665: 0x8735, 0x1666: 0x8775, 0x1667: 0x8795, 0x1668: 0x87b5, 0x1669: 0x87b5, 0x166a: 0x87d5, 0x166b: 0x87d5, 0x166c: 0x87f5, 0x166d: 0x87f5, 0x166e: 0x87d5, 0x166f: 0x87d5, 0x1670: 0x8815, 0x1671: 0x8835, 0x1672: 0x8855, 0x1673: 0x8875, 0x1674: 0x8895, 0x1675: 0x88b5, 0x1676: 0x88b5, 0x1677: 0x88b5, 0x1678: 0x88d5, 0x1679: 0x88d5, 0x167a: 0x88d5, 0x167b: 0x88d5, 0x167c: 0x87b5, 0x167d: 0x87b5, 0x167e: 0x87b5, 0x167f: 0x0040, // Block 0x5a, offset 0x1680 0x1680: 0x0040, 0x1681: 0x0040, 0x1682: 0x8715, 0x1683: 0x86f5, 0x1684: 0x88f5, 0x1685: 0x86f5, 0x1686: 0x8715, 0x1687: 0x86f5, 0x1688: 0x0040, 0x1689: 0x0040, 0x168a: 0x8915, 0x168b: 0x8715, 0x168c: 0x8935, 0x168d: 0x88f5, 0x168e: 0x8935, 0x168f: 0x8715, 0x1690: 0x0040, 0x1691: 0x0040, 0x1692: 0x8955, 0x1693: 0x8975, 0x1694: 0x8875, 0x1695: 0x8935, 0x1696: 0x88f5, 0x1697: 0x8935, 0x1698: 0x0040, 0x1699: 0x0040, 0x169a: 0x8995, 0x169b: 0x89b5, 0x169c: 0x8995, 0x169d: 0x0040, 0x169e: 0x0040, 0x169f: 0x0040, 0x16a0: 0xb541, 0x16a1: 0xb559, 0x16a2: 0xb571, 0x16a3: 0x89d6, 0x16a4: 0xb589, 0x16a5: 0xb5a1, 0x16a6: 0x89f5, 0x16a7: 0x0040, 0x16a8: 0x8a15, 0x16a9: 0x8a35, 0x16aa: 0x8a55, 0x16ab: 0x8a35, 0x16ac: 0x8a75, 0x16ad: 0x8a95, 0x16ae: 0x8ab5, 0x16af: 0x0040, 0x16b0: 0x0040, 0x16b1: 0x0040, 0x16b2: 0x0040, 0x16b3: 0x0040, 0x16b4: 0x0040, 0x16b5: 0x0040, 0x16b6: 0x0040, 0x16b7: 0x0040, 0x16b8: 0x0040, 0x16b9: 0x0340, 0x16ba: 0x0340, 0x16bb: 0x0340, 0x16bc: 0x0040, 0x16bd: 0x0040, 0x16be: 0x0040, 0x16bf: 0x0040, // Block 0x5b, offset 0x16c0 0x16c0: 0x0208, 0x16c1: 0x0208, 0x16c2: 0x0208, 0x16c3: 0x0208, 0x16c4: 0x0208, 0x16c5: 0x0408, 0x16c6: 0x0008, 0x16c7: 0x0408, 0x16c8: 0x0018, 0x16c9: 0x0408, 0x16ca: 0x0408, 0x16cb: 0x0008, 0x16cc: 0x0008, 0x16cd: 0x0108, 0x16ce: 0x0408, 0x16cf: 0x0408, 0x16d0: 0x0408, 0x16d1: 0x0408, 0x16d2: 0x0408, 0x16d3: 0x0208, 0x16d4: 0x0208, 0x16d5: 0x0208, 0x16d6: 0x0208, 0x16d7: 0x0108, 0x16d8: 0x0208, 0x16d9: 0x0208, 0x16da: 0x0208, 0x16db: 0x0208, 0x16dc: 0x0208, 0x16dd: 0x0408, 0x16de: 0x0208, 0x16df: 0x0208, 0x16e0: 0x0208, 0x16e1: 0x0408, 0x16e2: 0x0008, 0x16e3: 0x0008, 0x16e4: 0x0408, 0x16e5: 0x1308, 0x16e6: 0x1308, 0x16e7: 0x0040, 0x16e8: 0x0040, 0x16e9: 0x0040, 0x16ea: 0x0040, 0x16eb: 0x0218, 0x16ec: 0x0218, 0x16ed: 0x0218, 0x16ee: 0x0218, 0x16ef: 0x0418, 0x16f0: 0x0018, 0x16f1: 0x0018, 0x16f2: 0x0018, 0x16f3: 0x0018, 0x16f4: 0x0018, 0x16f5: 0x0018, 0x16f6: 0x0018, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0040, 0x16fa: 0x0040, 0x16fb: 0x0040, 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, // Block 0x5c, offset 0x1700 0x1700: 0x0208, 0x1701: 0x0408, 0x1702: 0x0208, 0x1703: 0x0408, 0x1704: 0x0408, 0x1705: 0x0408, 0x1706: 0x0208, 0x1707: 0x0208, 0x1708: 0x0208, 0x1709: 0x0408, 0x170a: 0x0208, 0x170b: 0x0208, 0x170c: 0x0408, 0x170d: 0x0208, 0x170e: 0x0408, 0x170f: 0x0408, 0x1710: 0x0208, 0x1711: 0x0408, 0x1712: 0x0040, 0x1713: 0x0040, 0x1714: 0x0040, 0x1715: 0x0040, 0x1716: 0x0040, 0x1717: 0x0040, 0x1718: 0x0040, 0x1719: 0x0018, 0x171a: 0x0018, 0x171b: 0x0018, 0x171c: 0x0018, 0x171d: 0x0040, 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0x0040, 0x1721: 0x0040, 0x1722: 0x0040, 0x1723: 0x0040, 0x1724: 0x0040, 0x1725: 0x0040, 0x1726: 0x0040, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0418, 0x172a: 0x0418, 0x172b: 0x0418, 0x172c: 0x0418, 0x172d: 0x0218, 
0x172e: 0x0218, 0x172f: 0x0018, 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, // Block 0x5d, offset 0x1740 0x1740: 0x1308, 0x1741: 0x1308, 0x1742: 0x1008, 0x1743: 0x1008, 0x1744: 0x0040, 0x1745: 0x0008, 0x1746: 0x0008, 0x1747: 0x0008, 0x1748: 0x0008, 0x1749: 0x0008, 0x174a: 0x0008, 0x174b: 0x0008, 0x174c: 0x0008, 0x174d: 0x0040, 0x174e: 0x0040, 0x174f: 0x0008, 0x1750: 0x0008, 0x1751: 0x0040, 0x1752: 0x0040, 0x1753: 0x0008, 0x1754: 0x0008, 0x1755: 0x0008, 0x1756: 0x0008, 0x1757: 0x0008, 0x1758: 0x0008, 0x1759: 0x0008, 0x175a: 0x0008, 0x175b: 0x0008, 0x175c: 0x0008, 0x175d: 0x0008, 0x175e: 0x0008, 0x175f: 0x0008, 0x1760: 0x0008, 0x1761: 0x0008, 0x1762: 0x0008, 0x1763: 0x0008, 0x1764: 0x0008, 0x1765: 0x0008, 0x1766: 0x0008, 0x1767: 0x0008, 0x1768: 0x0008, 0x1769: 0x0040, 0x176a: 0x0008, 0x176b: 0x0008, 0x176c: 0x0008, 0x176d: 0x0008, 0x176e: 0x0008, 0x176f: 0x0008, 0x1770: 0x0008, 0x1771: 0x0040, 0x1772: 0x0008, 0x1773: 0x0008, 0x1774: 0x0040, 0x1775: 0x0008, 0x1776: 0x0008, 0x1777: 0x0008, 0x1778: 0x0008, 0x1779: 0x0008, 0x177a: 0x0040, 0x177b: 0x0040, 0x177c: 0x1308, 0x177d: 0x0008, 0x177e: 0x1008, 0x177f: 0x1008, // Block 0x5e, offset 0x1780 0x1780: 0x1308, 0x1781: 0x1008, 0x1782: 0x1008, 0x1783: 0x1008, 0x1784: 0x1008, 0x1785: 0x0040, 0x1786: 0x0040, 0x1787: 0x1008, 0x1788: 0x1008, 0x1789: 0x0040, 0x178a: 0x0040, 0x178b: 0x1008, 0x178c: 0x1008, 0x178d: 0x1808, 0x178e: 0x0040, 0x178f: 0x0040, 0x1790: 0x0008, 0x1791: 0x0040, 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x1008, 0x1798: 0x0040, 0x1799: 0x0040, 0x179a: 0x0040, 0x179b: 0x0040, 0x179c: 0x0040, 0x179d: 0x0008, 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x1008, 0x17a3: 0x1008, 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x1308, 0x17a7: 0x1308, 0x17a8: 0x1308, 0x17a9: 0x1308, 0x17aa: 0x1308, 0x17ab: 0x1308, 0x17ac: 0x1308, 0x17ad: 0x0040, 0x17ae: 0x0040, 0x17af: 0x0040, 0x17b0: 0x1308, 0x17b1: 0x1308, 0x17b2: 0x1308, 0x17b3: 0x1308, 0x17b4: 0x1308, 0x17b5: 0x0040, 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, // Block 0x5f, offset 0x17c0 0x17c0: 0x0039, 0x17c1: 0x0ee9, 0x17c2: 0x1159, 0x17c3: 0x0ef9, 0x17c4: 0x0f09, 0x17c5: 0x1199, 0x17c6: 0x0f31, 0x17c7: 0x0249, 0x17c8: 0x0f41, 0x17c9: 0x0259, 0x17ca: 0x0f51, 0x17cb: 0x0359, 0x17cc: 0x0f61, 0x17cd: 0x0f71, 0x17ce: 0x00d9, 0x17cf: 0x0f99, 0x17d0: 0x2039, 0x17d1: 0x0269, 0x17d2: 0x01d9, 0x17d3: 0x0fa9, 0x17d4: 0x0fb9, 0x17d5: 0x1089, 0x17d6: 0x0279, 0x17d7: 0x0369, 0x17d8: 0x0289, 0x17d9: 0x13d1, 0x17da: 0x0039, 0x17db: 0x0ee9, 0x17dc: 0x1159, 0x17dd: 0x0ef9, 0x17de: 0x0f09, 0x17df: 0x1199, 0x17e0: 0x0f31, 0x17e1: 0x0249, 0x17e2: 0x0f41, 0x17e3: 0x0259, 0x17e4: 0x0f51, 0x17e5: 0x0359, 0x17e6: 0x0f61, 0x17e7: 0x0f71, 0x17e8: 0x00d9, 0x17e9: 0x0f99, 0x17ea: 0x2039, 0x17eb: 0x0269, 0x17ec: 0x01d9, 0x17ed: 0x0fa9, 0x17ee: 0x0fb9, 0x17ef: 0x1089, 0x17f0: 0x0279, 0x17f1: 0x0369, 0x17f2: 0x0289, 0x17f3: 0x13d1, 0x17f4: 0x0039, 0x17f5: 0x0ee9, 0x17f6: 0x1159, 0x17f7: 0x0ef9, 0x17f8: 0x0f09, 0x17f9: 0x1199, 0x17fa: 0x0f31, 0x17fb: 0x0249, 0x17fc: 0x0f41, 0x17fd: 0x0259, 0x17fe: 0x0f51, 0x17ff: 0x0359, // Block 0x60, offset 0x1800 0x1800: 0x0f61, 0x1801: 0x0f71, 0x1802: 0x00d9, 0x1803: 0x0f99, 0x1804: 
0x2039, 0x1805: 0x0269, 0x1806: 0x01d9, 0x1807: 0x0fa9, 0x1808: 0x0fb9, 0x1809: 0x1089, 0x180a: 0x0279, 0x180b: 0x0369, 0x180c: 0x0289, 0x180d: 0x13d1, 0x180e: 0x0039, 0x180f: 0x0ee9, 0x1810: 0x1159, 0x1811: 0x0ef9, 0x1812: 0x0f09, 0x1813: 0x1199, 0x1814: 0x0f31, 0x1815: 0x0040, 0x1816: 0x0f41, 0x1817: 0x0259, 0x1818: 0x0f51, 0x1819: 0x0359, 0x181a: 0x0f61, 0x181b: 0x0f71, 0x181c: 0x00d9, 0x181d: 0x0f99, 0x181e: 0x2039, 0x181f: 0x0269, 0x1820: 0x01d9, 0x1821: 0x0fa9, 0x1822: 0x0fb9, 0x1823: 0x1089, 0x1824: 0x0279, 0x1825: 0x0369, 0x1826: 0x0289, 0x1827: 0x13d1, 0x1828: 0x0039, 0x1829: 0x0ee9, 0x182a: 0x1159, 0x182b: 0x0ef9, 0x182c: 0x0f09, 0x182d: 0x1199, 0x182e: 0x0f31, 0x182f: 0x0249, 0x1830: 0x0f41, 0x1831: 0x0259, 0x1832: 0x0f51, 0x1833: 0x0359, 0x1834: 0x0f61, 0x1835: 0x0f71, 0x1836: 0x00d9, 0x1837: 0x0f99, 0x1838: 0x2039, 0x1839: 0x0269, 0x183a: 0x01d9, 0x183b: 0x0fa9, 0x183c: 0x0fb9, 0x183d: 0x1089, 0x183e: 0x0279, 0x183f: 0x0369, // Block 0x61, offset 0x1840 0x1840: 0x0289, 0x1841: 0x13d1, 0x1842: 0x0039, 0x1843: 0x0ee9, 0x1844: 0x1159, 0x1845: 0x0ef9, 0x1846: 0x0f09, 0x1847: 0x1199, 0x1848: 0x0f31, 0x1849: 0x0249, 0x184a: 0x0f41, 0x184b: 0x0259, 0x184c: 0x0f51, 0x184d: 0x0359, 0x184e: 0x0f61, 0x184f: 0x0f71, 0x1850: 0x00d9, 0x1851: 0x0f99, 0x1852: 0x2039, 0x1853: 0x0269, 0x1854: 0x01d9, 0x1855: 0x0fa9, 0x1856: 0x0fb9, 0x1857: 0x1089, 0x1858: 0x0279, 0x1859: 0x0369, 0x185a: 0x0289, 0x185b: 0x13d1, 0x185c: 0x0039, 0x185d: 0x0040, 0x185e: 0x1159, 0x185f: 0x0ef9, 0x1860: 0x0040, 0x1861: 0x0040, 0x1862: 0x0f31, 0x1863: 0x0040, 0x1864: 0x0040, 0x1865: 0x0259, 0x1866: 0x0f51, 0x1867: 0x0040, 0x1868: 0x0040, 0x1869: 0x0f71, 0x186a: 0x00d9, 0x186b: 0x0f99, 0x186c: 0x2039, 0x186d: 0x0040, 0x186e: 0x01d9, 0x186f: 0x0fa9, 0x1870: 0x0fb9, 0x1871: 0x1089, 0x1872: 0x0279, 0x1873: 0x0369, 0x1874: 0x0289, 0x1875: 0x13d1, 0x1876: 0x0039, 0x1877: 0x0ee9, 0x1878: 0x1159, 0x1879: 0x0ef9, 0x187a: 0x0040, 0x187b: 0x1199, 0x187c: 0x0040, 0x187d: 0x0249, 0x187e: 0x0f41, 0x187f: 0x0259, // Block 0x62, offset 0x1880 0x1880: 0x0f51, 0x1881: 0x0359, 0x1882: 0x0f61, 0x1883: 0x0f71, 0x1884: 0x0040, 0x1885: 0x0f99, 0x1886: 0x2039, 0x1887: 0x0269, 0x1888: 0x01d9, 0x1889: 0x0fa9, 0x188a: 0x0fb9, 0x188b: 0x1089, 0x188c: 0x0279, 0x188d: 0x0369, 0x188e: 0x0289, 0x188f: 0x13d1, 0x1890: 0x0039, 0x1891: 0x0ee9, 0x1892: 0x1159, 0x1893: 0x0ef9, 0x1894: 0x0f09, 0x1895: 0x1199, 0x1896: 0x0f31, 0x1897: 0x0249, 0x1898: 0x0f41, 0x1899: 0x0259, 0x189a: 0x0f51, 0x189b: 0x0359, 0x189c: 0x0f61, 0x189d: 0x0f71, 0x189e: 0x00d9, 0x189f: 0x0f99, 0x18a0: 0x2039, 0x18a1: 0x0269, 0x18a2: 0x01d9, 0x18a3: 0x0fa9, 0x18a4: 0x0fb9, 0x18a5: 0x1089, 0x18a6: 0x0279, 0x18a7: 0x0369, 0x18a8: 0x0289, 0x18a9: 0x13d1, 0x18aa: 0x0039, 0x18ab: 0x0ee9, 0x18ac: 0x1159, 0x18ad: 0x0ef9, 0x18ae: 0x0f09, 0x18af: 0x1199, 0x18b0: 0x0f31, 0x18b1: 0x0249, 0x18b2: 0x0f41, 0x18b3: 0x0259, 0x18b4: 0x0f51, 0x18b5: 0x0359, 0x18b6: 0x0f61, 0x18b7: 0x0f71, 0x18b8: 0x00d9, 0x18b9: 0x0f99, 0x18ba: 0x2039, 0x18bb: 0x0269, 0x18bc: 0x01d9, 0x18bd: 0x0fa9, 0x18be: 0x0fb9, 0x18bf: 0x1089, // Block 0x63, offset 0x18c0 0x18c0: 0x0279, 0x18c1: 0x0369, 0x18c2: 0x0289, 0x18c3: 0x13d1, 0x18c4: 0x0039, 0x18c5: 0x0ee9, 0x18c6: 0x0040, 0x18c7: 0x0ef9, 0x18c8: 0x0f09, 0x18c9: 0x1199, 0x18ca: 0x0f31, 0x18cb: 0x0040, 0x18cc: 0x0040, 0x18cd: 0x0259, 0x18ce: 0x0f51, 0x18cf: 0x0359, 0x18d0: 0x0f61, 0x18d1: 0x0f71, 0x18d2: 0x00d9, 0x18d3: 0x0f99, 0x18d4: 0x2039, 0x18d5: 0x0040, 0x18d6: 0x01d9, 0x18d7: 0x0fa9, 0x18d8: 0x0fb9, 0x18d9: 0x1089, 0x18da: 0x0279, 0x18db: 0x0369, 0x18dc: 0x0289, 
0x18dd: 0x0040, 0x18de: 0x0039, 0x18df: 0x0ee9, 0x18e0: 0x1159, 0x18e1: 0x0ef9, 0x18e2: 0x0f09, 0x18e3: 0x1199, 0x18e4: 0x0f31, 0x18e5: 0x0249, 0x18e6: 0x0f41, 0x18e7: 0x0259, 0x18e8: 0x0f51, 0x18e9: 0x0359, 0x18ea: 0x0f61, 0x18eb: 0x0f71, 0x18ec: 0x00d9, 0x18ed: 0x0f99, 0x18ee: 0x2039, 0x18ef: 0x0269, 0x18f0: 0x01d9, 0x18f1: 0x0fa9, 0x18f2: 0x0fb9, 0x18f3: 0x1089, 0x18f4: 0x0279, 0x18f5: 0x0369, 0x18f6: 0x0289, 0x18f7: 0x13d1, 0x18f8: 0x0039, 0x18f9: 0x0ee9, 0x18fa: 0x0040, 0x18fb: 0x0ef9, 0x18fc: 0x0f09, 0x18fd: 0x1199, 0x18fe: 0x0f31, 0x18ff: 0x0040, // Block 0x64, offset 0x1900 0x1900: 0x0f41, 0x1901: 0x0259, 0x1902: 0x0f51, 0x1903: 0x0359, 0x1904: 0x0f61, 0x1905: 0x0040, 0x1906: 0x00d9, 0x1907: 0x0040, 0x1908: 0x0040, 0x1909: 0x0040, 0x190a: 0x01d9, 0x190b: 0x0fa9, 0x190c: 0x0fb9, 0x190d: 0x1089, 0x190e: 0x0279, 0x190f: 0x0369, 0x1910: 0x0289, 0x1911: 0x0040, 0x1912: 0x0039, 0x1913: 0x0ee9, 0x1914: 0x1159, 0x1915: 0x0ef9, 0x1916: 0x0f09, 0x1917: 0x1199, 0x1918: 0x0f31, 0x1919: 0x0249, 0x191a: 0x0f41, 0x191b: 0x0259, 0x191c: 0x0f51, 0x191d: 0x0359, 0x191e: 0x0f61, 0x191f: 0x0f71, 0x1920: 0x00d9, 0x1921: 0x0f99, 0x1922: 0x2039, 0x1923: 0x0269, 0x1924: 0x01d9, 0x1925: 0x0fa9, 0x1926: 0x0fb9, 0x1927: 0x1089, 0x1928: 0x0279, 0x1929: 0x0369, 0x192a: 0x0289, 0x192b: 0x13d1, 0x192c: 0x0039, 0x192d: 0x0ee9, 0x192e: 0x1159, 0x192f: 0x0ef9, 0x1930: 0x0f09, 0x1931: 0x1199, 0x1932: 0x0f31, 0x1933: 0x0249, 0x1934: 0x0f41, 0x1935: 0x0259, 0x1936: 0x0f51, 0x1937: 0x0359, 0x1938: 0x0f61, 0x1939: 0x0f71, 0x193a: 0x00d9, 0x193b: 0x0f99, 0x193c: 0x2039, 0x193d: 0x0269, 0x193e: 0x01d9, 0x193f: 0x0fa9, // Block 0x65, offset 0x1940 0x1940: 0x0fb9, 0x1941: 0x1089, 0x1942: 0x0279, 0x1943: 0x0369, 0x1944: 0x0289, 0x1945: 0x13d1, 0x1946: 0x0039, 0x1947: 0x0ee9, 0x1948: 0x1159, 0x1949: 0x0ef9, 0x194a: 0x0f09, 0x194b: 0x1199, 0x194c: 0x0f31, 0x194d: 0x0249, 0x194e: 0x0f41, 0x194f: 0x0259, 0x1950: 0x0f51, 0x1951: 0x0359, 0x1952: 0x0f61, 0x1953: 0x0f71, 0x1954: 0x00d9, 0x1955: 0x0f99, 0x1956: 0x2039, 0x1957: 0x0269, 0x1958: 0x01d9, 0x1959: 0x0fa9, 0x195a: 0x0fb9, 0x195b: 0x1089, 0x195c: 0x0279, 0x195d: 0x0369, 0x195e: 0x0289, 0x195f: 0x13d1, 0x1960: 0x0039, 0x1961: 0x0ee9, 0x1962: 0x1159, 0x1963: 0x0ef9, 0x1964: 0x0f09, 0x1965: 0x1199, 0x1966: 0x0f31, 0x1967: 0x0249, 0x1968: 0x0f41, 0x1969: 0x0259, 0x196a: 0x0f51, 0x196b: 0x0359, 0x196c: 0x0f61, 0x196d: 0x0f71, 0x196e: 0x00d9, 0x196f: 0x0f99, 0x1970: 0x2039, 0x1971: 0x0269, 0x1972: 0x01d9, 0x1973: 0x0fa9, 0x1974: 0x0fb9, 0x1975: 0x1089, 0x1976: 0x0279, 0x1977: 0x0369, 0x1978: 0x0289, 0x1979: 0x13d1, 0x197a: 0x0039, 0x197b: 0x0ee9, 0x197c: 0x1159, 0x197d: 0x0ef9, 0x197e: 0x0f09, 0x197f: 0x1199, // Block 0x66, offset 0x1980 0x1980: 0x0f31, 0x1981: 0x0249, 0x1982: 0x0f41, 0x1983: 0x0259, 0x1984: 0x0f51, 0x1985: 0x0359, 0x1986: 0x0f61, 0x1987: 0x0f71, 0x1988: 0x00d9, 0x1989: 0x0f99, 0x198a: 0x2039, 0x198b: 0x0269, 0x198c: 0x01d9, 0x198d: 0x0fa9, 0x198e: 0x0fb9, 0x198f: 0x1089, 0x1990: 0x0279, 0x1991: 0x0369, 0x1992: 0x0289, 0x1993: 0x13d1, 0x1994: 0x0039, 0x1995: 0x0ee9, 0x1996: 0x1159, 0x1997: 0x0ef9, 0x1998: 0x0f09, 0x1999: 0x1199, 0x199a: 0x0f31, 0x199b: 0x0249, 0x199c: 0x0f41, 0x199d: 0x0259, 0x199e: 0x0f51, 0x199f: 0x0359, 0x19a0: 0x0f61, 0x19a1: 0x0f71, 0x19a2: 0x00d9, 0x19a3: 0x0f99, 0x19a4: 0x2039, 0x19a5: 0x0269, 0x19a6: 0x01d9, 0x19a7: 0x0fa9, 0x19a8: 0x0fb9, 0x19a9: 0x1089, 0x19aa: 0x0279, 0x19ab: 0x0369, 0x19ac: 0x0289, 0x19ad: 0x13d1, 0x19ae: 0x0039, 0x19af: 0x0ee9, 0x19b0: 0x1159, 0x19b1: 0x0ef9, 0x19b2: 0x0f09, 0x19b3: 0x1199, 0x19b4: 0x0f31, 0x19b5: 
0x0249, 0x19b6: 0x0f41, 0x19b7: 0x0259, 0x19b8: 0x0f51, 0x19b9: 0x0359, 0x19ba: 0x0f61, 0x19bb: 0x0f71, 0x19bc: 0x00d9, 0x19bd: 0x0f99, 0x19be: 0x2039, 0x19bf: 0x0269, // Block 0x67, offset 0x19c0 0x19c0: 0x01d9, 0x19c1: 0x0fa9, 0x19c2: 0x0fb9, 0x19c3: 0x1089, 0x19c4: 0x0279, 0x19c5: 0x0369, 0x19c6: 0x0289, 0x19c7: 0x13d1, 0x19c8: 0x0039, 0x19c9: 0x0ee9, 0x19ca: 0x1159, 0x19cb: 0x0ef9, 0x19cc: 0x0f09, 0x19cd: 0x1199, 0x19ce: 0x0f31, 0x19cf: 0x0249, 0x19d0: 0x0f41, 0x19d1: 0x0259, 0x19d2: 0x0f51, 0x19d3: 0x0359, 0x19d4: 0x0f61, 0x19d5: 0x0f71, 0x19d6: 0x00d9, 0x19d7: 0x0f99, 0x19d8: 0x2039, 0x19d9: 0x0269, 0x19da: 0x01d9, 0x19db: 0x0fa9, 0x19dc: 0x0fb9, 0x19dd: 0x1089, 0x19de: 0x0279, 0x19df: 0x0369, 0x19e0: 0x0289, 0x19e1: 0x13d1, 0x19e2: 0x0039, 0x19e3: 0x0ee9, 0x19e4: 0x1159, 0x19e5: 0x0ef9, 0x19e6: 0x0f09, 0x19e7: 0x1199, 0x19e8: 0x0f31, 0x19e9: 0x0249, 0x19ea: 0x0f41, 0x19eb: 0x0259, 0x19ec: 0x0f51, 0x19ed: 0x0359, 0x19ee: 0x0f61, 0x19ef: 0x0f71, 0x19f0: 0x00d9, 0x19f1: 0x0f99, 0x19f2: 0x2039, 0x19f3: 0x0269, 0x19f4: 0x01d9, 0x19f5: 0x0fa9, 0x19f6: 0x0fb9, 0x19f7: 0x1089, 0x19f8: 0x0279, 0x19f9: 0x0369, 0x19fa: 0x0289, 0x19fb: 0x13d1, 0x19fc: 0x0039, 0x19fd: 0x0ee9, 0x19fe: 0x1159, 0x19ff: 0x0ef9, // Block 0x68, offset 0x1a00 0x1a00: 0x0f09, 0x1a01: 0x1199, 0x1a02: 0x0f31, 0x1a03: 0x0249, 0x1a04: 0x0f41, 0x1a05: 0x0259, 0x1a06: 0x0f51, 0x1a07: 0x0359, 0x1a08: 0x0f61, 0x1a09: 0x0f71, 0x1a0a: 0x00d9, 0x1a0b: 0x0f99, 0x1a0c: 0x2039, 0x1a0d: 0x0269, 0x1a0e: 0x01d9, 0x1a0f: 0x0fa9, 0x1a10: 0x0fb9, 0x1a11: 0x1089, 0x1a12: 0x0279, 0x1a13: 0x0369, 0x1a14: 0x0289, 0x1a15: 0x13d1, 0x1a16: 0x0039, 0x1a17: 0x0ee9, 0x1a18: 0x1159, 0x1a19: 0x0ef9, 0x1a1a: 0x0f09, 0x1a1b: 0x1199, 0x1a1c: 0x0f31, 0x1a1d: 0x0249, 0x1a1e: 0x0f41, 0x1a1f: 0x0259, 0x1a20: 0x0f51, 0x1a21: 0x0359, 0x1a22: 0x0f61, 0x1a23: 0x0f71, 0x1a24: 0x00d9, 0x1a25: 0x0f99, 0x1a26: 0x2039, 0x1a27: 0x0269, 0x1a28: 0x01d9, 0x1a29: 0x0fa9, 0x1a2a: 0x0fb9, 0x1a2b: 0x1089, 0x1a2c: 0x0279, 0x1a2d: 0x0369, 0x1a2e: 0x0289, 0x1a2f: 0x13d1, 0x1a30: 0x0039, 0x1a31: 0x0ee9, 0x1a32: 0x1159, 0x1a33: 0x0ef9, 0x1a34: 0x0f09, 0x1a35: 0x1199, 0x1a36: 0x0f31, 0x1a37: 0x0249, 0x1a38: 0x0f41, 0x1a39: 0x0259, 0x1a3a: 0x0f51, 0x1a3b: 0x0359, 0x1a3c: 0x0f61, 0x1a3d: 0x0f71, 0x1a3e: 0x00d9, 0x1a3f: 0x0f99, // Block 0x69, offset 0x1a40 0x1a40: 0x2039, 0x1a41: 0x0269, 0x1a42: 0x01d9, 0x1a43: 0x0fa9, 0x1a44: 0x0fb9, 0x1a45: 0x1089, 0x1a46: 0x0279, 0x1a47: 0x0369, 0x1a48: 0x0289, 0x1a49: 0x13d1, 0x1a4a: 0x0039, 0x1a4b: 0x0ee9, 0x1a4c: 0x1159, 0x1a4d: 0x0ef9, 0x1a4e: 0x0f09, 0x1a4f: 0x1199, 0x1a50: 0x0f31, 0x1a51: 0x0249, 0x1a52: 0x0f41, 0x1a53: 0x0259, 0x1a54: 0x0f51, 0x1a55: 0x0359, 0x1a56: 0x0f61, 0x1a57: 0x0f71, 0x1a58: 0x00d9, 0x1a59: 0x0f99, 0x1a5a: 0x2039, 0x1a5b: 0x0269, 0x1a5c: 0x01d9, 0x1a5d: 0x0fa9, 0x1a5e: 0x0fb9, 0x1a5f: 0x1089, 0x1a60: 0x0279, 0x1a61: 0x0369, 0x1a62: 0x0289, 0x1a63: 0x13d1, 0x1a64: 0xba81, 0x1a65: 0xba99, 0x1a66: 0x0040, 0x1a67: 0x0040, 0x1a68: 0xbab1, 0x1a69: 0x1099, 0x1a6a: 0x10b1, 0x1a6b: 0x10c9, 0x1a6c: 0xbac9, 0x1a6d: 0xbae1, 0x1a6e: 0xbaf9, 0x1a6f: 0x1429, 0x1a70: 0x1a31, 0x1a71: 0xbb11, 0x1a72: 0xbb29, 0x1a73: 0xbb41, 0x1a74: 0xbb59, 0x1a75: 0xbb71, 0x1a76: 0xbb89, 0x1a77: 0x2109, 0x1a78: 0x1111, 0x1a79: 0x1429, 0x1a7a: 0xbba1, 0x1a7b: 0xbbb9, 0x1a7c: 0xbbd1, 0x1a7d: 0x10e1, 0x1a7e: 0x10f9, 0x1a7f: 0xbbe9, // Block 0x6a, offset 0x1a80 0x1a80: 0x2079, 0x1a81: 0xbc01, 0x1a82: 0xbab1, 0x1a83: 0x1099, 0x1a84: 0x10b1, 0x1a85: 0x10c9, 0x1a86: 0xbac9, 0x1a87: 0xbae1, 0x1a88: 0xbaf9, 0x1a89: 0x1429, 0x1a8a: 0x1a31, 0x1a8b: 0xbb11, 
0x1a8c: 0xbb29, 0x1a8d: 0xbb41, 0x1a8e: 0xbb59, 0x1a8f: 0xbb71, 0x1a90: 0xbb89, 0x1a91: 0x2109, 0x1a92: 0x1111, 0x1a93: 0xbba1, 0x1a94: 0xbba1, 0x1a95: 0xbbb9, 0x1a96: 0xbbd1, 0x1a97: 0x10e1, 0x1a98: 0x10f9, 0x1a99: 0xbbe9, 0x1a9a: 0x2079, 0x1a9b: 0xbc21, 0x1a9c: 0xbac9, 0x1a9d: 0x1429, 0x1a9e: 0xbb11, 0x1a9f: 0x10e1, 0x1aa0: 0x1111, 0x1aa1: 0x2109, 0x1aa2: 0xbab1, 0x1aa3: 0x1099, 0x1aa4: 0x10b1, 0x1aa5: 0x10c9, 0x1aa6: 0xbac9, 0x1aa7: 0xbae1, 0x1aa8: 0xbaf9, 0x1aa9: 0x1429, 0x1aaa: 0x1a31, 0x1aab: 0xbb11, 0x1aac: 0xbb29, 0x1aad: 0xbb41, 0x1aae: 0xbb59, 0x1aaf: 0xbb71, 0x1ab0: 0xbb89, 0x1ab1: 0x2109, 0x1ab2: 0x1111, 0x1ab3: 0x1429, 0x1ab4: 0xbba1, 0x1ab5: 0xbbb9, 0x1ab6: 0xbbd1, 0x1ab7: 0x10e1, 0x1ab8: 0x10f9, 0x1ab9: 0xbbe9, 0x1aba: 0x2079, 0x1abb: 0xbc01, 0x1abc: 0xbab1, 0x1abd: 0x1099, 0x1abe: 0x10b1, 0x1abf: 0x10c9, // Block 0x6b, offset 0x1ac0 0x1ac0: 0xbac9, 0x1ac1: 0xbae1, 0x1ac2: 0xbaf9, 0x1ac3: 0x1429, 0x1ac4: 0x1a31, 0x1ac5: 0xbb11, 0x1ac6: 0xbb29, 0x1ac7: 0xbb41, 0x1ac8: 0xbb59, 0x1ac9: 0xbb71, 0x1aca: 0xbb89, 0x1acb: 0x2109, 0x1acc: 0x1111, 0x1acd: 0xbba1, 0x1ace: 0xbba1, 0x1acf: 0xbbb9, 0x1ad0: 0xbbd1, 0x1ad1: 0x10e1, 0x1ad2: 0x10f9, 0x1ad3: 0xbbe9, 0x1ad4: 0x2079, 0x1ad5: 0xbc21, 0x1ad6: 0xbac9, 0x1ad7: 0x1429, 0x1ad8: 0xbb11, 0x1ad9: 0x10e1, 0x1ada: 0x1111, 0x1adb: 0x2109, 0x1adc: 0xbab1, 0x1add: 0x1099, 0x1ade: 0x10b1, 0x1adf: 0x10c9, 0x1ae0: 0xbac9, 0x1ae1: 0xbae1, 0x1ae2: 0xbaf9, 0x1ae3: 0x1429, 0x1ae4: 0x1a31, 0x1ae5: 0xbb11, 0x1ae6: 0xbb29, 0x1ae7: 0xbb41, 0x1ae8: 0xbb59, 0x1ae9: 0xbb71, 0x1aea: 0xbb89, 0x1aeb: 0x2109, 0x1aec: 0x1111, 0x1aed: 0x1429, 0x1aee: 0xbba1, 0x1aef: 0xbbb9, 0x1af0: 0xbbd1, 0x1af1: 0x10e1, 0x1af2: 0x10f9, 0x1af3: 0xbbe9, 0x1af4: 0x2079, 0x1af5: 0xbc01, 0x1af6: 0xbab1, 0x1af7: 0x1099, 0x1af8: 0x10b1, 0x1af9: 0x10c9, 0x1afa: 0xbac9, 0x1afb: 0xbae1, 0x1afc: 0xbaf9, 0x1afd: 0x1429, 0x1afe: 0x1a31, 0x1aff: 0xbb11, // Block 0x6c, offset 0x1b00 0x1b00: 0xbb29, 0x1b01: 0xbb41, 0x1b02: 0xbb59, 0x1b03: 0xbb71, 0x1b04: 0xbb89, 0x1b05: 0x2109, 0x1b06: 0x1111, 0x1b07: 0xbba1, 0x1b08: 0xbba1, 0x1b09: 0xbbb9, 0x1b0a: 0xbbd1, 0x1b0b: 0x10e1, 0x1b0c: 0x10f9, 0x1b0d: 0xbbe9, 0x1b0e: 0x2079, 0x1b0f: 0xbc21, 0x1b10: 0xbac9, 0x1b11: 0x1429, 0x1b12: 0xbb11, 0x1b13: 0x10e1, 0x1b14: 0x1111, 0x1b15: 0x2109, 0x1b16: 0xbab1, 0x1b17: 0x1099, 0x1b18: 0x10b1, 0x1b19: 0x10c9, 0x1b1a: 0xbac9, 0x1b1b: 0xbae1, 0x1b1c: 0xbaf9, 0x1b1d: 0x1429, 0x1b1e: 0x1a31, 0x1b1f: 0xbb11, 0x1b20: 0xbb29, 0x1b21: 0xbb41, 0x1b22: 0xbb59, 0x1b23: 0xbb71, 0x1b24: 0xbb89, 0x1b25: 0x2109, 0x1b26: 0x1111, 0x1b27: 0x1429, 0x1b28: 0xbba1, 0x1b29: 0xbbb9, 0x1b2a: 0xbbd1, 0x1b2b: 0x10e1, 0x1b2c: 0x10f9, 0x1b2d: 0xbbe9, 0x1b2e: 0x2079, 0x1b2f: 0xbc01, 0x1b30: 0xbab1, 0x1b31: 0x1099, 0x1b32: 0x10b1, 0x1b33: 0x10c9, 0x1b34: 0xbac9, 0x1b35: 0xbae1, 0x1b36: 0xbaf9, 0x1b37: 0x1429, 0x1b38: 0x1a31, 0x1b39: 0xbb11, 0x1b3a: 0xbb29, 0x1b3b: 0xbb41, 0x1b3c: 0xbb59, 0x1b3d: 0xbb71, 0x1b3e: 0xbb89, 0x1b3f: 0x2109, // Block 0x6d, offset 0x1b40 0x1b40: 0x1111, 0x1b41: 0xbba1, 0x1b42: 0xbba1, 0x1b43: 0xbbb9, 0x1b44: 0xbbd1, 0x1b45: 0x10e1, 0x1b46: 0x10f9, 0x1b47: 0xbbe9, 0x1b48: 0x2079, 0x1b49: 0xbc21, 0x1b4a: 0xbac9, 0x1b4b: 0x1429, 0x1b4c: 0xbb11, 0x1b4d: 0x10e1, 0x1b4e: 0x1111, 0x1b4f: 0x2109, 0x1b50: 0xbab1, 0x1b51: 0x1099, 0x1b52: 0x10b1, 0x1b53: 0x10c9, 0x1b54: 0xbac9, 0x1b55: 0xbae1, 0x1b56: 0xbaf9, 0x1b57: 0x1429, 0x1b58: 0x1a31, 0x1b59: 0xbb11, 0x1b5a: 0xbb29, 0x1b5b: 0xbb41, 0x1b5c: 0xbb59, 0x1b5d: 0xbb71, 0x1b5e: 0xbb89, 0x1b5f: 0x2109, 0x1b60: 0x1111, 0x1b61: 0x1429, 0x1b62: 0xbba1, 0x1b63: 0xbbb9, 0x1b64: 
0xbbd1, 0x1b65: 0x10e1, 0x1b66: 0x10f9, 0x1b67: 0xbbe9, 0x1b68: 0x2079, 0x1b69: 0xbc01, 0x1b6a: 0xbab1, 0x1b6b: 0x1099, 0x1b6c: 0x10b1, 0x1b6d: 0x10c9, 0x1b6e: 0xbac9, 0x1b6f: 0xbae1, 0x1b70: 0xbaf9, 0x1b71: 0x1429, 0x1b72: 0x1a31, 0x1b73: 0xbb11, 0x1b74: 0xbb29, 0x1b75: 0xbb41, 0x1b76: 0xbb59, 0x1b77: 0xbb71, 0x1b78: 0xbb89, 0x1b79: 0x2109, 0x1b7a: 0x1111, 0x1b7b: 0xbba1, 0x1b7c: 0xbba1, 0x1b7d: 0xbbb9, 0x1b7e: 0xbbd1, 0x1b7f: 0x10e1, // Block 0x6e, offset 0x1b80 0x1b80: 0x10f9, 0x1b81: 0xbbe9, 0x1b82: 0x2079, 0x1b83: 0xbc21, 0x1b84: 0xbac9, 0x1b85: 0x1429, 0x1b86: 0xbb11, 0x1b87: 0x10e1, 0x1b88: 0x1111, 0x1b89: 0x2109, 0x1b8a: 0xbc41, 0x1b8b: 0xbc41, 0x1b8c: 0x0040, 0x1b8d: 0x0040, 0x1b8e: 0x1f41, 0x1b8f: 0x00c9, 0x1b90: 0x0069, 0x1b91: 0x0079, 0x1b92: 0x1f51, 0x1b93: 0x1f61, 0x1b94: 0x1f71, 0x1b95: 0x1f81, 0x1b96: 0x1f91, 0x1b97: 0x1fa1, 0x1b98: 0x1f41, 0x1b99: 0x00c9, 0x1b9a: 0x0069, 0x1b9b: 0x0079, 0x1b9c: 0x1f51, 0x1b9d: 0x1f61, 0x1b9e: 0x1f71, 0x1b9f: 0x1f81, 0x1ba0: 0x1f91, 0x1ba1: 0x1fa1, 0x1ba2: 0x1f41, 0x1ba3: 0x00c9, 0x1ba4: 0x0069, 0x1ba5: 0x0079, 0x1ba6: 0x1f51, 0x1ba7: 0x1f61, 0x1ba8: 0x1f71, 0x1ba9: 0x1f81, 0x1baa: 0x1f91, 0x1bab: 0x1fa1, 0x1bac: 0x1f41, 0x1bad: 0x00c9, 0x1bae: 0x0069, 0x1baf: 0x0079, 0x1bb0: 0x1f51, 0x1bb1: 0x1f61, 0x1bb2: 0x1f71, 0x1bb3: 0x1f81, 0x1bb4: 0x1f91, 0x1bb5: 0x1fa1, 0x1bb6: 0x1f41, 0x1bb7: 0x00c9, 0x1bb8: 0x0069, 0x1bb9: 0x0079, 0x1bba: 0x1f51, 0x1bbb: 0x1f61, 0x1bbc: 0x1f71, 0x1bbd: 0x1f81, 0x1bbe: 0x1f91, 0x1bbf: 0x1fa1, // Block 0x6f, offset 0x1bc0 0x1bc0: 0xe115, 0x1bc1: 0xe115, 0x1bc2: 0xe135, 0x1bc3: 0xe135, 0x1bc4: 0xe115, 0x1bc5: 0xe115, 0x1bc6: 0xe175, 0x1bc7: 0xe175, 0x1bc8: 0xe115, 0x1bc9: 0xe115, 0x1bca: 0xe135, 0x1bcb: 0xe135, 0x1bcc: 0xe115, 0x1bcd: 0xe115, 0x1bce: 0xe1f5, 0x1bcf: 0xe1f5, 0x1bd0: 0xe115, 0x1bd1: 0xe115, 0x1bd2: 0xe135, 0x1bd3: 0xe135, 0x1bd4: 0xe115, 0x1bd5: 0xe115, 0x1bd6: 0xe175, 0x1bd7: 0xe175, 0x1bd8: 0xe115, 0x1bd9: 0xe115, 0x1bda: 0xe135, 0x1bdb: 0xe135, 0x1bdc: 0xe115, 0x1bdd: 0xe115, 0x1bde: 0x8b05, 0x1bdf: 0x8b05, 0x1be0: 0x04b5, 0x1be1: 0x04b5, 0x1be2: 0x0208, 0x1be3: 0x0208, 0x1be4: 0x0208, 0x1be5: 0x0208, 0x1be6: 0x0208, 0x1be7: 0x0208, 0x1be8: 0x0208, 0x1be9: 0x0208, 0x1bea: 0x0208, 0x1beb: 0x0208, 0x1bec: 0x0208, 0x1bed: 0x0208, 0x1bee: 0x0208, 0x1bef: 0x0208, 0x1bf0: 0x0208, 0x1bf1: 0x0208, 0x1bf2: 0x0208, 0x1bf3: 0x0208, 0x1bf4: 0x0208, 0x1bf5: 0x0208, 0x1bf6: 0x0208, 0x1bf7: 0x0208, 0x1bf8: 0x0208, 0x1bf9: 0x0208, 0x1bfa: 0x0208, 0x1bfb: 0x0208, 0x1bfc: 0x0208, 0x1bfd: 0x0208, 0x1bfe: 0x0208, 0x1bff: 0x0208, // Block 0x70, offset 0x1c00 0x1c00: 0xb189, 0x1c01: 0xb1a1, 0x1c02: 0xb201, 0x1c03: 0xb249, 0x1c04: 0x0040, 0x1c05: 0xb411, 0x1c06: 0xb291, 0x1c07: 0xb219, 0x1c08: 0xb309, 0x1c09: 0xb429, 0x1c0a: 0xb399, 0x1c0b: 0xb3b1, 0x1c0c: 0xb3c9, 0x1c0d: 0xb3e1, 0x1c0e: 0xb2a9, 0x1c0f: 0xb339, 0x1c10: 0xb369, 0x1c11: 0xb2d9, 0x1c12: 0xb381, 0x1c13: 0xb279, 0x1c14: 0xb2c1, 0x1c15: 0xb1d1, 0x1c16: 0xb1e9, 0x1c17: 0xb231, 0x1c18: 0xb261, 0x1c19: 0xb2f1, 0x1c1a: 0xb321, 0x1c1b: 0xb351, 0x1c1c: 0xbc59, 0x1c1d: 0x7949, 0x1c1e: 0xbc71, 0x1c1f: 0xbc89, 0x1c20: 0x0040, 0x1c21: 0xb1a1, 0x1c22: 0xb201, 0x1c23: 0x0040, 0x1c24: 0xb3f9, 0x1c25: 0x0040, 0x1c26: 0x0040, 0x1c27: 0xb219, 0x1c28: 0x0040, 0x1c29: 0xb429, 0x1c2a: 0xb399, 0x1c2b: 0xb3b1, 0x1c2c: 0xb3c9, 0x1c2d: 0xb3e1, 0x1c2e: 0xb2a9, 0x1c2f: 0xb339, 0x1c30: 0xb369, 0x1c31: 0xb2d9, 0x1c32: 0xb381, 0x1c33: 0x0040, 0x1c34: 0xb2c1, 0x1c35: 0xb1d1, 0x1c36: 0xb1e9, 0x1c37: 0xb231, 0x1c38: 0x0040, 0x1c39: 0xb2f1, 0x1c3a: 0x0040, 0x1c3b: 0xb351, 0x1c3c: 0x0040, 
0x1c3d: 0x0040, 0x1c3e: 0x0040, 0x1c3f: 0x0040, // Block 0x71, offset 0x1c40 0x1c40: 0x0040, 0x1c41: 0x0040, 0x1c42: 0xb201, 0x1c43: 0x0040, 0x1c44: 0x0040, 0x1c45: 0x0040, 0x1c46: 0x0040, 0x1c47: 0xb219, 0x1c48: 0x0040, 0x1c49: 0xb429, 0x1c4a: 0x0040, 0x1c4b: 0xb3b1, 0x1c4c: 0x0040, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0x0040, 0x1c51: 0xb2d9, 0x1c52: 0xb381, 0x1c53: 0x0040, 0x1c54: 0xb2c1, 0x1c55: 0x0040, 0x1c56: 0x0040, 0x1c57: 0xb231, 0x1c58: 0x0040, 0x1c59: 0xb2f1, 0x1c5a: 0x0040, 0x1c5b: 0xb351, 0x1c5c: 0x0040, 0x1c5d: 0x7949, 0x1c5e: 0x0040, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040, 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0xb309, 0x1c69: 0xb429, 0x1c6a: 0xb399, 0x1c6b: 0x0040, 0x1c6c: 0xb3c9, 0x1c6d: 0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339, 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1, 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0xb321, 0x1c7b: 0xb351, 0x1c7c: 0xbc59, 0x1c7d: 0x0040, 0x1c7e: 0xbc71, 0x1c7f: 0x0040, // Block 0x72, offset 0x1c80 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0xb3f9, 0x1c85: 0xb411, 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1, 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x0040, 0x1c9e: 0x0040, 0x1c9f: 0x0040, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0xb249, 0x1ca4: 0x0040, 0x1ca5: 0xb411, 0x1ca6: 0xb291, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429, 0x1caa: 0x0040, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0xb279, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0xb261, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351, 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, // Block 0x73, offset 0x1cc0 0x1cc0: 0x0040, 0x1cc1: 0xbca2, 0x1cc2: 0xbcba, 0x1cc3: 0xbcd2, 0x1cc4: 0xbcea, 0x1cc5: 0xbd02, 0x1cc6: 0xbd1a, 0x1cc7: 0xbd32, 0x1cc8: 0xbd4a, 0x1cc9: 0xbd62, 0x1cca: 0xbd7a, 0x1ccb: 0x0018, 0x1ccc: 0x0018, 0x1ccd: 0x0040, 0x1cce: 0x0040, 0x1ccf: 0x0040, 0x1cd0: 0xbd92, 0x1cd1: 0xbdb2, 0x1cd2: 0xbdd2, 0x1cd3: 0xbdf2, 0x1cd4: 0xbe12, 0x1cd5: 0xbe32, 0x1cd6: 0xbe52, 0x1cd7: 0xbe72, 0x1cd8: 0xbe92, 0x1cd9: 0xbeb2, 0x1cda: 0xbed2, 0x1cdb: 0xbef2, 0x1cdc: 0xbf12, 0x1cdd: 0xbf32, 0x1cde: 0xbf52, 0x1cdf: 0xbf72, 0x1ce0: 0xbf92, 0x1ce1: 0xbfb2, 0x1ce2: 0xbfd2, 0x1ce3: 0xbff2, 0x1ce4: 0xc012, 0x1ce5: 0xc032, 0x1ce6: 0xc052, 0x1ce7: 0xc072, 0x1ce8: 0xc092, 0x1ce9: 0xc0b2, 0x1cea: 0xc0d1, 0x1ceb: 0x1159, 0x1cec: 0x0269, 0x1ced: 0x6671, 0x1cee: 0xc111, 0x1cef: 0x0040, 0x1cf0: 0x0039, 0x1cf1: 0x0ee9, 0x1cf2: 0x1159, 0x1cf3: 0x0ef9, 0x1cf4: 0x0f09, 0x1cf5: 0x1199, 0x1cf6: 0x0f31, 0x1cf7: 0x0249, 0x1cf8: 0x0f41, 0x1cf9: 0x0259, 0x1cfa: 0x0f51, 0x1cfb: 0x0359, 0x1cfc: 0x0f61, 0x1cfd: 0x0f71, 0x1cfe: 0x00d9, 0x1cff: 0x0f99, // Block 0x74, offset 0x1d00 0x1d00: 0x2039, 0x1d01: 0x0269, 0x1d02: 0x01d9, 0x1d03: 0x0fa9, 0x1d04: 0x0fb9, 0x1d05: 0x1089, 0x1d06: 0x0279, 0x1d07: 0x0369, 0x1d08: 0x0289, 0x1d09: 0x13d1, 0x1d0a: 0xc129, 0x1d0b: 0x65b1, 0x1d0c: 0xc141, 0x1d0d: 0x1441, 0x1d0e: 0xc159, 0x1d0f: 0xc179, 0x1d10: 0x0018, 0x1d11: 0x0018, 0x1d12: 0x0018, 0x1d13: 
0x0018, 0x1d14: 0x0018, 0x1d15: 0x0018, 0x1d16: 0x0018, 0x1d17: 0x0018, 0x1d18: 0x0018, 0x1d19: 0x0018, 0x1d1a: 0x0018, 0x1d1b: 0x0018, 0x1d1c: 0x0018, 0x1d1d: 0x0018, 0x1d1e: 0x0018, 0x1d1f: 0x0018, 0x1d20: 0x0018, 0x1d21: 0x0018, 0x1d22: 0x0018, 0x1d23: 0x0018, 0x1d24: 0x0018, 0x1d25: 0x0018, 0x1d26: 0x0018, 0x1d27: 0x0018, 0x1d28: 0x0018, 0x1d29: 0x0018, 0x1d2a: 0xc191, 0x1d2b: 0xc1a9, 0x1d2c: 0x0040, 0x1d2d: 0x0040, 0x1d2e: 0x0040, 0x1d2f: 0x0040, 0x1d30: 0x0018, 0x1d31: 0x0018, 0x1d32: 0x0018, 0x1d33: 0x0018, 0x1d34: 0x0018, 0x1d35: 0x0018, 0x1d36: 0x0018, 0x1d37: 0x0018, 0x1d38: 0x0018, 0x1d39: 0x0018, 0x1d3a: 0x0018, 0x1d3b: 0x0018, 0x1d3c: 0x0018, 0x1d3d: 0x0018, 0x1d3e: 0x0018, 0x1d3f: 0x0018, // Block 0x75, offset 0x1d40 0x1d40: 0xc1d9, 0x1d41: 0xc211, 0x1d42: 0xc249, 0x1d43: 0x0040, 0x1d44: 0x0040, 0x1d45: 0x0040, 0x1d46: 0x0040, 0x1d47: 0x0040, 0x1d48: 0x0040, 0x1d49: 0x0040, 0x1d4a: 0x0040, 0x1d4b: 0x0040, 0x1d4c: 0x0040, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xc269, 0x1d51: 0xc289, 0x1d52: 0xc2a9, 0x1d53: 0xc2c9, 0x1d54: 0xc2e9, 0x1d55: 0xc309, 0x1d56: 0xc329, 0x1d57: 0xc349, 0x1d58: 0xc369, 0x1d59: 0xc389, 0x1d5a: 0xc3a9, 0x1d5b: 0xc3c9, 0x1d5c: 0xc3e9, 0x1d5d: 0xc409, 0x1d5e: 0xc429, 0x1d5f: 0xc449, 0x1d60: 0xc469, 0x1d61: 0xc489, 0x1d62: 0xc4a9, 0x1d63: 0xc4c9, 0x1d64: 0xc4e9, 0x1d65: 0xc509, 0x1d66: 0xc529, 0x1d67: 0xc549, 0x1d68: 0xc569, 0x1d69: 0xc589, 0x1d6a: 0xc5a9, 0x1d6b: 0xc5c9, 0x1d6c: 0xc5e9, 0x1d6d: 0xc609, 0x1d6e: 0xc629, 0x1d6f: 0xc649, 0x1d70: 0xc669, 0x1d71: 0xc689, 0x1d72: 0xc6a9, 0x1d73: 0xc6c9, 0x1d74: 0xc6e9, 0x1d75: 0xc709, 0x1d76: 0xc729, 0x1d77: 0xc749, 0x1d78: 0xc769, 0x1d79: 0xc789, 0x1d7a: 0xc7a9, 0x1d7b: 0xc7c9, 0x1d7c: 0x0040, 0x1d7d: 0x0040, 0x1d7e: 0x0040, 0x1d7f: 0x0040, // Block 0x76, offset 0x1d80 0x1d80: 0xcaf9, 0x1d81: 0xcb19, 0x1d82: 0xcb39, 0x1d83: 0x8b1d, 0x1d84: 0xcb59, 0x1d85: 0xcb79, 0x1d86: 0xcb99, 0x1d87: 0xcbb9, 0x1d88: 0xcbd9, 0x1d89: 0xcbf9, 0x1d8a: 0xcc19, 0x1d8b: 0xcc39, 0x1d8c: 0xcc59, 0x1d8d: 0x8b3d, 0x1d8e: 0xcc79, 0x1d8f: 0xcc99, 0x1d90: 0xccb9, 0x1d91: 0xccd9, 0x1d92: 0x8b5d, 0x1d93: 0xccf9, 0x1d94: 0xcd19, 0x1d95: 0xc429, 0x1d96: 0x8b7d, 0x1d97: 0xcd39, 0x1d98: 0xcd59, 0x1d99: 0xcd79, 0x1d9a: 0xcd99, 0x1d9b: 0xcdb9, 0x1d9c: 0x8b9d, 0x1d9d: 0xcdd9, 0x1d9e: 0xcdf9, 0x1d9f: 0xce19, 0x1da0: 0xce39, 0x1da1: 0xce59, 0x1da2: 0xc789, 0x1da3: 0xce79, 0x1da4: 0xce99, 0x1da5: 0xceb9, 0x1da6: 0xced9, 0x1da7: 0xcef9, 0x1da8: 0xcf19, 0x1da9: 0xcf39, 0x1daa: 0xcf59, 0x1dab: 0xcf79, 0x1dac: 0xcf99, 0x1dad: 0xcfb9, 0x1dae: 0xcfd9, 0x1daf: 0xcff9, 0x1db0: 0xd019, 0x1db1: 0xd039, 0x1db2: 0xd039, 0x1db3: 0xd039, 0x1db4: 0x8bbd, 0x1db5: 0xd059, 0x1db6: 0xd079, 0x1db7: 0xd099, 0x1db8: 0x8bdd, 0x1db9: 0xd0b9, 0x1dba: 0xd0d9, 0x1dbb: 0xd0f9, 0x1dbc: 0xd119, 0x1dbd: 0xd139, 0x1dbe: 0xd159, 0x1dbf: 0xd179, // Block 0x77, offset 0x1dc0 0x1dc0: 0xd199, 0x1dc1: 0xd1b9, 0x1dc2: 0xd1d9, 0x1dc3: 0xd1f9, 0x1dc4: 0xd219, 0x1dc5: 0xd239, 0x1dc6: 0xd239, 0x1dc7: 0xd259, 0x1dc8: 0xd279, 0x1dc9: 0xd299, 0x1dca: 0xd2b9, 0x1dcb: 0xd2d9, 0x1dcc: 0xd2f9, 0x1dcd: 0xd319, 0x1dce: 0xd339, 0x1dcf: 0xd359, 0x1dd0: 0xd379, 0x1dd1: 0xd399, 0x1dd2: 0xd3b9, 0x1dd3: 0xd3d9, 0x1dd4: 0xd3f9, 0x1dd5: 0xd419, 0x1dd6: 0xd439, 0x1dd7: 0xd459, 0x1dd8: 0xd479, 0x1dd9: 0x8bfd, 0x1dda: 0xd499, 0x1ddb: 0xd4b9, 0x1ddc: 0xd4d9, 0x1ddd: 0xc309, 0x1dde: 0xd4f9, 0x1ddf: 0xd519, 0x1de0: 0x8c1d, 0x1de1: 0x8c3d, 0x1de2: 0xd539, 0x1de3: 0xd559, 0x1de4: 0xd579, 0x1de5: 0xd599, 0x1de6: 0xd5b9, 0x1de7: 0xd5d9, 0x1de8: 0x0040, 0x1de9: 0xd5f9, 0x1dea: 0xd619, 0x1deb: 0xd619, 
0x1dec: 0x8c5d, 0x1ded: 0xd639, 0x1dee: 0xd659, 0x1def: 0xd679, 0x1df0: 0xd699, 0x1df1: 0x8c7d, 0x1df2: 0xd6b9, 0x1df3: 0xd6d9, 0x1df4: 0x0040, 0x1df5: 0xd6f9, 0x1df6: 0xd719, 0x1df7: 0xd739, 0x1df8: 0xd759, 0x1df9: 0xd779, 0x1dfa: 0xd799, 0x1dfb: 0x8c9d, 0x1dfc: 0xd7b9, 0x1dfd: 0x8cbd, 0x1dfe: 0xd7d9, 0x1dff: 0xd7f9, // Block 0x78, offset 0x1e00 0x1e00: 0xd819, 0x1e01: 0xd839, 0x1e02: 0xd859, 0x1e03: 0xd879, 0x1e04: 0xd899, 0x1e05: 0xd8b9, 0x1e06: 0xd8d9, 0x1e07: 0xd8f9, 0x1e08: 0xd919, 0x1e09: 0x8cdd, 0x1e0a: 0xd939, 0x1e0b: 0xd959, 0x1e0c: 0xd979, 0x1e0d: 0xd999, 0x1e0e: 0xd9b9, 0x1e0f: 0x8cfd, 0x1e10: 0xd9d9, 0x1e11: 0x8d1d, 0x1e12: 0x8d3d, 0x1e13: 0xd9f9, 0x1e14: 0xda19, 0x1e15: 0xda19, 0x1e16: 0xda39, 0x1e17: 0x8d5d, 0x1e18: 0x8d7d, 0x1e19: 0xda59, 0x1e1a: 0xda79, 0x1e1b: 0xda99, 0x1e1c: 0xdab9, 0x1e1d: 0xdad9, 0x1e1e: 0xdaf9, 0x1e1f: 0xdb19, 0x1e20: 0xdb39, 0x1e21: 0xdb59, 0x1e22: 0xdb79, 0x1e23: 0xdb99, 0x1e24: 0x8d9d, 0x1e25: 0xdbb9, 0x1e26: 0xdbd9, 0x1e27: 0xdbf9, 0x1e28: 0xdc19, 0x1e29: 0xdbf9, 0x1e2a: 0xdc39, 0x1e2b: 0xdc59, 0x1e2c: 0xdc79, 0x1e2d: 0xdc99, 0x1e2e: 0xdcb9, 0x1e2f: 0xdcd9, 0x1e30: 0xdcf9, 0x1e31: 0xdd19, 0x1e32: 0xdd39, 0x1e33: 0xdd59, 0x1e34: 0xdd79, 0x1e35: 0xdd99, 0x1e36: 0xddb9, 0x1e37: 0xddd9, 0x1e38: 0x8dbd, 0x1e39: 0xddf9, 0x1e3a: 0xde19, 0x1e3b: 0xde39, 0x1e3c: 0xde59, 0x1e3d: 0xde79, 0x1e3e: 0x8ddd, 0x1e3f: 0xde99, // Block 0x79, offset 0x1e40 0x1e40: 0xe599, 0x1e41: 0xe5b9, 0x1e42: 0xe5d9, 0x1e43: 0xe5f9, 0x1e44: 0xe619, 0x1e45: 0xe639, 0x1e46: 0x8efd, 0x1e47: 0xe659, 0x1e48: 0xe679, 0x1e49: 0xe699, 0x1e4a: 0xe6b9, 0x1e4b: 0xe6d9, 0x1e4c: 0xe6f9, 0x1e4d: 0x8f1d, 0x1e4e: 0xe719, 0x1e4f: 0xe739, 0x1e50: 0x8f3d, 0x1e51: 0x8f5d, 0x1e52: 0xe759, 0x1e53: 0xe779, 0x1e54: 0xe799, 0x1e55: 0xe7b9, 0x1e56: 0xe7d9, 0x1e57: 0xe7f9, 0x1e58: 0xe819, 0x1e59: 0xe839, 0x1e5a: 0xe859, 0x1e5b: 0x8f7d, 0x1e5c: 0xe879, 0x1e5d: 0x8f9d, 0x1e5e: 0xe899, 0x1e5f: 0x0040, 0x1e60: 0xe8b9, 0x1e61: 0xe8d9, 0x1e62: 0xe8f9, 0x1e63: 0x8fbd, 0x1e64: 0xe919, 0x1e65: 0xe939, 0x1e66: 0x8fdd, 0x1e67: 0x8ffd, 0x1e68: 0xe959, 0x1e69: 0xe979, 0x1e6a: 0xe999, 0x1e6b: 0xe9b9, 0x1e6c: 0xe9d9, 0x1e6d: 0xe9d9, 0x1e6e: 0xe9f9, 0x1e6f: 0xea19, 0x1e70: 0xea39, 0x1e71: 0xea59, 0x1e72: 0xea79, 0x1e73: 0xea99, 0x1e74: 0xeab9, 0x1e75: 0x901d, 0x1e76: 0xead9, 0x1e77: 0x903d, 0x1e78: 0xeaf9, 0x1e79: 0x905d, 0x1e7a: 0xeb19, 0x1e7b: 0x907d, 0x1e7c: 0x909d, 0x1e7d: 0x90bd, 0x1e7e: 0xeb39, 0x1e7f: 0xeb59, // Block 0x7a, offset 0x1e80 0x1e80: 0xeb79, 0x1e81: 0x90dd, 0x1e82: 0x90fd, 0x1e83: 0x911d, 0x1e84: 0x913d, 0x1e85: 0xeb99, 0x1e86: 0xebb9, 0x1e87: 0xebb9, 0x1e88: 0xebd9, 0x1e89: 0xebf9, 0x1e8a: 0xec19, 0x1e8b: 0xec39, 0x1e8c: 0xec59, 0x1e8d: 0x915d, 0x1e8e: 0xec79, 0x1e8f: 0xec99, 0x1e90: 0xecb9, 0x1e91: 0xecd9, 0x1e92: 0x917d, 0x1e93: 0xecf9, 0x1e94: 0x919d, 0x1e95: 0x91bd, 0x1e96: 0xed19, 0x1e97: 0xed39, 0x1e98: 0xed59, 0x1e99: 0xed79, 0x1e9a: 0xed99, 0x1e9b: 0xedb9, 0x1e9c: 0x91dd, 0x1e9d: 0x91fd, 0x1e9e: 0x921d, 0x1e9f: 0x0040, 0x1ea0: 0xedd9, 0x1ea1: 0x923d, 0x1ea2: 0xedf9, 0x1ea3: 0xee19, 0x1ea4: 0xee39, 0x1ea5: 0x925d, 0x1ea6: 0xee59, 0x1ea7: 0xee79, 0x1ea8: 0xee99, 0x1ea9: 0xeeb9, 0x1eaa: 0xeed9, 0x1eab: 0x927d, 0x1eac: 0xeef9, 0x1ead: 0xef19, 0x1eae: 0xef39, 0x1eaf: 0xef59, 0x1eb0: 0xef79, 0x1eb1: 0xef99, 0x1eb2: 0x929d, 0x1eb3: 0x92bd, 0x1eb4: 0xefb9, 0x1eb5: 0x92dd, 0x1eb6: 0xefd9, 0x1eb7: 0x92fd, 0x1eb8: 0xeff9, 0x1eb9: 0xf019, 0x1eba: 0xf039, 0x1ebb: 0x931d, 0x1ebc: 0x933d, 0x1ebd: 0xf059, 0x1ebe: 0x935d, 0x1ebf: 0xf079, // Block 0x7b, offset 0x1ec0 0x1ec0: 0xf6b9, 0x1ec1: 0xf6d9, 0x1ec2: 
0xf6f9, 0x1ec3: 0xf719, 0x1ec4: 0xf739, 0x1ec5: 0x951d, 0x1ec6: 0xf759, 0x1ec7: 0xf779, 0x1ec8: 0xf799, 0x1ec9: 0xf7b9, 0x1eca: 0xf7d9, 0x1ecb: 0x953d, 0x1ecc: 0x955d, 0x1ecd: 0xf7f9, 0x1ece: 0xf819, 0x1ecf: 0xf839, 0x1ed0: 0xf859, 0x1ed1: 0xf879, 0x1ed2: 0xf899, 0x1ed3: 0x957d, 0x1ed4: 0xf8b9, 0x1ed5: 0xf8d9, 0x1ed6: 0xf8f9, 0x1ed7: 0xf919, 0x1ed8: 0x959d, 0x1ed9: 0x95bd, 0x1eda: 0xf939, 0x1edb: 0xf959, 0x1edc: 0xf979, 0x1edd: 0x95dd, 0x1ede: 0xf999, 0x1edf: 0xf9b9, 0x1ee0: 0x6815, 0x1ee1: 0x95fd, 0x1ee2: 0xf9d9, 0x1ee3: 0xf9f9, 0x1ee4: 0xfa19, 0x1ee5: 0x961d, 0x1ee6: 0xfa39, 0x1ee7: 0xfa59, 0x1ee8: 0xfa79, 0x1ee9: 0xfa99, 0x1eea: 0xfab9, 0x1eeb: 0xfad9, 0x1eec: 0xfaf9, 0x1eed: 0x963d, 0x1eee: 0xfb19, 0x1eef: 0xfb39, 0x1ef0: 0xfb59, 0x1ef1: 0x965d, 0x1ef2: 0xfb79, 0x1ef3: 0xfb99, 0x1ef4: 0xfbb9, 0x1ef5: 0xfbd9, 0x1ef6: 0x7b35, 0x1ef7: 0x967d, 0x1ef8: 0xfbf9, 0x1ef9: 0xfc19, 0x1efa: 0xfc39, 0x1efb: 0x969d, 0x1efc: 0xfc59, 0x1efd: 0x96bd, 0x1efe: 0xfc79, 0x1eff: 0xfc79, // Block 0x7c, offset 0x1f00 0x1f00: 0xfc99, 0x1f01: 0x96dd, 0x1f02: 0xfcb9, 0x1f03: 0xfcd9, 0x1f04: 0xfcf9, 0x1f05: 0xfd19, 0x1f06: 0xfd39, 0x1f07: 0xfd59, 0x1f08: 0xfd79, 0x1f09: 0x96fd, 0x1f0a: 0xfd99, 0x1f0b: 0xfdb9, 0x1f0c: 0xfdd9, 0x1f0d: 0xfdf9, 0x1f0e: 0xfe19, 0x1f0f: 0xfe39, 0x1f10: 0x971d, 0x1f11: 0xfe59, 0x1f12: 0x973d, 0x1f13: 0x975d, 0x1f14: 0x977d, 0x1f15: 0xfe79, 0x1f16: 0xfe99, 0x1f17: 0xfeb9, 0x1f18: 0xfed9, 0x1f19: 0xfef9, 0x1f1a: 0xff19, 0x1f1b: 0xff39, 0x1f1c: 0xff59, 0x1f1d: 0x979d, 0x1f1e: 0x0040, 0x1f1f: 0x0040, 0x1f20: 0x0040, 0x1f21: 0x0040, 0x1f22: 0x0040, 0x1f23: 0x0040, 0x1f24: 0x0040, 0x1f25: 0x0040, 0x1f26: 0x0040, 0x1f27: 0x0040, 0x1f28: 0x0040, 0x1f29: 0x0040, 0x1f2a: 0x0040, 0x1f2b: 0x0040, 0x1f2c: 0x0040, 0x1f2d: 0x0040, 0x1f2e: 0x0040, 0x1f2f: 0x0040, 0x1f30: 0x0040, 0x1f31: 0x0040, 0x1f32: 0x0040, 0x1f33: 0x0040, 0x1f34: 0x0040, 0x1f35: 0x0040, 0x1f36: 0x0040, 0x1f37: 0x0040, 0x1f38: 0x0040, 0x1f39: 0x0040, 0x1f3a: 0x0040, 0x1f3b: 0x0040, 0x1f3c: 0x0040, 0x1f3d: 0x0040, 0x1f3e: 0x0040, 0x1f3f: 0x0040, } // idnaIndex: 35 blocks, 2240 entries, 4480 bytes // Block 0 is the zero block. 
var idnaIndex = [2240]uint16{ // Block 0x0, offset 0x0 // Block 0x1, offset 0x40 // Block 0x2, offset 0x80 // Block 0x3, offset 0xc0 0xc2: 0x01, 0xc3: 0x7b, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, 0xc8: 0x06, 0xc9: 0x7c, 0xca: 0x7d, 0xcb: 0x07, 0xcc: 0x7e, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, 0xd0: 0x7f, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x80, 0xd6: 0x81, 0xd7: 0x82, 0xd8: 0x0f, 0xd9: 0x83, 0xda: 0x84, 0xdb: 0x10, 0xdc: 0x11, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87, 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20, // Block 0x4, offset 0x100 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x12, 0x126: 0x13, 0x127: 0x14, 0x128: 0x15, 0x129: 0x16, 0x12a: 0x17, 0x12b: 0x18, 0x12c: 0x19, 0x12d: 0x1a, 0x12e: 0x1b, 0x12f: 0x8d, 0x130: 0x8e, 0x131: 0x1c, 0x132: 0x1d, 0x133: 0x1e, 0x134: 0x8f, 0x135: 0x1f, 0x136: 0x90, 0x137: 0x91, 0x138: 0x92, 0x139: 0x93, 0x13a: 0x20, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x21, 0x13e: 0x22, 0x13f: 0x96, // Block 0x5, offset 0x140 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9b, 0x147: 0x9b, 0x148: 0x9d, 0x149: 0x9e, 0x14a: 0x9f, 0x14b: 0xa0, 0x14c: 0xa1, 0x14d: 0xa2, 0x14e: 0xa3, 0x14f: 0xa4, 0x150: 0xa5, 0x151: 0x9d, 0x152: 0x9d, 0x153: 0x9d, 0x154: 0x9d, 0x155: 0x9d, 0x156: 0x9d, 0x157: 0x9d, 0x158: 0x9d, 0x159: 0xa6, 0x15a: 0xa7, 0x15b: 0xa8, 0x15c: 0xa9, 0x15d: 0xaa, 0x15e: 0xab, 0x15f: 0xac, 0x160: 0xad, 0x161: 0xae, 0x162: 0xaf, 0x163: 0xb0, 0x164: 0xb1, 0x165: 0xb2, 0x166: 0xb3, 0x167: 0xb4, 0x168: 0xb5, 0x169: 0xb6, 0x16a: 0xb7, 0x16b: 0xb8, 0x16c: 0xb9, 0x16d: 0xba, 0x16e: 0xbb, 0x16f: 0xbc, 0x170: 0xbd, 0x171: 0xbe, 0x172: 0xbf, 0x173: 0xc0, 0x174: 0x23, 0x175: 0x24, 0x176: 0x25, 0x177: 0xc1, 0x178: 0x26, 0x179: 0x26, 0x17a: 0x27, 0x17b: 0x26, 0x17c: 0xc2, 0x17d: 0x28, 0x17e: 0x29, 0x17f: 0x2a, // Block 0x6, offset 0x180 0x180: 0x2b, 0x181: 0x2c, 0x182: 0x2d, 0x183: 0xc3, 0x184: 0x2e, 0x185: 0x2f, 0x186: 0xc4, 0x187: 0x9b, 0x188: 0xc5, 0x189: 0xc6, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc7, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xc8, 0x190: 0xc9, 0x191: 0x30, 0x192: 0x31, 0x193: 0x32, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, 0x1a8: 0xca, 0x1a9: 0xcb, 0x1aa: 0x9b, 0x1ab: 0xcc, 0x1ac: 0x9b, 0x1ad: 0xcd, 0x1ae: 0xce, 0x1af: 0xcf, 0x1b0: 0xd0, 0x1b1: 0x33, 0x1b2: 0x26, 0x1b3: 0x34, 0x1b4: 0xd1, 0x1b5: 0xd2, 0x1b6: 0xd3, 0x1b7: 0xd4, 0x1b8: 0xd5, 0x1b9: 0xd6, 0x1ba: 0xd7, 0x1bb: 0xd8, 0x1bc: 0xd9, 0x1bd: 0xda, 0x1be: 0xdb, 0x1bf: 0x35, // Block 0x7, offset 0x1c0 0x1c0: 0x36, 0x1c1: 0xdc, 0x1c2: 0xdd, 0x1c3: 0xde, 0x1c4: 0xdf, 0x1c5: 0x37, 0x1c6: 0x38, 0x1c7: 0xe0, 0x1c8: 0xe1, 0x1c9: 0x39, 0x1ca: 0x3a, 0x1cb: 0x3b, 0x1cc: 0x3c, 0x1cd: 0x3d, 0x1ce: 0x3e, 0x1cf: 0x3f, 0x1d0: 0x9d, 0x1d1: 0x9d, 0x1d2: 0x9d, 0x1d3: 0x9d, 0x1d4: 0x9d, 0x1d5: 0x9d, 0x1d6: 0x9d, 0x1d7: 0x9d, 0x1d8: 0x9d, 0x1d9: 0x9d, 0x1da: 0x9d, 0x1db: 0x9d, 0x1dc: 0x9d, 0x1dd: 0x9d, 0x1de: 0x9d, 0x1df: 0x9d, 0x1e0: 0x9d, 0x1e1: 0x9d, 0x1e2: 0x9d, 0x1e3: 0x9d, 0x1e4: 0x9d, 0x1e5: 0x9d, 0x1e6: 0x9d, 0x1e7: 0x9d, 0x1e8: 0x9d, 0x1e9: 0x9d, 0x1ea: 0x9d, 0x1eb: 0x9d, 0x1ec: 0x9d, 0x1ed: 0x9d, 0x1ee: 0x9d, 0x1ef: 
0x9d, 0x1f0: 0x9d, 0x1f1: 0x9d, 0x1f2: 0x9d, 0x1f3: 0x9d, 0x1f4: 0x9d, 0x1f5: 0x9d, 0x1f6: 0x9d, 0x1f7: 0x9d, 0x1f8: 0x9d, 0x1f9: 0x9d, 0x1fa: 0x9d, 0x1fb: 0x9d, 0x1fc: 0x9d, 0x1fd: 0x9d, 0x1fe: 0x9d, 0x1ff: 0x9d, // Block 0x8, offset 0x200 0x200: 0x9d, 0x201: 0x9d, 0x202: 0x9d, 0x203: 0x9d, 0x204: 0x9d, 0x205: 0x9d, 0x206: 0x9d, 0x207: 0x9d, 0x208: 0x9d, 0x209: 0x9d, 0x20a: 0x9d, 0x20b: 0x9d, 0x20c: 0x9d, 0x20d: 0x9d, 0x20e: 0x9d, 0x20f: 0x9d, 0x210: 0x9d, 0x211: 0x9d, 0x212: 0x9d, 0x213: 0x9d, 0x214: 0x9d, 0x215: 0x9d, 0x216: 0x9d, 0x217: 0x9d, 0x218: 0x9d, 0x219: 0x9d, 0x21a: 0x9d, 0x21b: 0x9d, 0x21c: 0x9d, 0x21d: 0x9d, 0x21e: 0x9d, 0x21f: 0x9d, 0x220: 0x9d, 0x221: 0x9d, 0x222: 0x9d, 0x223: 0x9d, 0x224: 0x9d, 0x225: 0x9d, 0x226: 0x9d, 0x227: 0x9d, 0x228: 0x9d, 0x229: 0x9d, 0x22a: 0x9d, 0x22b: 0x9d, 0x22c: 0x9d, 0x22d: 0x9d, 0x22e: 0x9d, 0x22f: 0x9d, 0x230: 0x9d, 0x231: 0x9d, 0x232: 0x9d, 0x233: 0x9d, 0x234: 0x9d, 0x235: 0x9d, 0x236: 0xb0, 0x237: 0x9b, 0x238: 0x9d, 0x239: 0x9d, 0x23a: 0x9d, 0x23b: 0x9d, 0x23c: 0x9d, 0x23d: 0x9d, 0x23e: 0x9d, 0x23f: 0x9d, // Block 0x9, offset 0x240 0x240: 0x9d, 0x241: 0x9d, 0x242: 0x9d, 0x243: 0x9d, 0x244: 0x9d, 0x245: 0x9d, 0x246: 0x9d, 0x247: 0x9d, 0x248: 0x9d, 0x249: 0x9d, 0x24a: 0x9d, 0x24b: 0x9d, 0x24c: 0x9d, 0x24d: 0x9d, 0x24e: 0x9d, 0x24f: 0x9d, 0x250: 0x9d, 0x251: 0x9d, 0x252: 0x9d, 0x253: 0x9d, 0x254: 0x9d, 0x255: 0x9d, 0x256: 0x9d, 0x257: 0x9d, 0x258: 0x9d, 0x259: 0x9d, 0x25a: 0x9d, 0x25b: 0x9d, 0x25c: 0x9d, 0x25d: 0x9d, 0x25e: 0x9d, 0x25f: 0x9d, 0x260: 0x9d, 0x261: 0x9d, 0x262: 0x9d, 0x263: 0x9d, 0x264: 0x9d, 0x265: 0x9d, 0x266: 0x9d, 0x267: 0x9d, 0x268: 0x9d, 0x269: 0x9d, 0x26a: 0x9d, 0x26b: 0x9d, 0x26c: 0x9d, 0x26d: 0x9d, 0x26e: 0x9d, 0x26f: 0x9d, 0x270: 0x9d, 0x271: 0x9d, 0x272: 0x9d, 0x273: 0x9d, 0x274: 0x9d, 0x275: 0x9d, 0x276: 0x9d, 0x277: 0x9d, 0x278: 0x9d, 0x279: 0x9d, 0x27a: 0x9d, 0x27b: 0x9d, 0x27c: 0x9d, 0x27d: 0x9d, 0x27e: 0x9d, 0x27f: 0x9d, // Block 0xa, offset 0x280 0x280: 0x9d, 0x281: 0x9d, 0x282: 0x9d, 0x283: 0x9d, 0x284: 0x9d, 0x285: 0x9d, 0x286: 0x9d, 0x287: 0x9d, 0x288: 0x9d, 0x289: 0x9d, 0x28a: 0x9d, 0x28b: 0x9d, 0x28c: 0x9d, 0x28d: 0x9d, 0x28e: 0x9d, 0x28f: 0x9d, 0x290: 0x9d, 0x291: 0x9d, 0x292: 0x9d, 0x293: 0x9d, 0x294: 0x9d, 0x295: 0x9d, 0x296: 0x9d, 0x297: 0x9d, 0x298: 0x9d, 0x299: 0x9d, 0x29a: 0x9d, 0x29b: 0x9d, 0x29c: 0x9d, 0x29d: 0x9d, 0x29e: 0x9d, 0x29f: 0x9d, 0x2a0: 0x9d, 0x2a1: 0x9d, 0x2a2: 0x9d, 0x2a3: 0x9d, 0x2a4: 0x9d, 0x2a5: 0x9d, 0x2a6: 0x9d, 0x2a7: 0x9d, 0x2a8: 0x9d, 0x2a9: 0x9d, 0x2aa: 0x9d, 0x2ab: 0x9d, 0x2ac: 0x9d, 0x2ad: 0x9d, 0x2ae: 0x9d, 0x2af: 0x9d, 0x2b0: 0x9d, 0x2b1: 0x9d, 0x2b2: 0x9d, 0x2b3: 0x9d, 0x2b4: 0x9d, 0x2b5: 0x9d, 0x2b6: 0x9d, 0x2b7: 0x9d, 0x2b8: 0x9d, 0x2b9: 0x9d, 0x2ba: 0x9d, 0x2bb: 0x9d, 0x2bc: 0x9d, 0x2bd: 0x9d, 0x2be: 0x9d, 0x2bf: 0xe2, // Block 0xb, offset 0x2c0 0x2c0: 0x9d, 0x2c1: 0x9d, 0x2c2: 0x9d, 0x2c3: 0x9d, 0x2c4: 0x9d, 0x2c5: 0x9d, 0x2c6: 0x9d, 0x2c7: 0x9d, 0x2c8: 0x9d, 0x2c9: 0x9d, 0x2ca: 0x9d, 0x2cb: 0x9d, 0x2cc: 0x9d, 0x2cd: 0x9d, 0x2ce: 0x9d, 0x2cf: 0x9d, 0x2d0: 0x9d, 0x2d1: 0x9d, 0x2d2: 0xe3, 0x2d3: 0xe4, 0x2d4: 0x9d, 0x2d5: 0x9d, 0x2d6: 0x9d, 0x2d7: 0x9d, 0x2d8: 0xe5, 0x2d9: 0x40, 0x2da: 0x41, 0x2db: 0xe6, 0x2dc: 0x42, 0x2dd: 0x43, 0x2de: 0x44, 0x2df: 0xe7, 0x2e0: 0xe8, 0x2e1: 0xe9, 0x2e2: 0xea, 0x2e3: 0xeb, 0x2e4: 0xec, 0x2e5: 0xed, 0x2e6: 0xee, 0x2e7: 0xef, 0x2e8: 0xf0, 0x2e9: 0xf1, 0x2ea: 0xf2, 0x2eb: 0xf3, 0x2ec: 0xf4, 0x2ed: 0xf5, 0x2ee: 0xf6, 0x2ef: 0xf7, 0x2f0: 0x9d, 0x2f1: 0x9d, 0x2f2: 0x9d, 0x2f3: 0x9d, 0x2f4: 0x9d, 0x2f5: 0x9d, 0x2f6: 0x9d, 0x2f7: 0x9d, 0x2f8: 
0x9d, 0x2f9: 0x9d, 0x2fa: 0x9d, 0x2fb: 0x9d, 0x2fc: 0x9d, 0x2fd: 0x9d, 0x2fe: 0x9d, 0x2ff: 0x9d, // Block 0xc, offset 0x300 0x300: 0x9d, 0x301: 0x9d, 0x302: 0x9d, 0x303: 0x9d, 0x304: 0x9d, 0x305: 0x9d, 0x306: 0x9d, 0x307: 0x9d, 0x308: 0x9d, 0x309: 0x9d, 0x30a: 0x9d, 0x30b: 0x9d, 0x30c: 0x9d, 0x30d: 0x9d, 0x30e: 0x9d, 0x30f: 0x9d, 0x310: 0x9d, 0x311: 0x9d, 0x312: 0x9d, 0x313: 0x9d, 0x314: 0x9d, 0x315: 0x9d, 0x316: 0x9d, 0x317: 0x9d, 0x318: 0x9d, 0x319: 0x9d, 0x31a: 0x9d, 0x31b: 0x9d, 0x31c: 0x9d, 0x31d: 0x9d, 0x31e: 0xf8, 0x31f: 0xf9, // Block 0xd, offset 0x340 0x340: 0xb8, 0x341: 0xb8, 0x342: 0xb8, 0x343: 0xb8, 0x344: 0xb8, 0x345: 0xb8, 0x346: 0xb8, 0x347: 0xb8, 0x348: 0xb8, 0x349: 0xb8, 0x34a: 0xb8, 0x34b: 0xb8, 0x34c: 0xb8, 0x34d: 0xb8, 0x34e: 0xb8, 0x34f: 0xb8, 0x350: 0xb8, 0x351: 0xb8, 0x352: 0xb8, 0x353: 0xb8, 0x354: 0xb8, 0x355: 0xb8, 0x356: 0xb8, 0x357: 0xb8, 0x358: 0xb8, 0x359: 0xb8, 0x35a: 0xb8, 0x35b: 0xb8, 0x35c: 0xb8, 0x35d: 0xb8, 0x35e: 0xb8, 0x35f: 0xb8, 0x360: 0xb8, 0x361: 0xb8, 0x362: 0xb8, 0x363: 0xb8, 0x364: 0xb8, 0x365: 0xb8, 0x366: 0xb8, 0x367: 0xb8, 0x368: 0xb8, 0x369: 0xb8, 0x36a: 0xb8, 0x36b: 0xb8, 0x36c: 0xb8, 0x36d: 0xb8, 0x36e: 0xb8, 0x36f: 0xb8, 0x370: 0xb8, 0x371: 0xb8, 0x372: 0xb8, 0x373: 0xb8, 0x374: 0xb8, 0x375: 0xb8, 0x376: 0xb8, 0x377: 0xb8, 0x378: 0xb8, 0x379: 0xb8, 0x37a: 0xb8, 0x37b: 0xb8, 0x37c: 0xb8, 0x37d: 0xb8, 0x37e: 0xb8, 0x37f: 0xb8, // Block 0xe, offset 0x380 0x380: 0xb8, 0x381: 0xb8, 0x382: 0xb8, 0x383: 0xb8, 0x384: 0xb8, 0x385: 0xb8, 0x386: 0xb8, 0x387: 0xb8, 0x388: 0xb8, 0x389: 0xb8, 0x38a: 0xb8, 0x38b: 0xb8, 0x38c: 0xb8, 0x38d: 0xb8, 0x38e: 0xb8, 0x38f: 0xb8, 0x390: 0xb8, 0x391: 0xb8, 0x392: 0xb8, 0x393: 0xb8, 0x394: 0xb8, 0x395: 0xb8, 0x396: 0xb8, 0x397: 0xb8, 0x398: 0xb8, 0x399: 0xb8, 0x39a: 0xb8, 0x39b: 0xb8, 0x39c: 0xb8, 0x39d: 0xb8, 0x39e: 0xb8, 0x39f: 0xb8, 0x3a0: 0xb8, 0x3a1: 0xb8, 0x3a2: 0xb8, 0x3a3: 0xb8, 0x3a4: 0xfa, 0x3a5: 0xfb, 0x3a6: 0xfc, 0x3a7: 0xfd, 0x3a8: 0x45, 0x3a9: 0xfe, 0x3aa: 0xff, 0x3ab: 0x46, 0x3ac: 0x47, 0x3ad: 0x48, 0x3ae: 0x49, 0x3af: 0x4a, 0x3b0: 0x100, 0x3b1: 0x4b, 0x3b2: 0x4c, 0x3b3: 0x4d, 0x3b4: 0x4e, 0x3b5: 0x4f, 0x3b6: 0x101, 0x3b7: 0x50, 0x3b8: 0x51, 0x3b9: 0x52, 0x3ba: 0x53, 0x3bb: 0x54, 0x3bc: 0x55, 0x3bd: 0x56, 0x3be: 0x57, 0x3bf: 0x58, // Block 0xf, offset 0x3c0 0x3c0: 0x102, 0x3c1: 0x103, 0x3c2: 0x9d, 0x3c3: 0x104, 0x3c4: 0x105, 0x3c5: 0x9b, 0x3c6: 0x106, 0x3c7: 0x107, 0x3c8: 0xb8, 0x3c9: 0xb8, 0x3ca: 0x108, 0x3cb: 0x109, 0x3cc: 0x10a, 0x3cd: 0x10b, 0x3ce: 0x10c, 0x3cf: 0x10d, 0x3d0: 0x10e, 0x3d1: 0x9d, 0x3d2: 0x10f, 0x3d3: 0x110, 0x3d4: 0x111, 0x3d5: 0x112, 0x3d6: 0xb8, 0x3d7: 0xb8, 0x3d8: 0x9d, 0x3d9: 0x9d, 0x3da: 0x9d, 0x3db: 0x9d, 0x3dc: 0x113, 0x3dd: 0x114, 0x3de: 0xb8, 0x3df: 0xb8, 0x3e0: 0x115, 0x3e1: 0x116, 0x3e2: 0x117, 0x3e3: 0x118, 0x3e4: 0x119, 0x3e5: 0xb8, 0x3e6: 0x11a, 0x3e7: 0x11b, 0x3e8: 0x11c, 0x3e9: 0x11d, 0x3ea: 0x11e, 0x3eb: 0x59, 0x3ec: 0x11f, 0x3ed: 0x120, 0x3ee: 0x5a, 0x3ef: 0xb8, 0x3f0: 0x9d, 0x3f1: 0x121, 0x3f2: 0x122, 0x3f3: 0x123, 0x3f4: 0xb8, 0x3f5: 0xb8, 0x3f6: 0xb8, 0x3f7: 0xb8, 0x3f8: 0xb8, 0x3f9: 0x124, 0x3fa: 0xb8, 0x3fb: 0xb8, 0x3fc: 0xb8, 0x3fd: 0xb8, 0x3fe: 0xb8, 0x3ff: 0xb8, // Block 0x10, offset 0x400 0x400: 0x125, 0x401: 0x126, 0x402: 0x127, 0x403: 0x128, 0x404: 0x129, 0x405: 0x12a, 0x406: 0x12b, 0x407: 0x12c, 0x408: 0x12d, 0x409: 0xb8, 0x40a: 0x12e, 0x40b: 0x12f, 0x40c: 0x5b, 0x40d: 0x5c, 0x40e: 0xb8, 0x40f: 0xb8, 0x410: 0x130, 0x411: 0x131, 0x412: 0x132, 0x413: 0x133, 0x414: 0xb8, 0x415: 0xb8, 0x416: 0x134, 0x417: 0x135, 0x418: 0x136, 0x419: 0x137, 0x41a: 0x138, 
0x41b: 0x139, 0x41c: 0x13a, 0x41d: 0xb8, 0x41e: 0xb8, 0x41f: 0xb8, 0x420: 0xb8, 0x421: 0xb8, 0x422: 0x13b, 0x423: 0x13c, 0x424: 0xb8, 0x425: 0xb8, 0x426: 0xb8, 0x427: 0xb8, 0x428: 0xb8, 0x429: 0xb8, 0x42a: 0xb8, 0x42b: 0x13d, 0x42c: 0xb8, 0x42d: 0xb8, 0x42e: 0xb8, 0x42f: 0xb8, 0x430: 0x13e, 0x431: 0x13f, 0x432: 0x140, 0x433: 0xb8, 0x434: 0xb8, 0x435: 0xb8, 0x436: 0xb8, 0x437: 0xb8, 0x438: 0xb8, 0x439: 0xb8, 0x43a: 0xb8, 0x43b: 0xb8, 0x43c: 0xb8, 0x43d: 0xb8, 0x43e: 0xb8, 0x43f: 0xb8, // Block 0x11, offset 0x440 0x440: 0x9d, 0x441: 0x9d, 0x442: 0x9d, 0x443: 0x9d, 0x444: 0x9d, 0x445: 0x9d, 0x446: 0x9d, 0x447: 0x9d, 0x448: 0x9d, 0x449: 0x9d, 0x44a: 0x9d, 0x44b: 0x9d, 0x44c: 0x9d, 0x44d: 0x9d, 0x44e: 0x141, 0x44f: 0xb8, 0x450: 0x9b, 0x451: 0x142, 0x452: 0x9d, 0x453: 0x9d, 0x454: 0x9d, 0x455: 0x143, 0x456: 0xb8, 0x457: 0xb8, 0x458: 0xb8, 0x459: 0xb8, 0x45a: 0xb8, 0x45b: 0xb8, 0x45c: 0xb8, 0x45d: 0xb8, 0x45e: 0xb8, 0x45f: 0xb8, 0x460: 0xb8, 0x461: 0xb8, 0x462: 0xb8, 0x463: 0xb8, 0x464: 0xb8, 0x465: 0xb8, 0x466: 0xb8, 0x467: 0xb8, 0x468: 0xb8, 0x469: 0xb8, 0x46a: 0xb8, 0x46b: 0xb8, 0x46c: 0xb8, 0x46d: 0xb8, 0x46e: 0xb8, 0x46f: 0xb8, 0x470: 0xb8, 0x471: 0xb8, 0x472: 0xb8, 0x473: 0xb8, 0x474: 0xb8, 0x475: 0xb8, 0x476: 0xb8, 0x477: 0xb8, 0x478: 0xb8, 0x479: 0xb8, 0x47a: 0xb8, 0x47b: 0xb8, 0x47c: 0xb8, 0x47d: 0xb8, 0x47e: 0xb8, 0x47f: 0xb8, // Block 0x12, offset 0x480 0x480: 0x9d, 0x481: 0x9d, 0x482: 0x9d, 0x483: 0x9d, 0x484: 0x9d, 0x485: 0x9d, 0x486: 0x9d, 0x487: 0x9d, 0x488: 0x9d, 0x489: 0x9d, 0x48a: 0x9d, 0x48b: 0x9d, 0x48c: 0x9d, 0x48d: 0x9d, 0x48e: 0x9d, 0x48f: 0x9d, 0x490: 0x144, 0x491: 0xb8, 0x492: 0xb8, 0x493: 0xb8, 0x494: 0xb8, 0x495: 0xb8, 0x496: 0xb8, 0x497: 0xb8, 0x498: 0xb8, 0x499: 0xb8, 0x49a: 0xb8, 0x49b: 0xb8, 0x49c: 0xb8, 0x49d: 0xb8, 0x49e: 0xb8, 0x49f: 0xb8, 0x4a0: 0xb8, 0x4a1: 0xb8, 0x4a2: 0xb8, 0x4a3: 0xb8, 0x4a4: 0xb8, 0x4a5: 0xb8, 0x4a6: 0xb8, 0x4a7: 0xb8, 0x4a8: 0xb8, 0x4a9: 0xb8, 0x4aa: 0xb8, 0x4ab: 0xb8, 0x4ac: 0xb8, 0x4ad: 0xb8, 0x4ae: 0xb8, 0x4af: 0xb8, 0x4b0: 0xb8, 0x4b1: 0xb8, 0x4b2: 0xb8, 0x4b3: 0xb8, 0x4b4: 0xb8, 0x4b5: 0xb8, 0x4b6: 0xb8, 0x4b7: 0xb8, 0x4b8: 0xb8, 0x4b9: 0xb8, 0x4ba: 0xb8, 0x4bb: 0xb8, 0x4bc: 0xb8, 0x4bd: 0xb8, 0x4be: 0xb8, 0x4bf: 0xb8, // Block 0x13, offset 0x4c0 0x4c0: 0xb8, 0x4c1: 0xb8, 0x4c2: 0xb8, 0x4c3: 0xb8, 0x4c4: 0xb8, 0x4c5: 0xb8, 0x4c6: 0xb8, 0x4c7: 0xb8, 0x4c8: 0xb8, 0x4c9: 0xb8, 0x4ca: 0xb8, 0x4cb: 0xb8, 0x4cc: 0xb8, 0x4cd: 0xb8, 0x4ce: 0xb8, 0x4cf: 0xb8, 0x4d0: 0x9d, 0x4d1: 0x9d, 0x4d2: 0x9d, 0x4d3: 0x9d, 0x4d4: 0x9d, 0x4d5: 0x9d, 0x4d6: 0x9d, 0x4d7: 0x9d, 0x4d8: 0x9d, 0x4d9: 0x145, 0x4da: 0xb8, 0x4db: 0xb8, 0x4dc: 0xb8, 0x4dd: 0xb8, 0x4de: 0xb8, 0x4df: 0xb8, 0x4e0: 0xb8, 0x4e1: 0xb8, 0x4e2: 0xb8, 0x4e3: 0xb8, 0x4e4: 0xb8, 0x4e5: 0xb8, 0x4e6: 0xb8, 0x4e7: 0xb8, 0x4e8: 0xb8, 0x4e9: 0xb8, 0x4ea: 0xb8, 0x4eb: 0xb8, 0x4ec: 0xb8, 0x4ed: 0xb8, 0x4ee: 0xb8, 0x4ef: 0xb8, 0x4f0: 0xb8, 0x4f1: 0xb8, 0x4f2: 0xb8, 0x4f3: 0xb8, 0x4f4: 0xb8, 0x4f5: 0xb8, 0x4f6: 0xb8, 0x4f7: 0xb8, 0x4f8: 0xb8, 0x4f9: 0xb8, 0x4fa: 0xb8, 0x4fb: 0xb8, 0x4fc: 0xb8, 0x4fd: 0xb8, 0x4fe: 0xb8, 0x4ff: 0xb8, // Block 0x14, offset 0x500 0x500: 0xb8, 0x501: 0xb8, 0x502: 0xb8, 0x503: 0xb8, 0x504: 0xb8, 0x505: 0xb8, 0x506: 0xb8, 0x507: 0xb8, 0x508: 0xb8, 0x509: 0xb8, 0x50a: 0xb8, 0x50b: 0xb8, 0x50c: 0xb8, 0x50d: 0xb8, 0x50e: 0xb8, 0x50f: 0xb8, 0x510: 0xb8, 0x511: 0xb8, 0x512: 0xb8, 0x513: 0xb8, 0x514: 0xb8, 0x515: 0xb8, 0x516: 0xb8, 0x517: 0xb8, 0x518: 0xb8, 0x519: 0xb8, 0x51a: 0xb8, 0x51b: 0xb8, 0x51c: 0xb8, 0x51d: 0xb8, 0x51e: 0xb8, 0x51f: 0xb8, 0x520: 0x9d, 0x521: 0x9d, 0x522: 
0x9d, 0x523: 0x9d, 0x524: 0x9d, 0x525: 0x9d, 0x526: 0x9d, 0x527: 0x9d, 0x528: 0x13d, 0x529: 0x146, 0x52a: 0xb8, 0x52b: 0x147, 0x52c: 0x148, 0x52d: 0x149, 0x52e: 0x14a, 0x52f: 0xb8, 0x530: 0xb8, 0x531: 0xb8, 0x532: 0xb8, 0x533: 0xb8, 0x534: 0xb8, 0x535: 0xb8, 0x536: 0xb8, 0x537: 0xb8, 0x538: 0xb8, 0x539: 0xb8, 0x53a: 0xb8, 0x53b: 0xb8, 0x53c: 0x9d, 0x53d: 0x14b, 0x53e: 0x14c, 0x53f: 0x14d, // Block 0x15, offset 0x540 0x540: 0x9d, 0x541: 0x9d, 0x542: 0x9d, 0x543: 0x9d, 0x544: 0x9d, 0x545: 0x9d, 0x546: 0x9d, 0x547: 0x9d, 0x548: 0x9d, 0x549: 0x9d, 0x54a: 0x9d, 0x54b: 0x9d, 0x54c: 0x9d, 0x54d: 0x9d, 0x54e: 0x9d, 0x54f: 0x9d, 0x550: 0x9d, 0x551: 0x9d, 0x552: 0x9d, 0x553: 0x9d, 0x554: 0x9d, 0x555: 0x9d, 0x556: 0x9d, 0x557: 0x9d, 0x558: 0x9d, 0x559: 0x9d, 0x55a: 0x9d, 0x55b: 0x9d, 0x55c: 0x9d, 0x55d: 0x9d, 0x55e: 0x9d, 0x55f: 0x14e, 0x560: 0x9d, 0x561: 0x9d, 0x562: 0x9d, 0x563: 0x9d, 0x564: 0x9d, 0x565: 0x9d, 0x566: 0x9d, 0x567: 0x9d, 0x568: 0x9d, 0x569: 0x9d, 0x56a: 0x9d, 0x56b: 0x14f, 0x56c: 0xb8, 0x56d: 0xb8, 0x56e: 0xb8, 0x56f: 0xb8, 0x570: 0xb8, 0x571: 0xb8, 0x572: 0xb8, 0x573: 0xb8, 0x574: 0xb8, 0x575: 0xb8, 0x576: 0xb8, 0x577: 0xb8, 0x578: 0xb8, 0x579: 0xb8, 0x57a: 0xb8, 0x57b: 0xb8, 0x57c: 0xb8, 0x57d: 0xb8, 0x57e: 0xb8, 0x57f: 0xb8, // Block 0x16, offset 0x580 0x580: 0x150, 0x581: 0xb8, 0x582: 0xb8, 0x583: 0xb8, 0x584: 0xb8, 0x585: 0xb8, 0x586: 0xb8, 0x587: 0xb8, 0x588: 0xb8, 0x589: 0xb8, 0x58a: 0xb8, 0x58b: 0xb8, 0x58c: 0xb8, 0x58d: 0xb8, 0x58e: 0xb8, 0x58f: 0xb8, 0x590: 0xb8, 0x591: 0xb8, 0x592: 0xb8, 0x593: 0xb8, 0x594: 0xb8, 0x595: 0xb8, 0x596: 0xb8, 0x597: 0xb8, 0x598: 0xb8, 0x599: 0xb8, 0x59a: 0xb8, 0x59b: 0xb8, 0x59c: 0xb8, 0x59d: 0xb8, 0x59e: 0xb8, 0x59f: 0xb8, 0x5a0: 0xb8, 0x5a1: 0xb8, 0x5a2: 0xb8, 0x5a3: 0xb8, 0x5a4: 0xb8, 0x5a5: 0xb8, 0x5a6: 0xb8, 0x5a7: 0xb8, 0x5a8: 0xb8, 0x5a9: 0xb8, 0x5aa: 0xb8, 0x5ab: 0xb8, 0x5ac: 0xb8, 0x5ad: 0xb8, 0x5ae: 0xb8, 0x5af: 0xb8, 0x5b0: 0x9d, 0x5b1: 0x151, 0x5b2: 0x152, 0x5b3: 0xb8, 0x5b4: 0xb8, 0x5b5: 0xb8, 0x5b6: 0xb8, 0x5b7: 0xb8, 0x5b8: 0xb8, 0x5b9: 0xb8, 0x5ba: 0xb8, 0x5bb: 0xb8, 0x5bc: 0xb8, 0x5bd: 0xb8, 0x5be: 0xb8, 0x5bf: 0xb8, // Block 0x17, offset 0x5c0 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x153, 0x5c4: 0x154, 0x5c5: 0x155, 0x5c6: 0x156, 0x5c7: 0x157, 0x5c8: 0x9b, 0x5c9: 0x158, 0x5ca: 0xb8, 0x5cb: 0xb8, 0x5cc: 0x9b, 0x5cd: 0x159, 0x5ce: 0xb8, 0x5cf: 0xb8, 0x5d0: 0x5d, 0x5d1: 0x5e, 0x5d2: 0x5f, 0x5d3: 0x60, 0x5d4: 0x61, 0x5d5: 0x62, 0x5d6: 0x63, 0x5d7: 0x64, 0x5d8: 0x65, 0x5d9: 0x66, 0x5da: 0x67, 0x5db: 0x68, 0x5dc: 0x69, 0x5dd: 0x6a, 0x5de: 0x6b, 0x5df: 0x6c, 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, 0x5e8: 0x15a, 0x5e9: 0x15b, 0x5ea: 0x15c, 0x5eb: 0xb8, 0x5ec: 0xb8, 0x5ed: 0xb8, 0x5ee: 0xb8, 0x5ef: 0xb8, 0x5f0: 0xb8, 0x5f1: 0xb8, 0x5f2: 0xb8, 0x5f3: 0xb8, 0x5f4: 0xb8, 0x5f5: 0xb8, 0x5f6: 0xb8, 0x5f7: 0xb8, 0x5f8: 0xb8, 0x5f9: 0xb8, 0x5fa: 0xb8, 0x5fb: 0xb8, 0x5fc: 0xb8, 0x5fd: 0xb8, 0x5fe: 0xb8, 0x5ff: 0xb8, // Block 0x18, offset 0x600 0x600: 0x15d, 0x601: 0xb8, 0x602: 0xb8, 0x603: 0xb8, 0x604: 0xb8, 0x605: 0xb8, 0x606: 0xb8, 0x607: 0xb8, 0x608: 0xb8, 0x609: 0xb8, 0x60a: 0xb8, 0x60b: 0xb8, 0x60c: 0xb8, 0x60d: 0xb8, 0x60e: 0xb8, 0x60f: 0xb8, 0x610: 0xb8, 0x611: 0xb8, 0x612: 0xb8, 0x613: 0xb8, 0x614: 0xb8, 0x615: 0xb8, 0x616: 0xb8, 0x617: 0xb8, 0x618: 0xb8, 0x619: 0xb8, 0x61a: 0xb8, 0x61b: 0xb8, 0x61c: 0xb8, 0x61d: 0xb8, 0x61e: 0xb8, 0x61f: 0xb8, 0x620: 0x9d, 0x621: 0x9d, 0x622: 0x9d, 0x623: 0x15e, 0x624: 0x6d, 0x625: 0x15f, 0x626: 0xb8, 0x627: 0xb8, 0x628: 0xb8, 
0x629: 0xb8, 0x62a: 0xb8, 0x62b: 0xb8, 0x62c: 0xb8, 0x62d: 0xb8, 0x62e: 0xb8, 0x62f: 0xb8, 0x630: 0xb8, 0x631: 0xb8, 0x632: 0xb8, 0x633: 0xb8, 0x634: 0xb8, 0x635: 0xb8, 0x636: 0xb8, 0x637: 0xb8, 0x638: 0x6e, 0x639: 0x6f, 0x63a: 0x70, 0x63b: 0x160, 0x63c: 0xb8, 0x63d: 0xb8, 0x63e: 0xb8, 0x63f: 0xb8, // Block 0x19, offset 0x640 0x640: 0x161, 0x641: 0x9b, 0x642: 0x162, 0x643: 0x163, 0x644: 0x71, 0x645: 0x72, 0x646: 0x164, 0x647: 0x165, 0x648: 0x73, 0x649: 0x166, 0x64a: 0xb8, 0x64b: 0xb8, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x167, 0x65c: 0x9b, 0x65d: 0x168, 0x65e: 0x9b, 0x65f: 0x169, 0x660: 0x16a, 0x661: 0x16b, 0x662: 0x16c, 0x663: 0xb8, 0x664: 0x16d, 0x665: 0x16e, 0x666: 0x16f, 0x667: 0x170, 0x668: 0xb8, 0x669: 0xb8, 0x66a: 0xb8, 0x66b: 0xb8, 0x66c: 0xb8, 0x66d: 0xb8, 0x66e: 0xb8, 0x66f: 0xb8, 0x670: 0xb8, 0x671: 0xb8, 0x672: 0xb8, 0x673: 0xb8, 0x674: 0xb8, 0x675: 0xb8, 0x676: 0xb8, 0x677: 0xb8, 0x678: 0xb8, 0x679: 0xb8, 0x67a: 0xb8, 0x67b: 0xb8, 0x67c: 0xb8, 0x67d: 0xb8, 0x67e: 0xb8, 0x67f: 0xb8, // Block 0x1a, offset 0x680 0x680: 0x9d, 0x681: 0x9d, 0x682: 0x9d, 0x683: 0x9d, 0x684: 0x9d, 0x685: 0x9d, 0x686: 0x9d, 0x687: 0x9d, 0x688: 0x9d, 0x689: 0x9d, 0x68a: 0x9d, 0x68b: 0x9d, 0x68c: 0x9d, 0x68d: 0x9d, 0x68e: 0x9d, 0x68f: 0x9d, 0x690: 0x9d, 0x691: 0x9d, 0x692: 0x9d, 0x693: 0x9d, 0x694: 0x9d, 0x695: 0x9d, 0x696: 0x9d, 0x697: 0x9d, 0x698: 0x9d, 0x699: 0x9d, 0x69a: 0x9d, 0x69b: 0x171, 0x69c: 0x9d, 0x69d: 0x9d, 0x69e: 0x9d, 0x69f: 0x9d, 0x6a0: 0x9d, 0x6a1: 0x9d, 0x6a2: 0x9d, 0x6a3: 0x9d, 0x6a4: 0x9d, 0x6a5: 0x9d, 0x6a6: 0x9d, 0x6a7: 0x9d, 0x6a8: 0x9d, 0x6a9: 0x9d, 0x6aa: 0x9d, 0x6ab: 0x9d, 0x6ac: 0x9d, 0x6ad: 0x9d, 0x6ae: 0x9d, 0x6af: 0x9d, 0x6b0: 0x9d, 0x6b1: 0x9d, 0x6b2: 0x9d, 0x6b3: 0x9d, 0x6b4: 0x9d, 0x6b5: 0x9d, 0x6b6: 0x9d, 0x6b7: 0x9d, 0x6b8: 0x9d, 0x6b9: 0x9d, 0x6ba: 0x9d, 0x6bb: 0x9d, 0x6bc: 0x9d, 0x6bd: 0x9d, 0x6be: 0x9d, 0x6bf: 0x9d, // Block 0x1b, offset 0x6c0 0x6c0: 0x9d, 0x6c1: 0x9d, 0x6c2: 0x9d, 0x6c3: 0x9d, 0x6c4: 0x9d, 0x6c5: 0x9d, 0x6c6: 0x9d, 0x6c7: 0x9d, 0x6c8: 0x9d, 0x6c9: 0x9d, 0x6ca: 0x9d, 0x6cb: 0x9d, 0x6cc: 0x9d, 0x6cd: 0x9d, 0x6ce: 0x9d, 0x6cf: 0x9d, 0x6d0: 0x9d, 0x6d1: 0x9d, 0x6d2: 0x9d, 0x6d3: 0x9d, 0x6d4: 0x9d, 0x6d5: 0x9d, 0x6d6: 0x9d, 0x6d7: 0x9d, 0x6d8: 0x9d, 0x6d9: 0x9d, 0x6da: 0x9d, 0x6db: 0x9d, 0x6dc: 0x172, 0x6dd: 0x9d, 0x6de: 0x9d, 0x6df: 0x9d, 0x6e0: 0x173, 0x6e1: 0x9d, 0x6e2: 0x9d, 0x6e3: 0x9d, 0x6e4: 0x9d, 0x6e5: 0x9d, 0x6e6: 0x9d, 0x6e7: 0x9d, 0x6e8: 0x9d, 0x6e9: 0x9d, 0x6ea: 0x9d, 0x6eb: 0x9d, 0x6ec: 0x9d, 0x6ed: 0x9d, 0x6ee: 0x9d, 0x6ef: 0x9d, 0x6f0: 0x9d, 0x6f1: 0x9d, 0x6f2: 0x9d, 0x6f3: 0x9d, 0x6f4: 0x9d, 0x6f5: 0x9d, 0x6f6: 0x9d, 0x6f7: 0x9d, 0x6f8: 0x9d, 0x6f9: 0x9d, 0x6fa: 0x9d, 0x6fb: 0x9d, 0x6fc: 0x9d, 0x6fd: 0x9d, 0x6fe: 0x9d, 0x6ff: 0x9d, // Block 0x1c, offset 0x700 0x700: 0x9d, 0x701: 0x9d, 0x702: 0x9d, 0x703: 0x9d, 0x704: 0x9d, 0x705: 0x9d, 0x706: 0x9d, 0x707: 0x9d, 0x708: 0x9d, 0x709: 0x9d, 0x70a: 0x9d, 0x70b: 0x9d, 0x70c: 0x9d, 0x70d: 0x9d, 0x70e: 0x9d, 0x70f: 0x9d, 0x710: 0x9d, 0x711: 0x9d, 0x712: 0x9d, 0x713: 0x9d, 0x714: 0x9d, 0x715: 0x9d, 0x716: 0x9d, 0x717: 0x9d, 0x718: 0x9d, 0x719: 0x9d, 0x71a: 0x9d, 0x71b: 0x9d, 0x71c: 0x9d, 0x71d: 0x9d, 0x71e: 0x9d, 0x71f: 0x9d, 0x720: 0x9d, 0x721: 0x9d, 0x722: 0x9d, 0x723: 0x9d, 0x724: 0x9d, 0x725: 0x9d, 0x726: 0x9d, 0x727: 0x9d, 0x728: 0x9d, 0x729: 0x9d, 0x72a: 0x9d, 0x72b: 0x9d, 0x72c: 0x9d, 0x72d: 0x9d, 0x72e: 0x9d, 0x72f: 0x9d, 
0x730: 0x9d, 0x731: 0x9d, 0x732: 0x9d, 0x733: 0x9d, 0x734: 0x9d, 0x735: 0x9d, 0x736: 0x9d, 0x737: 0x9d, 0x738: 0x9d, 0x739: 0x9d, 0x73a: 0x174, 0x73b: 0xb8, 0x73c: 0xb8, 0x73d: 0xb8, 0x73e: 0xb8, 0x73f: 0xb8, // Block 0x1d, offset 0x740 0x740: 0xb8, 0x741: 0xb8, 0x742: 0xb8, 0x743: 0xb8, 0x744: 0xb8, 0x745: 0xb8, 0x746: 0xb8, 0x747: 0xb8, 0x748: 0xb8, 0x749: 0xb8, 0x74a: 0xb8, 0x74b: 0xb8, 0x74c: 0xb8, 0x74d: 0xb8, 0x74e: 0xb8, 0x74f: 0xb8, 0x750: 0xb8, 0x751: 0xb8, 0x752: 0xb8, 0x753: 0xb8, 0x754: 0xb8, 0x755: 0xb8, 0x756: 0xb8, 0x757: 0xb8, 0x758: 0xb8, 0x759: 0xb8, 0x75a: 0xb8, 0x75b: 0xb8, 0x75c: 0xb8, 0x75d: 0xb8, 0x75e: 0xb8, 0x75f: 0xb8, 0x760: 0x74, 0x761: 0x75, 0x762: 0x76, 0x763: 0x175, 0x764: 0x77, 0x765: 0x78, 0x766: 0x176, 0x767: 0x79, 0x768: 0x7a, 0x769: 0xb8, 0x76a: 0xb8, 0x76b: 0xb8, 0x76c: 0xb8, 0x76d: 0xb8, 0x76e: 0xb8, 0x76f: 0xb8, 0x770: 0xb8, 0x771: 0xb8, 0x772: 0xb8, 0x773: 0xb8, 0x774: 0xb8, 0x775: 0xb8, 0x776: 0xb8, 0x777: 0xb8, 0x778: 0xb8, 0x779: 0xb8, 0x77a: 0xb8, 0x77b: 0xb8, 0x77c: 0xb8, 0x77d: 0xb8, 0x77e: 0xb8, 0x77f: 0xb8, // Block 0x1e, offset 0x780 0x790: 0x0d, 0x791: 0x0e, 0x792: 0x0f, 0x793: 0x10, 0x794: 0x11, 0x795: 0x0b, 0x796: 0x12, 0x797: 0x07, 0x798: 0x13, 0x799: 0x0b, 0x79a: 0x0b, 0x79b: 0x14, 0x79c: 0x0b, 0x79d: 0x15, 0x79e: 0x16, 0x79f: 0x17, 0x7a0: 0x07, 0x7a1: 0x07, 0x7a2: 0x07, 0x7a3: 0x07, 0x7a4: 0x07, 0x7a5: 0x07, 0x7a6: 0x07, 0x7a7: 0x07, 0x7a8: 0x07, 0x7a9: 0x07, 0x7aa: 0x18, 0x7ab: 0x19, 0x7ac: 0x1a, 0x7ad: 0x0b, 0x7ae: 0x0b, 0x7af: 0x1b, 0x7b0: 0x0b, 0x7b1: 0x0b, 0x7b2: 0x0b, 0x7b3: 0x0b, 0x7b4: 0x0b, 0x7b5: 0x0b, 0x7b6: 0x0b, 0x7b7: 0x0b, 0x7b8: 0x0b, 0x7b9: 0x0b, 0x7ba: 0x0b, 0x7bb: 0x0b, 0x7bc: 0x0b, 0x7bd: 0x0b, 0x7be: 0x0b, 0x7bf: 0x0b, // Block 0x1f, offset 0x7c0 0x7c0: 0x0b, 0x7c1: 0x0b, 0x7c2: 0x0b, 0x7c3: 0x0b, 0x7c4: 0x0b, 0x7c5: 0x0b, 0x7c6: 0x0b, 0x7c7: 0x0b, 0x7c8: 0x0b, 0x7c9: 0x0b, 0x7ca: 0x0b, 0x7cb: 0x0b, 0x7cc: 0x0b, 0x7cd: 0x0b, 0x7ce: 0x0b, 0x7cf: 0x0b, 0x7d0: 0x0b, 0x7d1: 0x0b, 0x7d2: 0x0b, 0x7d3: 0x0b, 0x7d4: 0x0b, 0x7d5: 0x0b, 0x7d6: 0x0b, 0x7d7: 0x0b, 0x7d8: 0x0b, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x0b, 0x7dc: 0x0b, 0x7dd: 0x0b, 0x7de: 0x0b, 0x7df: 0x0b, 0x7e0: 0x0b, 0x7e1: 0x0b, 0x7e2: 0x0b, 0x7e3: 0x0b, 0x7e4: 0x0b, 0x7e5: 0x0b, 0x7e6: 0x0b, 0x7e7: 0x0b, 0x7e8: 0x0b, 0x7e9: 0x0b, 0x7ea: 0x0b, 0x7eb: 0x0b, 0x7ec: 0x0b, 0x7ed: 0x0b, 0x7ee: 0x0b, 0x7ef: 0x0b, 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, // Block 0x20, offset 0x800 0x800: 0x177, 0x801: 0x178, 0x802: 0xb8, 0x803: 0xb8, 0x804: 0x179, 0x805: 0x179, 0x806: 0x179, 0x807: 0x17a, 0x808: 0xb8, 0x809: 0xb8, 0x80a: 0xb8, 0x80b: 0xb8, 0x80c: 0xb8, 0x80d: 0xb8, 0x80e: 0xb8, 0x80f: 0xb8, 0x810: 0xb8, 0x811: 0xb8, 0x812: 0xb8, 0x813: 0xb8, 0x814: 0xb8, 0x815: 0xb8, 0x816: 0xb8, 0x817: 0xb8, 0x818: 0xb8, 0x819: 0xb8, 0x81a: 0xb8, 0x81b: 0xb8, 0x81c: 0xb8, 0x81d: 0xb8, 0x81e: 0xb8, 0x81f: 0xb8, 0x820: 0xb8, 0x821: 0xb8, 0x822: 0xb8, 0x823: 0xb8, 0x824: 0xb8, 0x825: 0xb8, 0x826: 0xb8, 0x827: 0xb8, 0x828: 0xb8, 0x829: 0xb8, 0x82a: 0xb8, 0x82b: 0xb8, 0x82c: 0xb8, 0x82d: 0xb8, 0x82e: 0xb8, 0x82f: 0xb8, 0x830: 0xb8, 0x831: 0xb8, 0x832: 0xb8, 0x833: 0xb8, 0x834: 0xb8, 0x835: 0xb8, 0x836: 0xb8, 0x837: 0xb8, 0x838: 0xb8, 0x839: 0xb8, 0x83a: 0xb8, 0x83b: 0xb8, 0x83c: 0xb8, 0x83d: 0xb8, 0x83e: 0xb8, 0x83f: 0xb8, // Block 0x21, offset 0x840 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 
0x846: 0x0b, 0x847: 0x0b, 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b, 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b, 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b, 0x860: 0x1e, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b, 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b, 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, // Block 0x22, offset 0x880 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, } // idnaSparseOffset: 256 entries, 512 bytes var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x5c, 0x60, 0x6f, 0x74, 0x7b, 0x87, 0x95, 0xa3, 0xa8, 0xb1, 0xc1, 0xcf, 0xdc, 0xe8, 0xf9, 0x103, 0x10a, 0x117, 0x128, 0x12f, 0x13a, 0x149, 0x157, 0x161, 0x163, 0x167, 0x169, 0x175, 0x180, 0x188, 0x18e, 0x194, 0x199, 0x19e, 0x1a1, 0x1a5, 0x1ab, 0x1b0, 0x1bc, 0x1c6, 0x1cc, 0x1dd, 0x1e7, 0x1ea, 0x1f2, 0x1f5, 0x202, 0x20a, 0x20e, 0x215, 0x21d, 0x22d, 0x239, 0x23b, 0x245, 0x251, 0x25d, 0x269, 0x271, 0x276, 0x280, 0x291, 0x295, 0x2a0, 0x2a4, 0x2ad, 0x2b5, 0x2bb, 0x2c0, 0x2c3, 0x2c6, 0x2ca, 0x2d0, 0x2d4, 0x2d8, 0x2de, 0x2e5, 0x2eb, 0x2f3, 0x2fa, 0x305, 0x30f, 0x313, 0x316, 0x31c, 0x320, 0x322, 0x325, 0x327, 0x32a, 0x334, 0x337, 0x346, 0x34a, 0x34f, 0x352, 0x356, 0x35b, 0x360, 0x366, 0x36c, 0x37b, 0x381, 0x385, 0x394, 0x399, 0x3a1, 0x3ab, 0x3b6, 0x3be, 0x3cf, 0x3d8, 0x3e8, 0x3f5, 0x3ff, 0x404, 0x411, 0x415, 0x41a, 0x41c, 0x420, 0x422, 0x426, 0x42f, 0x435, 0x439, 0x449, 0x453, 0x458, 0x45b, 0x461, 0x468, 0x46d, 0x471, 0x477, 0x47c, 0x485, 0x48a, 0x490, 0x497, 0x49e, 0x4a5, 0x4a9, 0x4ae, 0x4b1, 0x4b6, 0x4c2, 0x4c8, 0x4cd, 0x4d4, 0x4dc, 0x4e1, 0x4e5, 0x4f5, 0x4fc, 0x500, 0x504, 0x50b, 0x50e, 0x511, 0x515, 0x519, 0x51f, 0x528, 0x534, 0x53b, 0x544, 0x54c, 0x553, 0x561, 0x56e, 0x57b, 0x584, 0x588, 0x596, 0x59e, 0x5a9, 0x5b2, 0x5b8, 0x5c0, 0x5c9, 0x5d3, 0x5d6, 0x5e2, 0x5e5, 0x5ea, 0x5ed, 0x5f7, 0x600, 0x60c, 0x60f, 0x614, 0x617, 0x61a, 0x61d, 0x624, 0x62b, 0x62f, 0x63a, 0x63d, 0x643, 0x648, 0x64c, 0x64f, 0x652, 0x655, 0x65a, 0x664, 0x667, 0x66b, 0x67a, 0x686, 0x68a, 0x68f, 0x694, 0x698, 0x69d, 0x6a6, 0x6b1, 0x6b7, 0x6bf, 0x6c3, 0x6c7, 0x6cd, 0x6d3, 0x6d8, 0x6db, 0x6e9, 0x6f0, 0x6f3, 0x6f6, 0x6fa, 0x700, 0x705, 0x70f, 0x714, 0x717, 0x71a, 0x71d, 0x720, 0x724, 0x727, 0x737, 0x748, 0x74d, 0x74f, 0x751} // idnaSparseValues: 1876 entries, 7504 bytes var idnaSparseValues = [1876]valueRange{ // Block 0x0, offset 0x0 {value: 0x0000, lo: 0x07}, {value: 0xe105, lo: 0x80, hi: 0x96}, {value: 0x0018, lo: 0x97, hi: 0x97}, {value: 0xe105, lo: 0x98, hi: 0x9e}, {value: 0x001f, lo: 0x9f, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xb7}, {value: 0x0008, lo: 0xb8, hi: 0xbf}, // Block 0x1, offset 0x8 {value: 0x0000, lo: 0x10}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0xe01d, lo: 0x81, hi: 0x81}, {value: 0x0008, lo: 0x82, hi: 0x82}, {value: 0x0335, lo: 0x83, hi: 0x83}, {value: 0x034d, lo: 0x84, hi: 0x84}, {value: 0x0365, lo: 0x85, hi: 0x85}, {value: 0xe00d, lo: 0x86, hi: 0x86}, {value: 
0x0008, lo: 0x87, hi: 0x87}, {value: 0xe00d, lo: 0x88, hi: 0x88}, {value: 0x0008, lo: 0x89, hi: 0x89}, {value: 0xe00d, lo: 0x8a, hi: 0x8a}, {value: 0x0008, lo: 0x8b, hi: 0x8b}, {value: 0xe00d, lo: 0x8c, hi: 0x8c}, {value: 0x0008, lo: 0x8d, hi: 0x8d}, {value: 0xe00d, lo: 0x8e, hi: 0x8e}, {value: 0x0008, lo: 0x8f, hi: 0xbf}, // Block 0x2, offset 0x19 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0xaf}, {value: 0x0249, lo: 0xb0, hi: 0xb0}, {value: 0x037d, lo: 0xb1, hi: 0xb1}, {value: 0x0259, lo: 0xb2, hi: 0xb2}, {value: 0x0269, lo: 0xb3, hi: 0xb3}, {value: 0x034d, lo: 0xb4, hi: 0xb4}, {value: 0x0395, lo: 0xb5, hi: 0xb5}, {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, {value: 0x0279, lo: 0xb7, hi: 0xb7}, {value: 0x0289, lo: 0xb8, hi: 0xb8}, {value: 0x0008, lo: 0xb9, hi: 0xbf}, // Block 0x3, offset 0x25 {value: 0x0000, lo: 0x01}, {value: 0x1308, lo: 0x80, hi: 0xbf}, // Block 0x4, offset 0x27 {value: 0x0000, lo: 0x04}, {value: 0x03f5, lo: 0x80, hi: 0x8f}, {value: 0xe105, lo: 0x90, hi: 0x9f}, {value: 0x049d, lo: 0xa0, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, // Block 0x5, offset 0x2c {value: 0x0000, lo: 0x07}, {value: 0xe185, lo: 0x80, hi: 0x8f}, {value: 0x0545, lo: 0x90, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x98}, {value: 0x0008, lo: 0x99, hi: 0x99}, {value: 0x0018, lo: 0x9a, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xa0}, {value: 0x0008, lo: 0xa1, hi: 0xbf}, // Block 0x6, offset 0x34 {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0401, lo: 0x87, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x88}, {value: 0x0018, lo: 0x89, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x90}, {value: 0x1308, lo: 0x91, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbe}, {value: 0x1308, lo: 0xbf, hi: 0xbf}, // Block 0x7, offset 0x3f {value: 0x0000, lo: 0x0b}, {value: 0x0018, lo: 0x80, hi: 0x80}, {value: 0x1308, lo: 0x81, hi: 0x82}, {value: 0x0018, lo: 0x83, hi: 0x83}, {value: 0x1308, lo: 0x84, hi: 0x85}, {value: 0x0018, lo: 0x86, hi: 0x86}, {value: 0x1308, lo: 0x87, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, // Block 0x8, offset 0x4b {value: 0x0000, lo: 0x10}, {value: 0x0018, lo: 0x80, hi: 0x80}, {value: 0x0208, lo: 0x81, hi: 0x87}, {value: 0x0408, lo: 0x88, hi: 0x88}, {value: 0x0208, lo: 0x89, hi: 0x8a}, {value: 0x1308, lo: 0x8b, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xad}, {value: 0x0208, lo: 0xae, hi: 0xaf}, {value: 0x1308, lo: 0xb0, hi: 0xb0}, {value: 0x0408, lo: 0xb1, hi: 0xb3}, {value: 0x0008, lo: 0xb4, hi: 0xb4}, {value: 0x0429, lo: 0xb5, hi: 0xb5}, {value: 0x0451, lo: 0xb6, hi: 0xb6}, {value: 0x0479, lo: 0xb7, hi: 0xb7}, {value: 0x04a1, lo: 0xb8, hi: 0xb8}, {value: 0x0208, lo: 0xb9, hi: 0xbf}, // Block 0x9, offset 0x5c {value: 0x0000, lo: 0x03}, {value: 0x0208, lo: 0x80, hi: 0x87}, {value: 0x0408, lo: 0x88, hi: 0x99}, {value: 0x0208, lo: 0x9a, hi: 0xbf}, // Block 0xa, offset 0x60 {value: 0x0000, lo: 0x0e}, {value: 0x1308, lo: 0x80, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8c}, {value: 0x0408, lo: 0x8d, hi: 0x8d}, {value: 0x0208, lo: 0x8e, hi: 0x98}, {value: 0x0408, lo: 0x99, hi: 0x9b}, {value: 0x0208, lo: 0x9c, hi: 0xaa}, {value: 0x0408, lo: 0xab, hi: 0xac}, {value: 0x0208, lo: 0xad, hi: 0xb0}, {value: 0x0408, lo: 0xb1, hi: 0xb1}, {value: 0x0208, lo: 0xb2, hi: 0xb2}, {value: 0x0408, lo: 0xb3, hi: 0xb4}, {value: 
0x0208, lo: 0xb5, hi: 0xb7}, {value: 0x0408, lo: 0xb8, hi: 0xb9}, {value: 0x0208, lo: 0xba, hi: 0xbf}, // Block 0xb, offset 0x6f {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x1308, lo: 0xa6, hi: 0xb0}, {value: 0x0008, lo: 0xb1, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, // Block 0xc, offset 0x74 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0208, lo: 0x8a, hi: 0xaa}, {value: 0x1308, lo: 0xab, hi: 0xb3}, {value: 0x0008, lo: 0xb4, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, // Block 0xd, offset 0x7b {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x95}, {value: 0x1308, lo: 0x96, hi: 0x99}, {value: 0x0008, lo: 0x9a, hi: 0x9a}, {value: 0x1308, lo: 0x9b, hi: 0xa3}, {value: 0x0008, lo: 0xa4, hi: 0xa4}, {value: 0x1308, lo: 0xa5, hi: 0xa7}, {value: 0x0008, lo: 0xa8, hi: 0xa8}, {value: 0x1308, lo: 0xa9, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, // Block 0xe, offset 0x87 {value: 0x0000, lo: 0x0d}, {value: 0x0408, lo: 0x80, hi: 0x80}, {value: 0x0208, lo: 0x81, hi: 0x85}, {value: 0x0408, lo: 0x86, hi: 0x87}, {value: 0x0208, lo: 0x88, hi: 0x88}, {value: 0x0408, lo: 0x89, hi: 0x89}, {value: 0x0208, lo: 0x8a, hi: 0x93}, {value: 0x0408, lo: 0x94, hi: 0x94}, {value: 0x0208, lo: 0x95, hi: 0x95}, {value: 0x0008, lo: 0x96, hi: 0x98}, {value: 0x1308, lo: 0x99, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0xbf}, // Block 0xf, offset 0x95 {value: 0x0000, lo: 0x0d}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0208, lo: 0xa0, hi: 0xa9}, {value: 0x0408, lo: 0xaa, hi: 0xac}, {value: 0x0008, lo: 0xad, hi: 0xad}, {value: 0x0408, lo: 0xae, hi: 0xae}, {value: 0x0208, lo: 0xaf, hi: 0xb0}, {value: 0x0408, lo: 0xb1, hi: 0xb2}, {value: 0x0208, lo: 0xb3, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xb5}, {value: 0x0208, lo: 0xb6, hi: 0xb8}, {value: 0x0408, lo: 0xb9, hi: 0xb9}, {value: 0x0208, lo: 0xba, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, // Block 0x10, offset 0xa3 {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x93}, {value: 0x1308, lo: 0x94, hi: 0xa1}, {value: 0x0040, lo: 0xa2, hi: 0xa2}, {value: 0x1308, lo: 0xa3, hi: 0xbf}, // Block 0x11, offset 0xa8 {value: 0x0000, lo: 0x08}, {value: 0x1308, lo: 0x80, hi: 0x82}, {value: 0x1008, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0xb9}, {value: 0x1308, lo: 0xba, hi: 0xba}, {value: 0x1008, lo: 0xbb, hi: 0xbb}, {value: 0x1308, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, {value: 0x1008, lo: 0xbe, hi: 0xbf}, // Block 0x12, offset 0xb1 {value: 0x0000, lo: 0x0f}, {value: 0x1308, lo: 0x80, hi: 0x80}, {value: 0x1008, lo: 0x81, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x85}, {value: 0x1008, lo: 0x86, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, {value: 0x1008, lo: 0x8a, hi: 0x8c}, {value: 0x1b08, lo: 0x8d, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x96}, {value: 0x1008, lo: 0x97, hi: 0x97}, {value: 0x0040, lo: 0x98, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, // Block 0x13, offset 0xc1 {value: 0x0000, lo: 0x0d}, {value: 0x1308, lo: 0x80, hi: 0x80}, {value: 0x1008, lo: 0x81, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8d}, {value: 0x0008, lo: 0x8e, hi: 0x90}, {value: 
0x0040, lo: 0x91, hi: 0x91}, {value: 0x0008, lo: 0x92, hi: 0xa8}, {value: 0x0040, lo: 0xa9, hi: 0xa9}, {value: 0x0008, lo: 0xaa, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, {value: 0x1308, lo: 0xbe, hi: 0xbf}, // Block 0x14, offset 0xcf {value: 0x0000, lo: 0x0c}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x1308, lo: 0x81, hi: 0x81}, {value: 0x1008, lo: 0x82, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8d}, {value: 0x0008, lo: 0x8e, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x91}, {value: 0x0008, lo: 0x92, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, {value: 0x1008, lo: 0xbe, hi: 0xbf}, // Block 0x15, offset 0xdc {value: 0x0000, lo: 0x0b}, {value: 0x0040, lo: 0x80, hi: 0x81}, {value: 0x1008, lo: 0x82, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x99}, {value: 0x0008, lo: 0x9a, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xb2}, {value: 0x0008, lo: 0xb3, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, // Block 0x16, offset 0xe8 {value: 0x0000, lo: 0x10}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x89}, {value: 0x1b08, lo: 0x8a, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8e}, {value: 0x1008, lo: 0x8f, hi: 0x91}, {value: 0x1308, lo: 0x92, hi: 0x94}, {value: 0x0040, lo: 0x95, hi: 0x95}, {value: 0x1308, lo: 0x96, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x97}, {value: 0x1008, lo: 0x98, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xb1}, {value: 0x1008, lo: 0xb2, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, // Block 0x17, offset 0xf9 {value: 0x0000, lo: 0x09}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0xb0}, {value: 0x1308, lo: 0xb1, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb2}, {value: 0x08f1, lo: 0xb3, hi: 0xb3}, {value: 0x1308, lo: 0xb4, hi: 0xb9}, {value: 0x1b08, lo: 0xba, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbe}, {value: 0x0018, lo: 0xbf, hi: 0xbf}, // Block 0x18, offset 0x103 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x1308, lo: 0x87, hi: 0x8e}, {value: 0x0018, lo: 0x8f, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0018, lo: 0x9a, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0xbf}, // Block 0x19, offset 0x10a {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x85}, {value: 0x0008, lo: 0x86, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, {value: 0x1308, lo: 0x88, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9b}, {value: 0x0961, lo: 0x9c, hi: 0x9c}, {value: 0x0999, lo: 0x9d, hi: 0x9d}, {value: 0x0008, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, // Block 0x1a, offset 0x117 {value: 0x0000, lo: 0x10}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8a}, {value: 0x0008, lo: 0x8b, hi: 0x8b}, {value: 0xe03d, lo: 0x8c, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x97}, {value: 0x1308, lo: 0x98, hi: 0x99}, {value: 0x0018, lo: 0x9a, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xb4}, {value: 0x1308, lo: 0xb5, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xb6}, {value: 0x1308, lo: 0xb7, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xb8}, 
{value: 0x1308, lo: 0xb9, hi: 0xb9}, {value: 0x0018, lo: 0xba, hi: 0xbd}, {value: 0x1008, lo: 0xbe, hi: 0xbf}, // Block 0x1b, offset 0x128 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x85}, {value: 0x1308, lo: 0x86, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8d}, {value: 0x0018, lo: 0x8e, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0xbf}, // Block 0x1c, offset 0x12f {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0xaa}, {value: 0x1008, lo: 0xab, hi: 0xac}, {value: 0x1308, lo: 0xad, hi: 0xb0}, {value: 0x1008, lo: 0xb1, hi: 0xb1}, {value: 0x1308, lo: 0xb2, hi: 0xb7}, {value: 0x1008, lo: 0xb8, hi: 0xb8}, {value: 0x1b08, lo: 0xb9, hi: 0xba}, {value: 0x1008, lo: 0xbb, hi: 0xbc}, {value: 0x1308, lo: 0xbd, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, // Block 0x1d, offset 0x13a {value: 0x0000, lo: 0x0e}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0018, lo: 0x8a, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x95}, {value: 0x1008, lo: 0x96, hi: 0x97}, {value: 0x1308, lo: 0x98, hi: 0x99}, {value: 0x0008, lo: 0x9a, hi: 0x9d}, {value: 0x1308, lo: 0x9e, hi: 0xa0}, {value: 0x0008, lo: 0xa1, hi: 0xa1}, {value: 0x1008, lo: 0xa2, hi: 0xa4}, {value: 0x0008, lo: 0xa5, hi: 0xa6}, {value: 0x1008, lo: 0xa7, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xb0}, {value: 0x1308, lo: 0xb1, hi: 0xb4}, {value: 0x0008, lo: 0xb5, hi: 0xbf}, // Block 0x1e, offset 0x149 {value: 0x0000, lo: 0x0d}, {value: 0x0008, lo: 0x80, hi: 0x81}, {value: 0x1308, lo: 0x82, hi: 0x82}, {value: 0x1008, lo: 0x83, hi: 0x84}, {value: 0x1308, lo: 0x85, hi: 0x86}, {value: 0x1008, lo: 0x87, hi: 0x8c}, {value: 0x1308, lo: 0x8d, hi: 0x8d}, {value: 0x0008, lo: 0x8e, hi: 0x8e}, {value: 0x1008, lo: 0x8f, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x1008, lo: 0x9a, hi: 0x9c}, {value: 0x1308, lo: 0x9d, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, // Block 0x1f, offset 0x157 {value: 0x0000, lo: 0x09}, {value: 0x0040, lo: 0x80, hi: 0x86}, {value: 0x055d, lo: 0x87, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8c}, {value: 0x055d, lo: 0x8d, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xba}, {value: 0x0018, lo: 0xbb, hi: 0xbb}, {value: 0xe105, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbf}, // Block 0x20, offset 0x161 {value: 0x0000, lo: 0x01}, {value: 0x0018, lo: 0x80, hi: 0xbf}, // Block 0x21, offset 0x163 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xbf}, // Block 0x22, offset 0x167 {value: 0x0000, lo: 0x01}, {value: 0x0008, lo: 0x80, hi: 0xbf}, // Block 0x23, offset 0x169 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, {value: 0x0008, lo: 0x8a, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0x98}, {value: 0x0040, lo: 0x99, hi: 0x99}, {value: 0x0008, lo: 0x9a, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, // Block 0x24, offset 0x175 {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, {value: 0x0008, lo: 0x8a, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb7}, {value: 0x0008, lo: 0xb8, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, // 
Block 0x25, offset 0x180 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0040, lo: 0x81, hi: 0x81}, {value: 0x0008, lo: 0x82, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0xbf}, // Block 0x26, offset 0x188 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x91}, {value: 0x0008, lo: 0x92, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0xbf}, // Block 0x27, offset 0x18e {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0x9c}, {value: 0x1308, lo: 0x9d, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, // Block 0x28, offset 0x194 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, // Block 0x29, offset 0x199 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb7}, {value: 0xe045, lo: 0xb8, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, // Block 0x2a, offset 0x19e {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0xbf}, // Block 0x2b, offset 0x1a1 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xac}, {value: 0x0018, lo: 0xad, hi: 0xae}, {value: 0x0008, lo: 0xaf, hi: 0xbf}, // Block 0x2c, offset 0x1a5 {value: 0x0000, lo: 0x05}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9c}, {value: 0x0040, lo: 0x9d, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, // Block 0x2d, offset 0x1ab {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xaa}, {value: 0x0018, lo: 0xab, hi: 0xb0}, {value: 0x0008, lo: 0xb1, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, // Block 0x2e, offset 0x1b0 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8d}, {value: 0x0008, lo: 0x8e, hi: 0x91}, {value: 0x1308, lo: 0x92, hi: 0x93}, {value: 0x1b08, lo: 0x94, hi: 0x94}, {value: 0x0040, lo: 0x95, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xb1}, {value: 0x1308, lo: 0xb2, hi: 0xb3}, {value: 0x1b08, lo: 0xb4, hi: 0xb4}, {value: 0x0018, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, // Block 0x2f, offset 0x1bc {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x91}, {value: 0x1308, lo: 0x92, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xb1}, {value: 0x1308, lo: 0xb2, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, // Block 0x30, offset 0x1c6 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0xb3}, {value: 0x1340, lo: 0xb4, hi: 0xb5}, {value: 0x1008, lo: 0xb6, hi: 0xb6}, {value: 0x1308, lo: 0xb7, hi: 0xbd}, {value: 0x1008, lo: 0xbe, hi: 0xbf}, // Block 0x31, offset 0x1cc {value: 0x0000, lo: 0x10}, {value: 0x1008, lo: 0x80, hi: 0x85}, {value: 0x1308, lo: 0x86, hi: 0x86}, {value: 0x1008, lo: 0x87, hi: 0x88}, {value: 0x1308, lo: 0x89, hi: 0x91}, {value: 0x1b08, lo: 0x92, hi: 0x92}, {value: 0x1308, lo: 0x93, hi: 0x93}, {value: 0x0018, lo: 0x94, hi: 0x96}, {value: 0x0008, lo: 0x97, hi: 0x97}, {value: 0x0018, lo: 0x98, hi: 0x9b}, {value: 0x0008, lo: 0x9c, hi: 0x9c}, {value: 0x1308, lo: 0x9d, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 
0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, // Block 0x32, offset 0x1dd {value: 0x0000, lo: 0x09}, {value: 0x0018, lo: 0x80, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x86}, {value: 0x0218, lo: 0x87, hi: 0x87}, {value: 0x0018, lo: 0x88, hi: 0x8a}, {value: 0x13c0, lo: 0x8b, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0208, lo: 0xa0, hi: 0xbf}, // Block 0x33, offset 0x1e7 {value: 0x0000, lo: 0x02}, {value: 0x0208, lo: 0x80, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, // Block 0x34, offset 0x1ea {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x84}, {value: 0x1308, lo: 0x85, hi: 0x86}, {value: 0x0208, lo: 0x87, hi: 0xa8}, {value: 0x1308, lo: 0xa9, hi: 0xa9}, {value: 0x0208, lo: 0xaa, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, // Block 0x35, offset 0x1f2 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, // Block 0x36, offset 0x1f5 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, {value: 0x1308, lo: 0xa0, hi: 0xa2}, {value: 0x1008, lo: 0xa3, hi: 0xa6}, {value: 0x1308, lo: 0xa7, hi: 0xa8}, {value: 0x1008, lo: 0xa9, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x1008, lo: 0xb0, hi: 0xb1}, {value: 0x1308, lo: 0xb2, hi: 0xb2}, {value: 0x1008, lo: 0xb3, hi: 0xb8}, {value: 0x1308, lo: 0xb9, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, // Block 0x37, offset 0x202 {value: 0x0000, lo: 0x07}, {value: 0x0018, lo: 0x80, hi: 0x80}, {value: 0x0040, lo: 0x81, hi: 0x83}, {value: 0x0018, lo: 0x84, hi: 0x85}, {value: 0x0008, lo: 0x86, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, // Block 0x38, offset 0x20a {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, // Block 0x39, offset 0x20e {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0028, lo: 0x9a, hi: 0x9a}, {value: 0x0040, lo: 0x9b, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0xbf}, // Block 0x3a, offset 0x215 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x1308, lo: 0x97, hi: 0x98}, {value: 0x1008, lo: 0x99, hi: 0x9a}, {value: 0x1308, lo: 0x9b, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, // Block 0x3b, offset 0x21d {value: 0x0000, lo: 0x0f}, {value: 0x0008, lo: 0x80, hi: 0x94}, {value: 0x1008, lo: 0x95, hi: 0x95}, {value: 0x1308, lo: 0x96, hi: 0x96}, {value: 0x1008, lo: 0x97, hi: 0x97}, {value: 0x1308, lo: 0x98, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, {value: 0x1b08, lo: 0xa0, hi: 0xa0}, {value: 0x1008, lo: 0xa1, hi: 0xa1}, {value: 0x1308, lo: 0xa2, hi: 0xa2}, {value: 0x1008, lo: 0xa3, hi: 0xa4}, {value: 0x1308, lo: 0xa5, hi: 0xac}, {value: 0x1008, lo: 0xad, hi: 0xb2}, {value: 0x1308, lo: 0xb3, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbe}, {value: 0x1308, lo: 0xbf, hi: 0xbf}, // Block 0x3c, offset 0x22d {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa6}, {value: 0x0008, lo: 0xa7, hi: 0xa7}, {value: 0x0018, lo: 
0xa8, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x1308, lo: 0xb0, hi: 0xbd}, {value: 0x1318, lo: 0xbe, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, // Block 0x3d, offset 0x239 {value: 0x0000, lo: 0x01}, {value: 0x0040, lo: 0x80, hi: 0xbf}, // Block 0x3e, offset 0x23b {value: 0x0000, lo: 0x09}, {value: 0x1308, lo: 0x80, hi: 0x83}, {value: 0x1008, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0xb3}, {value: 0x1308, lo: 0xb4, hi: 0xb4}, {value: 0x1008, lo: 0xb5, hi: 0xb5}, {value: 0x1308, lo: 0xb6, hi: 0xba}, {value: 0x1008, lo: 0xbb, hi: 0xbb}, {value: 0x1308, lo: 0xbc, hi: 0xbc}, {value: 0x1008, lo: 0xbd, hi: 0xbf}, // Block 0x3f, offset 0x245 {value: 0x0000, lo: 0x0b}, {value: 0x1008, lo: 0x80, hi: 0x81}, {value: 0x1308, lo: 0x82, hi: 0x82}, {value: 0x1008, lo: 0x83, hi: 0x83}, {value: 0x1808, lo: 0x84, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0018, lo: 0x9a, hi: 0xaa}, {value: 0x1308, lo: 0xab, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, // Block 0x40, offset 0x251 {value: 0x0000, lo: 0x0b}, {value: 0x1308, lo: 0x80, hi: 0x81}, {value: 0x1008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xa0}, {value: 0x1008, lo: 0xa1, hi: 0xa1}, {value: 0x1308, lo: 0xa2, hi: 0xa5}, {value: 0x1008, lo: 0xa6, hi: 0xa7}, {value: 0x1308, lo: 0xa8, hi: 0xa9}, {value: 0x1808, lo: 0xaa, hi: 0xaa}, {value: 0x1b08, lo: 0xab, hi: 0xab}, {value: 0x1308, lo: 0xac, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xbf}, // Block 0x41, offset 0x25d {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x1308, lo: 0xa6, hi: 0xa6}, {value: 0x1008, lo: 0xa7, hi: 0xa7}, {value: 0x1308, lo: 0xa8, hi: 0xa9}, {value: 0x1008, lo: 0xaa, hi: 0xac}, {value: 0x1308, lo: 0xad, hi: 0xad}, {value: 0x1008, lo: 0xae, hi: 0xae}, {value: 0x1308, lo: 0xaf, hi: 0xb1}, {value: 0x1808, lo: 0xb2, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbb}, {value: 0x0018, lo: 0xbc, hi: 0xbf}, // Block 0x42, offset 0x269 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa3}, {value: 0x1008, lo: 0xa4, hi: 0xab}, {value: 0x1308, lo: 0xac, hi: 0xb3}, {value: 0x1008, lo: 0xb4, hi: 0xb5}, {value: 0x1308, lo: 0xb6, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xba}, {value: 0x0018, lo: 0xbb, hi: 0xbf}, // Block 0x43, offset 0x271 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0x8c}, {value: 0x0008, lo: 0x8d, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, // Block 0x44, offset 0x276 {value: 0x0000, lo: 0x09}, {value: 0x0e29, lo: 0x80, hi: 0x80}, {value: 0x0e41, lo: 0x81, hi: 0x81}, {value: 0x0e59, lo: 0x82, hi: 0x82}, {value: 0x0e71, lo: 0x83, hi: 0x83}, {value: 0x0e89, lo: 0x84, hi: 0x85}, {value: 0x0ea1, lo: 0x86, hi: 0x86}, {value: 0x0eb9, lo: 0x87, hi: 0x87}, {value: 0x057d, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0xbf}, // Block 0x45, offset 0x280 {value: 0x0000, lo: 0x10}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x1308, lo: 0x90, hi: 0x92}, {value: 0x0018, lo: 0x93, hi: 0x93}, {value: 0x1308, lo: 0x94, hi: 0xa0}, {value: 0x1008, lo: 0xa1, hi: 0xa1}, {value: 0x1308, lo: 0xa2, hi: 0xa8}, {value: 0x0008, lo: 0xa9, hi: 0xac}, {value: 0x1308, lo: 0xad, hi: 0xad}, {value: 0x0008, lo: 0xae, hi: 0xb1}, {value: 0x1008, lo: 0xb2, hi: 0xb3}, {value: 0x1308, lo: 0xb4, hi: 0xb4}, {value: 0x0008, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xb7}, {value: 0x1308, lo: 0xb8, hi: 0xb9}, {value: 
0x0040, lo: 0xba, hi: 0xbf}, // Block 0x46, offset 0x291 {value: 0x0000, lo: 0x03}, {value: 0x1308, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xba}, {value: 0x1308, lo: 0xbb, hi: 0xbf}, // Block 0x47, offset 0x295 {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x87}, {value: 0xe045, lo: 0x88, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, {value: 0xe045, lo: 0x98, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa7}, {value: 0xe045, lo: 0xa8, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb7}, {value: 0xe045, lo: 0xb8, hi: 0xbf}, // Block 0x48, offset 0x2a0 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x1318, lo: 0x90, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xbf}, // Block 0x49, offset 0x2a4 {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x88}, {value: 0x24c1, lo: 0x89, hi: 0x89}, {value: 0x0018, lo: 0x8a, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, // Block 0x4a, offset 0x2ad {value: 0x0000, lo: 0x07}, {value: 0x0018, lo: 0x80, hi: 0xab}, {value: 0x24f1, lo: 0xac, hi: 0xac}, {value: 0x2529, lo: 0xad, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xae}, {value: 0x2579, lo: 0xaf, hi: 0xaf}, {value: 0x25b1, lo: 0xb0, hi: 0xb0}, {value: 0x0018, lo: 0xb1, hi: 0xbf}, // Block 0x4b, offset 0x2b5 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x9f}, {value: 0x0080, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xad}, {value: 0x0080, lo: 0xae, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, // Block 0x4c, offset 0x2bb {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0xa8}, {value: 0x09c5, lo: 0xa9, hi: 0xa9}, {value: 0x09e5, lo: 0xaa, hi: 0xaa}, {value: 0x0018, lo: 0xab, hi: 0xbf}, // Block 0x4d, offset 0x2c0 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, // Block 0x4e, offset 0x2c3 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xa6}, {value: 0x0040, lo: 0xa7, hi: 0xbf}, // Block 0x4f, offset 0x2c6 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x28c1, lo: 0x8c, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0xbf}, // Block 0x50, offset 0x2ca {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0e66, lo: 0xb4, hi: 0xb4}, {value: 0x292a, lo: 0xb5, hi: 0xb5}, {value: 0x0e86, lo: 0xb6, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, // Block 0x51, offset 0x2d0 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x9b}, {value: 0x2941, lo: 0x9c, hi: 0x9c}, {value: 0x0018, lo: 0x9d, hi: 0xbf}, // Block 0x52, offset 0x2d4 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xbf}, // Block 0x53, offset 0x2d8 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, {value: 0x0018, lo: 0x98, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbc}, {value: 0x0018, lo: 0xbd, hi: 0xbf}, // Block 0x54, offset 0x2de {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, {value: 0x0018, lo: 0x8a, hi: 0x91}, {value: 0x0040, lo: 0x92, hi: 0xab}, {value: 0x0018, lo: 0xac, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, // Block 0x55, offset 0x2e5 {value: 0x0000, lo: 0x05}, {value: 0xe185, lo: 0x80, hi: 0x8f}, {value: 0x03f5, lo: 0x90, hi: 0x9f}, {value: 0x0ea5, 
lo: 0xa0, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, // Block 0x56, offset 0x2eb {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x0040, lo: 0xa6, hi: 0xa6}, {value: 0x0008, lo: 0xa7, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xac}, {value: 0x0008, lo: 0xad, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, // Block 0x57, offset 0x2f3 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xae}, {value: 0xe075, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xbe}, {value: 0x1b08, lo: 0xbf, hi: 0xbf}, // Block 0x58, offset 0x2fa {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa6}, {value: 0x0040, lo: 0xa7, hi: 0xa7}, {value: 0x0008, lo: 0xa8, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xb7}, {value: 0x0008, lo: 0xb8, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, // Block 0x59, offset 0x305 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x8e}, {value: 0x0040, lo: 0x8f, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, {value: 0x1308, lo: 0xa0, hi: 0xbf}, // Block 0x5a, offset 0x30f {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xae}, {value: 0x0008, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, // Block 0x5b, offset 0x313 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0xbf}, // Block 0x5c, offset 0x316 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9e}, {value: 0x0edd, lo: 0x9f, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbf}, // Block 0x5d, offset 0x31c {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xb2}, {value: 0x0efd, lo: 0xb3, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, // Block 0x5e, offset 0x320 {value: 0x0020, lo: 0x01}, {value: 0x0f1d, lo: 0x80, hi: 0xbf}, // Block 0x5f, offset 0x322 {value: 0x0020, lo: 0x02}, {value: 0x171d, lo: 0x80, hi: 0x8f}, {value: 0x18fd, lo: 0x90, hi: 0xbf}, // Block 0x60, offset 0x325 {value: 0x0020, lo: 0x01}, {value: 0x1efd, lo: 0x80, hi: 0xbf}, // Block 0x61, offset 0x327 {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0xbf}, // Block 0x62, offset 0x32a {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x98}, {value: 0x1308, lo: 0x99, hi: 0x9a}, {value: 0x29e2, lo: 0x9b, hi: 0x9b}, {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, {value: 0x0008, lo: 0x9d, hi: 0x9e}, {value: 0x2a31, lo: 0x9f, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa0}, {value: 0x0008, lo: 0xa1, hi: 0xbf}, // Block 0x63, offset 0x334 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xbe}, {value: 0x2a69, lo: 0xbf, hi: 0xbf}, // Block 0x64, offset 0x337 {value: 0x0000, lo: 0x0e}, {value: 0x0040, lo: 0x80, hi: 0x84}, {value: 0x0008, lo: 0x85, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xb0}, {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, {value: 
0x2abd, lo: 0xb7, hi: 0xb7}, {value: 0x2add, lo: 0xb8, hi: 0xb9}, {value: 0x2afd, lo: 0xba, hi: 0xbb}, {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, {value: 0x2afd, lo: 0xbe, hi: 0xbf}, // Block 0x65, offset 0x346 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, // Block 0x66, offset 0x34a {value: 0x0030, lo: 0x04}, {value: 0x2aa2, lo: 0x80, hi: 0x9d}, {value: 0x305a, lo: 0x9e, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, {value: 0x30a2, lo: 0xa0, hi: 0xbf}, // Block 0x67, offset 0x34f {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0xbf}, // Block 0x68, offset 0x352 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0040, lo: 0x8d, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, // Block 0x69, offset 0x356 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, // Block 0x6a, offset 0x35b {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xbf}, // Block 0x6b, offset 0x360 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x0018, lo: 0xa6, hi: 0xaf}, {value: 0x1308, lo: 0xb0, hi: 0xb1}, {value: 0x0018, lo: 0xb2, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, // Block 0x6c, offset 0x366 {value: 0x0000, lo: 0x05}, {value: 0x0040, lo: 0x80, hi: 0xb6}, {value: 0x0008, lo: 0xb7, hi: 0xb7}, {value: 0x2009, lo: 0xb8, hi: 0xb8}, {value: 0x6e89, lo: 0xb9, hi: 0xb9}, {value: 0x0008, lo: 0xba, hi: 0xbf}, // Block 0x6d, offset 0x36c {value: 0x0000, lo: 0x0e}, {value: 0x0008, lo: 0x80, hi: 0x81}, {value: 0x1308, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0x85}, {value: 0x1b08, lo: 0x86, hi: 0x86}, {value: 0x0008, lo: 0x87, hi: 0x8a}, {value: 0x1308, lo: 0x8b, hi: 0x8b}, {value: 0x0008, lo: 0x8c, hi: 0xa2}, {value: 0x1008, lo: 0xa3, hi: 0xa4}, {value: 0x1308, lo: 0xa5, hi: 0xa6}, {value: 0x1008, lo: 0xa7, hi: 0xa7}, {value: 0x0018, lo: 0xa8, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, // Block 0x6e, offset 0x37b {value: 0x0000, lo: 0x05}, {value: 0x0208, lo: 0x80, hi: 0xb1}, {value: 0x0108, lo: 0xb2, hi: 0xb2}, {value: 0x0008, lo: 0xb3, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, // Block 0x6f, offset 0x381 {value: 0x0000, lo: 0x03}, {value: 0x1008, lo: 0x80, hi: 0x81}, {value: 0x0008, lo: 0x82, hi: 0xb3}, {value: 0x1008, lo: 0xb4, hi: 0xbf}, // Block 0x70, offset 0x385 {value: 0x0000, lo: 0x0e}, {value: 0x1008, lo: 0x80, hi: 0x83}, {value: 0x1b08, lo: 0x84, hi: 0x84}, {value: 0x1308, lo: 0x85, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x8d}, {value: 0x0018, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x1308, lo: 0xa0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xba}, {value: 0x0008, lo: 0xbb, hi: 0xbb}, {value: 0x0018, lo: 0xbc, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, // Block 0x71, offset 0x394 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa5}, {value: 0x1308, lo: 0xa6, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, // Block 0x72, offset 0x399 {value: 0x0000, lo: 0x07}, {value: 0x0008, 
lo: 0x80, hi: 0x86}, {value: 0x1308, lo: 0x87, hi: 0x91}, {value: 0x1008, lo: 0x92, hi: 0x92}, {value: 0x1808, lo: 0x93, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, // Block 0x73, offset 0x3a1 {value: 0x0000, lo: 0x09}, {value: 0x1308, lo: 0x80, hi: 0x82}, {value: 0x1008, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0xb2}, {value: 0x1308, lo: 0xb3, hi: 0xb3}, {value: 0x1008, lo: 0xb4, hi: 0xb5}, {value: 0x1308, lo: 0xb6, hi: 0xb9}, {value: 0x1008, lo: 0xba, hi: 0xbb}, {value: 0x1308, lo: 0xbc, hi: 0xbc}, {value: 0x1008, lo: 0xbd, hi: 0xbf}, // Block 0x74, offset 0x3ab {value: 0x0000, lo: 0x0a}, {value: 0x1808, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8e}, {value: 0x0008, lo: 0x8f, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa4}, {value: 0x1308, lo: 0xa5, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, // Block 0x75, offset 0x3b6 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xa8}, {value: 0x1308, lo: 0xa9, hi: 0xae}, {value: 0x1008, lo: 0xaf, hi: 0xb0}, {value: 0x1308, lo: 0xb1, hi: 0xb2}, {value: 0x1008, lo: 0xb3, hi: 0xb4}, {value: 0x1308, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, // Block 0x76, offset 0x3be {value: 0x0000, lo: 0x10}, {value: 0x0008, lo: 0x80, hi: 0x82}, {value: 0x1308, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x8b}, {value: 0x1308, lo: 0x8c, hi: 0x8c}, {value: 0x1008, lo: 0x8d, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9b}, {value: 0x0018, lo: 0x9c, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xb9}, {value: 0x0008, lo: 0xba, hi: 0xba}, {value: 0x1008, lo: 0xbb, hi: 0xbb}, {value: 0x1308, lo: 0xbc, hi: 0xbc}, {value: 0x1008, lo: 0xbd, hi: 0xbd}, {value: 0x0008, lo: 0xbe, hi: 0xbf}, // Block 0x77, offset 0x3cf {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xaf}, {value: 0x1308, lo: 0xb0, hi: 0xb0}, {value: 0x0008, lo: 0xb1, hi: 0xb1}, {value: 0x1308, lo: 0xb2, hi: 0xb4}, {value: 0x0008, lo: 0xb5, hi: 0xb6}, {value: 0x1308, lo: 0xb7, hi: 0xb8}, {value: 0x0008, lo: 0xb9, hi: 0xbd}, {value: 0x1308, lo: 0xbe, hi: 0xbf}, // Block 0x78, offset 0x3d8 {value: 0x0000, lo: 0x0f}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x1308, lo: 0x81, hi: 0x81}, {value: 0x0008, lo: 0x82, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x9a}, {value: 0x0008, lo: 0x9b, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xaa}, {value: 0x1008, lo: 0xab, hi: 0xab}, {value: 0x1308, lo: 0xac, hi: 0xad}, {value: 0x1008, lo: 0xae, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xb4}, {value: 0x1008, lo: 0xb5, hi: 0xb5}, {value: 0x1b08, lo: 0xb6, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, // Block 0x79, offset 0x3e8 {value: 0x0000, lo: 0x0c}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x88}, {value: 0x0008, lo: 0x89, hi: 0x8e}, {value: 0x0040, lo: 0x8f, hi: 0x90}, {value: 0x0008, lo: 0x91, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa6}, {value: 0x0040, lo: 0xa7, hi: 0xa7}, {value: 0x0008, lo: 0xa8, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, // Block 0x7a, offset 0x3f5 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 
0x80, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9b}, {value: 0x4465, lo: 0x9c, hi: 0x9c}, {value: 0x447d, lo: 0x9d, hi: 0x9d}, {value: 0x2971, lo: 0x9e, hi: 0x9e}, {value: 0xe06d, lo: 0x9f, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa5}, {value: 0x0040, lo: 0xa6, hi: 0xaf}, {value: 0x4495, lo: 0xb0, hi: 0xbf}, // Block 0x7b, offset 0x3ff {value: 0x0000, lo: 0x04}, {value: 0x44b5, lo: 0x80, hi: 0x8f}, {value: 0x44d5, lo: 0x90, hi: 0x9f}, {value: 0x44f5, lo: 0xa0, hi: 0xaf}, {value: 0x44d5, lo: 0xb0, hi: 0xbf}, // Block 0x7c, offset 0x404 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0xa2}, {value: 0x1008, lo: 0xa3, hi: 0xa4}, {value: 0x1308, lo: 0xa5, hi: 0xa5}, {value: 0x1008, lo: 0xa6, hi: 0xa7}, {value: 0x1308, lo: 0xa8, hi: 0xa8}, {value: 0x1008, lo: 0xa9, hi: 0xaa}, {value: 0x0018, lo: 0xab, hi: 0xab}, {value: 0x1008, lo: 0xac, hi: 0xac}, {value: 0x1b08, lo: 0xad, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, // Block 0x7d, offset 0x411 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, // Block 0x7e, offset 0x415 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8a}, {value: 0x0018, lo: 0x8b, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, // Block 0x7f, offset 0x41a {value: 0x0020, lo: 0x01}, {value: 0x4515, lo: 0x80, hi: 0xbf}, // Block 0x80, offset 0x41c {value: 0x0020, lo: 0x03}, {value: 0x4d15, lo: 0x80, hi: 0x94}, {value: 0x4ad5, lo: 0x95, hi: 0x95}, {value: 0x4fb5, lo: 0x96, hi: 0xbf}, // Block 0x81, offset 0x420 {value: 0x0020, lo: 0x01}, {value: 0x54f5, lo: 0x80, hi: 0xbf}, // Block 0x82, offset 0x422 {value: 0x0020, lo: 0x03}, {value: 0x5cf5, lo: 0x80, hi: 0x84}, {value: 0x5655, lo: 0x85, hi: 0x85}, {value: 0x5d95, lo: 0x86, hi: 0xbf}, // Block 0x83, offset 0x426 {value: 0x0020, lo: 0x08}, {value: 0x6b55, lo: 0x80, hi: 0x8f}, {value: 0x6d15, lo: 0x90, hi: 0x90}, {value: 0x6d55, lo: 0x91, hi: 0xab}, {value: 0x6ea1, lo: 0xac, hi: 0xac}, {value: 0x70b5, lo: 0xad, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x70d5, lo: 0xb0, hi: 0xbf}, // Block 0x84, offset 0x42f {value: 0x0020, lo: 0x05}, {value: 0x72d5, lo: 0x80, hi: 0xad}, {value: 0x6535, lo: 0xae, hi: 0xae}, {value: 0x7895, lo: 0xaf, hi: 0xb5}, {value: 0x6f55, lo: 0xb6, hi: 0xb6}, {value: 0x7975, lo: 0xb7, hi: 0xbf}, // Block 0x85, offset 0x435 {value: 0x0028, lo: 0x03}, {value: 0x7c21, lo: 0x80, hi: 0x82}, {value: 0x7be1, lo: 0x83, hi: 0x83}, {value: 0x7c99, lo: 0x84, hi: 0xbf}, // Block 0x86, offset 0x439 {value: 0x0038, lo: 0x0f}, {value: 0x9db1, lo: 0x80, hi: 0x83}, {value: 0x9e59, lo: 0x84, hi: 0x85}, {value: 0x9e91, lo: 0x86, hi: 0x87}, {value: 0x9ec9, lo: 0x88, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x91}, {value: 0xa089, lo: 0x92, hi: 0x97}, {value: 0xa1a1, lo: 0x98, hi: 0x9c}, {value: 0xa281, lo: 0x9d, hi: 0xb3}, {value: 0x9d41, lo: 0xb4, hi: 0xb4}, {value: 0x9db1, lo: 0xb5, hi: 0xb5}, {value: 0xa789, lo: 0xb6, hi: 0xbb}, {value: 0xa869, lo: 0xbc, hi: 0xbc}, {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, // Block 0x87, offset 0x449 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8c}, {value: 0x0008, lo: 0x8d, hi: 0xa6}, {value: 0x0040, lo: 0xa7, hi: 0xa7}, {value: 0x0008, lo: 0xa8, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbb}, {value: 
0x0008, lo: 0xbc, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, // Block 0x88, offset 0x453 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, // Block 0x89, offset 0x458 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, // Block 0x8a, offset 0x45b {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x82}, {value: 0x0040, lo: 0x83, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, // Block 0x8b, offset 0x461 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x8e}, {value: 0x0040, lo: 0x8f, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa0}, {value: 0x0040, lo: 0xa1, hi: 0xbf}, // Block 0x8c, offset 0x468 {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbc}, {value: 0x1308, lo: 0xbd, hi: 0xbd}, {value: 0x0040, lo: 0xbe, hi: 0xbf}, // Block 0x8d, offset 0x46d {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x9c}, {value: 0x0040, lo: 0x9d, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, // Block 0x8e, offset 0x471 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x90}, {value: 0x0040, lo: 0x91, hi: 0x9f}, {value: 0x1308, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, // Block 0x8f, offset 0x477 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, // Block 0x90, offset 0x47c {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x81}, {value: 0x0008, lo: 0x82, hi: 0x89}, {value: 0x0018, lo: 0x8a, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xb5}, {value: 0x1308, lo: 0xb6, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbf}, // Block 0x91, offset 0x485 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, // Block 0x92, offset 0x48a {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0xbf}, // Block 0x93, offset 0x490 {value: 0x0000, lo: 0x06}, {value: 0xe145, lo: 0x80, hi: 0x87}, {value: 0xe1c5, lo: 0x88, hi: 0x8f}, {value: 0xe145, lo: 0x90, hi: 0x97}, {value: 0x8ad5, lo: 0x98, hi: 0x9f}, {value: 0x8aed, lo: 0xa0, hi: 0xa7}, {value: 0x0008, lo: 0xa8, hi: 0xbf}, // Block 0x94, offset 0x497 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x8aed, lo: 0xb0, hi: 0xb7}, {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, // Block 0x95, offset 0x49e {value: 0x0000, lo: 0x06}, {value: 0xe145, lo: 0x80, hi: 0x87}, {value: 0xe1c5, lo: 0x88, hi: 0x8f}, {value: 0xe145, lo: 0x90, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0xbb}, {value: 0x0040, lo: 0xbc, hi: 0xbf}, // Block 0x96, offset 0x4a5 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, 
// Block 0x97, offset 0x4a9 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xae}, {value: 0x0018, lo: 0xaf, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, // Block 0x98, offset 0x4ae {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, // Block 0x99, offset 0x4b1 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xbf}, // Block 0x9a, offset 0x4b6 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, {value: 0x0008, lo: 0x8a, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb6}, {value: 0x0008, lo: 0xb7, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbb}, {value: 0x0008, lo: 0xbc, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, // Block 0x9b, offset 0x4c2 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x96}, {value: 0x0018, lo: 0x97, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, // Block 0x9c, offset 0x4c8 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0xa6}, {value: 0x0018, lo: 0xa7, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, // Block 0x9d, offset 0x4cd {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb3}, {value: 0x0008, lo: 0xb4, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xba}, {value: 0x0018, lo: 0xbb, hi: 0xbf}, // Block 0x9e, offset 0x4d4 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0x95}, {value: 0x0018, lo: 0x96, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbe}, {value: 0x0018, lo: 0xbf, hi: 0xbf}, // Block 0x9f, offset 0x4dc {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbb}, {value: 0x0018, lo: 0xbc, hi: 0xbd}, {value: 0x0008, lo: 0xbe, hi: 0xbf}, // Block 0xa0, offset 0x4e1 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x91}, {value: 0x0018, lo: 0x92, hi: 0xbf}, // Block 0xa1, offset 0x4e5 {value: 0x0000, lo: 0x0f}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x1308, lo: 0x81, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x84}, {value: 0x1308, lo: 0x85, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x8b}, {value: 0x1308, lo: 0x8c, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x94}, {value: 0x0008, lo: 0x95, hi: 0x97}, {value: 0x0040, lo: 0x98, hi: 0x98}, {value: 0x0008, lo: 0x99, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xb7}, {value: 0x1308, lo: 0xb8, hi: 0xba}, {value: 0x0040, lo: 0xbb, hi: 0xbe}, {value: 0x1b08, lo: 0xbf, hi: 0xbf}, // Block 0xa2, offset 0x4f5 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x98}, {value: 0x0040, lo: 0x99, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbc}, {value: 0x0018, lo: 0xbd, hi: 0xbf}, // Block 0xa3, offset 0x4fc {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x9c}, {value: 0x0018, lo: 0x9d, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, // Block 0xa4, offset 0x500 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xb5}, {value: 
0x0040, lo: 0xb6, hi: 0xb8}, {value: 0x0018, lo: 0xb9, hi: 0xbf}, // Block 0xa5, offset 0x504 {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x95}, {value: 0x0040, lo: 0x96, hi: 0x97}, {value: 0x0018, lo: 0x98, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xbf}, // Block 0xa6, offset 0x50b {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0xbf}, // Block 0xa7, offset 0x50e {value: 0x0000, lo: 0x02}, {value: 0x03dd, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbf}, // Block 0xa8, offset 0x511 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xb9}, {value: 0x0018, lo: 0xba, hi: 0xbf}, // Block 0xa9, offset 0x515 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, // Block 0xaa, offset 0x519 {value: 0x0000, lo: 0x05}, {value: 0x1008, lo: 0x80, hi: 0x80}, {value: 0x1308, lo: 0x81, hi: 0x81}, {value: 0x1008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xb7}, {value: 0x1308, lo: 0xb8, hi: 0xbf}, // Block 0xab, offset 0x51f {value: 0x0000, lo: 0x08}, {value: 0x1308, lo: 0x80, hi: 0x85}, {value: 0x1b08, lo: 0x86, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x91}, {value: 0x0018, lo: 0x92, hi: 0xa5}, {value: 0x0008, lo: 0xa6, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbe}, {value: 0x1b08, lo: 0xbf, hi: 0xbf}, // Block 0xac, offset 0x528 {value: 0x0000, lo: 0x0b}, {value: 0x1308, lo: 0x80, hi: 0x81}, {value: 0x1008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xaf}, {value: 0x1008, lo: 0xb0, hi: 0xb2}, {value: 0x1308, lo: 0xb3, hi: 0xb6}, {value: 0x1008, lo: 0xb7, hi: 0xb8}, {value: 0x1b08, lo: 0xb9, hi: 0xb9}, {value: 0x1308, lo: 0xba, hi: 0xba}, {value: 0x0018, lo: 0xbb, hi: 0xbc}, {value: 0x0340, lo: 0xbd, hi: 0xbd}, {value: 0x0018, lo: 0xbe, hi: 0xbf}, // Block 0xad, offset 0x534 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x81}, {value: 0x0040, lo: 0x82, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xa8}, {value: 0x0040, lo: 0xa9, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, // Block 0xae, offset 0x53b {value: 0x0000, lo: 0x08}, {value: 0x1308, lo: 0x80, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xa6}, {value: 0x1308, lo: 0xa7, hi: 0xab}, {value: 0x1008, lo: 0xac, hi: 0xac}, {value: 0x1308, lo: 0xad, hi: 0xb2}, {value: 0x1b08, lo: 0xb3, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xbf}, // Block 0xaf, offset 0x544 {value: 0x0000, lo: 0x07}, {value: 0x0018, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xb2}, {value: 0x1308, lo: 0xb3, hi: 0xb3}, {value: 0x0018, lo: 0xb4, hi: 0xb5}, {value: 0x0008, lo: 0xb6, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, // Block 0xb0, offset 0x54c {value: 0x0000, lo: 0x06}, {value: 0x1308, lo: 0x80, hi: 0x81}, {value: 0x1008, lo: 0x82, hi: 0x82}, {value: 0x0008, lo: 0x83, hi: 0xb2}, {value: 0x1008, lo: 0xb3, hi: 0xb5}, {value: 0x1308, lo: 0xb6, hi: 0xbe}, {value: 0x1008, lo: 0xbf, hi: 0xbf}, // Block 0xb1, offset 0x553 {value: 0x0000, lo: 0x0d}, {value: 0x1808, lo: 0x80, hi: 0x80}, {value: 0x0008, lo: 0x81, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x89}, {value: 0x1308, lo: 0x8a, hi: 0x8c}, {value: 0x0018, lo: 0x8d, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 
0x9b}, {value: 0x0008, lo: 0x9c, hi: 0x9c}, {value: 0x0018, lo: 0x9d, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xa0}, {value: 0x0018, lo: 0xa1, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, // Block 0xb2, offset 0x561 {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x91}, {value: 0x0040, lo: 0x92, hi: 0x92}, {value: 0x0008, lo: 0x93, hi: 0xab}, {value: 0x1008, lo: 0xac, hi: 0xae}, {value: 0x1308, lo: 0xaf, hi: 0xb1}, {value: 0x1008, lo: 0xb2, hi: 0xb3}, {value: 0x1308, lo: 0xb4, hi: 0xb4}, {value: 0x1808, lo: 0xb5, hi: 0xb5}, {value: 0x1308, lo: 0xb6, hi: 0xb7}, {value: 0x0018, lo: 0xb8, hi: 0xbd}, {value: 0x1308, lo: 0xbe, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, // Block 0xb3, offset 0x56e {value: 0x0000, lo: 0x0c}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, {value: 0x0008, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, {value: 0x0008, lo: 0x8a, hi: 0x8d}, {value: 0x0040, lo: 0x8e, hi: 0x8e}, {value: 0x0008, lo: 0x8f, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9e}, {value: 0x0008, lo: 0x9f, hi: 0xa8}, {value: 0x0018, lo: 0xa9, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbf}, // Block 0xb4, offset 0x57b {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x1308, lo: 0x9f, hi: 0x9f}, {value: 0x1008, lo: 0xa0, hi: 0xa2}, {value: 0x1308, lo: 0xa3, hi: 0xa9}, {value: 0x1b08, lo: 0xaa, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0040, lo: 0xba, hi: 0xbf}, // Block 0xb5, offset 0x584 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xb4}, {value: 0x1008, lo: 0xb5, hi: 0xb7}, {value: 0x1308, lo: 0xb8, hi: 0xbf}, // Block 0xb6, offset 0x588 {value: 0x0000, lo: 0x0d}, {value: 0x1008, lo: 0x80, hi: 0x81}, {value: 0x1b08, lo: 0x82, hi: 0x82}, {value: 0x1308, lo: 0x83, hi: 0x84}, {value: 0x1008, lo: 0x85, hi: 0x85}, {value: 0x1308, lo: 0x86, hi: 0x86}, {value: 0x0008, lo: 0x87, hi: 0x8a}, {value: 0x0018, lo: 0x8b, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0x9b}, {value: 0x0040, lo: 0x9c, hi: 0x9c}, {value: 0x0018, lo: 0x9d, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0xbf}, // Block 0xb7, offset 0x596 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xaf}, {value: 0x1008, lo: 0xb0, hi: 0xb2}, {value: 0x1308, lo: 0xb3, hi: 0xb8}, {value: 0x1008, lo: 0xb9, hi: 0xb9}, {value: 0x1308, lo: 0xba, hi: 0xba}, {value: 0x1008, lo: 0xbb, hi: 0xbe}, {value: 0x1308, lo: 0xbf, hi: 0xbf}, // Block 0xb8, offset 0x59e {value: 0x0000, lo: 0x0a}, {value: 0x1308, lo: 0x80, hi: 0x80}, {value: 0x1008, lo: 0x81, hi: 0x81}, {value: 0x1b08, lo: 0x82, hi: 0x82}, {value: 0x1308, lo: 0x83, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x85}, {value: 0x0018, lo: 0x86, hi: 0x86}, {value: 0x0008, lo: 0x87, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, // Block 0xb9, offset 0x5a9 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0xae}, {value: 0x1008, lo: 0xaf, hi: 0xb1}, {value: 0x1308, lo: 0xb2, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xb7}, {value: 0x1008, lo: 0xb8, hi: 0xbb}, {value: 0x1308, lo: 0xbc, hi: 0xbd}, {value: 0x1008, lo: 0xbe, hi: 0xbe}, {value: 0x1b08, lo: 0xbf, hi: 0xbf}, // Block 0xba, offset 0x5b2 {value: 0x0000, lo: 0x05}, {value: 0x1308, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x97}, {value: 0x0008, lo: 0x98, hi: 0x9b}, {value: 0x1308, lo: 0x9c, hi: 0x9d}, {value: 0x0040, 
lo: 0x9e, hi: 0xbf}, // Block 0xbb, offset 0x5b8 {value: 0x0000, lo: 0x07}, {value: 0x0008, lo: 0x80, hi: 0xaf}, {value: 0x1008, lo: 0xb0, hi: 0xb2}, {value: 0x1308, lo: 0xb3, hi: 0xba}, {value: 0x1008, lo: 0xbb, hi: 0xbc}, {value: 0x1308, lo: 0xbd, hi: 0xbd}, {value: 0x1008, lo: 0xbe, hi: 0xbe}, {value: 0x1b08, lo: 0xbf, hi: 0xbf}, // Block 0xbc, offset 0x5c0 {value: 0x0000, lo: 0x08}, {value: 0x1308, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x83}, {value: 0x0008, lo: 0x84, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, // Block 0xbd, offset 0x5c9 {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0xaa}, {value: 0x1308, lo: 0xab, hi: 0xab}, {value: 0x1008, lo: 0xac, hi: 0xac}, {value: 0x1308, lo: 0xad, hi: 0xad}, {value: 0x1008, lo: 0xae, hi: 0xaf}, {value: 0x1308, lo: 0xb0, hi: 0xb5}, {value: 0x1808, lo: 0xb6, hi: 0xb6}, {value: 0x1308, lo: 0xb7, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbf}, // Block 0xbe, offset 0x5d3 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x89}, {value: 0x0040, lo: 0x8a, hi: 0xbf}, // Block 0xbf, offset 0x5d6 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9c}, {value: 0x1308, lo: 0x9d, hi: 0x9f}, {value: 0x1008, lo: 0xa0, hi: 0xa1}, {value: 0x1308, lo: 0xa2, hi: 0xa5}, {value: 0x1008, lo: 0xa6, hi: 0xa6}, {value: 0x1308, lo: 0xa7, hi: 0xaa}, {value: 0x1b08, lo: 0xab, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xb9}, {value: 0x0018, lo: 0xba, hi: 0xbf}, // Block 0xc0, offset 0x5e2 {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x049d, lo: 0xa0, hi: 0xbf}, // Block 0xc1, offset 0x5e5 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbe}, {value: 0x0008, lo: 0xbf, hi: 0xbf}, // Block 0xc2, offset 0x5ea {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb8}, {value: 0x0040, lo: 0xb9, hi: 0xbf}, // Block 0xc3, offset 0x5ed {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x89}, {value: 0x0008, lo: 0x8a, hi: 0xae}, {value: 0x1008, lo: 0xaf, hi: 0xaf}, {value: 0x1308, lo: 0xb0, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xb7}, {value: 0x1308, lo: 0xb8, hi: 0xbd}, {value: 0x1008, lo: 0xbe, hi: 0xbe}, {value: 0x1b08, lo: 0xbf, hi: 0xbf}, // Block 0xc4, offset 0x5f7 {value: 0x0000, lo: 0x08}, {value: 0x0008, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0018, lo: 0x9a, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0008, lo: 0xb2, hi: 0xbf}, // Block 0xc5, offset 0x600 {value: 0x0000, lo: 0x0b}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x91}, {value: 0x1308, lo: 0x92, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xa8}, {value: 0x1008, lo: 0xa9, hi: 0xa9}, {value: 0x1308, lo: 0xaa, hi: 0xb0}, {value: 0x1008, lo: 0xb1, hi: 0xb1}, {value: 0x1308, lo: 0xb2, hi: 0xb3}, {value: 0x1008, lo: 0xb4, hi: 0xb4}, {value: 0x1308, lo: 0xb5, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, // Block 0xc6, offset 0x60c {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0xbf}, // Block 0xc7, offset 0x60f {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0xae}, 
{value: 0x0040, lo: 0xaf, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, // Block 0xc8, offset 0x614 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0040, lo: 0x84, hi: 0xbf}, // Block 0xc9, offset 0x617 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xbf}, // Block 0xca, offset 0x61a {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0xbf}, // Block 0xcb, offset 0x61d {value: 0x0000, lo: 0x06}, {value: 0x0008, lo: 0x80, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa9}, {value: 0x0040, lo: 0xaa, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, // Block 0xcc, offset 0x624 {value: 0x0000, lo: 0x06}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xaf}, {value: 0x1308, lo: 0xb0, hi: 0xb4}, {value: 0x0018, lo: 0xb5, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, // Block 0xcd, offset 0x62b {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0xaf}, {value: 0x1308, lo: 0xb0, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xbf}, // Block 0xce, offset 0x62f {value: 0x0000, lo: 0x0a}, {value: 0x0008, lo: 0x80, hi: 0x83}, {value: 0x0018, lo: 0x84, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9a}, {value: 0x0018, lo: 0x9b, hi: 0xa1}, {value: 0x0040, lo: 0xa2, hi: 0xa2}, {value: 0x0008, lo: 0xa3, hi: 0xb7}, {value: 0x0040, lo: 0xb8, hi: 0xbc}, {value: 0x0008, lo: 0xbd, hi: 0xbf}, // Block 0xcf, offset 0x63a {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0xbf}, // Block 0xd0, offset 0x63d {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x90}, {value: 0x1008, lo: 0x91, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, // Block 0xd1, offset 0x643 {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x8e}, {value: 0x1308, lo: 0x8f, hi: 0x92}, {value: 0x0008, lo: 0x93, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, // Block 0xd2, offset 0x648 {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xa0}, {value: 0x0040, lo: 0xa1, hi: 0xbf}, // Block 0xd3, offset 0x64c {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, // Block 0xd4, offset 0x64f {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb2}, {value: 0x0040, lo: 0xb3, hi: 0xbf}, // Block 0xd5, offset 0x652 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x81}, {value: 0x0040, lo: 0x82, hi: 0xbf}, // Block 0xd6, offset 0x655 {value: 0x0000, lo: 0x04}, {value: 0x0008, lo: 0x80, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xaf}, {value: 0x0008, lo: 0xb0, hi: 0xbc}, {value: 0x0040, lo: 0xbd, hi: 0xbf}, // Block 0xd7, offset 0x65a {value: 0x0000, lo: 0x09}, {value: 0x0008, lo: 0x80, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9b}, {value: 0x0018, lo: 0x9c, hi: 0x9c}, {value: 0x1308, lo: 0x9d, hi: 0x9e}, {value: 0x0018, lo: 0x9f, hi: 0x9f}, {value: 0x03c0, lo: 0xa0, hi: 0xa3}, {value: 0x0040, lo: 0xa4, hi: 0xbf}, // Block 0xd8, offset 0x664 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, // Block 0xd9, offset 0x667 {value: 0x0000, lo: 0x03}, {value: 
0x0018, lo: 0x80, hi: 0xa6}, {value: 0x0040, lo: 0xa7, hi: 0xa8}, {value: 0x0018, lo: 0xa9, hi: 0xbf}, // Block 0xda, offset 0x66b {value: 0x0000, lo: 0x0e}, {value: 0x0018, lo: 0x80, hi: 0x9d}, {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, {value: 0xb601, lo: 0x9f, hi: 0x9f}, {value: 0xb649, lo: 0xa0, hi: 0xa0}, {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, {value: 0xb719, lo: 0xa2, hi: 0xa2}, {value: 0xb781, lo: 0xa3, hi: 0xa3}, {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, {value: 0x1018, lo: 0xa5, hi: 0xa6}, {value: 0x1318, lo: 0xa7, hi: 0xa9}, {value: 0x0018, lo: 0xaa, hi: 0xac}, {value: 0x1018, lo: 0xad, hi: 0xb2}, {value: 0x0340, lo: 0xb3, hi: 0xba}, {value: 0x1318, lo: 0xbb, hi: 0xbf}, // Block 0xdb, offset 0x67a {value: 0x0000, lo: 0x0b}, {value: 0x1318, lo: 0x80, hi: 0x82}, {value: 0x0018, lo: 0x83, hi: 0x84}, {value: 0x1318, lo: 0x85, hi: 0x8b}, {value: 0x0018, lo: 0x8c, hi: 0xa9}, {value: 0x1318, lo: 0xaa, hi: 0xad}, {value: 0x0018, lo: 0xae, hi: 0xba}, {value: 0xb851, lo: 0xbb, hi: 0xbb}, {value: 0xb899, lo: 0xbc, hi: 0xbc}, {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, {value: 0xb949, lo: 0xbe, hi: 0xbe}, {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, // Block 0xdc, offset 0x686 {value: 0x0000, lo: 0x03}, {value: 0xba19, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0xa8}, {value: 0x0040, lo: 0xa9, hi: 0xbf}, // Block 0xdd, offset 0x68a {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x81}, {value: 0x1318, lo: 0x82, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x85}, {value: 0x0040, lo: 0x86, hi: 0xbf}, // Block 0xde, offset 0x68f {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xb1}, {value: 0x0040, lo: 0xb2, hi: 0xbf}, // Block 0xdf, offset 0x694 {value: 0x0000, lo: 0x03}, {value: 0x1308, lo: 0x80, hi: 0xb6}, {value: 0x0018, lo: 0xb7, hi: 0xba}, {value: 0x1308, lo: 0xbb, hi: 0xbf}, // Block 0xe0, offset 0x698 {value: 0x0000, lo: 0x04}, {value: 0x1308, lo: 0x80, hi: 0xac}, {value: 0x0018, lo: 0xad, hi: 0xb4}, {value: 0x1308, lo: 0xb5, hi: 0xb5}, {value: 0x0018, lo: 0xb6, hi: 0xbf}, // Block 0xe1, offset 0x69d {value: 0x0000, lo: 0x08}, {value: 0x0018, lo: 0x80, hi: 0x83}, {value: 0x1308, lo: 0x84, hi: 0x84}, {value: 0x0018, lo: 0x85, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x9a}, {value: 0x1308, lo: 0x9b, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xa0}, {value: 0x1308, lo: 0xa1, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, // Block 0xe2, offset 0x6a6 {value: 0x0000, lo: 0x0a}, {value: 0x1308, lo: 0x80, hi: 0x86}, {value: 0x0040, lo: 0x87, hi: 0x87}, {value: 0x1308, lo: 0x88, hi: 0x98}, {value: 0x0040, lo: 0x99, hi: 0x9a}, {value: 0x1308, lo: 0x9b, hi: 0xa1}, {value: 0x0040, lo: 0xa2, hi: 0xa2}, {value: 0x1308, lo: 0xa3, hi: 0xa4}, {value: 0x0040, lo: 0xa5, hi: 0xa5}, {value: 0x1308, lo: 0xa6, hi: 0xaa}, {value: 0x0040, lo: 0xab, hi: 0xbf}, // Block 0xe3, offset 0x6b1 {value: 0x0000, lo: 0x05}, {value: 0x0008, lo: 0x80, hi: 0x84}, {value: 0x0040, lo: 0x85, hi: 0x86}, {value: 0x0018, lo: 0x87, hi: 0x8f}, {value: 0x1308, lo: 0x90, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0xbf}, // Block 0xe4, offset 0x6b7 {value: 0x0000, lo: 0x07}, {value: 0x0208, lo: 0x80, hi: 0x83}, {value: 0x1308, lo: 0x84, hi: 0x8a}, {value: 0x0040, lo: 0x8b, hi: 0x8f}, {value: 0x0008, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9d}, {value: 0x0018, lo: 0x9e, hi: 0x9f}, {value: 0x0040, lo: 0xa0, hi: 0xbf}, // Block 0xe5, offset 0x6bf {value: 0x0000, lo: 0x03}, {value: 0x0040, lo: 0x80, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb1}, {value: 0x0040, 
lo: 0xb2, hi: 0xbf}, // Block 0xe6, offset 0x6c3 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0xab}, {value: 0x0040, lo: 0xac, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xbf}, // Block 0xe7, offset 0x6c7 {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x93}, {value: 0x0040, lo: 0x94, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xae}, {value: 0x0040, lo: 0xaf, hi: 0xb0}, {value: 0x0018, lo: 0xb1, hi: 0xbf}, // Block 0xe8, offset 0x6cd {value: 0x0000, lo: 0x05}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0018, lo: 0x81, hi: 0x8f}, {value: 0x0040, lo: 0x90, hi: 0x90}, {value: 0x0018, lo: 0x91, hi: 0xb5}, {value: 0x0040, lo: 0xb6, hi: 0xbf}, // Block 0xe9, offset 0x6d3 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x8f}, {value: 0xc1c1, lo: 0x90, hi: 0x90}, {value: 0x0018, lo: 0x91, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xbf}, // Block 0xea, offset 0x6d8 {value: 0x0000, lo: 0x02}, {value: 0x0040, lo: 0x80, hi: 0xa5}, {value: 0x0018, lo: 0xa6, hi: 0xbf}, // Block 0xeb, offset 0x6db {value: 0x0000, lo: 0x0d}, {value: 0xc7e9, lo: 0x80, hi: 0x80}, {value: 0xc839, lo: 0x81, hi: 0x81}, {value: 0xc889, lo: 0x82, hi: 0x82}, {value: 0xc8d9, lo: 0x83, hi: 0x83}, {value: 0xc929, lo: 0x84, hi: 0x84}, {value: 0xc979, lo: 0x85, hi: 0x85}, {value: 0xc9c9, lo: 0x86, hi: 0x86}, {value: 0xca19, lo: 0x87, hi: 0x87}, {value: 0xca69, lo: 0x88, hi: 0x88}, {value: 0x0040, lo: 0x89, hi: 0x8f}, {value: 0xcab9, lo: 0x90, hi: 0x90}, {value: 0xcad9, lo: 0x91, hi: 0x91}, {value: 0x0040, lo: 0x92, hi: 0xbf}, // Block 0xec, offset 0x6e9 {value: 0x0000, lo: 0x06}, {value: 0x0018, lo: 0x80, hi: 0x92}, {value: 0x0040, lo: 0x93, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xac}, {value: 0x0040, lo: 0xad, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb6}, {value: 0x0040, lo: 0xb7, hi: 0xbf}, // Block 0xed, offset 0x6f0 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0xb3}, {value: 0x0040, lo: 0xb4, hi: 0xbf}, // Block 0xee, offset 0x6f3 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0x94}, {value: 0x0040, lo: 0x95, hi: 0xbf}, // Block 0xef, offset 0x6f6 {value: 0x0000, lo: 0x03}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xbf}, // Block 0xf0, offset 0x6fa {value: 0x0000, lo: 0x05}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x99}, {value: 0x0040, lo: 0x9a, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xbf}, // Block 0xf1, offset 0x700 {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x87}, {value: 0x0040, lo: 0x88, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0xad}, {value: 0x0040, lo: 0xae, hi: 0xbf}, // Block 0xf2, offset 0x705 {value: 0x0000, lo: 0x09}, {value: 0x0040, lo: 0x80, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0x9f}, {value: 0x0018, lo: 0xa0, hi: 0xa7}, {value: 0x0040, lo: 0xa8, hi: 0xaf}, {value: 0x0018, lo: 0xb0, hi: 0xb0}, {value: 0x0040, lo: 0xb1, hi: 0xb2}, {value: 0x0018, lo: 0xb3, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, // Block 0xf3, offset 0x70f {value: 0x0000, lo: 0x04}, {value: 0x0018, lo: 0x80, hi: 0x8b}, {value: 0x0040, lo: 0x8c, hi: 0x8f}, {value: 0x0018, lo: 0x90, hi: 0x9e}, {value: 0x0040, lo: 0x9f, hi: 0xbf}, // Block 0xf4, offset 0x714 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0x91}, {value: 0x0040, lo: 0x92, hi: 0xbf}, // Block 0xf5, offset 0x717 {value: 0x0000, lo: 0x02}, {value: 0x0018, lo: 0x80, hi: 0x80}, {value: 0x0040, lo: 0x81, hi: 0xbf}, // Block 0xf6, offset 
0x71a {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0x96}, {value: 0x0040, lo: 0x97, hi: 0xbf}, // Block 0xf7, offset 0x71d {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xb4}, {value: 0x0040, lo: 0xb5, hi: 0xbf}, // Block 0xf8, offset 0x720 {value: 0x0000, lo: 0x03}, {value: 0x0008, lo: 0x80, hi: 0x9d}, {value: 0x0040, lo: 0x9e, hi: 0x9f}, {value: 0x0008, lo: 0xa0, hi: 0xbf}, // Block 0xf9, offset 0x724 {value: 0x0000, lo: 0x02}, {value: 0x0008, lo: 0x80, hi: 0xa1}, {value: 0x0040, lo: 0xa2, hi: 0xbf}, // Block 0xfa, offset 0x727 {value: 0x0020, lo: 0x0f}, {value: 0xdeb9, lo: 0x80, hi: 0x89}, {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, {value: 0xdff9, lo: 0x8b, hi: 0x9c}, {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, {value: 0xe239, lo: 0x9e, hi: 0xa2}, {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, {value: 0xe2d9, lo: 0xa4, hi: 0xab}, {value: 0x7ed5, lo: 0xac, hi: 0xac}, {value: 0xe3d9, lo: 0xad, hi: 0xaf}, {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, {value: 0xe439, lo: 0xb1, hi: 0xb6}, {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, {value: 0xe4f9, lo: 0xba, hi: 0xba}, {value: 0x8edd, lo: 0xbb, hi: 0xbb}, {value: 0xe519, lo: 0xbc, hi: 0xbf}, // Block 0xfb, offset 0x737 {value: 0x0020, lo: 0x10}, {value: 0x937d, lo: 0x80, hi: 0x80}, {value: 0xf099, lo: 0x81, hi: 0x86}, {value: 0x939d, lo: 0x87, hi: 0x8a}, {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, {value: 0xf159, lo: 0x8c, hi: 0x96}, {value: 0x941d, lo: 0x97, hi: 0x97}, {value: 0xf2b9, lo: 0x98, hi: 0xa3}, {value: 0x943d, lo: 0xa4, hi: 0xa6}, {value: 0xf439, lo: 0xa7, hi: 0xaa}, {value: 0x949d, lo: 0xab, hi: 0xab}, {value: 0xf4b9, lo: 0xac, hi: 0xac}, {value: 0x94bd, lo: 0xad, hi: 0xad}, {value: 0xf4d9, lo: 0xae, hi: 0xaf}, {value: 0x94dd, lo: 0xb0, hi: 0xb1}, {value: 0xf519, lo: 0xb2, hi: 0xbe}, {value: 0x0040, lo: 0xbf, hi: 0xbf}, // Block 0xfc, offset 0x748 {value: 0x0000, lo: 0x04}, {value: 0x0040, lo: 0x80, hi: 0x80}, {value: 0x0340, lo: 0x81, hi: 0x81}, {value: 0x0040, lo: 0x82, hi: 0x9f}, {value: 0x0340, lo: 0xa0, hi: 0xbf}, // Block 0xfd, offset 0x74d {value: 0x0000, lo: 0x01}, {value: 0x0340, lo: 0x80, hi: 0xbf}, // Block 0xfe, offset 0x74f {value: 0x0000, lo: 0x01}, {value: 0x13c0, lo: 0x80, hi: 0xbf}, // Block 0xff, offset 0x751 {value: 0x0000, lo: 0x02}, {value: 0x13c0, lo: 0x80, hi: 0xaf}, {value: 0x0040, lo: 0xb0, hi: 0xbf}, } // Total table size 41559 bytes (40KiB); checksum: F4A1FA4E gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/idna/trie.go000066400000000000000000000035141324746544700312230ustar00rootroot00000000000000// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package idna // appendMapping appends the mapping for the respective rune. isMapped must be // true. A mapping is a categorization of a rune as defined in UTS #46. func (c info) appendMapping(b []byte, s string) []byte { index := int(c >> indexShift) if c&xorBit == 0 { s := mappings[index:] return append(b, s[1:s[0]+1]...) } b = append(b, s...) if c&inlineXOR == inlineXOR { // TODO: support and handle two-byte inline masks b[len(b)-1] ^= byte(index) } else { for p := len(b) - int(xorData[index]); p < len(b); p++ { index++ b[p] ^= xorData[index] } } return b } // Sparse block handling code. 
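// Illustrative sketch (not part of the generated table or of the vendored
// package; the exampleRange/exampleSparseLookup names below are hypothetical):
// the sparse block machinery that follows stores, per block, a small header
// followed by sorted {value, lo, hi} ranges, and a lookup binary-searches the
// ranges for a byte b, returning r.value + uint16(b-r.lo)*stride on a hit.
// A minimal standalone version of that idea:

type exampleRange struct {
	value  uint16
	lo, hi byte
}

// exampleSparseLookup returns the value for b given sorted, non-overlapping
// ranges and the block's stride, or 0 if b falls inside no range.
func exampleSparseLookup(ranges []exampleRange, stride uint16, b byte) uint16 {
	lo, hi := 0, len(ranges)
	for lo < hi {
		m := lo + (hi-lo)/2 // midpoint, mirroring the real lookup below
		r := ranges[m]
		switch {
		case b < r.lo:
			hi = m
		case b > r.hi:
			lo = m + 1
		default:
			return r.value + uint16(b-r.lo)*stride
		}
	}
	return 0
}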
type valueRange struct { value uint16 // header: value:stride lo, hi byte // header: lo:n } type sparseBlocks struct { values []valueRange offset []uint16 } var idnaSparse = sparseBlocks{ values: idnaSparseValues[:], offset: idnaSparseOffset[:], } // Don't use newIdnaTrie to avoid unconditional linking in of the table. var trie = &idnaTrie{} // lookup determines the type of block n and looks up the value for b. // For n < t.cutoff, the block is a simple lookup table. Otherwise, the block // is a list of ranges with an accompanying value. Given a matching range r, // the value for b is by r.value + (b - r.lo) * stride. func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { offset := t.offset[n] header := t.values[offset] lo := offset + 1 hi := lo + uint16(header.lo) for lo < hi { m := lo + (hi-lo)/2 r := t.values[m] if r.lo <= b && b <= r.hi { return r.value + uint16(b-r.lo)*header.value } if b < r.lo { hi = m } else { lo = m + 1 } } return 0 } trieval.go000066400000000000000000000056721324746544700316560ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/idna// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. package idna // This file contains definitions for interpreting the trie value of the idna // trie generated by "go run gen*.go". It is shared by both the generator // program and the resultant package. Sharing is achieved by the generator // copying gen_trieval.go to trieval.go and changing what's above this comment. // info holds information from the IDNA mapping table for a single rune. It is // the value returned by a trie lookup. In most cases, all information fits in // a 16-bit value. For mappings, this value may contain an index into a slice // with the mapped string. Such mappings can consist of the actual mapped value // or an XOR pattern to be applied to the bytes of the UTF8 encoding of the // input rune. This technique is used by the cases packages and reduces the // table size significantly. // // The per-rune values have the following format: // // if mapped { // if inlinedXOR { // 15..13 inline XOR marker // 12..11 unused // 10..3 inline XOR mask // } else { // 15..3 index into xor or mapping table // } // } else { // 15..13 unused // 12 modifier (including virama) // 11 virama modifier // 10..8 joining type // 7..3 category type // } // 2 use xor pattern // 1..0 mapped category // // See the definitions below for a more detailed description of the various // bits. type info uint16 const ( catSmallMask = 0x3 catBigMask = 0xF8 indexShift = 3 xorBit = 0x4 // interpret the index as an xor pattern inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined. joinShift = 8 joinMask = 0x07 viramaModifier = 0x0800 modifier = 0x1000 ) // A category corresponds to a category defined in the IDNA mapping table. type category uint16 const ( unknown category = 0 // not defined currently in unicode. 
mapped category = 1 disallowedSTD3Mapped category = 2 deviation category = 3 ) const ( valid category = 0x08 validNV8 category = 0x18 validXV8 category = 0x28 disallowed category = 0x40 disallowedSTD3Valid category = 0x80 ignored category = 0xC0 ) // join types and additional rune information const ( joiningL = (iota + 1) joiningD joiningT joiningR //the following types are derived during processing joinZWJ joinZWNJ joinVirama numJoinTypes ) func (c info) isMapped() bool { return c&0x3 != 0 } func (c info) category() category { small := c & catSmallMask if small != 0 { return category(small) } return category(c & catBigMask) } func (c info) joinType() info { if c.isMapped() { return 0 } return (c >> joinShift) & joinMask } func (c info) isModifier() bool { return c&(modifier|catSmallMask) == modifier } func (c info) isViramaModifier() bool { return c&(viramaModifier|catSmallMask) == viramaModifier } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/internal/000077500000000000000000000000001324746544700306275ustar00rootroot00000000000000timeseries/000077500000000000000000000000001324746544700327215ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/internaltimeseries.go000066400000000000000000000351261324746544700354300ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/internal/timeseries// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package timeseries implements a time series structure for stats collection. package timeseries // import "golang.org/x/net/internal/timeseries" import ( "fmt" "log" "time" ) const ( timeSeriesNumBuckets = 64 minuteHourSeriesNumBuckets = 60 ) var timeSeriesResolutions = []time.Duration{ 1 * time.Second, 10 * time.Second, 1 * time.Minute, 10 * time.Minute, 1 * time.Hour, 6 * time.Hour, 24 * time.Hour, // 1 day 7 * 24 * time.Hour, // 1 week 4 * 7 * 24 * time.Hour, // 4 weeks 16 * 7 * 24 * time.Hour, // 16 weeks } var minuteHourSeriesResolutions = []time.Duration{ 1 * time.Second, 1 * time.Minute, } // An Observable is a kind of data that can be aggregated in a time series. type Observable interface { Multiply(ratio float64) // Multiplies the data in self by a given ratio Add(other Observable) // Adds the data from a different observation to self Clear() // Clears the observation so it can be reused. CopyFrom(other Observable) // Copies the contents of a given observation to self } // Float attaches the methods of Observable to a float64. type Float float64 // NewFloat returns a Float. func NewFloat() Observable { f := Float(0) return &f } // String returns the float as a string. func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } // Value returns the float's value. func (f *Float) Value() float64 { return float64(*f) } func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } func (f *Float) Add(other Observable) { o := other.(*Float) *f += *o } func (f *Float) Clear() { *f = 0 } func (f *Float) CopyFrom(other Observable) { o := other.(*Float) *f = *o } // A Clock tells the current time. type Clock interface { Time() time.Time } type defaultClock int var defaultClockInstance defaultClock func (defaultClock) Time() time.Time { return time.Now() } // Information kept per level. Each level consists of a circular list of // observations. 
The start of the level may be derived from end and the // len(buckets) * sizeInMillis. type tsLevel struct { oldest int // index to oldest bucketed Observable newest int // index to newest bucketed Observable end time.Time // end timestamp for this level size time.Duration // duration of the bucketed Observable buckets []Observable // collections of observations provider func() Observable // used for creating new Observable } func (l *tsLevel) Clear() { l.oldest = 0 l.newest = len(l.buckets) - 1 l.end = time.Time{} for i := range l.buckets { if l.buckets[i] != nil { l.buckets[i].Clear() l.buckets[i] = nil } } } func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { l.size = size l.provider = f l.buckets = make([]Observable, numBuckets) } // Keeps a sequence of levels. Each level is responsible for storing data at // a given resolution. For example, the first level stores data at a one // minute resolution while the second level stores data at a one hour // resolution. // Each level is represented by a sequence of buckets. Each bucket spans an // interval equal to the resolution of the level. New observations are added // to the last bucket. type timeSeries struct { provider func() Observable // make more Observable numBuckets int // number of buckets in each level levels []*tsLevel // levels of bucketed Observable lastAdd time.Time // time of last Observable tracked total Observable // convenient aggregation of all Observable clock Clock // Clock for getting current time pending Observable // observations not yet bucketed pendingTime time.Time // what time are we keeping in pending dirty bool // if there are pending observations } // init initializes a level according to the supplied criteria. func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { ts.provider = f ts.numBuckets = numBuckets ts.clock = clock ts.levels = make([]*tsLevel, len(resolutions)) for i := range resolutions { if i > 0 && resolutions[i-1] >= resolutions[i] { log.Print("timeseries: resolutions must be monotonically increasing") break } newLevel := new(tsLevel) newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) ts.levels[i] = newLevel } ts.Clear() } // Clear removes all observations from the time series. func (ts *timeSeries) Clear() { ts.lastAdd = time.Time{} ts.total = ts.resetObservation(ts.total) ts.pending = ts.resetObservation(ts.pending) ts.pendingTime = time.Time{} ts.dirty = false for i := range ts.levels { ts.levels[i].Clear() } } // Add records an observation at the current time. func (ts *timeSeries) Add(observation Observable) { ts.AddWithTime(observation, ts.clock.Time()) } // AddWithTime records an observation at the specified time. func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { smallBucketDuration := ts.levels[0].size if t.After(ts.lastAdd) { ts.lastAdd = t } if t.After(ts.pendingTime) { ts.advance(t) ts.mergePendingUpdates() ts.pendingTime = ts.levels[0].end ts.pending.CopyFrom(observation) ts.dirty = true } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { // The observation is close enough to go into the pending bucket. // This compensates for clock skewing and small scheduling delays // by letting the update stay in the fast path. ts.pending.Add(observation) ts.dirty = true } else { ts.mergeValue(observation, t) } } // mergeValue inserts the observation at the specified time in the past into all levels. 
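// As a worked example (numbers are mine, not from the package): with
// ts.numBuckets = 64 and a level whose size is 1s and whose end lies 3s after
// t, the target slot is index = (64-1) - int(3s/1s) = 60, i.e. the observation
// lands three buckets behind the newest bucket at that level.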
func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { for _, level := range ts.levels { index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) if 0 <= index && index < ts.numBuckets { bucketNumber := (level.oldest + index) % ts.numBuckets if level.buckets[bucketNumber] == nil { level.buckets[bucketNumber] = level.provider() } level.buckets[bucketNumber].Add(observation) } } ts.total.Add(observation) } // mergePendingUpdates applies the pending updates into all levels. func (ts *timeSeries) mergePendingUpdates() { if ts.dirty { ts.mergeValue(ts.pending, ts.pendingTime) ts.pending = ts.resetObservation(ts.pending) ts.dirty = false } } // advance cycles the buckets at each level until the latest bucket in // each level can hold the time specified. func (ts *timeSeries) advance(t time.Time) { if !t.After(ts.levels[0].end) { return } for i := 0; i < len(ts.levels); i++ { level := ts.levels[i] if !level.end.Before(t) { break } // If the time is sufficiently far, just clear the level and advance // directly. if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { for _, b := range level.buckets { ts.resetObservation(b) } level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) } for t.After(level.end) { level.end = level.end.Add(level.size) level.newest = level.oldest level.oldest = (level.oldest + 1) % ts.numBuckets ts.resetObservation(level.buckets[level.newest]) } t = level.end } } // Latest returns the sum of the num latest buckets from the level. func (ts *timeSeries) Latest(level, num int) Observable { now := ts.clock.Time() if ts.levels[0].end.Before(now) { ts.advance(now) } ts.mergePendingUpdates() result := ts.provider() l := ts.levels[level] index := l.newest for i := 0; i < num; i++ { if l.buckets[index] != nil { result.Add(l.buckets[index]) } if index == 0 { index = ts.numBuckets } index-- } return result } // LatestBuckets returns a copy of the num latest buckets from level. func (ts *timeSeries) LatestBuckets(level, num int) []Observable { if level < 0 || level > len(ts.levels) { log.Print("timeseries: bad level argument: ", level) return nil } if num < 0 || num >= ts.numBuckets { log.Print("timeseries: bad num argument: ", num) return nil } results := make([]Observable, num) now := ts.clock.Time() if ts.levels[0].end.Before(now) { ts.advance(now) } ts.mergePendingUpdates() l := ts.levels[level] index := l.newest for i := 0; i < num; i++ { result := ts.provider() results[i] = result if l.buckets[index] != nil { result.CopyFrom(l.buckets[index]) } if index == 0 { index = ts.numBuckets } index -= 1 } return results } // ScaleBy updates observations by scaling by factor. func (ts *timeSeries) ScaleBy(factor float64) { for _, l := range ts.levels { for i := 0; i < ts.numBuckets; i++ { l.buckets[i].Multiply(factor) } } ts.total.Multiply(factor) ts.pending.Multiply(factor) } // Range returns the sum of observations added over the specified time range. // If start or finish times don't fall on bucket boundaries of the same // level, then return values are approximate answers. func (ts *timeSeries) Range(start, finish time.Time) Observable { return ts.ComputeRange(start, finish, 1)[0] } // Recent returns the sum of observations from the last delta. func (ts *timeSeries) Recent(delta time.Duration) Observable { now := ts.clock.Time() return ts.Range(now.Add(-delta), now) } // Total returns the total of all observations. 
func (ts *timeSeries) Total() Observable { ts.mergePendingUpdates() return ts.total } // ComputeRange computes a specified number of values into a slice using // the observations recorded over the specified time period. The return // values are approximate if the start or finish times don't fall on the // bucket boundaries at the same level or if the number of buckets spanning // the range is not an integral multiple of num. func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { if start.After(finish) { log.Printf("timeseries: start > finish, %v>%v", start, finish) return nil } if num < 0 { log.Printf("timeseries: num < 0, %v", num) return nil } results := make([]Observable, num) for _, l := range ts.levels { if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { ts.extract(l, start, finish, num, results) return results } } // Failed to find a level that covers the desired range. So just // extract from the last level, even if it doesn't cover the entire // desired range. ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) return results } // RecentList returns the specified number of values in slice over the most // recent time period of the specified range. func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { if delta < 0 { return nil } now := ts.clock.Time() return ts.ComputeRange(now.Add(-delta), now, num) } // extract returns a slice of specified number of observations from a given // level over a given range. func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { ts.mergePendingUpdates() srcInterval := l.size dstInterval := finish.Sub(start) / time.Duration(num) dstStart := start srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) srcIndex := 0 // Where should scanning start? if dstStart.After(srcStart) { advance := dstStart.Sub(srcStart) / srcInterval srcIndex += int(advance) srcStart = srcStart.Add(advance * srcInterval) } // The i'th value is computed as show below. // interval = (finish/start)/num // i'th value = sum of observation in range // [ start + i * interval, // start + (i + 1) * interval ) for i := 0; i < num; i++ { results[i] = ts.resetObservation(results[i]) dstEnd := dstStart.Add(dstInterval) for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { srcEnd := srcStart.Add(srcInterval) if srcEnd.After(ts.lastAdd) { srcEnd = ts.lastAdd } if !srcEnd.Before(dstStart) { srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { // dst completely contains src. if srcValue != nil { results[i].Add(srcValue) } } else { // dst partially overlaps src. overlapStart := maxTime(srcStart, dstStart) overlapEnd := minTime(srcEnd, dstEnd) base := srcEnd.Sub(srcStart) fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() used := ts.provider() if srcValue != nil { used.CopyFrom(srcValue) } used.Multiply(fraction) results[i].Add(used) } if srcEnd.After(dstEnd) { break } } srcIndex++ srcStart = srcStart.Add(srcInterval) } dstStart = dstStart.Add(dstInterval) } } // resetObservation clears the content so the struct may be reused. func (ts *timeSeries) resetObservation(observation Observable) Observable { if observation == nil { observation = ts.provider() } else { observation.Clear() } return observation } // TimeSeries tracks data at granularities from 1 second to 16 weeks. 
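// A hedged usage sketch (illustrative only; it relies on the Float helper and
// the constructors defined further down in this file, plus the "time" import
// the file already has):
//
//	ts := NewTimeSeries(NewFloat)          // one *Float per bucket
//	f := Float(3)
//	ts.Add(&f)                             // record an observation at "now"
//	recent := ts.Recent(10 * time.Second)  // aggregate over the last 10s
//	_ = recent.(*Float).Value()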
type TimeSeries struct { timeSeries } // NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. func NewTimeSeries(f func() Observable) *TimeSeries { return NewTimeSeriesWithClock(f, defaultClockInstance) } // NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for // assigning timestamps. func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { ts := new(TimeSeries) ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) return ts } // MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. type MinuteHourSeries struct { timeSeries } // NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { return NewMinuteHourSeriesWithClock(f, defaultClockInstance) } // NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for // assigning timestamps. func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { ts := new(MinuteHourSeries) ts.timeSeries.init(minuteHourSeriesResolutions, f, minuteHourSeriesNumBuckets, clock) return ts } func (ts *MinuteHourSeries) Minute() Observable { return ts.timeSeries.Latest(0, 60) } func (ts *MinuteHourSeries) Hour() Observable { return ts.timeSeries.Latest(1, 60) } func minTime(a, b time.Time) time.Time { if a.Before(b) { return a } return b } func maxTime(a, b time.Time) time.Time { if a.After(b) { return a } return b } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/lex/000077500000000000000000000000001324746544700276035ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/lex/httplex/000077500000000000000000000000001324746544700312735ustar00rootroot00000000000000httplex.go000066400000000000000000000221171324746544700332360ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/lex/httplex// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package httplex contains rules around lexical matters of various // HTTP-related specifications. // // This package is shared by the standard library (which vendors it) // and x/net/http2. It comes with no API stability promise. 
package httplex import ( "net" "strings" "unicode/utf8" "golang.org/x/net/idna" ) var isTokenTable = [127]bool{ '!': true, '#': true, '$': true, '%': true, '&': true, '\'': true, '*': true, '+': true, '-': true, '.': true, '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, '8': true, '9': true, 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'W': true, 'V': true, 'X': true, 'Y': true, 'Z': true, '^': true, '_': true, '`': true, 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, 'y': true, 'z': true, '|': true, '~': true, } func IsTokenRune(r rune) bool { i := int(r) return i < len(isTokenTable) && isTokenTable[i] } func isNotToken(r rune) bool { return !IsTokenRune(r) } // HeaderValuesContainsToken reports whether any string in values // contains the provided token, ASCII case-insensitively. func HeaderValuesContainsToken(values []string, token string) bool { for _, v := range values { if headerValueContainsToken(v, token) { return true } } return false } // isOWS reports whether b is an optional whitespace byte, as defined // by RFC 7230 section 3.2.3. func isOWS(b byte) bool { return b == ' ' || b == '\t' } // trimOWS returns x with all optional whitespace removes from the // beginning and end. func trimOWS(x string) string { // TODO: consider using strings.Trim(x, " \t") instead, // if and when it's fast enough. See issue 10292. // But this ASCII-only code will probably always beat UTF-8 // aware code. for len(x) > 0 && isOWS(x[0]) { x = x[1:] } for len(x) > 0 && isOWS(x[len(x)-1]) { x = x[:len(x)-1] } return x } // headerValueContainsToken reports whether v (assumed to be a // 0#element, in the ABNF extension described in RFC 7230 section 7) // contains token amongst its comma-separated tokens, ASCII // case-insensitively. func headerValueContainsToken(v string, token string) bool { v = trimOWS(v) if comma := strings.IndexByte(v, ','); comma != -1 { return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) } return tokenEqual(v, token) } // lowerASCII returns the ASCII lowercase version of b. func lowerASCII(b byte) byte { if 'A' <= b && b <= 'Z' { return b + ('a' - 'A') } return b } // tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. func tokenEqual(t1, t2 string) bool { if len(t1) != len(t2) { return false } for i, b := range t1 { if b >= utf8.RuneSelf { // No UTF-8 or non-ASCII allowed in tokens. return false } if lowerASCII(byte(b)) != lowerASCII(t2[i]) { return false } } return true } // isLWS reports whether b is linear white space, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 // LWS = [CRLF] 1*( SP | HT ) func isLWS(b byte) bool { return b == ' ' || b == '\t' } // isCTL reports whether b is a control byte, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 // CTL = func isCTL(b byte) bool { const del = 0x7f // a CTL return b < ' ' || b == del } // ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. // HTTP/2 imposes the additional restriction that uppercase ASCII // letters are not allowed. 
// // RFC 7230 says: // header-field = field-name ":" OWS field-value OWS // field-name = token // token = 1*tchar // tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / // "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false } for _, r := range v { if !IsTokenRune(r) { return false } } return true } // ValidHostHeader reports whether h is a valid host header. func ValidHostHeader(h string) bool { // The latest spec is actually this: // // http://tools.ietf.org/html/rfc7230#section-5.4 // Host = uri-host [ ":" port ] // // Where uri-host is: // http://tools.ietf.org/html/rfc3986#section-3.2.2 // // But we're going to be much more lenient for now and just // search for any byte that's not a valid byte in any of those // expressions. for i := 0; i < len(h); i++ { if !validHostByte[h[i]] { return false } } return true } // See the validHostHeader comment. var validHostByte = [256]bool{ '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, '8': true, '9': true, 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, 'y': true, 'z': true, 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, 'Y': true, 'Z': true, '!': true, // sub-delims '$': true, // sub-delims '%': true, // pct-encoded (and used in IPv6 zones) '&': true, // sub-delims '(': true, // sub-delims ')': true, // sub-delims '*': true, // sub-delims '+': true, // sub-delims ',': true, // sub-delims '-': true, // unreserved '.': true, // unreserved ':': true, // IPv6address + Host expression's optional port ';': true, // sub-delims '=': true, // sub-delims '[': true, '\'': true, // sub-delims ']': true, '_': true, // unreserved '~': true, // unreserved } // ValidHeaderFieldValue reports whether v is a valid "field-value" according to // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : // // message-header = field-name ":" [ field-value ] // field-value = *( field-content | LWS ) // field-content = // // http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : // // TEXT = // LWS = [CRLF] 1*( SP | HT ) // CTL = // // RFC 7230 says: // field-value = *( field-content / obs-fold ) // obj-fold = N/A to http2, and deprecated // field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] // field-vchar = VCHAR / obs-text // obs-text = %x80-FF // VCHAR = "any visible [USASCII] character" // // http2 further says: "Similarly, HTTP/2 allows header field values // that are not valid. While most of the values that can be encoded // will not alter header field parsing, carriage return (CR, ASCII // 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII // 0x0) might be exploited by an attacker if they are translated // verbatim. Any request or response that contains a character not // permitted in a header field value MUST be treated as malformed // (Section 8.1.2.6). Valid characters are defined by the // field-content ABNF rule in Section 3.2 of [RFC7230]." // // This function does not (yet?) properly handle the rejection of // strings that begin or end with SP or HTAB. 
func ValidHeaderFieldValue(v string) bool { for i := 0; i < len(v); i++ { b := v[i] if isCTL(b) && !isLWS(b) { return false } } return true } func isASCII(s string) bool { for i := 0; i < len(s); i++ { if s[i] >= utf8.RuneSelf { return false } } return true } // PunycodeHostPort returns the IDNA Punycode version // of the provided "host" or "host:port" string. func PunycodeHostPort(v string) (string, error) { if isASCII(v) { return v, nil } host, port, err := net.SplitHostPort(v) if err != nil { // The input 'v' argument was just a "host" argument, // without a port. This error should not be returned // to the caller. host = v port = "" } host, err = idna.ToASCII(host) if err != nil { // Non-UTF-8? Not representable in Punycode, in any // case. return "", err } if port == "" { return host, nil } return net.JoinHostPort(host, port), nil } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/trace/000077500000000000000000000000001324746544700301115ustar00rootroot00000000000000events.go000066400000000000000000000304701324746544700316710ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/trace// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package trace import ( "bytes" "fmt" "html/template" "io" "log" "net/http" "runtime" "sort" "strconv" "strings" "sync" "sync/atomic" "text/tabwriter" "time" ) const maxEventsPerLog = 100 type bucket struct { MaxErrAge time.Duration String string } var buckets = []bucket{ {0, "total"}, {10 * time.Second, "errs<10s"}, {1 * time.Minute, "errs<1m"}, {10 * time.Minute, "errs<10m"}, {1 * time.Hour, "errs<1h"}, {10 * time.Hour, "errs<10h"}, {24000 * time.Hour, "errors"}, } // RenderEvents renders the HTML page typically served at /debug/events. // It does not do any auth checking; see AuthRequest for the default auth check // used by the handler registered on http.DefaultServeMux. // req may be nil. func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { now := time.Now() data := &struct { Families []string // family names Buckets []bucket Counts [][]int // eventLog count per family/bucket // Set when a bucket has been selected. Family string Bucket int EventLogs eventLogs Expanded bool }{ Buckets: buckets, } data.Families = make([]string, 0, len(families)) famMu.RLock() for name := range families { data.Families = append(data.Families, name) } famMu.RUnlock() sort.Strings(data.Families) // Count the number of eventLogs in each family for each error age. data.Counts = make([][]int, len(data.Families)) for i, name := range data.Families { // TODO(sameer): move this loop under the family lock. 
f := getEventFamily(name) data.Counts[i] = make([]int, len(data.Buckets)) for j, b := range data.Buckets { data.Counts[i][j] = f.Count(now, b.MaxErrAge) } } if req != nil { var ok bool data.Family, data.Bucket, ok = parseEventsArgs(req) if !ok { // No-op } else { data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) } if data.EventLogs != nil { defer data.EventLogs.Free() sort.Sort(data.EventLogs) } if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { data.Expanded = exp } } famMu.RLock() defer famMu.RUnlock() if err := eventsTmpl().Execute(w, data); err != nil { log.Printf("net/trace: Failed executing template: %v", err) } } func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { fam, bStr := req.FormValue("fam"), req.FormValue("b") if fam == "" || bStr == "" { return "", 0, false } b, err := strconv.Atoi(bStr) if err != nil || b < 0 || b >= len(buckets) { return "", 0, false } return fam, b, true } // An EventLog provides a log of events associated with a specific object. type EventLog interface { // Printf formats its arguments with fmt.Sprintf and adds the // result to the event log. Printf(format string, a ...interface{}) // Errorf is like Printf, but it marks this event as an error. Errorf(format string, a ...interface{}) // Finish declares that this event log is complete. // The event log should not be used after calling this method. Finish() } // NewEventLog returns a new EventLog with the specified family name // and title. func NewEventLog(family, title string) EventLog { el := newEventLog() el.ref() el.Family, el.Title = family, title el.Start = time.Now() el.events = make([]logEntry, 0, maxEventsPerLog) el.stack = make([]uintptr, 32) n := runtime.Callers(2, el.stack) el.stack = el.stack[:n] getEventFamily(family).add(el) return el } func (el *eventLog) Finish() { getEventFamily(el.Family).remove(el) el.unref() // matches ref in New } var ( famMu sync.RWMutex families = make(map[string]*eventFamily) // family name => family ) func getEventFamily(fam string) *eventFamily { famMu.Lock() defer famMu.Unlock() f := families[fam] if f == nil { f = &eventFamily{} families[fam] = f } return f } type eventFamily struct { mu sync.RWMutex eventLogs eventLogs } func (f *eventFamily) add(el *eventLog) { f.mu.Lock() f.eventLogs = append(f.eventLogs, el) f.mu.Unlock() } func (f *eventFamily) remove(el *eventLog) { f.mu.Lock() defer f.mu.Unlock() for i, el0 := range f.eventLogs { if el == el0 { copy(f.eventLogs[i:], f.eventLogs[i+1:]) f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] return } } } func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { f.mu.RLock() defer f.mu.RUnlock() for _, el := range f.eventLogs { if el.hasRecentError(now, maxErrAge) { n++ } } return } func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { f.mu.RLock() defer f.mu.RUnlock() els = make(eventLogs, 0, len(f.eventLogs)) for _, el := range f.eventLogs { if el.hasRecentError(now, maxErrAge) { el.ref() els = append(els, el) } } return } type eventLogs []*eventLog // Free calls unref on each element of the list. func (els eventLogs) Free() { for _, el := range els { el.unref() } } // eventLogs may be sorted in reverse chronological order. func (els eventLogs) Len() int { return len(els) } func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } // A logEntry is a timestamped log entry in an event log. 
type logEntry struct { When time.Time Elapsed time.Duration // since previous event in log NewDay bool // whether this event is on a different day to the previous event What string IsErr bool } // WhenString returns a string representation of the elapsed time of the event. // It will include the date if midnight was crossed. func (e logEntry) WhenString() string { if e.NewDay { return e.When.Format("2006/01/02 15:04:05.000000") } return e.When.Format("15:04:05.000000") } // An eventLog represents an active event log. type eventLog struct { // Family is the top-level grouping of event logs to which this belongs. Family string // Title is the title of this event log. Title string // Timing information. Start time.Time // Call stack where this event log was created. stack []uintptr // Append-only sequence of events. // // TODO(sameer): change this to a ring buffer to avoid the array copy // when we hit maxEventsPerLog. mu sync.RWMutex events []logEntry LastErrorTime time.Time discarded int refs int32 // how many buckets this is in } func (el *eventLog) reset() { // Clear all but the mutex. Mutexes may not be copied, even when unlocked. el.Family = "" el.Title = "" el.Start = time.Time{} el.stack = nil el.events = nil el.LastErrorTime = time.Time{} el.discarded = 0 el.refs = 0 } func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { if maxErrAge == 0 { return true } el.mu.RLock() defer el.mu.RUnlock() return now.Sub(el.LastErrorTime) < maxErrAge } // delta returns the elapsed time since the last event or the log start, // and whether it spans midnight. // L >= el.mu func (el *eventLog) delta(t time.Time) (time.Duration, bool) { if len(el.events) == 0 { return t.Sub(el.Start), false } prev := el.events[len(el.events)-1].When return t.Sub(prev), prev.Day() != t.Day() } func (el *eventLog) Printf(format string, a ...interface{}) { el.printf(false, format, a...) } func (el *eventLog) Errorf(format string, a ...interface{}) { el.printf(true, format, a...) } func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} el.mu.Lock() e.Elapsed, e.NewDay = el.delta(e.When) if len(el.events) < maxEventsPerLog { el.events = append(el.events, e) } else { // Discard the oldest event. if el.discarded == 0 { // el.discarded starts at two to count for the event it // is replacing, plus the next one that we are about to // drop. el.discarded = 2 } else { el.discarded++ } // TODO(sameer): if this causes allocations on a critical path, // change eventLog.What to be a fmt.Stringer, as in trace.go. el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) // The timestamp of the discarded meta-event should be // the time of the last event it is representing. 
el.events[0].When = el.events[1].When copy(el.events[1:], el.events[2:]) el.events[maxEventsPerLog-1] = e } if e.IsErr { el.LastErrorTime = e.When } el.mu.Unlock() } func (el *eventLog) ref() { atomic.AddInt32(&el.refs, 1) } func (el *eventLog) unref() { if atomic.AddInt32(&el.refs, -1) == 0 { freeEventLog(el) } } func (el *eventLog) When() string { return el.Start.Format("2006/01/02 15:04:05.000000") } func (el *eventLog) ElapsedTime() string { elapsed := time.Since(el.Start) return fmt.Sprintf("%.6f", elapsed.Seconds()) } func (el *eventLog) Stack() string { buf := new(bytes.Buffer) tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) printStackRecord(tw, el.stack) tw.Flush() return buf.String() } // printStackRecord prints the function + source line information // for a single stack trace. // Adapted from runtime/pprof/pprof.go. func printStackRecord(w io.Writer, stk []uintptr) { for _, pc := range stk { f := runtime.FuncForPC(pc) if f == nil { continue } file, line := f.FileLine(pc) name := f.Name() // Hide runtime.goexit and any runtime functions at the beginning. if strings.HasPrefix(name, "runtime.") { continue } fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) } } func (el *eventLog) Events() []logEntry { el.mu.RLock() defer el.mu.RUnlock() return el.events } // freeEventLogs is a freelist of *eventLog var freeEventLogs = make(chan *eventLog, 1000) // newEventLog returns a event log ready to use. func newEventLog() *eventLog { select { case el := <-freeEventLogs: return el default: return new(eventLog) } } // freeEventLog adds el to freeEventLogs if there's room. // This is non-blocking. func freeEventLog(el *eventLog) { el.reset() select { case freeEventLogs <- el: default: } } var eventsTmplCache *template.Template var eventsTmplOnce sync.Once func eventsTmpl() *template.Template { eventsTmplOnce.Do(func() { eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ "elapsed": elapsed, "trimSpace": strings.TrimSpace, }).Parse(eventsHTML)) }) return eventsTmplCache } const eventsHTML = ` events

/debug/events

{{range $i, $fam := .Families}} {{range $j, $bucket := $.Buckets}} {{$n := index $.Counts $i $j}} {{end}} {{end}}
{{$fam}} {{if $n}}{{end}} [{{$n}} {{$bucket.String}}] {{if $n}}{{end}}
{{if $.EventLogs}}

Family: {{$.Family}}

{{if $.Expanded}}{{end}} [Summary]{{if $.Expanded}}{{end}} {{if not $.Expanded}}{{end}} [Expanded]{{if not $.Expanded}}{{end}} {{range $el := $.EventLogs}} {{if $.Expanded}} {{range $el.Events}} {{end}} {{end}} {{end}}
WhenElapsed
{{$el.When}} {{$el.ElapsedTime}} {{$el.Title}}
{{$el.Stack|trimSpace}}
{{.WhenString}} {{elapsed .Elapsed}} .{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
{{end}} ` histogram.go000066400000000000000000000223211324746544700323560ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/trace// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package trace // This file implements histogramming for RPC statistics collection. import ( "bytes" "fmt" "html/template" "log" "math" "sync" "golang.org/x/net/internal/timeseries" ) const ( bucketCount = 38 ) // histogram keeps counts of values in buckets that are spaced // out in powers of 2: 0-1, 2-3, 4-7... // histogram implements timeseries.Observable type histogram struct { sum int64 // running total of measurements sumOfSquares float64 // square of running total buckets []int64 // bucketed values for histogram value int // holds a single value as an optimization valueCount int64 // number of values recorded for single value } // AddMeasurement records a value measurement observation to the histogram. func (h *histogram) addMeasurement(value int64) { // TODO: assert invariant h.sum += value h.sumOfSquares += float64(value) * float64(value) bucketIndex := getBucket(value) if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { h.value = bucketIndex h.valueCount++ } else { h.allocateBuckets() h.buckets[bucketIndex]++ } } func (h *histogram) allocateBuckets() { if h.buckets == nil { h.buckets = make([]int64, bucketCount) h.buckets[h.value] = h.valueCount h.value = 0 h.valueCount = -1 } } func log2(i int64) int { n := 0 for ; i >= 0x100; i >>= 8 { n += 8 } for ; i > 0; i >>= 1 { n += 1 } return n } func getBucket(i int64) (index int) { index = log2(i) - 1 if index < 0 { index = 0 } if index >= bucketCount { index = bucketCount - 1 } return } // Total returns the number of recorded observations. func (h *histogram) total() (total int64) { if h.valueCount >= 0 { total = h.valueCount } for _, val := range h.buckets { total += int64(val) } return } // Average returns the average value of recorded observations. func (h *histogram) average() float64 { t := h.total() if t == 0 { return 0 } return float64(h.sum) / float64(t) } // Variance returns the variance of recorded observations. func (h *histogram) variance() float64 { t := float64(h.total()) if t == 0 { return 0 } s := float64(h.sum) / t return h.sumOfSquares/t - s*s } // StandardDeviation returns the standard deviation of recorded observations. func (h *histogram) standardDeviation() float64 { return math.Sqrt(h.variance()) } // PercentileBoundary estimates the value that the given fraction of recorded // observations are less than. func (h *histogram) percentileBoundary(percentile float64) int64 { total := h.total() // Corner cases (make sure result is strictly less than Total()) if total == 0 { return 0 } else if total == 1 { return int64(h.average()) } percentOfTotal := round(float64(total) * percentile) var runningTotal int64 for i := range h.buckets { value := h.buckets[i] runningTotal += value if runningTotal == percentOfTotal { // We hit an exact bucket boundary. If the next bucket has data, it is a // good estimate of the value. If the bucket is empty, we interpolate the // midpoint between the next bucket's boundary and the next non-zero // bucket. If the remaining buckets are all empty, then we use the // boundary for the next bucket as the estimate. 
j := uint8(i + 1) min := bucketBoundary(j) if runningTotal < total { for h.buckets[j] == 0 { j++ } } max := bucketBoundary(j) return min + round(float64(max-min)/2) } else if runningTotal > percentOfTotal { // The value is in this bucket. Interpolate the value. delta := runningTotal - percentOfTotal percentBucket := float64(value-delta) / float64(value) bucketMin := bucketBoundary(uint8(i)) nextBucketMin := bucketBoundary(uint8(i + 1)) bucketSize := nextBucketMin - bucketMin return bucketMin + round(percentBucket*float64(bucketSize)) } } return bucketBoundary(bucketCount - 1) } // Median returns the estimated median of the observed values. func (h *histogram) median() int64 { return h.percentileBoundary(0.5) } // Add adds other to h. func (h *histogram) Add(other timeseries.Observable) { o := other.(*histogram) if o.valueCount == 0 { // Other histogram is empty } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { // Both have a single bucketed value, aggregate them h.valueCount += o.valueCount } else { // Two different values necessitate buckets in this histogram h.allocateBuckets() if o.valueCount >= 0 { h.buckets[o.value] += o.valueCount } else { for i := range h.buckets { h.buckets[i] += o.buckets[i] } } } h.sumOfSquares += o.sumOfSquares h.sum += o.sum } // Clear resets the histogram to an empty state, removing all observed values. func (h *histogram) Clear() { h.buckets = nil h.value = 0 h.valueCount = 0 h.sum = 0 h.sumOfSquares = 0 } // CopyFrom copies from other, which must be a *histogram, into h. func (h *histogram) CopyFrom(other timeseries.Observable) { o := other.(*histogram) if o.valueCount == -1 { h.allocateBuckets() copy(h.buckets, o.buckets) } h.sum = o.sum h.sumOfSquares = o.sumOfSquares h.value = o.value h.valueCount = o.valueCount } // Multiply scales the histogram by the specified ratio. func (h *histogram) Multiply(ratio float64) { if h.valueCount == -1 { for i := range h.buckets { h.buckets[i] = int64(float64(h.buckets[i]) * ratio) } } else { h.valueCount = int64(float64(h.valueCount) * ratio) } h.sum = int64(float64(h.sum) * ratio) h.sumOfSquares = h.sumOfSquares * ratio } // New creates a new histogram. func (h *histogram) New() timeseries.Observable { r := new(histogram) r.Clear() return r } func (h *histogram) String() string { return fmt.Sprintf("%d, %f, %d, %d, %v", h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) } // round returns the closest int64 to the argument func round(in float64) int64 { return int64(math.Floor(in + 0.5)) } // bucketBoundary returns the first value in the bucket. func bucketBoundary(bucket uint8) int64 { if bucket == 0 { return 0 } return 1 << bucket } // bucketData holds data about a specific bucket for use in distTmpl. type bucketData struct { Lower, Upper int64 N int64 Pct, CumulativePct float64 GraphWidth int } // data holds data about a Distribution for use in distTmpl. type data struct { Buckets []*bucketData Count, Median int64 Mean, StandardDeviation float64 } // maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. const maxHTMLBarWidth = 350.0 // newData returns data representing h for use in distTmpl. func (h *histogram) newData() *data { // Force the allocation of buckets to simplify the rendering implementation h.allocateBuckets() // We scale the bars on the right so that the largest bar is // maxHTMLBarWidth pixels in width. 
maxBucket := int64(0) for _, n := range h.buckets { if n > maxBucket { maxBucket = n } } total := h.total() barsizeMult := maxHTMLBarWidth / float64(maxBucket) var pctMult float64 if total == 0 { pctMult = 1.0 } else { pctMult = 100.0 / float64(total) } buckets := make([]*bucketData, len(h.buckets)) runningTotal := int64(0) for i, n := range h.buckets { if n == 0 { continue } runningTotal += n var upperBound int64 if i < bucketCount-1 { upperBound = bucketBoundary(uint8(i + 1)) } else { upperBound = math.MaxInt64 } buckets[i] = &bucketData{ Lower: bucketBoundary(uint8(i)), Upper: upperBound, N: n, Pct: float64(n) * pctMult, CumulativePct: float64(runningTotal) * pctMult, GraphWidth: int(float64(n) * barsizeMult), } } return &data{ Buckets: buckets, Count: total, Median: h.median(), Mean: h.average(), StandardDeviation: h.standardDeviation(), } } func (h *histogram) html() template.HTML { buf := new(bytes.Buffer) if err := distTmpl().Execute(buf, h.newData()); err != nil { buf.Reset() log.Printf("net/trace: couldn't execute template: %v", err) } return template.HTML(buf.String()) } var distTmplCache *template.Template var distTmplOnce sync.Once func distTmpl() *template.Template { distTmplOnce.Do(func() { // Input: data distTmplCache = template.Must(template.New("distTmpl").Parse(`
Count: {{.Count}} Mean: {{printf "%.0f" .Mean}} StdDev: {{printf "%.0f" .StandardDeviation}} Median: {{.Median}}

{{range $b := .Buckets}} {{if $b}} {{end}} {{end}}
[ {{.Lower}}, {{.Upper}}) {{.N}} {{printf "%#.3f" .Pct}}% {{printf "%#.3f" .CumulativePct}}%
`)) }) return distTmplCache } trace.go000066400000000000000000000654321324746544700314710ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/net/trace// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* Package trace implements tracing of requests and long-lived objects. It exports HTTP interfaces on /debug/requests and /debug/events. A trace.Trace provides tracing for short-lived objects, usually requests. A request handler might be implemented like this: func fooHandler(w http.ResponseWriter, req *http.Request) { tr := trace.New("mypkg.Foo", req.URL.Path) defer tr.Finish() ... tr.LazyPrintf("some event %q happened", str) ... if err := somethingImportant(); err != nil { tr.LazyPrintf("somethingImportant failed: %v", err) tr.SetError() } } The /debug/requests HTTP endpoint organizes the traces by family, errors, and duration. It also provides histogram of request duration for each family. A trace.EventLog provides tracing for long-lived objects, such as RPC connections. // A Fetcher fetches URL paths for a single domain. type Fetcher struct { domain string events trace.EventLog } func NewFetcher(domain string) *Fetcher { return &Fetcher{ domain, trace.NewEventLog("mypkg.Fetcher", domain), } } func (f *Fetcher) Fetch(path string) (string, error) { resp, err := http.Get("http://" + f.domain + "/" + path) if err != nil { f.events.Errorf("Get(%q) = %v", path, err) return "", err } f.events.Printf("Get(%q) = %s", path, resp.Status) ... } func (f *Fetcher) Close() error { f.events.Finish() return nil } The /debug/events HTTP endpoint organizes the event logs by family and by time since the last error. The expanded view displays recent log entries and the log's call stack. */ package trace // import "golang.org/x/net/trace" import ( "bytes" "fmt" "html/template" "io" "log" "net" "net/http" "runtime" "sort" "strconv" "sync" "sync/atomic" "time" "golang.org/x/net/context" "golang.org/x/net/internal/timeseries" ) // DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. // FOR DEBUGGING ONLY. This will slow down the program. var DebugUseAfterFinish = false // AuthRequest determines whether a specific request is permitted to load the // /debug/requests or /debug/events pages. // // It returns two bools; the first indicates whether the page may be viewed at all, // and the second indicates whether sensitive events will be shown. // // AuthRequest may be replaced by a program to customize its authorization requirements. // // The default AuthRequest function returns (true, true) if and only if the request // comes from localhost/127.0.0.1/[::1]. var AuthRequest = func(req *http.Request) (any, sensitive bool) { // RemoteAddr is commonly in the form "IP" or "IP:port". // If it is in the form "IP:port", split off the port. 
host, _, err := net.SplitHostPort(req.RemoteAddr) if err != nil { host = req.RemoteAddr } switch host { case "localhost", "127.0.0.1", "::1": return true, true default: return false, false } } func init() { http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) { any, sensitive := AuthRequest(req) if !any { http.Error(w, "not allowed", http.StatusUnauthorized) return } w.Header().Set("Content-Type", "text/html; charset=utf-8") Render(w, req, sensitive) }) http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { any, sensitive := AuthRequest(req) if !any { http.Error(w, "not allowed", http.StatusUnauthorized) return } w.Header().Set("Content-Type", "text/html; charset=utf-8") RenderEvents(w, req, sensitive) }) } // Render renders the HTML page typically served at /debug/requests. // It does not do any auth checking; see AuthRequest for the default auth check // used by the handler registered on http.DefaultServeMux. // req may be nil. func Render(w io.Writer, req *http.Request, sensitive bool) { data := &struct { Families []string ActiveTraceCount map[string]int CompletedTraces map[string]*family // Set when a bucket has been selected. Traces traceList Family string Bucket int Expanded bool Traced bool Active bool ShowSensitive bool // whether to show sensitive events Histogram template.HTML HistogramWindow string // e.g. "last minute", "last hour", "all time" // If non-zero, the set of traces is a partial set, // and this is the total number. Total int }{ CompletedTraces: completedTraces, } data.ShowSensitive = sensitive if req != nil { // Allow show_sensitive=0 to force hiding of sensitive data for testing. // This only goes one way; you can't use show_sensitive=1 to see things. if req.FormValue("show_sensitive") == "0" { data.ShowSensitive = false } if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { data.Expanded = exp } if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { data.Traced = exp } } completedMu.RLock() data.Families = make([]string, 0, len(completedTraces)) for fam := range completedTraces { data.Families = append(data.Families, fam) } completedMu.RUnlock() sort.Strings(data.Families) // We are careful here to minimize the time spent locking activeMu, // since that lock is required every time an RPC starts and finishes. 
data.ActiveTraceCount = make(map[string]int, len(data.Families)) activeMu.RLock() for fam, s := range activeTraces { data.ActiveTraceCount[fam] = s.Len() } activeMu.RUnlock() var ok bool data.Family, data.Bucket, ok = parseArgs(req) switch { case !ok: // No-op case data.Bucket == -1: data.Active = true n := data.ActiveTraceCount[data.Family] data.Traces = getActiveTraces(data.Family) if len(data.Traces) < n { data.Total = n } case data.Bucket < bucketsPerFamily: if b := lookupBucket(data.Family, data.Bucket); b != nil { data.Traces = b.Copy(data.Traced) } default: if f := getFamily(data.Family, false); f != nil { var obs timeseries.Observable f.LatencyMu.RLock() switch o := data.Bucket - bucketsPerFamily; o { case 0: obs = f.Latency.Minute() data.HistogramWindow = "last minute" case 1: obs = f.Latency.Hour() data.HistogramWindow = "last hour" case 2: obs = f.Latency.Total() data.HistogramWindow = "all time" } f.LatencyMu.RUnlock() if obs != nil { data.Histogram = obs.(*histogram).html() } } } if data.Traces != nil { defer data.Traces.Free() sort.Sort(data.Traces) } completedMu.RLock() defer completedMu.RUnlock() if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { log.Printf("net/trace: Failed executing template: %v", err) } } func parseArgs(req *http.Request) (fam string, b int, ok bool) { if req == nil { return "", 0, false } fam, bStr := req.FormValue("fam"), req.FormValue("b") if fam == "" || bStr == "" { return "", 0, false } b, err := strconv.Atoi(bStr) if err != nil || b < -1 { return "", 0, false } return fam, b, true } func lookupBucket(fam string, b int) *traceBucket { f := getFamily(fam, false) if f == nil || b < 0 || b >= len(f.Buckets) { return nil } return f.Buckets[b] } type contextKeyT string var contextKey = contextKeyT("golang.org/x/net/trace.Trace") // NewContext returns a copy of the parent context // and associates it with a Trace. func NewContext(ctx context.Context, tr Trace) context.Context { return context.WithValue(ctx, contextKey, tr) } // FromContext returns the Trace bound to the context, if any. func FromContext(ctx context.Context) (tr Trace, ok bool) { tr, ok = ctx.Value(contextKey).(Trace) return } // Trace represents an active request. type Trace interface { // LazyLog adds x to the event log. It will be evaluated each time the // /debug/requests page is rendered. Any memory referenced by x will be // pinned until the trace is finished and later discarded. LazyLog(x fmt.Stringer, sensitive bool) // LazyPrintf evaluates its arguments with fmt.Sprintf each time the // /debug/requests page is rendered. Any memory referenced by a will be // pinned until the trace is finished and later discarded. LazyPrintf(format string, a ...interface{}) // SetError declares that this trace resulted in an error. SetError() // SetRecycler sets a recycler for the trace. // f will be called for each event passed to LazyLog at a time when // it is no longer required, whether while the trace is still active // and the event is discarded, or when a completed trace is discarded. SetRecycler(f func(interface{})) // SetTraceInfo sets the trace info for the trace. // This is currently unused. SetTraceInfo(traceID, spanID uint64) // SetMaxEvents sets the maximum number of events that will be stored // in the trace. This has no effect if any events have already been // added to the trace. SetMaxEvents(m int) // Finish declares that this trace is complete. // The trace should not be used after calling this method. 
Finish() } type lazySprintf struct { format string a []interface{} } func (l *lazySprintf) String() string { return fmt.Sprintf(l.format, l.a...) } // New returns a new Trace with the specified family and title. func New(family, title string) Trace { tr := newTrace() tr.ref() tr.Family, tr.Title = family, title tr.Start = time.Now() tr.maxEvents = maxEventsPerTrace tr.events = tr.eventsBuf[:0] activeMu.RLock() s := activeTraces[tr.Family] activeMu.RUnlock() if s == nil { activeMu.Lock() s = activeTraces[tr.Family] // check again if s == nil { s = new(traceSet) activeTraces[tr.Family] = s } activeMu.Unlock() } s.Add(tr) // Trigger allocation of the completed trace structure for this family. // This will cause the family to be present in the request page during // the first trace of this family. We don't care about the return value, // nor is there any need for this to run inline, so we execute it in its // own goroutine, but only if the family isn't allocated yet. completedMu.RLock() if _, ok := completedTraces[tr.Family]; !ok { go allocFamily(tr.Family) } completedMu.RUnlock() return tr } func (tr *trace) Finish() { tr.Elapsed = time.Now().Sub(tr.Start) if DebugUseAfterFinish { buf := make([]byte, 4<<10) // 4 KB should be enough n := runtime.Stack(buf, false) tr.finishStack = buf[:n] } activeMu.RLock() m := activeTraces[tr.Family] activeMu.RUnlock() m.Remove(tr) f := getFamily(tr.Family, true) for _, b := range f.Buckets { if b.Cond.match(tr) { b.Add(tr) } } // Add a sample of elapsed time as microseconds to the family's timeseries h := new(histogram) h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) f.LatencyMu.Lock() f.Latency.Add(h) f.LatencyMu.Unlock() tr.unref() // matches ref in New } const ( bucketsPerFamily = 9 tracesPerBucket = 10 maxActiveTraces = 20 // Maximum number of active traces to show. maxEventsPerTrace = 10 numHistogramBuckets = 38 ) var ( // The active traces. activeMu sync.RWMutex activeTraces = make(map[string]*traceSet) // family -> traces // Families of completed traces. completedMu sync.RWMutex completedTraces = make(map[string]*family) // family -> traces ) type traceSet struct { mu sync.RWMutex m map[*trace]bool // We could avoid the entire map scan in FirstN by having a slice of all the traces // ordered by start time, and an index into that from the trace struct, with a periodic // repack of the slice after enough traces finish; we could also use a skip list or similar. // However, that would shift some of the expense from /debug/requests time to RPC time, // which is probably the wrong trade-off. } func (ts *traceSet) Len() int { ts.mu.RLock() defer ts.mu.RUnlock() return len(ts.m) } func (ts *traceSet) Add(tr *trace) { ts.mu.Lock() if ts.m == nil { ts.m = make(map[*trace]bool) } ts.m[tr] = true ts.mu.Unlock() } func (ts *traceSet) Remove(tr *trace) { ts.mu.Lock() delete(ts.m, tr) ts.mu.Unlock() } // FirstN returns the first n traces ordered by time. func (ts *traceSet) FirstN(n int) traceList { ts.mu.RLock() defer ts.mu.RUnlock() if n > len(ts.m) { n = len(ts.m) } trl := make(traceList, 0, n) // Fast path for when no selectivity is needed. if n == len(ts.m) { for tr := range ts.m { tr.ref() trl = append(trl, tr) } sort.Sort(trl) return trl } // Pick the oldest n traces. // This is inefficient. See the comment in the traceSet struct. for tr := range ts.m { // Put the first n traces into trl in the order they occur. // When we have n, sort trl, and thereafter maintain its order. 
if len(trl) < n { tr.ref() trl = append(trl, tr) if len(trl) == n { // This is guaranteed to happen exactly once during this loop. sort.Sort(trl) } continue } if tr.Start.After(trl[n-1].Start) { continue } // Find where to insert this one. tr.ref() i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) trl[n-1].unref() copy(trl[i+1:], trl[i:]) trl[i] = tr } return trl } func getActiveTraces(fam string) traceList { activeMu.RLock() s := activeTraces[fam] activeMu.RUnlock() if s == nil { return nil } return s.FirstN(maxActiveTraces) } func getFamily(fam string, allocNew bool) *family { completedMu.RLock() f := completedTraces[fam] completedMu.RUnlock() if f == nil && allocNew { f = allocFamily(fam) } return f } func allocFamily(fam string) *family { completedMu.Lock() defer completedMu.Unlock() f := completedTraces[fam] if f == nil { f = newFamily() completedTraces[fam] = f } return f } // family represents a set of trace buckets and associated latency information. type family struct { // traces may occur in multiple buckets. Buckets [bucketsPerFamily]*traceBucket // latency time series LatencyMu sync.RWMutex Latency *timeseries.MinuteHourSeries } func newFamily() *family { return &family{ Buckets: [bucketsPerFamily]*traceBucket{ {Cond: minCond(0)}, {Cond: minCond(50 * time.Millisecond)}, {Cond: minCond(100 * time.Millisecond)}, {Cond: minCond(200 * time.Millisecond)}, {Cond: minCond(500 * time.Millisecond)}, {Cond: minCond(1 * time.Second)}, {Cond: minCond(10 * time.Second)}, {Cond: minCond(100 * time.Second)}, {Cond: errorCond{}}, }, Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), } } // traceBucket represents a size-capped bucket of historic traces, // along with a condition for a trace to belong to the bucket. type traceBucket struct { Cond cond // Ring buffer implementation of a fixed-size FIFO queue. mu sync.RWMutex buf [tracesPerBucket]*trace start int // < tracesPerBucket length int // <= tracesPerBucket } func (b *traceBucket) Add(tr *trace) { b.mu.Lock() defer b.mu.Unlock() i := b.start + b.length if i >= tracesPerBucket { i -= tracesPerBucket } if b.length == tracesPerBucket { // "Remove" an element from the bucket. b.buf[i].unref() b.start++ if b.start == tracesPerBucket { b.start = 0 } } b.buf[i] = tr if b.length < tracesPerBucket { b.length++ } tr.ref() } // Copy returns a copy of the traces in the bucket. // If tracedOnly is true, only the traces with trace information will be returned. // The logs will be ref'd before returning; the caller should call // the Free method when it is done with them. // TODO(dsymonds): keep track of traced requests in separate buckets. func (b *traceBucket) Copy(tracedOnly bool) traceList { b.mu.RLock() defer b.mu.RUnlock() trl := make(traceList, 0, b.length) for i, x := 0, b.start; i < b.length; i++ { tr := b.buf[x] if !tracedOnly || tr.spanID != 0 { tr.ref() trl = append(trl, tr) } x++ if x == b.length { x = 0 } } return trl } func (b *traceBucket) Empty() bool { b.mu.RLock() defer b.mu.RUnlock() return b.length == 0 } // cond represents a condition on a trace. 
type cond interface { match(t *trace) bool String() string } type minCond time.Duration func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } type errorCond struct{} func (e errorCond) match(t *trace) bool { return t.IsError } func (e errorCond) String() string { return "errors" } type traceList []*trace // Free calls unref on each element of the list. func (trl traceList) Free() { for _, t := range trl { t.unref() } } // traceList may be sorted in reverse chronological order. func (trl traceList) Len() int { return len(trl) } func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } // An event is a timestamped log entry in a trace. type event struct { When time.Time Elapsed time.Duration // since previous event in trace NewDay bool // whether this event is on a different day to the previous event Recyclable bool // whether this event was passed via LazyLog Sensitive bool // whether this event contains sensitive information What interface{} // string or fmt.Stringer } // WhenString returns a string representation of the elapsed time of the event. // It will include the date if midnight was crossed. func (e event) WhenString() string { if e.NewDay { return e.When.Format("2006/01/02 15:04:05.000000") } return e.When.Format("15:04:05.000000") } // discarded represents a number of discarded events. // It is stored as *discarded to make it easier to update in-place. type discarded int func (d *discarded) String() string { return fmt.Sprintf("(%d events discarded)", int(*d)) } // trace represents an active or complete request, // either sent or received by this program. type trace struct { // Family is the top-level grouping of traces to which this belongs. Family string // Title is the title of this trace. Title string // Timing information. Start time.Time Elapsed time.Duration // zero while active // Trace information if non-zero. traceID uint64 spanID uint64 // Whether this trace resulted in an error. IsError bool // Append-only sequence of events (modulo discards). mu sync.RWMutex events []event maxEvents int refs int32 // how many buckets this is in recycler func(interface{}) disc discarded // scratch space to avoid allocation finishStack []byte // where finish was called, if DebugUseAfterFinish is set eventsBuf [4]event // preallocated buffer in case we only log a few events } func (tr *trace) reset() { // Clear all but the mutex. Mutexes may not be copied, even when unlocked. tr.Family = "" tr.Title = "" tr.Start = time.Time{} tr.Elapsed = 0 tr.traceID = 0 tr.spanID = 0 tr.IsError = false tr.maxEvents = 0 tr.events = nil tr.refs = 0 tr.recycler = nil tr.disc = 0 tr.finishStack = nil for i := range tr.eventsBuf { tr.eventsBuf[i] = event{} } } // delta returns the elapsed time since the last event or the trace start, // and whether it spans midnight. 
// L >= tr.mu func (tr *trace) delta(t time.Time) (time.Duration, bool) { if len(tr.events) == 0 { return t.Sub(tr.Start), false } prev := tr.events[len(tr.events)-1].When return t.Sub(prev), prev.Day() != t.Day() } func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { if DebugUseAfterFinish && tr.finishStack != nil { buf := make([]byte, 4<<10) // 4 KB should be enough n := runtime.Stack(buf, false) log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) } /* NOTE TO DEBUGGERS If you are here because your program panicked in this code, it is almost definitely the fault of code using this package, and very unlikely to be the fault of this code. The most likely scenario is that some code elsewhere is using a trace.Trace after its Finish method is called. You can temporarily set the DebugUseAfterFinish var to help discover where that is; do not leave that var set, since it makes this package much less efficient. */ e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} tr.mu.Lock() e.Elapsed, e.NewDay = tr.delta(e.When) if len(tr.events) < tr.maxEvents { tr.events = append(tr.events, e) } else { // Discard the middle events. di := int((tr.maxEvents - 1) / 2) if d, ok := tr.events[di].What.(*discarded); ok { (*d)++ } else { // disc starts at two to count for the event it is replacing, // plus the next one that we are about to drop. tr.disc = 2 if tr.recycler != nil && tr.events[di].Recyclable { go tr.recycler(tr.events[di].What) } tr.events[di].What = &tr.disc } // The timestamp of the discarded meta-event should be // the time of the last event it is representing. tr.events[di].When = tr.events[di+1].When if tr.recycler != nil && tr.events[di+1].Recyclable { go tr.recycler(tr.events[di+1].What) } copy(tr.events[di+1:], tr.events[di+2:]) tr.events[tr.maxEvents-1] = e } tr.mu.Unlock() } func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { tr.addEvent(x, true, sensitive) } func (tr *trace) LazyPrintf(format string, a ...interface{}) { tr.addEvent(&lazySprintf{format, a}, false, false) } func (tr *trace) SetError() { tr.IsError = true } func (tr *trace) SetRecycler(f func(interface{})) { tr.recycler = f } func (tr *trace) SetTraceInfo(traceID, spanID uint64) { tr.traceID, tr.spanID = traceID, spanID } func (tr *trace) SetMaxEvents(m int) { // Always keep at least three events: first, discarded count, last. if len(tr.events) == 0 && m > 3 { tr.maxEvents = m } } func (tr *trace) ref() { atomic.AddInt32(&tr.refs, 1) } func (tr *trace) unref() { if atomic.AddInt32(&tr.refs, -1) == 0 { if tr.recycler != nil { // freeTrace clears tr, so we hold tr.recycler and tr.events here. go func(f func(interface{}), es []event) { for _, e := range es { if e.Recyclable { f(e.What) } } }(tr.recycler, tr.events) } freeTrace(tr) } } func (tr *trace) When() string { return tr.Start.Format("2006/01/02 15:04:05.000000") } func (tr *trace) ElapsedTime() string { t := tr.Elapsed if t == 0 { // Active trace. t = time.Since(tr.Start) } return fmt.Sprintf("%.6f", t.Seconds()) } func (tr *trace) Events() []event { tr.mu.RLock() defer tr.mu.RUnlock() return tr.events } var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? // newTrace returns a trace ready to use. func newTrace() *trace { select { case tr := <-traceFreeList: return tr default: return new(trace) } } // freeTrace adds tr to traceFreeList if there's room. // This is non-blocking. 
func freeTrace(tr *trace) { if DebugUseAfterFinish { return // never reuse } tr.reset() select { case traceFreeList <- tr: default: } } func elapsed(d time.Duration) string { b := []byte(fmt.Sprintf("%.6f", d.Seconds())) // For subsecond durations, blank all zeros before decimal point, // and all zeros between the decimal point and the first non-zero digit. if d < time.Second { dot := bytes.IndexByte(b, '.') for i := 0; i < dot; i++ { b[i] = ' ' } for i := dot + 1; i < len(b); i++ { if b[i] == '0' { b[i] = ' ' } else { break } } } return string(b) } var pageTmplCache *template.Template var pageTmplOnce sync.Once func pageTmpl() *template.Template { pageTmplOnce.Do(func() { pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ "elapsed": elapsed, "add": func(a, b int) int { return a + b }, }).Parse(pageHTML)) }) return pageTmplCache } const pageHTML = ` {{template "Prolog" .}} {{template "StatusTable" .}} {{template "Epilog" .}} {{define "Prolog"}} /debug/requests

/debug/requests

{{end}} {{/* end of Prolog */}} {{define "StatusTable"}} {{range $fam := .Families}} {{$n := index $.ActiveTraceCount $fam}} {{$f := index $.CompletedTraces $fam}} {{range $i, $b := $f.Buckets}} {{$empty := $b.Empty}} {{end}} {{$nb := len $f.Buckets}} {{end}}
{{$fam}} {{if $n}}{{end}} [{{$n}} active] {{if $n}}{{end}} {{if not $empty}}{{end}} [{{.Cond}}] {{if not $empty}}{{end}} [minute] [hour] [total]
{{end}} {{/* end of StatusTable */}} {{define "Epilog"}} {{if $.Traces}}

Family: {{$.Family}}

{{if or $.Expanded $.Traced}} [Normal/Summary] {{else}} [Normal/Summary] {{end}} {{if or (not $.Expanded) $.Traced}} [Normal/Expanded] {{else}} [Normal/Expanded] {{end}} {{if not $.Active}} {{if or $.Expanded (not $.Traced)}} [Traced/Summary] {{else}} [Traced/Summary] {{end}} {{if or (not $.Expanded) (not $.Traced)}} [Traced/Expanded] {{else}} [Traced/Expanded] {{end}} {{end}} {{if $.Total}}

Showing {{len $.Traces}} of {{$.Total}} traces.

{{end}} {{range $tr := $.Traces}} {{/* TODO: include traceID/spanID */}} {{if $.Expanded}} {{range $tr.Events}} {{end}} {{end}} {{end}}
{{if $.Active}}Active{{else}}Completed{{end}} Requests
When Elapsed (s)
{{$tr.When}} {{$tr.ElapsedTime}} {{$tr.Title}}
{{.WhenString}} {{elapsed .Elapsed}} {{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
{{end}} {{/* if $.Traces */}} {{if $.Histogram}}

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

{{$.Histogram}} {{end}} {{/* if $.Histogram */}} {{end}} {{/* end of Epilog */}} ` gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/000077500000000000000000000000001324746544700272115ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/LICENSE000066400000000000000000000027071324746544700302240ustar00rootroot00000000000000Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/PATENTS000066400000000000000000000024271324746544700302570ustar00rootroot00000000000000Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. 
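// Illustrative sketch, not part of the vendored sources: trace.go above
// documents replacing trace.AuthRequest and instrumenting request handlers,
// and a minimal consumer of the package might look like the code below. The
// handler path, trace family name and listen address are assumptions invented
// for this example.
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/trace" // importing registers /debug/requests and /debug/events
)

func main() {
	// Loosen the default localhost-only check: any client may view the debug
	// pages, but sensitive events remain hidden.
	trace.AuthRequest = func(req *http.Request) (allowed, sensitive bool) {
		return true, false
	}

	http.HandleFunc("/work", func(w http.ResponseWriter, req *http.Request) {
		tr := trace.New("example.Work", req.URL.Path)
		defer tr.Finish()

		tr.LazyPrintf("handling %s", req.URL.Path)
		w.Write([]byte("done\n"))
	})

	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}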
gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/internal/000077500000000000000000000000001324746544700310255ustar00rootroot00000000000000gen/000077500000000000000000000000001324746544700315175ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/internalcode.go000066400000000000000000000220631324746544700327630ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/internal/gen// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gen import ( "bytes" "encoding/gob" "fmt" "hash" "hash/fnv" "io" "log" "os" "reflect" "strings" "unicode" "unicode/utf8" ) // This file contains utilities for generating code. // TODO: other write methods like: // - slices, maps, types, etc. // CodeWriter is a utility for writing structured code. It computes the content // hash and size of written content. It ensures there are newlines between // written code blocks. type CodeWriter struct { buf bytes.Buffer Size int Hash hash.Hash32 // content hash gob *gob.Encoder // For comments we skip the usual one-line separator if they are followed by // a code block. skipSep bool } func (w *CodeWriter) Write(p []byte) (n int, err error) { return w.buf.Write(p) } // NewCodeWriter returns a new CodeWriter. func NewCodeWriter() *CodeWriter { h := fnv.New32() return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)} } // WriteGoFile appends the buffer with the total size of all created structures // and writes it as a Go file to the the given file with the given package name. func (w *CodeWriter) WriteGoFile(filename, pkg string) { f, err := os.Create(filename) if err != nil { log.Fatalf("Could not create file %s: %v", filename, err) } defer f.Close() if _, err = w.WriteGo(f, pkg); err != nil { log.Fatalf("Error writing file %s: %v", filename, err) } } // WriteGo appends the buffer with the total size of all created structures and // writes it as a Go file to the the given writer with the given package name. func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) { sz := w.Size w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32()) defer w.buf.Reset() return WriteGo(out, pkg, w.buf.Bytes()) } func (w *CodeWriter) printf(f string, x ...interface{}) { fmt.Fprintf(w, f, x...) } func (w *CodeWriter) insertSep() { if w.skipSep { w.skipSep = false return } // Use at least two newlines to ensure a blank space between the previous // block. WriteGoFile will remove extraneous newlines. w.printf("\n\n") } // WriteComment writes a comment block. All line starts are prefixed with "//". // Initial empty lines are gobbled. The indentation for the first line is // stripped from consecutive lines. func (w *CodeWriter) WriteComment(comment string, args ...interface{}) { s := fmt.Sprintf(comment, args...) s = strings.Trim(s, "\n") // Use at least two newlines to ensure a blank space between the previous // block. WriteGoFile will remove extraneous newlines. w.printf("\n\n// ") w.skipSep = true // strip first indent level. 
sep := "\n" for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] { sep += s[:1] } strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s) w.printf("\n") } func (w *CodeWriter) writeSizeInfo(size int) { w.printf("// Size: %d bytes\n", size) } // WriteConst writes a constant of the given name and value. func (w *CodeWriter) WriteConst(name string, x interface{}) { w.insertSep() v := reflect.ValueOf(x) switch v.Type().Kind() { case reflect.String: w.printf("const %s %s = ", name, typeName(x)) w.WriteString(v.String()) w.printf("\n") default: w.printf("const %s = %#v\n", name, x) } } // WriteVar writes a variable of the given name and value. func (w *CodeWriter) WriteVar(name string, x interface{}) { w.insertSep() v := reflect.ValueOf(x) oldSize := w.Size sz := int(v.Type().Size()) w.Size += sz switch v.Type().Kind() { case reflect.String: w.printf("var %s %s = ", name, typeName(x)) w.WriteString(v.String()) case reflect.Struct: w.gob.Encode(x) fallthrough case reflect.Slice, reflect.Array: w.printf("var %s = ", name) w.writeValue(v) w.writeSizeInfo(w.Size - oldSize) default: w.printf("var %s %s = ", name, typeName(x)) w.gob.Encode(x) w.writeValue(v) w.writeSizeInfo(w.Size - oldSize) } w.printf("\n") } func (w *CodeWriter) writeValue(v reflect.Value) { x := v.Interface() switch v.Kind() { case reflect.String: w.WriteString(v.String()) case reflect.Array: // Don't double count: callers of WriteArray count on the size being // added, so we need to discount it here. w.Size -= int(v.Type().Size()) w.writeSlice(x, true) case reflect.Slice: w.writeSlice(x, false) case reflect.Struct: w.printf("%s{\n", typeName(v.Interface())) t := v.Type() for i := 0; i < v.NumField(); i++ { w.printf("%s: ", t.Field(i).Name) w.writeValue(v.Field(i)) w.printf(",\n") } w.printf("}") default: w.printf("%#v", x) } } // WriteString writes a string literal. func (w *CodeWriter) WriteString(s string) { s = strings.Replace(s, `\`, `\\`, -1) io.WriteString(w.Hash, s) // content hash w.Size += len(s) const maxInline = 40 if len(s) <= maxInline { w.printf("%q", s) return } // We will render the string as a multi-line string. const maxWidth = 80 - 4 - len(`"`) - len(`" +`) // When starting on its own line, go fmt indents line 2+ an extra level. n, max := maxWidth, maxWidth-4 // As per https://golang.org/issue/18078, the compiler has trouble // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN, // for large N. We insert redundant, explicit parentheses to work around // that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 + // ... + s127) + etc + (etc + ... + sN). explicitParens, extraComment := len(s) > 128*1024, "" if explicitParens { w.printf(`(`) extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078" } // Print "" +\n, if a string does not start on its own line. 
b := w.buf.Bytes() if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment) n, max = maxWidth, maxWidth } w.printf(`"`) for sz, p, nLines := 0, 0, 0; p < len(s); { var r rune r, sz = utf8.DecodeRuneInString(s[p:]) out := s[p : p+sz] chars := 1 if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' { switch sz { case 1: out = fmt.Sprintf("\\x%02x", s[p]) case 2, 3: out = fmt.Sprintf("\\u%04x", r) case 4: out = fmt.Sprintf("\\U%08x", r) } chars = len(out) } if n -= chars; n < 0 { nLines++ if explicitParens && nLines&63 == 63 { w.printf("\") + (\"") } w.printf("\" +\n\"") n = max - len(out) } w.printf("%s", out) p += sz } w.printf(`"`) if explicitParens { w.printf(`)`) } } // WriteSlice writes a slice value. func (w *CodeWriter) WriteSlice(x interface{}) { w.writeSlice(x, false) } // WriteArray writes an array value. func (w *CodeWriter) WriteArray(x interface{}) { w.writeSlice(x, true) } func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { v := reflect.ValueOf(x) w.gob.Encode(v.Len()) w.Size += v.Len() * int(v.Type().Elem().Size()) name := typeName(x) if isArray { name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) } if isArray { w.printf("%s{\n", name) } else { w.printf("%s{ // %d elements\n", name, v.Len()) } switch kind := v.Type().Elem().Kind(); kind { case reflect.String: for _, s := range x.([]string) { w.WriteString(s) w.printf(",\n") } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: // nLine and nBlock are the number of elements per line and block. nLine, nBlock, format := 8, 64, "%d," switch kind { case reflect.Uint8: format = "%#02x," case reflect.Uint16: format = "%#04x," case reflect.Uint32: nLine, nBlock, format = 4, 32, "%#08x," case reflect.Uint, reflect.Uint64: nLine, nBlock, format = 4, 32, "%#016x," case reflect.Int8: nLine = 16 } n := nLine for i := 0; i < v.Len(); i++ { if i%nBlock == 0 && v.Len() > nBlock { w.printf("// Entry %X - %X\n", i, i+nBlock-1) } x := v.Index(i).Interface() w.gob.Encode(x) w.printf(format, x) if n--; n == 0 { n = nLine w.printf("\n") } } w.printf("\n") case reflect.Struct: zero := reflect.Zero(v.Type().Elem()).Interface() for i := 0; i < v.Len(); i++ { x := v.Index(i).Interface() w.gob.EncodeValue(v) if !reflect.DeepEqual(zero, x) { line := fmt.Sprintf("%#v,\n", x) line = line[strings.IndexByte(line, '{'):] w.printf("%d: ", i) w.printf(line) } } case reflect.Array: for i := 0; i < v.Len(); i++ { w.printf("%d: %#v,\n", i, v.Index(i).Interface()) } default: panic("gen: slice elem type not supported") } w.printf("}") } // WriteType writes a definition of the type of the given value and returns the // type name. func (w *CodeWriter) WriteType(x interface{}) string { t := reflect.TypeOf(x) w.printf("type %s struct {\n", t.Name()) for i := 0; i < t.NumField(); i++ { w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) } w.printf("}\n") return t.Name() } // typeName returns the name of the go type of x. func typeName(x interface{}) string { t := reflect.ValueOf(x).Type() return strings.Replace(fmt.Sprint(t), "main.", "", 1) } gen.go000066400000000000000000000206351324746544700326250ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/internal/gen// Copyright 2015 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package gen contains common code for the various code generation tools in the
// text repository. Its usage ensures consistency between tools.
//
// This package defines command line flags that are common to most generation
// tools. The flags allow for specifying specific Unicode and CLDR versions
// in the public Unicode data repository (http://www.unicode.org/Public).
//
// A local Unicode data mirror can be set through the flag -local or the
// environment variable UNICODE_DIR. The former takes precedence. The local
// directory should follow the same structure as the public repository.
//
// IANA data can also optionally be mirrored by putting it in the iana directory
// rooted at the top of the local mirror. Beware, though, that IANA data is not
// versioned. So it is up to the developer to use the right version.
package gen // import "golang.org/x/text/internal/gen"

import (
	"bytes"
	"flag"
	"fmt"
	"go/build"
	"go/format"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"sync"
	"unicode"

	"golang.org/x/text/unicode/cldr"
)

var (
	url            = flag.String("url", "http://www.unicode.org/Public", "URL of Unicode database directory")
	iana           = flag.String("iana", "http://www.iana.org", "URL of the IANA repository")
	unicodeVersion = flag.String("unicode", getEnv("UNICODE_VERSION", unicode.Version), "unicode version to use")
	cldrVersion    = flag.String("cldr", getEnv("CLDR_VERSION", cldr.Version), "cldr version to use")
)

func getEnv(name, def string) string {
	if v := os.Getenv(name); v != "" {
		return v
	}
	return def
}

// Init performs common initialization for a gen command. It parses the flags
// and sets up the standard logging parameters.
func Init() {
	log.SetPrefix("")
	log.SetFlags(log.Lshortfile)
	flag.Parse()
}

const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.

package %s

`

// UnicodeVersion reports the requested Unicode version.
func UnicodeVersion() string {
	return *unicodeVersion
}

// CLDRVersion reports the requested CLDR version.
func CLDRVersion() string {
	return *cldrVersion
}

// IsLocal reports whether data files are available locally.
func IsLocal() bool {
	dir, err := localReadmeFile()
	if err != nil {
		return false
	}
	if _, err = os.Stat(dir); err != nil {
		return false
	}
	return true
}

// OpenUCDFile opens the requested UCD file. The file is specified relative to
// the public Unicode root directory. It will call log.Fatal if there are any
// errors.
func OpenUCDFile(file string) io.ReadCloser {
	return openUnicode(path.Join(*unicodeVersion, "ucd", file))
}

// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there
// are any errors.
func OpenCLDRCoreZip() io.ReadCloser {
	return OpenUnicodeFile("cldr", *cldrVersion, "core.zip")
}

// OpenUnicodeFile opens the requested file of the requested category from the
// root of the Unicode data archive. The file is specified relative to the
// public Unicode root directory. If version is "", it will use the default
// Unicode version. It will call log.Fatal if there are any errors.
func OpenUnicodeFile(category, version, file string) io.ReadCloser {
	if version == "" {
		version = UnicodeVersion()
	}
	return openUnicode(path.Join(category, version, file))
}

// OpenIANAFile opens the requested IANA file. The file is specified relative
// to the IANA root, which is typically either http://www.iana.org or the
// iana directory in the local mirror.
It will call log.Fatal if there are any // errors. func OpenIANAFile(path string) io.ReadCloser { return Open(*iana, "iana", path) } var ( dirMutex sync.Mutex localDir string ) const permissions = 0755 func localReadmeFile() (string, error) { p, err := build.Import("golang.org/x/text", "", build.FindOnly) if err != nil { return "", fmt.Errorf("Could not locate package: %v", err) } return filepath.Join(p.Dir, "DATA", "README"), nil } func getLocalDir() string { dirMutex.Lock() defer dirMutex.Unlock() readme, err := localReadmeFile() if err != nil { log.Fatal(err) } dir := filepath.Dir(readme) if _, err := os.Stat(readme); err != nil { if err := os.MkdirAll(dir, permissions); err != nil { log.Fatalf("Could not create directory: %v", err) } ioutil.WriteFile(readme, []byte(readmeTxt), permissions) } return dir } const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT. This directory contains downloaded files used to generate the various tables in the golang.org/x/text subrepo. Note that the language subtag repo (iana/assignments/language-subtag-registry) and all other times in the iana subdirectory are not versioned and will need to be periodically manually updated. The easiest way to do this is to remove the entire iana directory. This is mostly of concern when updating the language package. ` // Open opens subdir/path if a local directory is specified and the file exists, // where subdir is a directory relative to the local root, or fetches it from // urlRoot/path otherwise. It will call log.Fatal if there are any errors. func Open(urlRoot, subdir, path string) io.ReadCloser { file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path)) return open(file, urlRoot, path) } func openUnicode(path string) io.ReadCloser { file := filepath.Join(getLocalDir(), filepath.FromSlash(path)) return open(file, *url, path) } // TODO: automatically periodically update non-versioned files. func open(file, urlRoot, path string) io.ReadCloser { if f, err := os.Open(file); err == nil { return f } r := get(urlRoot, path) defer r.Close() b, err := ioutil.ReadAll(r) if err != nil { log.Fatalf("Could not download file: %v", err) } os.MkdirAll(filepath.Dir(file), permissions) if err := ioutil.WriteFile(file, b, permissions); err != nil { log.Fatalf("Could not create file: %v", err) } return ioutil.NopCloser(bytes.NewReader(b)) } func get(root, path string) io.ReadCloser { url := root + "/" + path fmt.Printf("Fetching %s...", url) defer fmt.Println(" done.") resp, err := http.Get(url) if err != nil { log.Fatalf("HTTP GET: %v", err) } if resp.StatusCode != 200 { log.Fatalf("Bad GET status for %q: %q", url, resp.Status) } return resp.Body } // TODO: use Write*Version in all applicable packages. // WriteUnicodeVersion writes a constant for the Unicode version from which the // tables are generated. func WriteUnicodeVersion(w io.Writer) { fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n") fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion()) } // WriteCLDRVersion writes a constant for the CLDR version from which the // tables are generated. func WriteCLDRVersion(w io.Writer) { fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n") fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion()) } // WriteGoFile prepends a standard file comment and package statement to the // given bytes, applies gofmt, and writes them to a file with the given name. 
// It will call log.Fatal if there are any errors. func WriteGoFile(filename, pkg string, b []byte) { w, err := os.Create(filename) if err != nil { log.Fatalf("Could not create file %s: %v", filename, err) } defer w.Close() if _, err = WriteGo(w, pkg, b); err != nil { log.Fatalf("Error writing file %s: %v", filename, err) } } // WriteGo prepends a standard file comment and package statement to the given // bytes, applies gofmt, and writes them to w. func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) { src := []byte(fmt.Sprintf(header, pkg)) src = append(src, b...) formatted, err := format.Source(src) if err != nil { // Print the generated code even in case of an error so that the // returned error can be meaningfully interpreted. n, _ = w.Write(src) return n, err } return w.Write(formatted) } // Repackage rewrites a Go file from belonging to package main to belonging to // the given package. func Repackage(inFile, outFile, pkg string) { src, err := ioutil.ReadFile(inFile) if err != nil { log.Fatalf("reading %s: %v", inFile, err) } const toDelete = "package main\n\n" i := bytes.Index(src, []byte(toDelete)) if i < 0 { log.Fatalf("Could not find %q in %s.", toDelete, inFile) } w := &bytes.Buffer{} w.Write(src[i+len(toDelete):]) WriteGoFile(outFile, pkg, w.Bytes()) } triegen/000077500000000000000000000000001324746544700324035ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/internalcompact.go000066400000000000000000000037661324746544700343740ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/internal/triegen// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package triegen // This file defines Compacter and its implementations. import "io" // A Compacter generates an alternative, more space-efficient way to store a // trie value block. A trie value block holds all possible values for the last // byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block // always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0). type Compacter interface { // Size returns whether the Compacter could encode the given block as well // as its size in case it can. len(v) is always 64. Size(v []uint64) (sz int, ok bool) // Store stores the block using the Compacter's compression method. // It returns a handle with which the block can be retrieved. // len(v) is always 64. Store(v []uint64) uint32 // Print writes the data structures associated to the given store to w. Print(w io.Writer) error // Handler returns the name of a function that gets called during trie // lookup for blocks generated by the Compacter. The function should be of // the form func (n uint32, b byte) uint64, where n is the index returned by // the Compacter's Store method and b is the last byte of the UTF-8 // encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the // block. Handler() string } // simpleCompacter is the default Compacter used by builder. It implements a // normal trie block. 
type simpleCompacter builder func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) { return blockSize * b.ValueSize, true } func (b *simpleCompacter) Store(v []uint64) uint32 { h := uint32(len(b.ValueBlocks) - blockOffset) b.ValueBlocks = append(b.ValueBlocks, v) return h } func (b *simpleCompacter) Print(io.Writer) error { // Structures are printed in print.go. return nil } func (b *simpleCompacter) Handler() string { panic("Handler should be special-cased for this Compacter") } print.go000066400000000000000000000164721324746544700341000ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/internal/triegen// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package triegen import ( "bytes" "fmt" "io" "strings" "text/template" ) // print writes all the data structures as well as the code necessary to use the // trie to w. func (b *builder) print(w io.Writer) error { b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize // If we only have one root trie, all starter blocks are at position 0 and // we can access the arrays directly. if len(b.Trie) == 1 { // At this point we cannot refer to the generated tables directly. b.ASCIIBlock = b.Name + "Values" b.StarterBlock = b.Name + "Index" } else { // Otherwise we need to have explicit starter indexes in the trie // structure. b.ASCIIBlock = "t.ascii" b.StarterBlock = "t.utf8Start" } b.SourceType = "[]byte" if err := lookupGen.Execute(w, b); err != nil { return err } b.SourceType = "string" if err := lookupGen.Execute(w, b); err != nil { return err } if err := trieGen.Execute(w, b); err != nil { return err } for _, c := range b.Compactions { if err := c.c.Print(w); err != nil { return err } } return nil } func printValues(n int, values []uint64) string { w := &bytes.Buffer{} boff := n * blockSize fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff) var newline bool for i, v := range values { if i%6 == 0 { newline = true } if v != 0 { if newline { fmt.Fprintf(w, "\n") newline = false } fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v) } } return w.String() } func printIndex(b *builder, nr int, n *node) string { w := &bytes.Buffer{} boff := nr * blockSize fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff) var newline bool for i, c := range n.children { if i%8 == 0 { newline = true } if c != nil { v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index) if v != 0 { if newline { fmt.Fprintf(w, "\n") newline = false } fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v) } } } return w.String() } var ( trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{ "printValues": printValues, "printIndex": printIndex, "title": strings.Title, "dec": func(x int) int { return x - 1 }, "psize": func(n int) string { return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024) }, }).Parse(trieTemplate)) lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate)) ) // TODO: consider the return type of lookup. It could be uint64, even if the // internal value type is smaller. We will have to verify this with the // performance of unicode/norm, which is very sensitive to such changes. 
const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}} // {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}. type {{.Name}}Trie struct { {{if $multi}} ascii []{{.ValueType}} // index for ASCII bytes utf8Start []{{.IndexType}} // index for UTF-8 bytes >= 0xC0 {{end}}} func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}} h := {{.Name}}TrieHandles[i] return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] } } type {{.Name}}TrieHandle struct { ascii, multi {{.IndexType}} } // {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{ {{range .Trie}} { {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}} {{end}}}{{else}} return &{{.Name}}Trie{} } {{end}} // lookupValue determines the type of block n and looks up the value for b. func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} { switch { {{range $i, $c := .Compactions}} {{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}} n -= {{$c.Offset}}{{end}} return {{print $b.ValueType}}({{$c.Handler}}){{end}} } } // {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes // The third block is the zero block. var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} { {{range $i, $v := .ValueBlocks}}{{printValues $i $v}} {{end}}} // {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes // Block 0 is the zero block. var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} { {{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}} {{end}}} ` // TODO: consider allowing zero-length strings after evaluating performance with // unicode/norm. const lookupTemplate = ` // lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and // the width in bytes of this encoding. The size will be 0 if s does not // hold enough bytes to complete the encoding. len(s) must be greater than 0. func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) { c0 := s[0] switch { case c0 < 0x80: // is ASCII return {{.ASCIIBlock}}[c0], 1 case c0 < 0xC2: return 0, 1 // Illegal UTF-8: not a starter, not ASCII. case c0 < 0xE0: // 2-byte UTF-8 if len(s) < 2 { return 0, 0 } i := {{.StarterBlock}}[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c1), 2 case c0 < 0xF0: // 3-byte UTF-8 if len(s) < 3 { return 0, 0 } i := {{.StarterBlock}}[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } o := uint32(i)<<6 + uint32(c1) i = {{.Name}}Index[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return 0, 2 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c2), 3 case c0 < 0xF8: // 4-byte UTF-8 if len(s) < 4 { return 0, 0 } i := {{.StarterBlock}}[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } o := uint32(i)<<6 + uint32(c1) i = {{.Name}}Index[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return 0, 2 // Illegal UTF-8: not a continuation byte. } o = uint32(i)<<6 + uint32(c2) i = {{.Name}}Index[o] c3 := s[3] if c3 < 0x80 || 0xC0 <= c3 { return 0, 3 // Illegal UTF-8: not a continuation byte. 
} return t.lookupValue(uint32(i), c3), 4 } // Illegal rune return 0, 1 } // lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s. // s must start with a full and valid UTF-8 encoded rune. func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} { c0 := s[0] if c0 < 0x80 { // is ASCII return {{.ASCIIBlock}}[c0] } i := {{.StarterBlock}}[c0] if c0 < 0xE0 { // 2-byte UTF-8 return t.lookupValue(uint32(i), s[1]) } i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])] if c0 < 0xF0 { // 3-byte UTF-8 return t.lookupValue(uint32(i), s[2]) } i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])] if c0 < 0xF8 { // 4-byte UTF-8 return t.lookupValue(uint32(i), s[3]) } return 0 } ` triegen.go000066400000000000000000000344461324746544700344020ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/internal/triegen// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package triegen implements a code generator for a trie for associating // unsigned integer values with UTF-8 encoded runes. // // Many of the go.text packages use tries for storing per-rune information. A // trie is especially useful if many of the runes have the same value. If this // is the case, many blocks can be expected to be shared allowing for // information on many runes to be stored in little space. // // As most of the lookups are done directly on []byte slices, the tries use the // UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to // runes and contributes a little bit to better performance. It also naturally // provides a fast path for ASCII. // // Space is also an issue. There are many code points defined in Unicode and as // a result tables can get quite large. So every byte counts. The triegen // package automatically chooses the smallest integer values to represent the // tables. Compacters allow further compression of the trie by allowing for // alternative representations of individual trie blocks. // // triegen allows generating multiple tries as a single structure. This is // useful when, for example, one wants to generate tries for several languages // that have a lot of values in common. Some existing libraries for // internationalization store all per-language data as a dynamically loadable // chunk. The go.text packages are designed with the assumption that the user // typically wants to compile in support for all supported languages, in line // with the approach common to Go to create a single standalone binary. The // multi-root trie approach can give significant storage savings in this // scenario. // // triegen generates both tables and code. The code is optimized to use the // automatically chosen data types. The following code is generated for a Trie // or multiple Tries named "foo": // - type fooTrie // The trie type. // // - func newFooTrie(x int) *fooTrie // Trie constructor, where x is the index of the trie passed to Gen. // // - func (t *fooTrie) lookup(s []byte) (v uintX, sz int) // The lookup method, where uintX is automatically chosen. // // - func lookupString, lookupUnsafe and lookupStringUnsafe // Variants of the above. // // - var fooValues and fooIndex and any tables generated by Compacters. // The core trie data. // // - var fooTrieHandles // Indexes of starter blocks in case of multiple trie roots. 
// // It is recommended that users test the generated trie by checking the returned // value for every rune. Such exhaustive tests are possible as the the number of // runes in Unicode is limited. package triegen // import "golang.org/x/text/internal/triegen" // TODO: Arguably, the internally optimized data types would not have to be // exposed in the generated API. We could also investigate not generating the // code, but using it through a package. We would have to investigate the impact // on performance of making such change, though. For packages like unicode/norm, // small changes like this could tank performance. import ( "encoding/binary" "fmt" "hash/crc64" "io" "log" "unicode/utf8" ) // builder builds a set of tries for associating values with runes. The set of // tries can share common index and value blocks. type builder struct { Name string // ValueType is the type of the trie values looked up. ValueType string // ValueSize is the byte size of the ValueType. ValueSize int // IndexType is the type of trie index values used for all UTF-8 bytes of // a rune except the last one. IndexType string // IndexSize is the byte size of the IndexType. IndexSize int // SourceType is used when generating the lookup functions. If the user // requests StringSupport, all lookup functions will be generated for // string input as well. SourceType string Trie []*Trie IndexBlocks []*node ValueBlocks [][]uint64 Compactions []compaction Checksum uint64 ASCIIBlock string StarterBlock string indexBlockIdx map[uint64]int valueBlockIdx map[uint64]nodeIndex asciiBlockIdx map[uint64]int // Stats are used to fill out the template. Stats struct { NValueEntries int NValueBytes int NIndexEntries int NIndexBytes int NHandleBytes int } err error } // A nodeIndex encodes the index of a node, which is defined by the compaction // which stores it and an index within the compaction. For internal nodes, the // compaction is always 0. type nodeIndex struct { compaction int index int } // compaction keeps track of stats used for the compaction. type compaction struct { c Compacter blocks []*node maxHandle uint32 totalSize int // Used by template-based generator and thus exported. Cutoff uint32 Offset uint32 Handler string } func (b *builder) setError(err error) { if b.err == nil { b.err = err } } // An Option can be passed to Gen. type Option func(b *builder) error // Compact configures the trie generator to use the given Compacter. func Compact(c Compacter) Option { return func(b *builder) error { b.Compactions = append(b.Compactions, compaction{ c: c, Handler: c.Handler() + "(n, b)"}) return nil } } // Gen writes Go code for a shared trie lookup structure to w for the given // Tries. The generated trie type will be called nameTrie. newNameTrie(x) will // return the *nameTrie for tries[x]. A value can be looked up by using one of // the various lookup methods defined on nameTrie. It returns the table size of // the generated trie. func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) { // The index contains two dummy blocks, followed by the zero block. The zero // block is at offset 0x80, so that the offset for the zero block for // continuation bytes is 0. b := &builder{ Name: name, Trie: tries, IndexBlocks: []*node{{}, {}, {}}, Compactions: []compaction{{ Handler: name + "Values[n<<6+uint32(b)]", }}, // The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero // block. 
indexBlockIdx: map[uint64]int{0: 0}, valueBlockIdx: map[uint64]nodeIndex{0: {}}, asciiBlockIdx: map[uint64]int{}, } b.Compactions[0].c = (*simpleCompacter)(b) for _, f := range opts { if err := f(b); err != nil { return 0, err } } b.build() if b.err != nil { return 0, b.err } if err = b.print(w); err != nil { return 0, err } return b.Size(), nil } // A Trie represents a single root node of a trie. A builder may build several // overlapping tries at once. type Trie struct { root *node hiddenTrie } // hiddenTrie contains values we want to be visible to the template generator, // but hidden from the API documentation. type hiddenTrie struct { Name string Checksum uint64 ASCIIIndex int StarterIndex int } // NewTrie returns a new trie root. func NewTrie(name string) *Trie { return &Trie{ &node{ children: make([]*node, blockSize), values: make([]uint64, utf8.RuneSelf), }, hiddenTrie{Name: name}, } } // Gen is a convenience wrapper around the Gen func passing t as the only trie // and uses the name passed to NewTrie. It returns the size of the generated // tables. func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) { return Gen(w, t.Name, []*Trie{t}, opts...) } // node is a node of the intermediate trie structure. type node struct { // children holds this node's children. It is always of length 64. // A child node may be nil. children []*node // values contains the values of this node. If it is non-nil, this node is // either a root or leaf node: // For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F]. // For leaf nodes, len(values) == 64 and it maps the bytes in [0x80, 0xBF]. values []uint64 index nodeIndex } // Insert associates value with the given rune. Insert will panic if a non-zero // value is passed for an invalid rune. func (t *Trie) Insert(r rune, value uint64) { if value == 0 { return } s := string(r) if []rune(s)[0] != r && value != 0 { // Note: The UCD tables will always assign what amounts to a zero value // to a surrogate. Allowing a zero value for an illegal rune allows // users to iterate over [0..MaxRune] without having to explicitly // exclude surrogates, which would be tedious. panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r)) } if len(s) == 1 { // It is a root node value (ASCII). t.root.values[s[0]] = value return } n := t.root for ; len(s) > 1; s = s[1:] { if n.children == nil { n.children = make([]*node, blockSize) } p := s[0] % blockSize c := n.children[p] if c == nil { c = &node{} n.children[p] = c } if len(s) > 2 && c.values != nil { log.Fatalf("triegen: insert(%U): found internal node with values", r) } n = c } if n.values == nil { n.values = make([]uint64, blockSize) } if n.children != nil { log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r) } n.values[s[0]-0x80] = value } // Size returns the number of bytes the generated trie will take to store. It // needs to be exported as it is used in the templates. func (b *builder) Size() int { // Index blocks. sz := len(b.IndexBlocks) * blockSize * b.IndexSize // Skip the first compaction, which represents the normal value blocks, as // its totalSize does not account for the ASCII blocks, which are managed // separately. sz += len(b.ValueBlocks) * blockSize * b.ValueSize for _, c := range b.Compactions[1:] { sz += c.totalSize } // TODO: this computation does not account for the fixed overhead of a using // a compaction, either code or data. As for data, though, the typical // overhead of data is in the order of bytes (2 bytes for cases). 
Further, // the savings of using a compaction should anyway be substantial for it to // be worth it. // For multi-root tries, we also need to account for the handles. if len(b.Trie) > 1 { sz += 2 * b.IndexSize * len(b.Trie) } return sz } func (b *builder) build() { // Compute the sizes of the values. var vmax uint64 for _, t := range b.Trie { vmax = maxValue(t.root, vmax) } b.ValueType, b.ValueSize = getIntType(vmax) // Compute all block allocations. // TODO: first compute the ASCII blocks for all tries and then the other // nodes. ASCII blocks are more restricted in placement, as they require two // blocks to be placed consecutively. Processing them first may improve // sharing (at least one zero block can be expected to be saved.) for _, t := range b.Trie { b.Checksum += b.buildTrie(t) } // Compute the offsets for all the Compacters. offset := uint32(0) for i := range b.Compactions { c := &b.Compactions[i] c.Offset = offset offset += c.maxHandle + 1 c.Cutoff = offset } // Compute the sizes of indexes. // TODO: different byte positions could have different sizes. So far we have // not found a case where this is beneficial. imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff) for _, ib := range b.IndexBlocks { if x := uint64(ib.index.index); x > imax { imax = x } } b.IndexType, b.IndexSize = getIntType(imax) } func maxValue(n *node, max uint64) uint64 { if n == nil { return max } for _, c := range n.children { max = maxValue(c, max) } for _, v := range n.values { if max < v { max = v } } return max } func getIntType(v uint64) (string, int) { switch { case v < 1<<8: return "uint8", 1 case v < 1<<16: return "uint16", 2 case v < 1<<32: return "uint32", 4 } return "uint64", 8 } const ( blockSize = 64 // Subtract two blocks to offset 0x80, the first continuation byte. blockOffset = 2 // Subtract three blocks to offset 0xC0, the first non-ASCII starter. rootBlockOffset = 3 ) var crcTable = crc64.MakeTable(crc64.ISO) func (b *builder) buildTrie(t *Trie) uint64 { n := t.root // Get the ASCII offset. For the first trie, the ASCII block will be at // position 0. hasher := crc64.New(crcTable) binary.Write(hasher, binary.BigEndian, n.values) hash := hasher.Sum64() v, ok := b.asciiBlockIdx[hash] if !ok { v = len(b.ValueBlocks) b.asciiBlockIdx[hash] = v b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:]) if v == 0 { // Add the zero block at position 2 so that it will be assigned a // zero reference in the lookup blocks. // TODO: always do this? This would allow us to remove a check from // the trie lookup, but at the expense of extra space. Analyze // performance for unicode/norm. b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize)) } } t.ASCIIIndex = v // Compute remaining offsets. t.Checksum = b.computeOffsets(n, true) // We already subtracted the normal blockOffset from the index. Subtract the // difference for starter bytes. t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset) return t.Checksum } func (b *builder) computeOffsets(n *node, root bool) uint64 { // For the first trie, the root lookup block will be at position 3, which is // the offset for UTF-8 non-ASCII starter bytes. first := len(b.IndexBlocks) == rootBlockOffset if first { b.IndexBlocks = append(b.IndexBlocks, n) } // We special-case the cases where all values recursively are 0. This allows // for the use of a zero block to which all such values can be directed. 
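// Editor's illustrative sketch, not part of the vendored triegen source: the
// typical build-and-generate flow using the exported NewTrie, Insert and
// (*Trie).Gen shown above. It assumes the standard library imports "bytes" and
// "unicode"; the trie name "case" and the value 1 are arbitrary examples.
func exampleGenerateCaseTrie() ([]byte, error) {
	t := NewTrie("case")
	for r := rune(0); r <= unicode.MaxRune; r++ {
		if unicode.IsUpper(r) {
			// Associate a small non-zero value with upper-case runes; zero
			// values (the default) need not be inserted at all.
			t.Insert(r, 1)
		}
	}
	var buf bytes.Buffer
	if _, err := t.Gen(&buf); err != nil { // writes Go source for the lookup tables
		return nil, err
	}
	return buf.Bytes(), nil
}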
hash := uint64(0) if n.children != nil || n.values != nil { hasher := crc64.New(crcTable) for _, c := range n.children { var v uint64 if c != nil { v = b.computeOffsets(c, false) } binary.Write(hasher, binary.BigEndian, v) } binary.Write(hasher, binary.BigEndian, n.values) hash = hasher.Sum64() } if first { b.indexBlockIdx[hash] = rootBlockOffset - blockOffset } // Compacters don't apply to internal nodes. if n.children != nil { v, ok := b.indexBlockIdx[hash] if !ok { v = len(b.IndexBlocks) - blockOffset b.IndexBlocks = append(b.IndexBlocks, n) b.indexBlockIdx[hash] = v } n.index = nodeIndex{0, v} } else { h, ok := b.valueBlockIdx[hash] if !ok { bestI, bestSize := 0, blockSize*b.ValueSize for i, c := range b.Compactions[1:] { if sz, ok := c.c.Size(n.values); ok && bestSize > sz { bestI, bestSize = i+1, sz } } c := &b.Compactions[bestI] c.totalSize += bestSize v := c.c.Store(n.values) if c.maxHandle < v { c.maxHandle = v } h = nodeIndex{bestI, int(v)} b.valueBlockIdx[hash] = h } n.index = h } return hash } ucd/000077500000000000000000000000001324746544700315215ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/internalucd.go000066400000000000000000000212151324746544700326240ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/internal/ucd// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package ucd provides a parser for Unicode Character Database files, the // format of which is defined in http://www.unicode.org/reports/tr44/. See // http://www.unicode.org/Public/UCD/latest/ucd/ for example files. // // It currently does not support substitutions of missing fields. package ucd // import "golang.org/x/text/internal/ucd" import ( "bufio" "bytes" "errors" "io" "log" "regexp" "strconv" "strings" ) // UnicodeData.txt fields. const ( CodePoint = iota Name GeneralCategory CanonicalCombiningClass BidiClass DecompMapping DecimalValue DigitValue NumericValue BidiMirrored Unicode1Name ISOComment SimpleUppercaseMapping SimpleLowercaseMapping SimpleTitlecaseMapping ) // Parse calls f for each entry in the given reader of a UCD file. It will close // the reader upon return. It will call log.Fatal if any error occurred. // // This implements the most common usage pattern of using Parser. func Parse(r io.ReadCloser, f func(p *Parser)) { defer r.Close() p := New(r) for p.Next() { f(p) } if err := p.Err(); err != nil { r.Close() // os.Exit will cause defers not to be called. log.Fatal(err) } } // An Option is used to configure a Parser. type Option func(p *Parser) func keepRanges(p *Parser) { p.keepRanges = true } var ( // KeepRanges prevents the expansion of ranges. The raw ranges can be // obtained by calling Range(0) on the parser. KeepRanges Option = keepRanges ) // The Part option register a handler for lines starting with a '@'. The text // after a '@' is available as the first field. Comments are handled as usual. func Part(f func(p *Parser)) Option { return func(p *Parser) { p.partHandler = f } } // The CommentHandler option passes comments that are on a line by itself to // a given handler. func CommentHandler(f func(s string)) Option { return func(p *Parser) { p.commentHandler = f } } // A Parser parses Unicode Character Database (UCD) files. type Parser struct { scanner *bufio.Scanner keepRanges bool // Don't expand rune ranges in field 0. 
err error comment []byte field [][]byte // parsedRange is needed in case Range(0) is called more than once for one // field. In some cases this requires scanning ahead. parsedRange bool rangeStart, rangeEnd rune partHandler func(p *Parser) commentHandler func(s string) } func (p *Parser) setError(err error) { if p.err == nil { p.err = err } } func (p *Parser) getField(i int) []byte { if i >= len(p.field) { return nil } return p.field[i] } // Err returns a non-nil error if any error occurred during parsing. func (p *Parser) Err() error { return p.err } // New returns a Parser for the given Reader. func New(r io.Reader, o ...Option) *Parser { p := &Parser{ scanner: bufio.NewScanner(r), } for _, f := range o { f(p) } return p } // Next parses the next line in the file. It returns true if a line was parsed // and false if it reached the end of the file. func (p *Parser) Next() bool { if !p.keepRanges && p.rangeStart < p.rangeEnd { p.rangeStart++ return true } p.comment = nil p.field = p.field[:0] p.parsedRange = false for p.scanner.Scan() { b := p.scanner.Bytes() if len(b) == 0 { continue } if b[0] == '#' { if p.commentHandler != nil { p.commentHandler(strings.TrimSpace(string(b[1:]))) } continue } // Parse line if i := bytes.IndexByte(b, '#'); i != -1 { p.comment = bytes.TrimSpace(b[i+1:]) b = b[:i] } if b[0] == '@' { if p.partHandler != nil { p.field = append(p.field, bytes.TrimSpace(b[1:])) p.partHandler(p) p.field = p.field[:0] } p.comment = nil continue } for { i := bytes.IndexByte(b, ';') if i == -1 { p.field = append(p.field, bytes.TrimSpace(b)) break } p.field = append(p.field, bytes.TrimSpace(b[:i])) b = b[i+1:] } if !p.keepRanges { p.rangeStart, p.rangeEnd = p.getRange(0) } return true } p.setError(p.scanner.Err()) return false } func parseRune(b []byte) (rune, error) { if len(b) > 2 && b[0] == 'U' && b[1] == '+' { b = b[2:] } x, err := strconv.ParseUint(string(b), 16, 32) return rune(x), err } func (p *Parser) parseRune(b []byte) rune { x, err := parseRune(b) p.setError(err) return x } // Rune parses and returns field i as a rune. func (p *Parser) Rune(i int) rune { if i > 0 || p.keepRanges { return p.parseRune(p.getField(i)) } return p.rangeStart } // Runes interprets and returns field i as a sequence of runes. func (p *Parser) Runes(i int) (runes []rune) { add := func(b []byte) { if b = bytes.TrimSpace(b); len(b) > 0 { runes = append(runes, p.parseRune(b)) } } for b := p.getField(i); ; { i := bytes.IndexByte(b, ' ') if i == -1 { add(b) break } add(b[:i]) b = b[i+1:] } return } var ( errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>") // reRange matches one line of a legacy rune range. reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$") ) // Range parses and returns field i as a rune range. A range is inclusive at // both ends. If the field only has one rune, first and last will be identical. // It supports the legacy format for ranges used in UnicodeData.txt. func (p *Parser) Range(i int) (first, last rune) { if !p.keepRanges { return p.rangeStart, p.rangeStart } return p.getRange(i) } func (p *Parser) getRange(i int) (first, last rune) { b := p.getField(i) if k := bytes.Index(b, []byte("..")); k != -1 { return p.parseRune(b[:k]), p.parseRune(b[k+2:]) } // The first field may not be a rune, in which case we may ignore any error // and set the range as 0..0. x, err := parseRune(b) if err != nil { // Disable range parsing henceforth. This ensures that an error will be // returned if the user subsequently will try to parse this field as // a Rune. 
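// Editor's illustrative sketch, not part of the vendored ucd source: two common
// ways to drive this parser, using only identifiers defined in this file
// (Parse, New, Parser, KeepRanges and the UnicodeData.txt field constants). The
// file path and the sample line are placeholders; assumed standard imports are
// "fmt", "os" and "strings".
func exampleParseUnicodeData() error {
	f, err := os.Open("UnicodeData.txt") // placeholder path
	if err != nil {
		return err
	}
	// Parse closes f, expands rune ranges in field 0 and invokes the callback
	// once per entry.
	Parse(f, func(p *Parser) {
		fmt.Printf("%U %s %s\n", p.Rune(CodePoint), p.String(Name), p.String(GeneralCategory))
	})
	return nil
}

func exampleKeepRanges() (first, last rune) {
	// With KeepRanges, "XXXX..YYYY" in field 0 is not expanded and Range(0)
	// returns the raw bounds of the range.
	p := New(strings.NewReader("0600..0605;Arabic\n"), KeepRanges)
	if p.Next() {
		first, last = p.Range(0)
	}
	return first, last
}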
p.keepRanges = true } // Special case for UnicodeData that was retained for backwards compatibility. if i == 0 && len(p.field) > 1 && bytes.HasSuffix(p.field[1], []byte("First>")) { if p.parsedRange { return p.rangeStart, p.rangeEnd } mf := reRange.FindStringSubmatch(p.scanner.Text()) if mf == nil || !p.scanner.Scan() { p.setError(errIncorrectLegacyRange) return x, x } // Using Bytes would be more efficient here, but Text is a lot easier // and this is not a frequent case. ml := reRange.FindStringSubmatch(p.scanner.Text()) if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] { p.setError(errIncorrectLegacyRange) return x, x } p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Bytes()[:len(ml[1])]) p.parsedRange = true return p.rangeStart, p.rangeEnd } return x, x } // bools recognizes all valid UCD boolean values. var bools = map[string]bool{ "": false, "N": false, "No": false, "F": false, "False": false, "Y": true, "Yes": true, "T": true, "True": true, } // Bool parses and returns field i as a boolean value. func (p *Parser) Bool(i int) bool { b := p.getField(i) for s, v := range bools { if bstrEq(b, s) { return v } } p.setError(strconv.ErrSyntax) return false } // Int parses and returns field i as an integer value. func (p *Parser) Int(i int) int { x, err := strconv.ParseInt(string(p.getField(i)), 10, 64) p.setError(err) return int(x) } // Uint parses and returns field i as an unsigned integer value. func (p *Parser) Uint(i int) uint { x, err := strconv.ParseUint(string(p.getField(i)), 10, 64) p.setError(err) return uint(x) } // Float parses and returns field i as a decimal value. func (p *Parser) Float(i int) float64 { x, err := strconv.ParseFloat(string(p.getField(i)), 64) p.setError(err) return x } // String parses and returns field i as a string value. func (p *Parser) String(i int) string { return string(p.getField(i)) } // Strings parses and returns field i as a space-separated list of strings. func (p *Parser) Strings(i int) []string { ss := strings.Split(string(p.getField(i)), " ") for i, s := range ss { ss[i] = strings.TrimSpace(s) } return ss } // Comment returns the comments for the current line. func (p *Parser) Comment() string { return string(p.comment) } var errUndefinedEnum = errors.New("ucd: undefined enum value") // Enum interprets and returns field i as a value that must be one of the values // in enum. func (p *Parser) Enum(i int, enum ...string) string { b := p.getField(i) for _, s := range enum { if bstrEq(b, s) { return s } } p.setError(errUndefinedEnum) return "" } func bstrEq(b []byte, s string) bool { if len(b) != len(s) { return false } for i, c := range b { if c != s[i] { return false } } return true } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/secure/000077500000000000000000000000001324746544700304775ustar00rootroot00000000000000bidirule/000077500000000000000000000000001324746544700322175ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/securebidirule.go000066400000000000000000000227531324746544700343560ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/secure/bidirule// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package bidirule implements the Bidi Rule defined by RFC 5893. // // This package is under development. 
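// Editor's illustrative sketch, not part of the vendored bidirule source: how a
// caller is expected to apply the Bidi Rule to a single label. ValidString is
// part of this package in upstream golang.org/x/text but is defined outside
// this excerpt, so treat the call as an assumption about the surrounding file.
func exampleCheckLabel(label string) bool {
	// Reports whether the label satisfies RFC 5893; an RTL label ending in a
	// character of the wrong class, for example, is rejected.
	return ValidString(label)
}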
The API may change without notice and // without preserving backward compatibility. package bidirule import ( "errors" "unicode/utf8" "golang.org/x/text/transform" "golang.org/x/text/unicode/bidi" ) // This file contains an implementation of RFC 5893: Right-to-Left Scripts for // Internationalized Domain Names for Applications (IDNA) // // A label is an individual component of a domain name. Labels are usually // shown separated by dots; for example, the domain name "www.example.com" is // composed of three labels: "www", "example", and "com". // // An RTL label is a label that contains at least one character of class R, AL, // or AN. An LTR label is any label that is not an RTL label. // // A "Bidi domain name" is a domain name that contains at least one RTL label. // // The following guarantees can be made based on the above: // // o In a domain name consisting of only labels that satisfy the rule, // the requirements of Section 3 are satisfied. Note that even LTR // labels and pure ASCII labels have to be tested. // // o In a domain name consisting of only LDH labels (as defined in the // Definitions document [RFC5890]) and labels that satisfy the rule, // the requirements of Section 3 are satisfied as long as a label // that starts with an ASCII digit does not come after a // right-to-left label. // // No guarantee is given for other combinations. // ErrInvalid indicates a label is invalid according to the Bidi Rule. var ErrInvalid = errors.New("bidirule: failed Bidi Rule") type ruleState uint8 const ( ruleInitial ruleState = iota ruleLTR ruleLTRFinal ruleRTL ruleRTLFinal ruleInvalid ) type ruleTransition struct { next ruleState mask uint16 } var transitions = [...][2]ruleTransition{ // [2.1] The first character must be a character with Bidi property L, R, or // AL. If it has the R or AL property, it is an RTL label; if it has the L // property, it is an LTR label. ruleInitial: { {ruleLTRFinal, 1 << bidi.L}, {ruleRTLFinal, 1< 0 bytes returned // before considering the error". if r.src0 != r.src1 || r.err != nil { r.dst0 = 0 r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF) r.src0 += n switch { case err == nil: if r.src0 != r.src1 { r.err = errInconsistentByteCount } // The Transform call was successful; we are complete if we // cannot read more bytes into src. r.transformComplete = r.err != nil continue case err == ErrShortDst && (r.dst1 != 0 || n != 0): // Make room in dst by copying out, and try again. continue case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil: // Read more bytes into src via the code below, and try again. default: r.transformComplete = true // The reader error (r.err) takes precedence over the // transformer error (err) unless r.err is nil or io.EOF. if r.err == nil || r.err == io.EOF { r.err = err } continue } } // Move any untransformed source bytes to the start of the buffer // and read more bytes. if r.src0 != 0 { r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1]) } n, r.err = r.r.Read(r.src[r.src1:]) r.src1 += n } } // TODO: implement ReadByte (and ReadRune??). // Writer wraps another io.Writer by transforming the bytes read. // The user needs to call Close to flush unwritten bytes that may // be buffered. type Writer struct { w io.Writer t Transformer dst []byte // src[:n] contains bytes that have not yet passed through t. src []byte n int } // NewWriter returns a new Writer that wraps w by transforming the bytes written // via t. It calls Reset on t. 
func NewWriter(w io.Writer, t Transformer) *Writer { t.Reset() return &Writer{ w: w, t: t, dst: make([]byte, defaultBufSize), src: make([]byte, defaultBufSize), } } // Write implements the io.Writer interface. If there are not enough // bytes available to complete a Transform, the bytes will be buffered // for the next write. Call Close to convert the remaining bytes. func (w *Writer) Write(data []byte) (n int, err error) { src := data if w.n > 0 { // Append bytes from data to the last remainder. // TODO: limit the amount copied on first try. n = copy(w.src[w.n:], data) w.n += n src = w.src[:w.n] } for { nDst, nSrc, err := w.t.Transform(w.dst, src, false) if _, werr := w.w.Write(w.dst[:nDst]); werr != nil { return n, werr } src = src[nSrc:] if w.n == 0 { n += nSrc } else if len(src) <= n { // Enough bytes from w.src have been consumed. We make src point // to data instead to reduce the copying. w.n = 0 n -= len(src) src = data[n:] if n < len(data) && (err == nil || err == ErrShortSrc) { continue } } switch err { case ErrShortDst: // This error is okay as long as we are making progress. if nDst > 0 || nSrc > 0 { continue } case ErrShortSrc: if len(src) < len(w.src) { m := copy(w.src, src) // If w.n > 0, bytes from data were already copied to w.src and n // was already set to the number of bytes consumed. if w.n == 0 { n += m } w.n = m err = nil } else if nDst > 0 || nSrc > 0 { // Not enough buffer to store the remainder. Keep processing as // long as there is progress. Without this case, transforms that // require a lookahead larger than the buffer may result in an // error. This is not something one may expect to be common in // practice, but it may occur when buffers are set to small // sizes during testing. continue } case nil: if w.n > 0 { err = errInconsistentByteCount } } return n, err } } // Close implements the io.Closer interface. func (w *Writer) Close() error { src := w.src[:w.n] for { nDst, nSrc, err := w.t.Transform(w.dst, src, true) if _, werr := w.w.Write(w.dst[:nDst]); werr != nil { return werr } if err != ErrShortDst { return err } src = src[nSrc:] } } type nop struct{ NopResetter } func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { n := copy(dst, src) if n < len(src) { err = ErrShortDst } return n, n, err } func (nop) Span(src []byte, atEOF bool) (n int, err error) { return len(src), nil } type discard struct{ NopResetter } func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { return 0, len(src), nil } var ( // Discard is a Transformer for which all Transform calls succeed // by consuming all bytes and writing nothing. Discard Transformer = discard{} // Nop is a SpanningTransformer that copies src to dst. Nop SpanningTransformer = nop{} ) // chain is a sequence of links. A chain with N Transformers has N+1 links and // N+1 buffers. Of those N+1 buffers, the first and last are the src and dst // buffers given to chain.Transform and the middle N-1 buffers are intermediate // buffers owned by the chain. The i'th link transforms bytes from the i'th // buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer // chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N). type chain struct { link []link err error // errStart is the index at which the error occurred plus 1. Processing // errStart at this level at the next call to Transform. As long as // errStart > 0, chain will not consume any more source bytes. 
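// Editor's illustrative sketch, not part of the vendored transform source:
// writing through a Transformer using NewWriter and the package's own Nop
// transformer, both defined in this file. Assumes the standard import "bytes".
func exampleWriter() (string, error) {
	var buf bytes.Buffer
	w := NewWriter(&buf, Nop) // Nop copies src to dst unchanged
	if _, err := w.Write([]byte("hello")); err != nil {
		return "", err
	}
	if err := w.Close(); err != nil { // Close flushes any buffered remainder
		return "", err
	}
	return buf.String(), nil // "hello"
}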
errStart int } func (c *chain) fatalError(errIndex int, err error) { if i := errIndex + 1; i > c.errStart { c.errStart = i c.err = err } } type link struct { t Transformer // b[p:n] holds the bytes to be transformed by t. b []byte p int n int } func (l *link) src() []byte { return l.b[l.p:l.n] } func (l *link) dst() []byte { return l.b[l.n:] } // Chain returns a Transformer that applies t in sequence. func Chain(t ...Transformer) Transformer { if len(t) == 0 { return nop{} } c := &chain{link: make([]link, len(t)+1)} for i, tt := range t { c.link[i].t = tt } // Allocate intermediate buffers. b := make([][defaultBufSize]byte, len(t)-1) for i := range b { c.link[i+1].b = b[i][:] } return c } // Reset resets the state of Chain. It calls Reset on all the Transformers. func (c *chain) Reset() { for i, l := range c.link { if l.t != nil { l.t.Reset() } c.link[i].p, c.link[i].n = 0, 0 } } // TODO: make chain use Span (is going to be fun to implement!) // Transform applies the transformers of c in sequence. func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { // Set up src and dst in the chain. srcL := &c.link[0] dstL := &c.link[len(c.link)-1] srcL.b, srcL.p, srcL.n = src, 0, len(src) dstL.b, dstL.n = dst, 0 var lastFull, needProgress bool // for detecting progress // i is the index of the next Transformer to apply, for i in [low, high]. // low is the lowest index for which c.link[low] may still produce bytes. // high is the highest index for which c.link[high] has a Transformer. // The error returned by Transform determines whether to increase or // decrease i. We try to completely fill a buffer before converting it. for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; { in, out := &c.link[i], &c.link[i+1] nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i) out.n += nDst in.p += nSrc if i > 0 && in.p == in.n { in.p, in.n = 0, 0 } needProgress, lastFull = lastFull, false switch err0 { case ErrShortDst: // Process the destination buffer next. Return if we are already // at the high index. if i == high { return dstL.n, srcL.p, ErrShortDst } if out.n != 0 { i++ // If the Transformer at the next index is not able to process any // source bytes there is nothing that can be done to make progress // and the bytes will remain unprocessed. lastFull is used to // detect this and break out of the loop with a fatal error. lastFull = true continue } // The destination buffer was too small, but is completely empty. // Return a fatal error as this transformation can never complete. c.fatalError(i, errShortInternal) case ErrShortSrc: if i == 0 { // Save ErrShortSrc in err. All other errors take precedence. err = ErrShortSrc break } // Source bytes were depleted before filling up the destination buffer. // Verify we made some progress, move the remaining bytes to the errStart // and try to get more source bytes. if needProgress && nSrc == 0 || in.n-in.p == len(in.b) { // There were not enough source bytes to proceed while the source // buffer cannot hold any more bytes. Return a fatal error as this // transformation can never complete. c.fatalError(i, errShortInternal) break } // in.b is an internal buffer and we can make progress. in.p, in.n = 0, copy(in.b, in.src()) fallthrough case nil: // if i == low, we have depleted the bytes at index i or any lower levels. // In that case we increase low and i. In all other cases we decrease i to // fetch more bytes before proceeding to the next index. 
if i > low { i-- continue } default: c.fatalError(i, err0) } // Exhausted level low or fatal error: increase low and continue // to process the bytes accepted so far. i++ low = i } // If c.errStart > 0, this means we found a fatal error. We will clear // all upstream buffers. At this point, no more progress can be made // downstream, as Transform would have bailed while handling ErrShortDst. if c.errStart > 0 { for i := 1; i < c.errStart; i++ { c.link[i].p, c.link[i].n = 0, 0 } err, c.errStart, c.err = c.err, 0, nil } return dstL.n, srcL.p, err } // Deprecated: use runes.Remove instead. func RemoveFunc(f func(r rune) bool) Transformer { return removeF(f) } type removeF func(r rune) bool func (removeF) Reset() {} // Transform implements the Transformer interface. func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] { if r = rune(src[0]); r < utf8.RuneSelf { sz = 1 } else { r, sz = utf8.DecodeRune(src) if sz == 1 { // Invalid rune. if !atEOF && !utf8.FullRune(src) { err = ErrShortSrc break } // We replace illegal bytes with RuneError. Not doing so might // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. // The resulting byte sequence may subsequently contain runes // for which t(r) is true that were passed unnoticed. if !t(r) { if nDst+3 > len(dst) { err = ErrShortDst break } nDst += copy(dst[nDst:], "\uFFFD") } nSrc++ continue } } if !t(r) { if nDst+sz > len(dst) { err = ErrShortDst break } nDst += copy(dst[nDst:], src[:sz]) } nSrc += sz } return } // grow returns a new []byte that is longer than b, and copies the first n bytes // of b to the start of the new slice. func grow(b []byte, n int) []byte { m := len(b) if m <= 32 { m = 64 } else if m <= 256 { m *= 2 } else { m += m >> 1 } buf := make([]byte, m) copy(buf, b[:n]) return buf } const initialBufSize = 128 // String returns a string with the result of converting s[:n] using t, where // n <= len(s). If err == nil, n will be len(s). It calls Reset on t. func String(t Transformer, s string) (result string, n int, err error) { t.Reset() if s == "" { // Fast path for the common case for empty input. Results in about a // 86% reduction of running time for BenchmarkStringLowerEmpty. if _, _, err := t.Transform(nil, nil, true); err == nil { return "", 0, nil } } // Allocate only once. Note that both dst and src escape when passed to // Transform. buf := [2 * initialBufSize]byte{} dst := buf[:initialBufSize:initialBufSize] src := buf[initialBufSize : 2*initialBufSize] // The input string s is transformed in multiple chunks (starting with a // chunk size of initialBufSize). nDst and nSrc are per-chunk (or // per-Transform-call) indexes, pDst and pSrc are overall indexes. nDst, nSrc := 0, 0 pDst, pSrc := 0, 0 // pPrefix is the length of a common prefix: the first pPrefix bytes of the // result will equal the first pPrefix bytes of s. It is not guaranteed to // be the largest such value, but if pPrefix, len(result) and len(s) are // all equal after the final transform (i.e. calling Transform with atEOF // being true returned nil error) then we don't need to allocate a new // result string. pPrefix := 0 for { // Invariant: pDst == pPrefix && pSrc == pPrefix. n := copy(src, s[pSrc:]) nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s)) pDst += nDst pSrc += nSrc // TODO: let transformers implement an optional Spanner interface, akin // to norm's QuickSpan. This would even allow us to avoid any allocation. 
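// Editor's illustrative sketch, not part of the vendored transform source: the
// one-shot String helper combined with Chain, using only identifiers from this
// file. With no-op transformers the input comes back unchanged, which is enough
// to show the call shape.
func exampleChainString() (string, error) {
	t := Chain(Nop, Nop)            // compose transformers; applied left to right
	res, _, err := String(t, "abc") // res == "abc" for a no-op chain
	return res, err
}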
if !bytes.Equal(dst[:nDst], src[:nSrc]) { break } pPrefix = pSrc if err == ErrShortDst { // A buffer can only be short if a transformer modifies its input. break } else if err == ErrShortSrc { if nSrc == 0 { // No progress was made. break } // Equal so far and !atEOF, so continue checking. } else if err != nil || pPrefix == len(s) { return string(s[:pPrefix]), pPrefix, err } } // Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc. // We have transformed the first pSrc bytes of the input s to become pDst // transformed bytes. Those transformed bytes are discontiguous: the first // pPrefix of them equal s[:pPrefix] and the last nDst of them equal // dst[:nDst]. We copy them around, into a new dst buffer if necessary, so // that they become one contiguous slice: dst[:pDst]. if pPrefix != 0 { newDst := dst if pDst > len(newDst) { newDst = make([]byte, len(s)+nDst-nSrc) } copy(newDst[pPrefix:pDst], dst[:nDst]) copy(newDst[:pPrefix], s[:pPrefix]) dst = newDst } // Prevent duplicate Transform calls with atEOF being true at the end of // the input. Also return if we have an unrecoverable error. if (err == nil && pSrc == len(s)) || (err != nil && err != ErrShortDst && err != ErrShortSrc) { return string(dst[:pDst]), pSrc, err } // Transform the remaining input, growing dst and src buffers as necessary. for { n := copy(src, s[pSrc:]) nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s)) pDst += nDst pSrc += nSrc // If we got ErrShortDst or ErrShortSrc, do not grow as long as we can // make progress. This may avoid excessive allocations. if err == ErrShortDst { if nDst == 0 { dst = grow(dst, pDst) } } else if err == ErrShortSrc { if nSrc == 0 { src = grow(src, 0) } } else if err != nil || pSrc == len(s) { return string(dst[:pDst]), pSrc, err } } } // Bytes returns a new byte slice with the result of converting b[:n] using t, // where n <= len(b). If err == nil, n will be len(b). It calls Reset on t. func Bytes(t Transformer, b []byte) (result []byte, n int, err error) { return doAppend(t, 0, make([]byte, len(b)), b) } // Append appends the result of converting src[:n] using t to dst, where // n <= len(src), If err == nil, n will be len(src). It calls Reset on t. func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) { if len(dst) == cap(dst) { n := len(src) + len(dst) // It is okay for this to be 0. b := make([]byte, n) dst = b[:copy(b, dst)] } return doAppend(t, len(dst), dst[:cap(dst)], src) } func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) { t.Reset() pSrc := 0 for { nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true) pDst += nDst pSrc += nSrc if err != ErrShortDst { return dst[:pDst], pSrc, err } // Grow the destination buffer, but do not grow as long as we can make // progress. This may avoid excessive allocations. if nDst == 0 { dst = grow(dst, pDst) } } } gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicode/000077500000000000000000000000001324746544700306375ustar00rootroot00000000000000bidi/000077500000000000000000000000001324746544700314675ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicodebidi.go000066400000000000000000000152021324746544700327250ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicode/bidi// Copyright 2015 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:generate go run gen.go gen_trieval.go gen_ranges.go // Package bidi contains functionality for bidirectional text support. // // See http://www.unicode.org/reports/tr9. // // NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways // and without notice. package bidi // import "golang.org/x/text/unicode/bidi" // TODO: // The following functionality would not be hard to implement, but hinges on // the definition of a Segmenter interface. For now this is up to the user. // - Iterate over paragraphs // - Segmenter to iterate over runs directly from a given text. // Also: // - Transformer for reordering? // - Transformer (validator, really) for Bidi Rule. // This API tries to avoid dealing with embedding levels for now. Under the hood // these will be computed, but the question is to which extent the user should // know they exist. We should at some point allow the user to specify an // embedding hierarchy, though. // A Direction indicates the overall flow of text. type Direction int const ( // LeftToRight indicates the text contains no right-to-left characters and // that either there are some left-to-right characters or the option // DefaultDirection(LeftToRight) was passed. LeftToRight Direction = iota // RightToLeft indicates the text contains no left-to-right characters and // that either there are some right-to-left characters or the option // DefaultDirection(RightToLeft) was passed. RightToLeft // Mixed indicates text contains both left-to-right and right-to-left // characters. Mixed // Neutral means that text contains no left-to-right and right-to-left // characters and that no default direction has been set. Neutral ) type options struct{} // An Option is an option for Bidi processing. type Option func(*options) // ICU allows the user to define embedding levels. This may be used, for example, // to use hierarchical structure of markup languages to define embeddings. // The following option may be a way to expose this functionality in this API. // // LevelFunc sets a function that associates nesting levels with the given text. // // The levels function will be called with monotonically increasing values for p. // func LevelFunc(levels func(p int) int) Option { // panic("unimplemented") // } // DefaultDirection sets the default direction for a Paragraph. The direction is // overridden if the text contains directional characters. func DefaultDirection(d Direction) Option { panic("unimplemented") } // A Paragraph holds a single Paragraph for Bidi processing. type Paragraph struct { // buffers } // SetBytes configures p for the given paragraph text. It replaces text // previously set by SetBytes or SetString. If b contains a paragraph separator // it will only process the first paragraph and report the number of bytes // consumed from b including this separator. Error may be non-nil if options are // given. func (p *Paragraph) SetBytes(b []byte, opts ...Option) (n int, err error) { panic("unimplemented") } // SetString configures p for the given paragraph text. It replaces text // previously set by SetBytes or SetString. If b contains a paragraph separator // it will only process the first paragraph and report the number of bytes // consumed from b including this separator. Error may be non-nil if options are // given. 
func (p *Paragraph) SetString(s string, opts ...Option) (n int, err error) { panic("unimplemented") } // IsLeftToRight reports whether the principle direction of rendering for this // paragraphs is left-to-right. If this returns false, the principle direction // of rendering is right-to-left. func (p *Paragraph) IsLeftToRight() bool { panic("unimplemented") } // Direction returns the direction of the text of this paragraph. // // The direction may be LeftToRight, RightToLeft, Mixed, or Neutral. func (p *Paragraph) Direction() Direction { panic("unimplemented") } // RunAt reports the Run at the given position of the input text. // // This method can be used for computing line breaks on paragraphs. func (p *Paragraph) RunAt(pos int) Run { panic("unimplemented") } // Order computes the visual ordering of all the runs in a Paragraph. func (p *Paragraph) Order() (Ordering, error) { panic("unimplemented") } // Line computes the visual ordering of runs for a single line starting and // ending at the given positions in the original text. func (p *Paragraph) Line(start, end int) (Ordering, error) { panic("unimplemented") } // An Ordering holds the computed visual order of runs of a Paragraph. Calling // SetBytes or SetString on the originating Paragraph invalidates an Ordering. // The methods of an Ordering should only be called by one goroutine at a time. type Ordering struct{} // Direction reports the directionality of the runs. // // The direction may be LeftToRight, RightToLeft, Mixed, or Neutral. func (o *Ordering) Direction() Direction { panic("unimplemented") } // NumRuns returns the number of runs. func (o *Ordering) NumRuns() int { panic("unimplemented") } // Run returns the ith run within the ordering. func (o *Ordering) Run(i int) Run { panic("unimplemented") } // TODO: perhaps with options. // // Reorder creates a reader that reads the runes in visual order per character. // // Modifiers remain after the runes they modify. // func (l *Runs) Reorder() io.Reader { // panic("unimplemented") // } // A Run is a continuous sequence of characters of a single direction. type Run struct { } // String returns the text of the run in its original order. func (r *Run) String() string { panic("unimplemented") } // Bytes returns the text of the run in its original order. func (r *Run) Bytes() []byte { panic("unimplemented") } // TODO: methods for // - Display order // - headers and footers // - bracket replacement. // Direction reports the direction of the run. func (r *Run) Direction() Direction { panic("unimplemented") } // Position of the Run within the text passed to SetBytes or SetString of the // originating Paragraph value. func (r *Run) Pos() (start, end int) { panic("unimplemented") } // AppendReverse reverses the order of characters of in, appends them to out, // and returns the result. Modifiers will still follow the runes they modify. // Brackets are replaced with their counterparts. func AppendReverse(out, in []byte) []byte { panic("unimplemented") } // ReverseString reverses the order of characters in s and returns a new string. // Modifiers will still follow the runes they modify. Brackets are replaced with // their counterparts. func ReverseString(s string) string { panic("unimplemented") } bracket.go000066400000000000000000000263501324746544700334370ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicode/bidi// Copyright 2015 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package bidi import ( "container/list" "fmt" "sort" ) // This file contains a port of the reference implementation of the // Bidi Parentheses Algorithm: // http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java // // The implementation in this file covers definitions BD14-BD16 and rule N0 // of UAX#9. // // Some preprocessing is done for each rune before data is passed to this // algorithm: // - opening and closing brackets are identified // - a bracket pair type, like '(' and ')' is assigned a unique identifier that // is identical for the opening and closing bracket. It is left to do these // mappings. // - The BPA algorithm requires that bracket characters that are canonical // equivalents of each other be able to be substituted for each other. // It is the responsibility of the caller to do this canonicalization. // // In implementing BD16, this implementation departs slightly from the "logical" // algorithm defined in UAX#9. In particular, the stack referenced there // supports operations that go beyond a "basic" stack. An equivalent // implementation based on a linked list is used here. // Bidi_Paired_Bracket_Type // BD14. An opening paired bracket is a character whose // Bidi_Paired_Bracket_Type property value is Open. // // BD15. A closing paired bracket is a character whose // Bidi_Paired_Bracket_Type property value is Close. type bracketType byte const ( bpNone bracketType = iota bpOpen bpClose ) // bracketPair holds a pair of index values for opening and closing bracket // location of a bracket pair. type bracketPair struct { opener int closer int } func (b *bracketPair) String() string { return fmt.Sprintf("(%v, %v)", b.opener, b.closer) } // bracketPairs is a slice of bracketPairs with a sort.Interface implementation. type bracketPairs []bracketPair func (b bracketPairs) Len() int { return len(b) } func (b bracketPairs) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b bracketPairs) Less(i, j int) bool { return b[i].opener < b[j].opener } // resolvePairedBrackets runs the paired bracket part of the UBA algorithm. // // For each rune, it takes the indexes into the original string, the class the // bracket type (in pairTypes) and the bracket identifier (pairValues). It also // takes the direction type for the start-of-sentence and the embedding level. // // The identifiers for bracket types are the rune of the canonicalized opening // bracket for brackets (open or close) or 0 for runes that are not brackets. func resolvePairedBrackets(s *isolatingRunSequence) { p := bracketPairer{ sos: s.sos, openers: list.New(), codesIsolatedRun: s.types, indexes: s.indexes, } dirEmbed := L if s.level&1 != 0 { dirEmbed = R } p.locateBrackets(s.p.pairTypes, s.p.pairValues) p.resolveBrackets(dirEmbed, s.p.initialTypes) } type bracketPairer struct { sos Class // direction corresponding to start of sequence // The following is a restatement of BD 16 using non-algorithmic language. // // A bracket pair is a pair of characters consisting of an opening // paired bracket and a closing paired bracket such that the // Bidi_Paired_Bracket property value of the former equals the latter, // subject to the following constraints. 
// - both characters of a pair occur in the same isolating run sequence // - the closing character of a pair follows the opening character // - any bracket character can belong at most to one pair, the earliest possible one // - any bracket character not part of a pair is treated like an ordinary character // - pairs may nest properly, but their spans may not overlap otherwise // Bracket characters with canonical decompositions are supposed to be // treated as if they had been normalized, to allow normalized and non- // normalized text to give the same result. In this implementation that step // is pushed out to the caller. The caller has to ensure that the pairValue // slices contain the rune of the opening bracket after normalization for // any opening or closing bracket. openers *list.List // list of positions for opening brackets // bracket pair positions sorted by location of opening bracket pairPositions bracketPairs codesIsolatedRun []Class // directional bidi codes for an isolated run indexes []int // array of index values into the original string } // matchOpener reports whether characters at given positions form a matching // bracket pair. func (p *bracketPairer) matchOpener(pairValues []rune, opener, closer int) bool { return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]] } const maxPairingDepth = 63 // locateBrackets locates matching bracket pairs according to BD16. // // This implementation uses a linked list instead of a stack, because, while // elements are added at the front (like a push) they are not generally removed // in atomic 'pop' operations, reducing the benefit of the stack archetype. func (p *bracketPairer) locateBrackets(pairTypes []bracketType, pairValues []rune) { // traverse the run // do that explicitly (not in a for-each) so we can record position for i, index := range p.indexes { // look at the bracket type for each character if pairTypes[index] == bpNone || p.codesIsolatedRun[i] != ON { // continue scanning continue } switch pairTypes[index] { case bpOpen: // check if maximum pairing depth reached if p.openers.Len() == maxPairingDepth { p.openers.Init() return } // remember opener location, most recent first p.openers.PushFront(i) case bpClose: // see if there is a match count := 0 for elem := p.openers.Front(); elem != nil; elem = elem.Next() { count++ opener := elem.Value.(int) if p.matchOpener(pairValues, opener, i) { // if the opener matches, add nested pair to the ordered list p.pairPositions = append(p.pairPositions, bracketPair{opener, i}) // remove up to and including matched opener for ; count > 0; count-- { p.openers.Remove(p.openers.Front()) } break } } sort.Sort(p.pairPositions) // if we get here, the closing bracket matched no openers // and gets ignored } } } // Bracket pairs within an isolating run sequence are processed as units so // that both the opening and the closing paired bracket in a pair resolve to // the same direction. // // N0. Process bracket pairs in an isolating run sequence sequentially in // the logical order of the text positions of the opening paired brackets // using the logic given below. Within this scope, bidirectional types EN // and AN are treated as R. // // Identify the bracket pairs in the current isolating run sequence // according to BD16. For each bracket-pair element in the list of pairs of // text positions: // // a Inspect the bidirectional types of the characters enclosed within the // bracket pair. 
// // b If any strong type (either L or R) matching the embedding direction is // found, set the type for both brackets in the pair to match the embedding // direction. // // o [ e ] o -> o e e e o // // o [ o e ] -> o e o e e // // o [ NI e ] -> o e NI e e // // c Otherwise, if a strong type (opposite the embedding direction) is // found, test for adjacent strong types as follows: 1 First, check // backwards before the opening paired bracket until the first strong type // (L, R, or sos) is found. If that first preceding strong type is opposite // the embedding direction, then set the type for both brackets in the pair // to that type. 2 Otherwise, set the type for both brackets in the pair to // the embedding direction. // // o [ o ] e -> o o o o e // // o [ o NI ] o -> o o o NI o o // // e [ o ] o -> e e o e o // // e [ o ] e -> e e o e e // // e ( o [ o ] NI ) e -> e e o o o o NI e e // // d Otherwise, do not set the type for the current bracket pair. Note that // if the enclosed text contains no strong types the paired brackets will // both resolve to the same level when resolved individually using rules N1 // and N2. // // e ( NI ) o -> e ( NI ) o // getStrongTypeN0 maps character's directional code to strong type as required // by rule N0. // // TODO: have separate type for "strong" directionality. func (p *bracketPairer) getStrongTypeN0(index int) Class { switch p.codesIsolatedRun[index] { // in the scope of N0, number types are treated as R case EN, AN, AL, R: return R case L: return L default: return ON } } // classifyPairContent reports the strong types contained inside a Bracket Pair, // assuming the given embedding direction. // // It returns ON if no strong type is found. If a single strong type is found, // it returns this this type. Otherwise it returns the embedding direction. // // TODO: use separate type for "strong" directionality. func (p *bracketPairer) classifyPairContent(loc bracketPair, dirEmbed Class) Class { dirOpposite := ON for i := loc.opener + 1; i < loc.closer; i++ { dir := p.getStrongTypeN0(i) if dir == ON { continue } if dir == dirEmbed { return dir // type matching embedding direction found } dirOpposite = dir } // return ON if no strong type found, or class opposite to dirEmbed return dirOpposite } // classBeforePair determines which strong types are present before a Bracket // Pair. Return R or L if strong type found, otherwise ON. func (p *bracketPairer) classBeforePair(loc bracketPair) Class { for i := loc.opener - 1; i >= 0; i-- { if dir := p.getStrongTypeN0(i); dir != ON { return dir } } // no strong types found, return sos return p.sos } // assignBracketType implements rule N0 for a single bracket pair. 
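// Editor's illustrative sketch, not part of the vendored bidi source: a tiny
// in-package check of the N0 content classification above, built directly on
// the bracketPairer and bracketPair types from this file. The class values are
// invented input, not data derived from a real string.
func exampleClassifyPairContent() Class {
	p := &bracketPairer{
		// Directional classes for a five-character isolating run: the bracket
		// pair at positions 1 and 3 encloses a single strong R character.
		codesIsolatedRun: []Class{L, ON, R, ON, L},
		sos:              L,
	}
	// With embedding direction L, the enclosed R is the opposite strong type,
	// so classifyPairContent reports R and rule "N0 c" applies.
	return p.classifyPairContent(bracketPair{opener: 1, closer: 3}, L)
}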
func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initialTypes []Class) { // rule "N0, a", inspect contents of pair dirPair := p.classifyPairContent(loc, dirEmbed) // dirPair is now L, R, or N (no strong type found) // the following logical tests are performed out of order compared to // the statement of the rules but yield the same results if dirPair == ON { return // case "d" - nothing to do } if dirPair != dirEmbed { // case "c": strong type found, opposite - check before (c.1) dirPair = p.classBeforePair(loc) if dirPair == dirEmbed || dirPair == ON { // no strong opposite type found before - use embedding (c.2) dirPair = dirEmbed } } // else: case "b", strong type found matching embedding, // no explicit action needed, as dirPair is already set to embedding // direction // set the bracket types to the type found p.setBracketsToType(loc, dirPair, initialTypes) } func (p *bracketPairer) setBracketsToType(loc bracketPair, dirPair Class, initialTypes []Class) { p.codesIsolatedRun[loc.opener] = dirPair p.codesIsolatedRun[loc.closer] = dirPair for i := loc.opener + 1; i < loc.closer; i++ { index := p.indexes[i] if initialTypes[index] != NSM { break } p.codesIsolatedRun[i] = dirPair } for i := loc.closer + 1; i < len(p.indexes); i++ { index := p.indexes[i] if initialTypes[index] != NSM { break } p.codesIsolatedRun[i] = dirPair } } // resolveBrackets implements rule N0 for a list of pairs. func (p *bracketPairer) resolveBrackets(dirEmbed Class, initialTypes []Class) { for _, loc := range p.pairPositions { p.assignBracketType(loc, dirEmbed, initialTypes) } } core.go000066400000000000000000000721301324746544700327510ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicode/bidi// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package bidi import "log" // This implementation is a port based on the reference implementation found at: // http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/ // // described in Unicode Bidirectional Algorithm (UAX #9). // // Input: // There are two levels of input to the algorithm, since clients may prefer to // supply some information from out-of-band sources rather than relying on the // default behavior. // // - Bidi class array // - Bidi class array, with externally supplied base line direction // // Output: // Output is separated into several stages: // // - levels array over entire paragraph // - reordering array over entire paragraph // - levels array over line // - reordering array over line // // Note that for conformance to the Unicode Bidirectional Algorithm, // implementations are only required to generate correct reordering and // character directionality (odd or even levels) over a line. Generating // identical level arrays over a line is not required. Bidi explicit format // codes (LRE, RLE, LRO, RLO, PDF) and BN can be assigned arbitrary levels and // positions as long as the rest of the input is properly reordered. // // As the algorithm is defined to operate on a single paragraph at a time, this // implementation is written to handle single paragraphs. Thus rule P1 is // presumed by this implementation-- the data provided to the implementation is // assumed to be a single paragraph, and either contains no 'B' codes, or a // single 'B' code at the end of the input. 'B' is allowed as input to // illustrate how the algorithm assigns it a level. 
// // Also note that rules L3 and L4 depend on the rendering engine that uses the // result of the bidi algorithm. This implementation assumes that the rendering // engine expects combining marks in visual order (e.g. to the left of their // base character in RTL runs) and that it adjusts the glyphs used to render // mirrored characters that are in RTL runs so that they render appropriately. // level is the embedding level of a character. Even embedding levels indicate // left-to-right order and odd levels indicate right-to-left order. The special // level of -1 is reserved for undefined order. type level int8 const implicitLevel level = -1 // in returns if x is equal to any of the values in set. func (c Class) in(set ...Class) bool { for _, s := range set { if c == s { return true } } return false } // A paragraph contains the state of a paragraph. type paragraph struct { initialTypes []Class // Arrays of properties needed for paired bracket evaluation in N0 pairTypes []bracketType // paired Bracket types for paragraph pairValues []rune // rune for opening bracket or pbOpen and pbClose; 0 for pbNone embeddingLevel level // default: = implicitLevel; // at the paragraph levels resultTypes []Class resultLevels []level // Index of matching PDI for isolate initiator characters. For other // characters, the value of matchingPDI will be set to -1. For isolate // initiators with no matching PDI, matchingPDI will be set to the length of // the input string. matchingPDI []int // Index of matching isolate initiator for PDI characters. For other // characters, and for PDIs with no matching isolate initiator, the value of // matchingIsolateInitiator will be set to -1. matchingIsolateInitiator []int } // newParagraph initializes a paragraph. The user needs to supply a few arrays // corresponding to the preprocessed text input. The types correspond to the // Unicode BiDi classes for each rune. pairTypes indicates the bracket type for // each rune. pairValues provides a unique bracket class identifier for each // rune (suggested is the rune of the open bracket for opening and matching // close brackets, after normalization). The embedding levels are optional, but // may be supplied to encode embedding levels of styled text. // // TODO: return an error. func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, levels level) *paragraph { validateTypes(types) validatePbTypes(pairTypes) validatePbValues(pairValues, pairTypes) validateParagraphEmbeddingLevel(levels) p := ¶graph{ initialTypes: append([]Class(nil), types...), embeddingLevel: levels, pairTypes: pairTypes, pairValues: pairValues, resultTypes: append([]Class(nil), types...), } p.run() return p } func (p *paragraph) Len() int { return len(p.initialTypes) } // The algorithm. Does not include line-based processing (Rules L1, L2). // These are applied later in the line-based phase of the algorithm. func (p *paragraph) run() { p.determineMatchingIsolates() // 1) determining the paragraph level // Rule P1 is the requirement for entering this algorithm. // Rules P2, P3. // If no externally supplied paragraph embedding level, use default. if p.embeddingLevel == implicitLevel { p.embeddingLevel = p.determineParagraphEmbeddingLevel(0, p.Len()) } // Initialize result levels to paragraph embedding level. p.resultLevels = make([]level, p.Len()) setLevels(p.resultLevels, p.embeddingLevel) // 2) Explicit levels and directions // Rules X1-X8. p.determineExplicitEmbeddingLevels() // Rule X9. 
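// Editor's illustrative sketch, not part of the vendored bidi source: feeding
// newParagraph by hand, the way an in-package caller is expected to after
// classifying each rune. The three-rune input mixes an L character, a neutral
// opening bracket and an AL character; class and bracket data are spelled out
// explicitly, and implicitLevel lets rules P2/P3 pick the paragraph level.
func exampleNewParagraph() []level {
	types := []Class{L, ON, AL}                        // e.g. 'a', '(', an Arabic letter
	pairTypes := []bracketType{bpNone, bpOpen, bpNone} // only the bracket participates in N0
	pairValues := []rune{0, '(', 0}                    // canonical opener rune, 0 elsewhere
	p := newParagraph(types, pairTypes, pairValues, implicitLevel)
	return p.resultLevels // resolved embedding level per input position
}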
// We do not remove the embeddings, the overrides, the PDFs, and the BNs // from the string explicitly. But they are not copied into isolating run // sequences when they are created, so they are removed for all // practical purposes. // Rule X10. // Run remainder of algorithm one isolating run sequence at a time for _, seq := range p.determineIsolatingRunSequences() { // 3) resolving weak types // Rules W1-W7. seq.resolveWeakTypes() // 4a) resolving paired brackets // Rule N0 resolvePairedBrackets(seq) // 4b) resolving neutral types // Rules N1-N3. seq.resolveNeutralTypes() // 5) resolving implicit embedding levels // Rules I1, I2. seq.resolveImplicitLevels() // Apply the computed levels and types seq.applyLevelsAndTypes() } // Assign appropriate levels to 'hide' LREs, RLEs, LROs, RLOs, PDFs, and // BNs. This is for convenience, so the resulting level array will have // a value for every character. p.assignLevelsToCharactersRemovedByX9() } // determineMatchingIsolates determines the matching PDI for each isolate // initiator and vice versa. // // Definition BD9. // // At the end of this function: // // - The member variable matchingPDI is set to point to the index of the // matching PDI character for each isolate initiator character. If there is // no matching PDI, it is set to the length of the input text. For other // characters, it is set to -1. // - The member variable matchingIsolateInitiator is set to point to the // index of the matching isolate initiator character for each PDI character. // If there is no matching isolate initiator, or the character is not a PDI, // it is set to -1. func (p *paragraph) determineMatchingIsolates() { p.matchingPDI = make([]int, p.Len()) p.matchingIsolateInitiator = make([]int, p.Len()) for i := range p.matchingIsolateInitiator { p.matchingIsolateInitiator[i] = -1 } for i := range p.matchingPDI { p.matchingPDI[i] = -1 if t := p.resultTypes[i]; t.in(LRI, RLI, FSI) { depthCounter := 1 for j := i + 1; j < p.Len(); j++ { if u := p.resultTypes[j]; u.in(LRI, RLI, FSI) { depthCounter++ } else if u == PDI { if depthCounter--; depthCounter == 0 { p.matchingPDI[i] = j p.matchingIsolateInitiator[j] = i break } } } if p.matchingPDI[i] == -1 { p.matchingPDI[i] = p.Len() } } } } // determineParagraphEmbeddingLevel reports the resolved paragraph direction of // the substring limited by the given range [start, end). // // Determines the paragraph level based on rules P2, P3. This is also used // in rule X5c to find if an FSI should resolve to LRI or RLI. func (p *paragraph) determineParagraphEmbeddingLevel(start, end int) level { var strongType Class = unknownClass // Rule P2. for i := start; i < end; i++ { if t := p.resultTypes[i]; t.in(L, AL, R) { strongType = t break } else if t.in(FSI, LRI, RLI) { i = p.matchingPDI[i] // skip over to the matching PDI if i > end { log.Panic("assert (i <= end)") } } } // Rule P3. switch strongType { case unknownClass: // none found // default embedding level when no strong types found is 0. 
return 0 case L: return 0 default: // AL, R return 1 } } const maxDepth = 125 // This stack will store the embedding levels and override and isolated // statuses type directionalStatusStack struct { stackCounter int embeddingLevelStack [maxDepth + 1]level overrideStatusStack [maxDepth + 1]Class isolateStatusStack [maxDepth + 1]bool } func (s *directionalStatusStack) empty() { s.stackCounter = 0 } func (s *directionalStatusStack) pop() { s.stackCounter-- } func (s *directionalStatusStack) depth() int { return s.stackCounter } func (s *directionalStatusStack) push(level level, overrideStatus Class, isolateStatus bool) { s.embeddingLevelStack[s.stackCounter] = level s.overrideStatusStack[s.stackCounter] = overrideStatus s.isolateStatusStack[s.stackCounter] = isolateStatus s.stackCounter++ } func (s *directionalStatusStack) lastEmbeddingLevel() level { return s.embeddingLevelStack[s.stackCounter-1] } func (s *directionalStatusStack) lastDirectionalOverrideStatus() Class { return s.overrideStatusStack[s.stackCounter-1] } func (s *directionalStatusStack) lastDirectionalIsolateStatus() bool { return s.isolateStatusStack[s.stackCounter-1] } // Determine explicit levels using rules X1 - X8 func (p *paragraph) determineExplicitEmbeddingLevels() { var stack directionalStatusStack var overflowIsolateCount, overflowEmbeddingCount, validIsolateCount int // Rule X1. stack.push(p.embeddingLevel, ON, false) for i, t := range p.resultTypes { // Rules X2, X3, X4, X5, X5a, X5b, X5c switch t { case RLE, LRE, RLO, LRO, RLI, LRI, FSI: isIsolate := t.in(RLI, LRI, FSI) isRTL := t.in(RLE, RLO, RLI) // override if this is an FSI that resolves to RLI if t == FSI { isRTL = (p.determineParagraphEmbeddingLevel(i+1, p.matchingPDI[i]) == 1) } if isIsolate { p.resultLevels[i] = stack.lastEmbeddingLevel() if stack.lastDirectionalOverrideStatus() != ON { p.resultTypes[i] = stack.lastDirectionalOverrideStatus() } } var newLevel level if isRTL { // least greater odd newLevel = (stack.lastEmbeddingLevel() + 1) | 1 } else { // least greater even newLevel = (stack.lastEmbeddingLevel() + 2) &^ 1 } if newLevel <= maxDepth && overflowIsolateCount == 0 && overflowEmbeddingCount == 0 { if isIsolate { validIsolateCount++ } // Push new embedding level, override status, and isolated // status. // No check for valid stack counter, since the level check // suffices. switch t { case LRO: stack.push(newLevel, L, isIsolate) case RLO: stack.push(newLevel, R, isIsolate) default: stack.push(newLevel, ON, isIsolate) } // Not really part of the spec if !isIsolate { p.resultLevels[i] = newLevel } } else { // This is an invalid explicit formatting character, // so apply the "Otherwise" part of rules X2-X5b. if isIsolate { overflowIsolateCount++ } else { // !isIsolate if overflowIsolateCount == 0 { overflowEmbeddingCount++ } } } // Rule X6a case PDI: if overflowIsolateCount > 0 { overflowIsolateCount-- } else if validIsolateCount == 0 { // do nothing } else { overflowEmbeddingCount = 0 for !stack.lastDirectionalIsolateStatus() { stack.pop() } stack.pop() validIsolateCount-- } p.resultLevels[i] = stack.lastEmbeddingLevel() // Rule X7 case PDF: // Not really part of the spec p.resultLevels[i] = stack.lastEmbeddingLevel() if overflowIsolateCount > 0 { // do nothing } else if overflowEmbeddingCount > 0 { overflowEmbeddingCount-- } else if !stack.lastDirectionalIsolateStatus() && stack.depth() >= 2 { stack.pop() } case B: // paragraph separator. // Rule X8. 
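// Explanatory note (added for clarity; not part of the upstream source): the
// bit expressions above compute the "least greater odd" and "least greater
// even" levels required by rules X2-X5. Starting from paragraph level 0:
//
//	(0 + 1) | 1  == 1 // RLE/RLO/RLI pushes the next odd level
//	(0 + 2) &^ 1 == 2 // LRE/LRO/LRI pushes the next even level
//	(1 + 1) | 1  == 3 // from level 1 the next odd level is 3
//	(1 + 2) &^ 1 == 2 // from level 1 the next even level is 2
//
// A new level above maxDepth (125), or one reached while an overflow counter
// is non-zero, leaves the stack untouched and only bumps the overflow counters.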
// These values are reset for clarity, in this implementation B // can only occur as the last code in the array. stack.empty() overflowIsolateCount = 0 overflowEmbeddingCount = 0 validIsolateCount = 0 p.resultLevels[i] = p.embeddingLevel default: p.resultLevels[i] = stack.lastEmbeddingLevel() if stack.lastDirectionalOverrideStatus() != ON { p.resultTypes[i] = stack.lastDirectionalOverrideStatus() } } } } type isolatingRunSequence struct { p *paragraph indexes []int // indexes to the original string types []Class // type of each character using the index resolvedLevels []level // resolved levels after application of rules level level sos, eos Class } func (i *isolatingRunSequence) Len() int { return len(i.indexes) } func maxLevel(a, b level) level { if a > b { return a } return b } // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, // either L or R, for each isolating run sequence. func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { length := len(indexes) types := make([]Class, length) for i, x := range indexes { types[i] = p.resultTypes[x] } // assign level, sos and eos prevChar := indexes[0] - 1 for prevChar >= 0 && isRemovedByX9(p.initialTypes[prevChar]) { prevChar-- } prevLevel := p.embeddingLevel if prevChar >= 0 { prevLevel = p.resultLevels[prevChar] } var succLevel level lastType := types[length-1] if lastType.in(LRI, RLI, FSI) { succLevel = p.embeddingLevel } else { // the first character after the end of run sequence limit := indexes[length-1] + 1 for ; limit < p.Len() && isRemovedByX9(p.initialTypes[limit]); limit++ { } succLevel = p.embeddingLevel if limit < p.Len() { succLevel = p.resultLevels[limit] } } level := p.resultLevels[indexes[0]] return &isolatingRunSequence{ p: p, indexes: indexes, types: types, level: level, sos: typeForLevel(maxLevel(prevLevel, level)), eos: typeForLevel(maxLevel(succLevel, level)), } } // Resolving weak types Rules W1-W7. // // Note that some weak types (EN, AN) remain after this processing is // complete. func (s *isolatingRunSequence) resolveWeakTypes() { // on entry, only these types remain s.assertOnly(L, R, AL, EN, ES, ET, AN, CS, B, S, WS, ON, NSM, LRI, RLI, FSI, PDI) // Rule W1. // Changes all NSMs. preceedingCharacterType := s.sos for i, t := range s.types { if t == NSM { s.types[i] = preceedingCharacterType } else { if t.in(LRI, RLI, FSI, PDI) { preceedingCharacterType = ON } preceedingCharacterType = t } } // Rule W2. // EN does not change at the start of the run, because sos != AL. for i, t := range s.types { if t == EN { for j := i - 1; j >= 0; j-- { if t := s.types[j]; t.in(L, R, AL) { if t == AL { s.types[i] = AN } break } } } } // Rule W3. for i, t := range s.types { if t == AL { s.types[i] = R } } // Rule W4. // Since there must be values on both sides for this rule to have an // effect, the scan skips the first and last value. // // Although the scan proceeds left to right, and changes the type // values in a way that would appear to affect the computations // later in the scan, there is actually no problem. A change in the // current value can only affect the value to its immediate right, // and only affect it if it is ES or CS. But the current value can // only change if the value to its right is not ES or CS. Thus // either the current value will not change, or its change will have // no effect on the remainder of the analysis. 
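// Worked example (added for clarity; not part of the upstream source): take an
// isolating run sequence whose types start as {AL, EN, NSM} with sos == L.
//
//	W1: the NSM takes the type of the preceding character        -> {AL, EN, EN}
//	W2: each EN whose nearest preceding strong type is AL -> AN  -> {AL, AN, AN}
//	W3: every AL becomes R                                       -> {R, AN, AN}
//
// Rules W4-W7 below then handle separators, terminators and any EN that is
// left in a left-to-right context.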
for i := 1; i < s.Len()-1; i++ { t := s.types[i] if t == ES || t == CS { prevSepType := s.types[i-1] succSepType := s.types[i+1] if prevSepType == EN && succSepType == EN { s.types[i] = EN } else if s.types[i] == CS && prevSepType == AN && succSepType == AN { s.types[i] = AN } } } // Rule W5. for i, t := range s.types { if t == ET { // locate end of sequence runStart := i runEnd := s.findRunLimit(runStart, ET) // check values at ends of sequence t := s.sos if runStart > 0 { t = s.types[runStart-1] } if t != EN { t = s.eos if runEnd < len(s.types) { t = s.types[runEnd] } } if t == EN { setTypes(s.types[runStart:runEnd], EN) } // continue at end of sequence i = runEnd } } // Rule W6. for i, t := range s.types { if t.in(ES, ET, CS) { s.types[i] = ON } } // Rule W7. for i, t := range s.types { if t == EN { // set default if we reach start of run prevStrongType := s.sos for j := i - 1; j >= 0; j-- { t = s.types[j] if t == L || t == R { // AL's have been changed to R prevStrongType = t break } } if prevStrongType == L { s.types[i] = L } } } } // 6) resolving neutral types Rules N1-N2. func (s *isolatingRunSequence) resolveNeutralTypes() { // on entry, only these types can be in resultTypes s.assertOnly(L, R, EN, AN, B, S, WS, ON, RLI, LRI, FSI, PDI) for i, t := range s.types { switch t { case WS, ON, B, S, RLI, LRI, FSI, PDI: // find bounds of run of neutrals runStart := i runEnd := s.findRunLimit(runStart, B, S, WS, ON, RLI, LRI, FSI, PDI) // determine effective types at ends of run var leadType, trailType Class // Note that the character found can only be L, R, AN, or // EN. if runStart == 0 { leadType = s.sos } else { leadType = s.types[runStart-1] if leadType.in(AN, EN) { leadType = R } } if runEnd == len(s.types) { trailType = s.eos } else { trailType = s.types[runEnd] if trailType.in(AN, EN) { trailType = R } } var resolvedType Class if leadType == trailType { // Rule N1. resolvedType = leadType } else { // Rule N2. // Notice the embedding level of the run is used, not // the paragraph embedding level. resolvedType = typeForLevel(s.level) } setTypes(s.types[runStart:runEnd], resolvedType) // skip over run of (former) neutrals i = runEnd } } } func setLevels(levels []level, newLevel level) { for i := range levels { levels[i] = newLevel } } func setTypes(types []Class, newType Class) { for i := range types { types[i] = newType } } // 7) resolving implicit embedding levels Rules I1, I2. func (s *isolatingRunSequence) resolveImplicitLevels() { // on entry, only these types can be in resultTypes s.assertOnly(L, R, EN, AN) s.resolvedLevels = make([]level, len(s.types)) setLevels(s.resolvedLevels, s.level) if (s.level & 1) == 0 { // even level for i, t := range s.types { // Rule I1. if t == L { // no change } else if t == R { s.resolvedLevels[i] += 1 } else { // t == AN || t == EN s.resolvedLevels[i] += 2 } } } else { // odd level for i, t := range s.types { // Rule I2. if t == R { // no change } else { // t == L || t == AN || t == EN s.resolvedLevels[i] += 1 } } } } // Applies the levels and types resolved in rules W1-I2 to the // resultLevels array. func (s *isolatingRunSequence) applyLevelsAndTypes() { for i, x := range s.indexes { s.p.resultTypes[x] = s.types[i] s.p.resultLevels[x] = s.resolvedLevels[i] } } // Return the limit of the run consisting only of the types in validSet // starting at index. This checks the value at index, and will return // index if that value is not in validSet. 
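// Worked example (added for clarity; not part of the upstream source): after
// the weak rules, suppose a sequence at embedding level 0 reads {L, ON, R, EN}.
//
//	N1/N2: the ON sits between L and R; the surrounding types differ, so rule
//	       N2 assigns it typeForLevel(0) == L             -> {L, L, R, EN}
//	I1:    at an even level L stays put, R gains one level and EN gains two,
//	       so the resolved levels become                  -> {0, 0, 1, 2}
//
// applyLevelsAndTypes then copies these per-sequence results back into the
// paragraph-wide resultTypes and resultLevels arrays.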
func (s *isolatingRunSequence) findRunLimit(index int, validSet ...Class) int { loop: for ; index < len(s.types); index++ { t := s.types[index] for _, valid := range validSet { if t == valid { continue loop } } return index // didn't find a match in validSet } return len(s.types) } // Algorithm validation. Assert that all values in types are in the // provided set. func (s *isolatingRunSequence) assertOnly(codes ...Class) { loop: for i, t := range s.types { for _, c := range codes { if t == c { continue loop } } log.Panicf("invalid bidi code %v present in assertOnly at position %d", t, s.indexes[i]) } } // determineLevelRuns returns an array of level runs. Each level run is // described as an array of indexes into the input string. // // Determines the level runs. Rule X9 will be applied in determining the // runs, in the way that makes sure the characters that are supposed to be // removed are not included in the runs. func (p *paragraph) determineLevelRuns() [][]int { run := []int{} allRuns := [][]int{} currentLevel := implicitLevel for i := range p.initialTypes { if !isRemovedByX9(p.initialTypes[i]) { if p.resultLevels[i] != currentLevel { // we just encountered a new run; wrap up last run if currentLevel >= 0 { // only wrap it up if there was a run allRuns = append(allRuns, run) run = nil } // Start new run currentLevel = p.resultLevels[i] } run = append(run, i) } } // Wrap up the final run, if any if len(run) > 0 { allRuns = append(allRuns, run) } return allRuns } // Definition BD13. Determine isolating run sequences. func (p *paragraph) determineIsolatingRunSequences() []*isolatingRunSequence { levelRuns := p.determineLevelRuns() // Compute the run that each character belongs to runForCharacter := make([]int, p.Len()) for i, run := range levelRuns { for _, index := range run { runForCharacter[index] = i } } sequences := []*isolatingRunSequence{} var currentRunSequence []int for _, run := range levelRuns { first := run[0] if p.initialTypes[first] != PDI || p.matchingIsolateInitiator[first] == -1 { currentRunSequence = nil // int run = i; for { // Copy this level run into currentRunSequence currentRunSequence = append(currentRunSequence, run...) last := currentRunSequence[len(currentRunSequence)-1] lastT := p.initialTypes[last] if lastT.in(LRI, RLI, FSI) && p.matchingPDI[last] != p.Len() { run = levelRuns[runForCharacter[p.matchingPDI[last]]] } else { break } } sequences = append(sequences, p.isolatingRunSequence(currentRunSequence)) } } return sequences } // Assign level information to characters removed by rule X9. This is for // ease of relating the level information to the original input data. Note // that the levels assigned to these codes are arbitrary, they're chosen so // as to avoid breaking level runs. func (p *paragraph) assignLevelsToCharactersRemovedByX9() { for i, t := range p.initialTypes { if t.in(LRE, RLE, LRO, RLO, PDF, BN) { p.resultTypes[i] = t p.resultLevels[i] = -1 } } // now propagate forward the levels information (could have // propagated backward, the main thing is not to introduce a level // break where one doesn't already exist). if p.resultLevels[0] == -1 { p.resultLevels[0] = p.embeddingLevel } for i := 1; i < len(p.initialTypes); i++ { if p.resultLevels[i] == -1 { p.resultLevels[i] = p.resultLevels[i-1] } } // Embedding information is for informational purposes only so need not be // adjusted. } // // Output // // getLevels computes levels array breaking lines at offsets in linebreaks. // Rule L1. 
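// Worked example (added for clarity; not part of the upstream source): with
// resolved levels {0, 0, 1, 1, 0} and no characters removed by X9,
// determineLevelRuns yields the index runs
//
//	[[0 1] [2 3] [4]]
//
// and, absent isolate initiators, determineIsolatingRunSequences wraps each
// run into its own isolating run sequence (BD13). An LRI/RLI/FSI whose
// matching PDI lies in a later run would instead chain the two runs into a
// single sequence.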
// // The linebreaks array must include at least one value. The values must be // in strictly increasing order (no duplicates) between 1 and the length of // the text, inclusive. The last value must be the length of the text. func (p *paragraph) getLevels(linebreaks []int) []level { // Note that since the previous processing has removed all // P, S, and WS values from resultTypes, the values referred to // in these rules are the initial types, before any processing // has been applied (including processing of overrides). // // This example implementation has reinserted explicit format codes // and BN, in order that the levels array correspond to the // initial text. Their final placement is not normative. // These codes are treated like WS in this implementation, // so they don't interrupt sequences of WS. validateLineBreaks(linebreaks, p.Len()) result := append([]level(nil), p.resultLevels...) // don't worry about linebreaks since if there is a break within // a series of WS values preceding S, the linebreak itself // causes the reset. for i, t := range p.initialTypes { if t.in(B, S) { // Rule L1, clauses one and two. result[i] = p.embeddingLevel // Rule L1, clause three. for j := i - 1; j >= 0; j-- { if isWhitespace(p.initialTypes[j]) { // including format codes result[j] = p.embeddingLevel } else { break } } } } // Rule L1, clause four. start := 0 for _, limit := range linebreaks { for j := limit - 1; j >= start; j-- { if isWhitespace(p.initialTypes[j]) { // including format codes result[j] = p.embeddingLevel } else { break } } start = limit } return result } // getReordering returns the reordering of lines from a visual index to a // logical index for line breaks at the given offsets. // // Lines are concatenated from left to right. So for example, the fifth // character from the left on the third line is // // getReordering(linebreaks)[linebreaks[1] + 4] // // (linebreaks[1] is the position after the last character of the second // line, which is also the index of the first character on the third line, // and adding four gets the fifth character from the left). // // The linebreaks array must include at least one value. The values must be // in strictly increasing order (no duplicates) between 1 and the length of // the text, inclusive. The last value must be the length of the text. func (p *paragraph) getReordering(linebreaks []int) []int { validateLineBreaks(linebreaks, p.Len()) return computeMultilineReordering(p.getLevels(linebreaks), linebreaks) } // Return multiline reordering array for a given level array. Reordering // does not occur across a line break. func computeMultilineReordering(levels []level, linebreaks []int) []int { result := make([]int, len(levels)) start := 0 for _, limit := range linebreaks { tempLevels := make([]level, limit-start) copy(tempLevels, levels[start:]) for j, order := range computeReordering(tempLevels) { result[start+j] = order + start } start = limit } return result } // Return reordering array for a given level array. This reorders a single // line. The reordering is a visual to logical map. For example, the // leftmost char is string.charAt(order[0]). Rule L2. func computeReordering(levels []level) []int { result := make([]int, len(levels)) // initialize order for i := range result { result[i] = i } // locate highest level found on line. // Note the rules say text, but no reordering across line bounds is // performed, so this is sufficient. 
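// Worked example (added for clarity; not part of the upstream source): for a
// single line with levels {0, 1, 1, 0} the highest level is 1 and the lowest
// odd level is 1, so rule L2 reverses the one run at level 1:
//
//	computeReordering([]level{0, 1, 1, 0}) == []int{0, 2, 1, 3}
//
// i.e. the two right-to-left characters swap places visually while the
// surrounding left-to-right characters keep their logical positions.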
highestLevel := level(0) lowestOddLevel := level(maxDepth + 2) for _, level := range levels { if level > highestLevel { highestLevel = level } if level&1 != 0 && level < lowestOddLevel { lowestOddLevel = level } } for level := highestLevel; level >= lowestOddLevel; level-- { for i := 0; i < len(levels); i++ { if levels[i] >= level { // find range of text at or above this level start := i limit := i + 1 for limit < len(levels) && levels[limit] >= level { limit++ } for j, k := start, limit-1; j < k; j, k = j+1, k-1 { result[j], result[k] = result[k], result[j] } // skip to end of level run i = limit } } } return result } // isWhitespace reports whether the type is considered a whitespace type for the // line break rules. func isWhitespace(c Class) bool { switch c { case LRE, RLE, LRO, RLO, PDF, LRI, RLI, FSI, PDI, BN, WS: return true } return false } // isRemovedByX9 reports whether the type is one of the types removed in X9. func isRemovedByX9(c Class) bool { switch c { case LRE, RLE, LRO, RLO, PDF, BN: return true } return false } // typeForLevel reports the strong type (L or R) corresponding to the level. func typeForLevel(level level) Class { if (level & 0x1) == 0 { return L } return R } // TODO: change validation to not panic func validateTypes(types []Class) { if len(types) == 0 { log.Panic("types is null") } for i, t := range types[:len(types)-1] { if t == B { log.Panicf("B type before end of paragraph at index: %d", i) } } } func validateParagraphEmbeddingLevel(embeddingLevel level) { if embeddingLevel != implicitLevel && embeddingLevel != 0 && embeddingLevel != 1 { log.Panicf("illegal paragraph embedding level: %d", embeddingLevel) } } func validateLineBreaks(linebreaks []int, textLength int) { prev := 0 for i, next := range linebreaks { if next <= prev { log.Panicf("bad linebreak: %d at index: %d", next, i) } prev = next } if prev != textLength { log.Panicf("last linebreak was %d, want %d", prev, textLength) } } func validatePbTypes(pairTypes []bracketType) { if len(pairTypes) == 0 { log.Panic("pairTypes is null") } for i, pt := range pairTypes { switch pt { case bpNone, bpOpen, bpClose: default: log.Panicf("illegal pairType value at %d: %v", i, pairTypes[i]) } } } func validatePbValues(pairValues []rune, pairTypes []bracketType) { if pairValues == nil { log.Panic("pairValues is null") } if len(pairTypes) != len(pairValues) { log.Panic("pairTypes is different length from pairValues") } } gen.go000066400000000000000000000057711324746544700326010ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicode/bidi// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
// +build ignore package main import ( "flag" "log" "golang.org/x/text/internal/gen" "golang.org/x/text/internal/triegen" "golang.org/x/text/internal/ucd" ) var outputFile = flag.String("out", "tables.go", "output file") func main() { gen.Init() gen.Repackage("gen_trieval.go", "trieval.go", "bidi") gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") genTables() } // bidiClass names and codes taken from class "bc" in // http://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt var bidiClass = map[string]Class{ "AL": AL, // ArabicLetter "AN": AN, // ArabicNumber "B": B, // ParagraphSeparator "BN": BN, // BoundaryNeutral "CS": CS, // CommonSeparator "EN": EN, // EuropeanNumber "ES": ES, // EuropeanSeparator "ET": ET, // EuropeanTerminator "L": L, // LeftToRight "NSM": NSM, // NonspacingMark "ON": ON, // OtherNeutral "R": R, // RightToLeft "S": S, // SegmentSeparator "WS": WS, // WhiteSpace "FSI": Control, "PDF": Control, "PDI": Control, "LRE": Control, "LRI": Control, "LRO": Control, "RLE": Control, "RLI": Control, "RLO": Control, } func genTables() { if numClass > 0x0F { log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) } w := gen.NewCodeWriter() defer w.WriteGoFile(*outputFile, "bidi") gen.WriteUnicodeVersion(w) t := triegen.NewTrie("bidi") // Build data about bracket mapping. These bits need to be or-ed with // any other bits. orMask := map[rune]uint64{} xorMap := map[rune]int{} xorMasks := []rune{0} // First value is no-op. ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { r1 := p.Rune(0) r2 := p.Rune(1) xor := r1 ^ r2 if _, ok := xorMap[xor]; !ok { xorMap[xor] = len(xorMasks) xorMasks = append(xorMasks, xor) } entry := uint64(xorMap[xor]) << xorMaskShift switch p.String(2) { case "o": entry |= openMask case "c", "n": default: log.Fatalf("Unknown bracket class %q.", p.String(2)) } orMask[r1] = entry }) w.WriteComment(` xorMasks contains masks to be xor-ed with brackets to get the reverse version.`) w.WriteVar("xorMasks", xorMasks) done := map[rune]bool{} insert := func(r rune, c Class) { if !done[r] { t.Insert(r, orMask[r]|uint64(c)) done[r] = true } } // Insert the derived BiDi properties. ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { r := p.Rune(0) class, ok := bidiClass[p.String(1)] if !ok { log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) } insert(r, class) }) visitDefaults(insert) // TODO: use sparse blocks. This would reduce table size considerably // from the looks of it. sz, err := t.Gen(w) if err != nil { log.Fatal(err) } w.Size += sz } // dummy values to make methods in gen_common compile. The real versions // will be generated by this file to tables.go. var ( xorMasks []rune ) gen_ranges.go000066400000000000000000000030431324746544700341260ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicode/bidi// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build ignore package main import ( "unicode" "golang.org/x/text/internal/gen" "golang.org/x/text/internal/ucd" "golang.org/x/text/unicode/rangetable" ) // These tables are hand-extracted from: // http://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt func visitDefaults(fn func(r rune, c Class)) { // first write default values for ranges listed above. 
visitRunes(fn, AL, []rune{ 0x0600, 0x07BF, // Arabic 0x08A0, 0x08FF, // Arabic Extended-A 0xFB50, 0xFDCF, // Arabic Presentation Forms 0xFDF0, 0xFDFF, 0xFE70, 0xFEFF, 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols }) visitRunes(fn, R, []rune{ 0x0590, 0x05FF, // Hebrew 0x07C0, 0x089F, // Nko et al. 0xFB1D, 0xFB4F, 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. 0x0001E800, 0x0001EDFF, 0x0001EF00, 0x0001EFFF, }) visitRunes(fn, ET, []rune{ // European Terminator 0x20A0, 0x20Cf, // Currency symbols }) rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { fn(r, BN) // Boundary Neutral }) ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { if p.String(1) == "Default_Ignorable_Code_Point" { fn(p.Rune(0), BN) // Boundary Neutral } }) } func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { for i := 0; i < len(runes); i += 2 { lo, hi := runes[i], runes[i+1] for j := lo; j <= hi; j++ { fn(j, c) } } } gen_trieval.go000066400000000000000000000033251324746544700343200ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicode/bidi// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build ignore package main // Class is the Unicode BiDi class. Each rune has a single class. type Class uint const ( L Class = iota // LeftToRight R // RightToLeft EN // EuropeanNumber ES // EuropeanSeparator ET // EuropeanTerminator AN // ArabicNumber CS // CommonSeparator B // ParagraphSeparator S // SegmentSeparator WS // WhiteSpace ON // OtherNeutral BN // BoundaryNeutral NSM // NonspacingMark AL // ArabicLetter Control // Control LRO - PDI numClass LRO // LeftToRightOverride RLO // RightToLeftOverride LRE // LeftToRightEmbedding RLE // RightToLeftEmbedding PDF // PopDirectionalFormat LRI // LeftToRightIsolate RLI // RightToLeftIsolate FSI // FirstStrongIsolate PDI // PopDirectionalIsolate unknownClass = ^Class(0) ) var controlToClass = map[rune]Class{ 0x202D: LRO, // LeftToRightOverride, 0x202E: RLO, // RightToLeftOverride, 0x202A: LRE, // LeftToRightEmbedding, 0x202B: RLE, // RightToLeftEmbedding, 0x202C: PDF, // PopDirectionalFormat, 0x2066: LRI, // LeftToRightIsolate, 0x2067: RLI, // RightToLeftIsolate, 0x2068: FSI, // FirstStrongIsolate, 0x2069: PDI, // PopDirectionalIsolate, } // A trie entry has the following bits: // 7..5 XOR mask for brackets // 4 1: Bracket open, 0: Bracket close // 3..0 Class type const ( openMask = 0x10 xorMaskShift = 5 ) prop.go000066400000000000000000000133461324746544700330050ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicode/bidi// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package bidi import "unicode/utf8" // Properties provides access to BiDi properties of runes. type Properties struct { entry uint8 last uint8 } var trie = newBidiTrie(0) // TODO: using this for bidirule reduces the running time by about 5%. Consider // if this is worth exposing or if we can find a way to speed up the Class // method. // // // CompactClass is like Class, but maps all of the BiDi control classes // // (LRO, RLO, LRE, RLE, PDF, LRI, RLI, FSI, PDI) to the class Control. 
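// Worked example (added for clarity; not part of the upstream source): with
// the trie-entry bit layout described in gen_trieval.go above, the generated
// value for U+0028 '(' in tables.go is 0x3a:
//
//	0x3a == 1<<xorMaskShift | openMask | 0x0a // xorMasks index 1, open bit, Class ON
//
// U+0029 ')' gets 0x2a, identical except that the open bit is clear, and
// xor-ing either rune with xorMasks[1] == 1 yields its mirror:
// 0x28 ^ 1 == 0x29 and 0x29 ^ 1 == 0x28.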
// func (p Properties) CompactClass() Class { // return Class(p.entry & 0x0F) // } // Class returns the Bidi class for p. func (p Properties) Class() Class { c := Class(p.entry & 0x0F) if c == Control { c = controlByteToClass[p.last&0xF] } return c } // IsBracket reports whether the rune is a bracket. func (p Properties) IsBracket() bool { return p.entry&0xF0 != 0 } // IsOpeningBracket reports whether the rune is an opening bracket. // IsBracket must return true. func (p Properties) IsOpeningBracket() bool { return p.entry&openMask != 0 } // TODO: find a better API and expose. func (p Properties) reverseBracket(r rune) rune { return xorMasks[p.entry>>xorMaskShift] ^ r } var controlByteToClass = [16]Class{ 0xD: LRO, // U+202D LeftToRightOverride, 0xE: RLO, // U+202E RightToLeftOverride, 0xA: LRE, // U+202A LeftToRightEmbedding, 0xB: RLE, // U+202B RightToLeftEmbedding, 0xC: PDF, // U+202C PopDirectionalFormat, 0x6: LRI, // U+2066 LeftToRightIsolate, 0x7: RLI, // U+2067 RightToLeftIsolate, 0x8: FSI, // U+2068 FirstStrongIsolate, 0x9: PDI, // U+2069 PopDirectionalIsolate, } // LookupRune returns properties for r. func LookupRune(r rune) (p Properties, size int) { var buf [4]byte n := utf8.EncodeRune(buf[:], r) return Lookup(buf[:n]) } // TODO: these lookup methods are based on the generated trie code. The returned // sizes have slightly different semantics from the generated code, in that it // always returns size==1 for an illegal UTF-8 byte (instead of the length // of the maximum invalid subsequence). Most Transformers, like unicode/norm, // leave invalid UTF-8 untouched, in which case it has performance benefits to // do so (without changing the semantics). Bidi requires the semantics used here // for the bidirule implementation to be compatible with the Go semantics. // They ultimately should perhaps be adopted by all trie implementations, for // convenience sake. // This unrolled code also boosts performance of the secure/bidirule package by // about 30%. // So, to remove this code: // - add option to trie generator to define return type. // - always return 1 byte size for ill-formed UTF-8 runes. // Lookup returns properties for the first rune in s and the width in bytes of // its encoding. The size will be 0 if s does not hold enough bytes to complete // the encoding. 
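// Usage sketch (added for clarity; not part of the upstream source): the
// helpers above combine as follows; the concrete values follow from the '('
// entry discussed earlier. reverseBracket is unexported, so the last call only
// works from within this package.
//
//	p, sz := LookupRune('(') // sz == 1: '(' is a single ASCII byte
//	p.Class()                // ON
//	p.IsBracket()            // true
//	p.IsOpeningBracket()     // true
//	p.reverseBracket('(')    // ')'
//	_ = sz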
func Lookup(s []byte) (p Properties, sz int) { c0 := s[0] switch { case c0 < 0x80: // is ASCII return Properties{entry: bidiValues[c0]}, 1 case c0 < 0xC2: return Properties{}, 1 case c0 < 0xE0: // 2-byte UTF-8 if len(s) < 2 { return Properties{}, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return Properties{}, 1 } return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2 case c0 < 0xF0: // 3-byte UTF-8 if len(s) < 3 { return Properties{}, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return Properties{}, 1 } o := uint32(i)<<6 + uint32(c1) i = bidiIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return Properties{}, 1 } return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3 case c0 < 0xF8: // 4-byte UTF-8 if len(s) < 4 { return Properties{}, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return Properties{}, 1 } o := uint32(i)<<6 + uint32(c1) i = bidiIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return Properties{}, 1 } o = uint32(i)<<6 + uint32(c2) i = bidiIndex[o] c3 := s[3] if c3 < 0x80 || 0xC0 <= c3 { return Properties{}, 1 } return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4 } // Illegal rune return Properties{}, 1 } // LookupString returns properties for the first rune in s and the width in // bytes of its encoding. The size will be 0 if s does not hold enough bytes to // complete the encoding. func LookupString(s string) (p Properties, sz int) { c0 := s[0] switch { case c0 < 0x80: // is ASCII return Properties{entry: bidiValues[c0]}, 1 case c0 < 0xC2: return Properties{}, 1 case c0 < 0xE0: // 2-byte UTF-8 if len(s) < 2 { return Properties{}, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return Properties{}, 1 } return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2 case c0 < 0xF0: // 3-byte UTF-8 if len(s) < 3 { return Properties{}, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return Properties{}, 1 } o := uint32(i)<<6 + uint32(c1) i = bidiIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return Properties{}, 1 } return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3 case c0 < 0xF8: // 4-byte UTF-8 if len(s) < 4 { return Properties{}, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return Properties{}, 1 } o := uint32(i)<<6 + uint32(c1) i = bidiIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return Properties{}, 1 } o = uint32(i)<<6 + uint32(c2) i = bidiIndex[o] c3 := s[3] if c3 < 0x80 || 0xC0 <= c3 { return Properties{}, 1 } return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4 } // Illegal rune return Properties{}, 1 } tables.go000066400000000000000000003322411324746544700332750ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicode/bidi// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. package bidi // UnicodeVersion is the Unicode version from which the tables in this package are derived. const UnicodeVersion = "9.0.0" // xorMasks contains masks to be xor-ed with brackets to get the reverse // version. var xorMasks = []int32{ // 8 elements 0, 1, 6, 7, 3, 15, 29, 63, } // Size: 56 bytes // lookup returns the trie value for the first UTF-8 encoding in s and // the width in bytes of this encoding. The size will be 0 if s does not // hold enough bytes to complete the encoding. len(s) must be greater than 0. 
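// Usage sketch (added for clarity; not part of the upstream source): the size
// conventions documented for Lookup above, using U+05D0 HEBREW LETTER ALEF,
// whose UTF-8 encoding is the two bytes 0xD7 0x90 and whose BiDi class is R.
//
//	p, sz := Lookup([]byte{0xD7, 0x90}) // p.Class() == R, sz == 2
//	_, sz = Lookup([]byte{0xD7})        // sz == 0: not enough bytes to finish the rune
//	_, sz = Lookup([]byte{0x90})        // sz == 1: an ill-formed byte consumes exactly one byte
//	_ = p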
func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { c0 := s[0] switch { case c0 < 0x80: // is ASCII return bidiValues[c0], 1 case c0 < 0xC2: return 0, 1 // Illegal UTF-8: not a starter, not ASCII. case c0 < 0xE0: // 2-byte UTF-8 if len(s) < 2 { return 0, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c1), 2 case c0 < 0xF0: // 3-byte UTF-8 if len(s) < 3 { return 0, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } o := uint32(i)<<6 + uint32(c1) i = bidiIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return 0, 2 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c2), 3 case c0 < 0xF8: // 4-byte UTF-8 if len(s) < 4 { return 0, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } o := uint32(i)<<6 + uint32(c1) i = bidiIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return 0, 2 // Illegal UTF-8: not a continuation byte. } o = uint32(i)<<6 + uint32(c2) i = bidiIndex[o] c3 := s[3] if c3 < 0x80 || 0xC0 <= c3 { return 0, 3 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c3), 4 } // Illegal rune return 0, 1 } // lookupUnsafe returns the trie value for the first UTF-8 encoding in s. // s must start with a full and valid UTF-8 encoded rune. func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { c0 := s[0] if c0 < 0x80 { // is ASCII return bidiValues[c0] } i := bidiIndex[c0] if c0 < 0xE0 { // 2-byte UTF-8 return t.lookupValue(uint32(i), s[1]) } i = bidiIndex[uint32(i)<<6+uint32(s[1])] if c0 < 0xF0 { // 3-byte UTF-8 return t.lookupValue(uint32(i), s[2]) } i = bidiIndex[uint32(i)<<6+uint32(s[2])] if c0 < 0xF8 { // 4-byte UTF-8 return t.lookupValue(uint32(i), s[3]) } return 0 } // lookupString returns the trie value for the first UTF-8 encoding in s and // the width in bytes of this encoding. The size will be 0 if s does not // hold enough bytes to complete the encoding. len(s) must be greater than 0. func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { c0 := s[0] switch { case c0 < 0x80: // is ASCII return bidiValues[c0], 1 case c0 < 0xC2: return 0, 1 // Illegal UTF-8: not a starter, not ASCII. case c0 < 0xE0: // 2-byte UTF-8 if len(s) < 2 { return 0, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c1), 2 case c0 < 0xF0: // 3-byte UTF-8 if len(s) < 3 { return 0, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } o := uint32(i)<<6 + uint32(c1) i = bidiIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return 0, 2 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c2), 3 case c0 < 0xF8: // 4-byte UTF-8 if len(s) < 4 { return 0, 0 } i := bidiIndex[c0] c1 := s[1] if c1 < 0x80 || 0xC0 <= c1 { return 0, 1 // Illegal UTF-8: not a continuation byte. } o := uint32(i)<<6 + uint32(c1) i = bidiIndex[o] c2 := s[2] if c2 < 0x80 || 0xC0 <= c2 { return 0, 2 // Illegal UTF-8: not a continuation byte. } o = uint32(i)<<6 + uint32(c2) i = bidiIndex[o] c3 := s[3] if c3 < 0x80 || 0xC0 <= c3 { return 0, 3 // Illegal UTF-8: not a continuation byte. } return t.lookupValue(uint32(i), c3), 4 } // Illegal rune return 0, 1 } // lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. 
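// Worked example (added for clarity; not part of the upstream source): for
// ASCII input the lookup methods above skip the trie and index bidiValues
// directly. '[' (0x5b) is stored below as 0x5a: class ON with the open-bracket
// bit set and xorMasks index 2 (mask 6), so its mirror is 0x5b ^ 6 == 0x5d, ']'.
//
//	v, sz := trie.lookup([]byte("[")) // v == 0x5a, sz == 1
//	_, _ = v, sz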
// s must start with a full and valid UTF-8 encoded rune. func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { c0 := s[0] if c0 < 0x80 { // is ASCII return bidiValues[c0] } i := bidiIndex[c0] if c0 < 0xE0 { // 2-byte UTF-8 return t.lookupValue(uint32(i), s[1]) } i = bidiIndex[uint32(i)<<6+uint32(s[1])] if c0 < 0xF0 { // 3-byte UTF-8 return t.lookupValue(uint32(i), s[2]) } i = bidiIndex[uint32(i)<<6+uint32(s[2])] if c0 < 0xF8 { // 4-byte UTF-8 return t.lookupValue(uint32(i), s[3]) } return 0 } // bidiTrie. Total size: 15744 bytes (15.38 KiB). Checksum: b4c3b70954803b86. type bidiTrie struct{} func newBidiTrie(i int) *bidiTrie { return &bidiTrie{} } // lookupValue determines the type of block n and looks up the value for b. func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { switch { default: return uint8(bidiValues[n<<6+uint32(b)]) } } // bidiValues: 222 blocks, 14208 entries, 14208 bytes // The third block is the zero block. var bidiValues = [14208]uint8{ // Block 0x0, offset 0x0 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, // Block 0x1, offset 0x40 0x40: 0x000a, 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, 0x7b: 0x005a, 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, // Block 0x2, offset 0x80 // Block 0x3, offset 0xc0 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, // Block 0x4, offset 0x100 0x117: 0x000a, 0x137: 0x000a, // Block 0x5, offset 0x140 0x179: 0x000a, 0x17a: 0x000a, // Block 0x6, offset 0x180 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, 0x19e: 0x000a, 0x19f: 
0x000a, 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, // Block 0x7, offset 0x1c0 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, // Block 0x8, offset 0x200 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, 0x234: 0x000a, 0x235: 0x000a, 0x23e: 0x000a, // Block 0x9, offset 0x240 0x244: 0x000a, 0x245: 0x000a, 0x247: 0x000a, // Block 0xa, offset 0x280 0x2b6: 0x000a, // Block 0xb, offset 0x2c0 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, // Block 0xc, offset 0x300 0x30a: 0x000a, 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, // Block 0xd, offset 0x340 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, 0x352: 0x0001, 0x353: 
0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, // Block 0xe, offset 0x380 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, // Block 0xf, offset 0x3c0 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, // Block 0x10, offset 0x400 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 
0x43b: 0x000d, 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, // Block 0x11, offset 0x440 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, 0x464: 0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, // Block 0x12, offset 0x480 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, // Block 0x13, offset 0x4c0 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, // Block 0x14, offset 0x500 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 
0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, // Block 0x15, offset 0x540 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 0x544: 0x0001, 0x545: 0x0001, 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, 0x57c: 0x0001, 0x57d: 0x0001, 0x57e: 0x0001, 0x57f: 0x0001, // Block 0x16, offset 0x580 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, // Block 0x17, offset 0x5c0 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x0001, 0x5e1: 0x0001, 0x5e2: 0x0001, 0x5e3: 0x0001, 0x5e4: 0x0001, 0x5e5: 0x0001, 0x5e6: 0x0001, 0x5e7: 0x0001, 0x5e8: 0x0001, 0x5e9: 0x0001, 0x5ea: 0x0001, 0x5eb: 0x0001, 0x5ec: 0x0001, 0x5ed: 0x0001, 0x5ee: 0x0001, 0x5ef: 0x0001, 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, // Block 0x18, offset 0x600 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, 
0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, 0x624: 0x000d, 0x625: 0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, // Block 0x19, offset 0x640 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, 0x652: 0x000d, 0x653: 0x000d, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, // Block 0x1a, offset 0x680 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, 0x6ba: 0x000c, 0x6bc: 0x000c, // Block 0x1b, offset 0x6c0 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, 0x6cd: 0x000c, 0x6d1: 0x000c, 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, 0x6e2: 0x000c, 0x6e3: 0x000c, // Block 0x1c, offset 0x700 0x701: 0x000c, 0x73c: 0x000c, // Block 0x1d, offset 0x740 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, 0x74d: 0x000c, 0x762: 0x000c, 0x763: 0x000c, 0x772: 0x0004, 0x773: 0x0004, 0x77b: 0x0004, // Block 0x1e, offset 0x780 0x781: 0x000c, 0x782: 0x000c, 0x7bc: 0x000c, // Block 0x1f, offset 0x7c0 0x7c1: 0x000c, 0x7c2: 0x000c, 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, // Block 0x20, offset 0x800 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, 0x807: 0x000c, 0x808: 0x000c, 0x80d: 0x000c, 0x822: 0x000c, 0x823: 0x000c, 0x831: 0x0004, // Block 0x21, offset 0x840 0x841: 0x000c, 0x87c: 0x000c, 0x87f: 0x000c, // Block 0x22, offset 0x880 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, 0x88d: 0x000c, 0x896: 0x000c, 0x8a2: 0x000c, 0x8a3: 0x000c, // Block 0x23, offset 0x8c0 0x8c2: 0x000c, // Block 0x24, offset 0x900 0x900: 0x000c, 0x90d: 0x000c, 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, // Block 0x25, offset 0x940 0x940: 0x000c, 0x97e: 0x000c, 0x97f: 0x000c, // Block 0x26, offset 0x980 0x980: 0x000c, 0x986: 0x000c, 0x987: 0x000c, 0x988: 
0x000c, 0x98a: 0x000c, 0x98b: 0x000c, 0x98c: 0x000c, 0x98d: 0x000c, 0x995: 0x000c, 0x996: 0x000c, 0x9a2: 0x000c, 0x9a3: 0x000c, 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, // Block 0x27, offset 0x9c0 0x9cc: 0x000c, 0x9cd: 0x000c, 0x9e2: 0x000c, 0x9e3: 0x000c, // Block 0x28, offset 0xa00 0xa01: 0x000c, // Block 0x29, offset 0xa40 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, 0xa4d: 0x000c, 0xa62: 0x000c, 0xa63: 0x000c, // Block 0x2a, offset 0xa80 0xa8a: 0x000c, 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c, // Block 0x2b, offset 0xac0 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c, 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c, 0xaff: 0x0004, // Block 0x2c, offset 0xb00 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c, 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c, // Block 0x2d, offset 0xb40 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c, 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7b: 0x000c, 0xb7c: 0x000c, // Block 0x2e, offset 0xb80 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c, 0xb8c: 0x000c, 0xb8d: 0x000c, // Block 0x2f, offset 0xbc0 0xbd8: 0x000c, 0xbd9: 0x000c, 0xbf5: 0x000c, 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a, 0xbfc: 0x003a, 0xbfd: 0x002a, // Block 0x30, offset 0xc00 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c, 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c, 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c, // Block 0x31, offset 0xc40 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c, 0xc46: 0x000c, 0xc47: 0x000c, 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c, 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c, 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c, 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c, 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c, 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c, 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, 0xc7c: 0x000c, // Block 0x32, offset 0xc80 0xc86: 0x000c, // Block 0x33, offset 0xcc0 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c, 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c, 0xcf6: 0x000c, 0xcf7: 0x000c, 0xcf9: 0x000c, 0xcfa: 0x000c, 0xcfd: 0x000c, 0xcfe: 0x000c, // Block 0x34, offset 0xd00 0xd18: 0x000c, 0xd19: 0x000c, 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c, 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, // Block 0x35, offset 0xd40 0xd42: 0x000c, 0xd45: 0x000c, 0xd46: 0x000c, 0xd4d: 0x000c, 0xd5d: 0x000c, // Block 0x36, offset 0xd80 0xd9d: 0x000c, 0xd9e: 0x000c, 0xd9f: 0x000c, // Block 0x37, offset 0xdc0 0xdd0: 0x000a, 0xdd1: 0x000a, 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a, 0xdd8: 0x000a, 0xdd9: 0x000a, // Block 0x38, offset 0xe00 0xe00: 0x000a, // Block 0x39, offset 0xe40 0xe40: 0x0009, 0xe5b: 0x007a, 0xe5c: 0x006a, // Block 0x3a, offset 0xe80 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c, 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c, // Block 0x3b, offset 0xec0 0xed2: 0x000c, 0xed3: 0x000c, 0xef2: 0x000c, 0xef3: 0x000c, // 
Block 0x3c, offset 0xf00 0xf34: 0x000c, 0xf35: 0x000c, 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c, 0xf3c: 0x000c, 0xf3d: 0x000c, // Block 0x3d, offset 0xf40 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c, 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c, 0xf52: 0x000c, 0xf53: 0x000c, 0xf5b: 0x0004, 0xf5d: 0x000c, 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a, 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a, // Block 0x3e, offset 0xf80 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a, 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c, 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b, // Block 0x3f, offset 0xfc0 0xfc5: 0x000c, 0xfc6: 0x000c, 0xfe9: 0x000c, // Block 0x40, offset 0x1000 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c, 0x1027: 0x000c, 0x1028: 0x000c, 0x1032: 0x000c, 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c, // Block 0x41, offset 0x1040 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a, // Block 0x42, offset 0x1080 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a, 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a, 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a, 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a, 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a, 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a, // Block 0x43, offset 0x10c0 0x10d7: 0x000c, 0x10d8: 0x000c, 0x10db: 0x000c, // Block 0x44, offset 0x1100 0x1116: 0x000c, 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c, 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c, 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c, 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c, 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c, 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c, 0x113c: 0x000c, 0x113f: 0x000c, // Block 0x45, offset 0x1140 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, 0x117c: 0x000c, 0x117d: 0x000c, 0x117e: 0x000c, // Block 0x46, offset 0x1180 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c, 0x11b4: 0x000c, 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, 0x11bc: 0x000c, // Block 0x47, offset 0x11c0 0x11c2: 0x000c, 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c, 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c, // Block 0x48, offset 0x1200 0x1200: 0x000c, 0x1201: 0x000c, 0x1222: 0x000c, 0x1223: 0x000c, 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c, 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c, // Block 0x49, offset 0x1240 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c, 0x126d: 0x000c, 0x126f: 0x000c, 0x1270: 0x000c, 0x1271: 0x000c, // Block 0x4a, offset 0x1280 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c, 0x12b0: 0x000c, 0x12b1: 0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c, 0x12b6: 0x000c, 0x12b7: 0x000c, // Block 0x4b, offset 0x12c0 0x12d0: 0x000c, 0x12d1: 0x000c, 0x12d2: 0x000c, 0x12d4: 
0x000c, 0x12d5: 0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c, 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c, 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c, 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c, 0x12ed: 0x000c, 0x12f4: 0x000c, 0x12f8: 0x000c, 0x12f9: 0x000c, // Block 0x4c, offset 0x1300 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c, 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c, 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c, 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c, 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c, 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c, 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c, 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c, 0x133b: 0x000c, 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c, // Block 0x4d, offset 0x1340 0x137d: 0x000a, 0x137f: 0x000a, // Block 0x4e, offset 0x1380 0x1380: 0x000a, 0x1381: 0x000a, 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a, 0x139d: 0x000a, 0x139e: 0x000a, 0x139f: 0x000a, 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a, 0x13bd: 0x000a, 0x13be: 0x000a, // Block 0x4f, offset 0x13c0 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009, 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b, 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a, 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a, 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a, 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a, 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 0x13e9: 0x0007, 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006, 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a, 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a, 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a, // Block 0x50, offset 0x1400 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a, 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a, 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a, 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, 0x141e: 0x000a, 0x141f: 0x0009, 0x1420: 0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b, 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e, 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 0x000b, 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002, 0x1436: 0x0002, 0x1437: 
0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003, 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a, // Block 0x51, offset 0x1440 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002, 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003, 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a, 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004, 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004, 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004, 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004, 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004, 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004, // Block 0x52, offset 0x1480 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004, 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004, 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c, 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c, 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c, 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c, 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c, 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c, 0x14b0: 0x000c, // Block 0x53, offset 0x14c0 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a, 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a, 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a, 0x14d8: 0x000a, 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a, 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a, 0x14ee: 0x0004, 0x14fa: 0x000a, 0x14fb: 0x000a, // Block 0x54, offset 0x1500 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a, 0x150a: 0x000a, 0x150b: 0x000a, 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a, 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a, 0x151e: 0x000a, 0x151f: 0x000a, // Block 0x55, offset 0x1540 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a, 0x1550: 0x000a, 0x1551: 0x000a, 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a, 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 0x000a, 0x156f: 0x000a, 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a, 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a, 0x157c: 0x000a, 0x157d: 0x000a, 0x157e: 0x000a, 0x157f: 0x000a, // Block 0x56, offset 0x1580 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a, 0x1586: 0x000a, 0x1587: 
0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, // Block 0x57, offset 0x15c0 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, // Block 0x58, offset 0x1600 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 0x002a, 0x160a: 0x003a, 0x160b: 0x002a, 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a, 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, // Block 0x59, offset 0x1640 0x167b: 0x000a, 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, // Block 0x5a, offset 0x1680 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a, 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a, 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, 
0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a, 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a, 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, // Block 0x5b, offset 0x16c0 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, 0x16e7: 0x000a, 0x16e8: 0x000a, 0x16e9: 0x000a, 0x16ea: 0x000a, 0x16eb: 0x000a, 0x16ec: 0x000a, 0x16ed: 0x000a, 0x16ee: 0x000a, 0x16ef: 0x000a, 0x16f0: 0x000a, 0x16f1: 0x000a, 0x16f2: 0x000a, 0x16f3: 0x000a, 0x16f4: 0x000a, 0x16f5: 0x000a, 0x16f6: 0x000a, 0x16f7: 0x000a, 0x16f8: 0x000a, 0x16f9: 0x000a, 0x16fa: 0x000a, 0x16fb: 0x000a, 0x16fc: 0x000a, 0x16fd: 0x000a, 0x16fe: 0x000a, // Block 0x5c, offset 0x1700 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, 0x170b: 0x000a, 0x170c: 0x000a, 0x170d: 0x000a, 0x170e: 0x000a, 0x170f: 0x000a, 0x1710: 0x000a, 0x1711: 0x000a, 0x1712: 0x000a, 0x1713: 0x000a, 0x1714: 0x000a, 0x1715: 0x000a, 0x1716: 0x000a, 0x1717: 0x000a, 0x1718: 0x000a, 0x1719: 0x000a, 0x171a: 0x000a, 0x171b: 0x000a, 0x171c: 0x000a, 0x171d: 0x000a, 0x171e: 0x000a, 0x171f: 0x000a, 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, // Block 0x5d, offset 0x1740 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x000a, 0x1749: 0x000a, 0x174a: 0x000a, 0x1760: 0x000a, 0x1761: 0x000a, 0x1762: 0x000a, 0x1763: 0x000a, 0x1764: 0x000a, 0x1765: 0x000a, 0x1766: 0x000a, 0x1767: 0x000a, 0x1768: 0x000a, 0x1769: 0x000a, 0x176a: 0x000a, 0x176b: 0x000a, 0x176c: 0x000a, 0x176d: 0x000a, 0x176e: 0x000a, 0x176f: 0x000a, 0x1770: 0x000a, 0x1771: 0x000a, 0x1772: 0x000a, 0x1773: 0x000a, 0x1774: 0x000a, 0x1775: 0x000a, 0x1776: 0x000a, 0x1777: 0x000a, 0x1778: 0x000a, 0x1779: 0x000a, 0x177a: 0x000a, 0x177b: 0x000a, 0x177c: 0x000a, 0x177d: 0x000a, 0x177e: 0x000a, 0x177f: 0x000a, // Block 0x5e, offset 0x1780 0x1780: 0x000a, 0x1781: 0x000a, 0x1782: 0x000a, 0x1783: 0x000a, 0x1784: 0x000a, 0x1785: 0x000a, 0x1786: 0x000a, 0x1787: 0x000a, 0x1788: 0x0002, 0x1789: 0x0002, 0x178a: 0x0002, 0x178b: 0x0002, 0x178c: 0x0002, 0x178d: 0x0002, 0x178e: 0x0002, 0x178f: 0x0002, 0x1790: 0x0002, 0x1791: 0x0002, 0x1792: 0x0002, 0x1793: 0x0002, 0x1794: 0x0002, 0x1795: 0x0002, 0x1796: 0x0002, 0x1797: 0x0002, 0x1798: 0x0002, 0x1799: 0x0002, 0x179a: 0x0002, 0x179b: 0x0002, // Block 0x5f, offset 0x17c0 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ec: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, 
0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, // Block 0x60, offset 0x1800 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x000a, 0x1829: 0x000a, 0x182a: 0x000a, 0x182b: 0x000a, 0x182d: 0x000a, 0x182e: 0x000a, 0x182f: 0x000a, 0x1830: 0x000a, 0x1831: 0x000a, 0x1832: 0x000a, 0x1833: 0x000a, 0x1834: 0x000a, 0x1835: 0x000a, 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, // Block 0x61, offset 0x1840 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x000a, 0x1846: 0x000a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x000a, 0x1867: 0x000a, 0x1868: 0x003a, 0x1869: 0x002a, 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a, 0x1870: 0x003a, 0x1871: 0x002a, 0x1872: 0x003a, 0x1873: 0x002a, 0x1874: 0x003a, 0x1875: 0x002a, 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, // Block 0x62, offset 0x1880 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x000a, 0x1884: 0x000a, 0x1885: 0x009a, 0x1886: 0x008a, 0x1887: 0x000a, 0x1888: 0x000a, 0x1889: 0x000a, 0x188a: 0x000a, 0x188b: 0x000a, 0x188c: 0x000a, 0x188d: 0x000a, 0x188e: 0x000a, 0x188f: 0x000a, 0x1890: 0x000a, 0x1891: 0x000a, 0x1892: 0x000a, 0x1893: 0x000a, 0x1894: 0x000a, 0x1895: 0x000a, 0x1896: 0x000a, 0x1897: 0x000a, 0x1898: 0x000a, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x003a, 0x18a7: 0x002a, 0x18a8: 0x003a, 0x18a9: 0x002a, 0x18aa: 0x003a, 0x18ab: 0x002a, 0x18ac: 0x003a, 0x18ad: 0x002a, 0x18ae: 0x003a, 0x18af: 0x002a, 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a, 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, // Block 0x63, offset 0x18c0 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x007a, 0x18c4: 0x006a, 0x18c5: 0x009a, 0x18c6: 0x008a, 0x18c7: 0x00ba, 0x18c8: 0x00aa, 0x18c9: 0x009a, 0x18ca: 0x008a, 0x18cb: 0x007a, 0x18cc: 0x006a, 0x18cd: 
0x00da, 0x18ce: 0x002a, 0x18cf: 0x003a, 0x18d0: 0x00ca, 0x18d1: 0x009a, 0x18d2: 0x008a, 0x18d3: 0x007a, 0x18d4: 0x006a, 0x18d5: 0x009a, 0x18d6: 0x008a, 0x18d7: 0x00ba, 0x18d8: 0x00aa, 0x18d9: 0x000a, 0x18da: 0x000a, 0x18db: 0x000a, 0x18dc: 0x000a, 0x18dd: 0x000a, 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, 0x18fc: 0x000a, 0x18fd: 0x000a, 0x18fe: 0x000a, 0x18ff: 0x000a, // Block 0x64, offset 0x1900 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, 0x1918: 0x003a, 0x1919: 0x002a, 0x191a: 0x003a, 0x191b: 0x002a, 0x191c: 0x000a, 0x191d: 0x000a, 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, 0x1934: 0x000a, 0x1935: 0x000a, 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, 0x193c: 0x003a, 0x193d: 0x002a, 0x193e: 0x000a, 0x193f: 0x000a, // Block 0x65, offset 0x1940 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, 0x1956: 0x000a, 0x1957: 0x000a, 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, 0x197c: 0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, // Block 0x66, offset 0x1980 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x1989: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, 
0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, 0x19b4: 0x000a, 0x19b5: 0x000a, 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, 0x19bf: 0x000a, // Block 0x67, offset 0x19c0 0x19c0: 0x000a, 0x19c1: 0x000a, 0x19c2: 0x000a, 0x19c3: 0x000a, 0x19c4: 0x000a, 0x19c5: 0x000a, 0x19c6: 0x000a, 0x19c7: 0x000a, 0x19c8: 0x000a, 0x19ca: 0x000a, 0x19cb: 0x000a, 0x19cc: 0x000a, 0x19cd: 0x000a, 0x19ce: 0x000a, 0x19cf: 0x000a, 0x19d0: 0x000a, 0x19d1: 0x000a, 0x19ec: 0x000a, 0x19ed: 0x000a, 0x19ee: 0x000a, 0x19ef: 0x000a, // Block 0x68, offset 0x1a00 0x1a25: 0x000a, 0x1a26: 0x000a, 0x1a27: 0x000a, 0x1a28: 0x000a, 0x1a29: 0x000a, 0x1a2a: 0x000a, 0x1a2f: 0x000c, 0x1a30: 0x000c, 0x1a31: 0x000c, 0x1a39: 0x000a, 0x1a3a: 0x000a, 0x1a3b: 0x000a, 0x1a3c: 0x000a, 0x1a3d: 0x000a, 0x1a3e: 0x000a, 0x1a3f: 0x000a, // Block 0x69, offset 0x1a40 0x1a7f: 0x000c, // Block 0x6a, offset 0x1a80 0x1aa0: 0x000c, 0x1aa1: 0x000c, 0x1aa2: 0x000c, 0x1aa3: 0x000c, 0x1aa4: 0x000c, 0x1aa5: 0x000c, 0x1aa6: 0x000c, 0x1aa7: 0x000c, 0x1aa8: 0x000c, 0x1aa9: 0x000c, 0x1aaa: 0x000c, 0x1aab: 0x000c, 0x1aac: 0x000c, 0x1aad: 0x000c, 0x1aae: 0x000c, 0x1aaf: 0x000c, 0x1ab0: 0x000c, 0x1ab1: 0x000c, 0x1ab2: 0x000c, 0x1ab3: 0x000c, 0x1ab4: 0x000c, 0x1ab5: 0x000c, 0x1ab6: 0x000c, 0x1ab7: 0x000c, 0x1ab8: 0x000c, 0x1ab9: 0x000c, 0x1aba: 0x000c, 0x1abb: 0x000c, 0x1abc: 0x000c, 0x1abd: 0x000c, 0x1abe: 0x000c, 0x1abf: 0x000c, // Block 0x6b, offset 0x1ac0 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, 0x1acf: 0x000a, 0x1ad0: 0x000a, 0x1ad1: 0x000a, 0x1ad2: 0x000a, 0x1ad3: 0x000a, 0x1ad4: 0x000a, 0x1ad5: 0x000a, 0x1ad6: 0x000a, 0x1ad7: 0x000a, 0x1ad8: 0x000a, 0x1ad9: 0x000a, 0x1ada: 0x000a, 0x1adb: 0x000a, 0x1adc: 0x000a, 0x1add: 0x000a, 0x1ade: 0x000a, 0x1adf: 0x000a, 0x1ae0: 0x000a, 0x1ae1: 0x000a, 0x1ae2: 0x003a, 0x1ae3: 0x002a, 0x1ae4: 0x003a, 0x1ae5: 0x002a, 0x1ae6: 0x003a, 0x1ae7: 0x002a, 0x1ae8: 0x003a, 0x1ae9: 0x002a, 0x1aea: 0x000a, 0x1aeb: 0x000a, 0x1aec: 0x000a, 0x1aed: 0x000a, 0x1aee: 0x000a, 0x1aef: 0x000a, 0x1af0: 0x000a, 0x1af1: 0x000a, 0x1af2: 0x000a, 0x1af3: 0x000a, 0x1af4: 0x000a, 0x1af5: 0x000a, 0x1af6: 0x000a, 0x1af7: 0x000a, 0x1af8: 0x000a, 0x1af9: 0x000a, 0x1afa: 0x000a, 0x1afb: 0x000a, 0x1afc: 0x000a, 0x1afd: 0x000a, 0x1afe: 0x000a, 0x1aff: 0x000a, // Block 0x6c, offset 0x1b00 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, // Block 0x6d, offset 0x1b40 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, 0x1b74: 
0x000a, 0x1b75: 0x000a, 0x1b76: 0x000a, 0x1b77: 0x000a, 0x1b78: 0x000a, 0x1b79: 0x000a, 0x1b7a: 0x000a, 0x1b7b: 0x000a, 0x1b7c: 0x000a, 0x1b7d: 0x000a, 0x1b7e: 0x000a, 0x1b7f: 0x000a, // Block 0x6e, offset 0x1b80 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, 0x1b96: 0x000a, 0x1b97: 0x000a, 0x1b98: 0x000a, 0x1b99: 0x000a, 0x1b9a: 0x000a, 0x1b9b: 0x000a, 0x1b9c: 0x000a, 0x1b9d: 0x000a, 0x1b9e: 0x000a, 0x1b9f: 0x000a, 0x1ba0: 0x000a, 0x1ba1: 0x000a, 0x1ba2: 0x000a, 0x1ba3: 0x000a, 0x1ba4: 0x000a, 0x1ba5: 0x000a, 0x1ba6: 0x000a, 0x1ba7: 0x000a, 0x1ba8: 0x000a, 0x1ba9: 0x000a, 0x1baa: 0x000a, 0x1bab: 0x000a, 0x1bac: 0x000a, 0x1bad: 0x000a, 0x1bae: 0x000a, 0x1baf: 0x000a, 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, // Block 0x6f, offset 0x1bc0 0x1bc0: 0x000a, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, 0x1bc5: 0x000a, 0x1bc6: 0x000a, 0x1bc7: 0x000a, 0x1bc8: 0x000a, 0x1bc9: 0x000a, 0x1bca: 0x000a, 0x1bcb: 0x000a, 0x1bcc: 0x000a, 0x1bcd: 0x000a, 0x1bce: 0x000a, 0x1bcf: 0x000a, 0x1bd0: 0x000a, 0x1bd1: 0x000a, 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x000a, 0x1bd5: 0x000a, 0x1bf0: 0x000a, 0x1bf1: 0x000a, 0x1bf2: 0x000a, 0x1bf3: 0x000a, 0x1bf4: 0x000a, 0x1bf5: 0x000a, 0x1bf6: 0x000a, 0x1bf7: 0x000a, 0x1bf8: 0x000a, 0x1bf9: 0x000a, 0x1bfa: 0x000a, 0x1bfb: 0x000a, // Block 0x70, offset 0x1c00 0x1c00: 0x0009, 0x1c01: 0x000a, 0x1c02: 0x000a, 0x1c03: 0x000a, 0x1c04: 0x000a, 0x1c08: 0x003a, 0x1c09: 0x002a, 0x1c0a: 0x003a, 0x1c0b: 0x002a, 0x1c0c: 0x003a, 0x1c0d: 0x002a, 0x1c0e: 0x003a, 0x1c0f: 0x002a, 0x1c10: 0x003a, 0x1c11: 0x002a, 0x1c12: 0x000a, 0x1c13: 0x000a, 0x1c14: 0x003a, 0x1c15: 0x002a, 0x1c16: 0x003a, 0x1c17: 0x002a, 0x1c18: 0x003a, 0x1c19: 0x002a, 0x1c1a: 0x003a, 0x1c1b: 0x002a, 0x1c1c: 0x000a, 0x1c1d: 0x000a, 0x1c1e: 0x000a, 0x1c1f: 0x000a, 0x1c20: 0x000a, 0x1c2a: 0x000c, 0x1c2b: 0x000c, 0x1c2c: 0x000c, 0x1c2d: 0x000c, 0x1c30: 0x000a, 0x1c36: 0x000a, 0x1c37: 0x000a, 0x1c3d: 0x000a, 0x1c3e: 0x000a, 0x1c3f: 0x000a, // Block 0x71, offset 0x1c40 0x1c59: 0x000c, 0x1c5a: 0x000c, 0x1c5b: 0x000a, 0x1c5c: 0x000a, 0x1c60: 0x000a, // Block 0x72, offset 0x1c80 0x1cbb: 0x000a, // Block 0x73, offset 0x1cc0 0x1cc0: 0x000a, 0x1cc1: 0x000a, 0x1cc2: 0x000a, 0x1cc3: 0x000a, 0x1cc4: 0x000a, 0x1cc5: 0x000a, 0x1cc6: 0x000a, 0x1cc7: 0x000a, 0x1cc8: 0x000a, 0x1cc9: 0x000a, 0x1cca: 0x000a, 0x1ccb: 0x000a, 0x1ccc: 0x000a, 0x1ccd: 0x000a, 0x1cce: 0x000a, 0x1ccf: 0x000a, 0x1cd0: 0x000a, 0x1cd1: 0x000a, 0x1cd2: 0x000a, 0x1cd3: 0x000a, 0x1cd4: 0x000a, 0x1cd5: 0x000a, 0x1cd6: 0x000a, 0x1cd7: 0x000a, 0x1cd8: 0x000a, 0x1cd9: 0x000a, 0x1cda: 0x000a, 0x1cdb: 0x000a, 0x1cdc: 0x000a, 0x1cdd: 0x000a, 0x1cde: 0x000a, 0x1cdf: 0x000a, 0x1ce0: 0x000a, 0x1ce1: 0x000a, 0x1ce2: 0x000a, 0x1ce3: 0x000a, // Block 0x74, offset 0x1d00 0x1d1d: 0x000a, 0x1d1e: 0x000a, // Block 0x75, offset 0x1d40 0x1d50: 0x000a, 0x1d51: 0x000a, 0x1d52: 0x000a, 0x1d53: 0x000a, 0x1d54: 0x000a, 0x1d55: 0x000a, 0x1d56: 0x000a, 0x1d57: 0x000a, 0x1d58: 0x000a, 0x1d59: 0x000a, 0x1d5a: 0x000a, 0x1d5b: 0x000a, 0x1d5c: 0x000a, 0x1d5d: 0x000a, 0x1d5e: 0x000a, 0x1d5f: 0x000a, 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, // Block 0x76, offset 0x1d80 0x1db1: 0x000a, 0x1db2: 0x000a, 0x1db3: 0x000a, 0x1db4: 0x000a, 0x1db5: 0x000a, 
0x1db6: 0x000a, 0x1db7: 0x000a, 0x1db8: 0x000a, 0x1db9: 0x000a, 0x1dba: 0x000a, 0x1dbb: 0x000a, 0x1dbc: 0x000a, 0x1dbd: 0x000a, 0x1dbe: 0x000a, 0x1dbf: 0x000a, // Block 0x77, offset 0x1dc0 0x1dcc: 0x000a, 0x1dcd: 0x000a, 0x1dce: 0x000a, 0x1dcf: 0x000a, // Block 0x78, offset 0x1e00 0x1e37: 0x000a, 0x1e38: 0x000a, 0x1e39: 0x000a, 0x1e3a: 0x000a, // Block 0x79, offset 0x1e40 0x1e5e: 0x000a, 0x1e5f: 0x000a, 0x1e7f: 0x000a, // Block 0x7a, offset 0x1e80 0x1e90: 0x000a, 0x1e91: 0x000a, 0x1e92: 0x000a, 0x1e93: 0x000a, 0x1e94: 0x000a, 0x1e95: 0x000a, 0x1e96: 0x000a, 0x1e97: 0x000a, 0x1e98: 0x000a, 0x1e99: 0x000a, 0x1e9a: 0x000a, 0x1e9b: 0x000a, 0x1e9c: 0x000a, 0x1e9d: 0x000a, 0x1e9e: 0x000a, 0x1e9f: 0x000a, 0x1ea0: 0x000a, 0x1ea1: 0x000a, 0x1ea2: 0x000a, 0x1ea3: 0x000a, 0x1ea4: 0x000a, 0x1ea5: 0x000a, 0x1ea6: 0x000a, 0x1ea7: 0x000a, 0x1ea8: 0x000a, 0x1ea9: 0x000a, 0x1eaa: 0x000a, 0x1eab: 0x000a, 0x1eac: 0x000a, 0x1ead: 0x000a, 0x1eae: 0x000a, 0x1eaf: 0x000a, 0x1eb0: 0x000a, 0x1eb1: 0x000a, 0x1eb2: 0x000a, 0x1eb3: 0x000a, 0x1eb4: 0x000a, 0x1eb5: 0x000a, 0x1eb6: 0x000a, 0x1eb7: 0x000a, 0x1eb8: 0x000a, 0x1eb9: 0x000a, 0x1eba: 0x000a, 0x1ebb: 0x000a, 0x1ebc: 0x000a, 0x1ebd: 0x000a, 0x1ebe: 0x000a, 0x1ebf: 0x000a, // Block 0x7b, offset 0x1ec0 0x1ec0: 0x000a, 0x1ec1: 0x000a, 0x1ec2: 0x000a, 0x1ec3: 0x000a, 0x1ec4: 0x000a, 0x1ec5: 0x000a, 0x1ec6: 0x000a, // Block 0x7c, offset 0x1f00 0x1f0d: 0x000a, 0x1f0e: 0x000a, 0x1f0f: 0x000a, // Block 0x7d, offset 0x1f40 0x1f6f: 0x000c, 0x1f70: 0x000c, 0x1f71: 0x000c, 0x1f72: 0x000c, 0x1f73: 0x000a, 0x1f74: 0x000c, 0x1f75: 0x000c, 0x1f76: 0x000c, 0x1f77: 0x000c, 0x1f78: 0x000c, 0x1f79: 0x000c, 0x1f7a: 0x000c, 0x1f7b: 0x000c, 0x1f7c: 0x000c, 0x1f7d: 0x000c, 0x1f7e: 0x000a, 0x1f7f: 0x000a, // Block 0x7e, offset 0x1f80 0x1f9e: 0x000c, 0x1f9f: 0x000c, // Block 0x7f, offset 0x1fc0 0x1ff0: 0x000c, 0x1ff1: 0x000c, // Block 0x80, offset 0x2000 0x2000: 0x000a, 0x2001: 0x000a, 0x2002: 0x000a, 0x2003: 0x000a, 0x2004: 0x000a, 0x2005: 0x000a, 0x2006: 0x000a, 0x2007: 0x000a, 0x2008: 0x000a, 0x2009: 0x000a, 0x200a: 0x000a, 0x200b: 0x000a, 0x200c: 0x000a, 0x200d: 0x000a, 0x200e: 0x000a, 0x200f: 0x000a, 0x2010: 0x000a, 0x2011: 0x000a, 0x2012: 0x000a, 0x2013: 0x000a, 0x2014: 0x000a, 0x2015: 0x000a, 0x2016: 0x000a, 0x2017: 0x000a, 0x2018: 0x000a, 0x2019: 0x000a, 0x201a: 0x000a, 0x201b: 0x000a, 0x201c: 0x000a, 0x201d: 0x000a, 0x201e: 0x000a, 0x201f: 0x000a, 0x2020: 0x000a, 0x2021: 0x000a, // Block 0x81, offset 0x2040 0x2048: 0x000a, // Block 0x82, offset 0x2080 0x2082: 0x000c, 0x2086: 0x000c, 0x208b: 0x000c, 0x20a5: 0x000c, 0x20a6: 0x000c, 0x20a8: 0x000a, 0x20a9: 0x000a, 0x20aa: 0x000a, 0x20ab: 0x000a, 0x20b8: 0x0004, 0x20b9: 0x0004, // Block 0x83, offset 0x20c0 0x20f4: 0x000a, 0x20f5: 0x000a, 0x20f6: 0x000a, 0x20f7: 0x000a, // Block 0x84, offset 0x2100 0x2104: 0x000c, 0x2105: 0x000c, 0x2120: 0x000c, 0x2121: 0x000c, 0x2122: 0x000c, 0x2123: 0x000c, 0x2124: 0x000c, 0x2125: 0x000c, 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, 0x212e: 0x000c, 0x212f: 0x000c, 0x2130: 0x000c, 0x2131: 0x000c, // Block 0x85, offset 0x2140 0x2166: 0x000c, 0x2167: 0x000c, 0x2168: 0x000c, 0x2169: 0x000c, 0x216a: 0x000c, 0x216b: 0x000c, 0x216c: 0x000c, 0x216d: 0x000c, // Block 0x86, offset 0x2180 0x2187: 0x000c, 0x2188: 0x000c, 0x2189: 0x000c, 0x218a: 0x000c, 0x218b: 0x000c, 0x218c: 0x000c, 0x218d: 0x000c, 0x218e: 0x000c, 0x218f: 0x000c, 0x2190: 0x000c, 0x2191: 0x000c, // Block 0x87, offset 0x21c0 0x21c0: 0x000c, 0x21c1: 0x000c, 
0x21c2: 0x000c, 0x21f3: 0x000c, 0x21f6: 0x000c, 0x21f7: 0x000c, 0x21f8: 0x000c, 0x21f9: 0x000c, 0x21fc: 0x000c, // Block 0x88, offset 0x2200 0x2225: 0x000c, // Block 0x89, offset 0x2240 0x2269: 0x000c, 0x226a: 0x000c, 0x226b: 0x000c, 0x226c: 0x000c, 0x226d: 0x000c, 0x226e: 0x000c, 0x2271: 0x000c, 0x2272: 0x000c, 0x2275: 0x000c, 0x2276: 0x000c, // Block 0x8a, offset 0x2280 0x2283: 0x000c, 0x228c: 0x000c, 0x22bc: 0x000c, // Block 0x8b, offset 0x22c0 0x22f0: 0x000c, 0x22f2: 0x000c, 0x22f3: 0x000c, 0x22f4: 0x000c, 0x22f7: 0x000c, 0x22f8: 0x000c, 0x22fe: 0x000c, 0x22ff: 0x000c, // Block 0x8c, offset 0x2300 0x2301: 0x000c, 0x232c: 0x000c, 0x232d: 0x000c, 0x2336: 0x000c, // Block 0x8d, offset 0x2340 0x2365: 0x000c, 0x2368: 0x000c, 0x236d: 0x000c, // Block 0x8e, offset 0x2380 0x239d: 0x0001, 0x239e: 0x000c, 0x239f: 0x0001, 0x23a0: 0x0001, 0x23a1: 0x0001, 0x23a2: 0x0001, 0x23a3: 0x0001, 0x23a4: 0x0001, 0x23a5: 0x0001, 0x23a6: 0x0001, 0x23a7: 0x0001, 0x23a8: 0x0001, 0x23a9: 0x0003, 0x23aa: 0x0001, 0x23ab: 0x0001, 0x23ac: 0x0001, 0x23ad: 0x0001, 0x23ae: 0x0001, 0x23af: 0x0001, 0x23b0: 0x0001, 0x23b1: 0x0001, 0x23b2: 0x0001, 0x23b3: 0x0001, 0x23b4: 0x0001, 0x23b5: 0x0001, 0x23b6: 0x0001, 0x23b7: 0x0001, 0x23b8: 0x0001, 0x23b9: 0x0001, 0x23ba: 0x0001, 0x23bb: 0x0001, 0x23bc: 0x0001, 0x23bd: 0x0001, 0x23be: 0x0001, 0x23bf: 0x0001, // Block 0x8f, offset 0x23c0 0x23c0: 0x0001, 0x23c1: 0x0001, 0x23c2: 0x0001, 0x23c3: 0x0001, 0x23c4: 0x0001, 0x23c5: 0x0001, 0x23c6: 0x0001, 0x23c7: 0x0001, 0x23c8: 0x0001, 0x23c9: 0x0001, 0x23ca: 0x0001, 0x23cb: 0x0001, 0x23cc: 0x0001, 0x23cd: 0x0001, 0x23ce: 0x0001, 0x23cf: 0x0001, 0x23d0: 0x000d, 0x23d1: 0x000d, 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d, 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d, 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d, 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d, 0x23ea: 0x000d, 0x23eb: 0x000d, 0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d, 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000d, 0x23ff: 0x000d, // Block 0x90, offset 0x2400 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d, 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 0x000d, 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000d, 0x2411: 0x000d, 0x2412: 0x000d, 0x2413: 0x000d, 0x2414: 0x000d, 0x2415: 0x000d, 0x2416: 0x000d, 0x2417: 0x000d, 0x2418: 0x000d, 0x2419: 0x000d, 0x241a: 0x000d, 0x241b: 0x000d, 0x241c: 0x000d, 0x241d: 0x000d, 0x241e: 0x000d, 0x241f: 0x000d, 0x2420: 0x000d, 0x2421: 0x000d, 0x2422: 0x000d, 0x2423: 0x000d, 0x2424: 0x000d, 0x2425: 0x000d, 0x2426: 0x000d, 0x2427: 0x000d, 0x2428: 0x000d, 0x2429: 0x000d, 0x242a: 0x000d, 0x242b: 0x000d, 0x242c: 0x000d, 0x242d: 0x000d, 0x242e: 0x000d, 0x242f: 0x000d, 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, 0x243c: 0x000d, 0x243d: 0x000d, 0x243e: 0x000a, 0x243f: 0x000a, // Block 0x91, offset 0x2440 0x2440: 0x000d, 0x2441: 0x000d, 0x2442: 0x000d, 0x2443: 0x000d, 0x2444: 0x000d, 
0x2445: 0x000d, 0x2446: 0x000d, 0x2447: 0x000d, 0x2448: 0x000d, 0x2449: 0x000d, 0x244a: 0x000d, 0x244b: 0x000d, 0x244c: 0x000d, 0x244d: 0x000d, 0x244e: 0x000d, 0x244f: 0x000d, 0x2450: 0x000b, 0x2451: 0x000b, 0x2452: 0x000b, 0x2453: 0x000b, 0x2454: 0x000b, 0x2455: 0x000b, 0x2456: 0x000b, 0x2457: 0x000b, 0x2458: 0x000b, 0x2459: 0x000b, 0x245a: 0x000b, 0x245b: 0x000b, 0x245c: 0x000b, 0x245d: 0x000b, 0x245e: 0x000b, 0x245f: 0x000b, 0x2460: 0x000b, 0x2461: 0x000b, 0x2462: 0x000b, 0x2463: 0x000b, 0x2464: 0x000b, 0x2465: 0x000b, 0x2466: 0x000b, 0x2467: 0x000b, 0x2468: 0x000b, 0x2469: 0x000b, 0x246a: 0x000b, 0x246b: 0x000b, 0x246c: 0x000b, 0x246d: 0x000b, 0x246e: 0x000b, 0x246f: 0x000b, 0x2470: 0x000d, 0x2471: 0x000d, 0x2472: 0x000d, 0x2473: 0x000d, 0x2474: 0x000d, 0x2475: 0x000d, 0x2476: 0x000d, 0x2477: 0x000d, 0x2478: 0x000d, 0x2479: 0x000d, 0x247a: 0x000d, 0x247b: 0x000d, 0x247c: 0x000d, 0x247d: 0x000a, 0x247e: 0x000d, 0x247f: 0x000d, // Block 0x92, offset 0x2480 0x2480: 0x000c, 0x2481: 0x000c, 0x2482: 0x000c, 0x2483: 0x000c, 0x2484: 0x000c, 0x2485: 0x000c, 0x2486: 0x000c, 0x2487: 0x000c, 0x2488: 0x000c, 0x2489: 0x000c, 0x248a: 0x000c, 0x248b: 0x000c, 0x248c: 0x000c, 0x248d: 0x000c, 0x248e: 0x000c, 0x248f: 0x000c, 0x2490: 0x000a, 0x2491: 0x000a, 0x2492: 0x000a, 0x2493: 0x000a, 0x2494: 0x000a, 0x2495: 0x000a, 0x2496: 0x000a, 0x2497: 0x000a, 0x2498: 0x000a, 0x2499: 0x000a, 0x24a0: 0x000c, 0x24a1: 0x000c, 0x24a2: 0x000c, 0x24a3: 0x000c, 0x24a4: 0x000c, 0x24a5: 0x000c, 0x24a6: 0x000c, 0x24a7: 0x000c, 0x24a8: 0x000c, 0x24a9: 0x000c, 0x24aa: 0x000c, 0x24ab: 0x000c, 0x24ac: 0x000c, 0x24ad: 0x000c, 0x24ae: 0x000c, 0x24af: 0x000c, 0x24b0: 0x000a, 0x24b1: 0x000a, 0x24b2: 0x000a, 0x24b3: 0x000a, 0x24b4: 0x000a, 0x24b5: 0x000a, 0x24b6: 0x000a, 0x24b7: 0x000a, 0x24b8: 0x000a, 0x24b9: 0x000a, 0x24ba: 0x000a, 0x24bb: 0x000a, 0x24bc: 0x000a, 0x24bd: 0x000a, 0x24be: 0x000a, 0x24bf: 0x000a, // Block 0x93, offset 0x24c0 0x24c0: 0x000a, 0x24c1: 0x000a, 0x24c2: 0x000a, 0x24c3: 0x000a, 0x24c4: 0x000a, 0x24c5: 0x000a, 0x24c6: 0x000a, 0x24c7: 0x000a, 0x24c8: 0x000a, 0x24c9: 0x000a, 0x24ca: 0x000a, 0x24cb: 0x000a, 0x24cc: 0x000a, 0x24cd: 0x000a, 0x24ce: 0x000a, 0x24cf: 0x000a, 0x24d0: 0x0006, 0x24d1: 0x000a, 0x24d2: 0x0006, 0x24d4: 0x000a, 0x24d5: 0x0006, 0x24d6: 0x000a, 0x24d7: 0x000a, 0x24d8: 0x000a, 0x24d9: 0x009a, 0x24da: 0x008a, 0x24db: 0x007a, 0x24dc: 0x006a, 0x24dd: 0x009a, 0x24de: 0x008a, 0x24df: 0x0004, 0x24e0: 0x000a, 0x24e1: 0x000a, 0x24e2: 0x0003, 0x24e3: 0x0003, 0x24e4: 0x000a, 0x24e5: 0x000a, 0x24e6: 0x000a, 0x24e8: 0x000a, 0x24e9: 0x0004, 0x24ea: 0x0004, 0x24eb: 0x000a, 0x24f0: 0x000d, 0x24f1: 0x000d, 0x24f2: 0x000d, 0x24f3: 0x000d, 0x24f4: 0x000d, 0x24f5: 0x000d, 0x24f6: 0x000d, 0x24f7: 0x000d, 0x24f8: 0x000d, 0x24f9: 0x000d, 0x24fa: 0x000d, 0x24fb: 0x000d, 0x24fc: 0x000d, 0x24fd: 0x000d, 0x24fe: 0x000d, 0x24ff: 0x000d, // Block 0x94, offset 0x2500 0x2500: 0x000d, 0x2501: 0x000d, 0x2502: 0x000d, 0x2503: 0x000d, 0x2504: 0x000d, 0x2505: 0x000d, 0x2506: 0x000d, 0x2507: 0x000d, 0x2508: 0x000d, 0x2509: 0x000d, 0x250a: 0x000d, 0x250b: 0x000d, 0x250c: 0x000d, 0x250d: 0x000d, 0x250e: 0x000d, 0x250f: 0x000d, 0x2510: 0x000d, 0x2511: 0x000d, 0x2512: 0x000d, 0x2513: 0x000d, 0x2514: 0x000d, 0x2515: 0x000d, 0x2516: 0x000d, 0x2517: 0x000d, 0x2518: 0x000d, 0x2519: 0x000d, 0x251a: 0x000d, 0x251b: 0x000d, 0x251c: 0x000d, 0x251d: 0x000d, 0x251e: 0x000d, 0x251f: 0x000d, 0x2520: 0x000d, 0x2521: 0x000d, 0x2522: 0x000d, 0x2523: 0x000d, 0x2524: 0x000d, 0x2525: 0x000d, 0x2526: 0x000d, 0x2527: 0x000d, 0x2528: 0x000d, 0x2529: 
0x000d, 0x252a: 0x000d, 0x252b: 0x000d, 0x252c: 0x000d, 0x252d: 0x000d, 0x252e: 0x000d, 0x252f: 0x000d, 0x2530: 0x000d, 0x2531: 0x000d, 0x2532: 0x000d, 0x2533: 0x000d, 0x2534: 0x000d, 0x2535: 0x000d, 0x2536: 0x000d, 0x2537: 0x000d, 0x2538: 0x000d, 0x2539: 0x000d, 0x253a: 0x000d, 0x253b: 0x000d, 0x253c: 0x000d, 0x253d: 0x000d, 0x253e: 0x000d, 0x253f: 0x000b, // Block 0x95, offset 0x2540 0x2541: 0x000a, 0x2542: 0x000a, 0x2543: 0x0004, 0x2544: 0x0004, 0x2545: 0x0004, 0x2546: 0x000a, 0x2547: 0x000a, 0x2548: 0x003a, 0x2549: 0x002a, 0x254a: 0x000a, 0x254b: 0x0003, 0x254c: 0x0006, 0x254d: 0x0003, 0x254e: 0x0006, 0x254f: 0x0006, 0x2550: 0x0002, 0x2551: 0x0002, 0x2552: 0x0002, 0x2553: 0x0002, 0x2554: 0x0002, 0x2555: 0x0002, 0x2556: 0x0002, 0x2557: 0x0002, 0x2558: 0x0002, 0x2559: 0x0002, 0x255a: 0x0006, 0x255b: 0x000a, 0x255c: 0x000a, 0x255d: 0x000a, 0x255e: 0x000a, 0x255f: 0x000a, 0x2560: 0x000a, 0x257b: 0x005a, 0x257c: 0x000a, 0x257d: 0x004a, 0x257e: 0x000a, 0x257f: 0x000a, // Block 0x96, offset 0x2580 0x2580: 0x000a, 0x259b: 0x005a, 0x259c: 0x000a, 0x259d: 0x004a, 0x259e: 0x000a, 0x259f: 0x00fa, 0x25a0: 0x00ea, 0x25a1: 0x000a, 0x25a2: 0x003a, 0x25a3: 0x002a, 0x25a4: 0x000a, 0x25a5: 0x000a, // Block 0x97, offset 0x25c0 0x25e0: 0x0004, 0x25e1: 0x0004, 0x25e2: 0x000a, 0x25e3: 0x000a, 0x25e4: 0x000a, 0x25e5: 0x0004, 0x25e6: 0x0004, 0x25e8: 0x000a, 0x25e9: 0x000a, 0x25ea: 0x000a, 0x25eb: 0x000a, 0x25ec: 0x000a, 0x25ed: 0x000a, 0x25ee: 0x000a, 0x25f0: 0x000b, 0x25f1: 0x000b, 0x25f2: 0x000b, 0x25f3: 0x000b, 0x25f4: 0x000b, 0x25f5: 0x000b, 0x25f6: 0x000b, 0x25f7: 0x000b, 0x25f8: 0x000b, 0x25f9: 0x000a, 0x25fa: 0x000a, 0x25fb: 0x000a, 0x25fc: 0x000a, 0x25fd: 0x000a, 0x25fe: 0x000b, 0x25ff: 0x000b, // Block 0x98, offset 0x2600 0x2601: 0x000a, // Block 0x99, offset 0x2640 0x2640: 0x000a, 0x2641: 0x000a, 0x2642: 0x000a, 0x2643: 0x000a, 0x2644: 0x000a, 0x2645: 0x000a, 0x2646: 0x000a, 0x2647: 0x000a, 0x2648: 0x000a, 0x2649: 0x000a, 0x264a: 0x000a, 0x264b: 0x000a, 0x264c: 0x000a, 0x2650: 0x000a, 0x2651: 0x000a, 0x2652: 0x000a, 0x2653: 0x000a, 0x2654: 0x000a, 0x2655: 0x000a, 0x2656: 0x000a, 0x2657: 0x000a, 0x2658: 0x000a, 0x2659: 0x000a, 0x265a: 0x000a, 0x265b: 0x000a, 0x2660: 0x000a, // Block 0x9a, offset 0x2680 0x26bd: 0x000c, // Block 0x9b, offset 0x26c0 0x26e0: 0x000c, 0x26e1: 0x0002, 0x26e2: 0x0002, 0x26e3: 0x0002, 0x26e4: 0x0002, 0x26e5: 0x0002, 0x26e6: 0x0002, 0x26e7: 0x0002, 0x26e8: 0x0002, 0x26e9: 0x0002, 0x26ea: 0x0002, 0x26eb: 0x0002, 0x26ec: 0x0002, 0x26ed: 0x0002, 0x26ee: 0x0002, 0x26ef: 0x0002, 0x26f0: 0x0002, 0x26f1: 0x0002, 0x26f2: 0x0002, 0x26f3: 0x0002, 0x26f4: 0x0002, 0x26f5: 0x0002, 0x26f6: 0x0002, 0x26f7: 0x0002, 0x26f8: 0x0002, 0x26f9: 0x0002, 0x26fa: 0x0002, 0x26fb: 0x0002, // Block 0x9c, offset 0x2700 0x2736: 0x000c, 0x2737: 0x000c, 0x2738: 0x000c, 0x2739: 0x000c, 0x273a: 0x000c, // Block 0x9d, offset 0x2740 0x2740: 0x0001, 0x2741: 0x0001, 0x2742: 0x0001, 0x2743: 0x0001, 0x2744: 0x0001, 0x2745: 0x0001, 0x2746: 0x0001, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001, 0x274c: 0x0001, 0x274d: 0x0001, 0x274e: 0x0001, 0x274f: 0x0001, 0x2750: 0x0001, 0x2751: 0x0001, 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001, 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001, 0x275e: 0x0001, 0x275f: 0x0001, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001, 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001, 0x276a: 0x0001, 
0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001, 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001, 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x0001, 0x2779: 0x0001, 0x277a: 0x0001, 0x277b: 0x0001, 0x277c: 0x0001, 0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x0001, // Block 0x9e, offset 0x2780 0x2780: 0x0001, 0x2781: 0x0001, 0x2782: 0x0001, 0x2783: 0x0001, 0x2784: 0x0001, 0x2785: 0x0001, 0x2786: 0x0001, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, 0x278c: 0x0001, 0x278d: 0x0001, 0x278e: 0x0001, 0x278f: 0x0001, 0x2790: 0x0001, 0x2791: 0x0001, 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, 0x279e: 0x0001, 0x279f: 0x000a, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x0001, 0x27b9: 0x0001, 0x27ba: 0x0001, 0x27bb: 0x0001, 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x0001, // Block 0x9f, offset 0x27c0 0x27c0: 0x0001, 0x27c1: 0x000c, 0x27c2: 0x000c, 0x27c3: 0x000c, 0x27c4: 0x0001, 0x27c5: 0x000c, 0x27c6: 0x000c, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, 0x27cc: 0x000c, 0x27cd: 0x000c, 0x27ce: 0x000c, 0x27cf: 0x000c, 0x27d0: 0x0001, 0x27d1: 0x0001, 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, 0x27e4: 0x0001, 0x27e5: 0x0001, 0x27e6: 0x0001, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x000c, 0x27f9: 0x000c, 0x27fa: 0x000c, 0x27fb: 0x0001, 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x000c, // Block 0xa0, offset 0x2800 0x2800: 0x0001, 0x2801: 0x0001, 0x2802: 0x0001, 0x2803: 0x0001, 0x2804: 0x0001, 0x2805: 0x0001, 0x2806: 0x0001, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, 0x280c: 0x0001, 0x280d: 0x0001, 0x280e: 0x0001, 0x280f: 0x0001, 0x2810: 0x0001, 0x2811: 0x0001, 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, 0x2824: 0x0001, 0x2825: 0x000c, 0x2826: 0x000c, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x0001, 0x2839: 0x0001, 0x283a: 0x0001, 0x283b: 0x0001, 0x283c: 0x0001, 0x283d: 0x0001, 0x283e: 0x0001, 0x283f: 0x0001, // Block 0xa1, offset 0x2840 0x2840: 0x0001, 0x2841: 
0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, 0x285e: 0x0001, 0x285f: 0x0001, 0x2860: 0x0001, 0x2861: 0x0001, 0x2862: 0x0001, 0x2863: 0x0001, 0x2864: 0x0001, 0x2865: 0x0001, 0x2866: 0x0001, 0x2867: 0x0001, 0x2868: 0x0001, 0x2869: 0x0001, 0x286a: 0x0001, 0x286b: 0x0001, 0x286c: 0x0001, 0x286d: 0x0001, 0x286e: 0x0001, 0x286f: 0x0001, 0x2870: 0x0001, 0x2871: 0x0001, 0x2872: 0x0001, 0x2873: 0x0001, 0x2874: 0x0001, 0x2875: 0x0001, 0x2876: 0x0001, 0x2877: 0x0001, 0x2878: 0x0001, 0x2879: 0x000a, 0x287a: 0x000a, 0x287b: 0x000a, 0x287c: 0x000a, 0x287d: 0x000a, 0x287e: 0x000a, 0x287f: 0x000a, // Block 0xa2, offset 0x2880 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0005, 0x28a1: 0x0005, 0x28a2: 0x0005, 0x28a3: 0x0005, 0x28a4: 0x0005, 0x28a5: 0x0005, 0x28a6: 0x0005, 0x28a7: 0x0005, 0x28a8: 0x0005, 0x28a9: 0x0005, 0x28aa: 0x0005, 0x28ab: 0x0005, 0x28ac: 0x0005, 0x28ad: 0x0005, 0x28ae: 0x0005, 0x28af: 0x0005, 0x28b0: 0x0005, 0x28b1: 0x0005, 0x28b2: 0x0005, 0x28b3: 0x0005, 0x28b4: 0x0005, 0x28b5: 0x0005, 0x28b6: 0x0005, 0x28b7: 0x0005, 0x28b8: 0x0005, 0x28b9: 0x0005, 0x28ba: 0x0005, 0x28bb: 0x0005, 0x28bc: 0x0005, 0x28bd: 0x0005, 0x28be: 0x0005, 0x28bf: 0x0001, // Block 0xa3, offset 0x28c0 0x28c1: 0x000c, 0x28f8: 0x000c, 0x28f9: 0x000c, 0x28fa: 0x000c, 0x28fb: 0x000c, 0x28fc: 0x000c, 0x28fd: 0x000c, 0x28fe: 0x000c, 0x28ff: 0x000c, // Block 0xa4, offset 0x2900 0x2900: 0x000c, 0x2901: 0x000c, 0x2902: 0x000c, 0x2903: 0x000c, 0x2904: 0x000c, 0x2905: 0x000c, 0x2906: 0x000c, 0x2912: 0x000a, 0x2913: 0x000a, 0x2914: 0x000a, 0x2915: 0x000a, 0x2916: 0x000a, 0x2917: 0x000a, 0x2918: 0x000a, 0x2919: 0x000a, 0x291a: 0x000a, 0x291b: 0x000a, 0x291c: 0x000a, 0x291d: 0x000a, 0x291e: 0x000a, 0x291f: 0x000a, 0x2920: 0x000a, 0x2921: 0x000a, 0x2922: 0x000a, 0x2923: 0x000a, 0x2924: 0x000a, 0x2925: 0x000a, 0x293f: 0x000c, // Block 0xa5, offset 0x2940 0x2940: 0x000c, 0x2941: 0x000c, 0x2973: 0x000c, 0x2974: 0x000c, 0x2975: 0x000c, 0x2976: 0x000c, 0x2979: 0x000c, 0x297a: 0x000c, // Block 0xa6, offset 0x2980 0x2980: 0x000c, 0x2981: 0x000c, 0x2982: 0x000c, 0x29a7: 0x000c, 0x29a8: 0x000c, 0x29a9: 0x000c, 0x29aa: 0x000c, 0x29ab: 0x000c, 0x29ad: 0x000c, 0x29ae: 0x000c, 0x29af: 0x000c, 0x29b0: 0x000c, 0x29b1: 0x000c, 0x29b2: 0x000c, 0x29b3: 0x000c, 0x29b4: 0x000c, // Block 0xa7, offset 0x29c0 0x29f3: 0x000c, // Block 0xa8, offset 0x2a00 0x2a00: 0x000c, 0x2a01: 0x000c, 0x2a36: 0x000c, 0x2a37: 0x000c, 0x2a38: 0x000c, 0x2a39: 0x000c, 0x2a3a: 0x000c, 0x2a3b: 0x000c, 0x2a3c: 0x000c, 0x2a3d: 0x000c, 0x2a3e: 0x000c, // Block 0xa9, offset 0x2a40 0x2a4a: 0x000c, 0x2a4b: 0x000c, 0x2a4c: 0x000c, // Block 0xaa, offset 0x2a80 0x2aaf: 0x000c, 0x2ab0: 0x000c, 0x2ab1: 0x000c, 
0x2ab4: 0x000c, 0x2ab6: 0x000c, 0x2ab7: 0x000c, 0x2abe: 0x000c, // Block 0xab, offset 0x2ac0 0x2adf: 0x000c, 0x2ae3: 0x000c, 0x2ae4: 0x000c, 0x2ae5: 0x000c, 0x2ae6: 0x000c, 0x2ae7: 0x000c, 0x2ae8: 0x000c, 0x2ae9: 0x000c, 0x2aea: 0x000c, // Block 0xac, offset 0x2b00 0x2b00: 0x000c, 0x2b01: 0x000c, 0x2b3c: 0x000c, // Block 0xad, offset 0x2b40 0x2b40: 0x000c, 0x2b66: 0x000c, 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c, 0x2b6a: 0x000c, 0x2b6b: 0x000c, 0x2b6c: 0x000c, 0x2b70: 0x000c, 0x2b71: 0x000c, 0x2b72: 0x000c, 0x2b73: 0x000c, 0x2b74: 0x000c, // Block 0xae, offset 0x2b80 0x2bb8: 0x000c, 0x2bb9: 0x000c, 0x2bba: 0x000c, 0x2bbb: 0x000c, 0x2bbc: 0x000c, 0x2bbd: 0x000c, 0x2bbe: 0x000c, 0x2bbf: 0x000c, // Block 0xaf, offset 0x2bc0 0x2bc2: 0x000c, 0x2bc3: 0x000c, 0x2bc4: 0x000c, 0x2bc6: 0x000c, // Block 0xb0, offset 0x2c00 0x2c33: 0x000c, 0x2c34: 0x000c, 0x2c35: 0x000c, 0x2c36: 0x000c, 0x2c37: 0x000c, 0x2c38: 0x000c, 0x2c3a: 0x000c, 0x2c3f: 0x000c, // Block 0xb1, offset 0x2c40 0x2c40: 0x000c, 0x2c42: 0x000c, 0x2c43: 0x000c, // Block 0xb2, offset 0x2c80 0x2cb2: 0x000c, 0x2cb3: 0x000c, 0x2cb4: 0x000c, 0x2cb5: 0x000c, 0x2cbc: 0x000c, 0x2cbd: 0x000c, 0x2cbf: 0x000c, // Block 0xb3, offset 0x2cc0 0x2cc0: 0x000c, 0x2cdc: 0x000c, 0x2cdd: 0x000c, // Block 0xb4, offset 0x2d00 0x2d33: 0x000c, 0x2d34: 0x000c, 0x2d35: 0x000c, 0x2d36: 0x000c, 0x2d37: 0x000c, 0x2d38: 0x000c, 0x2d39: 0x000c, 0x2d3a: 0x000c, 0x2d3d: 0x000c, 0x2d3f: 0x000c, // Block 0xb5, offset 0x2d40 0x2d40: 0x000c, 0x2d60: 0x000a, 0x2d61: 0x000a, 0x2d62: 0x000a, 0x2d63: 0x000a, 0x2d64: 0x000a, 0x2d65: 0x000a, 0x2d66: 0x000a, 0x2d67: 0x000a, 0x2d68: 0x000a, 0x2d69: 0x000a, 0x2d6a: 0x000a, 0x2d6b: 0x000a, 0x2d6c: 0x000a, // Block 0xb6, offset 0x2d80 0x2dab: 0x000c, 0x2dad: 0x000c, 0x2db0: 0x000c, 0x2db1: 0x000c, 0x2db2: 0x000c, 0x2db3: 0x000c, 0x2db4: 0x000c, 0x2db5: 0x000c, 0x2db7: 0x000c, // Block 0xb7, offset 0x2dc0 0x2ddd: 0x000c, 0x2dde: 0x000c, 0x2ddf: 0x000c, 0x2de2: 0x000c, 0x2de3: 0x000c, 0x2de4: 0x000c, 0x2de5: 0x000c, 0x2de7: 0x000c, 0x2de8: 0x000c, 0x2de9: 0x000c, 0x2dea: 0x000c, 0x2deb: 0x000c, // Block 0xb8, offset 0x2e00 0x2e30: 0x000c, 0x2e31: 0x000c, 0x2e32: 0x000c, 0x2e33: 0x000c, 0x2e34: 0x000c, 0x2e35: 0x000c, 0x2e36: 0x000c, 0x2e38: 0x000c, 0x2e39: 0x000c, 0x2e3a: 0x000c, 0x2e3b: 0x000c, 0x2e3c: 0x000c, 0x2e3d: 0x000c, // Block 0xb9, offset 0x2e40 0x2e52: 0x000c, 0x2e53: 0x000c, 0x2e54: 0x000c, 0x2e55: 0x000c, 0x2e56: 0x000c, 0x2e57: 0x000c, 0x2e58: 0x000c, 0x2e59: 0x000c, 0x2e5a: 0x000c, 0x2e5b: 0x000c, 0x2e5c: 0x000c, 0x2e5d: 0x000c, 0x2e5e: 0x000c, 0x2e5f: 0x000c, 0x2e60: 0x000c, 0x2e61: 0x000c, 0x2e62: 0x000c, 0x2e63: 0x000c, 0x2e64: 0x000c, 0x2e65: 0x000c, 0x2e66: 0x000c, 0x2e67: 0x000c, 0x2e6a: 0x000c, 0x2e6b: 0x000c, 0x2e6c: 0x000c, 0x2e6d: 0x000c, 0x2e6e: 0x000c, 0x2e6f: 0x000c, 0x2e70: 0x000c, 0x2e72: 0x000c, 0x2e73: 0x000c, 0x2e75: 0x000c, 0x2e76: 0x000c, // Block 0xba, offset 0x2e80 0x2eb0: 0x000c, 0x2eb1: 0x000c, 0x2eb2: 0x000c, 0x2eb3: 0x000c, 0x2eb4: 0x000c, // Block 0xbb, offset 0x2ec0 0x2ef0: 0x000c, 0x2ef1: 0x000c, 0x2ef2: 0x000c, 0x2ef3: 0x000c, 0x2ef4: 0x000c, 0x2ef5: 0x000c, 0x2ef6: 0x000c, // Block 0xbc, offset 0x2f00 0x2f0f: 0x000c, 0x2f10: 0x000c, 0x2f11: 0x000c, 0x2f12: 0x000c, // Block 0xbd, offset 0x2f40 0x2f5d: 0x000c, 0x2f5e: 0x000c, 0x2f60: 0x000b, 0x2f61: 0x000b, 0x2f62: 0x000b, 0x2f63: 0x000b, // Block 0xbe, offset 0x2f80 0x2fa7: 0x000c, 0x2fa8: 0x000c, 0x2fa9: 0x000c, 0x2fb3: 0x000b, 0x2fb4: 0x000b, 0x2fb5: 0x000b, 0x2fb6: 0x000b, 0x2fb7: 0x000b, 0x2fb8: 0x000b, 0x2fb9: 0x000b, 0x2fba: 
0x000b, 0x2fbb: 0x000c, 0x2fbc: 0x000c, 0x2fbd: 0x000c, 0x2fbe: 0x000c, 0x2fbf: 0x000c, // Block 0xbf, offset 0x2fc0 0x2fc0: 0x000c, 0x2fc1: 0x000c, 0x2fc2: 0x000c, 0x2fc5: 0x000c, 0x2fc6: 0x000c, 0x2fc7: 0x000c, 0x2fc8: 0x000c, 0x2fc9: 0x000c, 0x2fca: 0x000c, 0x2fcb: 0x000c, 0x2fea: 0x000c, 0x2feb: 0x000c, 0x2fec: 0x000c, 0x2fed: 0x000c, // Block 0xc0, offset 0x3000 0x3000: 0x000a, 0x3001: 0x000a, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000a, // Block 0xc1, offset 0x3040 0x3040: 0x000a, 0x3041: 0x000a, 0x3042: 0x000a, 0x3043: 0x000a, 0x3044: 0x000a, 0x3045: 0x000a, 0x3046: 0x000a, 0x3047: 0x000a, 0x3048: 0x000a, 0x3049: 0x000a, 0x304a: 0x000a, 0x304b: 0x000a, 0x304c: 0x000a, 0x304d: 0x000a, 0x304e: 0x000a, 0x304f: 0x000a, 0x3050: 0x000a, 0x3051: 0x000a, 0x3052: 0x000a, 0x3053: 0x000a, 0x3054: 0x000a, 0x3055: 0x000a, 0x3056: 0x000a, // Block 0xc2, offset 0x3080 0x309b: 0x000a, // Block 0xc3, offset 0x30c0 0x30d5: 0x000a, // Block 0xc4, offset 0x3100 0x310f: 0x000a, // Block 0xc5, offset 0x3140 0x3149: 0x000a, // Block 0xc6, offset 0x3180 0x3183: 0x000a, 0x318e: 0x0002, 0x318f: 0x0002, 0x3190: 0x0002, 0x3191: 0x0002, 0x3192: 0x0002, 0x3193: 0x0002, 0x3194: 0x0002, 0x3195: 0x0002, 0x3196: 0x0002, 0x3197: 0x0002, 0x3198: 0x0002, 0x3199: 0x0002, 0x319a: 0x0002, 0x319b: 0x0002, 0x319c: 0x0002, 0x319d: 0x0002, 0x319e: 0x0002, 0x319f: 0x0002, 0x31a0: 0x0002, 0x31a1: 0x0002, 0x31a2: 0x0002, 0x31a3: 0x0002, 0x31a4: 0x0002, 0x31a5: 0x0002, 0x31a6: 0x0002, 0x31a7: 0x0002, 0x31a8: 0x0002, 0x31a9: 0x0002, 0x31aa: 0x0002, 0x31ab: 0x0002, 0x31ac: 0x0002, 0x31ad: 0x0002, 0x31ae: 0x0002, 0x31af: 0x0002, 0x31b0: 0x0002, 0x31b1: 0x0002, 0x31b2: 0x0002, 0x31b3: 0x0002, 0x31b4: 0x0002, 0x31b5: 0x0002, 0x31b6: 0x0002, 0x31b7: 0x0002, 0x31b8: 0x0002, 0x31b9: 0x0002, 0x31ba: 0x0002, 0x31bb: 0x0002, 0x31bc: 0x0002, 0x31bd: 0x0002, 0x31be: 0x0002, 0x31bf: 0x0002, // Block 0xc7, offset 0x31c0 0x31c0: 0x000c, 0x31c1: 0x000c, 0x31c2: 0x000c, 0x31c3: 0x000c, 0x31c4: 0x000c, 0x31c5: 0x000c, 0x31c6: 0x000c, 0x31c7: 0x000c, 0x31c8: 0x000c, 0x31c9: 0x000c, 0x31ca: 0x000c, 0x31cb: 0x000c, 0x31cc: 0x000c, 0x31cd: 0x000c, 0x31ce: 0x000c, 0x31cf: 0x000c, 0x31d0: 0x000c, 0x31d1: 0x000c, 0x31d2: 0x000c, 0x31d3: 0x000c, 0x31d4: 0x000c, 0x31d5: 0x000c, 0x31d6: 0x000c, 0x31d7: 0x000c, 0x31d8: 0x000c, 0x31d9: 0x000c, 0x31da: 0x000c, 0x31db: 0x000c, 0x31dc: 0x000c, 0x31dd: 0x000c, 0x31de: 0x000c, 0x31df: 0x000c, 0x31e0: 0x000c, 0x31e1: 0x000c, 0x31e2: 0x000c, 0x31e3: 0x000c, 0x31e4: 0x000c, 0x31e5: 0x000c, 0x31e6: 0x000c, 0x31e7: 0x000c, 0x31e8: 0x000c, 0x31e9: 0x000c, 0x31ea: 0x000c, 0x31eb: 0x000c, 0x31ec: 0x000c, 0x31ed: 0x000c, 0x31ee: 0x000c, 0x31ef: 0x000c, 0x31f0: 0x000c, 0x31f1: 0x000c, 0x31f2: 0x000c, 0x31f3: 0x000c, 0x31f4: 0x000c, 0x31f5: 0x000c, 0x31f6: 0x000c, 0x31fb: 0x000c, 0x31fc: 0x000c, 0x31fd: 0x000c, 0x31fe: 0x000c, 0x31ff: 0x000c, // Block 0xc8, offset 0x3200 0x3200: 0x000c, 0x3201: 0x000c, 0x3202: 0x000c, 0x3203: 0x000c, 0x3204: 0x000c, 0x3205: 0x000c, 0x3206: 0x000c, 0x3207: 0x000c, 0x3208: 0x000c, 0x3209: 0x000c, 0x320a: 0x000c, 0x320b: 0x000c, 0x320c: 0x000c, 0x320d: 0x000c, 0x320e: 0x000c, 0x320f: 0x000c, 0x3210: 0x000c, 0x3211: 0x000c, 0x3212: 0x000c, 0x3213: 0x000c, 0x3214: 0x000c, 0x3215: 0x000c, 0x3216: 0x000c, 0x3217: 0x000c, 0x3218: 0x000c, 0x3219: 0x000c, 0x321a: 0x000c, 0x321b: 0x000c, 0x321c: 0x000c, 0x321d: 0x000c, 0x321e: 0x000c, 0x321f: 0x000c, 0x3220: 0x000c, 0x3221: 0x000c, 0x3222: 0x000c, 0x3223: 0x000c, 0x3224: 0x000c, 0x3225: 0x000c, 0x3226: 0x000c, 0x3227: 0x000c, 0x3228: 
0x000c, 0x3229: 0x000c, 0x322a: 0x000c, 0x322b: 0x000c, 0x322c: 0x000c, 0x3235: 0x000c, // Block 0xc9, offset 0x3240 0x3244: 0x000c, 0x325b: 0x000c, 0x325c: 0x000c, 0x325d: 0x000c, 0x325e: 0x000c, 0x325f: 0x000c, 0x3261: 0x000c, 0x3262: 0x000c, 0x3263: 0x000c, 0x3264: 0x000c, 0x3265: 0x000c, 0x3266: 0x000c, 0x3267: 0x000c, 0x3268: 0x000c, 0x3269: 0x000c, 0x326a: 0x000c, 0x326b: 0x000c, 0x326c: 0x000c, 0x326d: 0x000c, 0x326e: 0x000c, 0x326f: 0x000c, // Block 0xca, offset 0x3280 0x3280: 0x000c, 0x3281: 0x000c, 0x3282: 0x000c, 0x3283: 0x000c, 0x3284: 0x000c, 0x3285: 0x000c, 0x3286: 0x000c, 0x3288: 0x000c, 0x3289: 0x000c, 0x328a: 0x000c, 0x328b: 0x000c, 0x328c: 0x000c, 0x328d: 0x000c, 0x328e: 0x000c, 0x328f: 0x000c, 0x3290: 0x000c, 0x3291: 0x000c, 0x3292: 0x000c, 0x3293: 0x000c, 0x3294: 0x000c, 0x3295: 0x000c, 0x3296: 0x000c, 0x3297: 0x000c, 0x3298: 0x000c, 0x329b: 0x000c, 0x329c: 0x000c, 0x329d: 0x000c, 0x329e: 0x000c, 0x329f: 0x000c, 0x32a0: 0x000c, 0x32a1: 0x000c, 0x32a3: 0x000c, 0x32a4: 0x000c, 0x32a6: 0x000c, 0x32a7: 0x000c, 0x32a8: 0x000c, 0x32a9: 0x000c, 0x32aa: 0x000c, // Block 0xcb, offset 0x32c0 0x32c0: 0x0001, 0x32c1: 0x0001, 0x32c2: 0x0001, 0x32c3: 0x0001, 0x32c4: 0x0001, 0x32c5: 0x0001, 0x32c6: 0x0001, 0x32c7: 0x0001, 0x32c8: 0x0001, 0x32c9: 0x0001, 0x32ca: 0x0001, 0x32cb: 0x0001, 0x32cc: 0x0001, 0x32cd: 0x0001, 0x32ce: 0x0001, 0x32cf: 0x0001, 0x32d0: 0x000c, 0x32d1: 0x000c, 0x32d2: 0x000c, 0x32d3: 0x000c, 0x32d4: 0x000c, 0x32d5: 0x000c, 0x32d6: 0x000c, 0x32d7: 0x0001, 0x32d8: 0x0001, 0x32d9: 0x0001, 0x32da: 0x0001, 0x32db: 0x0001, 0x32dc: 0x0001, 0x32dd: 0x0001, 0x32de: 0x0001, 0x32df: 0x0001, 0x32e0: 0x0001, 0x32e1: 0x0001, 0x32e2: 0x0001, 0x32e3: 0x0001, 0x32e4: 0x0001, 0x32e5: 0x0001, 0x32e6: 0x0001, 0x32e7: 0x0001, 0x32e8: 0x0001, 0x32e9: 0x0001, 0x32ea: 0x0001, 0x32eb: 0x0001, 0x32ec: 0x0001, 0x32ed: 0x0001, 0x32ee: 0x0001, 0x32ef: 0x0001, 0x32f0: 0x0001, 0x32f1: 0x0001, 0x32f2: 0x0001, 0x32f3: 0x0001, 0x32f4: 0x0001, 0x32f5: 0x0001, 0x32f6: 0x0001, 0x32f7: 0x0001, 0x32f8: 0x0001, 0x32f9: 0x0001, 0x32fa: 0x0001, 0x32fb: 0x0001, 0x32fc: 0x0001, 0x32fd: 0x0001, 0x32fe: 0x0001, 0x32ff: 0x0001, // Block 0xcc, offset 0x3300 0x3300: 0x0001, 0x3301: 0x0001, 0x3302: 0x0001, 0x3303: 0x0001, 0x3304: 0x000c, 0x3305: 0x000c, 0x3306: 0x000c, 0x3307: 0x000c, 0x3308: 0x000c, 0x3309: 0x000c, 0x330a: 0x000c, 0x330b: 0x0001, 0x330c: 0x0001, 0x330d: 0x0001, 0x330e: 0x0001, 0x330f: 0x0001, 0x3310: 0x0001, 0x3311: 0x0001, 0x3312: 0x0001, 0x3313: 0x0001, 0x3314: 0x0001, 0x3315: 0x0001, 0x3316: 0x0001, 0x3317: 0x0001, 0x3318: 0x0001, 0x3319: 0x0001, 0x331a: 0x0001, 0x331b: 0x0001, 0x331c: 0x0001, 0x331d: 0x0001, 0x331e: 0x0001, 0x331f: 0x0001, 0x3320: 0x0001, 0x3321: 0x0001, 0x3322: 0x0001, 0x3323: 0x0001, 0x3324: 0x0001, 0x3325: 0x0001, 0x3326: 0x0001, 0x3327: 0x0001, 0x3328: 0x0001, 0x3329: 0x0001, 0x332a: 0x0001, 0x332b: 0x0001, 0x332c: 0x0001, 0x332d: 0x0001, 0x332e: 0x0001, 0x332f: 0x0001, 0x3330: 0x0001, 0x3331: 0x0001, 0x3332: 0x0001, 0x3333: 0x0001, 0x3334: 0x0001, 0x3335: 0x0001, 0x3336: 0x0001, 0x3337: 0x0001, 0x3338: 0x0001, 0x3339: 0x0001, 0x333a: 0x0001, 0x333b: 0x0001, 0x333c: 0x0001, 0x333d: 0x0001, 0x333e: 0x0001, 0x333f: 0x0001, // Block 0xcd, offset 0x3340 0x3340: 0x000d, 0x3341: 0x000d, 0x3342: 0x000d, 0x3343: 0x000d, 0x3344: 0x000d, 0x3345: 0x000d, 0x3346: 0x000d, 0x3347: 0x000d, 0x3348: 0x000d, 0x3349: 0x000d, 0x334a: 0x000d, 0x334b: 0x000d, 0x334c: 0x000d, 0x334d: 0x000d, 0x334e: 0x000d, 0x334f: 0x000d, 0x3350: 0x000d, 0x3351: 0x000d, 0x3352: 0x000d, 0x3353: 0x000d, 0x3354: 
0x000d, 0x3355: 0x000d, 0x3356: 0x000d, 0x3357: 0x000d, 0x3358: 0x000d, 0x3359: 0x000d, 0x335a: 0x000d, 0x335b: 0x000d, 0x335c: 0x000d, 0x335d: 0x000d, 0x335e: 0x000d, 0x335f: 0x000d, 0x3360: 0x000d, 0x3361: 0x000d, 0x3362: 0x000d, 0x3363: 0x000d, 0x3364: 0x000d, 0x3365: 0x000d, 0x3366: 0x000d, 0x3367: 0x000d, 0x3368: 0x000d, 0x3369: 0x000d, 0x336a: 0x000d, 0x336b: 0x000d, 0x336c: 0x000d, 0x336d: 0x000d, 0x336e: 0x000d, 0x336f: 0x000d, 0x3370: 0x000a, 0x3371: 0x000a, 0x3372: 0x000d, 0x3373: 0x000d, 0x3374: 0x000d, 0x3375: 0x000d, 0x3376: 0x000d, 0x3377: 0x000d, 0x3378: 0x000d, 0x3379: 0x000d, 0x337a: 0x000d, 0x337b: 0x000d, 0x337c: 0x000d, 0x337d: 0x000d, 0x337e: 0x000d, 0x337f: 0x000d, // Block 0xce, offset 0x3380 0x3380: 0x000a, 0x3381: 0x000a, 0x3382: 0x000a, 0x3383: 0x000a, 0x3384: 0x000a, 0x3385: 0x000a, 0x3386: 0x000a, 0x3387: 0x000a, 0x3388: 0x000a, 0x3389: 0x000a, 0x338a: 0x000a, 0x338b: 0x000a, 0x338c: 0x000a, 0x338d: 0x000a, 0x338e: 0x000a, 0x338f: 0x000a, 0x3390: 0x000a, 0x3391: 0x000a, 0x3392: 0x000a, 0x3393: 0x000a, 0x3394: 0x000a, 0x3395: 0x000a, 0x3396: 0x000a, 0x3397: 0x000a, 0x3398: 0x000a, 0x3399: 0x000a, 0x339a: 0x000a, 0x339b: 0x000a, 0x339c: 0x000a, 0x339d: 0x000a, 0x339e: 0x000a, 0x339f: 0x000a, 0x33a0: 0x000a, 0x33a1: 0x000a, 0x33a2: 0x000a, 0x33a3: 0x000a, 0x33a4: 0x000a, 0x33a5: 0x000a, 0x33a6: 0x000a, 0x33a7: 0x000a, 0x33a8: 0x000a, 0x33a9: 0x000a, 0x33aa: 0x000a, 0x33ab: 0x000a, 0x33b0: 0x000a, 0x33b1: 0x000a, 0x33b2: 0x000a, 0x33b3: 0x000a, 0x33b4: 0x000a, 0x33b5: 0x000a, 0x33b6: 0x000a, 0x33b7: 0x000a, 0x33b8: 0x000a, 0x33b9: 0x000a, 0x33ba: 0x000a, 0x33bb: 0x000a, 0x33bc: 0x000a, 0x33bd: 0x000a, 0x33be: 0x000a, 0x33bf: 0x000a, // Block 0xcf, offset 0x33c0 0x33c0: 0x000a, 0x33c1: 0x000a, 0x33c2: 0x000a, 0x33c3: 0x000a, 0x33c4: 0x000a, 0x33c5: 0x000a, 0x33c6: 0x000a, 0x33c7: 0x000a, 0x33c8: 0x000a, 0x33c9: 0x000a, 0x33ca: 0x000a, 0x33cb: 0x000a, 0x33cc: 0x000a, 0x33cd: 0x000a, 0x33ce: 0x000a, 0x33cf: 0x000a, 0x33d0: 0x000a, 0x33d1: 0x000a, 0x33d2: 0x000a, 0x33d3: 0x000a, 0x33e0: 0x000a, 0x33e1: 0x000a, 0x33e2: 0x000a, 0x33e3: 0x000a, 0x33e4: 0x000a, 0x33e5: 0x000a, 0x33e6: 0x000a, 0x33e7: 0x000a, 0x33e8: 0x000a, 0x33e9: 0x000a, 0x33ea: 0x000a, 0x33eb: 0x000a, 0x33ec: 0x000a, 0x33ed: 0x000a, 0x33ee: 0x000a, 0x33f1: 0x000a, 0x33f2: 0x000a, 0x33f3: 0x000a, 0x33f4: 0x000a, 0x33f5: 0x000a, 0x33f6: 0x000a, 0x33f7: 0x000a, 0x33f8: 0x000a, 0x33f9: 0x000a, 0x33fa: 0x000a, 0x33fb: 0x000a, 0x33fc: 0x000a, 0x33fd: 0x000a, 0x33fe: 0x000a, 0x33ff: 0x000a, // Block 0xd0, offset 0x3400 0x3401: 0x000a, 0x3402: 0x000a, 0x3403: 0x000a, 0x3404: 0x000a, 0x3405: 0x000a, 0x3406: 0x000a, 0x3407: 0x000a, 0x3408: 0x000a, 0x3409: 0x000a, 0x340a: 0x000a, 0x340b: 0x000a, 0x340c: 0x000a, 0x340d: 0x000a, 0x340e: 0x000a, 0x340f: 0x000a, 0x3411: 0x000a, 0x3412: 0x000a, 0x3413: 0x000a, 0x3414: 0x000a, 0x3415: 0x000a, 0x3416: 0x000a, 0x3417: 0x000a, 0x3418: 0x000a, 0x3419: 0x000a, 0x341a: 0x000a, 0x341b: 0x000a, 0x341c: 0x000a, 0x341d: 0x000a, 0x341e: 0x000a, 0x341f: 0x000a, 0x3420: 0x000a, 0x3421: 0x000a, 0x3422: 0x000a, 0x3423: 0x000a, 0x3424: 0x000a, 0x3425: 0x000a, 0x3426: 0x000a, 0x3427: 0x000a, 0x3428: 0x000a, 0x3429: 0x000a, 0x342a: 0x000a, 0x342b: 0x000a, 0x342c: 0x000a, 0x342d: 0x000a, 0x342e: 0x000a, 0x342f: 0x000a, 0x3430: 0x000a, 0x3431: 0x000a, 0x3432: 0x000a, 0x3433: 0x000a, 0x3434: 0x000a, 0x3435: 0x000a, // Block 0xd1, offset 0x3440 0x3440: 0x0002, 0x3441: 0x0002, 0x3442: 0x0002, 0x3443: 0x0002, 0x3444: 0x0002, 0x3445: 0x0002, 0x3446: 0x0002, 0x3447: 0x0002, 0x3448: 0x0002, 
0x3449: 0x0002, 0x344a: 0x0002, 0x344b: 0x000a, 0x344c: 0x000a, // Block 0xd2, offset 0x3480 0x34aa: 0x000a, 0x34ab: 0x000a, // Block 0xd3, offset 0x34c0 0x34c0: 0x000a, 0x34c1: 0x000a, 0x34c2: 0x000a, 0x34c3: 0x000a, 0x34c4: 0x000a, 0x34c5: 0x000a, 0x34c6: 0x000a, 0x34c7: 0x000a, 0x34c8: 0x000a, 0x34c9: 0x000a, 0x34ca: 0x000a, 0x34cb: 0x000a, 0x34cc: 0x000a, 0x34cd: 0x000a, 0x34ce: 0x000a, 0x34cf: 0x000a, 0x34d0: 0x000a, 0x34d1: 0x000a, 0x34d2: 0x000a, 0x34e0: 0x000a, 0x34e1: 0x000a, 0x34e2: 0x000a, 0x34e3: 0x000a, 0x34e4: 0x000a, 0x34e5: 0x000a, 0x34e6: 0x000a, 0x34e7: 0x000a, 0x34e8: 0x000a, 0x34e9: 0x000a, 0x34ea: 0x000a, 0x34eb: 0x000a, 0x34ec: 0x000a, 0x34f0: 0x000a, 0x34f1: 0x000a, 0x34f2: 0x000a, 0x34f3: 0x000a, 0x34f4: 0x000a, 0x34f5: 0x000a, 0x34f6: 0x000a, // Block 0xd4, offset 0x3500 0x3500: 0x000a, 0x3501: 0x000a, 0x3502: 0x000a, 0x3503: 0x000a, 0x3504: 0x000a, 0x3505: 0x000a, 0x3506: 0x000a, 0x3507: 0x000a, 0x3508: 0x000a, 0x3509: 0x000a, 0x350a: 0x000a, 0x350b: 0x000a, 0x350c: 0x000a, 0x350d: 0x000a, 0x350e: 0x000a, 0x350f: 0x000a, 0x3510: 0x000a, 0x3511: 0x000a, 0x3512: 0x000a, 0x3513: 0x000a, 0x3514: 0x000a, // Block 0xd5, offset 0x3540 0x3540: 0x000a, 0x3541: 0x000a, 0x3542: 0x000a, 0x3543: 0x000a, 0x3544: 0x000a, 0x3545: 0x000a, 0x3546: 0x000a, 0x3547: 0x000a, 0x3548: 0x000a, 0x3549: 0x000a, 0x354a: 0x000a, 0x354b: 0x000a, 0x3550: 0x000a, 0x3551: 0x000a, 0x3552: 0x000a, 0x3553: 0x000a, 0x3554: 0x000a, 0x3555: 0x000a, 0x3556: 0x000a, 0x3557: 0x000a, 0x3558: 0x000a, 0x3559: 0x000a, 0x355a: 0x000a, 0x355b: 0x000a, 0x355c: 0x000a, 0x355d: 0x000a, 0x355e: 0x000a, 0x355f: 0x000a, 0x3560: 0x000a, 0x3561: 0x000a, 0x3562: 0x000a, 0x3563: 0x000a, 0x3564: 0x000a, 0x3565: 0x000a, 0x3566: 0x000a, 0x3567: 0x000a, 0x3568: 0x000a, 0x3569: 0x000a, 0x356a: 0x000a, 0x356b: 0x000a, 0x356c: 0x000a, 0x356d: 0x000a, 0x356e: 0x000a, 0x356f: 0x000a, 0x3570: 0x000a, 0x3571: 0x000a, 0x3572: 0x000a, 0x3573: 0x000a, 0x3574: 0x000a, 0x3575: 0x000a, 0x3576: 0x000a, 0x3577: 0x000a, 0x3578: 0x000a, 0x3579: 0x000a, 0x357a: 0x000a, 0x357b: 0x000a, 0x357c: 0x000a, 0x357d: 0x000a, 0x357e: 0x000a, 0x357f: 0x000a, // Block 0xd6, offset 0x3580 0x3580: 0x000a, 0x3581: 0x000a, 0x3582: 0x000a, 0x3583: 0x000a, 0x3584: 0x000a, 0x3585: 0x000a, 0x3586: 0x000a, 0x3587: 0x000a, 0x3590: 0x000a, 0x3591: 0x000a, 0x3592: 0x000a, 0x3593: 0x000a, 0x3594: 0x000a, 0x3595: 0x000a, 0x3596: 0x000a, 0x3597: 0x000a, 0x3598: 0x000a, 0x3599: 0x000a, 0x35a0: 0x000a, 0x35a1: 0x000a, 0x35a2: 0x000a, 0x35a3: 0x000a, 0x35a4: 0x000a, 0x35a5: 0x000a, 0x35a6: 0x000a, 0x35a7: 0x000a, 0x35a8: 0x000a, 0x35a9: 0x000a, 0x35aa: 0x000a, 0x35ab: 0x000a, 0x35ac: 0x000a, 0x35ad: 0x000a, 0x35ae: 0x000a, 0x35af: 0x000a, 0x35b0: 0x000a, 0x35b1: 0x000a, 0x35b2: 0x000a, 0x35b3: 0x000a, 0x35b4: 0x000a, 0x35b5: 0x000a, 0x35b6: 0x000a, 0x35b7: 0x000a, 0x35b8: 0x000a, 0x35b9: 0x000a, 0x35ba: 0x000a, 0x35bb: 0x000a, 0x35bc: 0x000a, 0x35bd: 0x000a, 0x35be: 0x000a, 0x35bf: 0x000a, // Block 0xd7, offset 0x35c0 0x35c0: 0x000a, 0x35c1: 0x000a, 0x35c2: 0x000a, 0x35c3: 0x000a, 0x35c4: 0x000a, 0x35c5: 0x000a, 0x35c6: 0x000a, 0x35c7: 0x000a, 0x35d0: 0x000a, 0x35d1: 0x000a, 0x35d2: 0x000a, 0x35d3: 0x000a, 0x35d4: 0x000a, 0x35d5: 0x000a, 0x35d6: 0x000a, 0x35d7: 0x000a, 0x35d8: 0x000a, 0x35d9: 0x000a, 0x35da: 0x000a, 0x35db: 0x000a, 0x35dc: 0x000a, 0x35dd: 0x000a, 0x35de: 0x000a, 0x35df: 0x000a, 0x35e0: 0x000a, 0x35e1: 0x000a, 0x35e2: 0x000a, 0x35e3: 0x000a, 0x35e4: 0x000a, 0x35e5: 0x000a, 0x35e6: 0x000a, 0x35e7: 0x000a, 0x35e8: 0x000a, 0x35e9: 0x000a, 0x35ea: 0x000a, 
0x35eb: 0x000a, 0x35ec: 0x000a, 0x35ed: 0x000a, // Block 0xd8, offset 0x3600 0x3610: 0x000a, 0x3611: 0x000a, 0x3612: 0x000a, 0x3613: 0x000a, 0x3614: 0x000a, 0x3615: 0x000a, 0x3616: 0x000a, 0x3617: 0x000a, 0x3618: 0x000a, 0x3619: 0x000a, 0x361a: 0x000a, 0x361b: 0x000a, 0x361c: 0x000a, 0x361d: 0x000a, 0x361e: 0x000a, 0x3620: 0x000a, 0x3621: 0x000a, 0x3622: 0x000a, 0x3623: 0x000a, 0x3624: 0x000a, 0x3625: 0x000a, 0x3626: 0x000a, 0x3627: 0x000a, 0x3630: 0x000a, 0x3633: 0x000a, 0x3634: 0x000a, 0x3635: 0x000a, 0x3636: 0x000a, 0x3637: 0x000a, 0x3638: 0x000a, 0x3639: 0x000a, 0x363a: 0x000a, 0x363b: 0x000a, 0x363c: 0x000a, 0x363d: 0x000a, 0x363e: 0x000a, // Block 0xd9, offset 0x3640 0x3640: 0x000a, 0x3641: 0x000a, 0x3642: 0x000a, 0x3643: 0x000a, 0x3644: 0x000a, 0x3645: 0x000a, 0x3646: 0x000a, 0x3647: 0x000a, 0x3648: 0x000a, 0x3649: 0x000a, 0x364a: 0x000a, 0x364b: 0x000a, 0x3650: 0x000a, 0x3651: 0x000a, 0x3652: 0x000a, 0x3653: 0x000a, 0x3654: 0x000a, 0x3655: 0x000a, 0x3656: 0x000a, 0x3657: 0x000a, 0x3658: 0x000a, 0x3659: 0x000a, 0x365a: 0x000a, 0x365b: 0x000a, 0x365c: 0x000a, 0x365d: 0x000a, 0x365e: 0x000a, // Block 0xda, offset 0x3680 0x3680: 0x000a, 0x3681: 0x000a, 0x3682: 0x000a, 0x3683: 0x000a, 0x3684: 0x000a, 0x3685: 0x000a, 0x3686: 0x000a, 0x3687: 0x000a, 0x3688: 0x000a, 0x3689: 0x000a, 0x368a: 0x000a, 0x368b: 0x000a, 0x368c: 0x000a, 0x368d: 0x000a, 0x368e: 0x000a, 0x368f: 0x000a, 0x3690: 0x000a, 0x3691: 0x000a, // Block 0xdb, offset 0x36c0 0x36fe: 0x000b, 0x36ff: 0x000b, // Block 0xdc, offset 0x3700 0x3700: 0x000b, 0x3701: 0x000b, 0x3702: 0x000b, 0x3703: 0x000b, 0x3704: 0x000b, 0x3705: 0x000b, 0x3706: 0x000b, 0x3707: 0x000b, 0x3708: 0x000b, 0x3709: 0x000b, 0x370a: 0x000b, 0x370b: 0x000b, 0x370c: 0x000b, 0x370d: 0x000b, 0x370e: 0x000b, 0x370f: 0x000b, 0x3710: 0x000b, 0x3711: 0x000b, 0x3712: 0x000b, 0x3713: 0x000b, 0x3714: 0x000b, 0x3715: 0x000b, 0x3716: 0x000b, 0x3717: 0x000b, 0x3718: 0x000b, 0x3719: 0x000b, 0x371a: 0x000b, 0x371b: 0x000b, 0x371c: 0x000b, 0x371d: 0x000b, 0x371e: 0x000b, 0x371f: 0x000b, 0x3720: 0x000b, 0x3721: 0x000b, 0x3722: 0x000b, 0x3723: 0x000b, 0x3724: 0x000b, 0x3725: 0x000b, 0x3726: 0x000b, 0x3727: 0x000b, 0x3728: 0x000b, 0x3729: 0x000b, 0x372a: 0x000b, 0x372b: 0x000b, 0x372c: 0x000b, 0x372d: 0x000b, 0x372e: 0x000b, 0x372f: 0x000b, 0x3730: 0x000b, 0x3731: 0x000b, 0x3732: 0x000b, 0x3733: 0x000b, 0x3734: 0x000b, 0x3735: 0x000b, 0x3736: 0x000b, 0x3737: 0x000b, 0x3738: 0x000b, 0x3739: 0x000b, 0x373a: 0x000b, 0x373b: 0x000b, 0x373c: 0x000b, 0x373d: 0x000b, 0x373e: 0x000b, 0x373f: 0x000b, // Block 0xdd, offset 0x3740 0x3740: 0x000c, 0x3741: 0x000c, 0x3742: 0x000c, 0x3743: 0x000c, 0x3744: 0x000c, 0x3745: 0x000c, 0x3746: 0x000c, 0x3747: 0x000c, 0x3748: 0x000c, 0x3749: 0x000c, 0x374a: 0x000c, 0x374b: 0x000c, 0x374c: 0x000c, 0x374d: 0x000c, 0x374e: 0x000c, 0x374f: 0x000c, 0x3750: 0x000c, 0x3751: 0x000c, 0x3752: 0x000c, 0x3753: 0x000c, 0x3754: 0x000c, 0x3755: 0x000c, 0x3756: 0x000c, 0x3757: 0x000c, 0x3758: 0x000c, 0x3759: 0x000c, 0x375a: 0x000c, 0x375b: 0x000c, 0x375c: 0x000c, 0x375d: 0x000c, 0x375e: 0x000c, 0x375f: 0x000c, 0x3760: 0x000c, 0x3761: 0x000c, 0x3762: 0x000c, 0x3763: 0x000c, 0x3764: 0x000c, 0x3765: 0x000c, 0x3766: 0x000c, 0x3767: 0x000c, 0x3768: 0x000c, 0x3769: 0x000c, 0x376a: 0x000c, 0x376b: 0x000c, 0x376c: 0x000c, 0x376d: 0x000c, 0x376e: 0x000c, 0x376f: 0x000c, 0x3770: 0x000b, 0x3771: 0x000b, 0x3772: 0x000b, 0x3773: 0x000b, 0x3774: 0x000b, 0x3775: 0x000b, 0x3776: 0x000b, 0x3777: 0x000b, 0x3778: 0x000b, 0x3779: 0x000b, 0x377a: 0x000b, 0x377b: 0x000b, 0x377c: 0x000b, 
0x377d: 0x000b, 0x377e: 0x000b, 0x377f: 0x000b, } // bidiIndex: 24 blocks, 1536 entries, 1536 bytes // Block 0 is the zero block. var bidiIndex = [1536]uint8{ // Block 0x0, offset 0x0 // Block 0x1, offset 0x40 // Block 0x2, offset 0x80 // Block 0x3, offset 0xc0 0xc2: 0x01, 0xc3: 0x02, 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xea: 0x07, 0xef: 0x08, 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, // Block 0x4, offset 0x100 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28, 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30, // Block 0x5, offset 0x140 0x140: 0x31, 0x141: 0x32, 0x142: 0x33, 0x14d: 0x34, 0x14e: 0x35, 0x150: 0x36, 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b, 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40, 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47, 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a, 0x17e: 0x4b, 0x17f: 0x4c, // Block 0x6, offset 0x180 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x59, 0x190: 0x5a, 0x191: 0x5b, 0x192: 0x5c, 0x193: 0x5d, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54, 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5e, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5f, 0x19e: 0x54, 0x19f: 0x60, 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x61, 0x1a7: 0x62, 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x63, 0x1ae: 0x64, 0x1af: 0x65, 0x1b3: 0x66, 0x1b5: 0x67, 0x1b7: 0x68, 0x1b8: 0x69, 0x1b9: 0x6a, 0x1ba: 0x6b, 0x1bb: 0x6c, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6d, // Block 0x7, offset 0x1c0 0x1c0: 0x6e, 0x1c2: 0x6f, 0x1c3: 0x70, 0x1c7: 0x71, 0x1c8: 0x72, 0x1c9: 0x73, 0x1ca: 0x74, 0x1cb: 0x75, 0x1cd: 0x76, 0x1cf: 0x77, // Block 0x8, offset 0x200 0x237: 0x54, // Block 0x9, offset 0x240 0x252: 0x78, 0x253: 0x79, 0x258: 0x7a, 0x259: 0x7b, 0x25a: 0x7c, 0x25b: 0x7d, 0x25c: 0x7e, 0x25e: 0x7f, 0x260: 0x80, 0x261: 0x81, 0x263: 0x82, 0x264: 0x83, 0x265: 0x84, 0x266: 0x85, 0x267: 0x86, 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26f: 0x8b, // Block 0xa, offset 0x280 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x0e, 0x2af: 0x0e, 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8e, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8f, 0x2b8: 0x90, 0x2b9: 0x91, 0x2ba: 0x0e, 0x2bb: 0x92, 0x2bc: 0x93, 0x2bd: 0x94, 0x2bf: 0x95, // Block 0xb, offset 0x2c0 0x2c4: 0x96, 0x2c5: 0x54, 0x2c6: 0x97, 0x2c7: 0x98, 0x2cb: 0x99, 0x2cd: 0x9a, 0x2e0: 0x9b, 0x2e1: 0x9b, 0x2e2: 0x9b, 0x2e3: 0x9b, 0x2e4: 0x9c, 0x2e5: 0x9b, 0x2e6: 0x9b, 0x2e7: 0x9b, 0x2e8: 0x9d, 0x2e9: 0x9b, 0x2ea: 0x9b, 0x2eb: 0x9e, 0x2ec: 0x9f, 0x2ed: 0x9b, 0x2ee: 0x9b, 0x2ef: 0x9b, 0x2f0: 0x9b, 0x2f1: 0x9b, 0x2f2: 0x9b, 0x2f3: 0x9b, 0x2f4: 0x9b, 0x2f5: 0x9b, 0x2f6: 0x9b, 0x2f7: 0x9b, 0x2f8: 0x9b, 0x2f9: 0xa0, 0x2fa: 0x9b, 0x2fb: 0x9b, 0x2fc: 0x9b, 0x2fd: 0x9b, 0x2fe: 0x9b, 0x2ff: 0x9b, // Block 0xc, offset 0x300 0x300: 0xa1, 0x301: 0xa2, 0x302: 0xa3, 0x304: 0xa4, 
0x305: 0xa5, 0x306: 0xa6, 0x307: 0xa7, 0x308: 0xa8, 0x30b: 0xa9, 0x30c: 0xaa, 0x30d: 0xab, 0x310: 0xac, 0x311: 0xad, 0x312: 0xae, 0x313: 0xaf, 0x316: 0xb0, 0x317: 0xb1, 0x318: 0xb2, 0x319: 0xb3, 0x31a: 0xb4, 0x31c: 0xb5, 0x330: 0xb6, 0x332: 0xb7, // Block 0xd, offset 0x340 0x36b: 0xb8, 0x36c: 0xb9, 0x37e: 0xba, // Block 0xe, offset 0x380 0x3b2: 0xbb, // Block 0xf, offset 0x3c0 0x3c5: 0xbc, 0x3c6: 0xbd, 0x3c8: 0x54, 0x3c9: 0xbe, 0x3cc: 0x54, 0x3cd: 0xbf, 0x3db: 0xc0, 0x3dc: 0xc1, 0x3dd: 0xc2, 0x3de: 0xc3, 0x3df: 0xc4, 0x3e8: 0xc5, 0x3e9: 0xc6, 0x3ea: 0xc7, // Block 0x10, offset 0x400 0x400: 0xc8, 0x420: 0x9b, 0x421: 0x9b, 0x422: 0x9b, 0x423: 0xc9, 0x424: 0x9b, 0x425: 0xca, 0x426: 0x9b, 0x427: 0x9b, 0x428: 0x9b, 0x429: 0x9b, 0x42a: 0x9b, 0x42b: 0x9b, 0x42c: 0x9b, 0x42d: 0x9b, 0x42e: 0x9b, 0x42f: 0x9b, 0x430: 0x9b, 0x431: 0x9b, 0x432: 0x9b, 0x433: 0x9b, 0x434: 0x9b, 0x435: 0x9b, 0x436: 0x9b, 0x437: 0x9b, 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xcb, 0x43c: 0x9b, 0x43d: 0x9b, 0x43e: 0x9b, 0x43f: 0x9b, // Block 0x11, offset 0x440 0x440: 0xcc, 0x441: 0x54, 0x442: 0xcd, 0x443: 0xce, 0x444: 0xcf, 0x445: 0xd0, 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54, 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54, 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xd1, 0x45c: 0x54, 0x45d: 0x6c, 0x45e: 0x54, 0x45f: 0xd2, 0x460: 0xd3, 0x461: 0xd4, 0x462: 0xd5, 0x464: 0xd6, 0x465: 0xd7, 0x466: 0xd8, 0x467: 0x36, 0x47f: 0xd9, // Block 0x12, offset 0x480 0x4bf: 0xd9, // Block 0x13, offset 0x4c0 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, 0x4ef: 0x10, 0x4ff: 0x10, // Block 0x14, offset 0x500 0x50f: 0x10, 0x51f: 0x10, 0x52f: 0x10, 0x53f: 0x10, // Block 0x15, offset 0x540 0x540: 0xda, 0x541: 0xda, 0x542: 0xda, 0x543: 0xda, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xdb, 0x548: 0xda, 0x549: 0xda, 0x54a: 0xda, 0x54b: 0xda, 0x54c: 0xda, 0x54d: 0xda, 0x54e: 0xda, 0x54f: 0xda, 0x550: 0xda, 0x551: 0xda, 0x552: 0xda, 0x553: 0xda, 0x554: 0xda, 0x555: 0xda, 0x556: 0xda, 0x557: 0xda, 0x558: 0xda, 0x559: 0xda, 0x55a: 0xda, 0x55b: 0xda, 0x55c: 0xda, 0x55d: 0xda, 0x55e: 0xda, 0x55f: 0xda, 0x560: 0xda, 0x561: 0xda, 0x562: 0xda, 0x563: 0xda, 0x564: 0xda, 0x565: 0xda, 0x566: 0xda, 0x567: 0xda, 0x568: 0xda, 0x569: 0xda, 0x56a: 0xda, 0x56b: 0xda, 0x56c: 0xda, 0x56d: 0xda, 0x56e: 0xda, 0x56f: 0xda, 0x570: 0xda, 0x571: 0xda, 0x572: 0xda, 0x573: 0xda, 0x574: 0xda, 0x575: 0xda, 0x576: 0xda, 0x577: 0xda, 0x578: 0xda, 0x579: 0xda, 0x57a: 0xda, 0x57b: 0xda, 0x57c: 0xda, 0x57d: 0xda, 0x57e: 0xda, 0x57f: 0xda, // Block 0x16, offset 0x580 0x58f: 0x10, 0x59f: 0x10, 0x5a0: 0x13, 0x5af: 0x10, 0x5bf: 0x10, // Block 0x17, offset 0x5c0 0x5cf: 0x10, } // Total table size 15800 bytes (15KiB); checksum: F50EF68C trieval.go000066400000000000000000000031621324746544700334660ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicode/bidi// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. package bidi // Class is the Unicode BiDi class. Each rune has a single class. 
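// Editor's note: the sketch below is illustrative and is not part of the
// vendored file. The generated bidi tables above use a two-level block
// layout: an index table maps the high bits of the input to a 64-entry
// block, and a values table holds the per-entry data for that block (the
// "// Block N, offset M" comments mark those 64-entry groups). The function
// uses hypothetical table names (idx, values); the real lookup in this
// package goes through the generated trie type and operates on UTF-8 bytes.
func blockLookupSketch(idx []uint8, values []uint8, n uint32) uint8 {
	block := uint32(idx[n>>6])      // high bits select the 64-entry block
	return values[block<<6|n&0x3f]  // low 6 bits pick the entry within it
}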
type Class uint const ( L Class = iota // LeftToRight R // RightToLeft EN // EuropeanNumber ES // EuropeanSeparator ET // EuropeanTerminator AN // ArabicNumber CS // CommonSeparator B // ParagraphSeparator S // SegmentSeparator WS // WhiteSpace ON // OtherNeutral BN // BoundaryNeutral NSM // NonspacingMark AL // ArabicLetter Control // Control LRO - PDI numClass LRO // LeftToRightOverride RLO // RightToLeftOverride LRE // LeftToRightEmbedding RLE // RightToLeftEmbedding PDF // PopDirectionalFormat LRI // LeftToRightIsolate RLI // RightToLeftIsolate FSI // FirstStrongIsolate PDI // PopDirectionalIsolate unknownClass = ^Class(0) ) var controlToClass = map[rune]Class{ 0x202D: LRO, // LeftToRightOverride, 0x202E: RLO, // RightToLeftOverride, 0x202A: LRE, // LeftToRightEmbedding, 0x202B: RLE, // RightToLeftEmbedding, 0x202C: PDF, // PopDirectionalFormat, 0x2066: LRI, // LeftToRightIsolate, 0x2067: RLI, // RightToLeftIsolate, 0x2068: FSI, // FirstStrongIsolate, 0x2069: PDI, // PopDirectionalIsolate, } // A trie entry has the following bits: // 7..5 XOR mask for brackets // 4 1: Bracket open, 0: Bracket close // 3..0 Class type const ( openMask = 0x10 xorMaskShift = 5 ) cldr/000077500000000000000000000000001324746544700315045ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicodebase.go000066400000000000000000000047501324746544700327530ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicode/cldr// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cldr import ( "encoding/xml" "regexp" "strconv" ) // Elem is implemented by every XML element. type Elem interface { setEnclosing(Elem) setName(string) enclosing() Elem GetCommon() *Common } type hidden struct { CharData string `xml:",chardata"` Alias *struct { Common Source string `xml:"source,attr"` Path string `xml:"path,attr"` } `xml:"alias"` Def *struct { Common Choice string `xml:"choice,attr,omitempty"` Type string `xml:"type,attr,omitempty"` } `xml:"default"` } // Common holds several of the most common attributes and sub elements // of an XML element. type Common struct { XMLName xml.Name name string enclElem Elem Type string `xml:"type,attr,omitempty"` Reference string `xml:"reference,attr,omitempty"` Alt string `xml:"alt,attr,omitempty"` ValidSubLocales string `xml:"validSubLocales,attr,omitempty"` Draft string `xml:"draft,attr,omitempty"` hidden } // Default returns the default type to select from the enclosed list // or "" if no default value is specified. func (e *Common) Default() string { if e.Def == nil { return "" } if e.Def.Choice != "" { return e.Def.Choice } else if e.Def.Type != "" { // Type is still used by the default element in collation. return e.Def.Type } return "" } // GetCommon returns e. It is provided such that Common implements Elem. func (e *Common) GetCommon() *Common { return e } // Data returns the character data accumulated for this element. func (e *Common) Data() string { e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode) return e.CharData } func (e *Common) setName(s string) { e.name = s } func (e *Common) enclosing() Elem { return e.enclElem } func (e *Common) setEnclosing(en Elem) { e.enclElem = en } // Escape characters that can be escaped without further escaping the string. 
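// Editor's note: the table below is an illustrative addition, not part of the
// vendored file. Under a hypothetical name (escapeExamples) it lists the
// escape notations matched by charRe right below and the rune each one
// decodes to via replaceUnicode, so the regexp is easier to read at a glance.
var escapeExamples = map[string]rune{
	"&#x41;":     'A',  // XML numeric character reference
	`\u0041`:     'A',  // 4-digit Unicode escape
	`\U00000041`: 'A',  // 8-digit Unicode escape
	`\x41`:       'A',  // 2-digit hex escape
	`\101`:       'A',  // 3-digit octal escape (octal 101 = 65 = 'A')
	`\n`:         '\n', // single-character escape from [abtnvfr]
}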
var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`) // replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string. // It assumes the input string is correctly formatted. func replaceUnicode(s string) string { if s[1] == '#' { r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32) return string(r) } r, _, _, _ := strconv.UnquoteChar(s, 0) return string(r) } cldr.go000066400000000000000000000074201324746544700327620ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicode/cldr// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:generate go run makexml.go -output xml.go // Package cldr provides a parser for LDML and related XML formats. // This package is intended to be used by the table generation tools // for the various internationalization-related packages. // As the XML types are generated from the CLDR DTD, and as the CLDR standard // is periodically amended, this package may change considerably over time. // This mostly means that data may appear and disappear between versions. // That is, old code should keep compiling for newer versions, but data // may have moved or changed. // CLDR version 22 is the first version supported by this package. // Older versions may not work. package cldr // import "golang.org/x/text/unicode/cldr" import ( "fmt" "sort" ) // CLDR provides access to parsed data of the Unicode Common Locale Data Repository. type CLDR struct { parent map[string][]string locale map[string]*LDML resolved map[string]*LDML bcp47 *LDMLBCP47 supp *SupplementalData } func makeCLDR() *CLDR { return &CLDR{ parent: make(map[string][]string), locale: make(map[string]*LDML), resolved: make(map[string]*LDML), bcp47: &LDMLBCP47{}, supp: &SupplementalData{}, } } // BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned. func (cldr *CLDR) BCP47() *LDMLBCP47 { return nil } // Draft indicates the draft level of an element. type Draft int const ( Approved Draft = iota Contributed Provisional Unconfirmed ) var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""} // ParseDraft returns the Draft value corresponding to the given string. The // empty string corresponds to Approved. func ParseDraft(level string) (Draft, error) { if level == "" { return Approved, nil } for i, s := range drafts { if level == s { return Unconfirmed - Draft(i), nil } } return Approved, fmt.Errorf("cldr: unknown draft level %q", level) } func (d Draft) String() string { return drafts[len(drafts)-1-int(d)] } // SetDraftLevel sets which draft levels to include in the evaluated LDML. // Any draft element for which the draft level is higher than lev will be excluded. // If multiple draft levels are available for a single element, the one with the // lowest draft level will be selected, unless preferDraft is true, in which case // the highest draft will be chosen. // It is assumed that the underlying LDML is canonicalized. func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) { // TODO: implement cldr.resolved = make(map[string]*LDML) } // RawLDML returns the LDML XML for id in unresolved form. // id must be one of the strings returned by Locales. 
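// Editor's note: parseDraftSketch is an illustrative helper, not part of the
// vendored file. It shows how ParseDraft above behaves: the empty string (an
// absent draft attribute) maps to Approved, the named levels map to their
// Draft constants, and anything else is reported as an error.
func parseDraftSketch() error {
	if d, err := ParseDraft(""); err != nil || d != Approved {
		return fmt.Errorf("cldr: empty draft level should be Approved, got %v (%v)", d, err)
	}
	if d, err := ParseDraft("contributed"); err != nil || d != Contributed {
		return fmt.Errorf("cldr: expected Contributed, got %v (%v)", d, err)
	}
	if _, err := ParseDraft("tentative"); err == nil {
		return fmt.Errorf("cldr: unknown draft levels should be rejected")
	}
	return nil
}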
func (cldr *CLDR) RawLDML(loc string) *LDML { return cldr.locale[loc] } // LDML returns the fully resolved LDML XML for loc, which must be one of // the strings returned by Locales. func (cldr *CLDR) LDML(loc string) (*LDML, error) { return cldr.resolve(loc) } // Supplemental returns the parsed supplemental data. If no such data was parsed, // nil is returned. func (cldr *CLDR) Supplemental() *SupplementalData { return cldr.supp } // Locales returns the locales for which there exist files. // Valid sublocales for which there is no file are not included. // The root locale is always sorted first. func (cldr *CLDR) Locales() []string { loc := []string{"root"} hasRoot := false for l, _ := range cldr.locale { if l == "root" { hasRoot = true continue } loc = append(loc, l) } sort.Strings(loc[1:]) if !hasRoot { return loc[1:] } return loc } // Get fills in the fields of x based on the XPath path. func Get(e Elem, path string) (res Elem, err error) { return walkXPath(e, path) } collate.go000066400000000000000000000210761324746544700334640ustar00rootroot00000000000000gitlab-shell-v6.0.4-6feee1ccc265c765181533bbc719f91c364de2b3/go/vendor/golang.org/x/text/unicode/cldr// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package cldr import ( "bufio" "encoding/xml" "errors" "fmt" "strconv" "strings" "unicode" "unicode/utf8" ) // RuleProcessor can be passed to Collator's Process method, which // parses the rules and calls the respective method for each rule found. type RuleProcessor interface { Reset(anchor string, before int) error Insert(level int, str, context, extend string) error Index(id string) } const ( // cldrIndex is a Unicode-reserved sentinel value used to mark the start // of a grouping within an index. // We ignore any rule that starts with this rune. // See http://unicode.org/reports/tr35/#Collation_Elements for details. cldrIndex = "\uFDD0" // specialAnchor is the format in which to represent logical reset positions, // such as "first tertiary ignorable". specialAnchor = "<%s/>" ) // Process parses the rules for the tailorings of this collation // and calls the respective methods of p for each rule found. func (c Collation) Process(p RuleProcessor) (err error) { if len(c.Cr) > 0 { if len(c.Cr) > 1 { return fmt.Errorf("multiple cr elements, want 0 or 1") } return processRules(p, c.Cr[0].Data()) } if c.Rules.Any != nil { return c.processXML(p) } return errors.New("no tailoring data") } // processRules parses rules in the Collation Rule Syntax defined in // http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings. func processRules(p RuleProcessor, s string) (err error) { chk := func(s string, e error) string { if err == nil { err = e } return s } i := 0 // Save the line number for use after the loop. scanner := bufio.NewScanner(strings.NewReader(s)) for ; scanner.Scan() && err == nil; i++ { for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) { level := 5 var ch byte switch ch, s = s[0], s[1:]; ch { case '&': // followed by or '[' ']' if s = skipSpace(s); consume(&s, '[') { s = chk(parseSpecialAnchor(p, s)) } else { s = chk(parseAnchor(p, 0, s)) } case '<': // sort relation '<'{1,4}, optionally followed by '*'. for level = 1; consume(&s, '<'); level++ { } if level > 4 { err = fmt.Errorf("level %d > 4", level) } fallthrough case '=': // identity relation, optionally followed by *. 
if consume(&s, '*') { s = chk(parseSequence(p, level, s)) } else { s = chk(parseOrder(p, level, s)) } default: chk("", fmt.Errorf("illegal operator %q", ch)) break } } } if chk("", scanner.Err()); err != nil { return fmt.Errorf("%d: %v", i, err) } return nil } // parseSpecialAnchor parses the anchor syntax which is either of the form // ['before' <level>] <anchor> // or // [