pax_global_header00006660000000000000000000000064131223451160014510gustar00rootroot0000000000000052 comment=d4f8670e9dba7e0c9dd3f6da843d678afd21c587 golang-google-cloud-0.9.0/000077500000000000000000000000001312234511600153435ustar00rootroot00000000000000golang-google-cloud-0.9.0/.travis.yml000066400000000000000000000011241312234511600174520ustar00rootroot00000000000000sudo: false language: go go: - 1.6 - 1.7 - 1.8 install: - go get -v cloud.google.com/go/... script: - openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in key.json.enc -out key.json -d - GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" ./run-tests.sh $TRAVIS_COMMIT env: matrix: # The GCLOUD_TESTS_API_KEY environment variable. secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI= golang-google-cloud-0.9.0/AUTHORS000066400000000000000000000007751312234511600164240ustar00rootroot00000000000000# This is the official list of cloud authors for copyright purposes. # This file is distinct from the CONTRIBUTORS files. # See the latter for an explanation. # Names should be added to this file as: # Name or Organization # The email address is not required for organizations. Filippo Valsorda Google Inc. Ingo Oeser Palm Stone Games, Inc. Paweł Knap Péter Szilágyi Tyler Treat golang-google-cloud-0.9.0/CONTRIBUTING.md000066400000000000000000000143271312234511600176030ustar00rootroot00000000000000# Contributing 1. Sign one of the contributor license agreements below. 1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. 1. You will need to ensure that your `GOBIN` directory (by default `$GOPATH/bin`) is in your `PATH` so that git can find the command. 1. If you would like, you may want to set up aliases for git-codereview, such that `git codereview change` becomes `git change`. 
See the [godoc](https://godoc.org/golang.org/x/review/git-codereview) for details. 1. Should you run into issues with the git-codereview tool, please note that all error messages will assume that you have set up these aliases. 1. Get the cloud package by running `go get -d cloud.google.com/go`. 1. If you have already checked out the source, make sure that the remote git origin is https://code.googlesource.com/gocloud: git remote set-url origin https://code.googlesource.com/gocloud 1. Make sure your auth is configured correctly by visiting https://code.googlesource.com, clicking "Generate Password", and following the directions. 1. Make changes and create a change by running `git codereview change `, provide a commit message, and use `git codereview mail` to create a Gerrit CL. 1. Keep amending to the change with `git codereview change` and mail as your receive feedback. Each new mailed amendment will create a new patch set for your change in Gerrit. ## Integration Tests In addition to the unit tests, you may run the integration test suite. To run the integrations tests, creating and configuration of a project in the Google Developers Console is required. After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount). Ensure the project-level **Owner** [IAM role](console.cloud.google.com/iam-admin/iam/project) (or **Editor** and **Logs Configuration Writer** roles) are added to the service account. Once you create a project, set the following environment variables to be able to run the against the actual APIs. - **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) - **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. - **GCLOUD_TESTS_API_KEY**: Your API key. Install the [gcloud command-line tool][gcloudcli] to your machine and use it to create some resources used in integration tests. 
From the project's root directory: ``` sh # Set the default project in your env. $ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID # Authenticate the gcloud tool with your account. $ gcloud auth login # Create the indexes used in the datastore integration tests. $ gcloud preview datastore create-indexes datastore/testdata/index.yaml # Create a Google Cloud storage bucket with the same name as your test project, # and with the Stackdriver Logging service account as owner, for the sink # integration tests in logging. $ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID # Create a Spanner instance for the spanner integration tests. $ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test' # NOTE: Spanner instances are priced by the node-hour, so you may want to delete # the instance after testing with 'gcloud beta spanner instances delete'. ``` Once you've set the environment variables, you can run the integration tests by running: ``` sh $ go test -v cloud.google.com/go/... ``` ## Contributor License Agreements Before we can accept your pull requests you'll need to sign a Contributor License Agreement (CLA): - **If you are an individual writing original source code** and **you own the - intellectual property**, then you'll need to sign an [individual CLA][indvcla]. - **If you work for a company that wants to allow you to contribute your work**, then you'll need to sign a [corporate CLA][corpcla]. You can sign these electronically (just scroll to the bottom). After that, we'll be able to accept your pull requests. 
## Contributor Code of Conduct As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities. We are committed to making participation in this project a harassment-free experience for everyone, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, or nationality. Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery * Personal attacks * Trolling or insulting/derogatory comments * Public or private harassment * Publishing other's private information, such as physical or electronic addresses, without explicit permission * Other unethical or unprofessional conduct. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect of managing this project. Project maintainers who do not follow or enforce the Code of Conduct may be permanently removed from the project team. This code of conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by opening an issue or contacting one or more of the project maintainers. 
This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) [gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ [indvcla]: https://developers.google.com/open-source/cla/individual [corpcla]: https://developers.google.com/open-source/cla/corporate golang-google-cloud-0.9.0/CONTRIBUTORS000066400000000000000000000024451312234511600172300ustar00rootroot00000000000000# People who have agreed to one of the CLAs and can contribute patches. # The AUTHORS file lists the copyright holders; this file # lists people. For example, Google employees are listed here # but not in AUTHORS, because Google holds the copyright. # # https://developers.google.com/open-source/cla/individual # https://developers.google.com/open-source/cla/corporate # # Names should be added to this file as: # Name # Keep the list alphabetically sorted. Alexis Hunt Andreas Litt Andrew Gerrand Brad Fitzpatrick Burcu Dogan Dave Day David Sansome David Symonds Filippo Valsorda Glenn Lewis Ingo Oeser Johan Euphrosine Jonathan Amsterdam Luna Duclos Magnus Hiie Michael McGreevy Omar Jarjur Paweł Knap Péter Szilágyi Sarah Adams Thanatat Tamtan Toby Burress Tuo Shan Tyler Treat golang-google-cloud-0.9.0/LICENSE000066400000000000000000000261161312234511600163560ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2014 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. golang-google-cloud-0.9.0/README.md000066400000000000000000000351311312234511600166250ustar00rootroot00000000000000# Google Cloud Client Libraries for Go [![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go) Go packages for [Google Cloud Platform](https://cloud.google.com) services. ``` go import "cloud.google.com/go" ``` To install the packages on your system, ``` $ go get -u cloud.google.com/go/... ``` **NOTE:** Some of these packages are under development, and may occasionally make backwards-incompatible changes. **NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). * [News](#news) * [Supported APIs](#supported-apis) * [Go Versions Supported](#go-versions-supported) * [Authorization](#authorization) * [Cloud Datastore](#cloud-datastore-) * [Cloud Storage](#cloud-storage-) * [Cloud Pub/Sub](#cloud-pub-sub-) * [Cloud BigQuery](#cloud-bigquery-) * [Stackdriver Logging](#stackdriver-logging-) * [Cloud Spanner](#cloud-spanner-) ## News _March 17, 2017_ Breaking Pubsub changes. * Publish is now asynchronous ([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)). * Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)). * Message.Done replaced with Message.Ack and Message.Nack. 
_February 14, 2017_ Release of a client library for Spanner. See the [blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html). Note that although the Spanner service is beta, the Go client library is alpha. [Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md) ## Supported APIs Google API | Status | Package ---------------------------------|--------------|----------------------------------------------------------- [Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref] [Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref] [Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] [BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] [Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref] [Monitoring][cloud-monitoring] | alpha | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref] [Pub/Sub][cloud-pubsub] | alpha | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] [Vision][cloud-vision] | beta | [`cloud.google.com/go/vision`][cloud-vision-ref] [Language][cloud-language] | beta | [`cloud.google.com/go/language/apiv1`][cloud-language-ref] [Speech][cloud-speech] | beta | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref] [Spanner][cloud-spanner] | alpha | [`cloud.google.com/go/spanner`][cloud-spanner-ref] [Translation][cloud-translation] | beta | [`cloud.google.com/go/translate`][cloud-translation-ref] [Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref] [ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errors`][cloud-errors-ref] > **Alpha status**: the API is still being actively developed. As a > result, it might change in backward-incompatible ways and is not recommended > for production use. 
> > **Beta status**: the API is largely complete, but still has outstanding > features and bugs to be addressed. There may be minor backwards-incompatible > changes where necessary. > > **Stable status**: the API is mature and ready for production use. We will > continue addressing bugs and feature requests. Documentation and examples are available at https://godoc.org/cloud.google.com/go Visit or join the [google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce) for updates on these packages. ## Go Versions Supported We support the two most recent major versions of Go. If Google App Engine uses an older version, we support that as well. You can see which versions are currently supported by looking at the lines following `go:` in [`.travis.yml`](.travis.yml). ## Authorization By default, each API will use [Google Application Default Credentials][default-creds] for authorization credentials used in calling the API endpoints. This will allow your application to run in many environments without requiring explicit configuration. [snip]:# (auth) ```go client, err := storage.NewClient(ctx) ``` To authorize using a [JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), pass [`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile) to the `NewClient` function of the desired package. For example: [snip]:# (auth-JSON) ```go client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json")) ``` You can exert more control over authorization by using the [`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to create an `oauth2.TokenSource`. Then pass [`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource) to the `NewClient` function: [snip]:# (auth-ts) ```go tokenSource := ... 
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) ``` ## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore) - [About Cloud Datastore][cloud-datastore] - [Activating the API for your project][cloud-datastore-activation] - [API documentation][cloud-datastore-docs] - [Go client documentation](https://godoc.org/cloud.google.com/go/datastore) - [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks) ### Example Usage First create a `datastore.Client` to use throughout your application: [snip]:# (datastore-1) ```go client, err := datastore.NewClient(ctx, "my-project-id") if err != nil { log.Fatal(err) } ``` Then use that client to interact with the API: [snip]:# (datastore-2) ```go type Post struct { Title string Body string `datastore:",noindex"` PublishedAt time.Time } keys := []*datastore.Key{ datastore.NameKey("Post", "post1", nil), datastore.NameKey("Post", "post2", nil), } posts := []*Post{ {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, } if _, err := client.PutMulti(ctx, keys, posts); err != nil { log.Fatal(err) } ``` ## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) - [About Cloud Storage][cloud-storage] - [API documentation][cloud-storage-docs] - [Go client documentation](https://godoc.org/cloud.google.com/go/storage) - [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage) ### Example Usage First create a `storage.Client` to use throughout your application: [snip]:# (storage-1) ```go client, err := storage.NewClient(ctx) if err != nil { log.Fatal(err) } ``` [snip]:# (storage-2) ```go // Read the object1 from bucket. 
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) if err != nil { log.Fatal(err) } defer rc.Close() body, err := ioutil.ReadAll(rc) if err != nil { log.Fatal(err) } ``` ## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) - [About Cloud Pubsub][cloud-pubsub] - [API documentation][cloud-pubsub-docs] - [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub) - [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub) ### Example Usage First create a `pubsub.Client` to use throughout your application: [snip]:# (pubsub-1) ```go client, err := pubsub.NewClient(ctx, "project-id") if err != nil { log.Fatal(err) } ``` Then use the client to publish and subscribe: [snip]:# (pubsub-2) ```go // Publish "hello world" on topic1. topic := client.Topic("topic1") res := topic.Publish(ctx, &pubsub.Message{ Data: []byte("hello world"), }) // The publish happens asynchronously. // Later, you can get the result from res: ... msgID, err := res.Get(ctx) if err != nil { log.Fatal(err) } // Use a callback to receive messages via subscription1. sub := client.Subscription("subscription1") err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { fmt.Println(m.Data) m.Ack() // Acknowledge that we've consumed the message. }) if err != nil { log.Println(err) } ``` ## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery) - [About Cloud BigQuery][cloud-bigquery] - [API documentation][cloud-bigquery-docs] - [Go client documentation][cloud-bigquery-ref] - [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery) ### Example Usage First create a `bigquery.Client` to use throughout your application: [snip]:# (bq-1) ```go c, err := bigquery.NewClient(ctx, "my-project-ID") if err != nil { // TODO: Handle error. 
} ``` Then use that client to interact with the API: [snip]:# (bq-2) ```go // Construct a query. q := c.Query(` SELECT year, SUM(number) FROM [bigquery-public-data:usa_names.usa_1910_2013] WHERE name = "William" GROUP BY year ORDER BY year `) // Execute the query. it, err := q.Read(ctx) if err != nil { // TODO: Handle error. } // Iterate through the results. for { var values []bigquery.Value err := it.Next(&values) if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(values) } ``` ## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging) - [About Stackdriver Logging][cloud-logging] - [API documentation][cloud-logging-docs] - [Go client documentation][cloud-logging-ref] - [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging) ### Example Usage First create a `logging.Client` to use throughout your application: [snip]:# (logging-1) ```go ctx := context.Background() client, err := logging.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } ``` Usually, you'll want to add log entries to a buffer to be periodically flushed (automatically and asynchronously) to the Stackdriver Logging service. [snip]:# (logging-2) ```go logger := client.Logger("my-log") logger.Log(logging.Entry{Payload: "something happened!"}) ``` Close your client before your program exits, to flush any buffered log entries. [snip]:# (logging-3) ```go err = client.Close() if err != nil { // TODO: Handle error. 
} ``` ## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner) - [About Cloud Spanner][cloud-spanner] - [API documentation][cloud-spanner-docs] - [Go client documentation](https://godoc.org/cloud.google.com/go/spanner) ### Example Usage First create a `spanner.Client` to use throughout your application: [snip]:# (spanner-1) ```go client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") if err != nil { log.Fatal(err) } ``` [snip]:# (spanner-2) ```go // Simple Reads And Writes _, err = client.Apply(ctx, []*spanner.Mutation{ spanner.Insert("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"})}) if err != nil { log.Fatal(err) } row, err := client.Single().ReadRow(ctx, "Users", spanner.Key{"alice"}, []string{"email"}) if err != nil { log.Fatal(err) } ``` ## Contributing Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md) document for details. We're using Gerrit for our code reviews. Please don't open pull requests against this repo, new pull requests will be automatically closed. Please note that this project is released with a Contributor Code of Conduct. By participating in this project you agree to abide by its terms. See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct) for more information. 
[cloud-datastore]: https://cloud.google.com/datastore/ [cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore [cloud-datastore-docs]: https://cloud.google.com/datastore/docs [cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate [cloud-pubsub]: https://cloud.google.com/pubsub/ [cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub [cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs [cloud-storage]: https://cloud.google.com/storage/ [cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage [cloud-storage-docs]: https://cloud.google.com/storage/docs [cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets [cloud-bigtable]: https://cloud.google.com/bigtable/ [cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable [cloud-bigquery]: https://cloud.google.com/bigquery/ [cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs [cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery [cloud-logging]: https://cloud.google.com/logging/ [cloud-logging-docs]: https://cloud.google.com/logging/docs [cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging [cloud-monitoring]: https://cloud.google.com/monitoring/ [cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3 [cloud-vision]: https://cloud.google.com/vision/ [cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision [cloud-language]: https://cloud.google.com/natural-language [cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1 [cloud-speech]: https://cloud.google.com/speech [cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1 [cloud-spanner]: https://cloud.google.com/spanner/ [cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner [cloud-spanner-docs]: https://cloud.google.com/spanner/docs [cloud-translation]: https://cloud.google.com/translation [cloud-translation-ref]: 
https://godoc.org/cloud.google.com/go/translation [cloud-trace]: https://cloud.google.com/trace/ [cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace [cloud-errors]: https://cloud.google.com/error-reporting/ [cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errors [default-creds]: https://developers.google.com/identity/protocols/application-default-credentials golang-google-cloud-0.9.0/appveyor.yml000066400000000000000000000046011312234511600177340ustar00rootroot00000000000000# This file configures AppVeyor (http://www.appveyor.com), # a Windows-based CI service similar to Travis. # Identifier for this run version: "{build}" # Clone the repo into this path, which conforms to the standard # Go workspace structure. clone_folder: c:\gopath\src\cloud.google.com\go environment: GOPATH: c:\gopath GCLOUD_TESTS_GOLANG_PROJECT_ID: dulcet-port-762 GCLOUD_TESTS_GOLANG_KEY: c:\gopath\src\cloud.google.com\go\key.json KEYFILE_CONTENTS: secure: IvRbDAhM2PIQqzVkjzJ4FjizUvoQ+c3vG/qhJQG+HlZ/L5KEkqLu+x6WjLrExrNMyGku4znB2jmbTrUW3Ob4sGG+R5vvqeQ3YMHCVIkw5CxY+/bUDkW5RZWsVbuCnNa/vKsWmCP+/sZW6ICe29yKJ2ZOb6QaauI4s9R6j+cqBbU9pumMGYFRb0Rw3uUU7DKmVFCy+NjTENZIlDP9rmjANgAzigowJJEb2Tg9sLlQKmQeKiBSRN8lKc5Nq60a+fIzHGKvql4eIitDDDpOpyHv15/Xr1BzFw2yDoiR4X1lng0u7q0X9RgX4VIYa6gT16NXBEmQgbuX8gh7SfPMp9RhiZD9sVUaV+yogEabYpyPnmUURo0hXwkctKaBkQlEmKvjHwF5dvbg8+yqGhwtjAgFNimXG3INrwQsfQsZskkQWanutbJf9xy50GyWWFZZdi0uT4oXP/b5P7aklPXKXsvrJKBh7RjEaqBrhi86IJwOjBspvoR4l2WmcQyxb2xzQS1pjbBJFQfYJJ8+JgsstTL8PBO9d4ybJC0li1Om1qnWxkaewvPxxuoHJ9LpRKof19yRYWBmhTXb2tTASKG/zslvl4fgG4DmQBS93WC7dsiGOhAraGw2eCTgd0lYZOhk1FjWl9TS80aktXxzH/7nTvem5ohm+eDl6O0wnTL4KXjQVNSQ1PyLn4lGRJ5MNGzBTRFWIr2API2rca4Fysyfh/UdmazPGlNbY9JPGqb9+F04QzLfqm+Zz/cHy59E7lOSMBlUI4KD6d6ZNNKNRH+/g9i+fSiyiXKugTfda8KBnWGyPwprxuWGYaiQUGUYOwJY5R6x5c4mjImAB310V+Wo33UbWFJiwxEDsiCNqW1meVkBzt2er26vh4qbgCUIQ3iM3gFPfHgy+QxkmIhic7Q1HYacQElt8AAP41M7cCKWCuZidegP37MBB//mjjiNt047ZSQEvB4tqsX/OvfbByVef+cbtVw9T0yjHvmCdPW1XrhyrCCgclu6oYYdbmc5D7BBDRbjjMWGv6YvceAbfG
f6ukdB5PuV+TGEN/FoQ1QTRA6Aqf+3fLMg4mS4oyTfw5xyYNbv3qoyLPrp+BnxI53WB9p0hfMg4n9FD6NntBxjDq+Q3Lk/bjC/Y4MaRWdzbMzF9a0lgGfcw9DURlK5p7uGJC9vg34feNoQprxVEZRQ01cHLeob6eGkYm4HxSRx8JY39Mh+9wzJo+k/aIvFleNC3e35NOrkXr6wb5e42n2DwBdPqdNolTLtLFRglAL1LTpp27UjvjieWJAKfoDTR5CKl01sZqt0wPdLLcvsMj6CiPFmccUIOYeZMe86kLBD61Qa5F1EwkgO3Om2qSjW96FzL4skRc+BmU5RrHlAFSldR1wpUgtkUMv9vH5Cy+UJdcvpZ8KbmhZ2PsjF7ddJ1ve9RAw3cP325AyIMwZ77Ef1mgTM0NJze6eSW1qKlEsgt1FADPyeUu1NQTA2H2dueMPGlArWTSUgyWR9AdfpqouT7eg0JWI5w+yUZZC+/rPglYbt84oLmYpwuli0z8FyEQRPIc3EtkfWIv/yYgDr2TZ0N2KvGfpi/MAUWgxI1gleC2uKgEOEtuJthd3XZjF2NoE7IBqjQOINybcJOjyeB5vRLDY1FLuxYzdg1y1etkV4XQig/vje install: # Info for debugging. - echo %PATH% - go version - go env - go get -v -d -t ./... # Provide a build script, or AppVeyor will call msbuild. build_script: - go install -v ./... - echo %KEYFILE_CONTENTS% > %GCLOUD_TESTS_GOLANG_KEY% test_script: - go test -v ./... golang-google-cloud-0.9.0/authexample_test.go000066400000000000000000000034621312234511600212530ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cloud_test import ( "cloud.google.com/go/datastore" "golang.org/x/net/context" "google.golang.org/api/option" ) func Example_applicationDefaultCredentials() { // Google Application Default Credentials is the recommended way to authorize // and authenticate clients. 
// // See the following link on how to create and obtain Application Default Credentials: // https://developers.google.com/identity/protocols/application-default-credentials. client, err := datastore.NewClient(context.Background(), "project-id") if err != nil { // TODO: handle error. } _ = client // Use the client. } func Example_serviceAccountFile() { // Use a JSON key file associated with a Google service account to // authenticate and authorize. Service Account keys can be created and // downloaded from https://console.developers.google.com/permissions/serviceaccounts. // // Note: This example uses the datastore client, but the same steps apply to // the other client libraries underneath this package. client, err := datastore.NewClient(context.Background(), "project-id", option.WithServiceAccountFile("/path/to/service-account-key.json")) if err != nil { // TODO: handle error. } _ = client // Use the client. } golang-google-cloud-0.9.0/bigquery/000077500000000000000000000000001312234511600171725ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigquery/bigquery.go000066400000000000000000000046641312234511600213620ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery // TODO(mcgreevy): support dry-run mode when creating jobs. 
import ( "fmt" "google.golang.org/api/option" "google.golang.org/api/transport" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) const prodAddr = "https://www.googleapis.com/bigquery/v2/" // ExternalData is a table which is stored outside of BigQuery. It is implemented by GCSReference. type ExternalData interface { externalDataConfig() bq.ExternalDataConfiguration } const Scope = "https://www.googleapis.com/auth/bigquery" const userAgent = "gcloud-golang-bigquery/20160429" // Client may be used to perform BigQuery operations. type Client struct { service service projectID string } // NewClient constructs a new Client which can perform BigQuery operations. // Operations performed via the client are billed to the specified GCP project. func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { o := []option.ClientOption{ option.WithEndpoint(prodAddr), option.WithScopes(Scope), option.WithUserAgent(userAgent), } o = append(o, opts...) httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } s, err := newBigqueryService(httpClient, endpoint) if err != nil { return nil, fmt.Errorf("constructing bigquery client: %v", err) } c := &Client{ service: s, projectID: projectID, } return c, nil } // Close closes any resources held by the client. // Close should be called when the client is no longer needed. // It need not be called at program exit. func (c *Client) Close() error { return nil } func (c *Client) insertJob(ctx context.Context, conf *insertJobConf) (*Job, error) { job, err := c.service.insertJob(ctx, c.projectID, conf) if err != nil { return nil, err } job.c = c return job, nil } golang-google-cloud-0.9.0/bigquery/copy.go000066400000000000000000000045351312234511600205020ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) // CopyConfig holds the configuration for a copy job. type CopyConfig struct { // JobID is the ID to use for the copy job. If unset, a job ID will be automatically created. JobID string // Srcs are the tables from which data will be copied. Srcs []*Table // Dst is the table into which the data will be copied. Dst *Table // CreateDisposition specifies the circumstances under which the destination table will be created. // The default is CreateIfNeeded. CreateDisposition TableCreateDisposition // WriteDisposition specifies how existing data in the destination table is treated. // The default is WriteAppend. WriteDisposition TableWriteDisposition } // A Copier copies data into a BigQuery table from one or more BigQuery tables. type Copier struct { CopyConfig c *Client } // CopierFrom returns a Copier which can be used to copy data into a // BigQuery table from one or more BigQuery tables. // The returned Copier may optionally be further configured before its Run method is called. func (t *Table) CopierFrom(srcs ...*Table) *Copier { return &Copier{ c: t.c, CopyConfig: CopyConfig{ Srcs: srcs, Dst: t, }, } } // Run initiates a copy job. 
func (c *Copier) Run(ctx context.Context) (*Job, error) { conf := &bq.JobConfigurationTableCopy{ CreateDisposition: string(c.CreateDisposition), WriteDisposition: string(c.WriteDisposition), DestinationTable: c.Dst.tableRefProto(), } for _, t := range c.Srcs { conf.SourceTables = append(conf.SourceTables, t.tableRefProto()) } job := &bq.Job{Configuration: &bq.JobConfiguration{Copy: conf}} setJobRef(job, c.JobID, c.c.projectID) return c.c.insertJob(ctx, &insertJobConf{job: job}) } golang-google-cloud-0.9.0/bigquery/copy_test.go000066400000000000000000000060541312234511600215370ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package bigquery import ( "reflect" "testing" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) func defaultCopyJob() *bq.Job { return &bq.Job{ Configuration: &bq.JobConfiguration{ Copy: &bq.JobConfigurationTableCopy{ DestinationTable: &bq.TableReference{ ProjectId: "d-project-id", DatasetId: "d-dataset-id", TableId: "d-table-id", }, SourceTables: []*bq.TableReference{ { ProjectId: "s-project-id", DatasetId: "s-dataset-id", TableId: "s-table-id", }, }, }, }, } } func TestCopy(t *testing.T) { testCases := []struct { dst *Table srcs []*Table config CopyConfig want *bq.Job }{ { dst: &Table{ ProjectID: "d-project-id", DatasetID: "d-dataset-id", TableID: "d-table-id", }, srcs: []*Table{ { ProjectID: "s-project-id", DatasetID: "s-dataset-id", TableID: "s-table-id", }, }, want: defaultCopyJob(), }, { dst: &Table{ ProjectID: "d-project-id", DatasetID: "d-dataset-id", TableID: "d-table-id", }, srcs: []*Table{ { ProjectID: "s-project-id", DatasetID: "s-dataset-id", TableID: "s-table-id", }, }, config: CopyConfig{ CreateDisposition: CreateNever, WriteDisposition: WriteTruncate, }, want: func() *bq.Job { j := defaultCopyJob() j.Configuration.Copy.CreateDisposition = "CREATE_NEVER" j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE" return j }(), }, { dst: &Table{ ProjectID: "d-project-id", DatasetID: "d-dataset-id", TableID: "d-table-id", }, srcs: []*Table{ { ProjectID: "s-project-id", DatasetID: "s-dataset-id", TableID: "s-table-id", }, }, config: CopyConfig{JobID: "job-id"}, want: func() *bq.Job { j := defaultCopyJob() j.JobReference = &bq.JobReference{ JobId: "job-id", ProjectId: "client-project-id", } return j }(), }, } for _, tc := range testCases { s := &testService{} c := &Client{ service: s, projectID: "client-project-id", } tc.dst.c = c copier := tc.dst.CopierFrom(tc.srcs...) 
tc.config.Srcs = tc.srcs tc.config.Dst = tc.dst copier.CopyConfig = tc.config if _, err := copier.Run(context.Background()); err != nil { t.Errorf("err calling Run: %v", err) continue } if !reflect.DeepEqual(s.Job, tc.want) { t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want) } } } golang-google-cloud-0.9.0/bigquery/create_table_test.go000066400000000000000000000054601312234511600231770ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package bigquery import ( "reflect" "testing" "time" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) type createTableRecorder struct { conf *createTableConf service } func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error { rec.conf = conf return nil } func TestCreateTableOptions(t *testing.T) { s := &createTableRecorder{} c := &Client{ projectID: "p", service: s, } ds := c.Dataset("d") table := ds.Table("t") exp := time.Now() q := "query" if err := table.Create(context.Background(), TableExpiration(exp), ViewQuery(q), UseStandardSQL()); err != nil { t.Fatalf("err calling Table.Create: %v", err) } want := createTableConf{ projectID: "p", datasetID: "d", tableID: "t", expiration: exp, viewQuery: q, useStandardSQL: true, } if !reflect.DeepEqual(*s.conf, want) { t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want) } sc := Schema{fieldSchema("desc", "name", "STRING", false, true)} if err := table.Create(context.Background(), TableExpiration(exp), sc); err != nil { t.Fatalf("err calling Table.Create: %v", err) } want = createTableConf{ projectID: "p", datasetID: "d", tableID: "t", expiration: exp, // No need for an elaborate schema, that is tested in schema_test.go. 
schema: &bq.TableSchema{ Fields: []*bq.TableFieldSchema{ bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), }, }, } if !reflect.DeepEqual(*s.conf, want) { t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want) } partitionCases := []struct { timePartitioning TimePartitioning expectedExpiration time.Duration }{ {TimePartitioning{}, time.Duration(0)}, {TimePartitioning{time.Second}, time.Second}, } for _, c := range partitionCases { if err := table.Create(context.Background(), c.timePartitioning); err != nil { t.Fatalf("err calling Table.Create: %v", err) } want = createTableConf{ projectID: "p", datasetID: "d", tableID: "t", timePartitioning: &TimePartitioning{c.expectedExpiration}, } if !reflect.DeepEqual(*s.conf, want) { t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want) } } } golang-google-cloud-0.9.0/bigquery/dataset.go000066400000000000000000000135701312234511600211540ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "time" "golang.org/x/net/context" "google.golang.org/api/iterator" ) // Dataset is a reference to a BigQuery dataset. type Dataset struct { ProjectID string DatasetID string c *Client } type DatasetMetadata struct { CreationTime time.Time LastModifiedTime time.Time // When the dataset or any of its tables were modified. DefaultTableExpiration time.Duration Description string // The user-friendly description of this table. 
Name string // The user-friendly name for this table. ID string Location string // The geo location of the dataset. Labels map[string]string // User-provided labels. // TODO(jba): access rules } // Dataset creates a handle to a BigQuery dataset in the client's project. func (c *Client) Dataset(id string) *Dataset { return c.DatasetInProject(c.projectID, id) } // DatasetInProject creates a handle to a BigQuery dataset in the specified project. func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset { return &Dataset{ ProjectID: projectID, DatasetID: datasetID, c: c, } } // Create creates a dataset in the BigQuery service. An error will be returned // if the dataset already exists. func (d *Dataset) Create(ctx context.Context) error { return d.c.service.insertDataset(ctx, d.DatasetID, d.ProjectID) } // Delete deletes the dataset. func (d *Dataset) Delete(ctx context.Context) error { return d.c.service.deleteDataset(ctx, d.DatasetID, d.ProjectID) } // Metadata fetches the metadata for the dataset. func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) { return d.c.service.getDatasetMetadata(ctx, d.ProjectID, d.DatasetID) } // Table creates a handle to a BigQuery table in the dataset. // To determine if a table exists, call Table.Metadata. // If the table does not already exist, use Table.Create to create it. func (d *Dataset) Table(tableID string) *Table { return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c} } // Tables returns an iterator over the tables in the Dataset. func (d *Dataset) Tables(ctx context.Context) *TableIterator { it := &TableIterator{ ctx: ctx, dataset: d, } it.pageInfo, it.nextFunc = iterator.NewPageInfo( it.fetch, func() int { return len(it.tables) }, func() interface{} { b := it.tables; it.tables = nil; return b }) return it } // A TableIterator is an iterator over Tables. 
type TableIterator struct { ctx context.Context dataset *Dataset tables []*Table pageInfo *iterator.PageInfo nextFunc func() error } // Next returns the next result. Its second return value is Done if there are // no more results. Once Next returns Done, all subsequent calls will return // Done. func (it *TableIterator) Next() (*Table, error) { if err := it.nextFunc(); err != nil { return nil, err } t := it.tables[0] it.tables = it.tables[1:] return t, nil } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) { tables, tok, err := it.dataset.c.service.listTables(it.ctx, it.dataset.ProjectID, it.dataset.DatasetID, pageSize, pageToken) if err != nil { return "", err } for _, t := range tables { t.c = it.dataset.c it.tables = append(it.tables, t) } return tok, nil } // Datasets returns an iterator over the datasets in the Client's project. func (c *Client) Datasets(ctx context.Context) *DatasetIterator { return c.DatasetsInProject(ctx, c.projectID) } // DatasetsInProject returns an iterator over the datasets in the provided project. func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator { it := &DatasetIterator{ ctx: ctx, c: c, projectID: projectID, } it.pageInfo, it.nextFunc = iterator.NewPageInfo( it.fetch, func() int { return len(it.items) }, func() interface{} { b := it.items; it.items = nil; return b }) return it } // DatasetIterator iterates over the datasets in a project. type DatasetIterator struct { // ListHidden causes hidden datasets to be listed when set to true. ListHidden bool // Filter restricts the datasets returned by label. 
The filter syntax is described in // https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels Filter string ctx context.Context projectID string c *Client pageInfo *iterator.PageInfo nextFunc func() error items []*Dataset } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *DatasetIterator) Next() (*Dataset, error) { if err := it.nextFunc(); err != nil { return nil, err } item := it.items[0] it.items = it.items[1:] return item, nil } func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) { datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.projectID, pageSize, pageToken, it.ListHidden, it.Filter) if err != nil { return "", err } for _, d := range datasets { d.c = it.c it.items = append(it.items, d) } return nextPageToken, nil } golang-google-cloud-0.9.0/bigquery/dataset_test.go000066400000000000000000000104761312234511600222150ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "errors" "strconv" "testing" "golang.org/x/net/context" itest "google.golang.org/api/iterator/testing" ) // readServiceStub services read requests by returning data from an in-memory list of values. 
type listTablesServiceStub struct { expectedProject, expectedDataset string tables []*Table service } func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) { if projectID != s.expectedProject { return nil, "", errors.New("wrong project id") } if datasetID != s.expectedDataset { return nil, "", errors.New("wrong dataset id") } const maxPageSize = 2 if pageSize <= 0 || pageSize > maxPageSize { pageSize = maxPageSize } start := 0 if pageToken != "" { var err error start, err = strconv.Atoi(pageToken) if err != nil { return nil, "", err } } end := start + pageSize if end > len(s.tables) { end = len(s.tables) } nextPageToken := "" if end < len(s.tables) { nextPageToken = strconv.Itoa(end) } return s.tables[start:end], nextPageToken, nil } func TestTables(t *testing.T) { t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"} t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"} t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"} allTables := []*Table{t1, t2, t3} c := &Client{ service: &listTablesServiceStub{ expectedProject: "x", expectedDataset: "y", tables: allTables, }, projectID: "x", } msg, ok := itest.TestIterator(allTables, func() interface{} { return c.Dataset("y").Tables(context.Background()) }, func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() }) if !ok { t.Error(msg) } } type listDatasetsFake struct { service projectID string datasets []*Dataset hidden map[*Dataset]bool } func (df *listDatasetsFake) listDatasets(_ context.Context, projectID string, pageSize int, pageToken string, listHidden bool, filter string) ([]*Dataset, string, error) { const maxPageSize = 2 if pageSize <= 0 || pageSize > maxPageSize { pageSize = maxPageSize } if filter != "" { return nil, "", errors.New("filter not supported") } if projectID != df.projectID { return nil, "", errors.New("bad project ID") } start := 0 if pageToken != "" { var 
err error start, err = strconv.Atoi(pageToken) if err != nil { return nil, "", err } } var ( i int result []*Dataset nextPageToken string ) for i = start; len(result) < pageSize && i < len(df.datasets); i++ { if df.hidden[df.datasets[i]] && !listHidden { continue } result = append(result, df.datasets[i]) } if i < len(df.datasets) { nextPageToken = strconv.Itoa(i) } return result, nextPageToken, nil } func TestDatasets(t *testing.T) { service := &listDatasetsFake{projectID: "p"} client := &Client{service: service} datasets := []*Dataset{ {"p", "a", client}, {"p", "b", client}, {"p", "hidden", client}, {"p", "c", client}, } service.datasets = datasets service.hidden = map[*Dataset]bool{datasets[2]: true} c := &Client{ projectID: "p", service: service, } msg, ok := itest.TestIterator(datasets, func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = true; return it }, func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() }) if !ok { t.Fatalf("ListHidden=true: %s", msg) } msg, ok = itest.TestIterator([]*Dataset{datasets[0], datasets[1], datasets[3]}, func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = false; return it }, func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() }) if !ok { t.Fatalf("ListHidden=false: %s", msg) } } golang-google-cloud-0.9.0/bigquery/doc.go000066400000000000000000000213711312234511600202720ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. /* Package bigquery provides a client for the BigQuery service. Note: This package is in beta. Some backwards-incompatible changes may occur. The following assumes a basic familiarity with BigQuery concepts. See https://cloud.google.com/bigquery/docs. Creating a Client To start working with this package, create a client: ctx := context.Background() client, err := bigquery.NewClient(ctx, projectID) if err != nil { // TODO: Handle error. } Querying To query existing tables, create a Query and call its Read method: q := client.Query(` SELECT year, SUM(number) as num FROM [bigquery-public-data:usa_names.usa_1910_2013] WHERE name = "William" GROUP BY year ORDER BY year `) it, err := q.Read(ctx) if err != nil { // TODO: Handle error. } Then iterate through the resulting rows. You can store a row using anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value. A slice is simplest: for { var values []bigquery.Value err := it.Next(&values) if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(values) } You can also use a struct whose exported fields match the query: type Count struct { Year int Num int } for { var c Count err := it.Next(&c) if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(c) } You can also start the query running and get the results later. Create the query as above, but call Run instead of Read. This returns a Job, which represents an asychronous operation. job, err := q.Run(ctx) if err != nil { // TODO: Handle error. } Get the job's ID, a printable string. You can save this string to retrieve the results at a later time, even in another process. jobID := job.ID() fmt.Printf("The job ID is %s\n", jobID) To retrieve the job's results from the ID, first look up the Job: job, err = client.JobFromID(ctx, jobID) if err != nil { // TODO: Handle error. 
} Use the Job.Read method to obtain an iterator, and loop over the rows. Query.Read is just a convenience method that combines Query.Run and Job.Read. it, err = job.Read(ctx) if err != nil { // TODO: Handle error. } // Proceed with iteration as above. Datasets and Tables You can refer to datasets in the client's project with the Dataset method, and in other projects with the DatasetInProject method: myDataset := client.Dataset("my_dataset") yourDataset := client.DatasetInProject("your-project-id", "your_dataset") These methods create references to datasets, not the datasets themselves. You can have a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to create a dataset from a reference: if err := myDataset.Create(ctx); err != nil { // TODO: Handle error. } You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference to an object in BigQuery that may or may not exist. table := myDataset.Table("my_table") You can create, delete and update the metadata of tables with methods on Table. Table.Create supports a few options. For instance, you could create a temporary table with: err = myDataset.Table("temp").Create(ctx, bigquery.TableExpiration(time.Now().Add(1*time.Hour))) if err != nil { // TODO: Handle error. } We'll see how to create a table with a schema in the next section. Schemas There are two ways to construct schemas with this package. You can build a schema by hand, like so: schema1 := bigquery.Schema{ &bigquery.FieldSchema{Name: "Name", Required: true, Type: bigquery.StringFieldType}, &bigquery.FieldSchema{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType}, } Or you can infer the schema from a struct: type student struct { Name string Grades []int } schema2, err := bigquery.InferSchema(student{}) if err != nil { // TODO: Handle error. } // schema1 and schema2 are identical. 
Struct inference supports tags like those of the encoding/json package, so you can change names or ignore fields: type student2 struct { Name string `bigquery:"full_name"` Grades []int Secret string `bigquery:"-"` } schema3, err := bigquery.InferSchema(student2{}) if err != nil { // TODO: Handle error. } // schema3 has fields "full_name" and "Grade". Having constructed a schema, you can pass it to Table.Create as an option: if err := table.Create(ctx, schema1); err != nil { // TODO: Handle error. } Copying You can copy one or more tables to another table. Begin by constructing a Copier describing the copy. Then set any desired copy options, and finally call Run to get a Job: copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src")) copier.WriteDisposition = bigquery.WriteTruncate job, err = copier.Run(ctx) if err != nil { // TODO: Handle error. } You can chain the call to Run if you don't want to set options: job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx) if err != nil { // TODO: Handle error. } You can wait for your job to complete: status, err := job.Wait(ctx) if err != nil { // TODO: Handle error. } Job.Wait polls with exponential backoff. You can also poll yourself, if you wish: for { status, err := job.Status(ctx) if err != nil { // TODO: Handle error. } if status.Done() { if status.Err() != nil { log.Fatalf("Job failed with error %v", status.Err()) } break } time.Sleep(pollInterval) } Loading and Uploading There are two ways to populate a table with this package: load the data from a Google Cloud Storage object, or upload rows directly from your program. For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure it as well, and call its Run method. 
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") gcsRef.AllowJaggedRows = true loader := myDataset.Table("dest").LoaderFrom(gcsRef) loader.CreateDisposition = bigquery.CreateNever job, err = loader.Run(ctx) // Poll the job for completion if desired, as above. To upload, first define a type that implements the ValueSaver interface, which has a single method named Save. Then create an Uploader, and call its Put method with a slice of values. u := table.Uploader() // Item implements the ValueSaver interface. items := []*Item{ {Name: "n1", Size: 32.6, Count: 7}, {Name: "n2", Size: 4, Count: 2}, {Name: "n3", Size: 101.5, Count: 1}, } if err := u.Put(ctx, items); err != nil { // TODO: Handle error. } You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type to specify the schema and insert ID by hand, or just supply the struct or struct pointer directly and the schema will be inferred: type Item2 struct { Name string Size float64 Count int } // Item implements the ValueSaver interface. items2 := []*Item2{ {Name: "n1", Size: 32.6, Count: 7}, {Name: "n2", Size: 4, Count: 2}, {Name: "n3", Size: 101.5, Count: 1}, } if err := u.Put(ctx, items2); err != nil { // TODO: Handle error. } Extracting If you've been following so far, extracting data from a BigQuery table into a Google Cloud Storage object will feel familiar. First create an Extractor, then optionally configure it, and lastly call its Run method. extractor := table.ExtractorTo(gcsRef) extractor.DisableHeader = true job, err = extractor.Run(ctx) // Poll the job for completion if desired, as above. Authentication See examples of authorization and authentication at https://godoc.org/cloud.google.com/go#pkg-examples. */ package bigquery // import "cloud.google.com/go/bigquery" golang-google-cloud-0.9.0/bigquery/error.go000066400000000000000000000044271312234511600206610ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "fmt" bq "google.golang.org/api/bigquery/v2" ) // An Error contains detailed information about a failed bigquery operation. type Error struct { // Mirrors bq.ErrorProto, but drops DebugInfo Location, Message, Reason string } func (e Error) Error() string { return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason) } func errorFromErrorProto(ep *bq.ErrorProto) *Error { if ep == nil { return nil } return &Error{ Location: ep.Location, Message: ep.Message, Reason: ep.Reason, } } // A MultiError contains multiple related errors. type MultiError []error func (m MultiError) Error() string { switch len(m) { case 0: return "(0 errors)" case 1: return m[0].Error() case 2: return m[0].Error() + " (and 1 other error)" } return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1) } // RowInsertionError contains all errors that occurred when attempting to insert a row. type RowInsertionError struct { InsertID string // The InsertID associated with the affected row. RowIndex int // The 0-based index of the affected row in the batch of rows being inserted. 
Errors MultiError } func (e *RowInsertionError) Error() string { errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s" return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error()) } // PutMultiError contains an error for each row which was not successfully inserted // into a BigQuery table. type PutMultiError []RowInsertionError func (pme PutMultiError) Error() string { plural := "s" if len(pme) == 1 { plural = "" } return fmt.Sprintf("%v row insertion%s failed", len(pme), plural) } golang-google-cloud-0.9.0/bigquery/error_test.go000066400000000000000000000052151312234511600217140ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package bigquery import ( "errors" "reflect" "strings" "testing" bq "google.golang.org/api/bigquery/v2" ) func rowInsertionError(msg string) RowInsertionError { return RowInsertionError{Errors: []error{errors.New(msg)}} } func TestPutMultiErrorString(t *testing.T) { testCases := []struct { errs PutMultiError want string }{ { errs: PutMultiError{}, want: "0 row insertions failed", }, { errs: PutMultiError{rowInsertionError("a")}, want: "1 row insertion failed", }, { errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")}, want: "2 row insertions failed", }, } for _, tc := range testCases { if tc.errs.Error() != tc.want { t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want) } } } func TestMultiErrorString(t *testing.T) { testCases := []struct { errs MultiError want string }{ { errs: MultiError{}, want: "(0 errors)", }, { errs: MultiError{errors.New("a")}, want: "a", }, { errs: MultiError{errors.New("a"), errors.New("b")}, want: "a (and 1 other error)", }, { errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")}, want: "a (and 2 other errors)", }, } for _, tc := range testCases { if tc.errs.Error() != tc.want { t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want) } } } func TestErrorFromErrorProto(t *testing.T) { for _, test := range []struct { in *bq.ErrorProto want *Error }{ {nil, nil}, { in: &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"}, want: &Error{Location: "L", Message: "M", Reason: "R"}, }, } { if got := errorFromErrorProto(test.in); !reflect.DeepEqual(got, test.want) { t.Errorf("%v: got %v, want %v", test.in, got, test.want) } } } func TestErrorString(t *testing.T) { e := &Error{Location: "", Message: "", Reason: ""} got := e.Error() if !strings.Contains(got, "") || !strings.Contains(got, "") || !strings.Contains(got, "") { t.Errorf(`got %q, expected to see "", "" and ""`, got) } } 
golang-google-cloud-0.9.0/bigquery/examples_test.go000066400000000000000000000356651312234511600224150ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery_test import ( "fmt" "os" "cloud.google.com/go/bigquery" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func ExampleNewClient() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } _ = client // TODO: Use client. } func ExampleClient_Dataset() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } ds := client.Dataset("my_dataset") fmt.Println(ds) } func ExampleClient_DatasetInProject() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } ds := client.DatasetInProject("their-project-id", "their-dataset") fmt.Println(ds) } func ExampleClient_Datasets() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } it := client.Datasets(ctx) _ = it // TODO: iterate using Next or iterator.Pager. } func ExampleClient_DatasetsInProject() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. 
} it := client.DatasetsInProject(ctx, "their-project-id") _ = it // TODO: iterate using Next or iterator.Pager. } func getJobID() string { return "" } func ExampleClient_JobFromID() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere. job, err := client.JobFromID(ctx, jobID) if err != nil { // TODO: Handle error. } fmt.Println(job) } func ExampleNewGCSReference() { gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") fmt.Println(gcsRef) } func ExampleClient_Query() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } q := client.Query("select name, num from t1") q.DefaultProjectID = "project-id" // TODO: set other options on the Query. // TODO: Call Query.Run or Query.Read. } func ExampleClient_Query_parameters() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } q := client.Query("select num from t1 where name = @user") q.Parameters = []bigquery.QueryParameter{ {Name: "user", Value: "Elizabeth"}, } // TODO: set other options on the Query. // TODO: Call Query.Run or Query.Read. } func ExampleQuery_Read() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } q := client.Query("select name, num from t1") it, err := q.Read(ctx) if err != nil { // TODO: Handle error. } _ = it // TODO: iterate using Next or iterator.Pager. } func ExampleRowIterator_Next() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } q := client.Query("select name, num from t1") it, err := q.Read(ctx) if err != nil { // TODO: Handle error. 
} for { var row []bigquery.Value err := it.Next(&row) if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(row) } } func ExampleRowIterator_Next_struct() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } type score struct { Name string Num int } q := client.Query("select name, num from t1") it, err := q.Read(ctx) if err != nil { // TODO: Handle error. } for { var s score err := it.Next(&s) if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(s) } } func ExampleJob_Read() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } q := client.Query("select name, num from t1") // Call Query.Run to get a Job, then call Read on the job. // Note: Query.Read is a shorthand for this. job, err := q.Run(ctx) if err != nil { // TODO: Handle error. } it, err := job.Read(ctx) if err != nil { // TODO: Handle error. } _ = it // TODO: iterate using Next or iterator.Pager. } func ExampleJob_Wait() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } ds := client.Dataset("my_dataset") job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx) if err != nil { // TODO: Handle error. } status, err := job.Wait(ctx) if err != nil { // TODO: Handle error. } if status.Err() != nil { // TODO: Handle error. } } func ExampleDataset_Create() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } if err := client.Dataset("my_dataset").Create(ctx); err != nil { // TODO: Handle error. } } func ExampleDataset_Delete() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } if err := client.Dataset("my_dataset").Delete(ctx); err != nil { // TODO: Handle error. 
} } func ExampleDataset_Metadata() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } md, err := client.Dataset("my_dataset").Metadata(ctx) if err != nil { // TODO: Handle error. } fmt.Println(md) } func ExampleDataset_Table() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } // Table creates a reference to the table. It does not create the actual // table in BigQuery; to do so, use Table.Create. t := client.Dataset("my_dataset").Table("my_table") fmt.Println(t) } func ExampleDataset_Tables() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } it := client.Dataset("my_dataset").Tables(ctx) _ = it // TODO: iterate using Next or iterator.Pager. } func ExampleDatasetIterator_Next() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } it := client.Datasets(ctx) for { ds, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(ds) } } func ExampleInferSchema() { type Item struct { Name string Size float64 Count int } schema, err := bigquery.InferSchema(Item{}) if err != nil { fmt.Println(err) // TODO: Handle error. } for _, fs := range schema { fmt.Println(fs.Name, fs.Type) } // Output: // Name STRING // Size FLOAT // Count INTEGER } func ExampleInferSchema_tags() { type Item struct { Name string Size float64 Count int `bigquery:"number"` Secret []byte `bigquery:"-"` } schema, err := bigquery.InferSchema(Item{}) if err != nil { fmt.Println(err) // TODO: Handle error. } for _, fs := range schema { fmt.Println(fs.Name, fs.Type) } // Output: // Name STRING // Size FLOAT // number INTEGER } func ExampleTable_Create() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. 
} t := client.Dataset("my_dataset").Table("new-table") if err := t.Create(ctx); err != nil { // TODO: Handle error. } } func ExampleTable_Create_schema() { ctx := context.Background() // Infer table schema from a Go type. schema, err := bigquery.InferSchema(Item{}) if err != nil { // TODO: Handle error. } client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } t := client.Dataset("my_dataset").Table("new-table") if err := t.Create(ctx, schema); err != nil { // TODO: Handle error. } } func ExampleTable_Delete() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } if err := client.Dataset("my_dataset").Table("my_table").Delete(ctx); err != nil { // TODO: Handle error. } } func ExampleTable_Metadata() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx) if err != nil { // TODO: Handle error. } fmt.Println(md) } func ExampleTable_Uploader() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } u := client.Dataset("my_dataset").Table("my_table").Uploader() _ = u // TODO: Use u. } func ExampleTable_Uploader_options() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } u := client.Dataset("my_dataset").Table("my_table").Uploader() u.SkipInvalidRows = true u.IgnoreUnknownValues = true _ = u // TODO: Use u. } func ExampleTable_CopierFrom() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } ds := client.Dataset("my_dataset") c := ds.Table("combined").CopierFrom(ds.Table("t1"), ds.Table("t2")) c.WriteDisposition = bigquery.WriteTruncate // TODO: set other options on the Copier. 
job, err := c.Run(ctx) if err != nil { // TODO: Handle error. } status, err := job.Wait(ctx) if err != nil { // TODO: Handle error. } if status.Err() != nil { // TODO: Handle error. } } func ExampleTable_ExtractorTo() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") gcsRef.FieldDelimiter = ":" // TODO: set other options on the GCSReference. ds := client.Dataset("my_dataset") extractor := ds.Table("my_table").ExtractorTo(gcsRef) extractor.DisableHeader = true // TODO: set other options on the Extractor. job, err := extractor.Run(ctx) if err != nil { // TODO: Handle error. } status, err := job.Wait(ctx) if err != nil { // TODO: Handle error. } if status.Err() != nil { // TODO: Handle error. } } func ExampleTable_LoaderFrom() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") gcsRef.AllowJaggedRows = true // TODO: set other options on the GCSReference. ds := client.Dataset("my_dataset") loader := ds.Table("my_table").LoaderFrom(gcsRef) loader.CreateDisposition = bigquery.CreateNever // TODO: set other options on the Loader. job, err := loader.Run(ctx) if err != nil { // TODO: Handle error. } status, err := job.Wait(ctx) if err != nil { // TODO: Handle error. } if status.Err() != nil { // TODO: Handle error. } } func ExampleTable_LoaderFrom_reader() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } f, err := os.Open("data.csv") if err != nil { // TODO: Handle error. } rs := bigquery.NewReaderSource(f) rs.AllowJaggedRows = true // TODO: set other options on the GCSReference. 
ds := client.Dataset("my_dataset") loader := ds.Table("my_table").LoaderFrom(rs) loader.CreateDisposition = bigquery.CreateNever // TODO: set other options on the Loader. job, err := loader.Run(ctx) if err != nil { // TODO: Handle error. } status, err := job.Wait(ctx) if err != nil { // TODO: Handle error. } if status.Err() != nil { // TODO: Handle error. } } func ExampleTable_Read() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } it := client.Dataset("my_dataset").Table("my_table").Read(ctx) _ = it // TODO: iterate using Next or iterator.Pager. } func ExampleTable_Update() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } t := client.Dataset("my_dataset").Table("my_table") tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{ Description: "my favorite table", }) if err != nil { // TODO: Handle error. } fmt.Println(tm) } func ExampleTableIterator_Next() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } it := client.Dataset("my_dataset").Tables(ctx) for { t, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(t) } } type Item struct { Name string Size float64 Count int } // Save implements the ValueSaver interface. func (i *Item) Save() (map[string]bigquery.Value, string, error) { return map[string]bigquery.Value{ "Name": i.Name, "Size": i.Size, "Count": i.Count, }, "", nil } func ExampleUploader_Put() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } u := client.Dataset("my_dataset").Table("my_table").Uploader() // Item implements the ValueSaver interface. 
items := []*Item{ {Name: "n1", Size: 32.6, Count: 7}, {Name: "n2", Size: 4, Count: 2}, {Name: "n3", Size: 101.5, Count: 1}, } if err := u.Put(ctx, items); err != nil { // TODO: Handle error. } } var schema bigquery.Schema func ExampleUploader_Put_structSaver() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } u := client.Dataset("my_dataset").Table("my_table").Uploader() type score struct { Name string Num int } // Assume schema holds the table's schema. savers := []*bigquery.StructSaver{ {Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"}, {Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"}, {Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"}, } if err := u.Put(ctx, savers); err != nil { // TODO: Handle error. } } func ExampleUploader_Put_struct() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } u := client.Dataset("my_dataset").Table("my_table").Uploader() type score struct { Name string Num int } scores := []score{ {Name: "n1", Num: 12}, {Name: "n2", Num: 31}, {Name: "n3", Num: 7}, } // Schema is inferred from the score type. if err := u.Put(ctx, scores); err != nil { // TODO: Handle error. } } golang-google-cloud-0.9.0/bigquery/extract.go000066400000000000000000000043511312234511600211760ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) // ExtractConfig holds the configuration for an extract job. type ExtractConfig struct { // JobID is the ID to use for the extract job. If empty, a job ID will be automatically created. JobID string // Src is the table from which data will be extracted. Src *Table // Dst is the destination into which the data will be extracted. Dst *GCSReference // DisableHeader disables the printing of a header row in exported data. DisableHeader bool } // An Extractor extracts data from a BigQuery table into Google Cloud Storage. type Extractor struct { ExtractConfig c *Client } // ExtractorTo returns an Extractor which can be used to extract data from a // BigQuery table into Google Cloud Storage. // The returned Extractor may optionally be further configured before its Run method is called. func (t *Table) ExtractorTo(dst *GCSReference) *Extractor { return &Extractor{ c: t.c, ExtractConfig: ExtractConfig{ Src: t, Dst: dst, }, } } // Run initiates an extract job. func (e *Extractor) Run(ctx context.Context) (*Job, error) { conf := &bq.JobConfigurationExtract{} job := &bq.Job{Configuration: &bq.JobConfiguration{Extract: conf}} setJobRef(job, e.JobID, e.c.projectID) conf.DestinationUris = append([]string{}, e.Dst.uris...) conf.Compression = string(e.Dst.Compression) conf.DestinationFormat = string(e.Dst.DestinationFormat) conf.FieldDelimiter = e.Dst.FieldDelimiter conf.SourceTable = e.Src.tableRefProto() if e.DisableHeader { f := false conf.PrintHeader = &f } return e.c.insertJob(ctx, &insertJobConf{job: job}) } golang-google-cloud-0.9.0/bigquery/extract_test.go000066400000000000000000000047361312234511600222440ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "reflect" "testing" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) func defaultExtractJob() *bq.Job { return &bq.Job{ Configuration: &bq.JobConfiguration{ Extract: &bq.JobConfigurationExtract{ SourceTable: &bq.TableReference{ ProjectId: "project-id", DatasetId: "dataset-id", TableId: "table-id", }, DestinationUris: []string{"uri"}, }, }, } } func TestExtract(t *testing.T) { s := &testService{} c := &Client{ service: s, projectID: "project-id", } testCases := []struct { dst *GCSReference src *Table config ExtractConfig want *bq.Job }{ { dst: defaultGCS(), src: c.Dataset("dataset-id").Table("table-id"), want: defaultExtractJob(), }, { dst: defaultGCS(), src: c.Dataset("dataset-id").Table("table-id"), config: ExtractConfig{DisableHeader: true}, want: func() *bq.Job { j := defaultExtractJob() f := false j.Configuration.Extract.PrintHeader = &f return j }(), }, { dst: func() *GCSReference { g := NewGCSReference("uri") g.Compression = Gzip g.DestinationFormat = JSON g.FieldDelimiter = "\t" return g }(), src: c.Dataset("dataset-id").Table("table-id"), want: func() *bq.Job { j := defaultExtractJob() j.Configuration.Extract.Compression = "GZIP" j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON" j.Configuration.Extract.FieldDelimiter = "\t" return j }(), }, } for _, tc := range testCases { ext := tc.src.ExtractorTo(tc.dst) tc.config.Src = ext.Src tc.config.Dst = 
ext.Dst ext.ExtractConfig = tc.config if _, err := ext.Run(context.Background()); err != nil { t.Errorf("err calling extract: %v", err) continue } if !reflect.DeepEqual(s.Job, tc.want) { t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want) } } } golang-google-cloud-0.9.0/bigquery/file.go000066400000000000000000000132571312234511600204500ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "io" bq "google.golang.org/api/bigquery/v2" ) // A ReaderSource is a source for a load operation that gets // data from an io.Reader. type ReaderSource struct { r io.Reader FileConfig } // NewReaderSource creates a ReaderSource from an io.Reader. You may // optionally configure properties on the ReaderSource that describe the // data being read, before passing it to Table.LoaderFrom. func NewReaderSource(r io.Reader) *ReaderSource { return &ReaderSource{r: r} } func (r *ReaderSource) populateInsertJobConfForLoad(conf *insertJobConf) { conf.media = r.r r.FileConfig.populateLoadConfig(conf.job.Configuration.Load) } // FileConfig contains configuration options that pertain to files, typically // text files that require interpretation to be used as a BigQuery table. A // file may live in Google Cloud Storage (see GCSReference), or it may be // loaded into a table via the Table.LoaderFromReader. type FileConfig struct { // SourceFormat is the format of the GCS data to be read. 
// Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV. SourceFormat DataFormat // FieldDelimiter is the separator for fields in a CSV file, used when // reading or exporting data. The default is ",". FieldDelimiter string // The number of rows at the top of a CSV file that BigQuery will skip when // reading data. SkipLeadingRows int64 // AllowJaggedRows causes missing trailing optional columns to be tolerated // when reading CSV data. Missing values are treated as nulls. AllowJaggedRows bool // AllowQuotedNewlines sets whether quoted data sections containing // newlines are allowed when reading CSV data. AllowQuotedNewlines bool // Indicates if we should automatically infer the options and // schema for CSV and JSON sources. AutoDetect bool // Encoding is the character encoding of data to be read. Encoding Encoding // MaxBadRecords is the maximum number of bad records that will be ignored // when reading data. MaxBadRecords int64 // IgnoreUnknownValues causes values not matching the schema to be // tolerated. Unknown values are ignored. For CSV this ignores extra values // at the end of a line. For JSON this ignores named values that do not // match any column name. If this field is not set, records containing // unknown values are treated as bad records. The MaxBadRecords field can // be used to customize how bad records are handled. IgnoreUnknownValues bool // Schema describes the data. It is required when reading CSV or JSON data, // unless the data is being loaded into a table that already exists. Schema Schema // Quote is the value used to quote data sections in a CSV file. The // default quotation character is the double quote ("), which is used if // both Quote and ForceZeroQuote are unset. // To specify that no character should be interpreted as a quotation // character, set ForceZeroQuote to true. // Only used when reading data. Quote string ForceZeroQuote bool } // quote returns the CSV quote character, or nil if unset. 
func (fc *FileConfig) quote() *string { if fc.ForceZeroQuote { quote := "" return "e } if fc.Quote == "" { return nil } return &fc.Quote } func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) { conf.SkipLeadingRows = fc.SkipLeadingRows conf.SourceFormat = string(fc.SourceFormat) conf.Autodetect = fc.AutoDetect conf.AllowJaggedRows = fc.AllowJaggedRows conf.AllowQuotedNewlines = fc.AllowQuotedNewlines conf.Encoding = string(fc.Encoding) conf.FieldDelimiter = fc.FieldDelimiter conf.IgnoreUnknownValues = fc.IgnoreUnknownValues conf.MaxBadRecords = fc.MaxBadRecords if fc.Schema != nil { conf.Schema = fc.Schema.asTableSchema() } conf.Quote = fc.quote() } func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) { format := fc.SourceFormat if format == "" { // Format must be explicitly set for external data sources. format = CSV } // TODO(jba): support AutoDetect. conf.IgnoreUnknownValues = fc.IgnoreUnknownValues conf.MaxBadRecords = fc.MaxBadRecords conf.SourceFormat = string(format) if fc.Schema != nil { conf.Schema = fc.Schema.asTableSchema() } if format == CSV { conf.CsvOptions = &bq.CsvOptions{ AllowJaggedRows: fc.AllowJaggedRows, AllowQuotedNewlines: fc.AllowQuotedNewlines, Encoding: string(fc.Encoding), FieldDelimiter: fc.FieldDelimiter, SkipLeadingRows: fc.SkipLeadingRows, Quote: fc.quote(), } } } // DataFormat describes the format of BigQuery table data. type DataFormat string // Constants describing the format of BigQuery table data. const ( CSV DataFormat = "CSV" Avro DataFormat = "AVRO" JSON DataFormat = "NEWLINE_DELIMITED_JSON" DatastoreBackup DataFormat = "DATASTORE_BACKUP" ) // Encoding specifies the character encoding of data to be loaded into BigQuery. // See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding // for more details about how this is used. 
type Encoding string const ( UTF_8 Encoding = "UTF-8" ISO_8859_1 Encoding = "ISO-8859-1" ) golang-google-cloud-0.9.0/bigquery/file_test.go000066400000000000000000000044541312234511600215060ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "reflect" "testing" "cloud.google.com/go/internal/pretty" bq "google.golang.org/api/bigquery/v2" ) func TestQuote(t *testing.T) { ptr := func(s string) *string { return &s } for _, test := range []struct { quote string force bool want *string }{ {"", false, nil}, {"", true, ptr("")}, {"-", false, ptr("-")}, {"-", true, ptr("")}, } { fc := FileConfig{ Quote: test.quote, ForceZeroQuote: test.force, } got := fc.quote() if (got == nil) != (test.want == nil) { t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want)) } if got != nil && test.want != nil && *got != *test.want { t.Errorf("%+v: got %q, want %q", test, *got, *test.want) } } } func TestPopulateLoadConfig(t *testing.T) { hyphen := "-" fc := FileConfig{ SourceFormat: CSV, FieldDelimiter: "\t", SkipLeadingRows: 8, AllowJaggedRows: true, AllowQuotedNewlines: true, Encoding: UTF_8, MaxBadRecords: 7, IgnoreUnknownValues: true, Schema: Schema{ stringFieldSchema(), nestedFieldSchema(), }, Quote: hyphen, } want := &bq.JobConfigurationLoad{ SourceFormat: "CSV", FieldDelimiter: "\t", SkipLeadingRows: 8, AllowJaggedRows: true, AllowQuotedNewlines: true, Encoding: 
"UTF-8", MaxBadRecords: 7, IgnoreUnknownValues: true, Schema: &bq.TableSchema{ Fields: []*bq.TableFieldSchema{ bqStringFieldSchema(), bqNestedFieldSchema(), }}, Quote: &hyphen, } got := &bq.JobConfigurationLoad{} fc.populateLoadConfig(got) if !reflect.DeepEqual(got, want) { t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want)) } } golang-google-cloud-0.9.0/bigquery/gcs.go000066400000000000000000000054151312234511600203020ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import bq "google.golang.org/api/bigquery/v2" // GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute // an input or output to a BigQuery operation. type GCSReference struct { // TODO(jba): Export so that GCSReference can be used to hold data from a Job.get api call and expose it to the user. uris []string FileConfig // DestinationFormat is the format to use when writing exported files. // Allowed values are: CSV, Avro, JSON. The default is CSV. // CSV is not supported for tables with nested or repeated fields. DestinationFormat DataFormat // Compression specifies the type of compression to apply when writing data // to Google Cloud Storage, or using this GCSReference as an ExternalData // source with CSV or JSON SourceFormat. Default is None. 
Compression Compression } // NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination. // In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object. // Data may also be split into mutiple files, if multiple URIs or URIs containing wildcards are provided. // Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name. // For more information about the treatment of wildcards and multiple URIs, // see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple func NewGCSReference(uri ...string) *GCSReference { return &GCSReference{uris: uri} } // Compression is the type of compression to apply when writing data to Google Cloud Storage. type Compression string const ( None Compression = "NONE" Gzip Compression = "GZIP" ) func (gcs *GCSReference) populateInsertJobConfForLoad(conf *insertJobConf) { conf.job.Configuration.Load.SourceUris = gcs.uris gcs.FileConfig.populateLoadConfig(conf.job.Configuration.Load) } func (gcs *GCSReference) externalDataConfig() bq.ExternalDataConfiguration { conf := bq.ExternalDataConfiguration{ Compression: string(gcs.Compression), SourceUris: append([]string{}, gcs.uris...), } gcs.FileConfig.populateExternalDataConfig(&conf) return conf } golang-google-cloud-0.9.0/bigquery/integration_test.go000066400000000000000000000661011312234511600231070ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "errors" "flag" "fmt" "log" "net/http" "os" "reflect" "sort" "strings" "testing" "time" gax "github.com/googleapis/gax-go" "cloud.google.com/go/civil" "cloud.google.com/go/internal" "cloud.google.com/go/internal/pretty" "cloud.google.com/go/internal/testutil" "golang.org/x/net/context" "google.golang.org/api/googleapi" "google.golang.org/api/iterator" "google.golang.org/api/option" ) var ( client *Client dataset *Dataset schema = Schema{ {Name: "name", Type: StringFieldType}, {Name: "num", Type: IntegerFieldType}, } testTableExpiration time.Time ) func TestMain(m *testing.M) { initIntegrationTest() os.Exit(m.Run()) } func getClient(t *testing.T) *Client { if client == nil { t.Skip("Integration tests skipped") } return client } // If integration tests will be run, create a unique bucket for them. func initIntegrationTest() { flag.Parse() // needed for testing.Short() if testing.Short() { return } ctx := context.Background() ts := testutil.TokenSource(ctx, Scope) if ts == nil { log.Println("Integration tests skipped. See CONTRIBUTING.md for details") return } projID := testutil.ProjID() var err error client, err = NewClient(ctx, projID, option.WithTokenSource(ts)) if err != nil { log.Fatalf("NewClient: %v", err) } dataset = client.Dataset("bigquery_integration_test") if err := dataset.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409 log.Fatalf("creating dataset: %v", err) } testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second) } func TestIntegration_Create(t *testing.T) { // Check that creating a record field with an empty schema is an error. 
if client == nil { t.Skip("Integration tests skipped") } table := dataset.Table("t_bad") schema := Schema{ {Name: "rec", Type: RecordFieldType, Schema: Schema{}}, } err := table.Create(context.Background(), schema, TableExpiration(time.Now().Add(5*time.Minute))) if err == nil { t.Fatal("want error, got nil") } if !hasStatusCode(err, http.StatusBadRequest) { t.Fatalf("want a 400 error, got %v", err) } } func TestIntegration_CreateView(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() table := newTable(t, schema) defer table.Delete(ctx) // Test that standard SQL views work. view := dataset.Table("t_view_standardsql") query := ViewQuery(fmt.Sprintf("SELECT APPROX_COUNT_DISTINCT(name) FROM `%s.%s.%s`", dataset.ProjectID, dataset.DatasetID, table.TableID)) err := view.Create(context.Background(), UseStandardSQL(), query) if err != nil { t.Fatalf("table.create: Did not expect an error, got: %v", err) } view.Delete(ctx) } func TestIntegration_TableMetadata(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() table := newTable(t, schema) defer table.Delete(ctx) // Check table metadata. md, err := table.Metadata(ctx) if err != nil { t.Fatal(err) } // TODO(jba): check md more thorougly. 
if got, want := md.ID, fmt.Sprintf("%s:%s.%s", dataset.ProjectID, dataset.DatasetID, table.TableID); got != want { t.Errorf("metadata.ID: got %q, want %q", got, want) } if got, want := md.Type, RegularTable; got != want { t.Errorf("metadata.Type: got %v, want %v", got, want) } if got, want := md.ExpirationTime, testTableExpiration; !got.Equal(want) { t.Errorf("metadata.Type: got %v, want %v", got, want) } // Check that timePartitioning is nil by default if md.TimePartitioning != nil { t.Errorf("metadata.TimePartitioning: got %v, want %v", md.TimePartitioning, nil) } // Create tables that have time partitioning partitionCases := []struct { timePartitioning TimePartitioning expectedExpiration time.Duration }{ {TimePartitioning{}, time.Duration(0)}, {TimePartitioning{time.Second}, time.Second}, } for i, c := range partitionCases { table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i)) err = table.Create(context.Background(), schema, c.timePartitioning, TableExpiration(time.Now().Add(5*time.Minute))) if err != nil { t.Fatal(err) } defer table.Delete(ctx) md, err = table.Metadata(ctx) if err != nil { t.Fatal(err) } got := md.TimePartitioning want := &TimePartitioning{c.expectedExpiration} if !reflect.DeepEqual(got, want) { t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want) } } } func TestIntegration_DatasetMetadata(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() md, err := dataset.Metadata(ctx) if err != nil { t.Fatal(err) } if got, want := md.ID, fmt.Sprintf("%s:%s", dataset.ProjectID, dataset.DatasetID); got != want { t.Errorf("ID: got %q, want %q", got, want) } jan2016 := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC) if md.CreationTime.Before(jan2016) { t.Errorf("CreationTime: got %s, want > 2016-1-1", md.CreationTime) } if md.LastModifiedTime.Before(jan2016) { t.Errorf("LastModifiedTime: got %s, want > 2016-1-1", md.LastModifiedTime) } // Verify that we get a NotFound for a nonexistent 
dataset. _, err = client.Dataset("does_not_exist").Metadata(ctx) if err == nil || !hasStatusCode(err, http.StatusNotFound) { t.Errorf("got %v, want NotFound error", err) } } func TestIntegration_DatasetDelete(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() ds := client.Dataset("delete_test") if err := ds.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409 t.Fatalf("creating dataset %s: %v", ds, err) } if err := ds.Delete(ctx); err != nil { t.Fatalf("deleting dataset %s: %v", ds, err) } } func TestIntegration_Tables(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() table := newTable(t, schema) defer table.Delete(ctx) wantName := table.FullyQualifiedName() // This test is flaky due to eventual consistency. ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { // Iterate over tables in the dataset. it := dataset.Tables(ctx) var tableNames []string for { tbl, err := it.Next() if err == iterator.Done { break } if err != nil { return false, err } tableNames = append(tableNames, tbl.FullyQualifiedName()) } // Other tests may be running with this dataset, so there might be more // than just our table in the list. So don't try for an exact match; just // make sure that our table is there somewhere. for _, tn := range tableNames { if tn == wantName { return true, nil } } return false, fmt.Errorf("got %v\nwant %s in the list", tableNames, wantName) }) if err != nil { t.Fatal(err) } } func TestIntegration_UploadAndRead(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() table := newTable(t, schema) defer table.Delete(ctx) // Populate the table. 
upl := table.Uploader() var ( wantRows [][]Value saverRows []*ValuesSaver ) for i, name := range []string{"a", "b", "c"} { row := []Value{name, int64(i)} wantRows = append(wantRows, row) saverRows = append(saverRows, &ValuesSaver{ Schema: schema, InsertID: name, Row: row, }) } if err := upl.Put(ctx, saverRows); err != nil { t.Fatal(putError(err)) } // Wait until the data has been uploaded. This can take a few seconds, according // to https://cloud.google.com/bigquery/streaming-data-into-bigquery. if err := waitForRow(ctx, table); err != nil { t.Fatal(err) } // Read the table. checkRead(t, "upload", table.Read(ctx), wantRows) // Query the table. q := client.Query(fmt.Sprintf("select name, num from %s", table.TableID)) q.DefaultProjectID = dataset.ProjectID q.DefaultDatasetID = dataset.DatasetID rit, err := q.Read(ctx) if err != nil { t.Fatal(err) } checkRead(t, "query", rit, wantRows) // Query the long way. job1, err := q.Run(ctx) if err != nil { t.Fatal(err) } job2, err := client.JobFromID(ctx, job1.ID()) if err != nil { t.Fatal(err) } rit, err = job2.Read(ctx) if err != nil { t.Fatal(err) } checkRead(t, "job.Read", rit, wantRows) // Get statistics. jobStatus, err := job2.Status(ctx) if err != nil { t.Fatal(err) } if jobStatus.Statistics == nil { t.Fatal("jobStatus missing statistics") } if _, ok := jobStatus.Statistics.Details.(*QueryStatistics); !ok { t.Errorf("expected QueryStatistics, got %T", jobStatus.Statistics.Details) } // Test reading directly into a []Value. valueLists, err := readAll(table.Read(ctx)) if err != nil { t.Fatal(err) } it := table.Read(ctx) for i, vl := range valueLists { var got []Value if err := it.Next(&got); err != nil { t.Fatal(err) } want := []Value(vl) if !reflect.DeepEqual(got, want) { t.Errorf("%d: got %v, want %v", i, got, want) } } // Test reading into a map. 
it = table.Read(ctx) for _, vl := range valueLists { var vm map[string]Value if err := it.Next(&vm); err != nil { t.Fatal(err) } if got, want := len(vm), len(vl); got != want { t.Fatalf("valueMap len: got %d, want %d", got, want) } for i, v := range vl { if got, want := vm[schema[i].Name], v; got != want { t.Errorf("%d, name=%s: got %v, want %v", i, schema[i].Name, got, want) } } } } type SubSubTestStruct struct { Integer int64 } type SubTestStruct struct { String string Record SubSubTestStruct RecordArray []SubSubTestStruct } type TestStruct struct { Name string Bytes []byte Integer int64 Float float64 Boolean bool Timestamp time.Time Date civil.Date Time civil.Time DateTime civil.DateTime StringArray []string IntegerArray []int64 FloatArray []float64 BooleanArray []bool TimestampArray []time.Time DateArray []civil.Date TimeArray []civil.Time DateTimeArray []civil.DateTime Record SubTestStruct RecordArray []SubTestStruct } func TestIntegration_UploadAndReadStructs(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } schema, err := InferSchema(TestStruct{}) if err != nil { t.Fatal(err) } ctx := context.Background() table := newTable(t, schema) defer table.Delete(ctx) d := civil.Date{2016, 3, 20} tm := civil.Time{15, 4, 5, 0} ts := time.Date(2016, 3, 20, 15, 4, 5, 0, time.UTC) dtm := civil.DateTime{d, tm} d2 := civil.Date{1994, 5, 15} tm2 := civil.Time{1, 2, 4, 0} ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC) dtm2 := civil.DateTime{d2, tm2} // Populate the table. 
upl := table.Uploader() want := []*TestStruct{ { "a", []byte("byte"), 42, 3.14, true, ts, d, tm, dtm, []string{"a", "b"}, []int64{1, 2}, []float64{1, 1.41}, []bool{true, false}, []time.Time{ts, ts2}, []civil.Date{d, d2}, []civil.Time{tm, tm2}, []civil.DateTime{dtm, dtm2}, SubTestStruct{ "string", SubSubTestStruct{24}, []SubSubTestStruct{{1}, {2}}, }, []SubTestStruct{ {String: "empty"}, { "full", SubSubTestStruct{1}, []SubSubTestStruct{{1}, {2}}, }, }, }, { Name: "b", Bytes: []byte("byte2"), Integer: 24, Float: 4.13, Boolean: false, Timestamp: ts, Date: d, Time: tm, DateTime: dtm, }, } var savers []*StructSaver for _, s := range want { savers = append(savers, &StructSaver{Schema: schema, Struct: s}) } if err := upl.Put(ctx, savers); err != nil { t.Fatal(putError(err)) } // Wait until the data has been uploaded. This can take a few seconds, according // to https://cloud.google.com/bigquery/streaming-data-into-bigquery. if err := waitForRow(ctx, table); err != nil { t.Fatal(err) } // Test iteration with structs. it := table.Read(ctx) var got []*TestStruct for { var g TestStruct err := it.Next(&g) if err == iterator.Done { break } if err != nil { t.Fatal(err) } got = append(got, &g) } sort.Sort(byName(got)) // BigQuery does not elide nils. It reports an error for nil fields. for i, g := range got { if i >= len(want) { t.Errorf("%d: got %v, past end of want", i, pretty.Value(g)) } else if w := want[i]; !reflect.DeepEqual(g, w) { t.Errorf("%d: got %v, want %v", i, pretty.Value(g), pretty.Value(w)) } } } type byName []*TestStruct func (b byName) Len() int { return len(b) } func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name } func TestIntegration_Update(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() table := newTable(t, schema) defer table.Delete(ctx) // Test Update of non-schema fields. 
tm, err := table.Metadata(ctx) if err != nil { t.Fatal(err) } wantDescription := tm.Description + "more" wantName := tm.Name + "more" got, err := table.Update(ctx, TableMetadataToUpdate{ Description: wantDescription, Name: wantName, }) if err != nil { t.Fatal(err) } if got.Description != wantDescription { t.Errorf("Description: got %q, want %q", got.Description, wantDescription) } if got.Name != wantName { t.Errorf("Name: got %q, want %q", got.Name, wantName) } if !reflect.DeepEqual(got.Schema, schema) { t.Errorf("Schema: got %v, want %v", pretty.Value(got.Schema), pretty.Value(schema)) } // Test schema update. // Columns can be added. schema2 is the same as schema, except for the // added column in the middle. nested := Schema{ {Name: "nested", Type: BooleanFieldType}, {Name: "other", Type: StringFieldType}, } schema2 := Schema{ schema[0], {Name: "rec", Type: RecordFieldType, Schema: nested}, schema[1], } got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2}) if err != nil { t.Fatal(err) } // Wherever you add the column, it appears at the end. schema3 := Schema{schema2[0], schema2[2], schema2[1]} if !reflect.DeepEqual(got.Schema, schema3) { t.Errorf("add field:\ngot %v\nwant %v", pretty.Value(got.Schema), pretty.Value(schema3)) } // Updating with the empty schema succeeds, but is a no-op. got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}}) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(got.Schema, schema3) { t.Errorf("empty schema:\ngot %v\nwant %v", pretty.Value(got.Schema), pretty.Value(schema3)) } // Error cases. 
for _, test := range []struct { desc string fields []*FieldSchema }{ {"change from optional to required", []*FieldSchema{ schema3[0], {Name: "num", Type: IntegerFieldType, Required: true}, schema3[2], }}, {"add a required field", []*FieldSchema{ schema3[0], schema3[1], schema3[2], {Name: "req", Type: StringFieldType, Required: true}, }}, {"remove a field", []*FieldSchema{schema3[0], schema3[1]}}, {"remove a nested field", []*FieldSchema{ schema3[0], schema3[1], {Name: "rec", Type: RecordFieldType, Schema: Schema{nested[0]}}}}, {"remove all nested fields", []*FieldSchema{ schema3[0], schema3[1], {Name: "rec", Type: RecordFieldType, Schema: Schema{}}}}, } { for { _, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)}) if !hasStatusCode(err, 403) { break } // We've hit the rate limit for updates. Wait a bit and retry. t.Logf("%s: retrying after getting %v", test.desc, err) time.Sleep(4 * time.Second) } if err == nil { t.Errorf("%s: want error, got nil", test.desc) } else if !hasStatusCode(err, 400) { t.Errorf("%s: want 400, got %v", test.desc, err) } } } func TestIntegration_Load(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() table := newTable(t, schema) defer table.Delete(ctx) // Load the table from a reader. r := strings.NewReader("a,0\nb,1\nc,2\n") wantRows := [][]Value{ []Value{"a", int64(0)}, []Value{"b", int64(1)}, []Value{"c", int64(2)}, } rs := NewReaderSource(r) loader := table.LoaderFrom(rs) loader.WriteDisposition = WriteTruncate job, err := loader.Run(ctx) if err != nil { t.Fatal(err) } if err := wait(ctx, job); err != nil { t.Fatal(err) } checkRead(t, "reader load", table.Read(ctx), wantRows) } func TestIntegration_DML(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() // Retry insert; sometimes it fails with INTERNAL. 
err := internal.Retry(ctx, gax.Backoff{}, func() (bool, error) { table := newTable(t, schema) defer table.Delete(ctx) // Use DML to insert. wantRows := [][]Value{ []Value{"a", int64(0)}, []Value{"b", int64(1)}, []Value{"c", int64(2)}, } query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, num) "+ "VALUES ('a', 0), ('b', 1), ('c', 2)", table.TableID) q := client.Query(query) q.UseStandardSQL = true // necessary for DML job, err := q.Run(ctx) if err != nil { return false, err } if err := wait(ctx, job); err != nil { return false, err } if msg, ok := compareRead(table.Read(ctx), wantRows); !ok { // Stop on read error, because that has never been flaky. return true, errors.New(msg) } return true, nil }) if err != nil { t.Fatal(err) } } func TestIntegration_TimeTypes(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() dtSchema := Schema{ {Name: "d", Type: DateFieldType}, {Name: "t", Type: TimeFieldType}, {Name: "dt", Type: DateTimeFieldType}, {Name: "ts", Type: TimestampFieldType}, } table := newTable(t, dtSchema) defer table.Delete(ctx) d := civil.Date{2016, 3, 20} tm := civil.Time{12, 30, 0, 0} ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) wantRows := [][]Value{ []Value{d, tm, civil.DateTime{d, tm}, ts}, } upl := table.Uploader() if err := upl.Put(ctx, []*ValuesSaver{ {Schema: dtSchema, Row: wantRows[0]}, }); err != nil { t.Fatal(putError(err)) } if err := waitForRow(ctx, table); err != nil { t.Fatal(err) } // SQL wants DATETIMEs with a space between date and time, but the service // returns them in RFC3339 form, with a "T" between. 
query := fmt.Sprintf("INSERT bigquery_integration_test.%s (d, t, dt, ts) "+ "VALUES ('%s', '%s', '%s %s', '%s')", table.TableID, d, tm, d, tm, ts.Format("2006-01-02 15:04:05")) q := client.Query(query) q.UseStandardSQL = true // necessary for DML job, err := q.Run(ctx) if err != nil { t.Fatal(err) } if err := wait(ctx, job); err != nil { t.Fatal(err) } wantRows = append(wantRows, wantRows[0]) checkRead(t, "TimeTypes", table.Read(ctx), wantRows) } func TestIntegration_StandardQuery(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() d := civil.Date{2016, 3, 20} tm := civil.Time{15, 04, 05, 0} ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) dtm := ts.Format("2006-01-02 15:04:05") // Constructs Value slices made up of int64s. ints := func(args ...int) []Value { vals := make([]Value, len(args)) for i, arg := range args { vals[i] = int64(arg) } return vals } testCases := []struct { query string wantRow []Value }{ {"SELECT 1", ints(1)}, {"SELECT 1.3", []Value{1.3}}, {"SELECT TRUE", []Value{true}}, {"SELECT 'ABC'", []Value{"ABC"}}, {"SELECT CAST('foo' AS BYTES)", []Value{[]byte("foo")}}, {fmt.Sprintf("SELECT TIMESTAMP '%s'", dtm), []Value{ts}}, {fmt.Sprintf("SELECT [TIMESTAMP '%s', TIMESTAMP '%s']", dtm, dtm), []Value{[]Value{ts, ts}}}, {fmt.Sprintf("SELECT ('hello', TIMESTAMP '%s')", dtm), []Value{[]Value{"hello", ts}}}, {fmt.Sprintf("SELECT DATETIME(TIMESTAMP '%s')", dtm), []Value{civil.DateTime{d, tm}}}, {fmt.Sprintf("SELECT DATE(TIMESTAMP '%s')", dtm), []Value{d}}, {fmt.Sprintf("SELECT TIME(TIMESTAMP '%s')", dtm), []Value{tm}}, {"SELECT (1, 2)", []Value{ints(1, 2)}}, {"SELECT [1, 2, 3]", []Value{ints(1, 2, 3)}}, {"SELECT ([1, 2], 3, [4, 5])", []Value{[]Value{ints(1, 2), int64(3), ints(4, 5)}}}, {"SELECT [(1, 2, 3), (4, 5, 6)]", []Value{[]Value{ints(1, 2, 3), ints(4, 5, 6)}}}, {"SELECT [([1, 2, 3], 4), ([5, 6], 7)]", []Value{[]Value{[]Value{ints(1, 2, 3), int64(4)}, []Value{ints(5, 6), int64(7)}}}}, {"SELECT 
ARRAY(SELECT STRUCT([1, 2]))", []Value{[]Value{[]Value{ints(1, 2)}}}}, } for _, c := range testCases { q := client.Query(c.query) q.UseStandardSQL = true it, err := q.Read(ctx) if err != nil { t.Fatal(err) } checkRead(t, "StandardQuery", it, [][]Value{c.wantRow}) } } func TestIntegration_LegacyQuery(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) dtm := ts.Format("2006-01-02 15:04:05") testCases := []struct { query string wantRow []Value }{ {"SELECT 1", []Value{int64(1)}}, {"SELECT 1.3", []Value{1.3}}, {"SELECT TRUE", []Value{true}}, {"SELECT 'ABC'", []Value{"ABC"}}, {"SELECT CAST('foo' AS BYTES)", []Value{[]byte("foo")}}, {fmt.Sprintf("SELECT TIMESTAMP('%s')", dtm), []Value{ts}}, {fmt.Sprintf("SELECT DATE(TIMESTAMP('%s'))", dtm), []Value{"2016-03-20"}}, {fmt.Sprintf("SELECT TIME(TIMESTAMP('%s'))", dtm), []Value{"15:04:05"}}, } for _, c := range testCases { q := client.Query(c.query) it, err := q.Read(ctx) if err != nil { t.Fatal(err) } checkRead(t, "LegacyQuery", it, [][]Value{c.wantRow}) } } func TestIntegration_QueryParameters(t *testing.T) { if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() d := civil.Date{2016, 3, 20} tm := civil.Time{15, 04, 05, 0} dtm := civil.DateTime{d, tm} ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) type ss struct { String string } type s struct { Timestamp time.Time StringArray []string SubStruct ss SubStructArray []ss } testCases := []struct { query string parameters []QueryParameter wantRow []Value }{ {"SELECT @val", []QueryParameter{{"val", 1}}, []Value{int64(1)}}, {"SELECT @val", []QueryParameter{{"val", 1.3}}, []Value{1.3}}, {"SELECT @val", []QueryParameter{{"val", true}}, []Value{true}}, {"SELECT @val", []QueryParameter{{"val", "ABC"}}, []Value{"ABC"}}, {"SELECT @val", []QueryParameter{{"val", []byte("foo")}}, []Value{[]byte("foo")}}, {"SELECT @val", []QueryParameter{{"val", ts}}, 
[]Value{ts}}, {"SELECT @val", []QueryParameter{{"val", []time.Time{ts, ts}}}, []Value{[]Value{ts, ts}}}, {"SELECT @val", []QueryParameter{{"val", dtm}}, []Value{dtm}}, {"SELECT @val", []QueryParameter{{"val", d}}, []Value{d}}, {"SELECT @val", []QueryParameter{{"val", tm}}, []Value{tm}}, {"SELECT @val", []QueryParameter{{"val", s{ts, []string{"a", "b"}, ss{"c"}, []ss{{"d"}, {"e"}}}}}, []Value{[]Value{ts, []Value{"a", "b"}, []Value{"c"}, []Value{[]Value{"d"}, []Value{"e"}}}}}, {"SELECT @val.Timestamp, @val.SubStruct.String", []QueryParameter{{"val", s{Timestamp: ts, SubStruct: ss{"a"}}}}, []Value{ts, "a"}}, } for _, c := range testCases { q := client.Query(c.query) q.Parameters = c.parameters it, err := q.Read(ctx) if err != nil { t.Fatal(err) } checkRead(t, "QueryParameters", it, [][]Value{c.wantRow}) } } func TestIntegration_ReadNullIntoStruct(t *testing.T) { // Reading a null into a struct field should return an error (not panic). if client == nil { t.Skip("Integration tests skipped") } ctx := context.Background() table := newTable(t, schema) defer table.Delete(ctx) upl := table.Uploader() row := &ValuesSaver{ Schema: schema, Row: []Value{"name", nil}, } if err := upl.Put(ctx, []*ValuesSaver{row}); err != nil { t.Fatal(putError(err)) } if err := waitForRow(ctx, table); err != nil { t.Fatal(err) } q := client.Query(fmt.Sprintf("select name, num from %s", table.TableID)) q.DefaultProjectID = dataset.ProjectID q.DefaultDatasetID = dataset.DatasetID it, err := q.Read(ctx) if err != nil { t.Fatal(err) } type S struct{ Num int64 } var s S if err := it.Next(&s); err == nil { t.Fatal("got nil, want error") } } // Creates a new, temporary table with a unique name and the given schema. 
func newTable(t *testing.T, s Schema) *Table { name := fmt.Sprintf("t%d", time.Now().UnixNano()) table := dataset.Table(name) err := table.Create(context.Background(), s, TableExpiration(testTableExpiration)) if err != nil { t.Fatal(err) } return table } func checkRead(t *testing.T, msg string, it *RowIterator, want [][]Value) { if msg2, ok := compareRead(it, want); !ok { t.Errorf("%s: %s", msg, msg2) } } func compareRead(it *RowIterator, want [][]Value) (msg string, ok bool) { got, err := readAll(it) if err != nil { return err.Error(), false } if len(got) != len(want) { return fmt.Sprintf("got %d rows, want %d", len(got), len(want)), false } sort.Sort(byCol0(got)) for i, r := range got { gotRow := []Value(r) wantRow := want[i] if !reflect.DeepEqual(gotRow, wantRow) { return fmt.Sprintf("#%d: got %v, want %v", i, gotRow, wantRow), false } } return "", true } func readAll(it *RowIterator) ([][]Value, error) { var rows [][]Value for { var vals []Value err := it.Next(&vals) if err == iterator.Done { return rows, nil } if err != nil { return nil, err } rows = append(rows, vals) } } type byCol0 [][]Value func (b byCol0) Len() int { return len(b) } func (b byCol0) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byCol0) Less(i, j int) bool { switch a := b[i][0].(type) { case string: return a < b[j][0].(string) case civil.Date: return a.Before(b[j][0].(civil.Date)) default: panic("unknown type") } } func hasStatusCode(err error, code int) bool { if e, ok := err.(*googleapi.Error); ok && e.Code == code { return true } return false } // wait polls the job until it is complete or an error is returned. 
func wait(ctx context.Context, job *Job) error { status, err := job.Wait(ctx) if err != nil { return fmt.Errorf("getting job status: %v", err) } if status.Err() != nil { return fmt.Errorf("job status error: %#v", status.Err()) } if status.Statistics == nil { return errors.New("nil Statistics") } if status.Statistics.EndTime.IsZero() { return errors.New("EndTime is zero") } if status.Statistics.Details == nil { return errors.New("nil Statistics.Details") } return nil } // waitForRow polls the table until it contains a row. // TODO(jba): use internal.Retry. func waitForRow(ctx context.Context, table *Table) error { for { it := table.Read(ctx) var v []Value err := it.Next(&v) if err == nil { return nil } if err != iterator.Done { return err } time.Sleep(1 * time.Second) } } func putError(err error) string { pme, ok := err.(PutMultiError) if !ok { return err.Error() } var msgs []string for _, err := range pme { msgs = append(msgs, err.Error()) } return strings.Join(msgs, "\n") } golang-google-cloud-0.9.0/bigquery/iterator.go000066400000000000000000000115221312234511600213530ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "fmt" "reflect" "golang.org/x/net/context" "google.golang.org/api/iterator" ) // A pageFetcher returns a page of rows, starting from the row specified by token. 
type pageFetcher interface { fetch(ctx context.Context, s service, token string) (*readDataResult, error) setPaging(*pagingConf) } func newRowIterator(ctx context.Context, s service, pf pageFetcher) *RowIterator { it := &RowIterator{ ctx: ctx, service: s, pf: pf, } it.pageInfo, it.nextFunc = iterator.NewPageInfo( it.fetch, func() int { return len(it.rows) }, func() interface{} { r := it.rows; it.rows = nil; return r }) return it } // A RowIterator provides access to the result of a BigQuery lookup. type RowIterator struct { ctx context.Context service service pf pageFetcher pageInfo *iterator.PageInfo nextFunc func() error // StartIndex can be set before the first call to Next. If PageInfo().Token // is also set, StartIndex is ignored. StartIndex uint64 rows [][]Value schema Schema // populated on first call to fetch structLoader structLoader // used to populate a pointer to a struct } // Next loads the next row into dst. Its return value is iterator.Done if there // are no more results. Once Next returns iterator.Done, all subsequent calls // will return iterator.Done. // // dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer. // // If dst is a *[]Value, it will be set to to new []Value whose i'th element // will be populated with the i'th column of the row. // // If dst is a *map[string]Value, a new map will be created if dst is nil. Then // for each schema column name, the map key of that name will be set to the column's // value. // // If dst is pointer to a struct, each column in the schema will be matched // with an exported field of the struct that has the same name, ignoring case. // Unmatched schema columns and struct fields will be ignored. // // Each BigQuery column type corresponds to one or more Go types; a matching struct // field must be of the correct type. 
The correspondences are: // // STRING string // BOOL bool // INTEGER int, int8, int16, int32, int64, uint8, uint16, uint32 // FLOAT float32, float64 // BYTES []byte // TIMESTAMP time.Time // DATE civil.Date // TIME civil.Time // DATETIME civil.DateTime // // A repeated field corresponds to a slice or array of the element type. // A RECORD type (nested schema) corresponds to a nested struct or struct pointer. // All calls to Next on the same iterator must use the same struct type. // // It is an error to attempt to read a BigQuery NULL value into a struct field. // If your table contains NULLs, use a *[]Value or *map[string]Value. func (it *RowIterator) Next(dst interface{}) error { var vl ValueLoader switch dst := dst.(type) { case ValueLoader: vl = dst case *[]Value: vl = (*valueList)(dst) case *map[string]Value: vl = (*valueMap)(dst) default: if !isStructPtr(dst) { return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst) } } if err := it.nextFunc(); err != nil { return err } row := it.rows[0] it.rows = it.rows[1:] if vl == nil { // This can only happen if dst is a pointer to a struct. We couldn't // set vl above because we need the schema. if err := it.structLoader.set(dst, it.schema); err != nil { return err } vl = &it.structLoader } return vl.Load(row, it.schema) } func isStructPtr(x interface{}) bool { t := reflect.TypeOf(x) return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

// fetch retrieves one page of rows and appends them to it.rows, returning
// the next page token. It is the fetch function given to iterator.NewPageInfo
// in newRowIterator.
func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
	pc := &pagingConf{}
	if pageSize > 0 {
		pc.recordsPerRequest = int64(pageSize)
		pc.setRecordsPerRequest = true
	}
	// StartIndex only applies to the first request (no token yet); after
	// that the page token determines the position.
	if pageToken == "" {
		pc.startIndex = it.StartIndex
	}
	it.pf.setPaging(pc)
	res, err := it.pf.fetch(it.ctx, it.service, pageToken)
	if err != nil {
		return "", err
	}
	it.rows = append(it.rows, res.rows...)
	// The schema is (re)recorded on every page; callers read it after the
	// first successful fetch.
	it.schema = res.schema
	return res.pageToken, nil
}

// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"fmt"
	"reflect"
	"testing"

	"golang.org/x/net/context"

	"google.golang.org/api/iterator"
)

// fetchResponse is one canned reply for a pageFetcherStub, keyed by page token.
type fetchResponse struct {
	result *readDataResult // The result to return.
	err    error           // The error to return.
}

// pageFetcherStub services fetch requests by returning data from an in-memory list of values.
type pageFetcherStub struct { fetchResponses map[string]fetchResponse err error } func (pf *pageFetcherStub) fetch(ctx context.Context, s service, token string) (*readDataResult, error) { call, ok := pf.fetchResponses[token] if !ok { pf.err = fmt.Errorf("Unexpected page token: %q", token) } return call.result, call.err } func (pf *pageFetcherStub) setPaging(pc *pagingConf) {} func TestIterator(t *testing.T) { var ( iiSchema = Schema{ {Type: IntegerFieldType}, {Type: IntegerFieldType}, } siSchema = Schema{ {Type: StringFieldType}, {Type: IntegerFieldType}, } ) fetchFailure := errors.New("fetch failure") testCases := []struct { desc string pageToken string fetchResponses map[string]fetchResponse want [][]Value wantErr error wantSchema Schema }{ { desc: "Iteration over single empty page", fetchResponses: map[string]fetchResponse{ "": { result: &readDataResult{ pageToken: "", rows: [][]Value{}, schema: Schema{}, }, }, }, want: [][]Value{}, wantSchema: Schema{}, }, { desc: "Iteration over single page", fetchResponses: map[string]fetchResponse{ "": { result: &readDataResult{ pageToken: "", rows: [][]Value{{1, 2}, {11, 12}}, schema: iiSchema, }, }, }, want: [][]Value{{1, 2}, {11, 12}}, wantSchema: iiSchema, }, { desc: "Iteration over single page with different schema", fetchResponses: map[string]fetchResponse{ "": { result: &readDataResult{ pageToken: "", rows: [][]Value{{"1", 2}, {"11", 12}}, schema: siSchema, }, }, }, want: [][]Value{{"1", 2}, {"11", 12}}, wantSchema: siSchema, }, { desc: "Iteration over two pages", fetchResponses: map[string]fetchResponse{ "": { result: &readDataResult{ pageToken: "a", rows: [][]Value{{1, 2}, {11, 12}}, schema: iiSchema, }, }, "a": { result: &readDataResult{ pageToken: "", rows: [][]Value{{101, 102}, {111, 112}}, schema: iiSchema, }, }, }, want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}}, wantSchema: iiSchema, }, { desc: "Server response includes empty page", fetchResponses: map[string]fetchResponse{ "": { result: 
&readDataResult{ pageToken: "a", rows: [][]Value{{1, 2}, {11, 12}}, schema: iiSchema, }, }, "a": { result: &readDataResult{ pageToken: "b", rows: [][]Value{}, schema: iiSchema, }, }, "b": { result: &readDataResult{ pageToken: "", rows: [][]Value{{101, 102}, {111, 112}}, schema: iiSchema, }, }, }, want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}}, wantSchema: iiSchema, }, { desc: "Fetch error", fetchResponses: map[string]fetchResponse{ "": { result: &readDataResult{ pageToken: "a", rows: [][]Value{{1, 2}, {11, 12}}, schema: iiSchema, }, }, "a": { // We returns some data from this fetch, but also an error. // So the end result should include only data from the previous fetch. err: fetchFailure, result: &readDataResult{ pageToken: "b", rows: [][]Value{{101, 102}, {111, 112}}, schema: iiSchema, }, }, }, want: [][]Value{{1, 2}, {11, 12}}, wantErr: fetchFailure, wantSchema: iiSchema, }, { desc: "Skip over an entire page", pageToken: "a", fetchResponses: map[string]fetchResponse{ "": { result: &readDataResult{ pageToken: "a", rows: [][]Value{{1, 2}, {11, 12}}, schema: iiSchema, }, }, "a": { result: &readDataResult{ pageToken: "", rows: [][]Value{{101, 102}, {111, 112}}, schema: iiSchema, }, }, }, want: [][]Value{{101, 102}, {111, 112}}, wantSchema: iiSchema, }, { desc: "Skip beyond all data", pageToken: "b", fetchResponses: map[string]fetchResponse{ "": { result: &readDataResult{ pageToken: "a", rows: [][]Value{{1, 2}, {11, 12}}, schema: iiSchema, }, }, "a": { result: &readDataResult{ pageToken: "b", rows: [][]Value{{101, 102}, {111, 112}}, schema: iiSchema, }, }, "b": { result: &readDataResult{}, }, }, // In this test case, Next will return false on its first call, // so we won't even attempt to call Get. 
want: [][]Value{}, wantSchema: Schema{}, }, } for _, tc := range testCases { pf := &pageFetcherStub{ fetchResponses: tc.fetchResponses, } it := newRowIterator(context.Background(), nil, pf) it.PageInfo().Token = tc.pageToken values, schema, err := consumeRowIterator(it) if err != tc.wantErr { t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr) } if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) { t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want) } if (len(schema) != 0 || len(tc.wantSchema) != 0) && !reflect.DeepEqual(schema, tc.wantSchema) { t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema) } } } type valueListWithSchema struct { vals valueList schema Schema } func (v *valueListWithSchema) Load(vs []Value, s Schema) error { v.vals.Load(vs, s) v.schema = s return nil } // consumeRowIterator reads the schema and all values from a RowIterator and returns them. func consumeRowIterator(it *RowIterator) ([][]Value, Schema, error) { var got [][]Value var schema Schema for { var vls valueListWithSchema err := it.Next(&vls) if err == iterator.Done { return got, schema, nil } if err != nil { return got, schema, err } got = append(got, vls.vals) schema = vls.schema } } func TestNextDuringErrorState(t *testing.T) { pf := &pageFetcherStub{ fetchResponses: map[string]fetchResponse{ "": {err: errors.New("bang")}, }, } it := newRowIterator(context.Background(), nil, pf) var vals []Value if err := it.Next(&vals); err == nil { t.Errorf("Expected error after calling Next") } if err := it.Next(&vals); err == nil { t.Errorf("Expected error calling Next again when iterator has a non-nil error.") } } func TestNextAfterFinished(t *testing.T) { testCases := []struct { fetchResponses map[string]fetchResponse want [][]Value }{ { fetchResponses: map[string]fetchResponse{ "": { result: &readDataResult{ pageToken: "", rows: [][]Value{{1, 2}, {11, 12}}, }, }, }, want: [][]Value{{1, 2}, {11, 12}}, }, { 
fetchResponses: map[string]fetchResponse{ "": { result: &readDataResult{ pageToken: "", rows: [][]Value{}, }, }, }, want: [][]Value{}, }, } for _, tc := range testCases { pf := &pageFetcherStub{ fetchResponses: tc.fetchResponses, } it := newRowIterator(context.Background(), nil, pf) values, _, err := consumeRowIterator(it) if err != nil { t.Fatal(err) } if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) { t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want) } // Try calling Get again. var vals []Value if err := it.Next(&vals); err != iterator.Done { t.Errorf("Expected Done calling Next when there are no more values") } } } func TestIteratorNextTypes(t *testing.T) { it := newRowIterator(context.Background(), nil, nil) for _, v := range []interface{}{3, "s", []int{}, &[]int{}, map[string]Value{}, &map[string]interface{}{}, struct{}{}, } { if err := it.Next(v); err == nil { t.Error("%v: want error, got nil", v) } } } golang-google-cloud-0.9.0/bigquery/job.go000066400000000000000000000224351312234511600203010ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "errors" "time" "cloud.google.com/go/internal" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) // A Job represents an operation which has been submitted to BigQuery for processing. 
type Job struct { c *Client projectID string jobID string isQuery bool destinationTable *bq.TableReference // table to read query results from } // JobFromID creates a Job which refers to an existing BigQuery job. The job // need not have been created by this package. For example, the job may have // been created in the BigQuery console. func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) { job, err := c.service.getJob(ctx, c.projectID, id) if err != nil { return nil, err } job.c = c return job, nil } func (j *Job) ID() string { return j.jobID } // State is one of a sequence of states that a Job progresses through as it is processed. type State int const ( Pending State = iota Running Done ) // JobStatus contains the current State of a job, and errors encountered while processing that job. type JobStatus struct { State State err error // All errors encountered during the running of the job. // Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful. Errors []*Error // Statistics about the job. Statistics *JobStatistics } // setJobRef initializes job's JobReference if given a non-empty jobID. // projectID must be non-empty. func setJobRef(job *bq.Job, jobID, projectID string) { if jobID == "" { return } // We don't check whether projectID is empty; the server will return an // error when it encounters the resulting JobReference. job.JobReference = &bq.JobReference{ JobId: jobID, ProjectId: projectID, } } // Done reports whether the job has completed. // After Done returns true, the Err method will return an error if the job completed unsuccesfully. func (s *JobStatus) Done() bool { return s.State == Done } // Err returns the error that caused the job to complete unsuccesfully (if any). func (s *JobStatus) Err() error { return s.err } // Status returns the current status of the job. It fails if the Status could not be determined. 
func (j *Job) Status(ctx context.Context) (*JobStatus, error) { js, err := j.c.service.jobStatus(ctx, j.projectID, j.jobID) if err != nil { return nil, err } // Fill in the client field of Tables in the statistics. if js.Statistics != nil { if qs, ok := js.Statistics.Details.(*QueryStatistics); ok { for _, t := range qs.ReferencedTables { t.c = j.c } } } return js, nil } // Cancel requests that a job be cancelled. This method returns without waiting for // cancellation to take effect. To check whether the job has terminated, use Job.Status. // Cancelled jobs may still incur costs. func (j *Job) Cancel(ctx context.Context) error { return j.c.service.jobCancel(ctx, j.projectID, j.jobID) } // Wait blocks until the job or the context is done. It returns the final status // of the job. // If an error occurs while retrieving the status, Wait returns that error. But // Wait returns nil if the status was retrieved successfully, even if // status.Err() != nil. So callers must check both errors. See the example. func (j *Job) Wait(ctx context.Context) (*JobStatus, error) { if j.isQuery { // We can avoid polling for query jobs. if _, err := j.c.service.waitForQuery(ctx, j.projectID, j.jobID); err != nil { return nil, err } // Note: extra RPC even if you just want to wait for the query to finish. js, err := j.Status(ctx) if err != nil { return nil, err } return js, nil } // Non-query jobs must poll. var js *JobStatus err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { js, err = j.Status(ctx) if err != nil { return true, err } if js.Done() { return true, nil } return false, nil }) if err != nil { return nil, err } return js, nil } // Read fetches the results of a query job. // If j is not a query job, Read returns an error. 
func (j *Job) Read(ctx context.Context) (*RowIterator, error) { if !j.isQuery { return nil, errors.New("bigquery: cannot read from a non-query job") } var projectID string if j.destinationTable != nil { projectID = j.destinationTable.ProjectId } else { projectID = j.c.projectID } schema, err := j.c.service.waitForQuery(ctx, projectID, j.jobID) if err != nil { return nil, err } // The destination table should only be nil if there was a query error. if j.destinationTable == nil { return nil, errors.New("bigquery: query job missing destination table") } return newRowIterator(ctx, j.c.service, &readTableConf{ projectID: j.destinationTable.ProjectId, datasetID: j.destinationTable.DatasetId, tableID: j.destinationTable.TableId, schema: schema, }), nil } // JobStatistics contains statistics about a job. type JobStatistics struct { CreationTime time.Time StartTime time.Time EndTime time.Time TotalBytesProcessed int64 Details Statistics } // Statistics is one of ExtractStatistics, LoadStatistics or QueryStatistics. type Statistics interface { implementsStatistics() } // ExtractStatistics contains statistics about an extract job. type ExtractStatistics struct { // The number of files per destination URI or URI pattern specified in the // extract configuration. These values will be in the same order as the // URIs specified in the 'destinationUris' field. DestinationURIFileCounts []int64 } // LoadStatistics contains statistics about a load job. type LoadStatistics struct { // The number of bytes of source data in a load job. InputFileBytes int64 // The number of source files in a load job. InputFiles int64 // Size of the loaded data in bytes. Note that while a load job is in the // running state, this value may change. OutputBytes int64 // The number of rows imported in a load job. Note that while an import job is // in the running state, this value may change. OutputRows int64 } // QueryStatistics contains statistics about a query job. 
type QueryStatistics struct { // Billing tier for the job. BillingTier int64 // Whether the query result was fetched from the query cache. CacheHit bool // The type of query statement, if valid. StatementType string // Total bytes billed for the job. TotalBytesBilled int64 // Total bytes processed for the job. TotalBytesProcessed int64 // Describes execution plan for the query. QueryPlan []*ExplainQueryStage // The number of rows affected by a DML statement. Present only for DML // statements INSERT, UPDATE or DELETE. NumDMLAffectedRows int64 // ReferencedTables: [Output-only, Experimental] Referenced tables for // the job. Queries that reference more than 50 tables will not have a // complete list. ReferencedTables []*Table // The schema of the results. Present only for successful dry run of // non-legacy SQL queries. Schema Schema // Standard SQL: list of undeclared query parameter names detected during a // dry run validation. UndeclaredQueryParameterNames []string } // ExplainQueryStage describes one stage of a query. type ExplainQueryStage struct { // Relative amount of the total time the average shard spent on CPU-bound tasks. ComputeRatioAvg float64 // Relative amount of the total time the slowest shard spent on CPU-bound tasks. ComputeRatioMax float64 // Unique ID for stage within plan. ID int64 // Human-readable name for stage. Name string // Relative amount of the total time the average shard spent reading input. ReadRatioAvg float64 // Relative amount of the total time the slowest shard spent reading input. ReadRatioMax float64 // Number of records read into the stage. RecordsRead int64 // Number of records written by the stage. RecordsWritten int64 // Current status for the stage. Status string // List of operations within the stage in dependency order (approximately // chronological). Steps []*ExplainQueryStep // Relative amount of the total time the average shard spent waiting to be scheduled. 
WaitRatioAvg float64 // Relative amount of the total time the slowest shard spent waiting to be scheduled. WaitRatioMax float64 // Relative amount of the total time the average shard spent on writing output. WriteRatioAvg float64 // Relative amount of the total time the slowest shard spent on writing output. WriteRatioMax float64 } // ExplainQueryStep describes one step of a query stage. type ExplainQueryStep struct { // Machine-readable operation type. Kind string // Human-readable stage descriptions. Substeps []string } func (*ExtractStatistics) implementsStatistics() {} func (*LoadStatistics) implementsStatistics() {} func (*QueryStatistics) implementsStatistics() {} golang-google-cloud-0.9.0/bigquery/load.go000066400000000000000000000051271312234511600204450ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) // LoadConfig holds the configuration for a load job. type LoadConfig struct { // JobID is the ID to use for the load job. If unset, a job ID will be automatically created. JobID string // Src is the source from which data will be loaded. Src LoadSource // Dst is the table into which the data will be loaded. Dst *Table // CreateDisposition specifies the circumstances under which the destination table will be created. // The default is CreateIfNeeded. 
CreateDisposition TableCreateDisposition // WriteDisposition specifies how existing data in the destination table is treated. // The default is WriteAppend. WriteDisposition TableWriteDisposition } // A Loader loads data from Google Cloud Storage into a BigQuery table. type Loader struct { LoadConfig c *Client } // A LoadSource represents a source of data that can be loaded into // a BigQuery table. // // This package defines two LoadSources: GCSReference, for Google Cloud Storage // objects, and ReaderSource, for data read from an io.Reader. type LoadSource interface { populateInsertJobConfForLoad(conf *insertJobConf) } // LoaderFrom returns a Loader which can be used to load data into a BigQuery table. // The returned Loader may optionally be further configured before its Run method is called. func (t *Table) LoaderFrom(src LoadSource) *Loader { return &Loader{ c: t.c, LoadConfig: LoadConfig{ Src: src, Dst: t, }, } } // Run initiates a load job. func (l *Loader) Run(ctx context.Context) (*Job, error) { job := &bq.Job{ Configuration: &bq.JobConfiguration{ Load: &bq.JobConfigurationLoad{ CreateDisposition: string(l.CreateDisposition), WriteDisposition: string(l.WriteDisposition), }, }, } conf := &insertJobConf{job: job} l.Src.populateInsertJobConfForLoad(conf) setJobRef(job, l.JobID, l.c.projectID) job.Configuration.Load.DestinationTable = l.Dst.tableRefProto() return l.c.insertJob(ctx, conf) } golang-google-cloud-0.9.0/bigquery/load_test.go000066400000000000000000000131761312234511600215070ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "reflect" "strings" "testing" "golang.org/x/net/context" "cloud.google.com/go/internal/pretty" bq "google.golang.org/api/bigquery/v2" ) func defaultLoadJob() *bq.Job { return &bq.Job{ Configuration: &bq.JobConfiguration{ Load: &bq.JobConfigurationLoad{ DestinationTable: &bq.TableReference{ ProjectId: "project-id", DatasetId: "dataset-id", TableId: "table-id", }, SourceUris: []string{"uri"}, }, }, } } func stringFieldSchema() *FieldSchema { return &FieldSchema{Name: "fieldname", Type: StringFieldType} } func nestedFieldSchema() *FieldSchema { return &FieldSchema{ Name: "nested", Type: RecordFieldType, Schema: Schema{stringFieldSchema()}, } } func bqStringFieldSchema() *bq.TableFieldSchema { return &bq.TableFieldSchema{ Name: "fieldname", Type: "STRING", } } func bqNestedFieldSchema() *bq.TableFieldSchema { return &bq.TableFieldSchema{ Name: "nested", Type: "RECORD", Fields: []*bq.TableFieldSchema{bqStringFieldSchema()}, } } func TestLoad(t *testing.T) { c := &Client{projectID: "project-id"} testCases := []struct { dst *Table src LoadSource config LoadConfig want *bq.Job }{ { dst: c.Dataset("dataset-id").Table("table-id"), src: NewGCSReference("uri"), want: defaultLoadJob(), }, { dst: c.Dataset("dataset-id").Table("table-id"), config: LoadConfig{ CreateDisposition: CreateNever, WriteDisposition: WriteTruncate, JobID: "ajob", }, src: NewGCSReference("uri"), want: func() *bq.Job { j := defaultLoadJob() j.Configuration.Load.CreateDisposition = "CREATE_NEVER" j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE" j.JobReference = 
&bq.JobReference{ JobId: "ajob", ProjectId: "project-id", } return j }(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: func() *GCSReference { g := NewGCSReference("uri") g.MaxBadRecords = 1 g.AllowJaggedRows = true g.AllowQuotedNewlines = true g.IgnoreUnknownValues = true return g }(), want: func() *bq.Job { j := defaultLoadJob() j.Configuration.Load.MaxBadRecords = 1 j.Configuration.Load.AllowJaggedRows = true j.Configuration.Load.AllowQuotedNewlines = true j.Configuration.Load.IgnoreUnknownValues = true return j }(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: func() *GCSReference { g := NewGCSReference("uri") g.Schema = Schema{ stringFieldSchema(), nestedFieldSchema(), } return g }(), want: func() *bq.Job { j := defaultLoadJob() j.Configuration.Load.Schema = &bq.TableSchema{ Fields: []*bq.TableFieldSchema{ bqStringFieldSchema(), bqNestedFieldSchema(), }} return j }(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: func() *GCSReference { g := NewGCSReference("uri") g.SkipLeadingRows = 1 g.SourceFormat = JSON g.Encoding = UTF_8 g.FieldDelimiter = "\t" g.Quote = "-" return g }(), want: func() *bq.Job { j := defaultLoadJob() j.Configuration.Load.SkipLeadingRows = 1 j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" j.Configuration.Load.Encoding = "UTF-8" j.Configuration.Load.FieldDelimiter = "\t" hyphen := "-" j.Configuration.Load.Quote = &hyphen return j }(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: NewGCSReference("uri"), want: func() *bq.Job { j := defaultLoadJob() // Quote is left unset in GCSReference, so should be nil here. 
j.Configuration.Load.Quote = nil return j }(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: func() *GCSReference { g := NewGCSReference("uri") g.ForceZeroQuote = true return g }(), want: func() *bq.Job { j := defaultLoadJob() empty := "" j.Configuration.Load.Quote = &empty return j }(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: func() *ReaderSource { r := NewReaderSource(strings.NewReader("foo")) r.SkipLeadingRows = 1 r.SourceFormat = JSON r.Encoding = UTF_8 r.FieldDelimiter = "\t" r.Quote = "-" return r }(), want: func() *bq.Job { j := defaultLoadJob() j.Configuration.Load.SourceUris = nil j.Configuration.Load.SkipLeadingRows = 1 j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" j.Configuration.Load.Encoding = "UTF-8" j.Configuration.Load.FieldDelimiter = "\t" hyphen := "-" j.Configuration.Load.Quote = &hyphen return j }(), }, } for i, tc := range testCases { s := &testService{} c.service = s loader := tc.dst.LoaderFrom(tc.src) tc.config.Src = tc.src tc.config.Dst = tc.dst loader.LoadConfig = tc.config if _, err := loader.Run(context.Background()); err != nil { t.Errorf("%d: err calling Loader.Run: %v", i, err) continue } if !reflect.DeepEqual(s.Job, tc.want) { t.Errorf("loading %d: got:\n%v\nwant:\n%v", i, pretty.Value(s.Job), pretty.Value(tc.want)) } } } golang-google-cloud-0.9.0/bigquery/params.go000066400000000000000000000161421312234511600210100ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "encoding/base64" "errors" "fmt" "reflect" "regexp" "time" "cloud.google.com/go/civil" "cloud.google.com/go/internal/fields" bq "google.golang.org/api/bigquery/v2" ) var ( // See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type. timestampFormat = "2006-01-02 15:04:05.999999-07:00" // See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$") ) func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { if s := t.Get("bigquery"); s != "" { if s == "-" { return "", false, nil, nil } if !validFieldName.MatchString(s) { return "", false, nil, errInvalidFieldName } return s, true, nil, nil } return "", true, nil, nil } var fieldCache = fields.NewCache(bqTagParser, nil, nil) var ( int64ParamType = &bq.QueryParameterType{Type: "INT64"} float64ParamType = &bq.QueryParameterType{Type: "FLOAT64"} boolParamType = &bq.QueryParameterType{Type: "BOOL"} stringParamType = &bq.QueryParameterType{Type: "STRING"} bytesParamType = &bq.QueryParameterType{Type: "BYTES"} dateParamType = &bq.QueryParameterType{Type: "DATE"} timeParamType = &bq.QueryParameterType{Type: "TIME"} dateTimeParamType = &bq.QueryParameterType{Type: "DATETIME"} timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"} ) var ( typeOfDate = reflect.TypeOf(civil.Date{}) typeOfTime = reflect.TypeOf(civil.Time{}) typeOfDateTime = reflect.TypeOf(civil.DateTime{}) typeOfGoTime = reflect.TypeOf(time.Time{}) ) // A QueryParameter is a parameter to a query. type QueryParameter struct { // Name is used for named parameter mode. // It must match the name in the query case-insensitively. Name string // Value is the value of the parameter. 
// The following Go types are supported, with their corresponding // Bigquery types: // int, int8, int16, int32, int64, uint8, uint16, uint32: INT64 // Note that uint, uint64 and uintptr are not supported, because // they may contain values that cannot fit into a 64-bit signed integer. // float32, float64: FLOAT64 // bool: BOOL // string: STRING // []byte: BYTES // time.Time: TIMESTAMP // Arrays and slices of the above. // Structs of the above. Only the exported fields are used. Value interface{} } func (p QueryParameter) toRaw() (*bq.QueryParameter, error) { pv, err := paramValue(reflect.ValueOf(p.Value)) if err != nil { return nil, err } pt, err := paramType(reflect.TypeOf(p.Value)) if err != nil { return nil, err } return &bq.QueryParameter{ Name: p.Name, ParameterValue: &pv, ParameterType: pt, }, nil } func paramType(t reflect.Type) (*bq.QueryParameterType, error) { if t == nil { return nil, errors.New("bigquery: nil parameter") } switch t { case typeOfDate: return dateParamType, nil case typeOfTime: return timeParamType, nil case typeOfDateTime: return dateTimeParamType, nil case typeOfGoTime: return timestampParamType, nil } switch t.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32: return int64ParamType, nil case reflect.Float32, reflect.Float64: return float64ParamType, nil case reflect.Bool: return boolParamType, nil case reflect.String: return stringParamType, nil case reflect.Slice: if t.Elem().Kind() == reflect.Uint8 { return bytesParamType, nil } fallthrough case reflect.Array: et, err := paramType(t.Elem()) if err != nil { return nil, err } return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil case reflect.Ptr: if t.Elem().Kind() != reflect.Struct { break } t = t.Elem() fallthrough case reflect.Struct: var fts []*bq.QueryParameterTypeStructTypes fields, err := fieldCache.Fields(t) if err != nil { return nil, err } for _, f := range fields { pt, err := 
paramType(f.Type) if err != nil { return nil, err } fts = append(fts, &bq.QueryParameterTypeStructTypes{ Name: f.Name, Type: pt, }) } return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil } return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t) } func paramValue(v reflect.Value) (bq.QueryParameterValue, error) { var res bq.QueryParameterValue if !v.IsValid() { return res, errors.New("bigquery: nil parameter") } t := v.Type() switch t { case typeOfDate: res.Value = v.Interface().(civil.Date).String() return res, nil case typeOfTime: // civil.Time has nanosecond resolution, but BigQuery TIME only microsecond. res.Value = civilTimeParamString(v.Interface().(civil.Time)) return res, nil case typeOfDateTime: dt := v.Interface().(civil.DateTime) res.Value = dt.Date.String() + " " + civilTimeParamString(dt.Time) return res, nil case typeOfGoTime: res.Value = v.Interface().(time.Time).Format(timestampFormat) return res, nil } switch t.Kind() { case reflect.Slice: if t.Elem().Kind() == reflect.Uint8 { res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte)) return res, nil } fallthrough case reflect.Array: var vals []*bq.QueryParameterValue for i := 0; i < v.Len(); i++ { val, err := paramValue(v.Index(i)) if err != nil { return bq.QueryParameterValue{}, err } vals = append(vals, &val) } return bq.QueryParameterValue{ArrayValues: vals}, nil case reflect.Ptr: if t.Elem().Kind() != reflect.Struct { return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t) } t = t.Elem() v = v.Elem() if !v.IsValid() { // nil pointer becomes empty value return res, nil } fallthrough case reflect.Struct: fields, err := fieldCache.Fields(t) if err != nil { return bq.QueryParameterValue{}, err } res.StructValues = map[string]bq.QueryParameterValue{} for _, f := range fields { fv := v.FieldByIndex(f.Index) fp, err := paramValue(fv) if err != nil { return bq.QueryParameterValue{}, err } 
res.StructValues[f.Name] = fp } return res, nil } // None of the above: assume a scalar type. (If it's not a valid type, // paramType will catch the error.) res.Value = fmt.Sprint(v.Interface()) return res, nil } func civilTimeParamString(t civil.Time) string { if t.Nanosecond == 0 { return t.String() } else { micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond t.Nanosecond = 0 return t.String() + fmt.Sprintf(".%06d", micro) } } golang-google-cloud-0.9.0/bigquery/params_test.go000066400000000000000000000145461312234511600220550ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package bigquery import ( "errors" "math" "reflect" "testing" "time" "cloud.google.com/go/civil" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) var scalarTests = []struct { val interface{} want string }{ {int64(0), "0"}, {3.14, "3.14"}, {3.14159e-87, "3.14159e-87"}, {true, "true"}, {"string", "string"}, {"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n"}, {math.NaN(), "NaN"}, {[]byte("foo"), "Zm9v"}, // base64 encoding of "foo" {time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)), "2016-03-20 04:22:09.000005-01:02"}, {civil.Date{2016, 3, 20}, "2016-03-20"}, {civil.Time{4, 5, 6, 789000000}, "04:05:06.789000"}, {civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}}, "2016-03-20 04:05:06.789000"}, } type S1 struct { A int B *S2 C bool } type S2 struct { D string e int } var s1 = S1{ A: 1, B: &S2{D: "s"}, C: true, } func sval(s string) bq.QueryParameterValue { return bq.QueryParameterValue{Value: s} } func TestParamValueScalar(t *testing.T) { for _, test := range scalarTests { got, err := paramValue(reflect.ValueOf(test.val)) if err != nil { t.Errorf("%v: got %v, want nil", test.val, err) continue } want := sval(test.want) if !reflect.DeepEqual(got, want) { t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want) } } } func TestParamValueArray(t *testing.T) { qpv := bq.QueryParameterValue{ArrayValues: []*bq.QueryParameterValue{ {Value: "1"}, {Value: "2"}, }, } for _, test := range []struct { val interface{} want bq.QueryParameterValue }{ {[]int(nil), bq.QueryParameterValue{}}, {[]int{}, bq.QueryParameterValue{}}, {[]int{1, 2}, qpv}, {[2]int{1, 2}, qpv}, } { got, err := paramValue(reflect.ValueOf(test.val)) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(got, test.want) { t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want) } } } func TestParamValueStruct(t *testing.T) { got, err := paramValue(reflect.ValueOf(s1)) if err != nil { t.Fatal(err) } want := bq.QueryParameterValue{ StructValues: 
map[string]bq.QueryParameterValue{ "A": sval("1"), "B": bq.QueryParameterValue{ StructValues: map[string]bq.QueryParameterValue{ "D": sval("s"), }, }, "C": sval("true"), }, } if !reflect.DeepEqual(got, want) { t.Errorf("got %+v\nwant %+v", got, want) } } func TestParamValueErrors(t *testing.T) { // paramValue lets a few invalid types through, but paramType catches them. // Since we never call one without the other that's fine. for _, val := range []interface{}{nil, new([]int)} { _, err := paramValue(reflect.ValueOf(val)) if err == nil { t.Errorf("%v (%T): got nil, want error", val, val) } } } func TestParamType(t *testing.T) { for _, test := range []struct { val interface{} want *bq.QueryParameterType }{ {0, int64ParamType}, {uint32(32767), int64ParamType}, {3.14, float64ParamType}, {float32(3.14), float64ParamType}, {math.NaN(), float64ParamType}, {true, boolParamType}, {"", stringParamType}, {"string", stringParamType}, {time.Now(), timestampParamType}, {[]byte("foo"), bytesParamType}, {[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}}, {[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}}, {S1{}, &bq.QueryParameterType{ Type: "STRUCT", StructTypes: []*bq.QueryParameterTypeStructTypes{ {Name: "A", Type: int64ParamType}, {Name: "B", Type: &bq.QueryParameterType{ Type: "STRUCT", StructTypes: []*bq.QueryParameterTypeStructTypes{ {Name: "D", Type: stringParamType}, }, }}, {Name: "C", Type: boolParamType}, }, }}, } { got, err := paramType(reflect.TypeOf(test.val)) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(got, test.want) { t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want) } } } func TestParamTypeErrors(t *testing.T) { for _, val := range []interface{}{ nil, uint(0), new([]int), make(chan int), } { _, err := paramType(reflect.TypeOf(val)) if err == nil { t.Errorf("%v (%T): got nil, want error", val, val) } } } func TestIntegration_ScalarParam(t *testing.T) { c := getClient(t) for _, test 
:= range scalarTests { got, err := paramRoundTrip(c, test.val) if err != nil { t.Fatal(err) } if !equal(got, test.val) { t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.val, test.val) } } } func TestIntegration_OtherParam(t *testing.T) { c := getClient(t) for _, test := range []struct { val interface{} want interface{} }{ {[]int(nil), []Value(nil)}, {[]int{}, []Value(nil)}, {[]int{1, 2}, []Value{int64(1), int64(2)}}, {[3]int{1, 2, 3}, []Value{int64(1), int64(2), int64(3)}}, {S1{}, []Value{int64(0), nil, false}}, {s1, []Value{int64(1), []Value{"s"}, true}}, } { got, err := paramRoundTrip(c, test.val) if err != nil { t.Fatal(err) } if !equal(got, test.want) { t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.want, test.want) } } } func paramRoundTrip(c *Client, x interface{}) (Value, error) { q := c.Query("select ?") q.Parameters = []QueryParameter{{Value: x}} it, err := q.Read(context.Background()) if err != nil { return nil, err } var val []Value err = it.Next(&val) if err != nil { return nil, err } if len(val) != 1 { return nil, errors.New("wrong number of values") } return val[0], nil } func equal(x1, x2 interface{}) bool { if reflect.TypeOf(x1) != reflect.TypeOf(x2) { return false } switch x1 := x1.(type) { case float64: if math.IsNaN(x1) { return math.IsNaN(x2.(float64)) } return x1 == x2 case time.Time: // BigQuery is only accurate to the microsecond. return x1.Round(time.Microsecond).Equal(x2.(time.Time).Round(time.Microsecond)) default: return reflect.DeepEqual(x1, x2) } } golang-google-cloud-0.9.0/bigquery/query.go000066400000000000000000000156651312234511600207030ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) // QueryConfig holds the configuration for a query job. type QueryConfig struct { // JobID is the ID to use for the query job. If this field is empty, a job ID // will be automatically created. JobID string // Dst is the table into which the results of the query will be written. // If this field is nil, a temporary table will be created. Dst *Table // The query to execute. See https://cloud.google.com/bigquery/query-reference for details. Q string // DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query. // If DefaultProjectID is set, DefaultDatasetID must also be set. DefaultProjectID string DefaultDatasetID string // TableDefinitions describes data sources outside of BigQuery. // The map keys may be used as table names in the query string. TableDefinitions map[string]ExternalData // CreateDisposition specifies the circumstances under which the destination table will be created. // The default is CreateIfNeeded. CreateDisposition TableCreateDisposition // WriteDisposition specifies how existing data in the destination table is treated. // The default is WriteEmpty. WriteDisposition TableWriteDisposition // DisableQueryCache prevents results being fetched from the query cache. // If this field is false, results are fetched from the cache if they are available. // The query cache is a best-effort cache that is flushed whenever tables in the query are modified. 
// Cached results are only available when TableID is unspecified in the query's destination Table. // For more information, see https://cloud.google.com/bigquery/querying-data#querycaching DisableQueryCache bool // DisableFlattenedResults prevents results being flattened. // If this field is false, results from nested and repeated fields are flattened. // DisableFlattenedResults implies AllowLargeResults // For more information, see https://cloud.google.com/bigquery/docs/data#nested DisableFlattenedResults bool // AllowLargeResults allows the query to produce arbitrarily large result tables. // The destination must be a table. // When using this option, queries will take longer to execute, even if the result set is small. // For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults AllowLargeResults bool // Priority specifies the priority with which to schedule the query. // The default priority is InteractivePriority. // For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries Priority QueryPriority // MaxBillingTier sets the maximum billing tier for a Query. // Queries that have resource usage beyond this tier will fail (without // incurring a charge). If this field is zero, the project default will be used. MaxBillingTier int // MaxBytesBilled limits the number of bytes billed for // this job. Queries that would exceed this limit will fail (without incurring // a charge). // If this field is less than 1, the project default will be // used. MaxBytesBilled int64 // UseStandardSQL causes the query to use standard SQL. // The default is false (using legacy SQL). UseStandardSQL bool // Parameters is a list of query parameters. The presence of parameters // implies the use of standard SQL. // If the query uses positional syntax ("?"), then no parameter may have a name. // If the query uses named syntax ("@p"), then all parameters must have names. // It is illegal to mix positional and named syntax. 
Parameters []QueryParameter } // QueryPriority specifies a priority with which a query is to be executed. type QueryPriority string const ( BatchPriority QueryPriority = "BATCH" InteractivePriority QueryPriority = "INTERACTIVE" ) // A Query queries data from a BigQuery table. Use Client.Query to create a Query. type Query struct { client *Client QueryConfig } // Query creates a query with string q. // The returned Query may optionally be further configured before its Run method is called. func (c *Client) Query(q string) *Query { return &Query{ client: c, QueryConfig: QueryConfig{Q: q}, } } // Run initiates a query job. func (q *Query) Run(ctx context.Context) (*Job, error) { job := &bq.Job{ Configuration: &bq.JobConfiguration{ Query: &bq.JobConfigurationQuery{}, }, } setJobRef(job, q.JobID, q.client.projectID) if err := q.QueryConfig.populateJobQueryConfig(job.Configuration.Query); err != nil { return nil, err } j, err := q.client.insertJob(ctx, &insertJobConf{job: job}) if err != nil { return nil, err } j.isQuery = true return j, nil } func (q *QueryConfig) populateJobQueryConfig(conf *bq.JobConfigurationQuery) error { conf.Query = q.Q if len(q.TableDefinitions) > 0 { conf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration) } for name, data := range q.TableDefinitions { conf.TableDefinitions[name] = data.externalDataConfig() } if q.DefaultProjectID != "" || q.DefaultDatasetID != "" { conf.DefaultDataset = &bq.DatasetReference{ DatasetId: q.DefaultDatasetID, ProjectId: q.DefaultProjectID, } } if tier := int64(q.MaxBillingTier); tier > 0 { conf.MaximumBillingTier = &tier } conf.CreateDisposition = string(q.CreateDisposition) conf.WriteDisposition = string(q.WriteDisposition) conf.AllowLargeResults = q.AllowLargeResults conf.Priority = string(q.Priority) f := false if q.DisableQueryCache { conf.UseQueryCache = &f } if q.DisableFlattenedResults { conf.FlattenResults = &f // DisableFlattenResults implies AllowLargeResults. 
conf.AllowLargeResults = true } if q.MaxBytesBilled >= 1 { conf.MaximumBytesBilled = q.MaxBytesBilled } if q.UseStandardSQL || len(q.Parameters) > 0 { conf.UseLegacySql = false conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql") } if q.Dst != nil && !q.Dst.implicitTable() { conf.DestinationTable = q.Dst.tableRefProto() } for _, p := range q.Parameters { qp, err := p.toRaw() if err != nil { return err } conf.QueryParameters = append(conf.QueryParameters, qp) } return nil } // Read submits a query for execution and returns the results via a RowIterator. // It is a shorthand for Query.Run followed by Job.Read. func (q *Query) Read(ctx context.Context) (*RowIterator, error) { job, err := q.Run(ctx) if err != nil { return nil, err } return job.Read(ctx) } golang-google-cloud-0.9.0/bigquery/query_test.go000066400000000000000000000172271312234511600217360ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package bigquery import ( "reflect" "testing" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" ) func defaultQueryJob() *bq.Job { return &bq.Job{ Configuration: &bq.JobConfiguration{ Query: &bq.JobConfigurationQuery{ DestinationTable: &bq.TableReference{ ProjectId: "project-id", DatasetId: "dataset-id", TableId: "table-id", }, Query: "query string", DefaultDataset: &bq.DatasetReference{ ProjectId: "def-project-id", DatasetId: "def-dataset-id", }, }, }, } } func TestQuery(t *testing.T) { c := &Client{ projectID: "project-id", } testCases := []struct { dst *Table src *QueryConfig want *bq.Job }{ { dst: c.Dataset("dataset-id").Table("table-id"), src: defaultQuery, want: defaultQueryJob(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: &QueryConfig{ Q: "query string", }, want: func() *bq.Job { j := defaultQueryJob() j.Configuration.Query.DefaultDataset = nil return j }(), }, { dst: &Table{}, src: defaultQuery, want: func() *bq.Job { j := defaultQueryJob() j.Configuration.Query.DestinationTable = nil return j }(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: &QueryConfig{ Q: "query string", TableDefinitions: map[string]ExternalData{ "atable": func() *GCSReference { g := NewGCSReference("uri") g.AllowJaggedRows = true g.AllowQuotedNewlines = true g.Compression = Gzip g.Encoding = UTF_8 g.FieldDelimiter = ";" g.IgnoreUnknownValues = true g.MaxBadRecords = 1 g.Quote = "'" g.SkipLeadingRows = 2 g.Schema = Schema([]*FieldSchema{ {Name: "name", Type: StringFieldType}, }) return g }(), }, }, want: func() *bq.Job { j := defaultQueryJob() j.Configuration.Query.DefaultDataset = nil td := make(map[string]bq.ExternalDataConfiguration) quote := "'" td["atable"] = bq.ExternalDataConfiguration{ Compression: "GZIP", IgnoreUnknownValues: true, MaxBadRecords: 1, SourceFormat: "CSV", // must be explicitly set. 
SourceUris: []string{"uri"}, CsvOptions: &bq.CsvOptions{ AllowJaggedRows: true, AllowQuotedNewlines: true, Encoding: "UTF-8", FieldDelimiter: ";", SkipLeadingRows: 2, Quote: "e, }, Schema: &bq.TableSchema{ Fields: []*bq.TableFieldSchema{ {Name: "name", Type: "STRING"}, }, }, } j.Configuration.Query.TableDefinitions = td return j }(), }, { dst: &Table{ ProjectID: "project-id", DatasetID: "dataset-id", TableID: "table-id", }, src: &QueryConfig{ Q: "query string", DefaultProjectID: "def-project-id", DefaultDatasetID: "def-dataset-id", CreateDisposition: CreateNever, WriteDisposition: WriteTruncate, }, want: func() *bq.Job { j := defaultQueryJob() j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE" j.Configuration.Query.CreateDisposition = "CREATE_NEVER" return j }(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: &QueryConfig{ Q: "query string", DefaultProjectID: "def-project-id", DefaultDatasetID: "def-dataset-id", DisableQueryCache: true, }, want: func() *bq.Job { j := defaultQueryJob() f := false j.Configuration.Query.UseQueryCache = &f return j }(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: &QueryConfig{ Q: "query string", DefaultProjectID: "def-project-id", DefaultDatasetID: "def-dataset-id", AllowLargeResults: true, }, want: func() *bq.Job { j := defaultQueryJob() j.Configuration.Query.AllowLargeResults = true return j }(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: &QueryConfig{ Q: "query string", DefaultProjectID: "def-project-id", DefaultDatasetID: "def-dataset-id", DisableFlattenedResults: true, }, want: func() *bq.Job { j := defaultQueryJob() f := false j.Configuration.Query.FlattenResults = &f j.Configuration.Query.AllowLargeResults = true return j }(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: &QueryConfig{ Q: "query string", DefaultProjectID: "def-project-id", DefaultDatasetID: "def-dataset-id", Priority: QueryPriority("low"), }, want: func() *bq.Job { j := defaultQueryJob() 
j.Configuration.Query.Priority = "low" return j }(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: &QueryConfig{ Q: "query string", DefaultProjectID: "def-project-id", DefaultDatasetID: "def-dataset-id", MaxBillingTier: 3, MaxBytesBilled: 5, }, want: func() *bq.Job { j := defaultQueryJob() tier := int64(3) j.Configuration.Query.MaximumBillingTier = &tier j.Configuration.Query.MaximumBytesBilled = 5 return j }(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: &QueryConfig{ Q: "query string", DefaultProjectID: "def-project-id", DefaultDatasetID: "def-dataset-id", MaxBytesBilled: -1, }, want: defaultQueryJob(), }, { dst: c.Dataset("dataset-id").Table("table-id"), src: &QueryConfig{ Q: "query string", DefaultProjectID: "def-project-id", DefaultDatasetID: "def-dataset-id", UseStandardSQL: true, }, want: func() *bq.Job { j := defaultQueryJob() j.Configuration.Query.UseLegacySql = false j.Configuration.Query.ForceSendFields = []string{"UseLegacySql"} return j }(), }, } for _, tc := range testCases { s := &testService{} c.service = s query := c.Query("") query.QueryConfig = *tc.src query.Dst = tc.dst if _, err := query.Run(context.Background()); err != nil { t.Errorf("err calling query: %v", err) continue } if !reflect.DeepEqual(s.Job, tc.want) { t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want) } } } func TestConfiguringQuery(t *testing.T) { s := &testService{} c := &Client{ projectID: "project-id", service: s, } query := c.Query("q") query.JobID = "ajob" query.DefaultProjectID = "def-project-id" query.DefaultDatasetID = "def-dataset-id" // Note: Other configuration fields are tested in other tests above. // A lot of that can be consolidated once Client.Copy is gone. 
want := &bq.Job{ Configuration: &bq.JobConfiguration{ Query: &bq.JobConfigurationQuery{ Query: "q", DefaultDataset: &bq.DatasetReference{ ProjectId: "def-project-id", DatasetId: "def-dataset-id", }, }, }, JobReference: &bq.JobReference{ JobId: "ajob", ProjectId: "project-id", }, } if _, err := query.Run(context.Background()); err != nil { t.Fatalf("err calling Query.Run: %v", err) } if !reflect.DeepEqual(s.Job, want) { t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, want) } } golang-google-cloud-0.9.0/bigquery/read_test.go000066400000000000000000000151561312234511600215030ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "errors" "reflect" "testing" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" "google.golang.org/api/iterator" ) type readTabledataArgs struct { conf *readTableConf tok string } // readServiceStub services read requests by returning data from an in-memory list of values. type readServiceStub struct { // values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery. values [][][]Value // contains pages / rows / columns. pageTokens map[string]string // maps incoming page token to returned page token. // arguments are recorded for later inspection. 
readTabledataCalls []readTabledataArgs service } func (s *readServiceStub) readValues(tok string) *readDataResult { result := &readDataResult{ pageToken: s.pageTokens[tok], rows: s.values[0], } s.values = s.values[1:] return result } func (s *readServiceStub) waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error) { return nil, nil } func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) { s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token}) return s.readValues(token), nil } func TestRead(t *testing.T) { // The data for the service stub to return is populated for each test case in the testCases for loop. ctx := context.Background() service := &readServiceStub{} c := &Client{ projectID: "project-id", service: service, } queryJob := &Job{ projectID: "project-id", jobID: "job-id", c: c, isQuery: true, destinationTable: &bq.TableReference{ ProjectId: "project-id", DatasetId: "dataset-id", TableId: "table-id", }, } for _, readFunc := range []func() *RowIterator{ func() *RowIterator { return c.Dataset("dataset-id").Table("table-id").Read(ctx) }, func() *RowIterator { it, err := queryJob.Read(ctx) if err != nil { t.Fatal(err) } return it }, } { testCases := []struct { data [][][]Value pageTokens map[string]string want [][]Value }{ { data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}}, pageTokens: map[string]string{"": "a", "a": ""}, want: [][]Value{{1, 2}, {11, 12}, {30, 40}, {31, 41}}, }, { data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}}, pageTokens: map[string]string{"": ""}, // no more pages after first one. 
want: [][]Value{{1, 2}, {11, 12}}, }, } for _, tc := range testCases { service.values = tc.data service.pageTokens = tc.pageTokens if got, ok := collectValues(t, readFunc()); ok { if !reflect.DeepEqual(got, tc.want) { t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want) } } } } } func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) { var got [][]Value for { var vals []Value err := it.Next(&vals) if err == iterator.Done { break } if err != nil { t.Errorf("err calling Next: %v", err) return nil, false } got = append(got, vals) } return got, true } func TestNoMoreValues(t *testing.T) { c := &Client{ projectID: "project-id", service: &readServiceStub{ values: [][][]Value{{{1, 2}, {11, 12}}}, }, } it := c.Dataset("dataset-id").Table("table-id").Read(context.Background()) var vals []Value // We expect to retrieve two values and then fail on the next attempt. if err := it.Next(&vals); err != nil { t.Fatalf("Next: got: %v: want: nil", err) } if err := it.Next(&vals); err != nil { t.Fatalf("Next: got: %v: want: nil", err) } if err := it.Next(&vals); err != iterator.Done { t.Fatalf("Next: got: %v: want: iterator.Done", err) } } type errorReadService struct { service } var errBang = errors.New("bang!") func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) { return nil, errBang } func TestReadError(t *testing.T) { // test that service read errors are propagated back to the caller. c := &Client{ projectID: "project-id", service: &errorReadService{}, } it := c.Dataset("dataset-id").Table("table-id").Read(context.Background()) var vals []Value if err := it.Next(&vals); err != errBang { t.Fatalf("Get: got: %v: want: %v", err, errBang) } } func TestReadTabledataOptions(t *testing.T) { // test that read options are propagated. 
s := &readServiceStub{ values: [][][]Value{{{1, 2}}}, } c := &Client{ projectID: "project-id", service: s, } it := c.Dataset("dataset-id").Table("table-id").Read(context.Background()) it.PageInfo().MaxSize = 5 var vals []Value if err := it.Next(&vals); err != nil { t.Fatal(err) } want := []readTabledataArgs{{ conf: &readTableConf{ projectID: "project-id", datasetID: "dataset-id", tableID: "table-id", paging: pagingConf{ recordsPerRequest: 5, setRecordsPerRequest: true, }, }, tok: "", }} if !reflect.DeepEqual(s.readTabledataCalls, want) { t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want) } } func TestReadQueryOptions(t *testing.T) { // test that read options are propagated. s := &readServiceStub{ values: [][][]Value{{{1, 2}}}, } queryJob := &Job{ projectID: "project-id", jobID: "job-id", c: &Client{service: s}, isQuery: true, destinationTable: &bq.TableReference{ ProjectId: "project-id", DatasetId: "dataset-id", TableId: "table-id", }, } it, err := queryJob.Read(context.Background()) if err != nil { t.Fatalf("err calling Read: %v", err) } it.PageInfo().MaxSize = 5 var vals []Value if err := it.Next(&vals); err != nil { t.Fatalf("Next: got: %v: want: nil", err) } want := []readTabledataArgs{{ conf: &readTableConf{ projectID: "project-id", datasetID: "dataset-id", tableID: "table-id", paging: pagingConf{ recordsPerRequest: 5, setRecordsPerRequest: true, }, }, tok: "", }} if !reflect.DeepEqual(s.readTabledataCalls, want) { t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want) } } golang-google-cloud-0.9.0/bigquery/schema.go000066400000000000000000000210111312234511600207540ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "errors" "fmt" "reflect" "cloud.google.com/go/internal/atomiccache" bq "google.golang.org/api/bigquery/v2" ) // Schema describes the fields in a table or query result. type Schema []*FieldSchema type FieldSchema struct { // The field name. // Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), // and must start with a letter or underscore. // The maximum length is 128 characters. Name string // A description of the field. The maximum length is 16,384 characters. Description string // Whether the field may contain multiple values. Repeated bool // Whether the field is required. Ignored if Repeated is true. Required bool // The field data type. If Type is Record, then this field contains a nested schema, // which is described by Schema. Type FieldType // Describes the nested schema if Type is set to Record. Schema Schema } func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema { tfs := &bq.TableFieldSchema{ Description: fs.Description, Name: fs.Name, Type: string(fs.Type), } if fs.Repeated { tfs.Mode = "REPEATED" } else if fs.Required { tfs.Mode = "REQUIRED" } // else leave as default, which is interpreted as NULLABLE. for _, f := range fs.Schema { tfs.Fields = append(tfs.Fields, f.asTableFieldSchema()) } return tfs } func (s Schema) asTableSchema() *bq.TableSchema { var fields []*bq.TableFieldSchema for _, f := range s { fields = append(fields, f.asTableFieldSchema()) } return &bq.TableSchema{Fields: fields} } // customizeCreateTable allows a Schema to be used directly as an option to CreateTable. 
func (s Schema) customizeCreateTable(conf *createTableConf) { conf.schema = s.asTableSchema() } func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema { fs := &FieldSchema{ Description: tfs.Description, Name: tfs.Name, Repeated: tfs.Mode == "REPEATED", Required: tfs.Mode == "REQUIRED", Type: FieldType(tfs.Type), } for _, f := range tfs.Fields { fs.Schema = append(fs.Schema, convertTableFieldSchema(f)) } return fs } func convertTableSchema(ts *bq.TableSchema) Schema { if ts == nil { return nil } var s Schema for _, f := range ts.Fields { s = append(s, convertTableFieldSchema(f)) } return s } type FieldType string const ( StringFieldType FieldType = "STRING" BytesFieldType FieldType = "BYTES" IntegerFieldType FieldType = "INTEGER" FloatFieldType FieldType = "FLOAT" BooleanFieldType FieldType = "BOOLEAN" TimestampFieldType FieldType = "TIMESTAMP" RecordFieldType FieldType = "RECORD" DateFieldType FieldType = "DATE" TimeFieldType FieldType = "TIME" DateTimeFieldType FieldType = "DATETIME" ) var ( errNoStruct = errors.New("bigquery: can only infer schema from struct or pointer to struct") errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct") errInvalidFieldName = errors.New("bigquery: invalid name of field in struct") ) var typeOfByteSlice = reflect.TypeOf([]byte{}) // InferSchema tries to derive a BigQuery schema from the supplied struct value. // NOTE: All fields in the returned Schema are configured to be required, // unless the corresponding field in the supplied struct is a slice or array. // // It is considered an error if the struct (including nested structs) contains // any exported fields that are pointers or one of the following types: // uint, uint64, uintptr, map, interface, complex64, complex128, func, chan. // In these cases, an error will be returned. // Future versions may handle these cases without error. // // Recursively defined structs are also disallowed. 
func InferSchema(st interface{}) (Schema, error) { return inferSchemaReflectCached(reflect.TypeOf(st)) } var schemaCache atomiccache.Cache type cacheVal struct { schema Schema err error } func inferSchemaReflectCached(t reflect.Type) (Schema, error) { cv := schemaCache.Get(t, func() interface{} { s, err := inferSchemaReflect(t) return cacheVal{s, err} }).(cacheVal) return cv.schema, cv.err } func inferSchemaReflect(t reflect.Type) (Schema, error) { rec, err := hasRecursiveType(t, nil) if err != nil { return nil, err } if rec { return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t) } return inferStruct(t) } func inferStruct(t reflect.Type) (Schema, error) { switch t.Kind() { case reflect.Ptr: if t.Elem().Kind() != reflect.Struct { return nil, errNoStruct } t = t.Elem() fallthrough case reflect.Struct: return inferFields(t) default: return nil, errNoStruct } } // inferFieldSchema infers the FieldSchema for a Go type func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) { switch rt { case typeOfByteSlice: return &FieldSchema{Required: true, Type: BytesFieldType}, nil case typeOfGoTime: return &FieldSchema{Required: true, Type: TimestampFieldType}, nil case typeOfDate: return &FieldSchema{Required: true, Type: DateFieldType}, nil case typeOfTime: return &FieldSchema{Required: true, Type: TimeFieldType}, nil case typeOfDateTime: return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil } if isSupportedIntType(rt) { return &FieldSchema{Required: true, Type: IntegerFieldType}, nil } switch rt.Kind() { case reflect.Slice, reflect.Array: et := rt.Elem() if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) { // Multi dimensional slices/arrays are not supported by BigQuery return nil, errUnsupportedFieldType } f, err := inferFieldSchema(et) if err != nil { return nil, err } f.Repeated = true f.Required = false return f, nil case reflect.Struct, reflect.Ptr: nested, err := inferStruct(rt) if err != nil { 
return nil, err } return &FieldSchema{Required: true, Type: RecordFieldType, Schema: nested}, nil case reflect.String: return &FieldSchema{Required: true, Type: StringFieldType}, nil case reflect.Bool: return &FieldSchema{Required: true, Type: BooleanFieldType}, nil case reflect.Float32, reflect.Float64: return &FieldSchema{Required: true, Type: FloatFieldType}, nil default: return nil, errUnsupportedFieldType } } // inferFields extracts all exported field types from struct type. func inferFields(rt reflect.Type) (Schema, error) { var s Schema fields, err := fieldCache.Fields(rt) if err != nil { return nil, err } for _, field := range fields { f, err := inferFieldSchema(field.Type) if err != nil { return nil, err } f.Name = field.Name s = append(s, f) } return s, nil } // isSupportedIntType reports whether t can be properly represented by the // BigQuery INTEGER/INT64 type. func isSupportedIntType(t reflect.Type) bool { switch t.Kind() { case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int, reflect.Uint8, reflect.Uint16, reflect.Uint32: return true default: return false } } // typeList is a linked list of reflect.Types. type typeList struct { t reflect.Type next *typeList } func (l *typeList) has(t reflect.Type) bool { for l != nil { if l.t == t { return true } l = l.next } return false } // hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly, // via exported fields. (Schema inference ignores unexported fields.) 
func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) { for t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice || t.Kind() == reflect.Array { t = t.Elem() } if t.Kind() != reflect.Struct { return false, nil } if seen.has(t) { return true, nil } fields, err := fieldCache.Fields(t) if err != nil { return false, err } seen = &typeList{t, seen} // Because seen is a linked list, additions to it from one field's // recursive call will not affect the value for subsequent fields' calls. for _, field := range fields { ok, err := hasRecursiveType(field.Type, seen) if err != nil { return false, err } if ok { return true, nil } } return false, nil } golang-google-cloud-0.9.0/bigquery/schema_test.go000066400000000000000000000375411312234511600220320ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package bigquery import ( "fmt" "reflect" "testing" "time" "cloud.google.com/go/civil" "cloud.google.com/go/internal/pretty" bq "google.golang.org/api/bigquery/v2" ) func (fs *FieldSchema) GoString() string { if fs == nil { return "" } return fmt.Sprintf("{Name:%s Description:%s Repeated:%t Required:%t Type:%s Schema:%s}", fs.Name, fs.Description, fs.Repeated, fs.Required, fs.Type, fmt.Sprintf("%#v", fs.Schema), ) } func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema { return &bq.TableFieldSchema{ Description: desc, Name: name, Mode: mode, Type: typ, } } func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema { return &FieldSchema{ Description: desc, Name: name, Repeated: repeated, Required: required, Type: FieldType(typ), } } func TestSchemaConversion(t *testing.T) { testCases := []struct { schema Schema bqSchema *bq.TableSchema }{ { // required bqSchema: &bq.TableSchema{ Fields: []*bq.TableFieldSchema{ bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), }, }, schema: Schema{ fieldSchema("desc", "name", "STRING", false, true), }, }, { // repeated bqSchema: &bq.TableSchema{ Fields: []*bq.TableFieldSchema{ bqTableFieldSchema("desc", "name", "STRING", "REPEATED"), }, }, schema: Schema{ fieldSchema("desc", "name", "STRING", true, false), }, }, { // nullable, string bqSchema: &bq.TableSchema{ Fields: []*bq.TableFieldSchema{ bqTableFieldSchema("desc", "name", "STRING", ""), }, }, schema: Schema{ fieldSchema("desc", "name", "STRING", false, false), }, }, { // integer bqSchema: &bq.TableSchema{ Fields: []*bq.TableFieldSchema{ bqTableFieldSchema("desc", "name", "INTEGER", ""), }, }, schema: Schema{ fieldSchema("desc", "name", "INTEGER", false, false), }, }, { // float bqSchema: &bq.TableSchema{ Fields: []*bq.TableFieldSchema{ bqTableFieldSchema("desc", "name", "FLOAT", ""), }, }, schema: Schema{ fieldSchema("desc", "name", "FLOAT", false, false), }, }, { // boolean bqSchema: &bq.TableSchema{ Fields: 
[]*bq.TableFieldSchema{ bqTableFieldSchema("desc", "name", "BOOLEAN", ""), }, }, schema: Schema{ fieldSchema("desc", "name", "BOOLEAN", false, false), }, }, { // timestamp bqSchema: &bq.TableSchema{ Fields: []*bq.TableFieldSchema{ bqTableFieldSchema("desc", "name", "TIMESTAMP", ""), }, }, schema: Schema{ fieldSchema("desc", "name", "TIMESTAMP", false, false), }, }, { // civil times bqSchema: &bq.TableSchema{ Fields: []*bq.TableFieldSchema{ bqTableFieldSchema("desc", "f1", "TIME", ""), bqTableFieldSchema("desc", "f2", "DATE", ""), bqTableFieldSchema("desc", "f3", "DATETIME", ""), }, }, schema: Schema{ fieldSchema("desc", "f1", "TIME", false, false), fieldSchema("desc", "f2", "DATE", false, false), fieldSchema("desc", "f3", "DATETIME", false, false), }, }, { // nested bqSchema: &bq.TableSchema{ Fields: []*bq.TableFieldSchema{ { Description: "An outer schema wrapping a nested schema", Name: "outer", Mode: "REQUIRED", Type: "RECORD", Fields: []*bq.TableFieldSchema{ bqTableFieldSchema("inner field", "inner", "STRING", ""), }, }, }, }, schema: Schema{ &FieldSchema{ Description: "An outer schema wrapping a nested schema", Name: "outer", Required: true, Type: "RECORD", Schema: []*FieldSchema{ { Description: "inner field", Name: "inner", Type: "STRING", }, }, }, }, }, } for _, tc := range testCases { bqSchema := tc.schema.asTableSchema() if !reflect.DeepEqual(bqSchema, tc.bqSchema) { t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v", pretty.Value(bqSchema), pretty.Value(tc.bqSchema)) } schema := convertTableSchema(tc.bqSchema) if !reflect.DeepEqual(schema, tc.schema) { t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema) } } } type allStrings struct { String string ByteSlice []byte } type allSignedIntegers struct { Int64 int64 Int32 int32 Int16 int16 Int8 int8 Int int } type allUnsignedIntegers struct { Uint32 uint32 Uint16 uint16 Uint8 uint8 } type allFloat struct { Float64 float64 Float32 float32 // NOTE: Complex32 and Complex64 are unsupported 
by BigQuery } type allBoolean struct { Bool bool } type allTime struct { Timestamp time.Time Time civil.Time Date civil.Date DateTime civil.DateTime } func reqField(name, typ string) *FieldSchema { return &FieldSchema{ Name: name, Type: FieldType(typ), Required: true, } } func TestSimpleInference(t *testing.T) { testCases := []struct { in interface{} want Schema }{ { in: allSignedIntegers{}, want: Schema{ reqField("Int64", "INTEGER"), reqField("Int32", "INTEGER"), reqField("Int16", "INTEGER"), reqField("Int8", "INTEGER"), reqField("Int", "INTEGER"), }, }, { in: allUnsignedIntegers{}, want: Schema{ reqField("Uint32", "INTEGER"), reqField("Uint16", "INTEGER"), reqField("Uint8", "INTEGER"), }, }, { in: allFloat{}, want: Schema{ reqField("Float64", "FLOAT"), reqField("Float32", "FLOAT"), }, }, { in: allBoolean{}, want: Schema{ reqField("Bool", "BOOLEAN"), }, }, { in: &allBoolean{}, want: Schema{ reqField("Bool", "BOOLEAN"), }, }, { in: allTime{}, want: Schema{ reqField("Timestamp", "TIMESTAMP"), reqField("Time", "TIME"), reqField("Date", "DATE"), reqField("DateTime", "DATETIME"), }, }, { in: allStrings{}, want: Schema{ reqField("String", "STRING"), reqField("ByteSlice", "BYTES"), }, }, } for _, tc := range testCases { got, err := InferSchema(tc.in) if err != nil { t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err) } if !reflect.DeepEqual(got, tc.want) { t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in, pretty.Value(got), pretty.Value(tc.want)) } } } type containsNested struct { hidden string NotNested int Nested struct { Inside int } } type containsDoubleNested struct { NotNested int Nested struct { InsideNested struct { Inside int } } } type ptrNested struct { Ptr *struct{ Inside int } } type dup struct { // more than one field of the same struct type A, B allBoolean } func TestNestedInference(t *testing.T) { testCases := []struct { in interface{} want Schema }{ { in: containsNested{}, want: Schema{ reqField("NotNested", "INTEGER"), 
&FieldSchema{ Name: "Nested", Required: true, Type: "RECORD", Schema: Schema{reqField("Inside", "INTEGER")}, }, }, }, { in: containsDoubleNested{}, want: Schema{ reqField("NotNested", "INTEGER"), &FieldSchema{ Name: "Nested", Required: true, Type: "RECORD", Schema: Schema{ { Name: "InsideNested", Required: true, Type: "RECORD", Schema: Schema{reqField("Inside", "INTEGER")}, }, }, }, }, }, { in: ptrNested{}, want: Schema{ &FieldSchema{ Name: "Ptr", Required: true, Type: "RECORD", Schema: Schema{reqField("Inside", "INTEGER")}, }, }, }, { in: dup{}, want: Schema{ &FieldSchema{ Name: "A", Required: true, Type: "RECORD", Schema: Schema{reqField("Bool", "BOOLEAN")}, }, &FieldSchema{ Name: "B", Required: true, Type: "RECORD", Schema: Schema{reqField("Bool", "BOOLEAN")}, }, }, }, } for _, tc := range testCases { got, err := InferSchema(tc.in) if err != nil { t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err) } if !reflect.DeepEqual(got, tc.want) { t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in, pretty.Value(got), pretty.Value(tc.want)) } } } type repeated struct { NotRepeated []byte RepeatedByteSlice [][]byte Slice []int Array [5]bool } type nestedRepeated struct { NotRepeated int Repeated []struct { Inside int } RepeatedPtr []*struct{ Inside int } } func repField(name, typ string) *FieldSchema { return &FieldSchema{ Name: name, Type: FieldType(typ), Repeated: true, } } func TestRepeatedInference(t *testing.T) { testCases := []struct { in interface{} want Schema }{ { in: repeated{}, want: Schema{ reqField("NotRepeated", "BYTES"), repField("RepeatedByteSlice", "BYTES"), repField("Slice", "INTEGER"), repField("Array", "BOOLEAN"), }, }, { in: nestedRepeated{}, want: Schema{ reqField("NotRepeated", "INTEGER"), { Name: "Repeated", Repeated: true, Type: "RECORD", Schema: Schema{reqField("Inside", "INTEGER")}, }, { Name: "RepeatedPtr", Repeated: true, Type: "RECORD", Schema: Schema{reqField("Inside", "INTEGER")}, }, }, }, } for i, tc := range 
testCases { got, err := InferSchema(tc.in) if err != nil { t.Fatalf("%d: error inferring TableSchema: %v", i, err) } if !reflect.DeepEqual(got, tc.want) { t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, pretty.Value(got), pretty.Value(tc.want)) } } } type Embedded struct { Embedded int } type embedded struct { Embedded2 int } type nestedEmbedded struct { Embedded embedded } func TestEmbeddedInference(t *testing.T) { got, err := InferSchema(nestedEmbedded{}) if err != nil { t.Fatal(err) } want := Schema{ reqField("Embedded", "INTEGER"), reqField("Embedded2", "INTEGER"), } if !reflect.DeepEqual(got, want) { t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want)) } } func TestRecursiveInference(t *testing.T) { type List struct { Val int Next *List } _, err := InferSchema(List{}) if err == nil { t.Fatal("got nil, want error") } } type withTags struct { NoTag int ExcludeTag int `bigquery:"-"` SimpleTag int `bigquery:"simple_tag"` UnderscoreTag int `bigquery:"_id"` MixedCase int `bigquery:"MIXEDcase"` } type withTagsNested struct { Nested withTags `bigquery:"nested"` NestedAnonymous struct { ExcludeTag int `bigquery:"-"` Inside int `bigquery:"inside"` } `bigquery:"anon"` } type withTagsRepeated struct { Repeated []withTags `bigquery:"repeated"` RepeatedAnonymous []struct { ExcludeTag int `bigquery:"-"` Inside int `bigquery:"inside"` } `bigquery:"anon"` } type withTagsEmbedded struct { withTags } var withTagsSchema = Schema{ reqField("NoTag", "INTEGER"), reqField("simple_tag", "INTEGER"), reqField("_id", "INTEGER"), reqField("MIXEDcase", "INTEGER"), } func TestTagInference(t *testing.T) { testCases := []struct { in interface{} want Schema }{ { in: withTags{}, want: withTagsSchema, }, { in: withTagsNested{}, want: Schema{ &FieldSchema{ Name: "nested", Required: true, Type: "RECORD", Schema: withTagsSchema, }, &FieldSchema{ Name: "anon", Required: true, Type: "RECORD", Schema: Schema{reqField("inside", "INTEGER")}, }, }, }, { in: 
withTagsRepeated{}, want: Schema{ &FieldSchema{ Name: "repeated", Repeated: true, Type: "RECORD", Schema: withTagsSchema, }, &FieldSchema{ Name: "anon", Repeated: true, Type: "RECORD", Schema: Schema{reqField("inside", "INTEGER")}, }, }, }, { in: withTagsEmbedded{}, want: withTagsSchema, }, } for i, tc := range testCases { got, err := InferSchema(tc.in) if err != nil { t.Fatalf("%d: error inferring TableSchema: %v", i, err) } if !reflect.DeepEqual(got, tc.want) { t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, pretty.Value(got), pretty.Value(tc.want)) } } } func TestTagInferenceErrors(t *testing.T) { testCases := []struct { in interface{} err error }{ { in: struct { LongTag int `bigquery:"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy"` }{}, err: errInvalidFieldName, }, { in: struct { UnsupporedStartChar int `bigquery:"øab"` }{}, err: errInvalidFieldName, }, { in: struct { UnsupportedEndChar int `bigquery:"abø"` }{}, err: errInvalidFieldName, }, { in: struct { UnsupportedMiddleChar int `bigquery:"aøb"` }{}, err: errInvalidFieldName, }, { in: struct { StartInt int `bigquery:"1abc"` }{}, err: errInvalidFieldName, }, { in: struct { Hyphens int `bigquery:"a-b"` }{}, err: errInvalidFieldName, }, { in: struct { OmitEmpty int `bigquery:"abc,omitempty"` }{}, err: errInvalidFieldName, }, } for i, tc := range testCases { want := tc.err _, got := InferSchema(tc.in) if !reflect.DeepEqual(got, want) { t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want) } } } func TestSchemaErrors(t *testing.T) { testCases := []struct { in interface{} err error }{ { in: []byte{}, err: errNoStruct, }, { in: new(int), err: errNoStruct, }, { in: struct{ Uint uint }{}, err: errUnsupportedFieldType, }, { in: struct{ Uint64 uint64 }{}, err: errUnsupportedFieldType, }, { in: struct{ Uintptr uintptr }{}, err: errUnsupportedFieldType, }, { in: struct{ Complex complex64 }{}, err: 
errUnsupportedFieldType, }, { in: struct{ Map map[string]int }{}, err: errUnsupportedFieldType, }, { in: struct{ Chan chan bool }{}, err: errUnsupportedFieldType, }, { in: struct{ Ptr *int }{}, err: errNoStruct, }, { in: struct{ Interface interface{} }{}, err: errUnsupportedFieldType, }, { in: struct{ MultiDimensional [][]int }{}, err: errUnsupportedFieldType, }, { in: struct{ MultiDimensional [][][]byte }{}, err: errUnsupportedFieldType, }, { in: struct{ ChanSlice []chan bool }{}, err: errUnsupportedFieldType, }, { in: struct{ NestedChan struct{ Chan []chan bool } }{}, err: errUnsupportedFieldType, }, } for _, tc := range testCases { want := tc.err _, got := InferSchema(tc.in) if !reflect.DeepEqual(got, want) { t.Errorf("%#v: got:\n%#v\nwant:\n%#v", tc.in, got, want) } } } func TestHasRecursiveType(t *testing.T) { type ( nonStruct int nonRec struct{ A string } dup struct{ A, B nonRec } rec struct { A int B *rec } recUnexported struct { A int b *rec } hasRec struct { A int R *rec } recSlicePointer struct { A []*recSlicePointer } ) for _, test := range []struct { in interface{} want bool }{ {nonStruct(0), false}, {nonRec{}, false}, {dup{}, false}, {rec{}, true}, {recUnexported{}, false}, {hasRec{}, true}, {&recSlicePointer{}, true}, } { got, err := hasRecursiveType(reflect.TypeOf(test.in), nil) if err != nil { t.Fatal(err) } if got != test.want { t.Errorf("%T: got %t, want %t", test.in, got, test.want) } } } golang-google-cloud-0.9.0/bigquery/service.go000066400000000000000000000531501312234511600211650ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "fmt" "io" "net/http" "sync" "time" "cloud.google.com/go/internal" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" bq "google.golang.org/api/bigquery/v2" "google.golang.org/api/googleapi" ) // service provides an internal abstraction to isolate the generated // BigQuery API; most of this package uses this interface instead. // The single implementation, *bigqueryService, contains all the knowledge // of the generated BigQuery API. type service interface { // Jobs insertJob(ctx context.Context, projectId string, conf *insertJobConf) (*Job, error) getJob(ctx context.Context, projectId, jobID string) (*Job, error) jobCancel(ctx context.Context, projectId, jobID string) error jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error) // Tables createTable(ctx context.Context, conf *createTableConf) error getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error // listTables returns a page of Tables and a next page token. Note: the Tables do not have their c field populated. 
listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) // Table data readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error // Datasets insertDataset(ctx context.Context, datasetID, projectID string) error deleteDataset(ctx context.Context, datasetID, projectID string) error getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) // Misc // Waits for a query to complete. waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error) // listDatasets returns a page of Datasets and a next page token. Note: the Datasets do not have their c field populated. listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error) } var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo) func setClientHeader(headers http.Header) { headers.Set("x-goog-api-client", xGoogHeader) } type bigqueryService struct { s *bq.Service } func newBigqueryService(client *http.Client, endpoint string) (*bigqueryService, error) { s, err := bq.New(client) if err != nil { return nil, fmt.Errorf("constructing bigquery client: %v", err) } s.BasePath = endpoint return &bigqueryService{s: s}, nil } // getPages calls the supplied getPage function repeatedly until there are no pages left to get. // token is the token of the initial page to start from. Use an empty string to start from the beginning. 
func getPages(token string, getPage func(token string) (nextToken string, err error)) error { for { var err error token, err = getPage(token) if err != nil { return err } if token == "" { return nil } } } type insertJobConf struct { job *bq.Job media io.Reader } // Calls the Jobs.Insert RPC and returns a Job. Callers must set the returned Job's // client. func (s *bigqueryService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) { call := s.s.Jobs.Insert(projectID, conf.job).Context(ctx) setClientHeader(call.Header()) if conf.media != nil { call.Media(conf.media) } var res *bq.Job var err error invoke := func() error { res, err = call.Do() return err } // A job with a client-generated ID can be retried; the presence of the // ID makes the insert operation idempotent. // We don't retry if there is media, because it is an io.Reader. We'd // have to read the contents and keep it in memory, and that could be expensive. // TODO(jba): Look into retrying if media != nil. if conf.job.JobReference != nil && conf.media == nil { err = runWithRetry(ctx, invoke) } else { err = invoke() } if err != nil { return nil, err } var dt *bq.TableReference if qc := res.Configuration.Query; qc != nil { dt = qc.DestinationTable } return &Job{ projectID: projectID, jobID: res.JobReference.JobId, destinationTable: dt, }, nil } type pagingConf struct { recordsPerRequest int64 setRecordsPerRequest bool startIndex uint64 } type readTableConf struct { projectID, datasetID, tableID string paging pagingConf schema Schema // lazily initialized when the first page of data is fetched. 
} func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) { return s.readTabledata(ctx, conf, token) } func (conf *readTableConf) setPaging(pc *pagingConf) { conf.paging = *pc } type readDataResult struct { pageToken string rows [][]Value totalRows uint64 schema Schema } func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) { // Prepare request to fetch one page of table data. req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID) setClientHeader(req.Header()) if pageToken != "" { req.PageToken(pageToken) } else { req.StartIndex(conf.paging.startIndex) } if conf.paging.setRecordsPerRequest { req.MaxResults(conf.paging.recordsPerRequest) } // Fetch the table schema in the background, if necessary. var schemaErr error var schemaFetch sync.WaitGroup if conf.schema == nil { schemaFetch.Add(1) go func() { defer schemaFetch.Done() var t *bq.Table t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID). Fields("schema"). Context(ctx). Do() if schemaErr == nil && t.Schema != nil { conf.schema = convertTableSchema(t.Schema) } }() } res, err := req.Context(ctx).Do() if err != nil { return nil, err } schemaFetch.Wait() if schemaErr != nil { return nil, schemaErr } result := &readDataResult{ pageToken: res.PageToken, totalRows: uint64(res.TotalRows), schema: conf.schema, } result.rows, err = convertRows(res.Rows, conf.schema) if err != nil { return nil, err } return result, nil } func (s *bigqueryService) waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error) { // Use GetQueryResults only to wait for completion, not to read results. 
req := s.s.Jobs.GetQueryResults(projectID, jobID).Context(ctx).MaxResults(0) setClientHeader(req.Header()) backoff := gax.Backoff{ Initial: 1 * time.Second, Multiplier: 2, Max: 60 * time.Second, } var res *bq.GetQueryResultsResponse err := internal.Retry(ctx, backoff, func() (stop bool, err error) { res, err = req.Do() if err != nil { return !retryableError(err), err } if !res.JobComplete { // GetQueryResults may return early without error; retry. return false, nil } return true, nil }) if err != nil { return nil, err } return convertTableSchema(res.Schema), nil } type insertRowsConf struct { templateSuffix string ignoreUnknownValues bool skipInvalidRows bool } func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error { req := &bq.TableDataInsertAllRequest{ TemplateSuffix: conf.templateSuffix, IgnoreUnknownValues: conf.ignoreUnknownValues, SkipInvalidRows: conf.skipInvalidRows, } for _, row := range rows { m := make(map[string]bq.JsonValue) for k, v := range row.Row { m[k] = bq.JsonValue(v) } req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{ InsertId: row.InsertID, Json: m, }) } var res *bq.TableDataInsertAllResponse err := runWithRetry(ctx, func() error { var err error req := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx) setClientHeader(req.Header()) res, err = req.Do() return err }) if err != nil { return err } if len(res.InsertErrors) == 0 { return nil } var errs PutMultiError for _, e := range res.InsertErrors { if int(e.Index) > len(rows) { return fmt.Errorf("internal error: unexpected row index: %v", e.Index) } rie := RowInsertionError{ InsertID: rows[e.Index].InsertID, RowIndex: int(e.Index), } for _, errp := range e.Errors { rie.Errors = append(rie.Errors, errorFromErrorProto(errp)) } errs = append(errs, rie) } return errs } func (s *bigqueryService) getJob(ctx context.Context, projectID, jobID string) (*Job, error) { res, err := 
s.s.Jobs.Get(projectID, jobID). Fields("configuration"). Context(ctx). Do() if err != nil { return nil, err } var isQuery bool var dest *bq.TableReference if res.Configuration.Query != nil { isQuery = true dest = res.Configuration.Query.DestinationTable } return &Job{ projectID: projectID, jobID: jobID, isQuery: isQuery, destinationTable: dest, }, nil } func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error { // Jobs.Cancel returns a job entity, but the only relevant piece of // data it may contain (the status of the job) is unreliable. From the // docs: "This call will return immediately, and the client will need // to poll for the job status to see if the cancel completed // successfully". So it would be misleading to return a status. _, err := s.s.Jobs.Cancel(projectID, jobID). Fields(). // We don't need any of the response data. Context(ctx). Do() return err } func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) { res, err := s.s.Jobs.Get(projectID, jobID). Fields("status", "statistics"). // Only fetch what we need. Context(ctx). 
Do() if err != nil { return nil, err } st, err := jobStatusFromProto(res.Status) if err != nil { return nil, err } st.Statistics = jobStatisticsFromProto(res.Statistics) return st, nil } var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done} func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) { state, ok := stateMap[status.State] if !ok { return nil, fmt.Errorf("unexpected job state: %v", status.State) } newStatus := &JobStatus{ State: state, err: nil, } if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil { newStatus.err = err } for _, ep := range status.Errors { newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep)) } return newStatus, nil } func jobStatisticsFromProto(s *bq.JobStatistics) *JobStatistics { js := &JobStatistics{ CreationTime: unixMillisToTime(s.CreationTime), StartTime: unixMillisToTime(s.StartTime), EndTime: unixMillisToTime(s.EndTime), TotalBytesProcessed: s.TotalBytesProcessed, } switch { case s.Extract != nil: js.Details = &ExtractStatistics{ DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts), } case s.Load != nil: js.Details = &LoadStatistics{ InputFileBytes: s.Load.InputFileBytes, InputFiles: s.Load.InputFiles, OutputBytes: s.Load.OutputBytes, OutputRows: s.Load.OutputRows, } case s.Query != nil: var names []string for _, qp := range s.Query.UndeclaredQueryParameters { names = append(names, qp.Name) } var tables []*Table for _, tr := range s.Query.ReferencedTables { tables = append(tables, convertTableReference(tr)) } js.Details = &QueryStatistics{ BillingTier: s.Query.BillingTier, CacheHit: s.Query.CacheHit, StatementType: s.Query.StatementType, TotalBytesBilled: s.Query.TotalBytesBilled, TotalBytesProcessed: s.Query.TotalBytesProcessed, NumDMLAffectedRows: s.Query.NumDmlAffectedRows, QueryPlan: queryPlanFromProto(s.Query.QueryPlan), Schema: convertTableSchema(s.Query.Schema), ReferencedTables: tables, UndeclaredQueryParameterNames: 
names, } } return js } func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage { var res []*ExplainQueryStage for _, s := range stages { var steps []*ExplainQueryStep for _, p := range s.Steps { steps = append(steps, &ExplainQueryStep{ Kind: p.Kind, Substeps: p.Substeps, }) } res = append(res, &ExplainQueryStage{ ComputeRatioAvg: s.ComputeRatioAvg, ComputeRatioMax: s.ComputeRatioMax, ID: s.Id, Name: s.Name, ReadRatioAvg: s.ReadRatioAvg, ReadRatioMax: s.ReadRatioMax, RecordsRead: s.RecordsRead, RecordsWritten: s.RecordsWritten, Status: s.Status, Steps: steps, WaitRatioAvg: s.WaitRatioAvg, WaitRatioMax: s.WaitRatioMax, WriteRatioAvg: s.WriteRatioAvg, WriteRatioMax: s.WriteRatioMax, }) } return res } // listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset. func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) { var tables []*Table req := s.s.Tables.List(projectID, datasetID). PageToken(pageToken). Context(ctx) setClientHeader(req.Header()) if pageSize > 0 { req.MaxResults(int64(pageSize)) } res, err := req.Do() if err != nil { return nil, "", err } for _, t := range res.Tables { tables = append(tables, convertTableReference(t.TableReference)) } return tables, res.NextPageToken, nil } type createTableConf struct { projectID, datasetID, tableID string expiration time.Time viewQuery string schema *bq.TableSchema useStandardSQL bool timePartitioning *TimePartitioning } // createTable creates a table in the BigQuery service. // expiration is an optional time after which the table will be deleted and its storage reclaimed. // If viewQuery is non-empty, the created table will be of type VIEW. // Note: expiration can only be set during table creation. // Note: after table creation, a view can be modified only if its table was initially created with a view. 
func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error {
	table := &bq.Table{
		TableReference: &bq.TableReference{
			ProjectId: conf.projectID,
			DatasetId: conf.datasetID,
			TableId:   conf.tableID,
		},
	}
	if !conf.expiration.IsZero() {
		// The API expects milliseconds since the Unix epoch.
		table.ExpirationTime = conf.expiration.UnixNano() / 1e6
	}
	// TODO(jba): make it impossible to provide both a view query and a schema.
	if conf.viewQuery != "" {
		table.View = &bq.ViewDefinition{
			Query: conf.viewQuery,
		}
		if conf.useStandardSQL {
			// false is the field's zero value, so it must be force-sent or the
			// JSON encoder would omit it and the service would use its default.
			table.View.UseLegacySql = false
			table.View.ForceSendFields = append(table.View.ForceSendFields, "UseLegacySql")
		}
	}
	if conf.schema != nil {
		table.Schema = conf.schema
	}
	if conf.timePartitioning != nil {
		table.TimePartitioning = &bq.TimePartitioning{
			Type:         "DAY",
			ExpirationMs: int64(conf.timePartitioning.Expiration.Seconds() * 1000),
		}
	}

	req := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx)
	setClientHeader(req.Header())
	_, err := req.Do()
	return err
}

// getTableMetadata fetches a table resource from the service and converts it
// to this package's TableMetadata.
func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
	req := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx)
	setClientHeader(req.Header())
	table, err := req.Do()
	if err != nil {
		return nil, err
	}
	return bqTableToMetadata(table), nil
}

// deleteTable deletes the table from the service.
func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
	req := s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx)
	setClientHeader(req.Header())
	return req.Do()
}

// bqTableToMetadata converts a raw bq.Table into this package's TableMetadata.
func bqTableToMetadata(t *bq.Table) *TableMetadata {
	md := &TableMetadata{
		Description:      t.Description,
		Name:             t.FriendlyName,
		Type:             TableType(t.Type),
		ID:               t.Id,
		NumBytes:         t.NumBytes,
		NumRows:          t.NumRows,
		ExpirationTime:   unixMillisToTime(t.ExpirationTime),
		CreationTime:     unixMillisToTime(t.CreationTime),
		LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
	}
	if t.Schema != nil {
		md.Schema = convertTableSchema(t.Schema)
	}
	if t.View != nil {
		md.View = t.View.Query
	}
	if t.TimePartitioning != nil {
		md.TimePartitioning = &TimePartitioning{
			Expiration: time.Duration(t.TimePartitioning.ExpirationMs) * time.Millisecond,
		}
	}
	if t.StreamingBuffer != nil {
		md.StreamingBuffer = &StreamingBuffer{
			EstimatedBytes:  t.StreamingBuffer.EstimatedBytes,
			EstimatedRows:   t.StreamingBuffer.EstimatedRows,
			OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
		}
	}
	return md
}

// bqDatasetToMetadata converts a raw bq.Dataset into this package's DatasetMetadata.
func bqDatasetToMetadata(d *bq.Dataset) *DatasetMetadata {
	// TODO(jba): access
	return &DatasetMetadata{
		CreationTime:           unixMillisToTime(d.CreationTime),
		LastModifiedTime:       unixMillisToTime(d.LastModifiedTime),
		DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
		Description:            d.Description,
		Name:                   d.FriendlyName,
		ID:                     d.Id,
		Location:               d.Location,
		Labels:                 d.Labels,
	}
}

// Convert a number of milliseconds since the Unix epoch to a time.Time.
// Treat an input of zero specially: convert it to the zero time,
// rather than the start of the epoch.
func unixMillisToTime(m int64) time.Time {
	if m == 0 {
		return time.Time{}
	}
	return time.Unix(0, m*1e6)
}

// convertTableReference converts a raw bq.TableReference into a *Table
// (client field left nil; callers fill it in as needed).
func convertTableReference(tr *bq.TableReference) *Table {
	return &Table{
		ProjectID: tr.ProjectId,
		DatasetID: tr.DatasetId,
		TableID:   tr.TableId,
	}
}

// patchTableConf contains fields to be patched.
type patchTableConf struct {
	// These fields are omitted from the patch operation if nil.
	Description *string
	Name        *string
	Schema      Schema
}

// patchTable sends a Tables.Patch request containing only the non-nil fields
// of conf, and returns the updated table metadata.
func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) {
	t := &bq.Table{}
	// forceSend marks a field to be serialized even when it holds its zero
	// value, so an explicit empty string still reaches the service.
	forceSend := func(field string) {
		t.ForceSendFields = append(t.ForceSendFields, field)
	}

	if conf.Description != nil {
		t.Description = *conf.Description
		forceSend("Description")
	}
	if conf.Name != nil {
		t.FriendlyName = *conf.Name
		forceSend("FriendlyName")
	}
	if conf.Schema != nil {
		t.Schema = conf.Schema.asTableSchema()
		forceSend("Schema")
	}
	table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	return bqTableToMetadata(table), nil
}

// insertDataset creates a dataset with the given ID in the given project.
func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string) error {
	ds := &bq.Dataset{
		DatasetReference: &bq.DatasetReference{DatasetId: datasetID},
	}
	req := s.s.Datasets.Insert(projectID, ds).Context(ctx)
	setClientHeader(req.Header())
	_, err := req.Do()
	return err
}

// deleteDataset deletes the dataset from the service.
func (s *bigqueryService) deleteDataset(ctx context.Context, datasetID, projectID string) error {
	req := s.s.Datasets.Delete(projectID, datasetID).Context(ctx)
	setClientHeader(req.Header())
	return req.Do()
}

// getDatasetMetadata fetches a dataset resource and converts it to this
// package's DatasetMetadata.
func (s *bigqueryService) getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) {
	req := s.s.Datasets.Get(projectID, datasetID).Context(ctx)
	setClientHeader(req.Header())
	table, err := req.Do()
	if err != nil {
		return nil, err
	}
	return bqDatasetToMetadata(table), nil
}

// listDatasets returns one page of the project's datasets and a token for
// fetching the next page. maxResults <= 0 lets the service choose a page size;
// all includes hidden datasets; filter is an optional label filter expression.
func (s *bigqueryService) listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error) {
	req := s.s.Datasets.List(projectID).
		Context(ctx).
		PageToken(pageToken).
		All(all)
	setClientHeader(req.Header())
	if maxResults > 0 {
		req.MaxResults(int64(maxResults))
	}
	if filter != "" {
		req.Filter(filter)
	}
	res, err := req.Do()
	if err != nil {
		return nil, "", err
	}
	var datasets []*Dataset
	for _, d := range res.Datasets {
		datasets = append(datasets, s.convertListedDataset(d))
	}
	return datasets, res.NextPageToken, nil
}

// convertListedDataset converts one element of a Datasets.List response into a *Dataset.
func (s *bigqueryService) convertListedDataset(d *bq.DatasetListDatasets) *Dataset {
	return &Dataset{
		ProjectID: d.DatasetReference.ProjectId,
		DatasetID: d.DatasetReference.DatasetId,
	}
}

// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
	backoff := gax.Backoff{
		Initial:    2 * time.Second,
		Max:        32 * time.Second,
		Multiplier: 2,
	}
	return internal.Retry(ctx, backoff, func() (stop bool, err error) {
		err = call()
		if err == nil {
			return true, nil
		}
		return !retryableError(err), err
	})
}

// retryableError reports whether err is a transient server-side failure.
// Use the criteria in https://cloud.google.com/bigquery/troubleshooting-errors.
func retryableError(err error) bool {
	e, ok := err.(*googleapi.Error)
	if !ok {
		return false
	}
	var reason string
	if len(e.Errors) > 0 {
		reason = e.Errors[0].Reason
	}
	return reason == "backendError" && (e.Code == 500 || e.Code == 503)
}

golang-google-cloud-0.9.0/bigquery/service_test.go000066400000000000000000000046231312234511600222250ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"reflect"
	"testing"
	"time"

	bq "google.golang.org/api/bigquery/v2"
)

// TestBQTableToMetadata verifies the conversion of a raw bq.Table into
// TableMetadata, covering both the zero value and a fully-populated table.
func TestBQTableToMetadata(t *testing.T) {
	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	aTimeMillis := aTime.UnixNano() / 1e6 // timestamps cross the wire as epoch millis
	for _, test := range []struct {
		in   *bq.Table
		want *TableMetadata
	}{
		{&bq.Table{}, &TableMetadata{}}, // test minimal case
		{
			&bq.Table{
				CreationTime:     aTimeMillis,
				Description:      "desc",
				Etag:             "etag",
				ExpirationTime:   aTimeMillis,
				FriendlyName:     "fname",
				Id:               "id",
				LastModifiedTime: uint64(aTimeMillis),
				Location:         "loc",
				NumBytes:         123,
				NumLongTermBytes: 23,
				NumRows:          7,
				StreamingBuffer: &bq.Streamingbuffer{
					EstimatedBytes:  11,
					EstimatedRows:   3,
					OldestEntryTime: uint64(aTimeMillis),
				},
				TimePartitioning: &bq.TimePartitioning{
					ExpirationMs: 7890,
					Type:         "DAY",
				},
				Type: "EXTERNAL",
				View: &bq.ViewDefinition{Query: "view-query"},
			},
			&TableMetadata{
				Description: "desc",
				Name:        "fname",
				View:        "view-query",
				ID:          "id",
				Type:        ExternalTable,
				// Millisecond precision survives the round trip, so expected
				// times are truncated accordingly.
				ExpirationTime:   aTime.Truncate(time.Millisecond),
				CreationTime:     aTime.Truncate(time.Millisecond),
				LastModifiedTime: aTime.Truncate(time.Millisecond),
				NumBytes:         123,
				NumRows:          7,
				TimePartitioning: &TimePartitioning{Expiration: time.Duration(7890) * time.Millisecond},
				StreamingBuffer: &StreamingBuffer{
					EstimatedBytes:  11,
					EstimatedRows:   3,
					OldestEntryTime: aTime,
				},
			},
		},
	} {
		got := bqTableToMetadata(test.in)
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
		}
	}
}

golang-google-cloud-0.9.0/bigquery/table.go000066400000000000000000000203501312234511600206100ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"fmt"
	"time"

	"golang.org/x/net/context"

	"cloud.google.com/go/internal/optional"
	bq "google.golang.org/api/bigquery/v2"
)

// A Table is a reference to a BigQuery table.
type Table struct {
	// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
	// In this case the result will be stored in an ephemeral table.
	ProjectID string
	DatasetID string
	// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
	// The maximum length is 1,024 characters.
	TableID string

	// c is the client used for all service calls made through this Table.
	c *Client
}

// TableMetadata contains information about a BigQuery table.
type TableMetadata struct {
	Description string // The user-friendly description of this table.
	Name        string // The user-friendly name for this table.
	Schema      Schema
	View        string

	ID   string // An opaque ID uniquely identifying the table.
	Type TableType

	// The time when this table expires. If not set, the table will persist
	// indefinitely. Expired tables will be deleted and their storage reclaimed.
	ExpirationTime time.Time

	CreationTime     time.Time
	LastModifiedTime time.Time

	// The size of the table in bytes.
	// This does not include data that is being buffered during a streaming insert.
	NumBytes int64

	// The number of rows of data in this table.
	// This does not include data that is being buffered during a streaming insert.
	NumRows uint64

	// The time-based partitioning settings for this table.
	TimePartitioning *TimePartitioning

	// Contains information regarding this table's streaming buffer, if one is
	// present. This field will be nil if the table is not being streamed to or if
	// there is no data in the streaming buffer.
	StreamingBuffer *StreamingBuffer
}

// TableCreateDisposition specifies the circumstances under which destination table will be created.
// Default is CreateIfNeeded.
type TableCreateDisposition string

const (
	// CreateIfNeeded will create the table if it does not already exist.
	// Tables are created atomically on successful completion of a job.
	CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"

	// CreateNever ensures the table must already exist and will not be
	// automatically created.
	CreateNever TableCreateDisposition = "CREATE_NEVER"
)

// TableWriteDisposition specifies how existing data in a destination table is treated.
// Default is WriteAppend.
type TableWriteDisposition string

const (
	// WriteAppend will append to any existing data in the destination table.
	// Data is appended atomically on successful completion of a job.
	WriteAppend TableWriteDisposition = "WRITE_APPEND"

	// WriteTruncate overrides the existing data in the destination table.
	// Data is overwritten atomically on successful completion of a job.
	WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"

	// WriteEmpty fails writes if the destination table already contains data.
	WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
)

// TableType is the type of table.
type TableType string

const (
	RegularTable  TableType = "TABLE"
	ViewTable     TableType = "VIEW"
	ExternalTable TableType = "EXTERNAL"
)

// StreamingBuffer holds information about the streaming buffer.
type StreamingBuffer struct {
	// A lower-bound estimate of the number of bytes currently in the streaming
	// buffer.
	EstimatedBytes uint64

	// A lower-bound estimate of the number of rows currently in the streaming
	// buffer.
	EstimatedRows uint64

	// The time of the oldest entry in the streaming buffer.
	OldestEntryTime time.Time
}

// tableRefProto converts the Table into the raw API table reference.
func (t *Table) tableRefProto() *bq.TableReference {
	return &bq.TableReference{
		ProjectId: t.ProjectID,
		DatasetId: t.DatasetID,
		TableId:   t.TableID,
	}
}

// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format.
func (t *Table) FullyQualifiedName() string {
	return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID)
}

// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID.
func (t *Table) implicitTable() bool {
	return t.ProjectID == "" && t.DatasetID == "" && t.TableID == ""
}

// Create creates a table in the BigQuery service.
func (t *Table) Create(ctx context.Context, options ...CreateTableOption) error {
	conf := &createTableConf{
		projectID: t.ProjectID,
		datasetID: t.DatasetID,
		tableID:   t.TableID,
	}
	// Each option mutates conf; options are applied in the order given.
	for _, o := range options {
		o.customizeCreateTable(conf)
	}
	return t.c.service.createTable(ctx, conf)
}

// Metadata fetches the metadata for the table.
func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) {
	return t.c.service.getTableMetadata(ctx, t.ProjectID, t.DatasetID, t.TableID)
}

// Delete deletes the table.
func (t *Table) Delete(ctx context.Context) error {
	return t.c.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID)
}

// A CreateTableOption is an optional argument to CreateTable.
type CreateTableOption interface {
	customizeCreateTable(*createTableConf)
}

type tableExpiration time.Time

// TableExpiration returns a CreateTableOption that will cause the created table to be deleted after the expiration time.
func TableExpiration(exp time.Time) CreateTableOption { return tableExpiration(exp) }

func (opt tableExpiration) customizeCreateTable(conf *createTableConf) {
	conf.expiration = time.Time(opt)
}

type viewQuery string

// ViewQuery returns a CreateTableOption that causes the created table to be a virtual table defined by the supplied query.
// For more information see: https://cloud.google.com/bigquery/querying-data#views
func ViewQuery(query string) CreateTableOption { return viewQuery(query) }

func (opt viewQuery) customizeCreateTable(conf *createTableConf) {
	conf.viewQuery = string(opt)
}

type useStandardSQL struct{}

// UseStandardSQL returns a CreateTableOption to set the table to use standard SQL.
// The default setting is false (using legacy SQL).
func UseStandardSQL() CreateTableOption { return useStandardSQL{} }

func (opt useStandardSQL) customizeCreateTable(conf *createTableConf) {
	conf.useStandardSQL = true
}

// TimePartitioning is a CreateTableOption that can be used to set time-based
// date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables
type TimePartitioning struct {
	// (Optional) The amount of time to keep the storage for a partition.
	// If the duration is empty (0), the data in the partitions do not expire.
	Expiration time.Duration
}

func (opt TimePartitioning) customizeCreateTable(conf *createTableConf) {
	conf.timePartitioning = &opt
}

// Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator {
	return newRowIterator(ctx, t.c.service, &readTableConf{
		projectID: t.ProjectID,
		datasetID: t.DatasetID,
		tableID:   t.TableID,
	})
}

// Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMetadata, error) {
	var conf patchTableConf
	if tm.Description != nil {
		s := optional.ToString(tm.Description)
		conf.Description = &s
	}
	if tm.Name != nil {
		s := optional.ToString(tm.Name)
		conf.Name = &s
	}
	conf.Schema = tm.Schema
	return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf)
}

// TableMetadataToUpdate is used when updating a table's metadata.
// Only non-nil fields will be updated.
type TableMetadataToUpdate struct {
	// Description is the user-friendly description of this table.
	Description optional.String

	// Name is the user-friendly name for this table.
	Name optional.String

	// Schema is the table's schema.
	// When updating a schema, you can add columns but not remove them.
	Schema Schema
	// TODO(jba): support updating the view
}

golang-google-cloud-0.9.0/bigquery/uploader.go000066400000000000000000000131421312234511600213350ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"fmt"
	"reflect"

	"golang.org/x/net/context"
)

// An Uploader does streaming inserts into a BigQuery table.
// It is safe for concurrent use.
type Uploader struct {
	t *Table

	// SkipInvalidRows causes rows containing invalid data to be silently
	// ignored. The default value is false, which causes the entire request to
	// fail if there is an attempt to insert an invalid row.
	SkipInvalidRows bool

	// IgnoreUnknownValues causes values not matching the schema to be ignored.
	// The default value is false, which causes records containing such values
	// to be treated as invalid records.
	IgnoreUnknownValues bool

	// A TableTemplateSuffix allows Uploaders to create tables automatically.
	//
	// Experimental: this option is experimental and may be modified or removed in future versions,
	// regardless of any other documented package stability guarantees.
	//
	// When you specify a suffix, the table you upload data to
	// will be used as a template for creating a new table, with the same schema,
	// called <table> + <suffix>.
	//
	// More information is available at
	// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
	TableTemplateSuffix string
}

// Uploader returns an Uploader that can be used to append rows to t.
// The returned Uploader may optionally be further configured before its Put method is called.
func (t *Table) Uploader() *Uploader { return &Uploader{t: t} }

// Put uploads one or more rows to the BigQuery service.
//
// If src is ValueSaver, then its Save method is called to produce a row for uploading.
//
// If src is a struct or pointer to a struct, then a schema is inferred from it
// and used to create a StructSaver. The InsertID of the StructSaver will be
// empty.
//
// If src is a slice of ValueSavers, structs, or struct pointers, then each
// element of the slice is treated as above, and multiple rows are uploaded.
//
// Put returns a PutMultiError if one or more rows failed to be uploaded.
// The PutMultiError contains a RowInsertionError for each failed row.
//
// Put will retry on temporary errors (see
// https://cloud.google.com/bigquery/troubleshooting-errors). This can result
// in duplicate rows if you do not use insert IDs. Also, if the error persists,
// the call will run indefinitely. Pass a context with a timeout to prevent
// hanging calls.
func (u *Uploader) Put(ctx context.Context, src interface{}) error {
	savers, err := valueSavers(src)
	if err != nil {
		return err
	}
	return u.putMulti(ctx, savers)
}

// valueSavers normalizes src (a single item or a slice of items) into a slice
// of ValueSavers, converting structs and struct pointers along the way.
func valueSavers(src interface{}) ([]ValueSaver, error) {
	saver, ok, err := toValueSaver(src)
	if err != nil {
		return nil, err
	}
	if ok {
		return []ValueSaver{saver}, nil
	}
	srcVal := reflect.ValueOf(src)
	if srcVal.Kind() != reflect.Slice {
		return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src)
	}
	var savers []ValueSaver
	for i := 0; i < srcVal.Len(); i++ {
		s := srcVal.Index(i).Interface()
		saver, ok, err := toValueSaver(s)
		if err != nil {
			return nil, err
		}
		if !ok {
			return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s)
		}
		savers = append(savers, saver)
	}
	return savers, nil
}

// Make a ValueSaver from x, which must implement ValueSaver already
// or be a struct or pointer to struct.
func toValueSaver(x interface{}) (ValueSaver, bool, error) {
	if _, ok := x.(StructSaver); ok {
		return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver")
	}
	var insertID string
	// Handle StructSavers specially so we can infer the schema if necessary.
	if ss, ok := x.(*StructSaver); ok && ss.Schema == nil {
		x = ss.Struct
		insertID = ss.InsertID
		// Fall through so we can infer the schema.
	}
	if saver, ok := x.(ValueSaver); ok {
		return saver, ok, nil
	}
	v := reflect.ValueOf(x)
	// Support Put with []interface{}
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		return nil, false, nil
	}
	schema, err := inferSchemaReflectCached(v.Type())
	if err != nil {
		return nil, false, err
	}
	return &StructSaver{
		Struct:   x,
		InsertID: insertID,
		Schema:   schema,
	}, true, nil
}

// putMulti saves each ValueSaver into an insertionRow and streams the batch
// to the service with the Uploader's configured insert options.
func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
	var rows []*insertionRow
	for _, saver := range src {
		row, insertID, err := saver.Save()
		if err != nil {
			return err
		}
		rows = append(rows, &insertionRow{InsertID: insertID, Row: row})
	}

	return u.t.c.service.insertRows(ctx, u.t.ProjectID, u.t.DatasetID, u.t.TableID, rows, &insertRowsConf{
		skipInvalidRows:     u.SkipInvalidRows,
		ignoreUnknownValues: u.IgnoreUnknownValues,
		templateSuffix:      u.TableTemplateSuffix,
	})
}

// An insertionRow represents a row of data to be inserted into a table.
type insertionRow struct {
	// If InsertID is non-empty, BigQuery will use it to de-duplicate insertions of
	// this row on a best-effort basis.
	InsertID string
	// The data to be inserted, represented as a map from field name to Value.
	Row map[string]Value
}

golang-google-cloud-0.9.0/bigquery/uploader_test.go000066400000000000000000000147041312234511600224010ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery

import (
	"reflect"
	"testing"

	"cloud.google.com/go/internal/pretty"
	"golang.org/x/net/context"
)

// testSaver is a trivial ValueSaver whose Save returns fixed data and error.
type testSaver struct {
	ir  *insertionRow
	err error
}

func (ts testSaver) Save() (map[string]Value, string, error) {
	return ts.ir.Row, ts.ir.InsertID, ts.err
}

// TestRejectsNonValueSavers checks that Put fails for inputs that are not
// ValueSavers, structs, struct pointers, or slices thereof.
func TestRejectsNonValueSavers(t *testing.T) {
	client := &Client{projectID: "project-id"}
	u := Uploader{t: client.Dataset("dataset-id").Table("table-id")}

	inputs := []interface{}{
		1,
		[]int{1, 2},
		[]interface{}{
			testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}},
			1,
		},
		StructSaver{},
	}
	for _, in := range inputs {
		if err := u.Put(context.Background(), in); err == nil {
			t.Errorf("put value: %v; got nil, want error", in)
		}
	}
}

// insertRowsRecorder is a fake service that records each batch passed to insertRows.
type insertRowsRecorder struct {
	rowBatches [][]*insertionRow
	service
}

func (irr *insertRowsRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
	irr.rowBatches = append(irr.rowBatches, rows)
	return nil
}

// TestInsertsData checks that Put forwards single items and slices as the
// expected row batches.
func TestInsertsData(t *testing.T) {
	testCases := []struct {
		data [][]*insertionRow
	}{
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
				},
			},
		},
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
					&insertionRow{"b", map[string]Value{"two": 2}},
				},
			},
		},
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
				},
				{
					&insertionRow{"b", map[string]Value{"two": 2}},
				},
			},
		},
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
					&insertionRow{"b", map[string]Value{"two": 2}},
				},
				{
					&insertionRow{"c", map[string]Value{"three": 3}},
					&insertionRow{"d", map[string]Value{"four": 4}},
				},
			},
		},
	}
	for _, tc := range testCases {
		irr := &insertRowsRecorder{}
		client := &Client{
			projectID: "project-id",
			service:   irr,
		}
		u := client.Dataset("dataset-id").Table("table-id").Uploader()
		for _, batch := range tc.data {
			if len(batch) == 0 {
				continue
			}
			var toUpload interface{}
			if len(batch) == 1 {
				toUpload = testSaver{ir: batch[0]}
			} else {
				savers := []testSaver{}
				for _, row := range batch {
					savers = append(savers, testSaver{ir: row})
				}
				toUpload = savers
			}

			err := u.Put(context.Background(), toUpload)
			if err != nil {
				t.Errorf("expected successful Put of ValueSaver; got: %v", err)
			}
		}
		if got, want := irr.rowBatches, tc.data; !reflect.DeepEqual(got, want) {
			t.Errorf("got: %v, want: %v", got, want)
		}
	}
}

// uploadOptionRecorder is a fake service that captures the insertRowsConf it receives.
type uploadOptionRecorder struct {
	received *insertRowsConf
	service
}

func (u *uploadOptionRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
	u.received = conf
	return nil
}

// TestUploadOptionsPropagate checks that the Uploader's option fields reach
// the service as the corresponding insertRowsConf fields.
func TestUploadOptionsPropagate(t *testing.T) {
	// we don't care for the data in this testcase.
	dummyData := testSaver{ir: &insertionRow{}}

	recorder := new(uploadOptionRecorder)
	c := &Client{service: recorder}
	table := &Table{
		ProjectID: "project-id",
		DatasetID: "dataset-id",
		TableID:   "table-id",
		c:         c,
	}

	tests := [...]struct {
		ul   *Uploader
		conf insertRowsConf
	}{
		{
			// test zero options lead to zero value for insertRowsConf
			ul: table.Uploader(),
		},
		{
			ul: func() *Uploader {
				u := table.Uploader()
				u.TableTemplateSuffix = "suffix"
				return u
			}(),
			conf: insertRowsConf{
				templateSuffix: "suffix",
			},
		},
		{
			ul: func() *Uploader {
				u := table.Uploader()
				u.IgnoreUnknownValues = true
				return u
			}(),
			conf: insertRowsConf{
				ignoreUnknownValues: true,
			},
		},
		{
			ul: func() *Uploader {
				u := table.Uploader()
				u.SkipInvalidRows = true
				return u
			}(),
			conf: insertRowsConf{
				skipInvalidRows: true,
			},
		},
		{ // multiple upload options combine
			ul: func() *Uploader {
				u := table.Uploader()
				u.TableTemplateSuffix = "suffix"
				u.IgnoreUnknownValues = true
				u.SkipInvalidRows = true
				return u
			}(),
			conf: insertRowsConf{
				templateSuffix:      "suffix",
				skipInvalidRows:     true,
				ignoreUnknownValues: true,
			},
		},
	}

	for i, tc := range tests {
		err := tc.ul.Put(context.Background(), dummyData)
		if err != nil {
			t.Fatalf("%d: expected successful Put of ValueSaver; got: %v", i, err)
		}

		if recorder.received == nil {
			t.Fatalf("%d: received no options at all!", i)
		}

		want := tc.conf
		got := *recorder.received
		if got != want {
			t.Errorf("%d: got %#v, want %#v, ul=%#v", i, got, want, tc.ul)
		}
	}
}

// TestValueSavers checks the normalization performed by valueSavers for each
// supported input shape.
func TestValueSavers(t *testing.T) {
	ts := &testSaver{ir: &insertionRow{}}
	type T struct{ I int }
	schema, err := InferSchema(T{})
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range []struct {
		in   interface{}
		want []ValueSaver
	}{
		{ts, []ValueSaver{ts}},
		{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
		{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
		{[]interface{}{ts, ts}, []ValueSaver{ts, ts}},
		{[]T{{I: 1}, {I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: T{I: 2}},
		}},
		{[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: &T{I: 2}},
		}},
		{&StructSaver{Struct: T{I: 3}, InsertID: "foo"}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 3}, InsertID: "foo"},
		}},
	} {
		got, err := valueSavers(test.in)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
		}
		// Make sure Save is successful.
		for i, vs := range got {
			_, _, err := vs.Save()
			if err != nil {
				t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err)
			}
		}
	}
}

golang-google-cloud-0.9.0/bigquery/utils_test.go000066400000000000000000000023761312234511600217260ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"golang.org/x/net/context"

	bq "google.golang.org/api/bigquery/v2"
)

// defaultGCS returns a minimal GCSReference fixture for tests.
func defaultGCS() *GCSReference {
	return &GCSReference{
		uris: []string{"uri"},
	}
}

// defaultQuery is a minimal QueryConfig fixture for tests.
var defaultQuery = &QueryConfig{
	Q:                "query string",
	DefaultProjectID: "def-project-id",
	DefaultDatasetID: "def-dataset-id",
}

// testService is a fake service that records the job passed to insertJob and
// reports every job as Done.
type testService struct {
	*bq.Job

	service
}

func (s *testService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
	s.Job = conf.job
	return &Job{}, nil
}

func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
	return &JobStatus{State: Done}, nil
}

golang-google-cloud-0.9.0/bigquery/value.go000066400000000000000000000424331312234511600206430ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"encoding/base64"
	"errors"
	"fmt"
	"reflect"
	"strconv"
	"time"

	"cloud.google.com/go/civil"

	bq "google.golang.org/api/bigquery/v2"
)

// Value stores the contents of a single cell from a BigQuery result.
type Value interface{}

// ValueLoader stores a slice of Values representing a result row from a Read operation.
// See RowIterator.Next for more information.
type ValueLoader interface {
	Load(v []Value, s Schema) error
}

// valueList converts a []Value to implement ValueLoader.
type valueList []Value

// Load stores a sequence of values in a valueList.
// It resets the slice length to zero, then appends each value to it.
func (vs *valueList) Load(v []Value, _ Schema) error {
	*vs = append((*vs)[:0], v...)
	return nil
}

// valueMap converts a map[string]Value to implement ValueLoader.
type valueMap map[string]Value

// Load stores a sequence of values in a valueMap.
func (vm *valueMap) Load(v []Value, s Schema) error {
	if *vm == nil {
		*vm = map[string]Value{}
	}
	loadMap(*vm, v, s)
	return nil
}

// loadMap stores the row values vals into m keyed by the schema field names,
// recursing into nested and repeated record fields.
func loadMap(m map[string]Value, vals []Value, s Schema) {
	for i, f := range s {
		val := vals[i]
		var v interface{}
		switch {
		case f.Schema == nil:
			// Scalar field: store the value as-is.
			v = val
		case !f.Repeated:
			// Single nested record: recurse into a sub-map.
			m2 := map[string]Value{}
			loadMap(m2, val.([]Value), f.Schema)
			v = m2
		default: // repeated and nested
			sval := val.([]Value)
			vs := make([]Value, len(sval))
			for j, e := range sval {
				m2 := map[string]Value{}
				loadMap(m2, e.([]Value), f.Schema)
				vs[j] = m2
			}
			v = vs
		}
		m[f.Name] = v
	}
}

// structLoader loads row values into the fields of a destination struct,
// compiling the schema-to-field operations once on first use.
type structLoader struct {
	typ reflect.Type // type of struct
	err error
	ops []structLoaderOp

	vstructp reflect.Value // pointer to current struct value; changed by set
}

// A setFunc is a function that sets a struct field or slice/array
// element to a value.
type setFunc func(v reflect.Value, val interface{}) error

// A structLoaderOp instructs the loader to set a struct field to a row value.
type structLoaderOp struct {
	fieldIndex []int   // index path of the destination struct field (for reflect's FieldByIndex)
	valueIndex int     // position of the source value within the row
	setFunc    setFunc // assignment function chosen for this field's type
	repeated   bool    // whether the schema field is repeated
}

// errNoNulls is returned when a NULL row value would be stored into a
// struct field, since plain Go types cannot represent NULL.
var errNoNulls = errors.New("bigquery: NULL values cannot be read into structs")

// setAny stores x into v unchanged via reflection. It is used for field
// types whose Go representation needs no conversion.
func setAny(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	v.Set(reflect.ValueOf(x))
	return nil
}

// setInt stores the int64 x into the integer field v, reporting an error
// if x does not fit in v's (possibly narrower) integer type.
func setInt(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	xx := x.(int64)
	if v.OverflowInt(xx) {
		return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
	}
	v.SetInt(xx)
	return nil
}

// setFloat stores the float64 x into the float field v, reporting an error
// if x overflows v's (possibly narrower) float type.
func setFloat(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	xx := x.(float64)
	if v.OverflowFloat(xx) {
		return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
	}
	v.SetFloat(xx)
	return nil
}

// setBool stores the bool x into v.
func setBool(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	v.SetBool(x.(bool))
	return nil
}

// setString stores the string x into v.
func setString(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	v.SetString(x.(string))
	return nil
}

// setBytes stores the []byte x into v.
func setBytes(v reflect.Value, x interface{}) error {
	if x == nil {
		return errNoNulls
	}
	v.SetBytes(x.([]byte))
	return nil
}

// set remembers a value for the next call to Load. The value must be
// a pointer to a struct. (This is checked in RowIterator.Next.)
// The schema is compiled to a sequence of ops on the first call and
// reused thereafter; subsequent calls must supply the same struct type.
func (sl *structLoader) set(structp interface{}, schema Schema) error {
	if sl.err != nil {
		return sl.err
	}
	sl.vstructp = reflect.ValueOf(structp)
	typ := sl.vstructp.Type().Elem()
	if sl.typ == nil {
		// First call: remember the type and compile the schema.
		sl.typ = typ
		ops, err := compileToOps(typ, schema)
		if err != nil {
			sl.err = err
			return err
		}
		sl.ops = ops
	} else if sl.typ != typ {
		return fmt.Errorf("bigquery: struct type changed from %s to %s", sl.typ, typ)
	}
	return nil
}

// compileToOps produces a sequence of operations that will set the fields of a
// value of structType to the contents of a row with schema.
func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) {
	var ops []structLoaderOp
	fields, err := fieldCache.Fields(structType)
	if err != nil {
		return nil, err
	}
	for i, schemaField := range schema {
		// Look for an exported struct field with the same name as the schema
		// field, ignoring case (BigQuery column names are case-insensitive,
		// and we want to act like encoding/json anyway).
		structField := fields.Match(schemaField.Name)
		if structField == nil {
			// Ignore schema fields with no corresponding struct field.
			continue
		}
		op := structLoaderOp{
			fieldIndex: structField.Index,
			valueIndex: i,
		}
		t := structField.Type
		if schemaField.Repeated {
			// Repeated fields require a slice or array destination; the
			// element type is what must match the schema field's type.
			if t.Kind() != reflect.Slice && t.Kind() != reflect.Array {
				return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but struct field %s has type %s", schemaField.Name, structField.Name, t)
			}
			t = t.Elem()
			op.repeated = true
		}
		if schemaField.Type == RecordFieldType {
			// Field can be a struct or a pointer to a struct.
			if t.Kind() == reflect.Ptr {
				t = t.Elem()
			}
			if t.Kind() != reflect.Struct {
				return nil, fmt.Errorf("bigquery: field %s has type %s, expected struct or *struct", structField.Name, structField.Type)
			}
			// Compile the nested schema once here; the closure below captures
			// the resulting ops so each row applies them without recompiling.
			nested, err := compileToOps(t, schemaField.Schema)
			if err != nil {
				return nil, err
			}
			op.setFunc = func(v reflect.Value, val interface{}) error {
				return setNested(nested, v, val.([]Value))
			}
		} else {
			op.setFunc = determineSetFunc(t, schemaField.Type)
			if op.setFunc == nil {
				return nil, fmt.Errorf("bigquery: schema field %s of type %s is not assignable to struct field %s of type %s", schemaField.Name, schemaField.Type, structField.Name, t)
			}
		}
		ops = append(ops, op)
	}
	return ops, nil
}

// determineSetFunc chooses the best function for setting a field of type ftype
// to a value whose schema field type is stype. It returns nil if stype
// is not assignable to ftype.
// determineSetFunc considers only basic types. See compileToOps for
// handling of repetition and nesting.
func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
	switch stype {
	case StringFieldType:
		if ftype.Kind() == reflect.String {
			return setString
		}
	case BytesFieldType:
		if ftype == typeOfByteSlice {
			return setBytes
		}
	case IntegerFieldType:
		if isSupportedIntType(ftype) {
			return setInt
		}
	case FloatFieldType:
		switch ftype.Kind() {
		case reflect.Float32, reflect.Float64:
			return setFloat
		}
	case BooleanFieldType:
		if ftype.Kind() == reflect.Bool {
			return setBool
		}
	// Timestamp and civil time values arrive already converted to their Go
	// types (time.Time, civil.Date, etc.), so they are stored unchanged.
	case TimestampFieldType:
		if ftype == typeOfGoTime {
			return setAny
		}
	case DateFieldType:
		if ftype == typeOfDate {
			return setAny
		}
	case TimeFieldType:
		if ftype == typeOfTime {
			return setAny
		}
	case DateTimeFieldType:
		if ftype == typeOfDateTime {
			return setAny
		}
	}
	return nil
}

// Load implements ValueLoader: it applies the precompiled ops to the struct
// most recently remembered by set, filling it from values.
func (sl *structLoader) Load(values []Value, _ Schema) error {
	if sl.err != nil {
		return sl.err
	}
	return runOps(sl.ops, sl.vstructp.Elem(), values)
}

// runOps executes a sequence of ops, setting the fields of vstruct to the
// supplied values.
func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error {
	for _, op := range ops {
		field := vstruct.FieldByIndex(op.fieldIndex)
		var err error
		if op.repeated {
			err = setRepeated(field, values[op.valueIndex].([]Value), op.setFunc)
		} else {
			err = op.setFunc(field, values[op.valueIndex])
		}
		if err != nil {
			return err
		}
	}
	return nil
}

// setNested sets the struct (or pointer-to-struct) v from the record values
// vals, using the precompiled ops for the record's schema.
func setNested(ops []structLoaderOp, v reflect.Value, vals []Value) error {
	// v is either a struct or a pointer to a struct.
	if v.Kind() == reflect.Ptr {
		// If the pointer is nil, set it to a zero struct value.
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		v = v.Elem()
	}
	return runOps(ops, v, vals)
}

// setRepeated stores the elements of vslice into field, which must be a
// slice or array, converting each element with setElem. Slices are resized
// to fit; arrays keep their fixed length (extra source values are dropped,
// extra destination elements are zeroed).
func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error {
	vlen := len(vslice)
	var flen int
	switch field.Type().Kind() {
	case reflect.Slice:
		// Make a slice of the right size, avoiding allocation if possible.
		switch {
		case field.Len() < vlen:
			field.Set(reflect.MakeSlice(field.Type(), vlen, vlen))
		case field.Len() > vlen:
			field.SetLen(vlen)
		}
		flen = vlen
	case reflect.Array:
		flen = field.Len()
		if flen > vlen {
			// Set extra elements to their zero value.
			z := reflect.Zero(field.Type().Elem())
			for i := vlen; i < flen; i++ {
				field.Index(i).Set(z)
			}
		}
	default:
		return fmt.Errorf("bigquery: impossible field type %s", field.Type())
	}
	for i, val := range vslice {
		if i < flen { // avoid writing past the end of a short array
			if err := setElem(field.Index(i), val); err != nil {
				return err
			}
		}
	}
	return nil
}

// A ValueSaver returns a row of data to be inserted into a table.
type ValueSaver interface {
	// Save returns a row to be inserted into a BigQuery table, represented
	// as a map from field name to Value.
	// If insertID is non-empty, BigQuery will use it to de-duplicate
	// insertions of this row on a best-effort basis.
	Save() (row map[string]Value, insertID string, err error)
}

// ValuesSaver implements ValueSaver for a slice of Values.
type ValuesSaver struct {
	Schema Schema

	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
	// of this row on a best-effort basis.
	InsertID string

	Row []Value
}

// Save implements ValueSaver.
func (vls *ValuesSaver) Save() (map[string]Value, string, error) {
	m, err := valuesToMap(vls.Row, vls.Schema)
	return m, vls.InsertID, err
}

// valuesToMap converts a row of Values into the map form required by the
// insert API, keyed by schema field name. Nested records become nested maps;
// repeated nested records become slices of maps.
func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
	if len(vs) != len(schema) {
		return nil, errors.New("Schema does not match length of row to be inserted")
	}

	m := make(map[string]Value)
	for i, fieldSchema := range schema {
		if fieldSchema.Type != RecordFieldType {
			m[fieldSchema.Name] = vs[i]
			continue
		}
		// Nested record, possibly repeated.
		vals, ok := vs[i].([]Value)
		if !ok {
			return nil, errors.New("nested record is not a []Value")
		}
		if !fieldSchema.Repeated {
			value, err := valuesToMap(vals, fieldSchema.Schema)
			if err != nil {
				return nil, err
			}
			m[fieldSchema.Name] = value
			continue
		}
		// A repeated nested field is converted into a slice of maps.
		var maps []Value
		for _, v := range vals {
			sv, ok := v.([]Value)
			if !ok {
				return nil, errors.New("nested record in slice is not a []Value")
			}
			value, err := valuesToMap(sv, fieldSchema.Schema)
			if err != nil {
				return nil, err
			}
			maps = append(maps, value)
		}
		m[fieldSchema.Name] = maps
	}
	return m, nil
}

// StructSaver implements ValueSaver for a struct.
// The struct is converted to a map of values by using the values of struct
// fields corresponding to schema fields. Additional and missing
// fields are ignored, as are nested struct pointers that are nil.
type StructSaver struct {
	// Schema determines what fields of the struct are uploaded. It should
	// match the table's schema.
	Schema Schema

	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
	// of this row on a best-effort basis.
	InsertID string

	// Struct should be a struct or a pointer to a struct.
	Struct interface{}
}

// Save implements ValueSaver.
func (ss *StructSaver) Save() (row map[string]Value, insertID string, err error) { vstruct := reflect.ValueOf(ss.Struct) row, err = structToMap(vstruct, ss.Schema) if err != nil { return nil, "", err } return row, ss.InsertID, nil } func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) { if vstruct.Kind() == reflect.Ptr { vstruct = vstruct.Elem() } if !vstruct.IsValid() { return nil, nil } m := map[string]Value{} if vstruct.Kind() != reflect.Struct { return nil, fmt.Errorf("bigquery: type is %s, need struct or struct pointer", vstruct.Type()) } fields, err := fieldCache.Fields(vstruct.Type()) if err != nil { return nil, err } for _, schemaField := range schema { // Look for an exported struct field with the same name as the schema // field, ignoring case. structField := fields.Match(schemaField.Name) if structField == nil { continue } val, err := structFieldToUploadValue(vstruct.FieldByIndex(structField.Index), schemaField) if err != nil { return nil, err } // Add the value to the map, unless it is nil. if val != nil { m[schemaField.Name] = val } } return m, nil } // structFieldToUploadValue converts a struct field to a value suitable for ValueSaver.Save, using // the schemaField as a guide. // structFieldToUploadValue is careful to return a true nil interface{} when needed, so its // caller can easily identify a nil value. func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) { if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) { return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s", schemaField.Name, vfield.Type()) } // A non-nested field can be represented by its Go value. if schemaField.Type != RecordFieldType { if !schemaField.Repeated || vfield.Len() > 0 { return vfield.Interface(), nil } // The service treats a null repeated field as an error. Return // nil to omit the field entirely. 
return nil, nil } // A non-repeated nested field is converted into a map[string]Value. if !schemaField.Repeated { m, err := structToMap(vfield, schemaField.Schema) if err != nil { return nil, err } if m == nil { return nil, nil } return m, nil } // A repeated nested field is converted into a slice of maps. if vfield.Len() == 0 { return nil, nil } var vals []Value for i := 0; i < vfield.Len(); i++ { m, err := structToMap(vfield.Index(i), schemaField.Schema) if err != nil { return nil, err } vals = append(vals, m) } return vals, nil } // convertRows converts a series of TableRows into a series of Value slices. // schema is used to interpret the data from rows; its length must match the // length of each row. func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) { var rs [][]Value for _, r := range rows { row, err := convertRow(r, schema) if err != nil { return nil, err } rs = append(rs, row) } return rs, nil } func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) { if len(schema) != len(r.F) { return nil, errors.New("schema length does not match row length") } var values []Value for i, cell := range r.F { fs := schema[i] v, err := convertValue(cell.V, fs.Type, fs.Schema) if err != nil { return nil, err } values = append(values, v) } return values, nil } func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) { switch val := val.(type) { case nil: return nil, nil case []interface{}: return convertRepeatedRecord(val, typ, schema) case map[string]interface{}: return convertNestedRecord(val, schema) case string: return convertBasicType(val, typ) default: return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ) } } func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) { var values []Value for _, cell := range vals { // each cell contains a single entry, keyed by "v" val := cell.(map[string]interface{})["v"] v, err := convertValue(val, typ, schema) if err != nil { 
return nil, err } values = append(values, v) } return values, nil } func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) { // convertNestedRecord is similar to convertRow, as a record has the same structure as a row. // Nested records are wrapped in a map with a single key, "f". record := val["f"].([]interface{}) if len(record) != len(schema) { return nil, errors.New("schema length does not match record length") } var values []Value for i, cell := range record { // each cell contains a single entry, keyed by "v" val := cell.(map[string]interface{})["v"] fs := schema[i] v, err := convertValue(val, fs.Type, fs.Schema) if err != nil { return nil, err } values = append(values, v) } return values, nil } // convertBasicType returns val as an interface with a concrete type specified by typ. func convertBasicType(val string, typ FieldType) (Value, error) { switch typ { case StringFieldType: return val, nil case BytesFieldType: return base64.StdEncoding.DecodeString(val) case IntegerFieldType: return strconv.ParseInt(val, 10, 64) case FloatFieldType: return strconv.ParseFloat(val, 64) case BooleanFieldType: return strconv.ParseBool(val) case TimestampFieldType: f, err := strconv.ParseFloat(val, 64) return Value(time.Unix(0, int64(f*1e9)).UTC()), err case DateFieldType: return civil.ParseDate(val) case TimeFieldType: return civil.ParseTime(val) case DateTimeFieldType: return civil.ParseDateTime(val) default: return nil, fmt.Errorf("unrecognized type: %s", typ) } } golang-google-cloud-0.9.0/bigquery/value_test.go000066400000000000000000000520761312234511600217060ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bigquery import ( "encoding/base64" "fmt" "math" "reflect" "testing" "time" "cloud.google.com/go/civil" "cloud.google.com/go/internal/pretty" bq "google.golang.org/api/bigquery/v2" ) func TestConvertBasicValues(t *testing.T) { schema := []*FieldSchema{ {Type: StringFieldType}, {Type: IntegerFieldType}, {Type: FloatFieldType}, {Type: BooleanFieldType}, {Type: BytesFieldType}, } row := &bq.TableRow{ F: []*bq.TableCell{ {V: "a"}, {V: "1"}, {V: "1.2"}, {V: "true"}, {V: base64.StdEncoding.EncodeToString([]byte("foo"))}, }, } got, err := convertRow(row, schema) if err != nil { t.Fatalf("error converting: %v", err) } want := []Value{"a", int64(1), 1.2, true, []byte("foo")} if !reflect.DeepEqual(got, want) { t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want) } } func TestConvertTime(t *testing.T) { // TODO(jba): add tests for civil time types. 
schema := []*FieldSchema{ {Type: TimestampFieldType}, } thyme := time.Date(1970, 1, 1, 10, 0, 0, 10, time.UTC) row := &bq.TableRow{ F: []*bq.TableCell{ {V: fmt.Sprintf("%.10f", float64(thyme.UnixNano())/1e9)}, }, } got, err := convertRow(row, schema) if err != nil { t.Fatalf("error converting: %v", err) } if !got[0].(time.Time).Equal(thyme) { t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, thyme) } if got[0].(time.Time).Location() != time.UTC { t.Errorf("expected time zone UTC: got:\n%v", got) } } func TestConvertNullValues(t *testing.T) { schema := []*FieldSchema{ {Type: StringFieldType}, } row := &bq.TableRow{ F: []*bq.TableCell{ {V: nil}, }, } got, err := convertRow(row, schema) if err != nil { t.Fatalf("error converting: %v", err) } want := []Value{nil} if !reflect.DeepEqual(got, want) { t.Errorf("converting null values: got:\n%v\nwant:\n%v", got, want) } } func TestBasicRepetition(t *testing.T) { schema := []*FieldSchema{ {Type: IntegerFieldType, Repeated: true}, } row := &bq.TableRow{ F: []*bq.TableCell{ { V: []interface{}{ map[string]interface{}{ "v": "1", }, map[string]interface{}{ "v": "2", }, map[string]interface{}{ "v": "3", }, }, }, }, } got, err := convertRow(row, schema) if err != nil { t.Fatalf("error converting: %v", err) } want := []Value{[]Value{int64(1), int64(2), int64(3)}} if !reflect.DeepEqual(got, want) { t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want) } } func TestNestedRecordContainingRepetition(t *testing.T) { schema := []*FieldSchema{ { Type: RecordFieldType, Schema: Schema{ {Type: IntegerFieldType, Repeated: true}, }, }, } row := &bq.TableRow{ F: []*bq.TableCell{ { V: map[string]interface{}{ "f": []interface{}{ map[string]interface{}{ "v": []interface{}{ map[string]interface{}{"v": "1"}, map[string]interface{}{"v": "2"}, map[string]interface{}{"v": "3"}, }, }, }, }, }, }, } got, err := convertRow(row, schema) if err != nil { t.Fatalf("error converting: %v", err) } want := 
[]Value{[]Value{[]Value{int64(1), int64(2), int64(3)}}} if !reflect.DeepEqual(got, want) { t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want) } } func TestRepeatedRecordContainingRepetition(t *testing.T) { schema := []*FieldSchema{ { Type: RecordFieldType, Repeated: true, Schema: Schema{ {Type: IntegerFieldType, Repeated: true}, }, }, } row := &bq.TableRow{F: []*bq.TableCell{ { V: []interface{}{ // repeated records. map[string]interface{}{ // first record. "v": map[string]interface{}{ // pointless single-key-map wrapper. "f": []interface{}{ // list of record fields. map[string]interface{}{ // only record (repeated ints) "v": []interface{}{ // pointless wrapper. map[string]interface{}{ "v": "1", }, map[string]interface{}{ "v": "2", }, map[string]interface{}{ "v": "3", }, }, }, }, }, }, map[string]interface{}{ // second record. "v": map[string]interface{}{ "f": []interface{}{ map[string]interface{}{ "v": []interface{}{ map[string]interface{}{ "v": "4", }, map[string]interface{}{ "v": "5", }, map[string]interface{}{ "v": "6", }, }, }, }, }, }, }, }, }} got, err := convertRow(row, schema) if err != nil { t.Fatalf("error converting: %v", err) } want := []Value{ // the row is a list of length 1, containing an entry for the repeated record. []Value{ // the repeated record is a list of length 2, containing an entry for each repetition. []Value{ // the record is a list of length 1, containing an entry for the repeated integer field. []Value{int64(1), int64(2), int64(3)}, // the repeated integer field is a list of length 3. 
}, []Value{ // second record []Value{int64(4), int64(5), int64(6)}, }, }, } if !reflect.DeepEqual(got, want) { t.Errorf("converting repeated records with repeated values: got:\n%v\nwant:\n%v", got, want) } } func TestRepeatedRecordContainingRecord(t *testing.T) { schema := []*FieldSchema{ { Type: RecordFieldType, Repeated: true, Schema: Schema{ { Type: StringFieldType, }, { Type: RecordFieldType, Schema: Schema{ {Type: IntegerFieldType}, {Type: StringFieldType}, }, }, }, }, } row := &bq.TableRow{F: []*bq.TableCell{ { V: []interface{}{ // repeated records. map[string]interface{}{ // first record. "v": map[string]interface{}{ // pointless single-key-map wrapper. "f": []interface{}{ // list of record fields. map[string]interface{}{ // first record field (name) "v": "first repeated record", }, map[string]interface{}{ // second record field (nested record). "v": map[string]interface{}{ // pointless single-key-map wrapper. "f": []interface{}{ // nested record fields map[string]interface{}{ "v": "1", }, map[string]interface{}{ "v": "two", }, }, }, }, }, }, }, map[string]interface{}{ // second record. "v": map[string]interface{}{ "f": []interface{}{ map[string]interface{}{ "v": "second repeated record", }, map[string]interface{}{ "v": map[string]interface{}{ "f": []interface{}{ map[string]interface{}{ "v": "3", }, map[string]interface{}{ "v": "four", }, }, }, }, }, }, }, }, }, }} got, err := convertRow(row, schema) if err != nil { t.Fatalf("error converting: %v", err) } // TODO: test with flattenresults. want := []Value{ // the row is a list of length 1, containing an entry for the repeated record. []Value{ // the repeated record is a list of length 2, containing an entry for each repetition. []Value{ // record contains a string followed by a nested record. "first repeated record", []Value{ int64(1), "two", }, }, []Value{ // second record. 
"second repeated record", []Value{ int64(3), "four", }, }, }, } if !reflect.DeepEqual(got, want) { t.Errorf("converting repeated records containing record : got:\n%v\nwant:\n%v", got, want) } } func TestValuesSaverConvertsToMap(t *testing.T) { testCases := []struct { vs ValuesSaver want *insertionRow }{ { vs: ValuesSaver{ Schema: []*FieldSchema{ {Name: "intField", Type: IntegerFieldType}, {Name: "strField", Type: StringFieldType}, }, InsertID: "iid", Row: []Value{1, "a"}, }, want: &insertionRow{ InsertID: "iid", Row: map[string]Value{"intField": 1, "strField": "a"}, }, }, { vs: ValuesSaver{ Schema: []*FieldSchema{ {Name: "intField", Type: IntegerFieldType}, { Name: "recordField", Type: RecordFieldType, Schema: []*FieldSchema{ {Name: "nestedInt", Type: IntegerFieldType, Repeated: true}, }, }, }, InsertID: "iid", Row: []Value{1, []Value{[]Value{2, 3}}}, }, want: &insertionRow{ InsertID: "iid", Row: map[string]Value{ "intField": 1, "recordField": map[string]Value{ "nestedInt": []Value{2, 3}, }, }, }, }, { // repeated nested field vs: ValuesSaver{ Schema: Schema{ { Name: "records", Type: RecordFieldType, Schema: Schema{ {Name: "x", Type: IntegerFieldType}, {Name: "y", Type: IntegerFieldType}, }, Repeated: true, }, }, InsertID: "iid", Row: []Value{ // a row is a []Value []Value{ // repeated field's value is a []Value []Value{1, 2}, // first record of the repeated field []Value{3, 4}, // second record }, }, }, want: &insertionRow{ InsertID: "iid", Row: map[string]Value{ "records": []Value{ map[string]Value{"x": 1, "y": 2}, map[string]Value{"x": 3, "y": 4}, }, }, }, }, } for _, tc := range testCases { data, insertID, err := tc.vs.Save() if err != nil { t.Errorf("Expected successful save; got: %v", err) } got := &insertionRow{insertID, data} if !reflect.DeepEqual(got, tc.want) { t.Errorf("saving ValuesSaver:\ngot:\n%+v\nwant:\n%+v", got, tc.want) } } } func TestStructSaver(t *testing.T) { schema := Schema{ {Name: "s", Type: StringFieldType}, {Name: "r", Type: 
IntegerFieldType, Repeated: true}, {Name: "nested", Type: RecordFieldType, Schema: Schema{ {Name: "b", Type: BooleanFieldType}, }}, {Name: "rnested", Type: RecordFieldType, Repeated: true, Schema: Schema{ {Name: "b", Type: BooleanFieldType}, }}, } type ( N struct{ B bool } T struct { S string R []int Nested *N Rnested []*N } ) check := func(msg string, in interface{}, want map[string]Value) { ss := StructSaver{ Schema: schema, InsertID: "iid", Struct: in, } got, gotIID, err := ss.Save() if err != nil { t.Fatalf("%s: %v", msg, err) } if wantIID := "iid"; gotIID != wantIID { t.Errorf("%s: InsertID: got %q, want %q", msg, gotIID, wantIID) } if !reflect.DeepEqual(got, want) { t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want) } } in := T{ S: "x", R: []int{1, 2}, Nested: &N{B: true}, Rnested: []*N{{true}, {false}}, } want := map[string]Value{ "s": "x", "r": []int{1, 2}, "nested": map[string]Value{"b": true}, "rnested": []Value{map[string]Value{"b": true}, map[string]Value{"b": false}}, } check("all values", in, want) check("all values, ptr", &in, want) check("empty struct", T{}, map[string]Value{"s": ""}) // Missing and extra fields ignored. 
type T2 struct { S string // missing R, Nested, RNested Extra int } check("missing and extra", T2{S: "x"}, map[string]Value{"s": "x"}) check("nils in slice", T{Rnested: []*N{{true}, nil, {false}}}, map[string]Value{ "s": "", "rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}}, }) } func TestConvertRows(t *testing.T) { schema := []*FieldSchema{ {Type: StringFieldType}, {Type: IntegerFieldType}, {Type: FloatFieldType}, {Type: BooleanFieldType}, } rows := []*bq.TableRow{ {F: []*bq.TableCell{ {V: "a"}, {V: "1"}, {V: "1.2"}, {V: "true"}, }}, {F: []*bq.TableCell{ {V: "b"}, {V: "2"}, {V: "2.2"}, {V: "false"}, }}, } want := [][]Value{ {"a", int64(1), 1.2, true}, {"b", int64(2), 2.2, false}, } got, err := convertRows(rows, schema) if err != nil { t.Fatalf("got %v, want nil", err) } if !reflect.DeepEqual(got, want) { t.Errorf("\ngot %v\nwant %v", got, want) } } func TestValueList(t *testing.T) { schema := Schema{ {Name: "s", Type: StringFieldType}, {Name: "i", Type: IntegerFieldType}, {Name: "f", Type: FloatFieldType}, {Name: "b", Type: BooleanFieldType}, } want := []Value{"x", 7, 3.14, true} var got []Value vl := (*valueList)(&got) if err := vl.Load(want, schema); err != nil { t.Fatal(err) } if !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } // Load truncates, not appends. 
// https://github.com/GoogleCloudPlatform/google-cloud-go/issues/437 if err := vl.Load(want, schema); err != nil { t.Fatal(err) } if !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } } func TestValueMap(t *testing.T) { ns := Schema{ {Name: "x", Type: IntegerFieldType}, {Name: "y", Type: IntegerFieldType}, } schema := Schema{ {Name: "s", Type: StringFieldType}, {Name: "i", Type: IntegerFieldType}, {Name: "f", Type: FloatFieldType}, {Name: "b", Type: BooleanFieldType}, {Name: "n", Type: RecordFieldType, Schema: ns}, {Name: "rn", Type: RecordFieldType, Schema: ns, Repeated: true}, } in := []Value{"x", 7, 3.14, true, []Value{1, 2}, []Value{[]Value{3, 4}, []Value{5, 6}}, } var vm valueMap if err := vm.Load(in, schema); err != nil { t.Fatal(err) } want := map[string]Value{ "s": "x", "i": 7, "f": 3.14, "b": true, "n": map[string]Value{"x": 1, "y": 2}, "rn": []Value{ map[string]Value{"x": 3, "y": 4}, map[string]Value{"x": 5, "y": 6}, }, } if !reflect.DeepEqual(vm, valueMap(want)) { t.Errorf("got\n%+v\nwant\n%+v", vm, want) } } var ( // For testing StructLoader schema2 = Schema{ {Name: "s", Type: StringFieldType}, {Name: "s2", Type: StringFieldType}, {Name: "by", Type: BytesFieldType}, {Name: "I", Type: IntegerFieldType}, {Name: "F", Type: FloatFieldType}, {Name: "B", Type: BooleanFieldType}, {Name: "TS", Type: TimestampFieldType}, {Name: "D", Type: DateFieldType}, {Name: "T", Type: TimeFieldType}, {Name: "DT", Type: DateTimeFieldType}, {Name: "nested", Type: RecordFieldType, Schema: Schema{ {Name: "nestS", Type: StringFieldType}, {Name: "nestI", Type: IntegerFieldType}, }}, {Name: "t", Type: StringFieldType}, } testTimestamp = time.Date(2016, 11, 5, 7, 50, 22, 8, time.UTC) testDate = civil.Date{2016, 11, 5} testTime = civil.Time{7, 50, 22, 8} testDateTime = civil.DateTime{testDate, testTime} testValues = []Value{"x", "y", []byte{1, 2, 3}, int64(7), 3.14, true, testTimestamp, testDate, testTime, testDateTime, []Value{"nested", int64(17)}, "z"} ) 
type testStruct1 struct { B bool I int times S string S2 String By []byte s string F float64 Nested nested Tagged string `bigquery:"t"` } type String string type nested struct { NestS string NestI int } type times struct { TS time.Time T civil.Time D civil.Date DT civil.DateTime } func TestStructLoader(t *testing.T) { var ts1 testStruct1 if err := load(&ts1, schema2, testValues); err != nil { t.Fatal(err) } // Note: the schema field named "s" gets matched to the exported struct // field "S", not the unexported "s". want := &testStruct1{ B: true, I: 7, F: 3.14, times: times{TS: testTimestamp, T: testTime, D: testDate, DT: testDateTime}, S: "x", S2: "y", By: []byte{1, 2, 3}, Nested: nested{NestS: "nested", NestI: 17}, Tagged: "z", } if !reflect.DeepEqual(&ts1, want) { t.Errorf("got %+v, want %+v", pretty.Value(ts1), pretty.Value(*want)) d, _, err := pretty.Diff(*want, ts1) if err == nil { t.Logf("diff:\n%s", d) } } // Test pointers to nested structs. type nestedPtr struct{ Nested *nested } var np nestedPtr if err := load(&np, schema2, testValues); err != nil { t.Fatal(err) } want2 := &nestedPtr{Nested: &nested{NestS: "nested", NestI: 17}} if !reflect.DeepEqual(&np, want2) { t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2)) } // Existing values should be reused. 
nst := &nested{NestS: "x", NestI: -10} np = nestedPtr{Nested: nst} if err := load(&np, schema2, testValues); err != nil { t.Fatal(err) } if !reflect.DeepEqual(&np, want2) { t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2)) } if np.Nested != nst { t.Error("nested struct pointers not equal") } } type repStruct struct { Nums []int ShortNums [2]int // to test truncation LongNums [5]int // to test padding with zeroes Nested []*nested } var ( repSchema = Schema{ {Name: "nums", Type: IntegerFieldType, Repeated: true}, {Name: "shortNums", Type: IntegerFieldType, Repeated: true}, {Name: "longNums", Type: IntegerFieldType, Repeated: true}, {Name: "nested", Type: RecordFieldType, Repeated: true, Schema: Schema{ {Name: "nestS", Type: StringFieldType}, {Name: "nestI", Type: IntegerFieldType}, }}, } v123 = []Value{int64(1), int64(2), int64(3)} repValues = []Value{v123, v123, v123, []Value{ []Value{"x", int64(1)}, []Value{"y", int64(2)}, }, } ) func TestStructLoaderRepeated(t *testing.T) { var r1 repStruct if err := load(&r1, repSchema, repValues); err != nil { t.Fatal(err) } want := repStruct{ Nums: []int{1, 2, 3}, ShortNums: [...]int{1, 2}, // extra values discarded LongNums: [...]int{1, 2, 3, 0, 0}, Nested: []*nested{{"x", 1}, {"y", 2}}, } if !reflect.DeepEqual(r1, want) { t.Errorf("got %+v, want %+v", pretty.Value(r1), pretty.Value(want)) } r2 := repStruct{ Nums: []int{-1, -2, -3, -4, -5}, // truncated to zero and appended to LongNums: [...]int{-1, -2, -3, -4, -5}, // unset elements are zeroed } if err := load(&r2, repSchema, repValues); err != nil { t.Fatal(err) } if !reflect.DeepEqual(r2, want) { t.Errorf("got %+v, want %+v", pretty.Value(r2), pretty.Value(want)) } if got, want := cap(r2.Nums), 5; got != want { t.Errorf("cap(r2.Nums) = %d, want %d", got, want) } // Short slice case. 
r3 := repStruct{Nums: []int{-1}} if err := load(&r3, repSchema, repValues); err != nil { t.Fatal(err) } if !reflect.DeepEqual(r3, want) { t.Errorf("got %+v, want %+v", pretty.Value(r3), pretty.Value(want)) } if got, want := cap(r3.Nums), 3; got != want { t.Errorf("cap(r3.Nums) = %d, want %d", got, want) } } func TestStructLoaderOverflow(t *testing.T) { type S struct { I int16 F float32 } schema := Schema{ {Name: "I", Type: IntegerFieldType}, {Name: "F", Type: FloatFieldType}, } var s S if err := load(&s, schema, []Value{int64(math.MaxInt16 + 1), 0}); err == nil { t.Error("int: got nil, want error") } if err := load(&s, schema, []Value{int64(0), math.MaxFloat32 * 2}); err == nil { t.Error("float: got nil, want error") } } func TestStructLoaderFieldOverlap(t *testing.T) { // It's OK if the struct has fields that the schema does not, and vice versa. type S1 struct { I int X [][]int // not in the schema; does not even correspond to a valid BigQuery type // many schema fields missing } var s1 S1 if err := load(&s1, schema2, testValues); err != nil { t.Fatal(err) } want1 := S1{I: 7} if !reflect.DeepEqual(s1, want1) { t.Errorf("got %+v, want %+v", pretty.Value(s1), pretty.Value(want1)) } // It's even valid to have no overlapping fields at all. type S2 struct{ Z int } var s2 S2 if err := load(&s2, schema2, testValues); err != nil { t.Fatal(err) } want2 := S2{} if !reflect.DeepEqual(s2, want2) { t.Errorf("got %+v, want %+v", pretty.Value(s2), pretty.Value(want2)) } } func TestStructLoaderErrors(t *testing.T) { check := func(sp interface{}) { var sl structLoader err := sl.set(sp, schema2) if err == nil { t.Errorf("%T: got nil, want error", sp) } } type bad1 struct{ F int32 } // wrong type for FLOAT column check(&bad1{}) type bad2 struct{ I uint } // unsupported integer type check(&bad2{}) // Using more than one struct type with the same structLoader. 
type different struct { B bool I int times S string s string Nums []int } var sl structLoader if err := sl.set(&testStruct1{}, schema2); err != nil { t.Fatal(err) } err := sl.set(&different{}, schema2) if err == nil { t.Error("different struct types: got nil, want error") } } func load(pval interface{}, schema Schema, vals []Value) error { var sl structLoader if err := sl.set(pval, schema); err != nil { return err } return sl.Load(vals, nil) } func BenchmarkStructLoader_NoCompile(b *testing.B) { benchmarkStructLoader(b, false) } func BenchmarkStructLoader_Compile(b *testing.B) { benchmarkStructLoader(b, true) } func benchmarkStructLoader(b *testing.B, compile bool) { var ts1 testStruct1 for i := 0; i < b.N; i++ { var sl structLoader for j := 0; j < 10; j++ { if err := load(&ts1, schema2, testValues); err != nil { b.Fatal(err) } if !compile { sl.typ = nil } } } } golang-google-cloud-0.9.0/bigtable/000077500000000000000000000000001312234511600171145ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigtable/admin.go000066400000000000000000000275031312234511600205420ustar00rootroot00000000000000/* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package bigtable import ( "fmt" "regexp" "strings" btopt "cloud.google.com/go/bigtable/internal/option" "cloud.google.com/go/longrunning" lroauto "cloud.google.com/go/longrunning/autogen" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" "google.golang.org/grpc" "google.golang.org/grpc/metadata" ) const adminAddr = "bigtableadmin.googleapis.com:443" // AdminClient is a client type for performing admin operations within a specific instance. type AdminClient struct { conn *grpc.ClientConn tClient btapb.BigtableTableAdminClient project, instance string // Metadata to be sent with each request. md metadata.MD } // NewAdminClient creates a new AdminClient for a given project and instance. func NewAdminClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*AdminClient, error) { o, err := btopt.DefaultClientOptions(adminAddr, AdminScope, clientUserAgent) if err != nil { return nil, err } o = append(o, opts...) conn, err := transport.DialGRPC(ctx, o...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } return &AdminClient{ conn: conn, tClient: btapb.NewBigtableTableAdminClient(conn), project: project, instance: instance, md: metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)), }, nil } // Close closes the AdminClient. func (ac *AdminClient) Close() error { return ac.conn.Close() } func (ac *AdminClient) instancePrefix() string { return fmt.Sprintf("projects/%s/instances/%s", ac.project, ac.instance) } // Tables returns a list of the tables in the instance. 
func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) { ctx = mergeOutgoingMetadata(ctx, ac.md) prefix := ac.instancePrefix() req := &btapb.ListTablesRequest{ Parent: prefix, } res, err := ac.tClient.ListTables(ctx, req) if err != nil { return nil, err } names := make([]string, 0, len(res.Tables)) for _, tbl := range res.Tables { names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/")) } return names, nil } // CreateTable creates a new table in the instance. // This method may return before the table's creation is complete. func (ac *AdminClient) CreateTable(ctx context.Context, table string) error { ctx = mergeOutgoingMetadata(ctx, ac.md) prefix := ac.instancePrefix() req := &btapb.CreateTableRequest{ Parent: prefix, TableId: table, } _, err := ac.tClient.CreateTable(ctx, req) return err } // CreatePresplitTable creates a new table in the instance. // The list of row keys will be used to initially split the table into multiple tablets. // Given two split keys, "s1" and "s2", three tablets will be created, // spanning the key ranges: [, s1), [s1, s2), [s2, ). // This method may return before the table's creation is complete. func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, split_keys []string) error { var req_splits []*btapb.CreateTableRequest_Split for _, split := range split_keys { req_splits = append(req_splits, &btapb.CreateTableRequest_Split{[]byte(split)}) } ctx = mergeOutgoingMetadata(ctx, ac.md) prefix := ac.instancePrefix() req := &btapb.CreateTableRequest{ Parent: prefix, TableId: table, InitialSplits: req_splits, } _, err := ac.tClient.CreateTable(ctx, req) return err } // CreateColumnFamily creates a new column family in a table. func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error { // TODO(dsymonds): Permit specifying gcexpr and any other family settings. 
ctx = mergeOutgoingMetadata(ctx, ac.md) prefix := ac.instancePrefix() req := &btapb.ModifyColumnFamiliesRequest{ Name: prefix + "/tables/" + table, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ Id: family, Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, }}, } _, err := ac.tClient.ModifyColumnFamilies(ctx, req) return err } // DeleteTable deletes a table and all of its data. func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error { ctx = mergeOutgoingMetadata(ctx, ac.md) prefix := ac.instancePrefix() req := &btapb.DeleteTableRequest{ Name: prefix + "/tables/" + table, } _, err := ac.tClient.DeleteTable(ctx, req) return err } // DeleteColumnFamily deletes a column family in a table and all of its data. func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error { ctx = mergeOutgoingMetadata(ctx, ac.md) prefix := ac.instancePrefix() req := &btapb.ModifyColumnFamiliesRequest{ Name: prefix + "/tables/" + table, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ Id: family, Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{true}, }}, } _, err := ac.tClient.ModifyColumnFamilies(ctx, req) return err } // TableInfo represents information about a table. type TableInfo struct { // DEPRECATED - This field is deprecated. Please use FamilyInfos instead. Families []string FamilyInfos []FamilyInfo } // FamilyInfo represents information about a column family. type FamilyInfo struct { Name string GCPolicy string } // TableInfo retrieves information about a table. 
func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) { ctx = mergeOutgoingMetadata(ctx, ac.md) prefix := ac.instancePrefix() req := &btapb.GetTableRequest{ Name: prefix + "/tables/" + table, } res, err := ac.tClient.GetTable(ctx, req) if err != nil { return nil, err } ti := &TableInfo{} for name, fam := range res.ColumnFamilies { ti.Families = append(ti.Families, name) ti.FamilyInfos = append(ti.FamilyInfos, FamilyInfo{Name: name, GCPolicy: GCRuleToString(fam.GcRule)}) } return ti, nil } // SetGCPolicy specifies which cells in a column family should be garbage collected. // GC executes opportunistically in the background; table reads may return data // matching the GC policy. func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error { ctx = mergeOutgoingMetadata(ctx, ac.md) prefix := ac.instancePrefix() req := &btapb.ModifyColumnFamiliesRequest{ Name: prefix + "/tables/" + table, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ Id: family, Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{GcRule: policy.proto()}}, }}, } _, err := ac.tClient.ModifyColumnFamilies(ctx, req) return err } // DropRowRange permanently deletes a row range from the specified table. func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix string) error { ctx = mergeOutgoingMetadata(ctx, ac.md) prefix := ac.instancePrefix() req := &btapb.DropRowRangeRequest{ Name: prefix + "/tables/" + table, Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte(rowKeyPrefix)}, } _, err := ac.tClient.DropRowRange(ctx, req) return err } const instanceAdminAddr = "bigtableadmin.googleapis.com:443" // InstanceAdminClient is a client type for performing admin operations on instances. // These operations can be substantially more dangerous than those provided by AdminClient. 
type InstanceAdminClient struct { conn *grpc.ClientConn iClient btapb.BigtableInstanceAdminClient lroClient *lroauto.OperationsClient project string // Metadata to be sent with each request. md metadata.MD } // NewInstanceAdminClient creates a new InstanceAdminClient for a given project. func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.ClientOption) (*InstanceAdminClient, error) { o, err := btopt.DefaultClientOptions(instanceAdminAddr, InstanceAdminScope, clientUserAgent) if err != nil { return nil, err } o = append(o, opts...) conn, err := transport.DialGRPC(ctx, o...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } lroClient, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) if err != nil { // This error "should not happen", since we are just reusing old connection // and never actually need to dial. // If this does happen, we could leak conn. However, we cannot close conn: // If the user invoked the function with option.WithGRPCConn, // we would close a connection that's still in use. // TODO(pongad): investigate error conditions. return nil, err } return &InstanceAdminClient{ conn: conn, iClient: btapb.NewBigtableInstanceAdminClient(conn), lroClient: lroClient, project: project, md: metadata.Pairs(resourcePrefixHeader, "projects/"+project), }, nil } // Close closes the InstanceAdminClient. 
func (iac *InstanceAdminClient) Close() error { return iac.conn.Close() } // StorageType is the type of storage used for all tables in an instance type StorageType int const ( SSD StorageType = iota HDD ) func (st StorageType) proto() btapb.StorageType { if st == HDD { return btapb.StorageType_HDD } return btapb.StorageType_SSD } // InstanceInfo represents information about an instance type InstanceInfo struct { Name string // name of the instance DisplayName string // display name for UIs } // InstanceConf contains the information necessary to create an Instance type InstanceConf struct { InstanceId, DisplayName, ClusterId, Zone string NumNodes int32 StorageType StorageType } var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`) // CreateInstance creates a new instance in the project. // This method will return when the instance has been created or when an error occurs. func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error { ctx = mergeOutgoingMetadata(ctx, iac.md) req := &btapb.CreateInstanceRequest{ Parent: "projects/" + iac.project, InstanceId: conf.InstanceId, Instance: &btapb.Instance{DisplayName: conf.DisplayName}, Clusters: map[string]*btapb.Cluster{ conf.ClusterId: { ServeNodes: conf.NumNodes, DefaultStorageType: conf.StorageType.proto(), Location: "projects/" + iac.project + "/locations/" + conf.Zone, }, }, } lro, err := iac.iClient.CreateInstance(ctx, req) if err != nil { return err } resp := btapb.Instance{} return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, &resp) } // DeleteInstance deletes an instance from the project. 
func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error { ctx = mergeOutgoingMetadata(ctx, iac.md) req := &btapb.DeleteInstanceRequest{"projects/" + iac.project + "/instances/" + instanceId} _, err := iac.iClient.DeleteInstance(ctx, req) return err } // Instances returns a list of instances in the project. func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo, error) { ctx = mergeOutgoingMetadata(ctx, iac.md) req := &btapb.ListInstancesRequest{ Parent: "projects/" + iac.project, } res, err := iac.iClient.ListInstances(ctx, req) if err != nil { return nil, err } var is []*InstanceInfo for _, i := range res.Instances { m := instanceNameRegexp.FindStringSubmatch(i.Name) if m == nil { return nil, fmt.Errorf("malformed instance name %q", i.Name) } is = append(is, &InstanceInfo{ Name: m[2], DisplayName: i.DisplayName, }) } return is, nil } golang-google-cloud-0.9.0/bigtable/admin_test.go000066400000000000000000000072271312234511600216020ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package bigtable import ( "sort" "testing" "time" "fmt" "golang.org/x/net/context" "strings" ) func TestAdminIntegration(t *testing.T) { testEnv, err := NewIntegrationEnv() if err != nil { t.Fatalf("IntegrationEnv: %v", err) } defer testEnv.Close() timeout := 2 * time.Second if testEnv.Config().UseProd { timeout = 5 * time.Minute } ctx, _ := context.WithTimeout(context.Background(), timeout) adminClient, err := testEnv.NewAdminClient() if err != nil { t.Fatalf("NewAdminClient: %v", err) } defer adminClient.Close() list := func() []string { tbls, err := adminClient.Tables(ctx) if err != nil { t.Fatalf("Fetching list of tables: %v", err) } sort.Strings(tbls) return tbls } containsAll := func(got, want []string) bool { gotSet := make(map[string]bool) for _, s := range got { gotSet[s] = true } for _, s := range want { if !gotSet[s] { return false } } return true } defer adminClient.DeleteTable(ctx, "mytable") if err := adminClient.CreateTable(ctx, "mytable"); err != nil { t.Fatalf("Creating table: %v", err) } defer adminClient.DeleteTable(ctx, "myothertable") if err := adminClient.CreateTable(ctx, "myothertable"); err != nil { t.Fatalf("Creating table: %v", err) } if got, want := list(), []string{"myothertable", "mytable"}; !containsAll(got, want) { t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) } if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil { t.Fatalf("Deleting table: %v", err) } tables := list() if got, want := tables, []string{"mytable"}; !containsAll(got, want) { t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) } if got, unwanted := tables, []string{"myothertable"}; containsAll(got, unwanted) { t.Errorf("adminClient.Tables return %#v. 
unwanted %#v", got, unwanted) } // Populate mytable and drop row ranges if err = adminClient.CreateColumnFamily(ctx, "mytable", "cf"); err != nil { t.Fatalf("Creating column family: %v", err) } client, err := testEnv.NewClient() if err != nil { t.Fatalf("NewClient: %v", err) } defer client.Close() tbl := client.Open("mytable") prefixes := []string{"a", "b", "c"} for _, prefix := range prefixes { for i := 0; i < 5; i++ { mut := NewMutation() mut.Set("cf", "col", 0, []byte("1")) if err := tbl.Apply(ctx, fmt.Sprintf("%v-%v", prefix, i), mut); err != nil { t.Fatalf("Mutating row: %v", err) } } } if err = adminClient.DropRowRange(ctx, "mytable", "a"); err != nil { t.Errorf("DropRowRange a: %v", err) } if err = adminClient.DropRowRange(ctx, "mytable", "c"); err != nil { t.Errorf("DropRowRange c: %v", err) } if err = adminClient.DropRowRange(ctx, "mytable", "x"); err != nil { t.Errorf("DropRowRange x: %v", err) } var gotRowCount int tbl.ReadRows(ctx, RowRange{}, func(row Row) bool { gotRowCount += 1 if !strings.HasPrefix(row.Key(), "b") { t.Errorf("Invalid row after dropping range: %v", row) } return true }) if gotRowCount != 5 { t.Errorf("Invalid row count after dropping range: got %v, want %v", gotRowCount, 5) } } golang-google-cloud-0.9.0/bigtable/bigtable.go000066400000000000000000000554271312234511600212310ustar00rootroot00000000000000/* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package bigtable // import "cloud.google.com/go/bigtable" import ( "errors" "fmt" "io" "strconv" "time" "cloud.google.com/go/bigtable/internal/gax" btopt "cloud.google.com/go/bigtable/internal/option" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" btpb "google.golang.org/genproto/googleapis/bigtable/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" ) const prodAddr = "bigtable.googleapis.com:443" // Client is a client for reading and writing data to tables in an instance. // // A Client is safe to use concurrently, except for its Close method. type Client struct { conn *grpc.ClientConn client btpb.BigtableClient project, instance string } // NewClient creates a new Client for a given project and instance. func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) { o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent) if err != nil { return nil, err } // Default to a small connection pool that can be overridden. o = append(o, option.WithGRPCConnectionPool(4)) o = append(o, opts...) conn, err := transport.DialGRPC(ctx, o...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } return &Client{ conn: conn, client: btpb.NewBigtableClient(conn), project: project, instance: instance, }, nil } // Close closes the Client. 
func (c *Client) Close() error { return c.conn.Close() } var ( idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted} isIdempotentRetryCode = make(map[codes.Code]bool) retryOptions = []gax.CallOption{ gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2), gax.WithRetryCodes(idempotentRetryCodes), } ) func init() { for _, code := range idempotentRetryCodes { isIdempotentRetryCode[code] = true } } func (c *Client) fullTableName(table string) string { return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table) } // A Table refers to a table. // // A Table is safe to use concurrently. type Table struct { c *Client table string // Metadata to be sent with each request. md metadata.MD } // Open opens a table. func (c *Client) Open(table string) *Table { return &Table{ c: c, table: table, md: metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)), } } // TODO(dsymonds): Read method that returns a sequence of ReadItems. // ReadRows reads rows from a table. f is called for each row. // If f returns false, the stream is shut down and ReadRows returns. // f owns its argument, and f is called serially in order by row key. // // By default, the yielded rows will contain all values in all cells. // Use RowFilter to limit the cells returned. func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error { ctx = mergeOutgoingMetadata(ctx, t.md) var prevRowKey string err := gax.Invoke(ctx, func(ctx context.Context) error { req := &btpb.ReadRowsRequest{ TableName: t.c.fullTableName(t.table), Rows: arg.proto(), } for _, opt := range opts { opt.set(req) } ctx, cancel := context.WithCancel(ctx) // for aborting the stream defer cancel() stream, err := t.c.client.ReadRows(ctx, req) if err != nil { return err } cr := newChunkReader() for { res, err := stream.Recv() if err == io.EOF { break } if err != nil { // Reset arg for next Invoke call. 
arg = arg.retainRowsAfter(prevRowKey) return err } for _, cc := range res.Chunks { row, err := cr.Process(cc) if err != nil { // No need to prepare for a retry, this is an unretryable error. return err } if row == nil { continue } prevRowKey = row.Key() if !f(row) { // Cancel and drain stream. cancel() for { if _, err := stream.Recv(); err != nil { // The stream has ended. We don't return an error // because the caller has intentionally interrupted the scan. return nil } } } } if err := cr.Close(); err != nil { // No need to prepare for a retry, this is an unretryable error. return err } } return err }, retryOptions...) return err } // ReadRow is a convenience implementation of a single-row reader. // A missing row will return a zero-length map and a nil error. func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) { var r Row err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool { r = rr return true }, opts...) return r, err } // decodeFamilyProto adds the cell data from f to the given row. func decodeFamilyProto(r Row, row string, f *btpb.Family) { fam := f.Name // does not have colon for _, col := range f.Columns { for _, cell := range col.Cells { ri := ReadItem{ Row: row, Column: fam + ":" + string(col.Qualifier), Timestamp: Timestamp(cell.TimestampMicros), Value: cell.Value, } r[fam] = append(r[fam], ri) } } } // RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList. type RowSet interface { proto() *btpb.RowSet // retainRowsAfter returns a new RowSet that does not include the // given row key or any row key lexicographically less than it. retainRowsAfter(lastRowKey string) RowSet // Valid reports whether this set can cover at least one row. valid() bool } // RowList is a sequence of row keys. 
type RowList []string func (r RowList) proto() *btpb.RowSet { keys := make([][]byte, len(r)) for i, row := range r { keys[i] = []byte(row) } return &btpb.RowSet{RowKeys: keys} } func (r RowList) retainRowsAfter(lastRowKey string) RowSet { var retryKeys RowList for _, key := range r { if key > lastRowKey { retryKeys = append(retryKeys, key) } } return retryKeys } func (r RowList) valid() bool { return len(r) > 0 } // A RowRange is a half-open interval [Start, Limit) encompassing // all the rows with keys at least as large as Start, and less than Limit. // (Bigtable string comparison is the same as Go's.) // A RowRange can be unbounded, encompassing all keys at least as large as Start. type RowRange struct { start string limit string } // NewRange returns the new RowRange [begin, end). func NewRange(begin, end string) RowRange { return RowRange{ start: begin, limit: end, } } // Unbounded tests whether a RowRange is unbounded. func (r RowRange) Unbounded() bool { return r.limit == "" } // Contains says whether the RowRange contains the key. func (r RowRange) Contains(row string) bool { return r.start <= row && (r.limit == "" || r.limit > row) } // String provides a printable description of a RowRange. func (r RowRange) String() string { a := strconv.Quote(r.start) if r.Unbounded() { return fmt.Sprintf("[%s,∞)", a) } return fmt.Sprintf("[%s,%q)", a, r.limit) } func (r RowRange) proto() *btpb.RowSet { rr := &btpb.RowRange{ StartKey: &btpb.RowRange_StartKeyClosed{[]byte(r.start)}, } if !r.Unbounded() { rr.EndKey = &btpb.RowRange_EndKeyOpen{[]byte(r.limit)} } return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}} } func (r RowRange) retainRowsAfter(lastRowKey string) RowSet { if lastRowKey == "" || lastRowKey < r.start { return r } // Set the beginning of the range to the row after the last scanned. 
start := lastRowKey + "\x00" if r.Unbounded() { return InfiniteRange(start) } return NewRange(start, r.limit) } func (r RowRange) valid() bool { return r.start < r.limit } // RowRangeList is a sequence of RowRanges representing the union of the ranges. type RowRangeList []RowRange func (r RowRangeList) proto() *btpb.RowSet { ranges := make([]*btpb.RowRange, len(r)) for i, rr := range r { // RowRange.proto() returns a RowSet with a single element RowRange array ranges[i] = rr.proto().RowRanges[0] } return &btpb.RowSet{RowRanges: ranges} } func (r RowRangeList) retainRowsAfter(lastRowKey string) RowSet { if lastRowKey == "" { return r } // Return a list of any range that has not yet been completely processed var ranges RowRangeList for _, rr := range r { retained := rr.retainRowsAfter(lastRowKey) if retained.valid() { ranges = append(ranges, retained.(RowRange)) } } return ranges } func (r RowRangeList) valid() bool { for _, rr := range r { if rr.valid() { return true } } return false } // SingleRow returns a RowSet for reading a single row. func SingleRow(row string) RowSet { return RowList{row} } // PrefixRange returns a RowRange consisting of all keys starting with the prefix. func PrefixRange(prefix string) RowRange { return RowRange{ start: prefix, limit: prefixSuccessor(prefix), } } // InfiniteRange returns the RowRange consisting of all keys at least as // large as start. func InfiniteRange(start string) RowRange { return RowRange{ start: start, limit: "", } } // prefixSuccessor returns the lexically smallest string greater than the // prefix, if it exists, or "" otherwise. In either case, it is the string // needed for the Limit of a RowRange. func prefixSuccessor(prefix string) string { if prefix == "" { return "" // infinite range } n := len(prefix) for n--; n >= 0 && prefix[n] == '\xff'; n-- { } if n == -1 { return "" } ans := []byte(prefix[:n]) ans = append(ans, prefix[n]+1) return string(ans) } // A ReadOption is an optional argument to ReadRows. 
type ReadOption interface { set(req *btpb.ReadRowsRequest) } // RowFilter returns a ReadOption that applies f to the contents of read rows. func RowFilter(f Filter) ReadOption { return rowFilter{f} } type rowFilter struct{ f Filter } func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() } // LimitRows returns a ReadOption that will limit the number of rows to be read. func LimitRows(limit int64) ReadOption { return limitRows{limit} } type limitRows struct{ limit int64 } func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit } // mutationsAreRetryable returns true if all mutations are idempotent // and therefore retryable. A mutation is idempotent iff all cell timestamps // have an explicit timestamp set and do not rely on the timestamp being set on the server. func mutationsAreRetryable(muts []*btpb.Mutation) bool { serverTime := int64(ServerTime) for _, mut := range muts { setCell := mut.GetSetCell() if setCell != nil && setCell.TimestampMicros == serverTime { return false } } return true } // Apply applies a Mutation to a specific row. func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error { ctx = mergeOutgoingMetadata(ctx, t.md) after := func(res proto.Message) { for _, o := range opts { o.after(res) } } var callOptions []gax.CallOption if m.cond == nil { req := &btpb.MutateRowRequest{ TableName: t.c.fullTableName(t.table), RowKey: []byte(row), Mutations: m.ops, } if mutationsAreRetryable(m.ops) { callOptions = retryOptions } var res *btpb.MutateRowResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error res, err = t.c.client.MutateRow(ctx, req) return err }, callOptions...) 
if err == nil { after(res) } return err } req := &btpb.CheckAndMutateRowRequest{ TableName: t.c.fullTableName(t.table), RowKey: []byte(row), PredicateFilter: m.cond.proto(), } if m.mtrue != nil { req.TrueMutations = m.mtrue.ops } if m.mfalse != nil { req.FalseMutations = m.mfalse.ops } if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) { callOptions = retryOptions } var cmRes *btpb.CheckAndMutateRowResponse err := gax.Invoke(ctx, func(ctx context.Context) error { var err error cmRes, err = t.c.client.CheckAndMutateRow(ctx, req) return err }, callOptions...) if err == nil { after(cmRes) } return err } // An ApplyOption is an optional argument to Apply. type ApplyOption interface { after(res proto.Message) } type applyAfterFunc func(res proto.Message) func (a applyAfterFunc) after(res proto.Message) { a(res) } // GetCondMutationResult returns an ApplyOption that reports whether the conditional // mutation's condition matched. func GetCondMutationResult(matched *bool) ApplyOption { return applyAfterFunc(func(res proto.Message) { if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok { *matched = res.PredicateMatched } }) } // Mutation represents a set of changes for a single row of a table. type Mutation struct { ops []*btpb.Mutation // for conditional mutations cond Filter mtrue, mfalse *Mutation } // NewMutation returns a new mutation. func NewMutation() *Mutation { return new(Mutation) } // NewCondMutation returns a conditional mutation. // The given row filter determines which mutation is applied: // If the filter matches any cell in the row, mtrue is applied; // otherwise, mfalse is applied. // Either given mutation may be nil. func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation { return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse} } // Set sets a value in a specified column, with the given timestamp. // The timestamp will be truncated to millisecond granularity. 
// A timestamp of ServerTime means to use the server timestamp. func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) { m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ FamilyName: family, ColumnQualifier: []byte(column), TimestampMicros: int64(ts.TruncateToMilliseconds()), Value: value, }}}) } // DeleteCellsInColumn will delete all the cells whose columns are family:column. func (m *Mutation) DeleteCellsInColumn(family, column string) { m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{ FamilyName: family, ColumnQualifier: []byte(column), }}}) } // DeleteTimestampRange deletes all cells whose columns are family:column // and whose timestamps are in the half-open interval [start, end). // If end is zero, it will be interpreted as infinity. // The timestamps will be truncated to millisecond granularity. func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) { m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{ FamilyName: family, ColumnQualifier: []byte(column), TimeRange: &btpb.TimestampRange{ StartTimestampMicros: int64(start.TruncateToMilliseconds()), EndTimestampMicros: int64(end.TruncateToMilliseconds()), }, }}}) } // DeleteCellsInFamily will delete all the cells whose columns are family:*. func (m *Mutation) DeleteCellsInFamily(family string) { m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{ FamilyName: family, }}}) } // DeleteRow deletes the entire row. func (m *Mutation) DeleteRow() { m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}}) } // entryErr is a container that combines an entry with the error that was returned for it. 
// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed. type entryErr struct { Entry *btpb.MutateRowsRequest_Entry Err error } // ApplyBulk applies multiple Mutations. // Each mutation is individually applied atomically, // but the set of mutations may be applied in any order. // // Two types of failures may occur. If the entire process // fails, (nil, err) will be returned. If specific mutations // fail to apply, ([]err, nil) will be returned, and the errors // will correspond to the relevant rowKeys/muts arguments. // // Conditional mutations cannot be applied in bulk and providing one will result in an error. func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) { ctx = mergeOutgoingMetadata(ctx, t.md) if len(rowKeys) != len(muts) { return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts)) } origEntries := make([]*entryErr, len(rowKeys)) for i, key := range rowKeys { mut := muts[i] if mut.cond != nil { return nil, errors.New("conditional mutations cannot be applied in bulk") } origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}} } // entries will be reduced after each invocation to just what needs to be retried. entries := make([]*entryErr, len(rowKeys)) copy(entries, origEntries) err := gax.Invoke(ctx, func(ctx context.Context) error { err := t.doApplyBulk(ctx, entries, opts...) if err != nil { // We want to retry the entire request with the current entries return err } entries = t.getApplyBulkRetries(entries) if len(entries) > 0 && len(idempotentRetryCodes) > 0 { // We have at least one mutation that needs to be retried. // Return an arbitrary error that is retryable according to callOptions. return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk") } return nil }, retryOptions...) 
if err != nil { return nil, err } // Accumulate all of the errors into an array to return, interspersed with nils for successful // entries. The absence of any errors means we should return nil. var errs []error var foundErr bool for _, entry := range origEntries { if entry.Err != nil { foundErr = true } errs = append(errs, entry.Err) } if foundErr { return errs, nil } return nil, nil } // getApplyBulkRetries returns the entries that need to be retried func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr { var retryEntries []*entryErr for _, entry := range entries { err := entry.Err if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) { // There was an error and the entry is retryable. retryEntries = append(retryEntries, entry) } } return retryEntries } // doApplyBulk does the work of a single ApplyBulk invocation func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error { after := func(res proto.Message) { for _, o := range opts { o.after(res) } } entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs)) for i, entryErr := range entryErrs { entries[i] = entryErr.Entry } req := &btpb.MutateRowsRequest{ TableName: t.c.fullTableName(t.table), Entries: entries, } stream, err := t.c.client.MutateRows(ctx, req) if err != nil { return err } for { res, err := stream.Recv() if err == io.EOF { break } if err != nil { return err } for i, entry := range res.Entries { status := entry.Status if status.Code == int32(codes.OK) { entryErrs[i].Err = nil } else { entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message) } } after(res) } return nil } // Timestamp is in units of microseconds since 1 January 1970. type Timestamp int64 // ServerTime is a specific Timestamp that may be passed to (*Mutation).Set. // It indicates that the server's timestamp should be used. const ServerTime Timestamp = -1 // Time converts a time.Time into a Timestamp. 
func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) } // Now returns the Timestamp representation of the current time on the client. func Now() Timestamp { return Time(time.Now()) } // Time converts a Timestamp into a time.Time. func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) } // TruncateToMilliseconds truncates a Timestamp to millisecond granularity, // which is currently the only granularity supported. func (ts Timestamp) TruncateToMilliseconds() Timestamp { if ts == ServerTime { return ts } return ts - ts%1000 } // ApplyReadModifyWrite applies a ReadModifyWrite to a specific row. // It returns the newly written cells. func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) { ctx = mergeOutgoingMetadata(ctx, t.md) req := &btpb.ReadModifyWriteRowRequest{ TableName: t.c.fullTableName(t.table), RowKey: []byte(row), Rules: m.ops, } res, err := t.c.client.ReadModifyWriteRow(ctx, req) if err != nil { return nil, err } if res.Row == nil { return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil") } r := make(Row) for _, fam := range res.Row.Families { // res is *btpb.Row, fam is *btpb.Family decodeFamilyProto(r, row, fam) } return r, nil } // ReadModifyWrite represents a set of operations on a single row of a table. // It is like Mutation but for non-idempotent changes. // When applied, these operations operate on the latest values of the row's cells, // and result in a new value being written to the relevant cell with a timestamp // that is max(existing timestamp, current server time). // // The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will // be executed serially by the server. type ReadModifyWrite struct { ops []*btpb.ReadModifyWriteRule } // NewReadModifyWrite returns a new ReadModifyWrite. func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) } // AppendValue appends a value to a specific cell's value. 
// If the cell is unset, it will be treated as an empty value. func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) { m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ FamilyName: family, ColumnQualifier: []byte(column), Rule: &btpb.ReadModifyWriteRule_AppendValue{v}, }) } // Increment interprets the value in a specific cell as a 64-bit big-endian signed integer, // and adds a value to it. If the cell is unset, it will be treated as zero. // If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite // operation will fail. func (m *ReadModifyWrite) Increment(family, column string, delta int64) { m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ FamilyName: family, ColumnQualifier: []byte(column), Rule: &btpb.ReadModifyWriteRule_IncrementAmount{delta}, }) } // mergeOutgoingMetadata returns a context populated by the existing outgoing metadata, // if any, joined with internal metadata. func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context { mdCopy, _ := metadata.FromOutgoingContext(ctx) return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md)) } golang-google-cloud-0.9.0/bigtable/bigtable_test.go000066400000000000000000000651061312234511600222630ustar00rootroot00000000000000/* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package bigtable import ( "fmt" "math/rand" "reflect" "strings" "sync" "testing" "time" "golang.org/x/net/context" ) func TestPrefix(t *testing.T) { tests := []struct { prefix, succ string }{ {"", ""}, {"\xff", ""}, // when used, "" means Infinity {"x\xff", "y"}, {"\xfe", "\xff"}, } for _, tc := range tests { got := prefixSuccessor(tc.prefix) if got != tc.succ { t.Errorf("prefixSuccessor(%q) = %q, want %s", tc.prefix, got, tc.succ) continue } r := PrefixRange(tc.prefix) if tc.succ == "" && r.limit != "" { t.Errorf("PrefixRange(%q) got limit %q", tc.prefix, r.limit) } if tc.succ != "" && r.limit != tc.succ { t.Errorf("PrefixRange(%q) got limit %q, want %q", tc.prefix, r.limit, tc.succ) } } } func TestClientIntegration(t *testing.T) { start := time.Now() lastCheckpoint := start checkpoint := func(s string) { n := time.Now() t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint)) lastCheckpoint = n } testEnv, err := NewIntegrationEnv() if err != nil { t.Fatalf("IntegrationEnv: %v", err) } timeout := 30 * time.Second if testEnv.Config().UseProd { timeout = 5 * time.Minute t.Logf("Running test against production") } else { t.Logf("bttest.Server running on %s", testEnv.Config().AdminEndpoint) } ctx, _ := context.WithTimeout(context.Background(), timeout) client, err := testEnv.NewClient() if err != nil { t.Fatalf("Client: %v", err) } defer client.Close() checkpoint("dialed Client") adminClient, err := testEnv.NewAdminClient() if err != nil { t.Fatalf("AdminClient: %v", err) } defer adminClient.Close() checkpoint("dialed AdminClient") table := testEnv.Config().Table // Delete the table at the end of the test. // Do this even before creating the table so that if this is running // against production and CreateTable fails there's a chance of cleaning it up. 
defer adminClient.DeleteTable(ctx, table) if err := adminClient.CreateTable(ctx, table); err != nil { t.Fatalf("Creating table: %v", err) } checkpoint("created table") if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil { t.Fatalf("Creating column family: %v", err) } checkpoint(`created "follows" column family`) tbl := client.Open(table) // Insert some data. initialData := map[string][]string{ "wmckinley": {"tjefferson"}, "gwashington": {"jadams"}, "tjefferson": {"gwashington", "jadams"}, // wmckinley set conditionally below "jadams": {"gwashington", "tjefferson"}, } for row, ss := range initialData { mut := NewMutation() for _, name := range ss { mut.Set("follows", name, 0, []byte("1")) } if err := tbl.Apply(ctx, row, mut); err != nil { t.Errorf("Mutating row %q: %v", row, err) } } checkpoint("inserted initial data") // Do a conditional mutation with a complex filter. mutTrue := NewMutation() mutTrue.Set("follows", "wmckinley", 0, []byte("1")) filter := ChainFilters(ColumnFilter("gwash[iz].*"), ValueFilter(".")) mut := NewCondMutation(filter, mutTrue, nil) if err := tbl.Apply(ctx, "tjefferson", mut); err != nil { t.Errorf("Conditionally mutating row: %v", err) } // Do a second condition mutation with a filter that does not match, // and thus no changes should be made. mutTrue = NewMutation() mutTrue.DeleteRow() filter = ColumnFilter("snoop.dogg") mut = NewCondMutation(filter, mutTrue, nil) if err := tbl.Apply(ctx, "tjefferson", mut); err != nil { t.Errorf("Conditionally mutating row: %v", err) } checkpoint("did two conditional mutations") // Fetch a row. 
row, err := tbl.ReadRow(ctx, "jadams") if err != nil { t.Fatalf("Reading a row: %v", err) } wantRow := Row{ "follows": []ReadItem{ {Row: "jadams", Column: "follows:gwashington", Value: []byte("1")}, {Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")}, }, } if !reflect.DeepEqual(row, wantRow) { t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow) } checkpoint("tested ReadRow") // Do a bunch of reads with filters. readTests := []struct { desc string rr RowSet filter Filter // may be nil limit ReadOption // may be nil // We do the read, grab all the cells, turn them into "--", // and join with a comma. want string }{ { desc: "read all, unfiltered", rr: RowRange{}, want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", }, { desc: "read with InfiniteRange, unfiltered", rr: InfiniteRange("tjefferson"), want: "tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", }, { desc: "read with NewRange, unfiltered", rr: NewRange("gargamel", "hubbard"), want: "gwashington-jadams-1", }, { desc: "read with PrefixRange, unfiltered", rr: PrefixRange("jad"), want: "jadams-gwashington-1,jadams-tjefferson-1", }, { desc: "read with SingleRow, unfiltered", rr: SingleRow("wmckinley"), want: "wmckinley-tjefferson-1", }, { desc: "read all, with ColumnFilter", rr: RowRange{}, filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson" want: "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,wmckinley-tjefferson-1", }, { desc: "read range, with ColumnRangeFilter", rr: RowRange{}, filter: ColumnRangeFilter("follows", "h", "k"), want: "gwashington-jadams-1,tjefferson-jadams-1", }, { desc: "read range from empty, with ColumnRangeFilter", rr: RowRange{}, filter: ColumnRangeFilter("follows", "", "u"), want: 
"gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,wmckinley-tjefferson-1", }, { desc: "read range from start to empty, with ColumnRangeFilter", rr: RowRange{}, filter: ColumnRangeFilter("follows", "h", ""), want: "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", }, { desc: "read with RowKeyFilter", rr: RowRange{}, filter: RowKeyFilter(".*wash.*"), want: "gwashington-jadams-1", }, { desc: "read with RowKeyFilter, no matches", rr: RowRange{}, filter: RowKeyFilter(".*xxx.*"), want: "", }, { desc: "read with FamilyFilter, no matches", rr: RowRange{}, filter: FamilyFilter(".*xxx.*"), want: "", }, { desc: "read with ColumnFilter + row limit", rr: RowRange{}, filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson" limit: LimitRows(2), want: "gwashington-jadams-1,jadams-tjefferson-1", }, { desc: "read all, strip values", rr: RowRange{}, filter: StripValueFilter(), want: "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-", }, { desc: "read with ColumnFilter + row limit + strip values", rr: RowRange{}, filter: ChainFilters(ColumnFilter(".*j.*"), StripValueFilter()), // matches "jadams" and "tjefferson" limit: LimitRows(2), want: "gwashington-jadams-,jadams-tjefferson-", }, { desc: "read with condition, strip values on true", rr: RowRange{}, filter: ConditionFilter(ColumnFilter(".*j.*"), StripValueFilter(), nil), want: "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-", }, { desc: "read with condition, strip values on false", rr: RowRange{}, filter: ConditionFilter(ColumnFilter(".*xxx.*"), nil, StripValueFilter()), want: 
"gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-", }, { desc: "read with ValueRangeFilter + row limit", rr: RowRange{}, filter: ValueRangeFilter([]byte("1"), []byte("5")), // matches our value of "1" limit: LimitRows(2), want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1", }, { desc: "read with ValueRangeFilter, no match on exclusive end", rr: RowRange{}, filter: ValueRangeFilter([]byte("0"), []byte("1")), // no match want: "", }, { desc: "read with ValueRangeFilter, no matches", rr: RowRange{}, filter: ValueRangeFilter([]byte("3"), []byte("5")), // matches nothing want: "", }, { desc: "read with InterleaveFilter, no matches on all filters", rr: RowRange{}, filter: InterleaveFilters(ColumnFilter(".*x.*"), ColumnFilter(".*z.*")), want: "", }, { desc: "read with InterleaveFilter, no duplicate cells", rr: RowRange{}, filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*j.*")), want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,wmckinley-tjefferson-1", }, { desc: "read with InterleaveFilter, with duplicate cells", rr: RowRange{}, filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*g.*")), want: "jadams-gwashington-1,jadams-gwashington-1,tjefferson-gwashington-1,tjefferson-gwashington-1", }, { desc: "read with a RowRangeList and no filter", rr: RowRangeList{NewRange("gargamel", "hubbard"), InfiniteRange("wmckinley")}, want: "gwashington-jadams-1,wmckinley-tjefferson-1", }, { desc: "chain that excludes rows and matches nothing, in a condition", rr: RowRange{}, filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), ColumnFilter(".*mckinley.*")), StripValueFilter(), nil), want: "", }, } for _, tc := range readTests { var opts []ReadOption if tc.filter != nil { opts = append(opts, RowFilter(tc.filter)) } if tc.limit != nil { opts = append(opts, tc.limit) } var elt 
[]string err := tbl.ReadRows(context.Background(), tc.rr, func(r Row) bool { for _, ris := range r { for _, ri := range ris { elt = append(elt, formatReadItem(ri)) } } return true }, opts...) if err != nil { t.Errorf("%s: %v", tc.desc, err) continue } if got := strings.Join(elt, ","); got != tc.want { t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want) } } // Read a RowList var elt []string keys := RowList{"wmckinley", "gwashington", "jadams"} want := "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,wmckinley-tjefferson-1" err = tbl.ReadRows(ctx, keys, func(r Row) bool { for _, ris := range r { for _, ri := range ris { elt = append(elt, formatReadItem(ri)) } } return true }) if err != nil { t.Errorf("read RowList: %v", err) } if got := strings.Join(elt, ","); got != want { t.Errorf("bulk read: wrong reads.\n got %q\nwant %q", got, want) } checkpoint("tested ReadRows in a few ways") // Do a scan and stop part way through. // Verify that the ReadRows callback doesn't keep running. stopped := false err = tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool { if r.Key() < "h" { return true } if !stopped { stopped = true return false } t.Errorf("ReadRows kept scanning to row %q after being told to stop", r.Key()) return false }) if err != nil { t.Errorf("Partial ReadRows: %v", err) } checkpoint("did partial ReadRows test") // Delete a row and check it goes away. mut = NewMutation() mut.DeleteRow() if err := tbl.Apply(ctx, "wmckinley", mut); err != nil { t.Errorf("Apply DeleteRow: %v", err) } row, err = tbl.ReadRow(ctx, "wmckinley") if err != nil { t.Fatalf("Reading a row after DeleteRow: %v", err) } if len(row) != 0 { t.Fatalf("Read non-zero row after DeleteRow: %v", row) } checkpoint("exercised DeleteRow") // Check ReadModifyWrite. 
if err := adminClient.CreateColumnFamily(ctx, table, "counter"); err != nil { t.Fatalf("Creating column family: %v", err) } appendRMW := func(b []byte) *ReadModifyWrite { rmw := NewReadModifyWrite() rmw.AppendValue("counter", "likes", b) return rmw } incRMW := func(n int64) *ReadModifyWrite { rmw := NewReadModifyWrite() rmw.Increment("counter", "likes", n) return rmw } rmwSeq := []struct { desc string rmw *ReadModifyWrite want []byte }{ { desc: "append #1", rmw: appendRMW([]byte{0, 0, 0}), want: []byte{0, 0, 0}, }, { desc: "append #2", rmw: appendRMW([]byte{0, 0, 0, 0, 17}), // the remaining 40 bits to make a big-endian 17 want: []byte{0, 0, 0, 0, 0, 0, 0, 17}, }, { desc: "increment", rmw: incRMW(8), want: []byte{0, 0, 0, 0, 0, 0, 0, 25}, }, } for _, step := range rmwSeq { row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", step.rmw) if err != nil { t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err) } clearTimestamps(row) wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}} if !reflect.DeepEqual(row, wantRow) { t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow) } } checkpoint("tested ReadModifyWrite") // Test arbitrary timestamps more thoroughly. if err := adminClient.CreateColumnFamily(ctx, table, "ts"); err != nil { t.Fatalf("Creating column family: %v", err) } const numVersions = 4 mut = NewMutation() for i := 0; i < numVersions; i++ { // Timestamps are used in thousands because the server // only permits that granularity. mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i))) } if err := tbl.Apply(ctx, "testrow", mut); err != nil { t.Fatalf("Mutating row: %v", err) } r, err := tbl.ReadRow(ctx, "testrow") if err != nil { t.Fatalf("Reading row: %v", err) } wantRow = Row{"ts": []ReadItem{ // These should be returned in descending timestamp order. 
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, {Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")}, }} if !reflect.DeepEqual(r, wantRow) { t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow) } // Do the same read, but filter to the latest two versions. r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2))) if err != nil { t.Fatalf("Reading row: %v", err) } wantRow = Row{"ts": []ReadItem{ {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, }} if !reflect.DeepEqual(r, wantRow) { t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow) } // Check timestamp range filtering (with truncation) r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1001, 3000))) if err != nil { t.Fatalf("Reading row: %v", err) } wantRow = Row{"ts": []ReadItem{ {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, }} if !reflect.DeepEqual(r, wantRow) { t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 3000),\n got %v\nwant %v", r, wantRow) } r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1000, 0))) if err != nil { t.Fatalf("Reading row: %v", err) } wantRow = Row{"ts": []ReadItem{ {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, }} if !reflect.DeepEqual(r, wantRow) { t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 0),\n got %v\nwant %v", r, wantRow) } // Delete 
non-existing cells, no such column family in this row // Should not delete anything if err := adminClient.CreateColumnFamily(ctx, table, "non-existing"); err != nil { t.Fatalf("Creating column family: %v", err) } mut = NewMutation() mut.DeleteTimestampRange("non-existing", "col", 2000, 3000) // half-open interval if err := tbl.Apply(ctx, "testrow", mut); err != nil { t.Fatalf("Mutating row: %v", err) } r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3))) if err != nil { t.Fatalf("Reading row: %v", err) } if !reflect.DeepEqual(r, wantRow) { t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow) } // Delete non-existing cells, no such column in this column family // Should not delete anything mut = NewMutation() mut.DeleteTimestampRange("ts", "non-existing", 2000, 3000) // half-open interval if err := tbl.Apply(ctx, "testrow", mut); err != nil { t.Fatalf("Mutating row: %v", err) } r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3))) if err != nil { t.Fatalf("Reading row: %v", err) } if !reflect.DeepEqual(r, wantRow) { t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow) } // Delete the cell with timestamp 2000 and repeat the last read, // checking that we get ts 3000 and ts 1000. 
mut = NewMutation() mut.DeleteTimestampRange("ts", "col", 2001, 3000) // half-open interval if err := tbl.Apply(ctx, "testrow", mut); err != nil { t.Fatalf("Mutating row: %v", err) } r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2))) if err != nil { t.Fatalf("Reading row: %v", err) } wantRow = Row{"ts": []ReadItem{ {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, }} if !reflect.DeepEqual(r, wantRow) { t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow) } checkpoint("tested multiple versions in a cell") // Check DeleteCellsInFamily if err := adminClient.CreateColumnFamily(ctx, table, "status"); err != nil { t.Fatalf("Creating column family: %v", err) } mut = NewMutation() mut.Set("status", "start", 0, []byte("1")) mut.Set("status", "end", 0, []byte("2")) mut.Set("ts", "col", 0, []byte("3")) if err := tbl.Apply(ctx, "row1", mut); err != nil { t.Errorf("Mutating row: %v", err) } if err := tbl.Apply(ctx, "row2", mut); err != nil { t.Errorf("Mutating row: %v", err) } mut = NewMutation() mut.DeleteCellsInFamily("status") if err := tbl.Apply(ctx, "row1", mut); err != nil { t.Errorf("Delete cf: %v", err) } // ColumnFamily removed r, err = tbl.ReadRow(ctx, "row1") if err != nil { t.Fatalf("Reading row: %v", err) } wantRow = Row{"ts": []ReadItem{ {Row: "row1", Column: "ts:col", Timestamp: 0, Value: []byte("3")}, }} if !reflect.DeepEqual(r, wantRow) { t.Errorf("column family was not deleted.\n got %v\n want %v", r, wantRow) } // ColumnFamily not removed r, err = tbl.ReadRow(ctx, "row2") if err != nil { t.Fatalf("Reading row: %v", err) } wantRow = Row{ "ts": []ReadItem{ {Row: "row2", Column: "ts:col", Timestamp: 0, Value: []byte("3")}, }, "status": []ReadItem{ {Row: "row2", Column: "status:end", Timestamp: 0, Value: []byte("2")}, {Row: "row2", Column: "status:start", Timestamp: 0, Value: 
[]byte("1")}, }, } if !reflect.DeepEqual(r, wantRow) { t.Errorf("Column family was deleted unexpectly.\n got %v\n want %v", r, wantRow) } checkpoint("tested family delete") // Check DeleteCellsInColumn mut = NewMutation() mut.Set("status", "start", 0, []byte("1")) mut.Set("status", "middle", 0, []byte("2")) mut.Set("status", "end", 0, []byte("3")) if err := tbl.Apply(ctx, "row3", mut); err != nil { t.Errorf("Mutating row: %v", err) } mut = NewMutation() mut.DeleteCellsInColumn("status", "middle") if err := tbl.Apply(ctx, "row3", mut); err != nil { t.Errorf("Delete column: %v", err) } r, err = tbl.ReadRow(ctx, "row3") if err != nil { t.Fatalf("Reading row: %v", err) } wantRow = Row{ "status": []ReadItem{ {Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")}, {Row: "row3", Column: "status:start", Timestamp: 0, Value: []byte("1")}, }, } if !reflect.DeepEqual(r, wantRow) { t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow) } mut = NewMutation() mut.DeleteCellsInColumn("status", "start") if err := tbl.Apply(ctx, "row3", mut); err != nil { t.Errorf("Delete column: %v", err) } r, err = tbl.ReadRow(ctx, "row3") if err != nil { t.Fatalf("Reading row: %v", err) } wantRow = Row{ "status": []ReadItem{ {Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")}, }, } if !reflect.DeepEqual(r, wantRow) { t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow) } mut = NewMutation() mut.DeleteCellsInColumn("status", "end") if err := tbl.Apply(ctx, "row3", mut); err != nil { t.Errorf("Delete column: %v", err) } r, err = tbl.ReadRow(ctx, "row3") if err != nil { t.Fatalf("Reading row: %v", err) } if len(r) != 0 { t.Errorf("Delete column: got %v, want empty row", r) } // Add same cell after delete mut = NewMutation() mut.Set("status", "end", 0, []byte("3")) if err := tbl.Apply(ctx, "row3", mut); err != nil { t.Errorf("Mutating row: %v", err) } r, err = tbl.ReadRow(ctx, "row3") if err != nil { t.Fatalf("Reading row: %v", err) } if 
!reflect.DeepEqual(r, wantRow) { t.Errorf("Column was not deleted correctly.\n got %v\n want %v", r, wantRow) } checkpoint("tested column delete") // Do highly concurrent reads/writes. // TODO(dsymonds): Raise this to 1000 when https://github.com/grpc/grpc-go/issues/205 is resolved. const maxConcurrency = 100 var wg sync.WaitGroup for i := 0; i < maxConcurrency; i++ { wg.Add(1) go func() { defer wg.Done() switch r := rand.Intn(100); { // r ∈ [0,100) case 0 <= r && r < 30: // Do a read. _, err := tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(1))) if err != nil { t.Errorf("Concurrent read: %v", err) } case 30 <= r && r < 100: // Do a write. mut := NewMutation() mut.Set("ts", "col", 0, []byte("data")) if err := tbl.Apply(ctx, "testrow", mut); err != nil { t.Errorf("Concurrent write: %v", err) } } }() } wg.Wait() checkpoint("tested high concurrency") // Large reads, writes and scans. bigBytes := make([]byte, 3<<20) // 3 MB is large, but less than current gRPC max of 4 MB. nonsense := []byte("lorem ipsum dolor sit amet, ") fill(bigBytes, nonsense) mut = NewMutation() mut.Set("ts", "col", 0, bigBytes) if err := tbl.Apply(ctx, "bigrow", mut); err != nil { t.Errorf("Big write: %v", err) } r, err = tbl.ReadRow(ctx, "bigrow") if err != nil { t.Errorf("Big read: %v", err) } wantRow = Row{"ts": []ReadItem{ {Row: "bigrow", Column: "ts:col", Value: bigBytes}, }} if !reflect.DeepEqual(r, wantRow) { t.Errorf("Big read returned incorrect bytes: %v", r) } // Now write 1000 rows, each with 82 KB values, then scan them all. medBytes := make([]byte, 82<<10) fill(medBytes, nonsense) sem := make(chan int, 50) // do up to 50 mutations at a time. 
for i := 0; i < 1000; i++ { mut := NewMutation() mut.Set("ts", "big-scan", 0, medBytes) row := fmt.Sprintf("row-%d", i) wg.Add(1) go func() { defer wg.Done() defer func() { <-sem }() sem <- 1 if err := tbl.Apply(ctx, row, mut); err != nil { t.Errorf("Preparing large scan: %v", err) } }() } wg.Wait() n := 0 err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool { for _, ris := range r { for _, ri := range ris { n += len(ri.Value) } } return true }, RowFilter(ColumnFilter("big-scan"))) if err != nil { t.Errorf("Doing large scan: %v", err) } if want := 1000 * len(medBytes); n != want { t.Errorf("Large scan returned %d bytes, want %d", n, want) } // Scan a subset of the 1000 rows that we just created, using a LimitRows ReadOption. rc := 0 wantRc := 3 err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool { rc++ return true }, LimitRows(int64(wantRc))) if rc != wantRc { t.Errorf("Scan with row limit returned %d rows, want %d", rc, wantRc) } checkpoint("tested big read/write/scan") // Test bulk mutations if err := adminClient.CreateColumnFamily(ctx, table, "bulk"); err != nil { t.Fatalf("Creating column family: %v", err) } bulkData := map[string][]string{ "red sox": {"2004", "2007", "2013"}, "patriots": {"2001", "2003", "2004", "2014"}, "celtics": {"1981", "1984", "1986", "2008"}, } var rowKeys []string var muts []*Mutation for row, ss := range bulkData { mut := NewMutation() for _, name := range ss { mut.Set("bulk", name, 0, []byte("1")) } rowKeys = append(rowKeys, row) muts = append(muts, mut) } status, err := tbl.ApplyBulk(ctx, rowKeys, muts) if err != nil { t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err) } if status != nil { t.Errorf("non-nil errors: %v", err) } checkpoint("inserted bulk data") // Read each row back for rowKey, ss := range bulkData { row, err := tbl.ReadRow(ctx, rowKey) if err != nil { t.Fatalf("Reading a bulk row: %v", err) } var wantItems []ReadItem for _, val := range ss { wantItems = append(wantItems, ReadItem{Row: rowKey, 
Column: "bulk:" + val, Value: []byte("1")}) } wantRow := Row{"bulk": wantItems} if !reflect.DeepEqual(row, wantRow) { t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow) } } checkpoint("tested reading from bulk insert") // Test bulk write errors. // Note: Setting timestamps as ServerTime makes sure the mutations are not retried on error. badMut := NewMutation() badMut.Set("badfamily", "col", ServerTime, nil) badMut2 := NewMutation() badMut2.Set("badfamily2", "goodcol", ServerTime, []byte("1")) status, err = tbl.ApplyBulk(ctx, []string{"badrow", "badrow2"}, []*Mutation{badMut, badMut2}) if err != nil { t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err) } if status == nil { t.Errorf("No errors for bad bulk mutation") } else if status[0] == nil || status[1] == nil { t.Errorf("No error for bad bulk mutation") } } func formatReadItem(ri ReadItem) string { // Use the column qualifier only to make the test data briefer. col := ri.Column[strings.Index(ri.Column, ":")+1:] return fmt.Sprintf("%s-%s-%s", ri.Row, col, ri.Value) } func fill(b, sub []byte) { for len(b) > len(sub) { n := copy(b, sub) b = b[n:] } } func clearTimestamps(r Row) { for _, ris := range r { for i := range ris { ris[i].Timestamp = 0 } } } golang-google-cloud-0.9.0/bigtable/bttest/000077500000000000000000000000001312234511600204215ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigtable/bttest/example_test.go000066400000000000000000000036771312234511600234570ustar00rootroot00000000000000/* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package bttest_test import ( "fmt" "log" "cloud.google.com/go/bigtable" "cloud.google.com/go/bigtable/bttest" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/grpc" ) func ExampleNewServer() { srv, err := bttest.NewServer("127.0.0.1:0") if err != nil { log.Fatalln(err) } ctx := context.Background() conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) if err != nil { log.Fatalln(err) } proj, instance := "proj", "instance" adminClient, err := bigtable.NewAdminClient(ctx, proj, instance, option.WithGRPCConn(conn)) if err != nil { log.Fatalln(err) } if err = adminClient.CreateTable(ctx, "example"); err != nil { log.Fatalln(err) } if err = adminClient.CreateColumnFamily(ctx, "example", "links"); err != nil { log.Fatalln(err) } client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn)) if err != nil { log.Fatalln(err) } tbl := client.Open("example") mut := bigtable.NewMutation() mut.Set("links", "golang.org", bigtable.Now(), []byte("Gophers!")) if err = tbl.Apply(ctx, "com.google.cloud", mut); err != nil { log.Fatalln(err) } if row, err := tbl.ReadRow(ctx, "com.google.cloud"); err != nil { log.Fatalln(err) } else { for _, column := range row["links"] { fmt.Println(column.Column) fmt.Println(string(column.Value)) } } // Output: // links:golang.org // Gophers! } golang-google-cloud-0.9.0/bigtable/bttest/inmem.go000066400000000000000000001013101312234511600220510ustar00rootroot00000000000000/* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Package bttest contains test helpers for working with the bigtable package. To use a Server, create it, and then connect to it with no security: (The project/instance values are ignored.) srv, err := bttest.NewServer("127.0.0.1:0") ... conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) ... client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn)) ... */ package bttest // import "cloud.google.com/go/bigtable/bttest" import ( "encoding/binary" "fmt" "log" "math/rand" "net" "regexp" "sort" "strings" "sync" "time" "bytes" emptypb "github.com/golang/protobuf/ptypes/empty" "github.com/golang/protobuf/ptypes/wrappers" "golang.org/x/net/context" btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" btpb "google.golang.org/genproto/googleapis/bigtable/v2" statpb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // Server is an in-memory Cloud Bigtable fake. // It is unauthenticated, and only a rough approximation. type Server struct { Addr string l net.Listener srv *grpc.Server s *server } // server is the real implementation of the fake. // It is a separate and unexported type so the API won't be cluttered with // methods that are only relevant to the fake's implementation. type server struct { mu sync.Mutex tables map[string]*table // keyed by fully qualified name gcc chan int // set when gcloop starts, closed when server shuts down // Any unimplemented methods will cause a panic. btapb.BigtableTableAdminServer btpb.BigtableServer } // NewServer creates a new Server. 
// The Server will be listening for gRPC connections, without TLS, // on the provided address. The resolved address is named by the Addr field. func NewServer(laddr string, opt ...grpc.ServerOption) (*Server, error) { l, err := net.Listen("tcp", laddr) if err != nil { return nil, err } s := &Server{ Addr: l.Addr().String(), l: l, srv: grpc.NewServer(opt...), s: &server{ tables: make(map[string]*table), }, } btapb.RegisterBigtableTableAdminServer(s.srv, s.s) btpb.RegisterBigtableServer(s.srv, s.s) go s.srv.Serve(s.l) return s, nil } // Close shuts down the server. func (s *Server) Close() { s.s.mu.Lock() if s.s.gcc != nil { close(s.s.gcc) } s.s.mu.Unlock() s.srv.Stop() s.l.Close() } func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest) (*btapb.Table, error) { tbl := req.Parent + "/tables/" + req.TableId s.mu.Lock() if _, ok := s.tables[tbl]; ok { s.mu.Unlock() return nil, grpc.Errorf(codes.AlreadyExists, "table %q already exists", tbl) } s.tables[tbl] = newTable(req) s.mu.Unlock() return &btapb.Table{Name: tbl}, nil } func (s *server) ListTables(ctx context.Context, req *btapb.ListTablesRequest) (*btapb.ListTablesResponse, error) { res := &btapb.ListTablesResponse{} prefix := req.Parent + "/tables/" s.mu.Lock() for tbl := range s.tables { if strings.HasPrefix(tbl, prefix) { res.Tables = append(res.Tables, &btapb.Table{Name: tbl}) } } s.mu.Unlock() return res, nil } func (s *server) GetTable(ctx context.Context, req *btapb.GetTableRequest) (*btapb.Table, error) { tbl := req.Name s.mu.Lock() tblIns, ok := s.tables[tbl] s.mu.Unlock() if !ok { return nil, grpc.Errorf(codes.NotFound, "table %q not found", tbl) } return &btapb.Table{ Name: tbl, ColumnFamilies: toColumnFamilies(tblIns.columnFamilies()), }, nil } func (s *server) DeleteTable(ctx context.Context, req *btapb.DeleteTableRequest) (*emptypb.Empty, error) { s.mu.Lock() defer s.mu.Unlock() if _, ok := s.tables[req.Name]; !ok { return nil, grpc.Errorf(codes.NotFound, "table %q not found", 
req.Name) } delete(s.tables, req.Name) return &emptypb.Empty{}, nil } func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColumnFamiliesRequest) (*btapb.Table, error) { tblName := req.Name[strings.LastIndex(req.Name, "/")+1:] s.mu.Lock() tbl, ok := s.tables[req.Name] s.mu.Unlock() if !ok { return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name) } tbl.mu.Lock() defer tbl.mu.Unlock() for _, mod := range req.Modifications { if create := mod.GetCreate(); create != nil { if _, ok := tbl.families[mod.Id]; ok { return nil, grpc.Errorf(codes.AlreadyExists, "family %q already exists", mod.Id) } newcf := &columnFamily{ name: req.Name + "/columnFamilies/" + mod.Id, order: tbl.counter, gcRule: create.GcRule, } tbl.counter++ tbl.families[mod.Id] = newcf } else if mod.GetDrop() { if _, ok := tbl.families[mod.Id]; !ok { return nil, fmt.Errorf("can't delete unknown family %q", mod.Id) } delete(tbl.families, mod.Id) } else if modify := mod.GetUpdate(); modify != nil { if _, ok := tbl.families[mod.Id]; !ok { return nil, fmt.Errorf("no such family %q", mod.Id) } newcf := &columnFamily{ name: req.Name + "/columnFamilies/" + mod.Id, gcRule: modify.GcRule, } // assume that we ALWAYS want to replace by the new setting // we may need partial update through tbl.families[mod.Id] = newcf } } s.needGC() return &btapb.Table{ Name: tblName, ColumnFamilies: toColumnFamilies(tbl.families), }, nil } func (s *server) DropRowRange(ctx context.Context, req *btapb.DropRowRangeRequest) (*emptypb.Empty, error) { s.mu.Lock() defer s.mu.Unlock() tbl, ok := s.tables[req.Name] if !ok { return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name) } if req.GetDeleteAllDataFromTable() { tbl.rows = nil tbl.rowIndex = make(map[string]*row) } else { // Delete rows by prefix prefixBytes := req.GetRowKeyPrefix() if prefixBytes == nil { return nil, fmt.Errorf("missing row key prefix") } prefix := string(prefixBytes) start := -1 end := 0 for i, row := range tbl.rows 
{ match := strings.HasPrefix(row.key, prefix) if match { // Delete the mapping. Row will be deleted from sorted range below. delete(tbl.rowIndex, row.key) } if match && start == -1 { start = i } else if !match && start != -1 { break } end++ } if start != -1 { // Delete the range, using method from https://github.com/golang/go/wiki/SliceTricks copy(tbl.rows[start:], tbl.rows[end:]) for k, n := len(tbl.rows)-end+start, len(tbl.rows); k < n; k++ { tbl.rows[k] = nil } tbl.rows = tbl.rows[:len(tbl.rows)-end+start] } } return &emptypb.Empty{}, nil } func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRowsServer) error { s.mu.Lock() tbl, ok := s.tables[req.TableName] s.mu.Unlock() if !ok { return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) } // Rows to read can be specified by a set of row keys and/or a set of row ranges. // Output is a stream of sorted, de-duped rows. tbl.mu.RLock() rowSet := make(map[string]*row) if req.Rows != nil { // Add the explicitly given keys for _, key := range req.Rows.RowKeys { start := string(key) addRows(start, start+"\x00", tbl, rowSet) } // Add keys from row ranges for _, rr := range req.Rows.RowRanges { var start, end string switch sk := rr.StartKey.(type) { case *btpb.RowRange_StartKeyClosed: start = string(sk.StartKeyClosed) case *btpb.RowRange_StartKeyOpen: start = string(sk.StartKeyOpen) + "\x00" } switch ek := rr.EndKey.(type) { case *btpb.RowRange_EndKeyClosed: end = string(ek.EndKeyClosed) + "\x00" case *btpb.RowRange_EndKeyOpen: end = string(ek.EndKeyOpen) } addRows(start, end, tbl, rowSet) } } else { // Read all rows addRows("", "", tbl, rowSet) } tbl.mu.RUnlock() rows := make([]*row, 0, len(rowSet)) for _, r := range rowSet { rows = append(rows, r) } sort.Sort(byRowKey(rows)) limit := int(req.RowsLimit) count := 0 for _, r := range rows { if limit > 0 && count >= limit { return nil } streamed, err := streamRow(stream, r, req.Filter) if err != nil { return err } if streamed { count++ 
} } return nil } func addRows(start, end string, tbl *table, rowSet map[string]*row) { si, ei := 0, len(tbl.rows) // half-open interval if start != "" { si = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= start }) } if end != "" { ei = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= end }) } if si < ei { for _, row := range tbl.rows[si:ei] { rowSet[row.key] = row } } } // streamRow filters the given row and sends it via the given stream. // Returns true if at least one cell matched the filter and was streamed, false otherwise. func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) (bool, error) { r.mu.Lock() nr := r.copy() r.mu.Unlock() r = nr if !filterRow(f, r) { return false, nil } rrr := &btpb.ReadRowsResponse{} families := r.sortedFamilies() for _, fam := range families { for _, colName := range fam.colNames { cells := fam.cells[colName] if len(cells) == 0 { continue } // TODO(dsymonds): Apply transformers. for _, cell := range cells { rrr.Chunks = append(rrr.Chunks, &btpb.ReadRowsResponse_CellChunk{ RowKey: []byte(r.key), FamilyName: &wrappers.StringValue{Value: fam.name}, Qualifier: &wrappers.BytesValue{Value: []byte(colName)}, TimestampMicros: cell.ts, Value: cell.value, }) } } } // We can't have a cell with just COMMIT set, which would imply a new empty cell. // So modify the last cell to have the COMMIT flag set. if len(rrr.Chunks) > 0 { rrr.Chunks[len(rrr.Chunks)-1].RowStatus = &btpb.ReadRowsResponse_CellChunk_CommitRow{true} } return true, stream.Send(rrr) } // filterRow modifies a row with the given filter. Returns true if at least one cell from the row matches, // false otherwise. func filterRow(f *btpb.RowFilter, r *row) bool { if f == nil { return true } // Handle filters that apply beyond just including/excluding cells. 
switch f := f.Filter.(type) { case *btpb.RowFilter_Chain_: for _, sub := range f.Chain.Filters { if !filterRow(sub, r) { return false } } return true case *btpb.RowFilter_Interleave_: srs := make([]*row, 0, len(f.Interleave.Filters)) for _, sub := range f.Interleave.Filters { sr := r.copy() filterRow(sub, sr) srs = append(srs, sr) } // merge // TODO(dsymonds): is this correct? r.families = make(map[string]*family) for _, sr := range srs { for _, fam := range sr.families { f := r.getOrCreateFamily(fam.name, fam.order) for colName, cs := range fam.cells { f.cells[colName] = append(f.cellsByColumn(colName), cs...) } } } for _, fam := range r.families { for _, cs := range fam.cells { sort.Sort(byDescTS(cs)) } } return true case *btpb.RowFilter_CellsPerColumnLimitFilter: lim := int(f.CellsPerColumnLimitFilter) for _, fam := range r.families { for col, cs := range fam.cells { if len(cs) > lim { fam.cells[col] = cs[:lim] } } } return true case *btpb.RowFilter_Condition_: if filterRow(f.Condition.PredicateFilter, r.copy()) { if f.Condition.TrueFilter == nil { return false } return filterRow(f.Condition.TrueFilter, r) } if f.Condition.FalseFilter == nil { return false } return filterRow(f.Condition.FalseFilter, r) case *btpb.RowFilter_RowKeyRegexFilter: pat := string(f.RowKeyRegexFilter) rx, err := regexp.Compile(pat) if err != nil { log.Printf("Bad rowkey_regex_filter pattern %q: %v", pat, err) return false } if !rx.MatchString(r.key) { return false } } // Any other case, operate on a per-cell basis. 
cellCount := 0 for _, fam := range r.families { for colName, cs := range fam.cells { fam.cells[colName] = filterCells(f, fam.name, colName, cs) cellCount += len(fam.cells[colName]) } } return cellCount > 0 } func filterCells(f *btpb.RowFilter, fam, col string, cs []cell) []cell { var ret []cell for _, cell := range cs { if includeCell(f, fam, col, cell) { cell = modifyCell(f, cell) ret = append(ret, cell) } } return ret } func modifyCell(f *btpb.RowFilter, c cell) cell { if f == nil { return c } // Consider filters that may modify the cell contents switch f.Filter.(type) { case *btpb.RowFilter_StripValueTransformer: return cell{ts: c.ts} default: return c } } func includeCell(f *btpb.RowFilter, fam, col string, cell cell) bool { if f == nil { return true } // TODO(dsymonds): Implement many more filters. switch f := f.Filter.(type) { case *btpb.RowFilter_CellsPerColumnLimitFilter: // Don't log, row-level filter return true case *btpb.RowFilter_RowKeyRegexFilter: // Don't log, row-level filter return true case *btpb.RowFilter_StripValueTransformer: // Don't log, cell-modifying filter return true default: log.Printf("WARNING: don't know how to handle filter of type %T (ignoring it)", f) return true case *btpb.RowFilter_FamilyNameRegexFilter: pat := string(f.FamilyNameRegexFilter) rx, err := regexp.Compile(pat) if err != nil { log.Printf("Bad family_name_regex_filter pattern %q: %v", pat, err) return false } return rx.MatchString(fam) case *btpb.RowFilter_ColumnQualifierRegexFilter: pat := string(f.ColumnQualifierRegexFilter) rx, err := regexp.Compile(pat) if err != nil { log.Printf("Bad column_qualifier_regex_filter pattern %q: %v", pat, err) return false } return rx.MatchString(col) case *btpb.RowFilter_ValueRegexFilter: pat := string(f.ValueRegexFilter) rx, err := regexp.Compile(pat) if err != nil { log.Printf("Bad value_regex_filter pattern %q: %v", pat, err) return false } return rx.Match(cell.value) case *btpb.RowFilter_ColumnRangeFilter: if fam != 
f.ColumnRangeFilter.FamilyName { return false } // Start qualifier defaults to empty string closed inRangeStart := func() bool { return col >= "" } switch sq := f.ColumnRangeFilter.StartQualifier.(type) { case *btpb.ColumnRange_StartQualifierOpen: inRangeStart = func() bool { return col > string(sq.StartQualifierOpen) } case *btpb.ColumnRange_StartQualifierClosed: inRangeStart = func() bool { return col >= string(sq.StartQualifierClosed) } } // End qualifier defaults to no upper boundary inRangeEnd := func() bool { return true } switch eq := f.ColumnRangeFilter.EndQualifier.(type) { case *btpb.ColumnRange_EndQualifierClosed: inRangeEnd = func() bool { return col <= string(eq.EndQualifierClosed) } case *btpb.ColumnRange_EndQualifierOpen: inRangeEnd = func() bool { return col < string(eq.EndQualifierOpen) } } return inRangeStart() && inRangeEnd() case *btpb.RowFilter_TimestampRangeFilter: // Lower bound is inclusive and defaults to 0, upper bound is exclusive and defaults to infinity. return cell.ts >= f.TimestampRangeFilter.StartTimestampMicros && (f.TimestampRangeFilter.EndTimestampMicros == 0 || cell.ts < f.TimestampRangeFilter.EndTimestampMicros) case *btpb.RowFilter_ValueRangeFilter: v := cell.value // Start value defaults to empty string closed inRangeStart := func() bool { return bytes.Compare(v, []byte{}) >= 0 } switch sv := f.ValueRangeFilter.StartValue.(type) { case *btpb.ValueRange_StartValueOpen: inRangeStart = func() bool { return bytes.Compare(v, sv.StartValueOpen) > 0 } case *btpb.ValueRange_StartValueClosed: inRangeStart = func() bool { return bytes.Compare(v, sv.StartValueClosed) >= 0 } } // End value defaults to no upper boundary inRangeEnd := func() bool { return true } switch ev := f.ValueRangeFilter.EndValue.(type) { case *btpb.ValueRange_EndValueClosed: inRangeEnd = func() bool { return bytes.Compare(v, ev.EndValueClosed) <= 0 } case *btpb.ValueRange_EndValueOpen: inRangeEnd = func() bool { return bytes.Compare(v, ev.EndValueOpen) < 0 } } return 
inRangeStart() && inRangeEnd() } } func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*btpb.MutateRowResponse, error) { s.mu.Lock() tbl, ok := s.tables[req.TableName] s.mu.Unlock() if !ok { return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) } fs := tbl.columnFamilies() r := tbl.mutableRow(string(req.RowKey)) r.mu.Lock() defer tbl.resortRowIndex() // Make sure the row lock is released before this grabs the table lock defer r.mu.Unlock() if err := applyMutations(tbl, r, req.Mutations, fs); err != nil { return nil, err } return &btpb.MutateRowResponse{}, nil } func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_MutateRowsServer) error { s.mu.Lock() tbl, ok := s.tables[req.TableName] s.mu.Unlock() if !ok { return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) } res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))} fs := tbl.columnFamilies() defer tbl.resortRowIndex() for i, entry := range req.Entries { r := tbl.mutableRow(string(entry.RowKey)) r.mu.Lock() code, msg := int32(codes.OK), "" if err := applyMutations(tbl, r, entry.Mutations, fs); err != nil { code = int32(codes.Internal) msg = err.Error() } res.Entries[i] = &btpb.MutateRowsResponse_Entry{ Index: int64(i), Status: &statpb.Status{Code: code, Message: msg}, } r.mu.Unlock() } stream.Send(res) return nil } func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutateRowRequest) (*btpb.CheckAndMutateRowResponse, error) { s.mu.Lock() tbl, ok := s.tables[req.TableName] s.mu.Unlock() if !ok { return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) } res := &btpb.CheckAndMutateRowResponse{} fs := tbl.columnFamilies() r := tbl.mutableRow(string(req.RowKey)) r.mu.Lock() defer r.mu.Unlock() // Figure out which mutation to apply. whichMut := false if req.PredicateFilter == nil { // Use true_mutations iff row contains any cells. 
whichMut = !r.isEmpty() } else { // Use true_mutations iff any cells in the row match the filter. // TODO(dsymonds): This could be cheaper. nr := r.copy() filterRow(req.PredicateFilter, nr) whichMut = !nr.isEmpty() // TODO(dsymonds): Figure out if this is supposed to be set // even when there's no predicate filter. res.PredicateMatched = whichMut } muts := req.FalseMutations if whichMut { muts = req.TrueMutations } defer tbl.resortRowIndex() if err := applyMutations(tbl, r, muts, fs); err != nil { return nil, err } return res, nil } // applyMutations applies a sequence of mutations to a row. // fam should be a snapshot of the keys of tbl.families. // It assumes r.mu is locked. func applyMutations(tbl *table, r *row, muts []*btpb.Mutation, fs map[string]*columnFamily) error { for _, mut := range muts { switch mut := mut.Mutation.(type) { default: return fmt.Errorf("can't handle mutation type %T", mut) case *btpb.Mutation_SetCell_: set := mut.SetCell if _, ok := fs[set.FamilyName]; !ok { return fmt.Errorf("unknown family %q", set.FamilyName) } ts := set.TimestampMicros if ts == -1 { // bigtable.ServerTime ts = newTimestamp() } if !tbl.validTimestamp(ts) { return fmt.Errorf("invalid timestamp %d", ts) } fam := set.FamilyName col := string(set.ColumnQualifier) newCell := cell{ts: ts, value: set.Value} f := r.getOrCreateFamily(fam, fs[fam].order) f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell) case *btpb.Mutation_DeleteFromColumn_: del := mut.DeleteFromColumn if _, ok := fs[del.FamilyName]; !ok { return fmt.Errorf("unknown family %q", del.FamilyName) } fam := del.FamilyName col := string(del.ColumnQualifier) if _, ok := r.families[fam]; ok { cs := r.families[fam].cells[col] if del.TimeRange != nil { tsr := del.TimeRange if !tbl.validTimestamp(tsr.StartTimestampMicros) { return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros) } if !tbl.validTimestamp(tsr.EndTimestampMicros) { return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros) 
} // Find half-open interval to remove. // Cells are in descending timestamp order, // so the predicates to sort.Search are inverted. si, ei := 0, len(cs) if tsr.StartTimestampMicros > 0 { ei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros }) } if tsr.EndTimestampMicros > 0 { si = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros }) } if si < ei { copy(cs[si:], cs[ei:]) cs = cs[:len(cs)-(ei-si)] } } else { cs = nil } if len(cs) == 0 { delete(r.families[fam].cells, col) colNames := r.families[fam].colNames i := sort.Search(len(colNames), func(i int) bool { return colNames[i] >= col }) if i < len(colNames) && colNames[i] == col { r.families[fam].colNames = append(colNames[:i], colNames[i+1:]...) } if len(r.families[fam].cells) == 0 { delete(r.families, fam) } } else { r.families[fam].cells[col] = cs } } case *btpb.Mutation_DeleteFromRow_: r.families = make(map[string]*family) case *btpb.Mutation_DeleteFromFamily_: fampre := mut.DeleteFromFamily.FamilyName delete(r.families, fampre) } } return nil } func maxTimestamp(x, y int64) int64 { if x > y { return x } return y } func newTimestamp() int64 { ts := time.Now().UnixNano() / 1e3 ts -= ts % 1000 // round to millisecond granularity return ts } func appendOrReplaceCell(cs []cell, newCell cell) []cell { replaced := false for i, cell := range cs { if cell.ts == newCell.ts { cs[i] = newCell replaced = true break } } if !replaced { cs = append(cs, newCell) } sort.Sort(byDescTS(cs)) return cs } func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWriteRowRequest) (*btpb.ReadModifyWriteRowResponse, error) { s.mu.Lock() tbl, ok := s.tables[req.TableName] s.mu.Unlock() if !ok { return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) } updates := make(map[string]cell) // copy of updated cells; keyed by full column name fs := tbl.columnFamilies() r := tbl.mutableRow(string(req.RowKey)) r.mu.Lock() defer r.mu.Unlock() 
// Assume all mutations apply to the most recent version of the cell. // TODO(dsymonds): Verify this assumption and document it in the proto. for _, rule := range req.Rules { if _, ok := fs[rule.FamilyName]; !ok { return nil, fmt.Errorf("unknown family %q", rule.FamilyName) } fam := rule.FamilyName col := string(rule.ColumnQualifier) isEmpty := false f := r.getOrCreateFamily(fam, fs[fam].order) cs := f.cells[col] isEmpty = len(cs) == 0 ts := newTimestamp() var newCell, prevCell cell if !isEmpty { cells := r.families[fam].cells[col] prevCell = cells[0] // ts is the max of now or the prev cell's timestamp in case the // prev cell is in the future ts = maxTimestamp(ts, prevCell.ts) } switch rule := rule.Rule.(type) { default: return nil, fmt.Errorf("unknown RMW rule oneof %T", rule) case *btpb.ReadModifyWriteRule_AppendValue: newCell = cell{ts: ts, value: append(prevCell.value, rule.AppendValue...)} case *btpb.ReadModifyWriteRule_IncrementAmount: var v int64 if !isEmpty { prevVal := prevCell.value if len(prevVal) != 8 { return nil, fmt.Errorf("increment on non-64-bit value") } v = int64(binary.BigEndian.Uint64(prevVal)) } v += rule.IncrementAmount var val [8]byte binary.BigEndian.PutUint64(val[:], uint64(v)) newCell = cell{ts: ts, value: val[:]} } key := strings.Join([]string{fam, col}, ":") updates[key] = newCell f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell) } res := &btpb.Row{ Key: req.RowKey, } for col, cell := range updates { i := strings.Index(col, ":") fam, qual := col[:i], col[i+1:] var f *btpb.Family for _, ff := range res.Families { if ff.Name == fam { f = ff break } } if f == nil { f = &btpb.Family{Name: fam} res.Families = append(res.Families, f) } f.Columns = append(f.Columns, &btpb.Column{ Qualifier: []byte(qual), Cells: []*btpb.Cell{{ Value: cell.value, }}, }) } return &btpb.ReadModifyWriteRowResponse{Row: res}, nil } func (s *server) SampleRowKeys(req *btpb.SampleRowKeysRequest, stream btpb.Bigtable_SampleRowKeysServer) error { 
s.mu.Lock() tbl, ok := s.tables[req.TableName] s.mu.Unlock() if !ok { return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) } tbl.mu.RLock() defer tbl.mu.RUnlock() // The return value of SampleRowKeys is very loosely defined. Return at least the // final row key in the table and choose other row keys randomly. var offset int64 for i, row := range tbl.rows { if i == len(tbl.rows)-1 || rand.Int31n(100) == 0 { resp := &btpb.SampleRowKeysResponse{ RowKey: []byte(row.key), OffsetBytes: offset, } err := stream.Send(resp) if err != nil { return err } } offset += int64(row.size()) } return nil } // needGC is invoked whenever the server needs gcloop running. func (s *server) needGC() { s.mu.Lock() if s.gcc == nil { s.gcc = make(chan int) go s.gcloop(s.gcc) } s.mu.Unlock() } func (s *server) gcloop(done <-chan int) { const ( minWait = 500 // ms maxWait = 1500 // ms ) for { // Wait for a random time interval. d := time.Duration(minWait+rand.Intn(maxWait-minWait)) * time.Millisecond select { case <-time.After(d): case <-done: return // server has been closed } // Do a GC pass over all tables. var tables []*table s.mu.Lock() for _, tbl := range s.tables { tables = append(tables, tbl) } s.mu.Unlock() for _, tbl := range tables { tbl.gc() } } } type table struct { mu sync.RWMutex counter uint64 // increment by 1 when a new family is created families map[string]*columnFamily // keyed by plain family name rows []*row // sorted by row key rowIndex map[string]*row // indexed by row key } func newTable(ctr *btapb.CreateTableRequest) *table { fams := make(map[string]*columnFamily) c := uint64(0) if ctr.Table != nil { for id, cf := range ctr.Table.ColumnFamilies { fams[id] = &columnFamily{ name: ctr.Parent + "/columnFamilies/" + id, order: c, gcRule: cf.GcRule, } c++ } } return &table{ families: fams, counter: c, rowIndex: make(map[string]*row), } } func (t *table) validTimestamp(ts int64) bool { // Assume millisecond granularity is required. 
return ts%1000 == 0 } func (t *table) columnFamilies() map[string]*columnFamily { cp := make(map[string]*columnFamily) t.mu.RLock() for fam, cf := range t.families { cp[fam] = cf } t.mu.RUnlock() return cp } func (t *table) mutableRow(row string) *row { // Try fast path first. t.mu.RLock() r := t.rowIndex[row] t.mu.RUnlock() if r != nil { return r } // We probably need to create the row. t.mu.Lock() r = t.rowIndex[row] if r == nil { r = newRow(row) t.rowIndex[row] = r t.rows = append(t.rows, r) } t.mu.Unlock() return r } func (t *table) resortRowIndex() { t.mu.Lock() sort.Sort(byRowKey(t.rows)) t.mu.Unlock() } func (t *table) gc() { // This method doesn't add or remove rows, so we only need a read lock for the table. t.mu.RLock() defer t.mu.RUnlock() // Gather GC rules we'll apply. rules := make(map[string]*btapb.GcRule) // keyed by "fam" for fam, cf := range t.families { if cf.gcRule != nil { rules[fam] = cf.gcRule } } if len(rules) == 0 { return } for _, r := range t.rows { r.mu.Lock() r.gc(rules) r.mu.Unlock() } } type byRowKey []*row func (b byRowKey) Len() int { return len(b) } func (b byRowKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byRowKey) Less(i, j int) bool { return b[i].key < b[j].key } type row struct { key string mu sync.Mutex families map[string]*family // keyed by family name } func newRow(key string) *row { return &row{ key: key, families: make(map[string]*family), } } // copy returns a copy of the row. // Cell values are aliased. // r.mu should be held. func (r *row) copy() *row { nr := newRow(r.key) for _, fam := range r.families { nr.families[fam.name] = &family{ name: fam.name, order: fam.order, colNames: fam.colNames, cells: make(map[string][]cell), } for col, cs := range fam.cells { // Copy the []cell slice, but not the []byte inside each cell. nr.families[fam.name].cells[col] = append([]cell(nil), cs...) 
} } return nr } // isEmpty returns true if a row doesn't contain any cell func (r *row) isEmpty() bool { for _, fam := range r.families { for _, cs := range fam.cells { if len(cs) > 0 { return false } } } return true } // sortedFamilies returns a column family set // sorted in ascending creation order in a row. func (r *row) sortedFamilies() []*family { var families []*family for _, fam := range r.families { families = append(families, fam) } sort.Sort(byCreationOrder(families)) return families } func (r *row) getOrCreateFamily(name string, order uint64) *family { if _, ok := r.families[name]; !ok { r.families[name] = &family{ name: name, order: order, cells: make(map[string][]cell), } } return r.families[name] } // gc applies the given GC rules to the row. // r.mu should be held. func (r *row) gc(rules map[string]*btapb.GcRule) { for _, fam := range r.families { rule, ok := rules[fam.name] if !ok { continue } for col, cs := range fam.cells { r.families[fam.name].cells[col] = applyGC(cs, rule) } } } // size returns the total size of all cell values in the row. func (r *row) size() int { size := 0 for _, fam := range r.families { for _, cells := range fam.cells { for _, cell := range cells { size += len(cell.value) } } } return size } func (r *row) String() string { return r.key } var gcTypeWarn sync.Once // applyGC applies the given GC rule to the cells. func applyGC(cells []cell, rule *btapb.GcRule) []cell { switch rule := rule.Rule.(type) { default: // TODO(dsymonds): Support GcRule_Intersection_ gcTypeWarn.Do(func() { log.Printf("Unsupported GC rule type %T", rule) }) case *btapb.GcRule_Union_: for _, sub := range rule.Union.Rules { cells = applyGC(cells, sub) } return cells case *btapb.GcRule_MaxAge: // Timestamps are in microseconds. cutoff := time.Now().UnixNano() / 1e3 cutoff -= rule.MaxAge.Seconds * 1e6 cutoff -= int64(rule.MaxAge.Nanos) / 1e3 // The slice of cells in in descending timestamp order. 
// This sort.Search will return the index of the first cell whose timestamp is chronologically before the cutoff. si := sort.Search(len(cells), func(i int) bool { return cells[i].ts < cutoff }) if si < len(cells) { log.Printf("bttest: GC MaxAge(%v) deleted %d cells.", rule.MaxAge, len(cells)-si) } return cells[:si] case *btapb.GcRule_MaxNumVersions: n := int(rule.MaxNumVersions) if len(cells) > n { cells = cells[:n] } return cells } return cells } type family struct { name string // Column family name order uint64 // Creation order of column family colNames []string // Collumn names are sorted in lexicographical ascending order cells map[string][]cell // Keyed by collumn name; cells are in descending timestamp order } type byCreationOrder []*family func (b byCreationOrder) Len() int { return len(b) } func (b byCreationOrder) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byCreationOrder) Less(i, j int) bool { return b[i].order < b[j].order } // cellsByColumn adds the column name to colNames set if it does not exist // and returns all cells within a column func (f *family) cellsByColumn(name string) []cell { if _, ok := f.cells[name]; !ok { f.colNames = append(f.colNames, name) sort.Strings(f.colNames) } return f.cells[name] } type cell struct { ts int64 value []byte } type byDescTS []cell func (b byDescTS) Len() int { return len(b) } func (b byDescTS) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byDescTS) Less(i, j int) bool { return b[i].ts > b[j].ts } type columnFamily struct { name string order uint64 // Creation order of column family gcRule *btapb.GcRule } func (c *columnFamily) proto() *btapb.ColumnFamily { return &btapb.ColumnFamily{ GcRule: c.gcRule, } } func toColumnFamilies(families map[string]*columnFamily) map[string]*btapb.ColumnFamily { fs := make(map[string]*btapb.ColumnFamily) for k, v := range families { fs[k] = v.proto() } return fs } 
golang-google-cloud-0.9.0/bigtable/bttest/inmem_test.go000066400000000000000000000350611312234511600231210ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package bttest import ( "fmt" "math/rand" "sync" "sync/atomic" "testing" "time" "golang.org/x/net/context" btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" btpb "google.golang.org/genproto/googleapis/bigtable/v2" "google.golang.org/grpc" "strconv" ) func TestConcurrentMutationsReadModifyAndGC(t *testing.T) { s := &server{ tables: make(map[string]*table), } ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) defer cancel() if _, err := s.CreateTable( ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t"}); err != nil { t.Fatal(err) } const name = `cluster/tables/t` tbl := s.tables[name] req := &btapb.ModifyColumnFamiliesRequest{ Name: name, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ Id: "cf", Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, }}, } _, err := s.ModifyColumnFamilies(ctx, req) if err != nil { t.Fatal(err) } req = &btapb.ModifyColumnFamiliesRequest{ Name: name, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ Id: "cf", Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{ GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}, }}, }}, } if _, err := 
s.ModifyColumnFamilies(ctx, req); err != nil { t.Fatal(err) } var wg sync.WaitGroup var ts int64 ms := func() []*btpb.Mutation { return []*btpb.Mutation{{ Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ FamilyName: "cf", ColumnQualifier: []byte(`col`), TimestampMicros: atomic.AddInt64(&ts, 1000), }}, }} } rmw := func() *btpb.ReadModifyWriteRowRequest { return &btpb.ReadModifyWriteRowRequest{ TableName: name, RowKey: []byte(fmt.Sprint(rand.Intn(100))), Rules: []*btpb.ReadModifyWriteRule{{ FamilyName: "cf", ColumnQualifier: []byte("col"), Rule: &btpb.ReadModifyWriteRule_IncrementAmount{1}, }}, } } for i := 0; i < 100; i++ { wg.Add(1) go func() { defer wg.Done() for ctx.Err() == nil { req := &btpb.MutateRowRequest{ TableName: name, RowKey: []byte(fmt.Sprint(rand.Intn(100))), Mutations: ms(), } s.MutateRow(ctx, req) } }() wg.Add(1) go func() { defer wg.Done() for ctx.Err() == nil { _, _ = s.ReadModifyWriteRow(ctx, rmw()) } }() wg.Add(1) go func() { defer wg.Done() tbl.gc() }() } done := make(chan struct{}) go func() { wg.Wait() close(done) }() select { case <-done: case <-time.After(1 * time.Second): t.Error("Concurrent mutations and GCs haven't completed after 1s") } } func TestCreateTableWithFamily(t *testing.T) { // The Go client currently doesn't support creating a table with column families // in one operation but it is allowed by the API. This must still be supported by the // fake server so this test lives here instead of in the main bigtable // integration test. 
s := &server{ tables: make(map[string]*table), } ctx := context.Background() newTbl := btapb.Table{ ColumnFamilies: map[string]*btapb.ColumnFamily{ "cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{123}}}, "cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{456}}}, }, } cTbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) if err != nil { t.Fatalf("Creating table: %v", err) } tbl, err := s.GetTable(ctx, &btapb.GetTableRequest{Name: cTbl.Name}) if err != nil { t.Fatalf("Getting table: %v", err) } cf := tbl.ColumnFamilies["cf1"] if cf == nil { t.Fatalf("Missing col family cf1") } if got, want := cf.GcRule.GetMaxNumVersions(), int32(123); got != want { t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got) } cf = tbl.ColumnFamilies["cf2"] if cf == nil { t.Fatalf("Missing col family cf2") } if got, want := cf.GcRule.GetMaxNumVersions(), int32(456); got != want { t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got) } } type MockSampleRowKeysServer struct { responses []*btpb.SampleRowKeysResponse grpc.ServerStream } func (s *MockSampleRowKeysServer) Send(resp *btpb.SampleRowKeysResponse) error { s.responses = append(s.responses, resp) return nil } func TestSampleRowKeys(t *testing.T) { s := &server{ tables: make(map[string]*table), } ctx := context.Background() newTbl := btapb.Table{ ColumnFamilies: map[string]*btapb.ColumnFamily{ "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, }, } tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) if err != nil { t.Fatalf("Creating table: %v", err) } // Populate the table val := []byte("value") rowCount := 1000 for i := 0; i < rowCount; i++ { req := &btpb.MutateRowRequest{ TableName: tbl.Name, RowKey: []byte("row-" + strconv.Itoa(i)), Mutations: []*btpb.Mutation{{ Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ FamilyName: "cf", ColumnQualifier: 
[]byte("col"), TimestampMicros: 0, Value: val, }}, }}, } if _, err := s.MutateRow(ctx, req); err != nil { t.Fatalf("Populating table: %v", err) } } mock := &MockSampleRowKeysServer{} if err := s.SampleRowKeys(&btpb.SampleRowKeysRequest{TableName: tbl.Name}, mock); err != nil { t.Errorf("SampleRowKeys error: %v", err) } if len(mock.responses) == 0 { t.Fatal("Response count: got 0, want > 0") } // Make sure the offset of the final response is the offset of the final row got := mock.responses[len(mock.responses)-1].OffsetBytes want := int64((rowCount - 1) * len(val)) if got != want { t.Errorf("Invalid offset: got %d, want %d", got, want) } } func TestDropRowRange(t *testing.T) { s := &server{ tables: make(map[string]*table), } ctx := context.Background() newTbl := btapb.Table{ ColumnFamilies: map[string]*btapb.ColumnFamily{ "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, }, } tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) if err != nil { t.Fatalf("Creating table: %v", err) } tbl := s.tables[tblInfo.Name] // Populate the table prefixes := []string{"AAA", "BBB", "CCC", "DDD"} count := 3 doWrite := func() { for _, prefix := range prefixes { for i := 0; i < count; i++ { req := &btpb.MutateRowRequest{ TableName: tblInfo.Name, RowKey: []byte(prefix + strconv.Itoa(i)), Mutations: []*btpb.Mutation{{ Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ FamilyName: "cf", ColumnQualifier: []byte("col"), TimestampMicros: 0, Value: []byte{}, }}, }}, } if _, err := s.MutateRow(ctx, req); err != nil { t.Fatalf("Populating table: %v", err) } } } } doWrite() tblSize := len(tbl.rows) req := &btapb.DropRowRangeRequest{ Name: tblInfo.Name, Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("AAA")}, } if _, err = s.DropRowRange(ctx, req); err != nil { t.Fatalf("Dropping first range: %v", err) } got, want := len(tbl.rows), tblSize-count if got != want { t.Errorf("Row count after first drop: got 
%d (%v), want %d", got, tbl.rows, want) } req = &btapb.DropRowRangeRequest{ Name: tblInfo.Name, Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("DDD")}, } if _, err = s.DropRowRange(ctx, req); err != nil { t.Fatalf("Dropping second range: %v", err) } got, want = len(tbl.rows), tblSize-(2*count) if got != want { t.Errorf("Row count after second drop: got %d (%v), want %d", got, tbl.rows, want) } req = &btapb.DropRowRangeRequest{ Name: tblInfo.Name, Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("XXX")}, } if _, err = s.DropRowRange(ctx, req); err != nil { t.Fatalf("Dropping invalid range: %v", err) } got, want = len(tbl.rows), tblSize-(2*count) if got != want { t.Errorf("Row count after invalid drop: got %d (%v), want %d", got, tbl.rows, want) } req = &btapb.DropRowRangeRequest{ Name: tblInfo.Name, Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true}, } if _, err = s.DropRowRange(ctx, req); err != nil { t.Fatalf("Dropping all data: %v", err) } got, want = len(tbl.rows), 0 if got != want { t.Errorf("Row count after drop all: got %d, want %d", got, want) } // Test that we can write rows, delete some and then write them again. 
count = 1 doWrite() req = &btapb.DropRowRangeRequest{ Name: tblInfo.Name, Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true}, } if _, err = s.DropRowRange(ctx, req); err != nil { t.Fatalf("Dropping all data: %v", err) } got, want = len(tbl.rows), 0 if got != want { t.Errorf("Row count after drop all: got %d, want %d", got, want) } doWrite() got, want = len(tbl.rows), len(prefixes) if got != want { t.Errorf("Row count after rewrite: got %d, want %d", got, want) } req = &btapb.DropRowRangeRequest{ Name: tblInfo.Name, Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("BBB")}, } if _, err = s.DropRowRange(ctx, req); err != nil { t.Fatalf("Dropping range: %v", err) } doWrite() got, want = len(tbl.rows), len(prefixes) if got != want { t.Errorf("Row count after drop range: got %d, want %d", got, want) } } type MockReadRowsServer struct { responses []*btpb.ReadRowsResponse grpc.ServerStream } func (s *MockReadRowsServer) Send(resp *btpb.ReadRowsResponse) error { s.responses = append(s.responses, resp) return nil } func TestReadRowsOrder(t *testing.T) { s := &server{ tables: make(map[string]*table), } ctx := context.Background() newTbl := btapb.Table{ ColumnFamilies: map[string]*btapb.ColumnFamily{ "cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, }, } tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) if err != nil { t.Fatalf("Creating table: %v", err) } count := 3 mcf := func(i int) *btapb.ModifyColumnFamiliesRequest { return &btapb.ModifyColumnFamiliesRequest{ Name: tblInfo.Name, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ Id: "cf" + strconv.Itoa(i), Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, }}, } } for i := 1; i <= count; i++ { _, err = s.ModifyColumnFamilies(ctx, mcf(i)) if err != nil { t.Fatal(err) } } // Populate the table for fc := 0; fc < count; fc++ { for cc := count; cc > 0; cc-- { for tc := 0; tc < 
count; tc++ { req := &btpb.MutateRowRequest{ TableName: tblInfo.Name, RowKey: []byte("row"), Mutations: []*btpb.Mutation{{ Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ FamilyName: "cf" + strconv.Itoa(fc), ColumnQualifier: []byte("col" + strconv.Itoa(cc)), TimestampMicros: int64((tc + 1) * 1000), Value: []byte{}, }}, }}, } if _, err := s.MutateRow(ctx, req); err != nil { t.Fatalf("Populating table: %v", err) } } } } req := &btpb.ReadRowsRequest{ TableName: tblInfo.Name, Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, } mock := &MockReadRowsServer{} if err = s.ReadRows(req, mock); err != nil { t.Errorf("ReadRows error: %v", err) } if len(mock.responses) == 0 { t.Fatal("Response count: got 0, want > 0") } if len(mock.responses[0].Chunks) != 27 { t.Fatal("Chunk count: got %d, want 27", len(mock.responses[0].Chunks)) } testOrder := func(ms *MockReadRowsServer) { var prevFam, prevCol string var prevTime int64 for _, cc := range ms.responses[0].Chunks { if prevFam == "" { prevFam = cc.FamilyName.Value prevCol = string(cc.Qualifier.Value) prevTime = cc.TimestampMicros continue } if cc.FamilyName.Value < prevFam { t.Errorf("Family order is not correct: got %s < %s", cc.FamilyName.Value, prevFam) } else if cc.FamilyName.Value == prevFam { if string(cc.Qualifier.Value) < prevCol { t.Errorf("Column order is not correct: got %s < %s", string(cc.Qualifier.Value), prevCol) } else if string(cc.Qualifier.Value) == prevCol { if cc.TimestampMicros > prevTime { t.Errorf("cell order is not correct: got %d > %d", cc.TimestampMicros, prevTime) } } } prevFam = cc.FamilyName.Value prevCol = string(cc.Qualifier.Value) prevTime = cc.TimestampMicros } } testOrder(mock) // Read with interleave filter inter := &btpb.RowFilter_Interleave{} fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"1"}} cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("2")}} inter.Filters = append(inter.Filters, fnr, cqr) req = &btpb.ReadRowsRequest{ 
TableName: tblInfo.Name, Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, Filter: &btpb.RowFilter{ Filter: &btpb.RowFilter_Interleave_{inter}, }, } mock = &MockReadRowsServer{} if err = s.ReadRows(req, mock); err != nil { t.Errorf("ReadRows error: %v", err) } if len(mock.responses) == 0 { t.Fatal("Response count: got 0, want > 0") } if len(mock.responses[0].Chunks) != 18 { t.Fatal("Chunk count: got %d, want 18", len(mock.responses[0].Chunks)) } testOrder(mock) // Check order after ReadModifyWriteRow rmw := func(i int) *btpb.ReadModifyWriteRowRequest { return &btpb.ReadModifyWriteRowRequest{ TableName: tblInfo.Name, RowKey: []byte("row"), Rules: []*btpb.ReadModifyWriteRule{{ FamilyName: "cf3", ColumnQualifier: []byte("col" + strconv.Itoa(i)), Rule: &btpb.ReadModifyWriteRule_IncrementAmount{1}, }}, } } for i := count; i > 0; i-- { s.ReadModifyWriteRow(ctx, rmw(i)) } req = &btpb.ReadRowsRequest{ TableName: tblInfo.Name, Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, } mock = &MockReadRowsServer{} if err = s.ReadRows(req, mock); err != nil { t.Errorf("ReadRows error: %v", err) } if len(mock.responses) == 0 { t.Fatal("Response count: got 0, want > 0") } if len(mock.responses[0].Chunks) != 30 { t.Fatal("Chunk count: got %d, want 30", len(mock.responses[0].Chunks)) } testOrder(mock) } golang-google-cloud-0.9.0/bigtable/cmd/000077500000000000000000000000001312234511600176575ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigtable/cmd/cbt/000077500000000000000000000000001312234511600204275ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigtable/cmd/cbt/cbt.go000066400000000000000000000504601312234511600215330ustar00rootroot00000000000000/* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main // Command docs are in cbtdoc.go. import ( "bytes" "flag" "fmt" "go/format" "io" "log" "os" "regexp" "sort" "strconv" "strings" "text/tabwriter" "text/template" "time" "cloud.google.com/go/bigtable" "cloud.google.com/go/bigtable/internal/cbtconfig" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/grpc" ) var ( oFlag = flag.String("o", "", "if set, redirect stdout to this file") config *cbtconfig.Config client *bigtable.Client adminClient *bigtable.AdminClient instanceAdminClient *bigtable.InstanceAdminClient version = "" revision = "" revisionDate = "" ) func getCredentialOpts(opts []option.ClientOption) []option.ClientOption { if ts := config.TokenSource; ts != nil { opts = append(opts, option.WithTokenSource(ts)) } if tlsCreds := config.TLSCreds; tlsCreds != nil { opts = append(opts, option.WithGRPCDialOption(grpc.WithTransportCredentials(tlsCreds))) } return opts } func getClient() *bigtable.Client { if client == nil { var opts []option.ClientOption if ep := config.DataEndpoint; ep != "" { opts = append(opts, option.WithEndpoint(ep)) } opts = getCredentialOpts(opts) var err error client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, opts...) 
if err != nil { log.Fatalf("Making bigtable.Client: %v", err) } } return client } func getAdminClient() *bigtable.AdminClient { if adminClient == nil { var opts []option.ClientOption if ep := config.AdminEndpoint; ep != "" { opts = append(opts, option.WithEndpoint(ep)) } opts = getCredentialOpts(opts) var err error adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance, opts...) if err != nil { log.Fatalf("Making bigtable.AdminClient: %v", err) } } return adminClient } func getInstanceAdminClient() *bigtable.InstanceAdminClient { if instanceAdminClient == nil { var opts []option.ClientOption if ep := config.AdminEndpoint; ep != "" { opts = append(opts, option.WithEndpoint(ep)) } opts = getCredentialOpts(opts) var err error instanceAdminClient, err = bigtable.NewInstanceAdminClient(context.Background(), config.Project, opts...) if err != nil { log.Fatalf("Making bigtable.InstanceAdminClient: %v", err) } } return instanceAdminClient } func main() { var err error config, err = cbtconfig.Load() if err != nil { log.Fatal(err) } config.RegisterFlags() flag.Usage = func() { usage(os.Stderr) } flag.Parse() if flag.NArg() == 0 { usage(os.Stderr) os.Exit(1) } if *oFlag != "" { f, err := os.Create(*oFlag) if err != nil { log.Fatal(err) } defer func() { if err := f.Close(); err != nil { log.Fatal(err) } }() os.Stdout = f } ctx := context.Background() for _, cmd := range commands { if cmd.Name == flag.Arg(0) { if err := config.CheckFlags(cmd.Required); err != nil { log.Fatal(err) } cmd.do(ctx, flag.Args()[1:]...) 
return } } log.Fatalf("Unknown command %q", flag.Arg(0)) } func usage(w io.Writer) { fmt.Fprintf(w, "Usage: %s [flags] ...\n", os.Args[0]) flag.CommandLine.SetOutput(w) flag.CommandLine.PrintDefaults() fmt.Fprintf(w, "\n%s", cmdSummary) } var cmdSummary string // generated in init, below func init() { var buf bytes.Buffer tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0) for _, cmd := range commands { fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc) } tw.Flush() buf.WriteString(configHelp) cmdSummary = buf.String() } var configHelp = ` For convenience, values of the -project, -instance, -creds, -admin-endpoint and -data-endpoint flags may be specified in ` + cbtconfig.Filename() + ` in this format: project = my-project-123 instance = my-instance creds = path-to-account-key.json admin-endpoint = hostname:port data-endpoint = hostname:port All values are optional, and all will be overridden by flags. cbt ` + version + ` ` + revision + ` ` + revisionDate + ` ` var commands = []struct { Name, Desc string do func(context.Context, ...string) Usage string Required cbtconfig.RequiredFlags }{ { Name: "count", Desc: "Count rows in a table", do: doCount, Usage: "cbt count
", Required: cbtconfig.ProjectAndInstanceRequired, }, { Name: "createfamily", Desc: "Create a column family", do: doCreateFamily, Usage: "cbt createfamily
", Required: cbtconfig.ProjectAndInstanceRequired, }, { Name: "createtable", Desc: "Create a table", do: doCreateTable, Usage: "cbt createtable
[initial_splits...]\n" + " initial_splits=row A row key to be used to initially split the table " + "into multiple tablets. Can be repeated to create multiple splits.", Required: cbtconfig.ProjectAndInstanceRequired, }, { Name: "deletefamily", Desc: "Delete a column family", do: doDeleteFamily, Usage: "cbt deletefamily
", Required: cbtconfig.ProjectAndInstanceRequired, }, { Name: "deleterow", Desc: "Delete a row", do: doDeleteRow, Usage: "cbt deleterow
", Required: cbtconfig.ProjectAndInstanceRequired, }, { Name: "deletetable", Desc: "Delete a table", do: doDeleteTable, Usage: "cbt deletetable
", Required: cbtconfig.ProjectAndInstanceRequired, }, { Name: "doc", Desc: "Print godoc-suitable documentation for cbt", do: doDoc, Usage: "cbt doc", Required: cbtconfig.NoneRequired, }, { Name: "help", Desc: "Print help text", do: doHelp, Usage: "cbt help [command]", Required: cbtconfig.NoneRequired, }, { Name: "listinstances", Desc: "List instances in a project", do: doListInstances, Usage: "cbt listinstances", Required: cbtconfig.ProjectRequired, }, { Name: "lookup", Desc: "Read from a single row", do: doLookup, Usage: "cbt lookup
", Required: cbtconfig.ProjectAndInstanceRequired, }, { Name: "ls", Desc: "List tables and column families", do: doLS, Usage: "cbt ls List tables\n" + "cbt ls
List column families in
", Required: cbtconfig.ProjectAndInstanceRequired, }, { Name: "mddoc", Desc: "Print documentation for cbt in Markdown format", do: doMDDoc, Usage: "cbt mddoc", Required: cbtconfig.NoneRequired, }, { Name: "read", Desc: "Read rows", do: doRead, Usage: "cbt read
[start=] [end=] [prefix=] [count=]\n" + " start= Start reading at this row\n" + " end= Stop reading before this row\n" + " prefix= Read rows with this prefix\n" + " count= Read only this many rows\n", Required: cbtconfig.ProjectAndInstanceRequired, }, { Name: "set", Desc: "Set value of a cell", do: doSet, Usage: "cbt set
family:column=val[@ts] ...\n" + " family:column=val[@ts] may be repeated to set multiple cells.\n" + "\n" + " ts is an optional integer timestamp.\n" + " If it cannot be parsed, the `@ts` part will be\n" + " interpreted as part of the value.", Required: cbtconfig.ProjectAndInstanceRequired, }, { Name: "setgcpolicy", Desc: "Set the GC policy for a column family", do: doSetGCPolicy, Usage: "cbt setgcpolicy
( maxage= | maxversions= )\n" + "\n" + ` maxage= Maximum timestamp age to preserve (e.g. "1h", "4d")` + "\n" + " maxversions= Maximum number of versions to preserve", Required: cbtconfig.ProjectAndInstanceRequired, }, { Name: "version", Desc: "Print the current cbt version", do: doVersion, Usage: "cbt version", Required: cbtconfig.NoneRequired, }, } func doCount(ctx context.Context, args ...string) { if len(args) != 1 { log.Fatal("usage: cbt count
") } tbl := getClient().Open(args[0]) n := 0 err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool { n++ return true }, bigtable.RowFilter(bigtable.StripValueFilter())) if err != nil { log.Fatalf("Reading rows: %v", err) } fmt.Println(n) } func doCreateFamily(ctx context.Context, args ...string) { if len(args) != 2 { log.Fatal("usage: cbt createfamily
") } err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1]) if err != nil { log.Fatalf("Creating column family: %v", err) } } func doCreateTable(ctx context.Context, args ...string) { if len(args) < 1 { log.Fatal("usage: cbt createtable
[initial_splits...]") } var err error if len(args) > 1 { splits := args[1:] err = getAdminClient().CreatePresplitTable(ctx, args[0], splits) } else { err = getAdminClient().CreateTable(ctx, args[0]) } if err != nil { log.Fatalf("Creating table: %v", err) } } func doDeleteFamily(ctx context.Context, args ...string) { if len(args) != 2 { log.Fatal("usage: cbt deletefamily
") } err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1]) if err != nil { log.Fatalf("Deleting column family: %v", err) } } func doDeleteRow(ctx context.Context, args ...string) { if len(args) != 2 { log.Fatal("usage: cbt deleterow
") } tbl := getClient().Open(args[0]) mut := bigtable.NewMutation() mut.DeleteRow() if err := tbl.Apply(ctx, args[1], mut); err != nil { log.Fatalf("Deleting row: %v", err) } } func doDeleteTable(ctx context.Context, args ...string) { if len(args) != 1 { log.Fatalf("Can't do `cbt deletetable %s`", args) } err := getAdminClient().DeleteTable(ctx, args[0]) if err != nil { log.Fatalf("Deleting table: %v", err) } } // to break circular dependencies var ( doDocFn func(ctx context.Context, args ...string) doHelpFn func(ctx context.Context, args ...string) doMDDocFn func(ctx context.Context, args ...string) ) func init() { doDocFn = doDocReal doHelpFn = doHelpReal doMDDocFn = doMDDocReal } func doDoc(ctx context.Context, args ...string) { doDocFn(ctx, args...) } func doHelp(ctx context.Context, args ...string) { doHelpFn(ctx, args...) } func doMDDoc(ctx context.Context, args ...string) { doMDDocFn(ctx, args...) } func docFlags() []*flag.Flag { // Only include specific flags, in a specific order. var flags []*flag.Flag for _, name := range []string{"project", "instance", "creds"} { f := flag.Lookup(name) if f == nil { log.Fatalf("Flag not linked: -%s", name) } flags = append(flags, f) } return flags } func doDocReal(ctx context.Context, args ...string) { data := map[string]interface{}{ "Commands": commands, "Flags": docFlags(), } var buf bytes.Buffer if err := docTemplate.Execute(&buf, data); err != nil { log.Fatalf("Bad doc template: %v", err) } out, err := format.Source(buf.Bytes()) if err != nil { log.Fatalf("Bad doc output: %v", err) } os.Stdout.Write(out) } func indentLines(s, ind string) string { ss := strings.Split(s, "\n") for i, p := range ss { ss[i] = ind + p } return strings.Join(ss, "\n") } var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{ "indent": indentLines, }). Parse(` // Copyright 2016 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. // Run "go generate" to regenerate. //go:generate go run cbt.go -o cbtdoc.go doc /* Cbt is a tool for doing basic interactions with Cloud Bigtable. Usage: cbt [options] command [arguments] The commands are: {{range .Commands}} {{printf "%-25s %s" .Name .Desc}}{{end}} Use "cbt help " for more information about a command. The options are: {{range .Flags}} -{{.Name}} string {{.Usage}}{{end}} {{range .Commands}} {{.Desc}} Usage: {{indent .Usage "\t"}} {{end}} */ package main `)) func doHelpReal(ctx context.Context, args ...string) { if len(args) == 0 { usage(os.Stdout) return } for _, cmd := range commands { if cmd.Name == args[0] { fmt.Println(cmd.Usage) return } } log.Fatalf("Don't know command %q", args[0]) } func doListInstances(ctx context.Context, args ...string) { if len(args) != 0 { log.Fatalf("usage: cbt listinstances") } is, err := getInstanceAdminClient().Instances(ctx) if err != nil { log.Fatalf("Getting list of instances: %v", err) } tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0) fmt.Fprintf(tw, "Instance Name\tInfo\n") fmt.Fprintf(tw, "-------------\t----\n") for _, i := range is { fmt.Fprintf(tw, "%s\t%s\n", i.Name, i.DisplayName) } tw.Flush() } func doLookup(ctx context.Context, args ...string) { if len(args) != 2 { log.Fatalf("usage: cbt lookup
") } table, row := args[0], args[1] tbl := getClient().Open(table) r, err := tbl.ReadRow(ctx, row) if err != nil { log.Fatalf("Reading row: %v", err) } printRow(r) } func printRow(r bigtable.Row) { fmt.Println(strings.Repeat("-", 40)) fmt.Println(r.Key()) var fams []string for fam := range r { fams = append(fams, fam) } sort.Strings(fams) for _, fam := range fams { ris := r[fam] sort.Sort(byColumn(ris)) for _, ri := range ris { ts := time.Unix(0, int64(ri.Timestamp)*1e3) fmt.Printf(" %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000")) fmt.Printf(" %q\n", ri.Value) } } } type byColumn []bigtable.ReadItem func (b byColumn) Len() int { return len(b) } func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column } type byFamilyName []bigtable.FamilyInfo func (b byFamilyName) Len() int { return len(b) } func (b byFamilyName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byFamilyName) Less(i, j int) bool { return b[i].Name < b[j].Name } func doLS(ctx context.Context, args ...string) { switch len(args) { default: log.Fatalf("Can't do `cbt ls %s`", args) case 0: tables, err := getAdminClient().Tables(ctx) if err != nil { log.Fatalf("Getting list of tables: %v", err) } sort.Strings(tables) for _, table := range tables { fmt.Println(table) } case 1: table := args[0] ti, err := getAdminClient().TableInfo(ctx, table) if err != nil { log.Fatalf("Getting table info: %v", err) } sort.Sort(byFamilyName(ti.FamilyInfos)) tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0) fmt.Fprintf(tw, "Family Name\tGC Policy\n") fmt.Fprintf(tw, "-----------\t---------\n") for _, fam := range ti.FamilyInfos { fmt.Fprintf(tw, "%s\t%s\n", fam.Name, fam.GCPolicy) } tw.Flush() } } func doMDDocReal(ctx context.Context, args ...string) { data := map[string]interface{}{ "Commands": commands, "Flags": docFlags(), } var buf bytes.Buffer if err := mddocTemplate.Execute(&buf, data); err != nil { log.Fatalf("Bad 
mddoc template: %v", err) } io.Copy(os.Stdout, &buf) } var mddocTemplate = template.Must(template.New("mddoc").Funcs(template.FuncMap{ "indent": indentLines, }). Parse(` Cbt is a tool for doing basic interactions with Cloud Bigtable. Usage: cbt [options] command [arguments] The commands are: {{range .Commands}} {{printf "%-25s %s" .Name .Desc}}{{end}} Use "cbt help " for more information about a command. The options are: {{range .Flags}} -{{.Name}} string {{.Usage}}{{end}} {{range .Commands}} ## {{.Desc}} {{indent .Usage "\t"}} {{end}} `)) func doRead(ctx context.Context, args ...string) { if len(args) < 1 { log.Fatalf("usage: cbt read
[args ...]") } tbl := getClient().Open(args[0]) parsed := make(map[string]string) for _, arg := range args[1:] { i := strings.Index(arg, "=") if i < 0 { log.Fatalf("Bad arg %q", arg) } key, val := arg[:i], arg[i+1:] switch key { default: log.Fatalf("Unknown arg key %q", key) case "limit": // Be nicer; we used to support this, but renamed it to "end". log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end") case "start", "end", "prefix", "count": parsed[key] = val } } if (parsed["start"] != "" || parsed["end"] != "") && parsed["prefix"] != "" { log.Fatal(`"start"/"end" may not be mixed with "prefix"`) } var rr bigtable.RowRange if start, end := parsed["start"], parsed["end"]; end != "" { rr = bigtable.NewRange(start, end) } else if start != "" { rr = bigtable.InfiniteRange(start) } if prefix := parsed["prefix"]; prefix != "" { rr = bigtable.PrefixRange(prefix) } var opts []bigtable.ReadOption if count := parsed["count"]; count != "" { n, err := strconv.ParseInt(count, 0, 64) if err != nil { log.Fatalf("Bad count %q: %v", count, err) } opts = append(opts, bigtable.LimitRows(n)) } // TODO(dsymonds): Support filters. err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool { printRow(r) return true }, opts...) if err != nil { log.Fatalf("Reading rows: %v", err) } } var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`) func doSet(ctx context.Context, args ...string) { if len(args) < 3 { log.Fatalf("usage: cbt set
family:[column]=val[@ts] ...") } tbl := getClient().Open(args[0]) row := args[1] mut := bigtable.NewMutation() for _, arg := range args[2:] { m := setArg.FindStringSubmatch(arg) if m == nil { log.Fatalf("Bad set arg %q", arg) } val := m[3] ts := bigtable.Now() if i := strings.LastIndex(val, "@"); i >= 0 { // Try parsing a timestamp. n, err := strconv.ParseInt(val[i+1:], 0, 64) if err == nil { val = val[:i] ts = bigtable.Timestamp(n) } } mut.Set(m[1], m[2], ts, []byte(val)) } if err := tbl.Apply(ctx, row, mut); err != nil { log.Fatalf("Applying mutation: %v", err) } } func doSetGCPolicy(ctx context.Context, args ...string) { if len(args) < 3 { log.Fatalf("usage: cbt setgcpolicy
( maxage= | maxversions= )") } table := args[0] fam := args[1] var pol bigtable.GCPolicy switch p := args[2]; { case strings.HasPrefix(p, "maxage="): d, err := parseDuration(p[7:]) if err != nil { log.Fatal(err) } pol = bigtable.MaxAgePolicy(d) case strings.HasPrefix(p, "maxversions="): n, err := strconv.ParseUint(p[12:], 10, 16) if err != nil { log.Fatal(err) } pol = bigtable.MaxVersionsPolicy(int(n)) default: log.Fatalf("Bad GC policy %q", p) } if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil { log.Fatalf("Setting GC policy: %v", err) } } // parseDuration parses a duration string. // It is similar to Go's time.ParseDuration, except with a different set of supported units, // and only simple formats supported. func parseDuration(s string) (time.Duration, error) { // [0-9]+[a-z]+ // Split [0-9]+ from [a-z]+. i := 0 for ; i < len(s); i++ { c := s[i] if c < '0' || c > '9' { break } } ds, u := s[:i], s[i:] if ds == "" || u == "" { return 0, fmt.Errorf("invalid duration %q", s) } // Parse them. d, err := strconv.ParseUint(ds, 10, 32) if err != nil { return 0, fmt.Errorf("invalid duration %q: %v", s, err) } unit, ok := unitMap[u] if !ok { return 0, fmt.Errorf("unknown unit %q in duration %q", u, s) } if d > uint64((1<<63-1)/unit) { // overflow return 0, fmt.Errorf("invalid duration %q overflows", s) } return time.Duration(d) * unit, nil } var unitMap = map[string]time.Duration{ "ms": time.Millisecond, "s": time.Second, "m": time.Minute, "h": time.Hour, "d": 24 * time.Hour, } func doVersion(ctx context.Context, args ...string) { fmt.Printf("%s %s %s\n", version, revision, revisionDate) } golang-google-cloud-0.9.0/bigtable/cmd/cbt/cbt_test.go000066400000000000000000000030521312234511600225650ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "testing" "time" ) func TestParseDuration(t *testing.T) { tests := []struct { in string // out or fail are mutually exclusive out time.Duration fail bool }{ {in: "10ms", out: 10 * time.Millisecond}, {in: "3s", out: 3 * time.Second}, {in: "60m", out: 60 * time.Minute}, {in: "12h", out: 12 * time.Hour}, {in: "7d", out: 168 * time.Hour}, {in: "", fail: true}, {in: "0", fail: true}, {in: "7ns", fail: true}, {in: "14mo", fail: true}, {in: "3.5h", fail: true}, {in: "106752d", fail: true}, // overflow } for _, tc := range tests { got, err := parseDuration(tc.in) if !tc.fail && err != nil { t.Errorf("parseDuration(%q) unexpectedly failed: %v", tc.in, err) continue } if tc.fail && err == nil { t.Errorf("parseDuration(%q) did not fail", tc.in) continue } if tc.fail { continue } if got != tc.out { t.Errorf("parseDuration(%q) = %v, want %v", tc.in, got, tc.out) } } } golang-google-cloud-0.9.0/bigtable/cmd/cbt/cbtdoc.go000066400000000000000000000065601312234511600222230ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. // Run "go generate" to regenerate. //go:generate go run cbt.go -o cbtdoc.go doc /* Cbt is a tool for doing basic interactions with Cloud Bigtable. Usage: cbt [options] command [arguments] The commands are: count Count rows in a table createfamily Create a column family createtable Create a table deletefamily Delete a column family deleterow Delete a row deletetable Delete a table doc Print godoc-suitable documentation for cbt help Print help text listinstances List instances in a project lookup Read from a single row ls List tables and column families mddoc Print documentation for cbt in Markdown format read Read rows set Set value of a cell setgcpolicy Set the GC policy for a column family Use "cbt help " for more information about a command. The options are: -project string project ID -instance string Cloud Bigtable instance -creds string if set, use application credentials in this file Count rows in a table Usage: cbt count
Create a column family Usage: cbt createfamily
Create a table Usage: cbt createtable
Delete a column family Usage: cbt deletefamily
Delete a row Usage: cbt deleterow
Delete a table Usage: cbt deletetable
Print godoc-suitable documentation for cbt Usage: cbt doc Print help text Usage: cbt help [command] List instances in a project Usage: cbt listinstances Read from a single row Usage: cbt lookup
List tables and column families Usage: cbt ls List tables cbt ls
List column families in
Print documentation for cbt in Markdown format Usage: cbt mddoc Read rows Usage: cbt read
[start=] [end=] [prefix=] [count=] start= Start reading at this row end= Stop reading before this row prefix= Read rows with this prefix count= Read only this many rows Set value of a cell Usage: cbt set
family:column=val[@ts] ... family:column=val[@ts] may be repeated to set multiple cells. ts is an optional integer timestamp. If it cannot be parsed, the `@ts` part will be interpreted as part of the value. Set the GC policy for a column family Usage: cbt setgcpolicy
( maxage= | maxversions= ) maxage= Maximum timestamp age to preserve (e.g. "1h", "4d") maxversions= Maximum number of versions to preserve */ package main golang-google-cloud-0.9.0/bigtable/cmd/emulator/000077500000000000000000000000001312234511600215075ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigtable/cmd/emulator/cbtemulator.go000066400000000000000000000023541312234511600243630ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* cbtemulator launches the in-memory Cloud Bigtable server on the given address. */ package main import ( "flag" "fmt" "log" "cloud.google.com/go/bigtable/bttest" "google.golang.org/grpc" ) var ( host = flag.String("host", "localhost", "the address to bind to on the local machine") port = flag.Int("port", 9000, "the port number to bind to on the local machine") ) func main() { grpc.EnableTracing = false flag.Parse() srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port)) if err != nil { log.Fatalf("failed to start emulator: %v", err) } fmt.Printf("Cloud Bigtable emulator running on %s\n", srv.Addr) select {} } golang-google-cloud-0.9.0/bigtable/cmd/loadtest/000077500000000000000000000000001312234511600214765ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigtable/cmd/loadtest/loadtest.go000066400000000000000000000127021312234511600236460ustar00rootroot00000000000000/* Copyright 2015 Google Inc. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Loadtest does some load testing through the Go client library for Cloud Bigtable. */ package main import ( "bytes" "flag" "fmt" "log" "math/rand" "os" "os/signal" "sync" "sync/atomic" "time" "cloud.google.com/go/bigtable" "cloud.google.com/go/bigtable/internal/cbtconfig" "cloud.google.com/go/bigtable/internal/stat" "golang.org/x/net/context" "google.golang.org/api/option" ) var ( runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for; 0 to run forever until SIGTERM") scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist") csvOutput = flag.String("csv_output", "", "output path for statistics in .csv format. 
If this file already exists it will be overwritten.") poolSize = flag.Int("pool_size", 1, "size of the gRPC connection pool to use for the data client") reqCount = flag.Int("req_count", 100, "number of concurrent requests") config *cbtconfig.Config client *bigtable.Client adminClient *bigtable.AdminClient ) func main() { var err error config, err = cbtconfig.Load() if err != nil { log.Fatal(err) } config.RegisterFlags() flag.Parse() if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil { log.Fatal(err) } if config.Creds != "" { os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds) } if flag.NArg() != 0 { flag.Usage() os.Exit(1) } var options []option.ClientOption if *poolSize > 1 { options = append(options, option.WithGRPCConnectionPool(*poolSize)) } var csvFile *os.File if *csvOutput != "" { csvFile, err = os.Create(*csvOutput) if err != nil { log.Fatalf("creating csv output file: %v", err) } defer csvFile.Close() log.Printf("Writing statistics to %q ...", *csvOutput) } log.Printf("Dialing connections...") client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...) if err != nil { log.Fatalf("Making bigtable.Client: %v", err) } defer client.Close() adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance) if err != nil { log.Fatalf("Making bigtable.AdminClient: %v", err) } defer adminClient.Close() // Create a scratch table. log.Printf("Setting up scratch table...") if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil { log.Fatalf("Making scratch table %q: %v", *scratchTable, err) } if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil { log.Fatalf("Making scratch table column family: %v", err) } // Upon a successful run, delete the table. Don't bother checking for errors. defer adminClient.DeleteTable(context.Background(), *scratchTable) // Also delete the table on SIGTERM. 
c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) go func() { s := <-c log.Printf("Caught %v, cleaning scratch table.", s) adminClient.DeleteTable(context.Background(), *scratchTable) os.Exit(1) }() log.Printf("Starting load test... (run for %v)", *runFor) tbl := client.Open(*scratchTable) sem := make(chan int, *reqCount) // limit the number of requests happening at once var reads, writes stats stopTime := time.Now().Add(*runFor) var wg sync.WaitGroup for time.Now().Before(stopTime) || *runFor == 0 { sem <- 1 wg.Add(1) go func() { defer wg.Done() defer func() { <-sem }() ok := true opStart := time.Now() var stats *stats defer func() { stats.Record(ok, time.Since(opStart)) }() row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows switch rand.Intn(10) { default: // read stats = &reads _, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1))) if err != nil { log.Printf("Error doing read: %v", err) ok = false } case 0, 1, 2, 3, 4: // write stats = &writes mut := bigtable.NewMutation() mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write if err := tbl.Apply(context.Background(), row, mut); err != nil { log.Printf("Error doing mutation: %v", err) ok = false } } }() } wg.Wait() readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok) writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok) log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg) log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg) if csvFile != nil { stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile) } } var allStats int64 // atomic type stats struct { mu sync.Mutex tries, ok int ds []time.Duration } func (s *stats) Record(ok bool, d time.Duration) { s.mu.Lock() s.tries++ if ok { s.ok++ } s.ds = append(s.ds, d) s.mu.Unlock() if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 { log.Printf("Progress: done %d ops", n) } } 
golang-google-cloud-0.9.0/bigtable/cmd/scantest/000077500000000000000000000000001312234511600215035ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigtable/cmd/scantest/scantest.go000066400000000000000000000073451312234511600236670ustar00rootroot00000000000000/* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Scantest does scan-related load testing against Cloud Bigtable. The logic here mimics a similar test written using the Java client. */ package main import ( "bytes" "flag" "fmt" "log" "math/rand" "os" "sync" "sync/atomic" "text/tabwriter" "time" "cloud.google.com/go/bigtable" "cloud.google.com/go/bigtable/internal/cbtconfig" "cloud.google.com/go/bigtable/internal/stat" "golang.org/x/net/context" ) var ( runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for") numScans = flag.Int("concurrent_scans", 1, "number of concurrent scans") rowLimit = flag.Int("row_limit", 10000, "max number of records per scan") config *cbtconfig.Config client *bigtable.Client ) func main() { flag.Usage = func() { fmt.Printf("Usage: scantest [options] \n\n") flag.PrintDefaults() } var err error config, err = cbtconfig.Load() if err != nil { log.Fatal(err) } config.RegisterFlags() flag.Parse() if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil { log.Fatal(err) } if config.Creds != "" { os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds) } if flag.NArg() != 1 { flag.Usage() os.Exit(1) } table := 
flag.Arg(0) log.Printf("Dialing connections...") client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance) if err != nil { log.Fatalf("Making bigtable.Client: %v", err) } defer client.Close() log.Printf("Starting scan test... (run for %v)", *runFor) tbl := client.Open(table) sem := make(chan int, *numScans) // limit the number of requests happening at once var scans stats stopTime := time.Now().Add(*runFor) var wg sync.WaitGroup for time.Now().Before(stopTime) { sem <- 1 wg.Add(1) go func() { defer wg.Done() defer func() { <-sem }() ok := true opStart := time.Now() defer func() { scans.Record(ok, time.Since(opStart)) }() // Start at a random row key key := fmt.Sprintf("user%d", rand.Int63()) limit := bigtable.LimitRows(int64(*rowLimit)) noop := func(bigtable.Row) bool { return true } if err := tbl.ReadRows(context.Background(), bigtable.NewRange(key, ""), noop, limit); err != nil { log.Printf("Error during scan: %v", err) ok = false } }() } wg.Wait() agg := stat.NewAggregate("scans", scans.ds, scans.tries-scans.ok) log.Printf("Scans (%d ok / %d tries):\nscan times:\n%v\nthroughput (rows/second):\n%v", scans.ok, scans.tries, agg, throughputString(agg)) } func throughputString(agg *stat.Aggregate) string { var buf bytes.Buffer tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding rowLimitF := float64(*rowLimit) fmt.Fprintf( tw, "min:\t%.2f\nmedian:\t%.2f\nmax:\t%.2f\n", rowLimitF/agg.Max.Seconds(), rowLimitF/agg.Median.Seconds(), rowLimitF/agg.Min.Seconds()) tw.Flush() return buf.String() } var allStats int64 // atomic type stats struct { mu sync.Mutex tries, ok int ds []time.Duration } func (s *stats) Record(ok bool, d time.Duration) { s.mu.Lock() s.tries++ if ok { s.ok++ } s.ds = append(s.ds, d) s.mu.Unlock() if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 { log.Printf("Progress: done %d ops", n) } } 
golang-google-cloud-0.9.0/bigtable/doc.go000066400000000000000000000123671312234511600202210ustar00rootroot00000000000000/* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Package bigtable is an API to Google Cloud Bigtable. See https://cloud.google.com/bigtable/docs/ for general product documentation. Setup and Credentials Use NewClient or NewAdminClient to create a client that can be used to access the data or admin APIs respectively. Both require credentials that have permission to access the Cloud Bigtable API. If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials (https://developers.google.com/accounts/docs/application-default-credentials) is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called. To use alternate credentials, pass them to NewClient or NewAdminClient using option.WithTokenSource. For instance, you can use service account credentials by visiting https://cloud.google.com/console/project/MYPROJECT/apiui/credential, creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing jsonKey, err := ioutil.ReadFile(pathToKeyFile) ... config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc. ... client, err := bigtable.NewClient(ctx, project, instance, option.WithTokenSource(config.TokenSource(ctx))) ... 
Here, `google` means the golang.org/x/oauth2/google package and `option` means the google.golang.org/api/option package. Reading The principal way to read from a Bigtable is to use the ReadRows method on *Table. A RowRange specifies a contiguous portion of a table. A Filter may be provided through RowFilter to limit or transform the data that is returned. tbl := client.Open("mytable") ... // Read all the rows starting with "com.google.", // but only fetch the columns in the "links" family. rr := bigtable.PrefixRange("com.google.") err := tbl.ReadRows(ctx, rr, func(r Row) bool { // do something with r return true // keep going }, bigtable.RowFilter(bigtable.FamilyFilter("links"))) ... To read a single row, use the ReadRow helper method. r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key ... Writing This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite. The former expresses idempotent operations. The latter expresses non-idempotent operations and returns the new values of updated cells. These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite), building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite methods on a Table. For instance, to set a couple of cells in a table, tbl := client.Open("mytable") mut := bigtable.NewMutation() mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1")) mut.Set("links", "golang.org", bigtable.Now(), []byte("1")) err := tbl.Apply(ctx, "com.google.cloud", mut) ... To increment an encoded value in one cell, tbl := client.Open("mytable") rmw := bigtable.NewReadModifyWrite() rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org" r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw) ... 
Retries If a read or write operation encounters a transient error it will be retried until a successful response, an unretryable error or the context deadline is reached. Non-idempotent writes (where the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls will not re-scan rows that have already been processed. Authentication See examples of authorization and authentication at https://godoc.org/cloud.google.com/go#pkg-examples. */ package bigtable // import "cloud.google.com/go/bigtable" // Scope constants for authentication credentials. // These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile. const ( // Scope is the OAuth scope for Cloud Bigtable data operations. Scope = "https://www.googleapis.com/auth/bigtable.data" // ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations. ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly" // AdminScope is the OAuth scope for Cloud Bigtable table admin operations. AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table" // InstanceAdminScope is the OAuth scope for Cloud Bigtable instance (and cluster) admin operations. InstanceAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster" ) // clientUserAgent identifies the version of this package. // It should be bumped upon significant changes only. const clientUserAgent = "cbt-go/20160628" // resourcePrefixHeader is the name of the metadata header used to indicate // the resource being operated on. const resourcePrefixHeader = "google-cloud-resource-prefix" golang-google-cloud-0.9.0/bigtable/export_test.go000066400000000000000000000136341312234511600220320ustar00rootroot00000000000000/* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package bigtable import ( "errors" "flag" "fmt" "strings" "time" "cloud.google.com/go/bigtable/bttest" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/grpc" ) var legacyUseProd string var integrationConfig IntegrationTestConfig func init() { c := &integrationConfig flag.BoolVar(&c.UseProd, "it.use-prod", false, "Use remote bigtable instead of local emulator") flag.StringVar(&c.AdminEndpoint, "it.admin-endpoint", "", "Admin api host and port") flag.StringVar(&c.DataEndpoint, "it.data-endpoint", "", "Data api host and port") flag.StringVar(&c.Project, "it.project", "", "Project to use for integration test") flag.StringVar(&c.Instance, "it.instance", "", "Bigtable instance to use") flag.StringVar(&c.Cluster, "it.cluster", "", "Bigtable cluster to use") flag.StringVar(&c.Table, "it.table", "", "Bigtable table to create") // Backwards compat flag.StringVar(&legacyUseProd, "use_prod", "", `DEPRECATED: if set to "proj,instance,table", run integration test against production`) } // IntegrationTestConfig contains parameters to pick and setup a IntegrationEnv for testing type IntegrationTestConfig struct { UseProd bool AdminEndpoint string DataEndpoint string Project string Instance string Cluster string Table string } // IntegrationEnv represents a testing environment. 
// The environment can be implemented using production or an emulator type IntegrationEnv interface { Config() IntegrationTestConfig NewAdminClient() (*AdminClient, error) NewClient() (*Client, error) Close() } // NewIntegrationEnv creates a new environment based on the command line args func NewIntegrationEnv() (IntegrationEnv, error) { c := integrationConfig if legacyUseProd != "" { fmt.Println("WARNING: using legacy commandline arg -use_prod, please switch to -it.*") parts := strings.SplitN(legacyUseProd, ",", 3) c.UseProd = true c.Project = parts[0] c.Instance = parts[1] c.Table = parts[2] } if integrationConfig.UseProd { return NewProdEnv(c) } else { return NewEmulatedEnv(c) } } // EmulatedEnv encapsulates the state of an emulator type EmulatedEnv struct { config IntegrationTestConfig server *bttest.Server } // NewEmulatedEnv builds and starts the emulator based environment func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) { srv, err := bttest.NewServer("127.0.0.1:0") if err != nil { return nil, err } if config.Project == "" { config.Project = "project" } if config.Instance == "" { config.Instance = "instance" } if config.Table == "" { config.Table = "mytable" } config.AdminEndpoint = srv.Addr config.DataEndpoint = srv.Addr env := &EmulatedEnv{ config: config, server: srv, } return env, nil } // Close stops & cleans up the emulator func (e *EmulatedEnv) Close() { e.server.Close() } // Config gets the config used to build this environment func (e *EmulatedEnv) Config() IntegrationTestConfig { return e.config } // NewAdminClient builds a new connected admin client for this environment func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) { timeout := 20 * time.Second ctx, _ := context.WithTimeout(context.Background(), timeout) conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure()) if err != nil { return nil, err } return NewAdminClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn)) } // NewClient builds a 
new connected data client for this environment func (e *EmulatedEnv) NewClient() (*Client, error) { timeout := 20 * time.Second ctx, _ := context.WithTimeout(context.Background(), timeout) conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure()) if err != nil { return nil, err } return NewClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn)) } // ProdEnv encapsulates the state necessary to connect to the external Bigtable service type ProdEnv struct { config IntegrationTestConfig } // NewProdEnv builds the environment representation func NewProdEnv(config IntegrationTestConfig) (*ProdEnv, error) { if config.Project == "" { return nil, errors.New("Project not set") } if config.Instance == "" { return nil, errors.New("Instance not set") } if config.Table == "" { return nil, errors.New("Table not set") } return &ProdEnv{config}, nil } // Close is a no-op for production environments func (e *ProdEnv) Close() {} // Config gets the config used to build this environment func (e *ProdEnv) Config() IntegrationTestConfig { return e.config } // NewAdminClient builds a new connected admin client for this environment func (e *ProdEnv) NewAdminClient() (*AdminClient, error) { timeout := 20 * time.Second ctx, _ := context.WithTimeout(context.Background(), timeout) var clientOpts []option.ClientOption if endpoint := e.config.AdminEndpoint; endpoint != "" { clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) } return NewAdminClient(ctx, e.config.Project, e.config.Instance, clientOpts...) } // NewClient builds a connected data client for this environment func (e *ProdEnv) NewClient() (*Client, error) { timeout := 20 * time.Second ctx, _ := context.WithTimeout(context.Background(), timeout) var clientOpts []option.ClientOption if endpoint := e.config.DataEndpoint; endpoint != "" { clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) } return NewClient(ctx, e.config.Project, e.config.Instance, clientOpts...) 
} golang-google-cloud-0.9.0/bigtable/filter.go000066400000000000000000000216461312234511600207410ustar00rootroot00000000000000/* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package bigtable import ( "fmt" "strings" "time" btpb "google.golang.org/genproto/googleapis/bigtable/v2" ) // A Filter represents a row filter. type Filter interface { String() string proto() *btpb.RowFilter } // ChainFilters returns a filter that applies a sequence of filters. func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} } type chainFilter struct { sub []Filter } func (cf chainFilter) String() string { var ss []string for _, sf := range cf.sub { ss = append(ss, sf.String()) } return "(" + strings.Join(ss, " | ") + ")" } func (cf chainFilter) proto() *btpb.RowFilter { chain := &btpb.RowFilter_Chain{} for _, sf := range cf.sub { chain.Filters = append(chain.Filters, sf.proto()) } return &btpb.RowFilter{ Filter: &btpb.RowFilter_Chain_{chain}, } } // InterleaveFilters returns a filter that applies a set of filters in parallel // and interleaves the results. 
func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} } type interleaveFilter struct { sub []Filter } func (ilf interleaveFilter) String() string { var ss []string for _, sf := range ilf.sub { ss = append(ss, sf.String()) } return "(" + strings.Join(ss, " + ") + ")" } func (ilf interleaveFilter) proto() *btpb.RowFilter { inter := &btpb.RowFilter_Interleave{} for _, sf := range ilf.sub { inter.Filters = append(inter.Filters, sf.proto()) } return &btpb.RowFilter{ Filter: &btpb.RowFilter_Interleave_{inter}, } } // RowKeyFilter returns a filter that matches cells from rows whose // key matches the provided RE2 pattern. // See https://github.com/google/re2/wiki/Syntax for the accepted syntax. func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) } type rowKeyFilter string func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) } func (rkf rowKeyFilter) proto() *btpb.RowFilter { return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}} } // FamilyFilter returns a filter that matches cells whose family name // matches the provided RE2 pattern. // See https://github.com/google/re2/wiki/Syntax for the accepted syntax. func FamilyFilter(pattern string) Filter { return familyFilter(pattern) } type familyFilter string func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) } func (ff familyFilter) proto() *btpb.RowFilter { return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{string(ff)}} } // ColumnFilter returns a filter that matches cells whose column name // matches the provided RE2 pattern. // See https://github.com/google/re2/wiki/Syntax for the accepted syntax. 
func ColumnFilter(pattern string) Filter { return columnFilter(pattern) } type columnFilter string func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) } func (cf columnFilter) proto() *btpb.RowFilter { return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}} } // ValueFilter returns a filter that matches cells whose value // matches the provided RE2 pattern. // See https://github.com/google/re2/wiki/Syntax for the accepted syntax. func ValueFilter(pattern string) Filter { return valueFilter(pattern) } type valueFilter string func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) } func (vf valueFilter) proto() *btpb.RowFilter { return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte(vf)}} } // LatestNFilter returns a filter that matches the most recent N cells in each column. func LatestNFilter(n int) Filter { return latestNFilter(n) } type latestNFilter int32 func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) } func (lnf latestNFilter) proto() *btpb.RowFilter { return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}} } // StripValueFilter returns a filter that replaces each value with the empty string. func StripValueFilter() Filter { return stripValueFilter{} } type stripValueFilter struct{} func (stripValueFilter) String() string { return "strip_value()" } func (stripValueFilter) proto() *btpb.RowFilter { return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{true}} } // TimestampRangeFilter returns a filter that matches any cells whose timestamp is within the given time bounds. A zero // time means no bound. // The timestamp will be truncated to millisecond granularity. 
func TimestampRangeFilter(startTime time.Time, endTime time.Time) Filter { trf := timestampRangeFilter{} if !startTime.IsZero() { trf.startTime = Time(startTime) } if !endTime.IsZero() { trf.endTime = Time(endTime) } return trf } // TimestampRangeFilterMicros returns a filter that matches any cells whose timestamp is within the given time bounds, // specified in units of microseconds since 1 January 1970. A zero value for the end time is interpreted as no bound. // The timestamp will be truncated to millisecond granularity. func TimestampRangeFilterMicros(startTime Timestamp, endTime Timestamp) Filter { return timestampRangeFilter{startTime, endTime} } type timestampRangeFilter struct { startTime Timestamp endTime Timestamp } func (trf timestampRangeFilter) String() string { return fmt.Sprintf("timestamp_range(%s,%s)", trf.startTime, trf.endTime) } func (trf timestampRangeFilter) proto() *btpb.RowFilter { return &btpb.RowFilter{ Filter: &btpb.RowFilter_TimestampRangeFilter{ &btpb.TimestampRange{ int64(trf.startTime.TruncateToMilliseconds()), int64(trf.endTime.TruncateToMilliseconds()), }, }} } // ColumnRangeFilter returns a filter that matches a contiguous range of columns within a single // family, as specified by an inclusive start qualifier and exclusive end qualifier. 
func ColumnRangeFilter(family, start, end string) Filter { return columnRangeFilter{family, start, end} } type columnRangeFilter struct { family string start string end string } func (crf columnRangeFilter) String() string { return fmt.Sprintf("columnRangeFilter(%s,%s,%s)", crf.family, crf.start, crf.end) } func (crf columnRangeFilter) proto() *btpb.RowFilter { r := &btpb.ColumnRange{FamilyName: crf.family} if crf.start != "" { r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{[]byte(crf.start)} } if crf.end != "" { r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{[]byte(crf.end)} } return &btpb.RowFilter{&btpb.RowFilter_ColumnRangeFilter{r}} } // ValueRangeFilter returns a filter that matches cells with values that fall within // the given range, as specified by an inclusive start value and exclusive end value. func ValueRangeFilter(start, end []byte) Filter { return valueRangeFilter{start, end} } type valueRangeFilter struct { start []byte end []byte } func (vrf valueRangeFilter) String() string { return fmt.Sprintf("valueRangeFilter(%s,%s)", vrf.start, vrf.end) } func (vrf valueRangeFilter) proto() *btpb.RowFilter { r := &btpb.ValueRange{} if vrf.start != nil { r.StartValue = &btpb.ValueRange_StartValueClosed{vrf.start} } if vrf.end != nil { r.EndValue = &btpb.ValueRange_EndValueOpen{vrf.end} } return &btpb.RowFilter{&btpb.RowFilter_ValueRangeFilter{r}} } // ConditionFilter returns a filter that evaluates to one of two possible filters depending // on whether or not the given predicate filter matches at least one cell. // If the matched filter is nil then no results will be returned. // IMPORTANT NOTE: The predicate filter does not execute atomically with the // true and false filters, which may lead to inconsistent or unexpected // results. Additionally, condition filters have poor performance, especially // when filters are set for the false condition. 
func ConditionFilter(predicateFilter, trueFilter, falseFilter Filter) Filter { return conditionFilter{predicateFilter, trueFilter, falseFilter} } type conditionFilter struct { predicateFilter Filter trueFilter Filter falseFilter Filter } func (cf conditionFilter) String() string { return fmt.Sprintf("conditionFilter(%s,%s,%s)", cf.predicateFilter, cf.trueFilter, cf.falseFilter) } func (cf conditionFilter) proto() *btpb.RowFilter { var tf *btpb.RowFilter var ff *btpb.RowFilter if cf.trueFilter != nil { tf = cf.trueFilter.proto() } if cf.falseFilter != nil { ff = cf.falseFilter.proto() } return &btpb.RowFilter{ &btpb.RowFilter_Condition_{&btpb.RowFilter_Condition{ cf.predicateFilter.proto(), tf, ff, }}} } // TODO(dsymonds): More filters: sampling golang-google-cloud-0.9.0/bigtable/gc.go000066400000000000000000000105461312234511600200420ustar00rootroot00000000000000/* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package bigtable import ( "fmt" "strings" "time" durpb "github.com/golang/protobuf/ptypes/duration" bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2" ) // A GCPolicy represents a rule that determines which cells are eligible for garbage collection. type GCPolicy interface { String() string proto() *bttdpb.GcRule } // IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply. 
func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} } type intersectionPolicy struct { sub []GCPolicy } func (ip intersectionPolicy) String() string { var ss []string for _, sp := range ip.sub { ss = append(ss, sp.String()) } return "(" + strings.Join(ss, " && ") + ")" } func (ip intersectionPolicy) proto() *bttdpb.GcRule { inter := &bttdpb.GcRule_Intersection{} for _, sp := range ip.sub { inter.Rules = append(inter.Rules, sp.proto()) } return &bttdpb.GcRule{ Rule: &bttdpb.GcRule_Intersection_{inter}, } } // UnionPolicy returns a GC policy that applies when any of its sub-policies apply. func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} } type unionPolicy struct { sub []GCPolicy } func (up unionPolicy) String() string { var ss []string for _, sp := range up.sub { ss = append(ss, sp.String()) } return "(" + strings.Join(ss, " || ") + ")" } func (up unionPolicy) proto() *bttdpb.GcRule { union := &bttdpb.GcRule_Union{} for _, sp := range up.sub { union.Rules = append(union.Rules, sp.proto()) } return &bttdpb.GcRule{ Rule: &bttdpb.GcRule_Union_{union}, } } // MaxVersionsPolicy returns a GC policy that applies to all versions of a cell // except for the most recent n. func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) } type maxVersionsPolicy int func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) } func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule { return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}} } // MaxAgePolicy returns a GC policy that applies to all cells // older than the given age. 
func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) } type maxAgePolicy time.Duration var units = []struct { d time.Duration suffix string }{ {24 * time.Hour, "d"}, {time.Hour, "h"}, {time.Minute, "m"}, } func (ma maxAgePolicy) String() string { d := time.Duration(ma) for _, u := range units { if d%u.d == 0 { return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix) } } return fmt.Sprintf("age() > %d", d/time.Microsecond) } func (ma maxAgePolicy) proto() *bttdpb.GcRule { // This doesn't handle overflows, etc. // Fix this if people care about GC policies over 290 years. ns := time.Duration(ma).Nanoseconds() return &bttdpb.GcRule{ Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{ Seconds: ns / 1e9, Nanos: int32(ns % 1e9), }}, } } // GCRuleToString converts the given GcRule proto to a user-visible string. func GCRuleToString(rule *bttdpb.GcRule) string { if rule == nil { return "" } var ruleStr string if r, ok := rule.Rule.(*bttdpb.GcRule_MaxNumVersions); ok { ruleStr += MaxVersionsPolicy(int(r.MaxNumVersions)).String() } else if r, ok := rule.Rule.(*bttdpb.GcRule_MaxAge); ok { ruleStr += MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String() } else if r, ok := rule.Rule.(*bttdpb.GcRule_Intersection_); ok { var chunks []string for _, intRule := range r.Intersection.Rules { chunks = append(chunks, GCRuleToString(intRule)) } ruleStr += "(" + strings.Join(chunks, " && ") + ")" } else if r, ok := rule.Rule.(*bttdpb.GcRule_Union_); ok { var chunks []string for _, unionRule := range r.Union.Rules { chunks = append(chunks, GCRuleToString(unionRule)) } ruleStr += "(" + strings.Join(chunks, " || ") + ")" } return ruleStr } golang-google-cloud-0.9.0/bigtable/gc_test.go000066400000000000000000000026011312234511600210720ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package bigtable import ( "testing" "time" bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2" ) func TestGcRuleToString(t *testing.T) { intersection := IntersectionPolicy(MaxVersionsPolicy(5), MaxVersionsPolicy(10), MaxAgePolicy(16*time.Hour)) var tests = []struct { proto *bttdpb.GcRule want string }{ {MaxAgePolicy(72 * time.Hour).proto(), "age() > 3d"}, {MaxVersionsPolicy(5).proto(), "versions() > 5"}, {intersection.proto(), "(versions() > 5 && versions() > 10 && age() > 16h)"}, {UnionPolicy(intersection, MaxAgePolicy(72*time.Hour)).proto(), "((versions() > 5 && versions() > 10 && age() > 16h) || age() > 3d)"}, } for _, test := range tests { got := GCRuleToString(test.proto) if got != test.want { t.Errorf("got gc rule string: %v, wanted: %v", got, test.want) } } } golang-google-cloud-0.9.0/bigtable/internal/000077500000000000000000000000001312234511600207305ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigtable/internal/cbtconfig/000077500000000000000000000000001312234511600226665ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigtable/internal/cbtconfig/cbtconfig.go000066400000000000000000000157221312234511600251620ustar00rootroot00000000000000/* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package cbtconfig encapsulates common code for reading configuration from .cbtrc and gcloud. package cbtconfig import ( "bufio" "bytes" "crypto/tls" "crypto/x509" "encoding/json" "flag" "fmt" "io/ioutil" "log" "os" "os/exec" "path/filepath" "runtime" "strings" "time" "golang.org/x/oauth2" "google.golang.org/grpc/credentials" ) // Config represents a configuration. type Config struct { Project, Instance string // required Creds string // optional AdminEndpoint string // optional DataEndpoint string // optional CertFile string // optional TokenSource oauth2.TokenSource // derived TLSCreds credentials.TransportCredentials // derived } type RequiredFlags uint const NoneRequired RequiredFlags = 0 const ( ProjectRequired RequiredFlags = 1 << iota InstanceRequired ) const ProjectAndInstanceRequired RequiredFlags = ProjectRequired | InstanceRequired // RegisterFlags registers a set of standard flags for this config. // It should be called before flag.Parse. 
func (c *Config) RegisterFlags() { flag.StringVar(&c.Project, "project", c.Project, "project ID, if unset uses gcloud configured project") flag.StringVar(&c.Instance, "instance", c.Instance, "Cloud Bigtable instance") flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file") flag.StringVar(&c.AdminEndpoint, "admin-endpoint", c.AdminEndpoint, "Override the admin api endpoint") flag.StringVar(&c.DataEndpoint, "data-endpoint", c.DataEndpoint, "Override the data api endpoint") flag.StringVar(&c.CertFile, "cert-file", c.CertFile, "Override the TLS certificates file") } // CheckFlags checks that the required config values are set. func (c *Config) CheckFlags(required RequiredFlags) error { var missing []string if c.CertFile != "" { b, err := ioutil.ReadFile(c.CertFile) if err != nil { return fmt.Errorf("Failed to load certificates from %s: %v", c.CertFile, err) } cp := x509.NewCertPool() if !cp.AppendCertsFromPEM(b) { return fmt.Errorf("Failed to append certificates from %s", c.CertFile) } c.TLSCreds = credentials.NewTLS(&tls.Config{RootCAs: cp}) } if required != NoneRequired { c.SetFromGcloud() } if required&ProjectRequired != 0 && c.Project == "" { missing = append(missing, "-project") } if required&InstanceRequired != 0 && c.Instance == "" { missing = append(missing, "-instance") } if len(missing) > 0 { return fmt.Errorf("Missing %s", strings.Join(missing, " and ")) } return nil } // Filename returns the filename consulted for standard configuration. func Filename() string { // TODO(dsymonds): Might need tweaking for Windows. return filepath.Join(os.Getenv("HOME"), ".cbtrc") } // Load loads a .cbtrc file. // If the file is not present, an empty config is returned. 
func Load() (*Config, error) { filename := Filename() data, err := ioutil.ReadFile(filename) if err != nil { // silent fail if the file isn't there if os.IsNotExist(err) { return &Config{}, nil } return nil, fmt.Errorf("Reading %s: %v", filename, err) } c := new(Config) s := bufio.NewScanner(bytes.NewReader(data)) for s.Scan() { line := s.Text() i := strings.Index(line, "=") if i < 0 { return nil, fmt.Errorf("Bad line in %s: %q", filename, line) } key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:]) switch key { default: return nil, fmt.Errorf("Unknown key in %s: %q", filename, key) case "project": c.Project = val case "instance": c.Instance = val case "creds": c.Creds = val case "admin-endpoint": c.AdminEndpoint = val case "data-endpoint": c.DataEndpoint = val } } return c, s.Err() } type GcloudCredential struct { AccessToken string `json:"access_token"` Expiry time.Time `json:"token_expiry"` } func (cred *GcloudCredential) Token() *oauth2.Token { return &oauth2.Token{AccessToken: cred.AccessToken, TokenType: "Bearer", Expiry: cred.Expiry} } type GcloudConfig struct { Configuration struct { Properties struct { Core struct { Project string `json:"project"` } `json:"core"` } `json:"properties"` } `json:"configuration"` Credential GcloudCredential `json:"credential"` } type GcloudCmdTokenSource struct { Command string Args []string } // Token implements the oauth2.TokenSource interface func (g *GcloudCmdTokenSource) Token() (*oauth2.Token, error) { gcloudConfig, err := LoadGcloudConfig(g.Command, g.Args) if err != nil { return nil, err } return gcloudConfig.Credential.Token(), nil } // LoadGcloudConfig retrieves the gcloud configuration values we need use via the // 'config-helper' command func LoadGcloudConfig(gcloudCmd string, gcloudCmdArgs []string) (*GcloudConfig, error) { out, err := exec.Command(gcloudCmd, gcloudCmdArgs...).Output() if err != nil { return nil, fmt.Errorf("Could not retrieve gcloud configuration") } var gcloudConfig 
GcloudConfig if err := json.Unmarshal(out, &gcloudConfig); err != nil { return nil, fmt.Errorf("Could not parse gcloud configuration") } return &gcloudConfig, nil } // SetFromGcloud retrieves and sets any missing config values from the gcloud // configuration if possible possible func (c *Config) SetFromGcloud() error { if c.Creds == "" { c.Creds = os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") if c.Creds == "" { log.Printf("-creds flag unset, will use gcloud credential") } } else { os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", c.Creds) } if c.Project == "" { log.Printf("-project flag unset, will use gcloud active project") } if c.Creds != "" && c.Project != "" { return nil } gcloudCmd := "gcloud" if runtime.GOOS == "windows" { gcloudCmd = gcloudCmd + ".cmd" } gcloudCmdArgs := []string{"config", "config-helper", "--format=json(configuration.properties.core.project,credential)"} gcloudConfig, err := LoadGcloudConfig(gcloudCmd, gcloudCmdArgs) if err != nil { return err } if c.Project == "" && gcloudConfig.Configuration.Properties.Core.Project != "" { log.Printf("gcloud active project is \"%s\"", gcloudConfig.Configuration.Properties.Core.Project) c.Project = gcloudConfig.Configuration.Properties.Core.Project } if c.Creds == "" { c.TokenSource = oauth2.ReuseTokenSource( gcloudConfig.Credential.Token(), &GcloudCmdTokenSource{Command: gcloudCmd, Args: gcloudCmdArgs}) } return nil } golang-google-cloud-0.9.0/bigtable/internal/gax/000077500000000000000000000000001312234511600215075ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigtable/internal/gax/call_option.go000066400000000000000000000061671312234511600243530ustar00rootroot00000000000000/* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This is ia snapshot from github.com/googleapis/gax-go with minor modifications. package gax import ( "time" "google.golang.org/grpc/codes" ) type CallOption interface { Resolve(*CallSettings) } type callOptions []CallOption func (opts callOptions) Resolve(s *CallSettings) *CallSettings { for _, opt := range opts { opt.Resolve(s) } return s } // Encapsulates the call settings for a particular API call. type CallSettings struct { Timeout time.Duration RetrySettings RetrySettings } // Per-call configurable settings for retrying upon transient failure. type RetrySettings struct { RetryCodes map[codes.Code]bool BackoffSettings BackoffSettings } // Parameters to the exponential backoff algorithm for retrying. type BackoffSettings struct { DelayTimeoutSettings MultipliableDuration RPCTimeoutSettings MultipliableDuration } type MultipliableDuration struct { Initial time.Duration Max time.Duration Multiplier float64 } func (w CallSettings) Resolve(s *CallSettings) { s.Timeout = w.Timeout s.RetrySettings = w.RetrySettings s.RetrySettings.RetryCodes = make(map[codes.Code]bool, len(w.RetrySettings.RetryCodes)) for key, value := range w.RetrySettings.RetryCodes { s.RetrySettings.RetryCodes[key] = value } } type withRetryCodes []codes.Code func (w withRetryCodes) Resolve(s *CallSettings) { s.RetrySettings.RetryCodes = make(map[codes.Code]bool) for _, code := range w { s.RetrySettings.RetryCodes[code] = true } } // WithRetryCodes sets a list of Google API canonical error codes upon which a // retry should be attempted. 
func WithRetryCodes(retryCodes []codes.Code) CallOption { return withRetryCodes(retryCodes) } type withDelayTimeoutSettings MultipliableDuration func (w withDelayTimeoutSettings) Resolve(s *CallSettings) { s.RetrySettings.BackoffSettings.DelayTimeoutSettings = MultipliableDuration(w) } // WithDelayTimeoutSettings specifies: // - The initial delay time, in milliseconds, between the completion of // the first failed request and the initiation of the first retrying // request. // - The multiplier by which to increase the delay time between the // completion of failed requests, and the initiation of the subsequent // retrying request. // - The maximum delay time, in milliseconds, between requests. When this // value is reached, `RetryDelayMultiplier` will no longer be used to // increase delay time. func WithDelayTimeoutSettings(initial time.Duration, max time.Duration, multiplier float64) CallOption { return withDelayTimeoutSettings(MultipliableDuration{initial, max, multiplier}) } golang-google-cloud-0.9.0/bigtable/internal/gax/invoke.go000066400000000000000000000047041312234511600233360ustar00rootroot00000000000000/* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This is ia snapshot from github.com/googleapis/gax-go with minor modifications. 
package gax import ( "math/rand" "time" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "log" "os" ) var logger *log.Logger = log.New(os.Stderr, "", log.LstdFlags) // A user defined call stub. type APICall func(context.Context) error // scaleDuration returns the product of a and mult. func scaleDuration(a time.Duration, mult float64) time.Duration { ns := float64(a) * mult return time.Duration(ns) } // invokeWithRetry calls stub using an exponential backoff retry mechanism // based on the values provided in callSettings. func invokeWithRetry(ctx context.Context, stub APICall, callSettings CallSettings) error { retrySettings := callSettings.RetrySettings backoffSettings := callSettings.RetrySettings.BackoffSettings delay := backoffSettings.DelayTimeoutSettings.Initial for { // If the deadline is exceeded... if ctx.Err() != nil { return ctx.Err() } err := stub(ctx) code := grpc.Code(err) if code == codes.OK { return nil } if !retrySettings.RetryCodes[code] { return err } // Sleep a random amount up to the current delay d := time.Duration(rand.Int63n(int64(delay))) delayCtx, _ := context.WithTimeout(ctx, delay) logger.Printf("Retryable error: %v, retrying in %v", err, d) <-delayCtx.Done() delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier) if delay > backoffSettings.DelayTimeoutSettings.Max { delay = backoffSettings.DelayTimeoutSettings.Max } } } // Invoke calls stub with a child of context modified by the specified options. func Invoke(ctx context.Context, stub APICall, opts ...CallOption) error { settings := &CallSettings{} callOptions(opts).Resolve(settings) if len(settings.RetrySettings.RetryCodes) > 0 { return invokeWithRetry(ctx, stub, *settings) } return stub(ctx) } golang-google-cloud-0.9.0/bigtable/internal/gax/invoke_test.go000066400000000000000000000030431312234511600243700ustar00rootroot00000000000000/* Copyright 2015 Google Inc. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package gax import ( "testing" "time" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) func TestRandomizedDelays(t *testing.T) { max := 200 * time.Millisecond settings := []CallOption{ WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}), WithDelayTimeoutSettings(10*time.Millisecond, max, 1.5), } deadline := time.Now().Add(1 * time.Second) ctx, _ := context.WithDeadline(context.Background(), deadline) var invokeTime time.Time Invoke(ctx, func(childCtx context.Context) error { // Keep failing, make sure we never slept more than max (plus a fudge factor) if !invokeTime.IsZero() { if got, want := time.Since(invokeTime), max; got > (want + 20*time.Millisecond) { t.Logf("Slept too long. Got: %v, want: %v", got, max) } } invokeTime = time.Now() // Workaround for `go vet`: https://github.com/grpc/grpc-go/issues/90 errf := grpc.Errorf return errf(codes.Unavailable, "") }, settings...) } golang-google-cloud-0.9.0/bigtable/internal/option/000077500000000000000000000000001312234511600222405ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigtable/internal/option/option.go000066400000000000000000000027401312234511600241020ustar00rootroot00000000000000/* Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package option contains common code for dealing with client options. package option import ( "fmt" "os" "google.golang.org/api/option" "google.golang.org/grpc" ) // DefaultClientOptions returns the default client options to use for the // client's gRPC connection. func DefaultClientOptions(endpoint, scope, userAgent string) ([]option.ClientOption, error) { var o []option.ClientOption // Check the environment variables for the bigtable emulator. // Dial it directly and don't pass any credentials. if addr := os.Getenv("BIGTABLE_EMULATOR_HOST"); addr != "" { conn, err := grpc.Dial(addr, grpc.WithInsecure()) if err != nil { return nil, fmt.Errorf("emulator grpc.Dial: %v", err) } o = []option.ClientOption{option.WithGRPCConn(conn)} } else { o = []option.ClientOption{ option.WithEndpoint(endpoint), option.WithScopes(scope), option.WithUserAgent(userAgent), } } return o, nil } golang-google-cloud-0.9.0/bigtable/internal/stat/000077500000000000000000000000001312234511600217035ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigtable/internal/stat/stats.go000066400000000000000000000073641312234511600234020ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stat import ( "bytes" "encoding/csv" "fmt" "io" "math" "sort" "strconv" "text/tabwriter" "time" ) type byDuration []time.Duration func (data byDuration) Len() int { return len(data) } func (data byDuration) Swap(i, j int) { data[i], data[j] = data[j], data[i] } func (data byDuration) Less(i, j int) bool { return data[i] < data[j] } // quantile returns a value representing the kth of q quantiles. // May alter the order of data. func quantile(data []time.Duration, k, q int) (quantile time.Duration, ok bool) { if len(data) < 1 { return 0, false } if k > q { return 0, false } if k < 0 || q < 1 { return 0, false } sort.Sort(byDuration(data)) if k == 0 { return data[0], true } if k == q { return data[len(data)-1], true } bucketSize := float64(len(data)-1) / float64(q) i := float64(k) * bucketSize lower := int(math.Trunc(i)) var upper int if i > float64(lower) && lower+1 < len(data) { // If the quantile lies between two elements upper = lower + 1 } else { upper = lower } weightUpper := i - float64(lower) weightLower := 1 - weightUpper return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true } type Aggregate struct { Name string Count, Errors int Min, Median, Max time.Duration P75, P90, P95, P99 time.Duration // percentiles } // NewAggregate constructs an aggregate from latencies. Returns nil if latencies does not contain aggregateable data. 
func NewAggregate(name string, latencies []time.Duration, errorCount int) *Aggregate { agg := Aggregate{Name: name, Count: len(latencies), Errors: errorCount} if len(latencies) == 0 { return nil } var ok bool if agg.Min, ok = quantile(latencies, 0, 2); !ok { return nil } if agg.Median, ok = quantile(latencies, 1, 2); !ok { return nil } if agg.Max, ok = quantile(latencies, 2, 2); !ok { return nil } if agg.P75, ok = quantile(latencies, 75, 100); !ok { return nil } if agg.P90, ok = quantile(latencies, 90, 100); !ok { return nil } if agg.P95, ok = quantile(latencies, 95, 100); !ok { return nil } if agg.P99, ok = quantile(latencies, 99, 100); !ok { return nil } return &agg } func (agg *Aggregate) String() string { if agg == nil { return "no data" } var buf bytes.Buffer tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding fmt.Fprintf(tw, "min:\t%v\nmedian:\t%v\nmax:\t%v\n95th percentile:\t%v\n99th percentile:\t%v\n", agg.Min, agg.Median, agg.Max, agg.P95, agg.P99) tw.Flush() return buf.String() } // WriteCSV writes a csv file to the given Writer, // with a header row and one row per aggregate. func WriteCSV(aggs []*Aggregate, iow io.Writer) error { w := csv.NewWriter(iow) defer w.Flush() err := w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"}) if err != nil { return err } for _, agg := range aggs { err = w.Write([]string{ agg.Name, strconv.Itoa(agg.Count), strconv.Itoa(agg.Errors), agg.Min.String(), agg.Median.String(), agg.Max.String(), agg.P75.String(), agg.P90.String(), agg.P95.String(), agg.P99.String(), }) if err != nil { return err } } return nil } golang-google-cloud-0.9.0/bigtable/reader.go000066400000000000000000000147441312234511600207170ustar00rootroot00000000000000/* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package bigtable import ( "bytes" "fmt" btpb "google.golang.org/genproto/googleapis/bigtable/v2" ) // A Row is returned by ReadRows. The map is keyed by column family (the prefix // of the column name before the colon). The values are the returned ReadItems // for that column family in the order returned by Read. type Row map[string][]ReadItem // Key returns the row's key, or "" if the row is empty. func (r Row) Key() string { for _, items := range r { if len(items) > 0 { return items[0].Row } } return "" } // A ReadItem is returned by Read. A ReadItem contains data from a specific row and column. type ReadItem struct { Row, Column string Timestamp Timestamp Value []byte } // The current state of the read rows state machine. type rrState int64 const ( newRow rrState = iota rowInProgress cellInProgress ) // chunkReader handles cell chunks from the read rows response and combines // them into full Rows. type chunkReader struct { state rrState curKey []byte curFam string curQual []byte curTS int64 curVal []byte curRow Row lastKey string } // newChunkReader returns a new chunkReader for handling read rows responses. func newChunkReader() *chunkReader { return &chunkReader{state: newRow} } // Process takes a cell chunk and returns a new Row if the given chunk // completes a Row, or nil otherwise. 
func (cr *chunkReader) Process(cc *btpb.ReadRowsResponse_CellChunk) (Row, error) { var row Row switch cr.state { case newRow: if err := cr.validateNewRow(cc); err != nil { return nil, err } cr.curRow = make(Row) cr.curKey = cc.RowKey cr.curFam = cc.FamilyName.Value cr.curQual = cc.Qualifier.Value cr.curTS = cc.TimestampMicros row = cr.handleCellValue(cc) case rowInProgress: if err := cr.validateRowInProgress(cc); err != nil { return nil, err } if cc.GetResetRow() { cr.resetToNewRow() return nil, nil } if cc.FamilyName != nil { cr.curFam = cc.FamilyName.Value } if cc.Qualifier != nil { cr.curQual = cc.Qualifier.Value } cr.curTS = cc.TimestampMicros row = cr.handleCellValue(cc) case cellInProgress: if err := cr.validateCellInProgress(cc); err != nil { return nil, err } if cc.GetResetRow() { cr.resetToNewRow() return nil, nil } row = cr.handleCellValue(cc) } return row, nil } // Close must be called after all cell chunks from the response // have been processed. An error will be returned if the reader is // in an invalid state, in which case the error should be propagated to the caller. func (cr *chunkReader) Close() error { if cr.state != newRow { return fmt.Errorf("invalid state for end of stream %q", cr.state) } return nil } // handleCellValue returns a Row if the cell value includes a commit, otherwise nil. func (cr *chunkReader) handleCellValue(cc *btpb.ReadRowsResponse_CellChunk) Row { if cc.ValueSize > 0 { // ValueSize is specified so expect a split value of ValueSize bytes if cr.curVal == nil { cr.curVal = make([]byte, 0, cc.ValueSize) } cr.curVal = append(cr.curVal, cc.Value...) cr.state = cellInProgress } else { // This cell is either the complete value or the last chunk of a split if cr.curVal == nil { cr.curVal = cc.Value } else { cr.curVal = append(cr.curVal, cc.Value...) 
} cr.finishCell() if cc.GetCommitRow() { return cr.commitRow() } else { cr.state = rowInProgress } } return nil } func (cr *chunkReader) finishCell() { ri := ReadItem{ Row: string(cr.curKey), Column: fmt.Sprintf("%s:%s", cr.curFam, cr.curQual), Timestamp: Timestamp(cr.curTS), Value: cr.curVal, } cr.curRow[cr.curFam] = append(cr.curRow[cr.curFam], ri) cr.curVal = nil } func (cr *chunkReader) commitRow() Row { row := cr.curRow cr.lastKey = cr.curRow.Key() cr.resetToNewRow() return row } func (cr *chunkReader) resetToNewRow() { cr.curKey = nil cr.curFam = "" cr.curQual = nil cr.curVal = nil cr.curRow = nil cr.curTS = 0 cr.state = newRow } func (cr *chunkReader) validateNewRow(cc *btpb.ReadRowsResponse_CellChunk) error { if cc.GetResetRow() { return fmt.Errorf("reset_row not allowed between rows") } if cc.RowKey == nil || cc.FamilyName == nil || cc.Qualifier == nil { return fmt.Errorf("missing key field for new row %v", cc) } if cr.lastKey != "" && cr.lastKey >= string(cc.RowKey) { return fmt.Errorf("out of order row key: %q, %q", cr.lastKey, string(cc.RowKey)) } return nil } func (cr *chunkReader) validateRowInProgress(cc *btpb.ReadRowsResponse_CellChunk) error { if err := cr.validateRowStatus(cc); err != nil { return err } if cc.RowKey != nil && !bytes.Equal(cc.RowKey, cr.curKey) { return fmt.Errorf("received new row key %q during existing row %q", cc.RowKey, cr.curKey) } if cc.FamilyName != nil && cc.Qualifier == nil { return fmt.Errorf("family name %q specified without a qualifier", cc.FamilyName) } return nil } func (cr *chunkReader) validateCellInProgress(cc *btpb.ReadRowsResponse_CellChunk) error { if err := cr.validateRowStatus(cc); err != nil { return err } if cr.curVal == nil { return fmt.Errorf("no cached cell while CELL_IN_PROGRESS %v", cc) } if cc.GetResetRow() == false && cr.isAnyKeyPresent(cc) { return fmt.Errorf("cell key components found while CELL_IN_PROGRESS %v", cc) } return nil } func (cr *chunkReader) isAnyKeyPresent(cc 
*btpb.ReadRowsResponse_CellChunk) bool { return cc.RowKey != nil || cc.FamilyName != nil || cc.Qualifier != nil || cc.TimestampMicros != 0 } // Validate a RowStatus, commit or reset, if present. func (cr *chunkReader) validateRowStatus(cc *btpb.ReadRowsResponse_CellChunk) error { // Resets can't be specified with any other part of a cell if cc.GetResetRow() && (cr.isAnyKeyPresent(cc) || cc.Value != nil || cc.ValueSize != 0 || cc.Labels != nil) { return fmt.Errorf("reset must not be specified with other fields %v", cc) } if cc.GetCommitRow() && cc.ValueSize > 0 { return fmt.Errorf("commit row found in between chunks in a cell") } return nil } golang-google-cloud-0.9.0/bigtable/reader_test.go000066400000000000000000000226071312234511600217530ustar00rootroot00000000000000/* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package bigtable import ( "encoding/json" "fmt" "io/ioutil" "reflect" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes/wrappers" btspb "google.golang.org/genproto/googleapis/bigtable/v2" ) // Indicates that a field in the proto should be omitted, rather than included // as a wrapped empty string. 
const nilStr = "<>" func TestSingleCell(t *testing.T) { cr := newChunkReader() // All in one cell row, err := cr.Process(cc("rk", "fm", "col", 1, "value", 0, true)) if err != nil { t.Fatalf("Processing chunk: %v", err) } if row == nil { t.Fatalf("Missing row") } if len(row["fm"]) != 1 { t.Fatalf("Family name length mismatch %d, %d", 1, len(row["fm"])) } want := []ReadItem{ri("rk", "fm", "col", 1, "value")} if !reflect.DeepEqual(row["fm"], want) { t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm"], want) } if err := cr.Close(); err != nil { t.Fatalf("Close: %v", err) } } func TestMultipleCells(t *testing.T) { cr := newChunkReader() cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false)) cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false)) cr.Process(cc("rs", "fm2", "col1", 0, "val4", 0, false)) row, err := cr.Process(cc("rs", "fm2", "col2", 1, "extralongval5", 0, true)) if err != nil { t.Fatalf("Processing chunk: %v", err) } if row == nil { t.Fatalf("Missing row") } want := []ReadItem{ ri("rs", "fm1", "col1", 0, "val1"), ri("rs", "fm1", "col1", 1, "val2"), ri("rs", "fm1", "col2", 0, "val3"), } if !reflect.DeepEqual(row["fm1"], want) { t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) } want = []ReadItem{ ri("rs", "fm2", "col1", 0, "val4"), ri("rs", "fm2", "col2", 1, "extralongval5"), } if !reflect.DeepEqual(row["fm2"], want) { t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) } if err := cr.Close(); err != nil { t.Fatalf("Close: %v", err) } } func TestSplitCells(t *testing.T) { cr := newChunkReader() cr.Process(cc("rs", "fm1", "col1", 0, "hello ", 11, false)) cr.Process(ccData("world", 0, false)) row, err := cr.Process(cc("rs", "fm1", "col2", 0, "val2", 0, true)) if err != nil { t.Fatalf("Processing chunk: %v", err) } if row == nil { t.Fatalf("Missing row") } want := []ReadItem{ ri("rs", "fm1", "col1", 0, "hello world"), ri("rs", "fm1", "col2", 0, 
"val2"), } if !reflect.DeepEqual(row["fm1"], want) { t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) } if err := cr.Close(); err != nil { t.Fatalf("Close: %v", err) } } func TestMultipleRows(t *testing.T) { cr := newChunkReader() row, err := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true)) if err != nil { t.Fatalf("Processing chunk: %v", err) } want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} if !reflect.DeepEqual(row["fm1"], want) { t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) } row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true)) if err != nil { t.Fatalf("Processing chunk: %v", err) } want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} if !reflect.DeepEqual(row["fm2"], want) { t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) } if err := cr.Close(); err != nil { t.Fatalf("Close: %v", err) } } func TestBlankQualifier(t *testing.T) { cr := newChunkReader() row, err := cr.Process(cc("rs1", "fm1", "", 1, "val1", 0, true)) if err != nil { t.Fatalf("Processing chunk: %v", err) } want := []ReadItem{ri("rs1", "fm1", "", 1, "val1")} if !reflect.DeepEqual(row["fm1"], want) { t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) } row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true)) if err != nil { t.Fatalf("Processing chunk: %v", err) } want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} if !reflect.DeepEqual(row["fm2"], want) { t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) } if err := cr.Close(); err != nil { t.Fatalf("Close: %v", err) } } func TestReset(t *testing.T) { cr := newChunkReader() cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false)) cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false)) cr.Process(ccReset()) row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true)) want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} if 
!reflect.DeepEqual(row["fm1"], want) { t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want) } if err := cr.Close(); err != nil { t.Fatalf("Close: %v", err) } } func TestNewFamEmptyQualifier(t *testing.T) { cr := newChunkReader() cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) _, err := cr.Process(cc(nilStr, "fm2", nilStr, 0, "val2", 0, true)) if err == nil { t.Fatalf("Expected error on second chunk with no qualifier set") } } // The read rows acceptance test reads a json file specifying a number of tests, // each consisting of one or more cell chunk text protos and one or more resulting // cells or errors. type AcceptanceTest struct { Tests []TestCase `json:"tests"` } type TestCase struct { Name string `json:"name"` Chunks []string `json:"chunks"` Results []TestResult `json:"results"` } type TestResult struct { RK string `json:"rk"` FM string `json:"fm"` Qual string `json:"qual"` TS int64 `json:"ts"` Value string `json:"value"` Error bool `json:"error"` // If true, expect an error. Ignore any other field. 
} func TestAcceptance(t *testing.T) { testJson, err := ioutil.ReadFile("./testdata/read-rows-acceptance-test.json") if err != nil { t.Fatalf("could not open acceptance test file %v", err) } var accTest AcceptanceTest err = json.Unmarshal(testJson, &accTest) if err != nil { t.Fatalf("could not parse acceptance test file: %v", err) } for _, test := range accTest.Tests { runTestCase(t, test) } } func runTestCase(t *testing.T, test TestCase) { // Increment an index into the result array as we get results cr := newChunkReader() var results []TestResult var seenErr bool for _, chunkText := range test.Chunks { // Parse and pass each cell chunk to the ChunkReader cc := &btspb.ReadRowsResponse_CellChunk{} err := proto.UnmarshalText(chunkText, cc) if err != nil { t.Errorf("[%s] failed to unmarshal text proto: %s\n%s", test.Name, chunkText, err) return } row, err := cr.Process(cc) if err != nil { results = append(results, TestResult{Error: true}) seenErr = true break } else { // Turn the Row into TestResults for fm, ris := range row { for _, ri := range ris { tr := TestResult{ RK: ri.Row, FM: fm, Qual: strings.Split(ri.Column, ":")[1], TS: int64(ri.Timestamp), Value: string(ri.Value), } results = append(results, tr) } } } } // Only Close if we don't have an error yet, otherwise Close: is expected. 
if !seenErr { err := cr.Close() if err != nil { results = append(results, TestResult{Error: true}) } } got := toSet(results) want := toSet(test.Results) if !reflect.DeepEqual(got, want) { t.Fatalf("[%s]: got: %v\nwant: %v\n", test.Name, got, want) } } func toSet(res []TestResult) map[TestResult]bool { set := make(map[TestResult]bool) for _, tr := range res { set[tr] = true } return set } // ri returns a ReadItem for the given components func ri(rk string, fm string, qual string, ts int64, val string) ReadItem { return ReadItem{Row: rk, Column: fmt.Sprintf("%s:%s", fm, qual), Value: []byte(val), Timestamp: Timestamp(ts)} } // cc returns a CellChunk proto func cc(rk string, fm string, qual string, ts int64, val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk { // The components of the cell key are wrapped and can be null or empty var rkWrapper []byte if rk == nilStr { rkWrapper = nil } else { rkWrapper = []byte(rk) } var fmWrapper *wrappers.StringValue if fm != nilStr { fmWrapper = &wrappers.StringValue{Value: fm} } else { fmWrapper = nil } var qualWrapper *wrappers.BytesValue if qual != nilStr { qualWrapper = &wrappers.BytesValue{Value: []byte(qual)} } else { qualWrapper = nil } return &btspb.ReadRowsResponse_CellChunk{ RowKey: rkWrapper, FamilyName: fmWrapper, Qualifier: qualWrapper, TimestampMicros: ts, Value: []byte(val), ValueSize: size, RowStatus: &btspb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: commit}} } // ccData returns a CellChunk with only a value and size func ccData(val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk { return cc(nilStr, nilStr, nilStr, 0, val, size, commit) } // ccReset returns a CellChunk with RestRow set to true func ccReset() *btspb.ReadRowsResponse_CellChunk { return &btspb.ReadRowsResponse_CellChunk{ RowStatus: &btspb.ReadRowsResponse_CellChunk_ResetRow{ResetRow: true}} } 
golang-google-cloud-0.9.0/bigtable/retry_test.go000066400000000000000000000260651312234511600216600ustar00rootroot00000000000000/* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package bigtable import ( "reflect" "strings" "testing" "time" "cloud.google.com/go/bigtable/bttest" "github.com/golang/protobuf/ptypes/wrappers" "golang.org/x/net/context" "google.golang.org/api/option" btpb "google.golang.org/genproto/googleapis/bigtable/v2" rpcpb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) { srv, err := bttest.NewServer("127.0.0.1:0", opt...) 
if err != nil { return nil, nil, err } conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) if err != nil { return nil, nil, err } client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn)) if err != nil { return nil, nil, err } adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn)) if err != nil { return nil, nil, err } if err := adminClient.CreateTable(context.Background(), "table"); err != nil { return nil, nil, err } if err := adminClient.CreateColumnFamily(context.Background(), "table", "cf"); err != nil { return nil, nil, err } t := client.Open("table") cleanupFunc := func() { adminClient.Close() client.Close() srv.Close() } return t, cleanupFunc, nil } func TestRetryApply(t *testing.T) { ctx := context.Background() errCount := 0 code := codes.Unavailable // Will be retried // Intercept requests and return an error or defer to the underlying handler errInjector := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { if strings.HasSuffix(info.FullMethod, "MutateRow") && errCount < 3 { errCount++ return nil, grpc.Errorf(code, "") } return handler(ctx, req) } tbl, cleanup, err := setupFakeServer(grpc.UnaryInterceptor(errInjector)) defer cleanup() if err != nil { t.Fatalf("fake server setup: %v", err) } mut := NewMutation() mut.Set("cf", "col", 1, []byte("val")) if err := tbl.Apply(ctx, "row1", mut); err != nil { t.Errorf("applying single mutation with retries: %v", err) } row, err := tbl.ReadRow(ctx, "row1") if err != nil { t.Errorf("reading single value with retries: %v", err) } if row == nil { t.Errorf("applying single mutation with retries: could not read back row") } code = codes.FailedPrecondition // Won't be retried errCount = 0 if err := tbl.Apply(ctx, "row", mut); err == nil { t.Errorf("applying single mutation with no retries: no error") } // Check and mutate mutTrue := NewMutation() 
mutTrue.DeleteRow() mutFalse := NewMutation() mutFalse.Set("cf", "col", 1, []byte("val")) condMut := NewCondMutation(ValueFilter("."), mutTrue, mutFalse) errCount = 0 code = codes.Unavailable // Will be retried if err := tbl.Apply(ctx, "row1", condMut); err != nil { t.Errorf("conditionally mutating row with retries: %v", err) } row, err = tbl.ReadRow(ctx, "row1") // row1 already in the table if err != nil { t.Errorf("reading single value after conditional mutation: %v", err) } if row != nil { t.Errorf("reading single value after conditional mutation: row not deleted") } errCount = 0 code = codes.FailedPrecondition // Won't be retried if err := tbl.Apply(ctx, "row", condMut); err == nil { t.Errorf("conditionally mutating row with no retries: no error") } } func TestRetryApplyBulk(t *testing.T) { ctx := context.Background() // Intercept requests and delegate to an interceptor defined by the test case errCount := 0 var f func(grpc.ServerStream) error errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { if strings.HasSuffix(info.FullMethod, "MutateRows") { return f(ss) } return handler(ctx, ss) } tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector)) defer cleanup() if err != nil { t.Fatalf("fake server setup: %v", err) } errCount = 0 // Test overall request failure and retries f = func(ss grpc.ServerStream) error { if errCount < 3 { errCount++ return grpc.Errorf(codes.Aborted, "") } return nil } mut := NewMutation() mut.Set("cf", "col", 1, []byte{}) errors, err := tbl.ApplyBulk(ctx, []string{"row2"}, []*Mutation{mut}) if errors != nil || err != nil { t.Errorf("bulk with request failure: got: %v, %v, want: nil", errors, err) } // Test failures and retries in one request errCount = 0 m1 := NewMutation() m1.Set("cf", "col", 1, []byte{}) m2 := NewMutation() m2.Set("cf", "col2", 1, []byte{}) m3 := NewMutation() m3.Set("cf", "col3", 1, []byte{}) f = func(ss grpc.ServerStream) error { 
var err error req := new(btpb.MutateRowsRequest) ss.RecvMsg(req) switch errCount { case 0: // Retryable request failure err = grpc.Errorf(codes.Unavailable, "") case 1: // Two mutations fail writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted) err = nil case 2: // Two failures were retried. One will succeed. if want, got := 2, len(req.Entries); want != got { t.Errorf("2 bulk retries, got: %d, want %d", got, want) } writeMutateRowsResponse(ss, codes.OK, codes.Aborted) err = nil case 3: // One failure was retried and will succeed. if want, got := 1, len(req.Entries); want != got { t.Errorf("1 bulk retry, got: %d, want %d", got, want) } writeMutateRowsResponse(ss, codes.OK) err = nil } errCount++ return err } errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3}) if errors != nil || err != nil { t.Errorf("bulk with retries: got: %v, %v, want: nil", errors, err) } // Test unretryable errors niMut := NewMutation() niMut.Set("cf", "col", ServerTime, []byte{}) // Non-idempotent errCount = 0 f = func(ss grpc.ServerStream) error { var err error req := new(btpb.MutateRowsRequest) ss.RecvMsg(req) switch errCount { case 0: // Give non-idempotent mutation a retryable error code. // Nothing should be retried. 
writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted) err = nil case 1: t.Errorf("unretryable errors: got one retry, want no retries") } errCount++ return err } errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut}) if err != nil { t.Errorf("unretryable errors: request failed %v") } want := []error{ grpc.Errorf(codes.FailedPrecondition, ""), grpc.Errorf(codes.Aborted, ""), } if !reflect.DeepEqual(want, errors) { t.Errorf("unretryable errors: got: %v, want: %v", errors, want) } // Test individual errors and a deadline exceeded f = func(ss grpc.ServerStream) error { writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted) return nil } ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond) errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3}) wantErr := context.DeadlineExceeded if wantErr != err { t.Errorf("deadline exceeded error: got: %v, want: %v", err, wantErr) } if errors != nil { t.Errorf("deadline exceeded errors: got: %v, want: nil", err) } } func writeMutateRowsResponse(ss grpc.ServerStream, codes ...codes.Code) error { res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(codes))} for i, code := range codes { res.Entries[i] = &btpb.MutateRowsResponse_Entry{ Index: int64(i), Status: &rpcpb.Status{Code: int32(code), Message: ""}, } } return ss.SendMsg(res) } func TestRetainRowsAfter(t *testing.T) { prevRowRange := NewRange("a", "z") prevRowKey := "m" want := NewRange("m\x00", "z") got := prevRowRange.retainRowsAfter(prevRowKey) if !reflect.DeepEqual(want, got) { t.Errorf("range retry: got %v, want %v", got, want) } prevRowRangeList := RowRangeList{NewRange("a", "d"), NewRange("e", "g"), NewRange("h", "l")} prevRowKey = "f" wantRowRangeList := RowRangeList{NewRange("f\x00", "g"), NewRange("h", "l")} got = prevRowRangeList.retainRowsAfter(prevRowKey) if !reflect.DeepEqual(wantRowRangeList, got) { t.Errorf("range list retry: got 
%v, want %v", got, wantRowRangeList) } prevRowList := RowList{"a", "b", "c", "d", "e", "f"} prevRowKey = "b" wantList := RowList{"c", "d", "e", "f"} got = prevRowList.retainRowsAfter(prevRowKey) if !reflect.DeepEqual(wantList, got) { t.Errorf("list retry: got %v, want %v", got, wantList) } } func TestRetryReadRows(t *testing.T) { ctx := context.Background() // Intercept requests and delegate to an interceptor defined by the test case errCount := 0 var f func(grpc.ServerStream) error errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { if strings.HasSuffix(info.FullMethod, "ReadRows") { return f(ss) } return handler(ctx, ss) } tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector)) defer cleanup() if err != nil { t.Fatalf("fake server setup: %v", err) } errCount = 0 // Test overall request failure and retries f = func(ss grpc.ServerStream) error { var err error req := new(btpb.ReadRowsRequest) ss.RecvMsg(req) switch errCount { case 0: // Retryable request failure err = grpc.Errorf(codes.Unavailable, "") case 1: // Write two rows then error if want, got := "a", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got { t.Errorf("first retry, no data received yet: got %q, want %q", got, want) } writeReadRowsResponse(ss, "a", "b") err = grpc.Errorf(codes.Unavailable, "") case 2: // Retryable request failure if want, got := "b\x00", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got { t.Errorf("2 range retries: got %q, want %q", got, want) } err = grpc.Errorf(codes.Unavailable, "") case 3: // Write two more rows writeReadRowsResponse(ss, "c", "d") err = nil } errCount++ return err } var got []string tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool { got = append(got, r.Key()) return true }) want := []string{"a", "b", "c", "d"} if !reflect.DeepEqual(got, want) { t.Errorf("retry range integration: got %v, want %v", got, want) } } func writeReadRowsResponse(ss 
grpc.ServerStream, rowKeys ...string) error { var chunks []*btpb.ReadRowsResponse_CellChunk for _, key := range rowKeys { chunks = append(chunks, &btpb.ReadRowsResponse_CellChunk{ RowKey: []byte(key), FamilyName: &wrappers.StringValue{Value: "fm"}, Qualifier: &wrappers.BytesValue{Value: []byte("col")}, RowStatus: &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true}, }) } return ss.SendMsg(&btpb.ReadRowsResponse{Chunks: chunks}) } golang-google-cloud-0.9.0/bigtable/testdata/000077500000000000000000000000001312234511600207255ustar00rootroot00000000000000golang-google-cloud-0.9.0/bigtable/testdata/read-rows-acceptance-test.json000066400000000000000000001065121312234511600265710ustar00rootroot00000000000000{ "tests": [ { "name": "invalid - no commit", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" ], "results": [ { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", "label": "", "error": true } ] }, { "name": "invalid - no cell key before commit", "chunks": [ "commit_row: true\n" ], "results": [ { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", "label": "", "error": true } ] }, { "name": "invalid - no cell key before value", "chunks": [ "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" ], "results": [ { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", "label": "", "error": true } ] }, { "name": "invalid - new col family must specify qualifier", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "family_name: \u003c\n value: \"B\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" ], "results": [ { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", "label": "", "error": true } ] }, { "name": "bare commit implies ts=0", "chunks": [ "row_key: 
\"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", "commit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "", "error": false }, { "rk": "RK", "fm": "A", "qual": "C", "ts": 0, "value": "", "label": "", "error": false } ] }, { "name": "simple row with timestamp", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "", "error": false } ] }, { "name": "missing timestamp, implied ts=0", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 0, "value": "value-VAL", "label": "", "error": false } ] }, { "name": "empty cell value", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 0, "value": "", "label": "", "error": false } ] }, { "name": "two unsplit cells", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 101, "value": "value-VAL_1", "label": "", "error": false }, { "rk": "RK", "fm": "A", "qual": "C", "ts": 102, "value": "value-VAL_2", "label": "", "error": false } ] }, { "name": "two qualifiers", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: 
\"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 101, "value": "value-VAL_1", "label": "", "error": false }, { "rk": "RK", "fm": "A", "qual": "D", "ts": 102, "value": "value-VAL_2", "label": "", "error": false } ] }, { "name": "two families", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 101, "value": "value-VAL_1", "label": "", "error": false }, { "rk": "RK", "fm": "B", "qual": "E", "ts": 102, "value": "value-VAL_2", "label": "", "error": false } ] }, { "name": "with labels", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "timestamp_micros: 102\nlabels: \"L_2\"\nvalue: \"value-VAL_2\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 101, "value": "value-VAL_1", "label": "L_1", "error": false }, { "rk": "RK", "fm": "A", "qual": "C", "ts": 102, "value": "value-VAL_2", "label": "L_2", "error": false } ] }, { "name": "split cell, bare commit", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"alue-VAL\"\ncommit_row: false\n", "commit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "", "error": false }, { "rk": "RK", "fm": "A", 
"qual": "C", "ts": 0, "value": "", "label": "", "error": false } ] }, { "name": "split cell", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"alue-VAL\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "", "error": false } ] }, { "name": "split four ways", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", "value: \"l\"\nvalue_size: 10\ncommit_row: false\n", "value: \"ue-VAL\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "L", "error": false } ] }, { "name": "two split cells", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"alue-VAL_1\"\ncommit_row: false\n", "timestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"alue-VAL_2\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 101, "value": "value-VAL_1", "label": "", "error": false }, { "rk": "RK", "fm": "A", "qual": "C", "ts": 102, "value": "value-VAL_2", "label": "", "error": false } ] }, { "name": "multi-qualifier splits", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"alue-VAL_1\"\ncommit_row: false\n", "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"alue-VAL_2\"\ncommit_row: 
true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 101, "value": "value-VAL_1", "label": "", "error": false }, { "rk": "RK", "fm": "A", "qual": "D", "ts": 102, "value": "value-VAL_2", "label": "", "error": false } ] }, { "name": "multi-qualifier multi-split", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", "value: \"lue-VAL_1\"\ncommit_row: false\n", "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", "value: \"lue-VAL_2\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 101, "value": "value-VAL_1", "label": "", "error": false }, { "rk": "RK", "fm": "A", "qual": "D", "ts": 102, "value": "value-VAL_2", "label": "", "error": false } ] }, { "name": "multi-family split", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"alue-VAL_1\"\ncommit_row: false\n", "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"alue-VAL_2\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 101, "value": "value-VAL_1", "label": "", "error": false }, { "rk": "RK", "fm": "B", "qual": "E", "ts": 102, "value": "value-VAL_2", "label": "", "error": false } ] }, { "name": "invalid - no commit between rows", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", "row_key: \"RK_2\"\nfamily_name: \u003c\n value: 
\"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" ], "results": [ { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", "label": "", "error": true } ] }, { "name": "invalid - no commit after first row", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" ], "results": [ { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", "label": "", "error": true } ] }, { "name": "invalid - last row missing commit", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" ], "results": [ { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "", "error": false }, { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", "label": "", "error": true } ] }, { "name": "invalid - duplicate row key", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" ], "results": [ { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "", "error": false }, { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", "label": "", "error": true } ] }, { "name": "invalid - new row missing row 
key", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" ], "results": [ { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "", "error": false }, { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", "label": "", "error": true } ] }, { "name": "two rows", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" ], "results": [ { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "", "error": false }, { "rk": "RK_2", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "", "error": false } ] }, { "name": "two rows implicit timestamp", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n", "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" ], "results": [ { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 0, "value": "value-VAL", "label": "", "error": false }, { "rk": "RK_2", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "", "error": false } ] }, { "name": "two rows empty value", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n", "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: 
\"value-VAL\"\ncommit_row: true\n" ], "results": [ { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 0, "value": "", "label": "", "error": false }, { "rk": "RK_2", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "", "error": false } ] }, { "name": "two rows, one with multiple cells", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" ], "results": [ { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 101, "value": "value-VAL_1", "label": "", "error": false }, { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 102, "value": "value-VAL_2", "label": "", "error": false }, { "rk": "RK_2", "fm": "B", "qual": "D", "ts": 103, "value": "value-VAL_3", "label": "", "error": false } ] }, { "name": "two rows, multiple cells", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", "qualifier: \u003c\n value: \"F\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" ], "results": [ { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 101, "value": "value-VAL_1", "label": "", "error": false }, { "rk": "RK_1", "fm": "A", "qual": "D", "ts": 102, "value": "value-VAL_2", "label": "", "error": false }, { "rk": "RK_2", "fm": "B", "qual": "E", "ts": 103, "value": "value-VAL_3", "label": 
"", "error": false }, { "rk": "RK_2", "fm": "B", "qual": "F", "ts": 104, "value": "value-VAL_4", "label": "", "error": false } ] }, { "name": "two rows, multiple cells, multiple families", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"M\"\n\u003e\nqualifier: \u003c\n value: \"O\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", "family_name: \u003c\n value: \"N\"\n\u003e\nqualifier: \u003c\n value: \"P\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" ], "results": [ { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 101, "value": "value-VAL_1", "label": "", "error": false }, { "rk": "RK_1", "fm": "B", "qual": "E", "ts": 102, "value": "value-VAL_2", "label": "", "error": false }, { "rk": "RK_2", "fm": "M", "qual": "O", "ts": 103, "value": "value-VAL_3", "label": "", "error": false }, { "rk": "RK_2", "fm": "N", "qual": "P", "ts": 104, "value": "value-VAL_4", "label": "", "error": false } ] }, { "name": "two rows, four cells, 2 labels", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nlabels: \"L_3\"\nvalue: \"value-VAL_3\"\ncommit_row: false\n", "timestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" ], "results": [ { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 101, "value": "value-VAL_1", "label": "L_1", "error": 
false }, { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 102, "value": "value-VAL_2", "label": "", "error": false }, { "rk": "RK_2", "fm": "B", "qual": "D", "ts": 103, "value": "value-VAL_3", "label": "L_3", "error": false }, { "rk": "RK_2", "fm": "B", "qual": "D", "ts": 104, "value": "value-VAL_4", "label": "", "error": false } ] }, { "name": "two rows with splits, same timestamp", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"alue-VAL_1\"\ncommit_row: true\n", "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"alue-VAL_2\"\ncommit_row: true\n" ], "results": [ { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL_1", "label": "", "error": false }, { "rk": "RK_2", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL_2", "label": "", "error": false } ] }, { "name": "invalid - bare reset", "chunks": [ "reset_row: true\n" ], "results": [ { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", "label": "", "error": true } ] }, { "name": "invalid - bad reset, no commit", "chunks": [ "reset_row: true\n", "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" ], "results": [ { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", "label": "", "error": true } ] }, { "name": "invalid - missing key after reset", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", "reset_row: true\n", "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" ], "results": [ { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", 
"label": "", "error": true } ] }, { "name": "no data after reset", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", "reset_row: true\n" ], "results": null }, { "name": "simple reset", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", "reset_row: true\n", "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "", "error": false } ] }, { "name": "reset to new val", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "reset_row: true\n", "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL_2", "label": "", "error": false } ] }, { "name": "reset to new qual", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "reset_row: true\n", "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "D", "ts": 100, "value": "value-VAL_1", "label": "", "error": false } ] }, { "name": "reset with splits", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: 
\u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: false\n", "reset_row: true\n", "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL_2", "label": "", "error": false } ] }, { "name": "reset two cells", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "reset_row: true\n", "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", "timestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL_2", "label": "", "error": false }, { "rk": "RK", "fm": "A", "qual": "C", "ts": 103, "value": "value-VAL_3", "label": "", "error": false } ] }, { "name": "two resets", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "reset_row: true\n", "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", "reset_row: true\n", "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_3\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL_3", "label": "", "error": false } ] }, { "name": "reset then two cells", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n 
value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "reset_row: true\n", "row_key: \"RK\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" ], "results": [ { "rk": "RK", "fm": "B", "qual": "C", "ts": 100, "value": "value-VAL_2", "label": "", "error": false }, { "rk": "RK", "fm": "B", "qual": "D", "ts": 103, "value": "value-VAL_3", "label": "", "error": false } ] }, { "name": "reset to new row", "chunks": [ "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", "reset_row: true\n", "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" ], "results": [ { "rk": "RK_2", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL_2", "label": "", "error": false } ] }, { "name": "reset in between chunks", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", "reset_row: true\n", "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" ], "results": [ { "rk": "RK_1", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL_1", "label": "", "error": false } ] }, { "name": "invalid - reset with chunk", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 
100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"a\"\nvalue_size: 10\nreset_row: true\n" ], "results": [ { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", "label": "", "error": true } ] }, { "name": "invalid - commit with chunk", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", "value: \"a\"\nvalue_size: 10\ncommit_row: true\n" ], "results": [ { "rk": "", "fm": "", "qual": "", "ts": 0, "value": "", "label": "", "error": true } ] }, { "name": "empty cell chunk", "chunks": [ "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", "commit_row: false\n", "commit_row: true\n" ], "results": [ { "rk": "RK", "fm": "A", "qual": "C", "ts": 100, "value": "value-VAL", "label": "", "error": false }, { "rk": "RK", "fm": "A", "qual": "C", "ts": 0, "value": "", "label": "", "error": false }, { "rk": "RK", "fm": "A", "qual": "C", "ts": 0, "value": "", "label": "", "error": false } ] } ] }golang-google-cloud-0.9.0/civil/000077500000000000000000000000001312234511600164515ustar00rootroot00000000000000golang-google-cloud-0.9.0/civil/civil.go000066400000000000000000000221021312234511600201030ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // Package civil implements types for civil time, a time-zone-independent // representation of time that follows the rules of the proleptic // Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second // minutes. // // Because they lack location information, these types do not represent unique // moments or intervals of time. Use time.Time for that purpose. package civil import ( "fmt" "time" ) // A Date represents a date (year, month, day). // // This type does not include location information, and therefore does not // describe a unique 24-hour timespan. type Date struct { Year int // Year (e.g., 2014). Month time.Month // Month of the year (January = 1, ...). Day int // Day of the month, starting at 1. } // DateOf returns the Date in which a time occurs in that time's location. func DateOf(t time.Time) Date { var d Date d.Year, d.Month, d.Day = t.Date() return d } // ParseDate parses a string in RFC3339 full-date format and returns the date value it represents. func ParseDate(s string) (Date, error) { t, err := time.Parse("2006-01-02", s) if err != nil { return Date{}, err } return DateOf(t), nil } // String returns the date in RFC3339 full-date format. func (d Date) String() string { return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) } // IsValid reports whether the date is valid. func (d Date) IsValid() bool { return DateOf(d.In(time.UTC)) == d } // In returns the time corresponding to time 00:00:00 of the date in the location. // // In is always consistent with time.Date, even when time.Date returns a time // on a different day. For example, if loc is America/Indiana/Vincennes, then both // time.Date(1955, time.May, 1, 0, 0, 0, 0, loc) // and // civil.Date{Year: 1955, Month: time.May, Day: 1}.In(loc) // return 23:00:00 on April 30, 1955. // // In panics if loc is nil. 
func (d Date) In(loc *time.Location) time.Time { return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) } // AddDays returns the date that is n days in the future. // n can also be negative to go into the past. func (d Date) AddDays(n int) Date { return DateOf(d.In(time.UTC).AddDate(0, 0, n)) } // DaysSince returns the signed number of days between the date and s, not including the end day. // This is the inverse operation to AddDays. func (d Date) DaysSince(s Date) (days int) { // We convert to Unix time so we do not have to worry about leap seconds: // Unix time increases by exactly 86400 seconds per day. deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() return int(deltaUnix / 86400) } // Before reports whether d1 occurs before d2. func (d1 Date) Before(d2 Date) bool { if d1.Year != d2.Year { return d1.Year < d2.Year } if d1.Month != d2.Month { return d1.Month < d2.Month } return d1.Day < d2.Day } // After reports whether d1 occurs after d2. func (d1 Date) After(d2 Date) bool { return d2.Before(d1) } // MarshalText implements the encoding.TextMarshaler interface. // The output is the result of d.String(). func (d Date) MarshalText() ([]byte, error) { return []byte(d.String()), nil } // UnmarshalText implements the encoding.TextUnmarshaler interface. // The date is expected to be a string in a format accepted by ParseDate. func (d *Date) UnmarshalText(data []byte) error { var err error *d, err = ParseDate(string(data)) return err } // A Time represents a time with nanosecond precision. // // This type does not include location information, and therefore does not // describe a unique moment in time. // // This type exists to represent the TIME type in storage-based APIs like BigQuery. // Most operations on Times are unlikely to be meaningful. Prefer the DateTime type. 
type Time struct { Hour int // The hour of the day in 24-hour format; range [0-23] Minute int // The minute of the hour; range [0-59] Second int // The second of the minute; range [0-59] Nanosecond int // The nanosecond of the second; range [0-999999999] } // TimeOf returns the Time representing the time of day in which a time occurs // in that time's location. It ignores the date. func TimeOf(t time.Time) Time { var tm Time tm.Hour, tm.Minute, tm.Second = t.Clock() tm.Nanosecond = t.Nanosecond() return tm } // ParseTime parses a string and returns the time value it represents. // ParseTime accepts an extended form of the RFC3339 partial-time format. After // the HH:MM:SS part of the string, an optional fractional part may appear, // consisting of a decimal point followed by one to nine decimal digits. // (RFC3339 admits only one digit after the decimal point). func ParseTime(s string) (Time, error) { t, err := time.Parse("15:04:05.999999999", s) if err != nil { return Time{}, err } return TimeOf(t), nil } // String returns the date in the format described in ParseTime. If Nanoseconds // is zero, no fractional part will be generated. Otherwise, the result will // end with a fractional part consisting of a decimal point and nine digits. func (t Time) String() string { s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) if t.Nanosecond == 0 { return s } return s + fmt.Sprintf(".%09d", t.Nanosecond) } // IsValid reports whether the time is valid. func (t Time) IsValid() bool { // Construct a non-zero time. tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) return TimeOf(tm) == t } // MarshalText implements the encoding.TextMarshaler interface. // The output is the result of t.String(). func (t Time) MarshalText() ([]byte, error) { return []byte(t.String()), nil } // UnmarshalText implements the encoding.TextUnmarshaler interface. // The time is expected to be a string in a format accepted by ParseTime. 
func (t *Time) UnmarshalText(data []byte) error { var err error *t, err = ParseTime(string(data)) return err } // A DateTime represents a date and time. // // This type does not include location information, and therefore does not // describe a unique moment in time. type DateTime struct { Date Date Time Time } // Note: We deliberately do not embed Date into DateTime, to avoid promoting AddDays and Sub. // DateTimeOf returns the DateTime in which a time occurs in that time's location. func DateTimeOf(t time.Time) DateTime { return DateTime{ Date: DateOf(t), Time: TimeOf(t), } } // ParseDateTime parses a string and returns the DateTime it represents. // ParseDateTime accepts a variant of the RFC3339 date-time format that omits // the time offset but includes an optional fractional time, as described in // ParseTime. Informally, the accepted format is // YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] // where the 'T' may be a lower-case 't'. func ParseDateTime(s string) (DateTime, error) { t, err := time.Parse("2006-01-02T15:04:05.999999999", s) if err != nil { t, err = time.Parse("2006-01-02t15:04:05.999999999", s) if err != nil { return DateTime{}, err } } return DateTimeOf(t), nil } // String returns the date in the format described in ParseDate. func (dt DateTime) String() string { return dt.Date.String() + "T" + dt.Time.String() } // IsValid reports whether the datetime is valid. func (dt DateTime) IsValid() bool { return dt.Date.IsValid() && dt.Time.IsValid() } // In returns the time corresponding to the DateTime in the given location. // // If the time is missing or ambigous at the location, In returns the same // result as time.Date. For example, if loc is America/Indiana/Vincennes, then // both // time.Date(1955, time.May, 1, 0, 30, 0, 0, loc) // and // civil.DateTime{ // civil.Date{Year: 1955, Month: time.May, Day: 1}}, // civil.Time{Minute: 30}}.In(loc) // return 23:30:00 on April 30, 1955. // // In panics if loc is nil. 
func (dt DateTime) In(loc *time.Location) time.Time { return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) } // Before reports whether dt1 occurs before dt2. func (dt1 DateTime) Before(dt2 DateTime) bool { return dt1.In(time.UTC).Before(dt2.In(time.UTC)) } // After reports whether dt1 occurs after dt2. func (dt1 DateTime) After(dt2 DateTime) bool { return dt2.Before(dt1) } // MarshalText implements the encoding.TextMarshaler interface. // The output is the result of dt.String(). func (dt DateTime) MarshalText() ([]byte, error) { return []byte(dt.String()), nil } // UnmarshalText implements the encoding.TextUnmarshaler interface. // The datetime is expected to be a string in a format accepted by ParseDateTime func (dt *DateTime) UnmarshalText(data []byte) error { var err error *dt, err = ParseDateTime(string(data)) return err } golang-google-cloud-0.9.0/civil/civil_test.go000066400000000000000000000270311312234511600211500ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package civil import ( "encoding/json" "reflect" "testing" "time" ) func TestDates(t *testing.T) { for _, test := range []struct { date Date loc *time.Location wantStr string wantTime time.Time }{ { date: Date{2014, 7, 29}, loc: time.Local, wantStr: "2014-07-29", wantTime: time.Date(2014, time.July, 29, 0, 0, 0, 0, time.Local), }, { date: DateOf(time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local)), loc: time.UTC, wantStr: "2014-08-20", wantTime: time.Date(2014, 8, 20, 0, 0, 0, 0, time.UTC), }, { date: DateOf(time.Date(999, time.January, 26, 0, 0, 0, 0, time.Local)), loc: time.UTC, wantStr: "0999-01-26", wantTime: time.Date(999, 1, 26, 0, 0, 0, 0, time.UTC), }, } { if got := test.date.String(); got != test.wantStr { t.Errorf("%#v.String() = %q, want %q", test.date, got, test.wantStr) } if got := test.date.In(test.loc); !got.Equal(test.wantTime) { t.Errorf("%#v.In(%v) = %v, want %v", test.date, test.loc, got, test.wantTime) } } } func TestDateIsValid(t *testing.T) { for _, test := range []struct { date Date want bool }{ {Date{2014, 7, 29}, true}, {Date{2000, 2, 29}, true}, {Date{10000, 12, 31}, true}, {Date{1, 1, 1}, true}, {Date{0, 1, 1}, true}, // year zero is OK {Date{-1, 1, 1}, true}, // negative year is OK {Date{1, 0, 1}, false}, {Date{1, 1, 0}, false}, {Date{2016, 1, 32}, false}, {Date{2016, 13, 1}, false}, {Date{1, -1, 1}, false}, {Date{1, 1, -1}, false}, } { got := test.date.IsValid() if got != test.want { t.Errorf("%#v: got %t, want %t", test.date, got, test.want) } } } func TestParseDate(t *testing.T) { for _, test := range []struct { str string want Date // if empty, expect an error }{ {"2016-01-02", Date{2016, 1, 2}}, {"2016-12-31", Date{2016, 12, 31}}, {"0003-02-04", Date{3, 2, 4}}, {"999-01-26", Date{}}, {"", Date{}}, {"2016-01-02x", Date{}}, } { got, err := ParseDate(test.str) if got != test.want { t.Errorf("ParseDate(%q) = %+v, want %+v", test.str, got, test.want) } if err != nil && test.want != (Date{}) { t.Errorf("Unexpected error %v from 
ParseDate(%q)", err, test.str) } } } func TestDateArithmetic(t *testing.T) { for _, test := range []struct { desc string start Date end Date days int }{ { desc: "zero days noop", start: Date{2014, 5, 9}, end: Date{2014, 5, 9}, days: 0, }, { desc: "crossing a year boundary", start: Date{2014, 12, 31}, end: Date{2015, 1, 1}, days: 1, }, { desc: "negative number of days", start: Date{2015, 1, 1}, end: Date{2014, 12, 31}, days: -1, }, { desc: "full leap year", start: Date{2004, 1, 1}, end: Date{2005, 1, 1}, days: 366, }, { desc: "full non-leap year", start: Date{2001, 1, 1}, end: Date{2002, 1, 1}, days: 365, }, { desc: "crossing a leap second", start: Date{1972, 6, 30}, end: Date{1972, 7, 1}, days: 1, }, { desc: "dates before the unix epoch", start: Date{101, 1, 1}, end: Date{102, 1, 1}, days: 365, }, } { if got := test.start.AddDays(test.days); got != test.end { t.Errorf("[%s] %#v.AddDays(%v) = %#v, want %#v", test.desc, test.start, test.days, got, test.end) } if got := test.end.DaysSince(test.start); got != test.days { t.Errorf("[%s] %#v.Sub(%#v) = %v, want %v", test.desc, test.end, test.start, got, test.days) } } } func TestDateBefore(t *testing.T) { for _, test := range []struct { d1, d2 Date want bool }{ {Date{2016, 12, 31}, Date{2017, 1, 1}, true}, {Date{2016, 1, 1}, Date{2016, 1, 1}, false}, {Date{2016, 12, 30}, Date{2016, 12, 31}, true}, } { if got := test.d1.Before(test.d2); got != test.want { t.Errorf("%v.Before(%v): got %t, want %t", test.d1, test.d2, got, test.want) } } } func TestDateAfter(t *testing.T) { for _, test := range []struct { d1, d2 Date want bool }{ {Date{2016, 12, 31}, Date{2017, 1, 1}, false}, {Date{2016, 1, 1}, Date{2016, 1, 1}, false}, {Date{2016, 12, 30}, Date{2016, 12, 31}, false}, } { if got := test.d1.After(test.d2); got != test.want { t.Errorf("%v.After(%v): got %t, want %t", test.d1, test.d2, got, test.want) } } } func TestTimeToString(t *testing.T) { for _, test := range []struct { str string time Time roundTrip bool // 
ParseTime(str).String() == str? }{ {"13:26:33", Time{13, 26, 33, 0}, true}, {"01:02:03.000023456", Time{1, 2, 3, 23456}, true}, {"00:00:00.000000001", Time{0, 0, 0, 1}, true}, {"13:26:03.1", Time{13, 26, 3, 100000000}, false}, {"13:26:33.0000003", Time{13, 26, 33, 300}, false}, } { gotTime, err := ParseTime(test.str) if err != nil { t.Errorf("ParseTime(%q): got error: %v", test.str, err) continue } if gotTime != test.time { t.Errorf("ParseTime(%q) = %+v, want %+v", test.str, gotTime, test.time) } if test.roundTrip { gotStr := test.time.String() if gotStr != test.str { t.Errorf("%#v.String() = %q, want %q", test.time, gotStr, test.str) } } } } func TestTimeOf(t *testing.T) { for _, test := range []struct { time time.Time want Time }{ {time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local), Time{15, 8, 43, 1}}, {time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), Time{0, 0, 0, 0}}, } { if got := TimeOf(test.time); got != test.want { t.Errorf("TimeOf(%v) = %+v, want %+v", test.time, got, test.want) } } } func TestTimeIsValid(t *testing.T) { for _, test := range []struct { time Time want bool }{ {Time{0, 0, 0, 0}, true}, {Time{23, 0, 0, 0}, true}, {Time{23, 59, 59, 999999999}, true}, {Time{24, 59, 59, 999999999}, false}, {Time{23, 60, 59, 999999999}, false}, {Time{23, 59, 60, 999999999}, false}, {Time{23, 59, 59, 1000000000}, false}, {Time{-1, 0, 0, 0}, false}, {Time{0, -1, 0, 0}, false}, {Time{0, 0, -1, 0}, false}, {Time{0, 0, 0, -1}, false}, } { got := test.time.IsValid() if got != test.want { t.Errorf("%#v: got %t, want %t", test.time, got, test.want) } } } func TestDateTimeToString(t *testing.T) { for _, test := range []struct { str string dateTime DateTime roundTrip bool // ParseDateTime(str).String() == str? 
}{ {"2016-03-22T13:26:33", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 0}}, true}, {"2016-03-22T13:26:33.000000600", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 600}}, true}, {"2016-03-22t13:26:33", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 0}}, false}, } { gotDateTime, err := ParseDateTime(test.str) if err != nil { t.Errorf("ParseDateTime(%q): got error: %v", test.str, err) continue } if gotDateTime != test.dateTime { t.Errorf("ParseDateTime(%q) = %+v, want %+v", test.str, gotDateTime, test.dateTime) } if test.roundTrip { gotStr := test.dateTime.String() if gotStr != test.str { t.Errorf("%#v.String() = %q, want %q", test.dateTime, gotStr, test.str) } } } } func TestParseDateTimeErrors(t *testing.T) { for _, str := range []string{ "", "2016-03-22", // just a date "13:26:33", // just a time "2016-03-22 13:26:33", // wrong separating character "2016-03-22T13:26:33x", // extra at end } { if _, err := ParseDateTime(str); err == nil { t.Errorf("ParseDateTime(%q) succeeded, want error", str) } } } func TestDateTimeOf(t *testing.T) { for _, test := range []struct { time time.Time want DateTime }{ {time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local), DateTime{Date{2014, 8, 20}, Time{15, 8, 43, 1}}}, {time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), DateTime{Date{1, 1, 1}, Time{0, 0, 0, 0}}}, } { if got := DateTimeOf(test.time); got != test.want { t.Errorf("DateTimeOf(%v) = %+v, want %+v", test.time, got, test.want) } } } func TestDateTimeIsValid(t *testing.T) { // No need to be exhaustive here; it's just Date.IsValid && Time.IsValid. 
for _, test := range []struct { dt DateTime want bool }{ {DateTime{Date{2016, 3, 20}, Time{0, 0, 0, 0}}, true}, {DateTime{Date{2016, -3, 20}, Time{0, 0, 0, 0}}, false}, {DateTime{Date{2016, 3, 20}, Time{24, 0, 0, 0}}, false}, } { got := test.dt.IsValid() if got != test.want { t.Errorf("%#v: got %t, want %t", test.dt, got, test.want) } } } func TestDateTimeIn(t *testing.T) { dt := DateTime{Date{2016, 1, 2}, Time{3, 4, 5, 6}} got := dt.In(time.UTC) want := time.Date(2016, 1, 2, 3, 4, 5, 6, time.UTC) if !got.Equal(want) { t.Errorf("got %v, want %v", got, want) } } func TestDateTimeBefore(t *testing.T) { d1 := Date{2016, 12, 31} d2 := Date{2017, 1, 1} t1 := Time{5, 6, 7, 8} t2 := Time{5, 6, 7, 9} for _, test := range []struct { dt1, dt2 DateTime want bool }{ {DateTime{d1, t1}, DateTime{d2, t1}, true}, {DateTime{d1, t1}, DateTime{d1, t2}, true}, {DateTime{d2, t1}, DateTime{d1, t1}, false}, {DateTime{d2, t1}, DateTime{d2, t1}, false}, } { if got := test.dt1.Before(test.dt2); got != test.want { t.Errorf("%v.Before(%v): got %t, want %t", test.dt1, test.dt2, got, test.want) } } } func TestDateTimeAfter(t *testing.T) { d1 := Date{2016, 12, 31} d2 := Date{2017, 1, 1} t1 := Time{5, 6, 7, 8} t2 := Time{5, 6, 7, 9} for _, test := range []struct { dt1, dt2 DateTime want bool }{ {DateTime{d1, t1}, DateTime{d2, t1}, false}, {DateTime{d1, t1}, DateTime{d1, t2}, false}, {DateTime{d2, t1}, DateTime{d1, t1}, true}, {DateTime{d2, t1}, DateTime{d2, t1}, false}, } { if got := test.dt1.After(test.dt2); got != test.want { t.Errorf("%v.After(%v): got %t, want %t", test.dt1, test.dt2, got, test.want) } } } func TestMarshalJSON(t *testing.T) { for _, test := range []struct { value interface{} want string }{ {Date{1987, 4, 15}, `"1987-04-15"`}, {Time{18, 54, 2, 0}, `"18:54:02"`}, {DateTime{Date{1987, 4, 15}, Time{18, 54, 2, 0}}, `"1987-04-15T18:54:02"`}, } { bgot, err := json.Marshal(test.value) if err != nil { t.Fatal(err) } if got := string(bgot); got != test.want { t.Errorf("%#v: got %s, 
want %s", test.value, got, test.want) } } } func TestUnmarshalJSON(t *testing.T) { var d Date var tm Time var dt DateTime for _, test := range []struct { data string ptr interface{} want interface{} }{ {`"1987-04-15"`, &d, &Date{1987, 4, 15}}, {`"1987-04-\u0031\u0035"`, &d, &Date{1987, 4, 15}}, {`"18:54:02"`, &tm, &Time{18, 54, 2, 0}}, {`"1987-04-15T18:54:02"`, &dt, &DateTime{Date{1987, 4, 15}, Time{18, 54, 2, 0}}}, } { if err := json.Unmarshal([]byte(test.data), test.ptr); err != nil { t.Fatalf("%s: %v", test.data, err) } if !reflect.DeepEqual(test.ptr, test.want) { t.Errorf("%s: got %#v, want %#v", test.data, test.ptr, test.want) } } for _, bad := range []string{"", `""`, `"bad"`, `"1987-04-15x"`, `19870415`, // a JSON number `11987-04-15x`, // not a JSON string } { if json.Unmarshal([]byte(bad), &d) == nil { t.Errorf("%q, Date: got nil, want error", bad) } if json.Unmarshal([]byte(bad), &tm) == nil { t.Errorf("%q, Time: got nil, want error", bad) } if json.Unmarshal([]byte(bad), &dt) == nil { t.Errorf("%q, DateTime: got nil, want error", bad) } } } golang-google-cloud-0.9.0/cloud.go000066400000000000000000000016061312234511600170030ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package cloud is the root of the packages used to access Google Cloud // Services. See https://godoc.org/cloud.google.com/go for a full list // of sub-packages. 
// // This package documents how to authorize and authenticate the sub packages. package cloud // import "cloud.google.com/go" golang-google-cloud-0.9.0/cmd/000077500000000000000000000000001312234511600161065ustar00rootroot00000000000000golang-google-cloud-0.9.0/cmd/go-cloud-debug-agent/000077500000000000000000000000001312234511600217775ustar00rootroot00000000000000golang-google-cloud-0.9.0/cmd/go-cloud-debug-agent/debuglet.go000066400000000000000000000353051312234511600241270ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// +build linux package main import ( "encoding/json" "flag" "fmt" "io/ioutil" "log" "math/rand" "os" "sync" "time" "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints" debuglet "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller" "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector" "cloud.google.com/go/compute/metadata" "golang.org/x/debug" "golang.org/x/debug/local" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" cd "google.golang.org/api/clouddebugger/v2" ) var ( appModule = flag.String("appmodule", "", "Optional application module name.") appVersion = flag.String("appversion", "", "Optional application module version name.") sourceContextFile = flag.String("sourcecontext", "", "File containing JSON-encoded source context.") verbose = flag.Bool("v", false, "Output verbose log messages.") projectNumber = flag.String("projectnumber", "", "Project number."+ " If this is not set, it is read from the GCP metadata server.") projectID = flag.String("projectid", "", "Project ID."+ " If this is not set, it is read from the GCP metadata server.") serviceAccountFile = flag.String("serviceaccountfile", "", "File containing JSON service account credentials.") ) const ( maxCapturedStackFrames = 50 maxCapturedVariables = 1000 ) func main() { flag.Usage = usage flag.Parse() args := flag.Args() if len(args) == 0 { // The user needs to supply the name of the executable to run. 
flag.Usage() return } if *projectNumber == "" { var err error *projectNumber, err = metadata.NumericProjectID() if err != nil { log.Print("Debuglet initialization: ", err) } } if *projectID == "" { var err error *projectID, err = metadata.ProjectID() if err != nil { log.Print("Debuglet initialization: ", err) } } sourceContexts, err := readSourceContextFile(*sourceContextFile) if err != nil { log.Print("Reading source context file: ", err) } var ts oauth2.TokenSource ctx := context.Background() if *serviceAccountFile != "" { if ts, err = serviceAcctTokenSource(ctx, *serviceAccountFile, cd.CloudDebuggerScope); err != nil { log.Fatalf("Error getting credentials from file %s: %v", *serviceAccountFile, err) } } else if ts, err = google.DefaultTokenSource(ctx, cd.CloudDebuggerScope); err != nil { log.Print("Error getting application default credentials for Cloud Debugger:", err) os.Exit(103) } c, err := debuglet.NewController(ctx, debuglet.Options{ ProjectNumber: *projectNumber, ProjectID: *projectID, AppModule: *appModule, AppVersion: *appVersion, SourceContexts: sourceContexts, Verbose: *verbose, TokenSource: ts, }) if err != nil { log.Fatal("Error connecting to Cloud Debugger: ", err) } prog, err := local.New(args[0]) if err != nil { log.Fatal("Error loading program: ", err) } // Load the program, but don't actually start it running yet. if _, err = prog.Run(args[1:]...); err != nil { log.Fatal("Error loading program: ", err) } bs := breakpoints.NewBreakpointStore(prog) // Seed the random number generator. rand.Seed(time.Now().UnixNano()) // Now we want to do two things: run the user's program, and start sending // List requests periodically to the Debuglet Controller to get breakpoints // to set. // // We want to give the Debuglet Controller a chance to give us breakpoints // before we start the program, otherwise we would miss any breakpoint // triggers that occur during program startup -- for example, a breakpoint on // the first line of main. 
But if the Debuglet Controller is not responding or // is returning errors, we don't want to delay starting the program // indefinitely. // // We pass a channel to breakpointListLoop, which will close it when the first // List call finishes. Then we wait until either the channel is closed or a // 5-second timer has finished before starting the program. ch := make(chan bool) // Start a goroutine that sends List requests to the Debuglet Controller, and // sets any breakpoints it gets back. go breakpointListLoop(ctx, c, bs, ch) // Wait until 5 seconds have passed or breakpointListLoop has closed ch. select { case <-time.After(5 * time.Second): case <-ch: } // Run the debuggee. programLoop(ctx, c, bs, prog) } // usage prints a usage message to stderr and exits. func usage() { me := "a.out" if len(os.Args) >= 1 { me = os.Args[0] } fmt.Fprintf(os.Stderr, "Usage of %s:\n", me) fmt.Fprintf(os.Stderr, "\t%s [flags...] -- args...\n", me) fmt.Fprintf(os.Stderr, "Flags:\n") flag.PrintDefaults() fmt.Fprintf(os.Stderr, "See https://cloud.google.com/tools/cloud-debugger/setting-up-on-compute-engine for more information.\n") os.Exit(2) } // readSourceContextFile reads a JSON-encoded source context from the given file. // It returns a non-empty slice on success. func readSourceContextFile(filename string) ([]*cd.SourceContext, error) { if filename == "" { return nil, nil } scJSON, err := ioutil.ReadFile(filename) if err != nil { return nil, fmt.Errorf("reading file %q: %v", filename, err) } var sc cd.SourceContext if err = json.Unmarshal(scJSON, &sc); err != nil { return nil, fmt.Errorf("parsing file %q: %v", filename, err) } return []*cd.SourceContext{&sc}, nil } // breakpointListLoop repeatedly calls the Debuglet Controller's List RPC, and // passes the results to the BreakpointStore so it can set and unset breakpoints // in the program. // // After the first List call finishes, ch is closed. 
func breakpointListLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, first chan bool) { const ( avgTimeBetweenCalls = time.Second errorDelay = 5 * time.Second ) // randomDuration returns a random duration with expected value avg. randomDuration := func(avg time.Duration) time.Duration { return time.Duration(rand.Int63n(int64(2*avg + 1))) } var consecutiveFailures uint for { callStart := time.Now() resp, err := c.List(ctx) if err != nil && err != debuglet.ErrListUnchanged { log.Printf("Debuglet controller server error: %v", err) } if err == nil { bs.ProcessBreakpointList(resp.Breakpoints) } if first != nil { // We've finished one call to List and set any breakpoints we received. close(first) first = nil } // Asynchronously send updates for any breakpoints that caused an error when // the BreakpointStore tried to process them. We don't wait for the update // to finish before the program can exit, as we do for normal updates. errorBps := bs.ErrorBreakpoints() for _, bp := range errorBps { go func(bp *cd.Breakpoint) { if err := c.Update(ctx, bp.Id, bp); err != nil { log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err) } }(bp) } // Make the next call not too soon after the one we just did. delay := randomDuration(avgTimeBetweenCalls) // If the call returned an error other than ErrListUnchanged, wait longer. if err != nil && err != debuglet.ErrListUnchanged { // Wait twice as long after each consecutive failure, to a maximum of 16x. delay += randomDuration(errorDelay * (1 << consecutiveFailures)) if consecutiveFailures < 4 { consecutiveFailures++ } } else { consecutiveFailures = 0 } // Sleep until we reach time callStart+delay. If we've already passed that // time, time.Sleep will return immediately -- this should be the common // case, since the server will delay responding to List for a while when // there are no changes to report. 
time.Sleep(callStart.Add(delay).Sub(time.Now())) } } // programLoop runs the program being debugged to completion. When a breakpoint's // conditions are satisfied, it sends an Update RPC to the Debuglet Controller. // The function returns when the program exits and all Update RPCs have finished. func programLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, prog debug.Program) { var wg sync.WaitGroup for { // Run the program until it hits a breakpoint or exits. status, err := prog.Resume() if err != nil { break } // Get the breakpoints at this address whose conditions were satisfied, // and remove the ones that aren't logpoints. bps := bs.BreakpointsAtPC(status.PC) bps = bpsWithConditionSatisfied(bps, prog) for _, bp := range bps { if bp.Action != "LOG" { bs.RemoveBreakpoint(bp) } } if len(bps) == 0 { continue } // Evaluate expressions and get the stack. vc := valuecollector.NewCollector(prog, maxCapturedVariables) needStackFrames := false for _, bp := range bps { // If evaluating bp's condition didn't return an error, evaluate bp's // expressions, and later get the stack frames. if bp.Status == nil { bp.EvaluatedExpressions = expressionValues(bp.Expressions, prog, vc) needStackFrames = true } } var ( stack []*cd.StackFrame stackFramesStatusMessage *cd.StatusMessage ) if needStackFrames { stack, stackFramesStatusMessage = stackFrames(prog, vc) } // Read variable values from the program. variableTable := vc.ReadValues() // Start a goroutine to send updates to the Debuglet Controller or write // to logs, concurrently with resuming the program. // TODO: retry Update on failure. 
for _, bp := range bps { wg.Add(1) switch bp.Action { case "LOG": go func(format string, evaluatedExpressions []*cd.Variable) { s := valuecollector.LogString(format, evaluatedExpressions, variableTable) log.Print(s) wg.Done() }(bp.LogMessageFormat, bp.EvaluatedExpressions) bp.Status = nil bp.EvaluatedExpressions = nil default: go func(bp *cd.Breakpoint) { defer wg.Done() bp.IsFinalState = true if bp.Status == nil { // If evaluating bp's condition didn't return an error, include the // stack frames, variable table, and any status message produced when // getting the stack frames. bp.StackFrames = stack bp.VariableTable = variableTable bp.Status = stackFramesStatusMessage } if err := c.Update(ctx, bp.Id, bp); err != nil { log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err) } }(bp) } } } // Wait for all updates to finish before returning. wg.Wait() } // bpsWithConditionSatisfied returns the breakpoints whose conditions are true // (or that do not have a condition.) func bpsWithConditionSatisfied(bpsIn []*cd.Breakpoint, prog debug.Program) []*cd.Breakpoint { var bpsOut []*cd.Breakpoint for _, bp := range bpsIn { cond, err := condTruth(bp.Condition, prog) if err != nil { bp.Status = errorStatusMessage(err.Error(), refersToBreakpointCondition) // Include bp in the list to be updated when there's an error, so that // the user gets a response. bpsOut = append(bpsOut, bp) } else if cond { bpsOut = append(bpsOut, bp) } } return bpsOut } // condTruth evaluates a condition. func condTruth(condition string, prog debug.Program) (bool, error) { if condition == "" { // A condition wasn't set. return true, nil } val, err := prog.Evaluate(condition) if err != nil { return false, err } if v, ok := val.(bool); !ok { return false, fmt.Errorf("condition expression has type %T, should be bool", val) } else { return v, nil } } // expressionValues evaluates a slice of expressions and returns a []*cd.Variable // containing the results. 
// If the result of an expression evaluation refers to values from the program's // memory (e.g., the expression evaluates to a slice) a corresponding variable is // added to the value collector, to be read later. func expressionValues(expressions []string, prog debug.Program, vc *valuecollector.Collector) []*cd.Variable { evaluatedExpressions := make([]*cd.Variable, len(expressions)) for i, exp := range expressions { ee := &cd.Variable{Name: exp} evaluatedExpressions[i] = ee if val, err := prog.Evaluate(exp); err != nil { ee.Status = errorStatusMessage(err.Error(), refersToBreakpointExpression) } else { vc.FillValue(val, ee) } } return evaluatedExpressions } // stackFrames returns a stack trace for the program. It passes references to // function parameters and local variables to the value collector, so it can read // their values later. func stackFrames(prog debug.Program, vc *valuecollector.Collector) ([]*cd.StackFrame, *cd.StatusMessage) { frames, err := prog.Frames(maxCapturedStackFrames) if err != nil { return nil, errorStatusMessage("Error getting stack: "+err.Error(), refersToUnspecified) } stackFrames := make([]*cd.StackFrame, len(frames)) for i, f := range frames { frame := &cd.StackFrame{} frame.Function = f.Function for _, v := range f.Params { frame.Arguments = append(frame.Arguments, vc.AddVariable(debug.LocalVar(v))) } for _, v := range f.Vars { frame.Locals = append(frame.Locals, vc.AddVariable(v)) } frame.Location = &cd.SourceLocation{ Path: f.File, Line: int64(f.Line), } stackFrames[i] = frame } return stackFrames, nil } // errorStatusMessage returns a *cd.StatusMessage indicating an error, // with the given message and refersTo field. func errorStatusMessage(msg string, refersTo int) *cd.StatusMessage { return &cd.StatusMessage{ Description: &cd.FormatMessage{Format: "$0", Parameters: []string{msg}}, IsError: true, RefersTo: refersToString[refersTo], } } const ( // RefersTo values for cd.StatusMessage. 
refersToUnspecified = iota refersToBreakpointCondition refersToBreakpointExpression ) // refersToString contains the strings for each refersTo value. // See the definition of StatusMessage in the v2/clouddebugger package. var refersToString = map[int]string{ refersToUnspecified: "UNSPECIFIED", refersToBreakpointCondition: "BREAKPOINT_CONDITION", refersToBreakpointExpression: "BREAKPOINT_EXPRESSION", } func serviceAcctTokenSource(ctx context.Context, filename string, scope ...string) (oauth2.TokenSource, error) { data, err := ioutil.ReadFile(filename) if err != nil { return nil, fmt.Errorf("cannot read service account file: %v", err) } cfg, err := google.JWTConfigFromJSON(data, scope...) if err != nil { return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err) } return cfg.TokenSource(ctx), nil } golang-google-cloud-0.9.0/cmd/go-cloud-debug-agent/internal/000077500000000000000000000000001312234511600236135ustar00rootroot00000000000000golang-google-cloud-0.9.0/cmd/go-cloud-debug-agent/internal/breakpoints/000077500000000000000000000000001312234511600261345ustar00rootroot00000000000000golang-google-cloud-0.9.0/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go000066400000000000000000000143671312234511600310170ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// Package breakpoints handles breakpoint requests we get from the user through // the Debuglet Controller, and manages corresponding breakpoints set in the code. package breakpoints import ( "log" "sync" "golang.org/x/debug" cd "google.golang.org/api/clouddebugger/v2" ) // BreakpointStore stores the set of breakpoints for a program. type BreakpointStore struct { mu sync.Mutex // prog is the program being debugged. prog debug.Program // idToBreakpoint is a map from breakpoint identifier to *cd.Breakpoint. The // map value is nil if the breakpoint is inactive. A breakpoint is active if: // - We received it from the Debuglet Controller, and it was active at the time; // - We were able to set code breakpoints for it; // - We have not reached any of those code breakpoints while satisfying the // breakpoint's conditions, or the breakpoint has action LOG; and // - The Debuglet Controller hasn't informed us the breakpoint has become inactive. idToBreakpoint map[string]*cd.Breakpoint // pcToBps and bpToPCs store the many-to-many relationship between breakpoints we // received from the Debuglet Controller and the code breakpoints we set for them. pcToBps map[uint64][]*cd.Breakpoint bpToPCs map[*cd.Breakpoint][]uint64 // errors contains any breakpoints which couldn't be set because they caused an // error. These are retrieved with ErrorBreakpoints, and the caller is // expected to handle sending updates for them. errors []*cd.Breakpoint } // NewBreakpointStore returns a BreakpointStore for the given program. func NewBreakpointStore(prog debug.Program) *BreakpointStore { return &BreakpointStore{ idToBreakpoint: make(map[string]*cd.Breakpoint), pcToBps: make(map[uint64][]*cd.Breakpoint), bpToPCs: make(map[*cd.Breakpoint][]uint64), prog: prog, } } // ProcessBreakpointList applies updates received from the Debuglet Controller through a List call. 
func (bs *BreakpointStore) ProcessBreakpointList(bps []*cd.Breakpoint) { bs.mu.Lock() defer bs.mu.Unlock() for _, bp := range bps { if storedBp, ok := bs.idToBreakpoint[bp.Id]; ok { if storedBp != nil && bp.IsFinalState { // IsFinalState indicates that the breakpoint has been made inactive. bs.removeBreakpointLocked(storedBp) } } else { if bp.IsFinalState { // The controller is notifying us that the breakpoint is no longer active, // but we didn't know about it anyway. continue } if bp.Action != "" && bp.Action != "CAPTURE" && bp.Action != "LOG" { bp.IsFinalState = true bp.Status = &cd.StatusMessage{ Description: &cd.FormatMessage{Format: "Action is not supported"}, IsError: true, } bs.errors = append(bs.errors, bp) // Note in idToBreakpoint that we've already seen this breakpoint, so that we // don't try to report it as an error multiple times. bs.idToBreakpoint[bp.Id] = nil continue } pcs, err := bs.prog.BreakpointAtLine(bp.Location.Path, uint64(bp.Location.Line)) if err != nil { log.Printf("error setting breakpoint at %s:%d: %v", bp.Location.Path, bp.Location.Line, err) } if len(pcs) == 0 { // We can't find a PC for this breakpoint's source line, so don't make it active. // TODO: we could snap the line to a location where we can break, or report an error to the user. bs.idToBreakpoint[bp.Id] = nil } else { bs.idToBreakpoint[bp.Id] = bp for _, pc := range pcs { bs.pcToBps[pc] = append(bs.pcToBps[pc], bp) } bs.bpToPCs[bp] = pcs } } } } // ErrorBreakpoints returns a slice of Breakpoints that caused errors when the // BreakpointStore tried to process them, and resets the list of such // breakpoints. // The caller is expected to send updates to the server to indicate the errors. func (bs *BreakpointStore) ErrorBreakpoints() []*cd.Breakpoint { bs.mu.Lock() defer bs.mu.Unlock() bps := bs.errors bs.errors = nil return bps } // BreakpointsAtPC returns all the breakpoints for which we set a code // breakpoint at the given address. 
func (bs *BreakpointStore) BreakpointsAtPC(pc uint64) []*cd.Breakpoint { bs.mu.Lock() defer bs.mu.Unlock() return bs.pcToBps[pc] } // RemoveBreakpoint makes the given breakpoint inactive. // This is called when either the debugged program hits the breakpoint, or the Debuglet // Controller informs us that the breakpoint is now inactive. func (bs *BreakpointStore) RemoveBreakpoint(bp *cd.Breakpoint) { bs.mu.Lock() bs.removeBreakpointLocked(bp) bs.mu.Unlock() } func (bs *BreakpointStore) removeBreakpointLocked(bp *cd.Breakpoint) { // Set the ID's corresponding breakpoint to nil, so that we won't activate it // if we see it again. // TODO: we could delete it after a few seconds. bs.idToBreakpoint[bp.Id] = nil // Delete bp from the list of cd breakpoints at each of its corresponding // code breakpoint locations, and delete any code breakpoints which no longer // have a corresponding cd breakpoint. var codeBreakpointsToDelete []uint64 for _, pc := range bs.bpToPCs[bp] { bps := remove(bs.pcToBps[pc], bp) if len(bps) == 0 { // bp was the last breakpoint set at this PC, so delete the code breakpoint. codeBreakpointsToDelete = append(codeBreakpointsToDelete, pc) delete(bs.pcToBps, pc) } else { bs.pcToBps[pc] = bps } } if len(codeBreakpointsToDelete) > 0 { bs.prog.DeleteBreakpoints(codeBreakpointsToDelete) } delete(bs.bpToPCs, bp) } // remove updates rs by removing r, then returns rs. // The mutex in the BreakpointStore which contains rs should be held. func remove(rs []*cd.Breakpoint, r *cd.Breakpoint) []*cd.Breakpoint { for i := range rs { if rs[i] == r { rs[i] = rs[len(rs)-1] rs = rs[0 : len(rs)-1] return rs } } // We shouldn't reach here. return rs } golang-google-cloud-0.9.0/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go000066400000000000000000000116701312234511600320500ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package breakpoints import ( "reflect" "testing" "golang.org/x/debug" cd "google.golang.org/api/clouddebugger/v2" ) var ( testPC1 uint64 = 0x1234 testPC2 uint64 = 0x5678 testPC3 uint64 = 0x3333 testFile = "foo.go" testLine uint64 = 42 testLine2 uint64 = 99 testLogPC uint64 = 0x9abc testLogLine uint64 = 43 testBadPC uint64 = 0xdef0 testBadLine uint64 = 44 testBP = &cd.Breakpoint{ Action: "CAPTURE", Id: "TestBreakpoint", IsFinalState: false, Location: &cd.SourceLocation{Path: testFile, Line: int64(testLine)}, } testBP2 = &cd.Breakpoint{ Action: "CAPTURE", Id: "TestBreakpoint2", IsFinalState: false, Location: &cd.SourceLocation{Path: testFile, Line: int64(testLine2)}, } testLogBP = &cd.Breakpoint{ Action: "LOG", Id: "TestLogBreakpoint", IsFinalState: false, Location: &cd.SourceLocation{Path: testFile, Line: int64(testLogLine)}, } testBadBP = &cd.Breakpoint{ Action: "BEEP", Id: "TestBadBreakpoint", IsFinalState: false, Location: &cd.SourceLocation{Path: testFile, Line: int64(testBadLine)}, } ) func TestBreakpointStore(t *testing.T) { p := &Program{breakpointPCs: make(map[uint64]bool)} bs := NewBreakpointStore(p) checkPCs := func(expected map[uint64]bool) { if !reflect.DeepEqual(p.breakpointPCs, expected) { t.Errorf("got breakpoint map %v want %v", p.breakpointPCs, expected) } } bs.ProcessBreakpointList([]*cd.Breakpoint{testBP, testBP2, testLogBP, testBadBP}) checkPCs(map[uint64]bool{ testPC1: true, testPC2: true, testPC3: 
true, testLogPC: true, }) for _, test := range []struct { pc uint64 expected []*cd.Breakpoint }{ {testPC1, []*cd.Breakpoint{testBP}}, {testPC2, []*cd.Breakpoint{testBP}}, {testPC3, []*cd.Breakpoint{testBP2}}, {testLogPC, []*cd.Breakpoint{testLogBP}}, } { if bps := bs.BreakpointsAtPC(test.pc); !reflect.DeepEqual(bps, test.expected) { t.Errorf("BreakpointsAtPC(%x): got %v want %v", test.pc, bps, test.expected) } } testBP2.IsFinalState = true bs.ProcessBreakpointList([]*cd.Breakpoint{testBP, testBP2, testLogBP, testBadBP}) checkPCs(map[uint64]bool{ testPC1: true, testPC2: true, testPC3: false, testLogPC: true, }) bs.RemoveBreakpoint(testBP) checkPCs(map[uint64]bool{ testPC1: false, testPC2: false, testPC3: false, testLogPC: true, }) for _, pc := range []uint64{testPC1, testPC2, testPC3} { if bps := bs.BreakpointsAtPC(pc); len(bps) != 0 { t.Errorf("BreakpointsAtPC(%x): got %v want []", pc, bps) } } // bs.ErrorBreakpoints should return testBadBP. errorBps := bs.ErrorBreakpoints() if len(errorBps) != 1 { t.Errorf("ErrorBreakpoints: got %d want 1", len(errorBps)) } else { bp := errorBps[0] if bp.Id != testBadBP.Id { t.Errorf("ErrorBreakpoints: got id %q want 1", bp.Id) } if bp.Status == nil || !bp.Status.IsError { t.Errorf("ErrorBreakpoints: got %v, want error", bp.Status) } } // The error should have been removed by the last call to bs.ErrorBreakpoints. errorBps = bs.ErrorBreakpoints() if len(errorBps) != 0 { t.Errorf("ErrorBreakpoints: got %d want 0", len(errorBps)) } // Even if testBadBP is sent in a new list, it should not be returned again. bs.ProcessBreakpointList([]*cd.Breakpoint{testBadBP}) errorBps = bs.ErrorBreakpoints() if len(errorBps) != 0 { t.Errorf("ErrorBreakpoints: got %d want 0", len(errorBps)) } } // Program implements the similarly-named interface in x/debug. // ValueCollector should only call its BreakpointAtLine and DeleteBreakpoints methods. 
type Program struct { debug.Program // breakpointPCs contains the state of code breakpoints -- true if the // breakpoint is currently set, false if it has been deleted. breakpointPCs map[uint64]bool } func (p *Program) BreakpointAtLine(file string, line uint64) ([]uint64, error) { var pcs []uint64 switch { case file == testFile && line == testLine: pcs = []uint64{testPC1, testPC2} case file == testFile && line == testLine2: pcs = []uint64{testPC3} case file == testFile && line == testLogLine: pcs = []uint64{testLogPC} default: pcs = []uint64{0xbad} } for _, pc := range pcs { p.breakpointPCs[pc] = true } return pcs, nil } func (p *Program) DeleteBreakpoints(pcs []uint64) error { for _, pc := range pcs { p.breakpointPCs[pc] = false } return nil } golang-google-cloud-0.9.0/cmd/go-cloud-debug-agent/internal/controller/000077500000000000000000000000001312234511600257765ustar00rootroot00000000000000golang-google-cloud-0.9.0/cmd/go-cloud-debug-agent/internal/controller/client.go000066400000000000000000000231251312234511600276060ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package controller is a library for interacting with the Google Cloud Debugger's Debuglet Controller service. 
package controller import ( "crypto/sha256" "encoding/json" "errors" "fmt" "log" "sync" "golang.org/x/net/context" "golang.org/x/oauth2" cd "google.golang.org/api/clouddebugger/v2" "google.golang.org/api/googleapi" "google.golang.org/api/option" "google.golang.org/api/transport" ) const ( // agentVersionString identifies the agent to the service. agentVersionString = "google.com/go-gcp/v0.2" // initWaitToken is the wait token sent in the first Update request to a server. initWaitToken = "init" ) var ( // ErrListUnchanged is returned by List if the server time limit is reached // before the list of breakpoints changes. ErrListUnchanged = errors.New("breakpoint list unchanged") // ErrDebuggeeDisabled is returned by List or Update if the server has disabled // this Debuggee. The caller can retry later. ErrDebuggeeDisabled = errors.New("debuglet disabled by server") ) // Controller manages a connection to the Debuglet Controller service. type Controller struct { s serviceInterface // waitToken is sent with List requests so the server knows which set of // breakpoints this client has already seen. Each successful List request // returns a new waitToken to send in the next request. waitToken string // verbose determines whether to do some logging verbose bool // options, uniquifier and description are used in register. options Options uniquifier string description string // labels are included when registering the debuggee. They should contain // the module name, version and minorversion, and are used by the debug UI // to label the correct version active for debugging. labels map[string]string // mu protects debuggeeID mu sync.Mutex // debuggeeID is returned from the server on registration, and is passed back // to the server in List and Update requests. debuggeeID string } // Options controls how the Debuglet Controller client identifies itself to the server. 
// See https://cloud.google.com/storage/docs/projects and // https://cloud.google.com/tools/cloud-debugger/setting-up-on-compute-engine // for further documentation of these parameters. type Options struct { ProjectNumber string // GCP Project Number. ProjectID string // GCP Project ID. AppModule string // Module name for the debugged program. AppVersion string // Version number for this module. SourceContexts []*cd.SourceContext // Description of source. Verbose bool TokenSource oauth2.TokenSource // Source of Credentials used for Stackdriver Debugger. } type serviceInterface interface { Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) } var newService = func(ctx context.Context, tokenSource oauth2.TokenSource) (serviceInterface, error) { httpClient, endpoint, err := transport.NewHTTPClient(ctx, option.WithTokenSource(tokenSource)) if err != nil { return nil, err } s, err := cd.New(httpClient) if err != nil { return nil, err } if endpoint != "" { s.BasePath = endpoint } return &service{s: s}, nil } type service struct { s *cd.Service } func (s service) Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) { call := cd.NewControllerDebuggeesService(s.s).Register(req) return call.Context(ctx).Do() } func (s service) Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) { call := cd.NewControllerDebuggeesBreakpointsService(s.s).Update(debuggeeID, breakpointID, req) return call.Context(ctx).Do() } func (s service) List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) { call := 
cd.NewControllerDebuggeesBreakpointsService(s.s).List(debuggeeID) call.WaitToken(waitToken) return call.Context(ctx).Do() } // NewController connects to the Debuglet Controller server using the given options, // and returns a Controller for that connection. // Google Application Default Credentials are used to connect to the Debuglet Controller; // see https://developers.google.com/identity/protocols/application-default-credentials func NewController(ctx context.Context, o Options) (*Controller, error) { // We build a JSON encoding of o.SourceContexts so we can hash it. scJSON, err := json.Marshal(o.SourceContexts) if err != nil { scJSON = nil o.SourceContexts = nil } const minorversion = "107157" // any arbitrary numeric string // Compute a uniquifier string by hashing the project number, app module name, // app module version, debuglet version, and source context. // The choice of hash function is arbitrary. h := sha256.Sum256([]byte(fmt.Sprintf("%d %s %d %s %d %s %d %s %d %s %d %s", len(o.ProjectNumber), o.ProjectNumber, len(o.AppModule), o.AppModule, len(o.AppVersion), o.AppVersion, len(agentVersionString), agentVersionString, len(scJSON), scJSON, len(minorversion), minorversion))) uniquifier := fmt.Sprintf("%X", h[0:16]) // 32 hex characters description := o.ProjectID if o.AppModule != "" { description += "-" + o.AppModule } if o.AppVersion != "" { description += "-" + o.AppVersion } s, err := newService(ctx, o.TokenSource) if err != nil { return nil, err } // Construct client. 
c := &Controller{ s: s, waitToken: initWaitToken, verbose: o.Verbose, options: o, uniquifier: uniquifier, description: description, labels: map[string]string{ "module": o.AppModule, "version": o.AppVersion, "minorversion": minorversion, }, } return c, nil } func (c *Controller) getDebuggeeID(ctx context.Context) (string, error) { c.mu.Lock() defer c.mu.Unlock() if c.debuggeeID != "" { return c.debuggeeID, nil } // The debuglet hasn't been registered yet, or it is disabled and we should try registering again. if err := c.register(ctx); err != nil { return "", err } return c.debuggeeID, nil } // List retrieves the current list of breakpoints from the server. // If the set of breakpoints on the server is the same as the one returned in // the previous call to List, the server can delay responding until it changes, // and return an error instead if no change occurs before a time limit the // server sets. List can't be called concurrently with itself. func (c *Controller) List(ctx context.Context) (*cd.ListActiveBreakpointsResponse, error) { id, err := c.getDebuggeeID(ctx) if err != nil { return nil, err } resp, err := c.s.List(ctx, id, c.waitToken) if err != nil { if isAbortedError(err) { return nil, ErrListUnchanged } // For other errors, the protocol requires that we attempt to re-register. c.mu.Lock() defer c.mu.Unlock() if regError := c.register(ctx); regError != nil { return nil, regError } return nil, err } if resp == nil { return nil, errors.New("no response") } if c.verbose { log.Printf("List response: %v", resp) } c.waitToken = resp.NextWaitToken return resp, nil } // isAbortedError tests if err is a *googleapi.Error, that it contains one error // in Errors, and that that error's Reason is "aborted". func isAbortedError(err error) bool { e, _ := err.(*googleapi.Error) if e == nil { return false } if len(e.Errors) != 1 { return false } return e.Errors[0].Reason == "aborted" } // Update reports information to the server about a breakpoint that was hit. 
// Update can be called concurrently with List and Update. func (c *Controller) Update(ctx context.Context, breakpointID string, bp *cd.Breakpoint) error { req := &cd.UpdateActiveBreakpointRequest{Breakpoint: bp} if c.verbose { log.Printf("sending update for %s: %v", breakpointID, req) } id, err := c.getDebuggeeID(ctx) if err != nil { return err } _, err = c.s.Update(ctx, id, breakpointID, req) return err } // register calls the Debuglet Controller Register method, and sets c.debuggeeID. // c.mu should be locked while calling this function. List and Update can't // make progress until it returns. func (c *Controller) register(ctx context.Context) error { req := cd.RegisterDebuggeeRequest{ Debuggee: &cd.Debuggee{ AgentVersion: agentVersionString, Description: c.description, Project: c.options.ProjectNumber, SourceContexts: c.options.SourceContexts, Uniquifier: c.uniquifier, Labels: c.labels, }, } resp, err := c.s.Register(ctx, &req) if err != nil { return err } if resp == nil { return errors.New("register: no response") } if resp.Debuggee.IsDisabled { // Setting c.debuggeeID to empty makes sure future List and Update calls // will call register first. c.debuggeeID = "" } else { c.debuggeeID = resp.Debuggee.Id } if c.debuggeeID == "" { return ErrDebuggeeDisabled } return nil } golang-google-cloud-0.9.0/cmd/go-cloud-debug-agent/internal/controller/client_test.go000066400000000000000000000154641312234511600306540ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package controller import ( "bytes" "errors" "fmt" "strconv" "testing" "golang.org/x/net/context" "golang.org/x/oauth2" cd "google.golang.org/api/clouddebugger/v2" "google.golang.org/api/googleapi" ) const ( testDebuggeeID = "d12345" testBreakpointID = "bp12345" ) var ( // The sequence of wait tokens in List requests and responses. expectedWaitToken = []string{"init", "token1", "token2", "token1", "token1"} // The set of breakpoints returned from each List call. expectedBreakpoints = [][]*cd.Breakpoint{ nil, { &cd.Breakpoint{ Id: testBreakpointID, IsFinalState: false, Location: &cd.SourceLocation{Line: 42, Path: "foo.go"}, }, }, nil, } abortedError error = &googleapi.Error{ Code: 409, Message: "Conflict", Body: `{ "error": { "errors": [ { "domain": "global", "reason": "aborted", "message": "Conflict" } ], "code": 409, "message": "Conflict" } }`, Errors: []googleapi.ErrorItem{ {Reason: "aborted", Message: "Conflict"}, }, } backendError error = &googleapi.Error{ Code: 503, Message: "Backend Error", Body: `{ "error": { "errors": [ { "domain": "global", "reason": "backendError", "message": "Backend Error" } ], "code": 503, "message": "Backend Error" } }`, Errors: []googleapi.ErrorItem{ {Reason: "backendError", Message: "Backend Error"}, }, } ) type mockService struct { t *testing.T listCallsSeen int registerCallsSeen int } func (s *mockService) Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) { s.registerCallsSeen++ if req.Debuggee == nil { s.t.Errorf("missing debuggee") return nil, nil } if req.Debuggee.AgentVersion == "" { s.t.Errorf("missing agent version") } if req.Debuggee.Description == "" { s.t.Errorf("missing debuglet description") } if req.Debuggee.Project == "" { s.t.Errorf("missing project id") } if req.Debuggee.Uniquifier == "" { s.t.Errorf("missing uniquifier") } return &cd.RegisterDebuggeeResponse{ 
Debuggee: &cd.Debuggee{Id: testDebuggeeID}, }, nil } func (s *mockService) Update(ctx context.Context, id, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) { if id != testDebuggeeID { s.t.Errorf("got debuggee ID %s want %s", id, testDebuggeeID) } if breakpointID != testBreakpointID { s.t.Errorf("got breakpoint ID %s want %s", breakpointID, testBreakpointID) } if !req.Breakpoint.IsFinalState { s.t.Errorf("got IsFinalState = false, want true") } return nil, nil } func (s *mockService) List(ctx context.Context, id, waitToken string) (*cd.ListActiveBreakpointsResponse, error) { if id != testDebuggeeID { s.t.Errorf("got debuggee ID %s want %s", id, testDebuggeeID) } if waitToken != expectedWaitToken[s.listCallsSeen] { s.t.Errorf("got wait token %s want %s", waitToken, expectedWaitToken[s.listCallsSeen]) } s.listCallsSeen++ if s.listCallsSeen == 4 { return nil, backendError } if s.listCallsSeen == 5 { return nil, abortedError } resp := &cd.ListActiveBreakpointsResponse{ Breakpoints: expectedBreakpoints[s.listCallsSeen-1], NextWaitToken: expectedWaitToken[s.listCallsSeen], } return resp, nil } func TestDebugletControllerClientLibrary(t *testing.T) { var ( m *mockService c *Controller list *cd.ListActiveBreakpointsResponse err error ) m = &mockService{t: t} newService = func(context.Context, oauth2.TokenSource) (serviceInterface, error) { return m, nil } opts := Options{ ProjectNumber: "5", ProjectID: "p1", AppModule: "mod1", AppVersion: "v1", } ctx := context.Background() if c, err = NewController(ctx, opts); err != nil { t.Fatal("Initializing Controller client:", err) } if err := validateLabels(c, opts); err != nil { t.Fatalf("Invalid labels:\n%v", err) } if list, err = c.List(ctx); err != nil { t.Fatal("List:", err) } if m.registerCallsSeen != 1 { t.Errorf("saw %d Register calls, want 1", m.registerCallsSeen) } if list, err = c.List(ctx); err != nil { t.Fatal("List:", err) } if len(list.Breakpoints) != 1 { 
t.Fatalf("got %d breakpoints, want 1", len(list.Breakpoints)) } if err = c.Update(ctx, list.Breakpoints[0].Id, &cd.Breakpoint{Id: testBreakpointID, IsFinalState: true}); err != nil { t.Fatal("Update:", err) } if list, err = c.List(ctx); err != nil { t.Fatal("List:", err) } if m.registerCallsSeen != 1 { t.Errorf("saw %d Register calls, want 1", m.registerCallsSeen) } // The next List call produces an error that should cause a Register call. if list, err = c.List(ctx); err == nil { t.Fatal("List should have returned an error") } if m.registerCallsSeen != 2 { t.Errorf("saw %d Register calls, want 2", m.registerCallsSeen) } // The next List call produces an error that should not cause a Register call. if list, err = c.List(ctx); err == nil { t.Fatal("List should have returned an error") } if m.registerCallsSeen != 2 { t.Errorf("saw %d Register calls, want 2", m.registerCallsSeen) } if m.listCallsSeen != 5 { t.Errorf("saw %d list calls, want 5", m.listCallsSeen) } } func validateLabels(c *Controller, o Options) error { errMsg := new(bytes.Buffer) if m, ok := c.labels["module"]; ok { if m != o.AppModule { errMsg.WriteString(fmt.Sprintf("label module: want %s, got %s\n", o.AppModule, m)) } } else { errMsg.WriteString("Missing \"module\" label\n") } if v, ok := c.labels["version"]; ok { if v != o.AppVersion { errMsg.WriteString(fmt.Sprintf("label version: want %s, got %s\n", o.AppVersion, v)) } } else { errMsg.WriteString("Missing \"version\" label\n") } if mv, ok := c.labels["minorversion"]; ok { if _, err := strconv.Atoi(mv); err != nil { errMsg.WriteString(fmt.Sprintln("label minorversion: not a numeric string:", mv)) } } else { errMsg.WriteString("Missing \"minorversion\" label\n") } if errMsg.Len() != 0 { return errors.New(errMsg.String()) } return nil } func TestIsAbortedError(t *testing.T) { if !isAbortedError(abortedError) { t.Errorf("isAborted(%+v): got false, want true", abortedError) } if isAbortedError(backendError) { t.Errorf("isAborted(%+v): got true, want 
false", backendError) } } golang-google-cloud-0.9.0/cmd/go-cloud-debug-agent/internal/valuecollector/000077500000000000000000000000001312234511600266365ustar00rootroot00000000000000golang-google-cloud-0.9.0/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go000066400000000000000000000342001312234511600322070ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package valuecollector is used to collect the values of variables in a program. package valuecollector import ( "bytes" "fmt" "strconv" "strings" "golang.org/x/debug" cd "google.golang.org/api/clouddebugger/v2" ) const ( maxArrayLength = 50 maxMapLength = 20 ) // Collector is given references to variables from a program being debugged // using AddVariable. Then when ReadValues is called, the Collector will fetch // the values of those variables. Any variables referred to by those values // will also be fetched; e.g. the targets of pointers, members of structs, // elements of slices, etc. This continues iteratively, building a graph of // values, until all the reachable values are fetched, or a size limit is // reached. // // Variables are passed to the Collector as debug.Var, which is used by x/debug // to represent references to variables. Values are returned as cd.Variable, // which is used by the Debuglet Controller to represent the graph of values. 
// // For example, if the program has a struct variable: // // foo := SomeStruct{a:42, b:"xyz"} // // and we call AddVariable with a reference to foo, we will get back a result // like: // // cd.Variable{Name:"foo", VarTableIndex:10} // // which denotes a variable named "foo" which will have its value stored in // element 10 of the table that will later be returned by ReadValues. That // element might be: // // out[10] = &cd.Variable{Members:{{Name:"a", VarTableIndex:11},{Name:"b", VarTableIndex:12}}} // // which denotes a struct with two members a and b, whose values are in elements // 11 and 12 of the output table: // // out[11] = &cd.Variable{Value:"42"} // out[12] = &cd.Variable{Value:"xyz"} type Collector struct { // prog is the program being debugged. prog debug.Program // limit is the maximum size of the output slice of values. limit int // index is a map from references (variables and map elements) to their // locations in the table. index map[reference]int // table contains the references, including those given to the // Collector directly and those the Collector itself found. // If VarTableIndex is set to 0 in a cd.Variable, it is ignored, so the first entry // of table can't be used. On initialization we put a dummy value there. table []reference } // reference represents a value which is in the queue to be read by the // collector. It is either a debug.Var, or a mapElement. type reference interface{} // mapElement represents an element of a map in the debugged program's memory. type mapElement struct { debug.Map index uint64 } // NewCollector returns a Collector for the given program and size limit. // The limit is the maximum size of the slice of values returned by ReadValues. func NewCollector(prog debug.Program, limit int) *Collector { return &Collector{ prog: prog, limit: limit, index: make(map[reference]int), table: []reference{debug.Var{}}, } } // AddVariable adds another variable to be collected. 
// The Collector doesn't get the value immediately; it returns a cd.Variable // that contains an index into the table which will later be returned by // ReadValues. func (c *Collector) AddVariable(lv debug.LocalVar) *cd.Variable { ret := &cd.Variable{Name: lv.Name} if index, ok := c.add(lv.Var); !ok { // If the add call failed, it's because we reached the size limit. // The Debuglet Controller's convention is to pass it a "Not Captured" error // in this case. ret.Status = statusMessage(messageNotCaptured, true, refersToVariableName) } else { ret.VarTableIndex = int64(index) } return ret } // add adds a reference to the set of values to be read from the // program. It returns the index in the output table that will contain the // corresponding value. It fails if the table has reached the size limit. // It deduplicates references, so the index may be the same as one that was // returned from an earlier add call. func (c *Collector) add(r reference) (outputIndex int, ok bool) { if i, ok := c.index[r]; ok { return i, true } i := len(c.table) if i >= c.limit { return 0, false } c.index[r] = i c.table = append(c.table, r) return i, true } func addMember(v *cd.Variable, name string) *cd.Variable { v2 := &cd.Variable{Name: name} v.Members = append(v.Members, v2) return v2 } // ReadValues fetches values of the variables that were passed to the Collector // with AddVariable. The values of any new variables found are also fetched, // e.g. the targets of pointers or the members of structs, until we reach the // size limit or we run out of values to fetch. // The results are output as a []*cd.Variable, which is the type we need to send // to the Debuglet Controller after we trigger a breakpoint. func (c *Collector) ReadValues() (out []*cd.Variable) { for i := 0; i < len(c.table); i++ { // Create a new cd.Variable for this value, and append it to the output. dcv := new(cd.Variable) out = append(out, dcv) if i == 0 { // The first element is unused. 
continue } switch x := c.table[i].(type) { case mapElement: key, value, err := c.prog.MapElement(x.Map, x.index) if err != nil { dcv.Status = statusMessage(err.Error(), true, refersToVariableValue) continue } // Add a member for the key. member := addMember(dcv, "key") if index, ok := c.add(key); !ok { // The table is full. member.Status = statusMessage(messageNotCaptured, true, refersToVariableName) continue } else { member.VarTableIndex = int64(index) } // Add a member for the value. member = addMember(dcv, "value") if index, ok := c.add(value); !ok { // The table is full. member.Status = statusMessage(messageNotCaptured, true, refersToVariableName) } else { member.VarTableIndex = int64(index) } case debug.Var: if v, err := c.prog.Value(x); err != nil { dcv.Status = statusMessage(err.Error(), true, refersToVariableValue) } else { c.FillValue(v, dcv) } } } return out } // indexable is an interface for arrays, slices and channels. type indexable interface { Len() uint64 Element(uint64) debug.Var } // channel implements indexable. type channel struct { debug.Channel } func (c channel) Len() uint64 { return c.Length } var ( _ indexable = debug.Array{} _ indexable = debug.Slice{} _ indexable = channel{} ) // FillValue copies a value into a cd.Variable. Any variables referred to by // that value, e.g. struct members and pointer targets, are added to the // collector's queue, to be fetched later by ReadValues. func (c *Collector) FillValue(v debug.Value, dcv *cd.Variable) { if c, ok := v.(debug.Channel); ok { // Convert to channel, which implements indexable. v = channel{c} } // Fill in dcv in a manner depending on the type of the value we got. switch val := v.(type) { case int8, int16, int32, int64, bool, uint8, uint16, uint32, uint64, float32, float64, complex64, complex128: // For simple types, we just print the value to dcv.Value. dcv.Value = fmt.Sprint(val) case string: // Put double quotes around strings. 
		dcv.Value = strconv.Quote(val)
	case debug.String:
		if uint64(len(val.String)) < val.Length {
			// This string value was truncated.
			dcv.Value = strconv.Quote(val.String + "...")
		} else {
			dcv.Value = strconv.Quote(val.String)
		}
	case debug.Struct:
		// For structs, we add an entry to dcv.Members for each field in the
		// struct.
		// Each member will contain the name of the field, and the index in the
		// output table which will contain the value of that field.
		for _, f := range val.Fields {
			member := addMember(dcv, f.Name)
			if index, ok := c.add(f.Var); !ok {
				// The table is full.
				member.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
			} else {
				member.VarTableIndex = int64(index)
			}
		}
	case debug.Map:
		dcv.Value = fmt.Sprintf("len = %d", val.Length)
		for i := uint64(0); i < val.Length; i++ {
			// Map entries get the placeholder name `⚫`; writeExpression treats
			// that name specially and renders the entry as "key:value".
			field := addMember(dcv, `⚫`)
			if i == maxMapLength {
				// Too many entries; emit a "..." marker and stop.
				field.Name = "..."
				field.Status = statusMessage(messageTruncated, true, refersToVariableName)
				break
			}
			if index, ok := c.add(mapElement{val, i}); !ok {
				// The value table is full; add a member to contain the error message.
				field.Name = "..."
				field.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
				break
			} else {
				field.VarTableIndex = int64(index)
			}
		}
	case debug.Pointer:
		if val.Address == 0 {
			dcv.Value = ""
		} else if val.TypeID == 0 {
			// We don't know the type of the pointer, so just output the address as
			// the value.
			dcv.Value = fmt.Sprintf("0x%X", val.Address)
			dcv.Status = statusMessage(messageUnknownPointerType, false, refersToVariableName)
		} else {
			// Adds the pointed-to variable to the table, and links this value to
			// that table entry through VarTableIndex.
			dcv.Value = fmt.Sprintf("0x%X", val.Address)
			target := addMember(dcv, "")
			if index, ok := c.add(debug.Var(val)); !ok {
				target.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
			} else {
				target.VarTableIndex = int64(index)
			}
		}
	case indexable:
		// Arrays, slices and channels.
		dcv.Value = "len = " + fmt.Sprint(val.Len())
		for j := uint64(0); j < val.Len(); j++ {
			field := addMember(dcv, fmt.Sprint(`[`, j, `]`))
			if j == maxArrayLength {
				// Too many elements; emit a "..." marker and stop.
				field.Name = "..."
				field.Status = statusMessage(messageTruncated, true, refersToVariableName)
				break
			}
			vr := val.Element(j)
			if index, ok := c.add(vr); !ok {
				// The value table is full; add a member to contain the error message.
				field.Name = "..."
				field.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
				break
			} else {
				// Add a member with the index as the name.
				field.VarTableIndex = int64(index)
			}
		}
	default:
		dcv.Status = statusMessage(messageUnknownType, false, refersToVariableName)
	}
}

// statusMessage returns a *cd.StatusMessage with the given message, IsError
// field and refersTo field.
func statusMessage(msg string, isError bool, refersTo int) *cd.StatusMessage {
	return &cd.StatusMessage{
		Description: &cd.FormatMessage{Format: "$0", Parameters: []string{msg}},
		IsError:     isError,
		RefersTo:    refersToString[refersTo],
	}
}

// LogString produces a string for a logpoint, substituting in variable values
// using evaluatedExpressions and varTable.
func LogString(s string, evaluatedExpressions []*cd.Variable, varTable []*cd.Variable) string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "LOGPOINT: ")
	// seen guards against infinite recursion on cyclic variable graphs; a value
	// encountered twice is printed as "..." (see writeExpression).
	seen := make(map[*cd.Variable]bool)
	for i := 0; i < len(s); {
		if s[i] == '$' {
			i++
			if num, n, ok := parseToken(s[i:], len(evaluatedExpressions)-1); ok {
				// This token is one of $0, $1, etc. Write the corresponding expression.
				writeExpression(&buf, evaluatedExpressions[num], false, varTable, seen)
				i += n
			} else {
				// Something else, like $$.
				buf.WriteByte(s[i])
				i++
			}
		} else {
			buf.WriteByte(s[i])
			i++
		}
	}
	return buf.String()
}

// parseToken parses a decimal number at the start of s, returning its value
// and the number of bytes consumed. ok is false if there is no number, it
// does not parse (e.g. it overflows int), or its value exceeds max.
func parseToken(s string, max int) (num int, bytesRead int, ok bool) {
	var i int
	for i < len(s) && s[i] >= '0' && s[i] <= '9' {
		i++
	}
	num, err := strconv.Atoi(s[:i])
	return num, i, err == nil && num <= max
}

// writeExpression recursively writes variables to buf, in a format suitable
// for logging. If printName is true, writes the name of the variable.
func writeExpression(buf *bytes.Buffer, v *cd.Variable, printName bool, varTable []*cd.Variable, seen map[*cd.Variable]bool) {
	if v == nil {
		// Shouldn't happen.
		return
	}
	name, value, status, members := v.Name, v.Value, v.Status, v.Members
	// If v.VarTableIndex is not zero, it refers to an element of varTable.
	// We merge its fields with the fields we got from v.
	var other *cd.Variable
	if idx := int(v.VarTableIndex); idx > 0 && idx < len(varTable) {
		other = varTable[idx]
	}
	if other != nil {
		if name == "" {
			name = other.Name
		}
		if value == "" {
			value = other.Value
		}
		if status == nil {
			status = other.Status
		}
		if len(members) == 0 {
			members = other.Members
		}
	}
	if printName && name != "" {
		buf.WriteString(name)
		buf.WriteByte(':')
	}
	// If we have seen this value before, write "..." rather than repeating it.
	if seen[v] {
		buf.WriteString("...")
		return
	}
	seen[v] = true
	if other != nil {
		if seen[other] {
			buf.WriteString("...")
			return
		}
		seen[other] = true
	}
	if value != "" && !strings.HasPrefix(value, "len = ") {
		// A plain value.
		buf.WriteString(value)
	} else if status != nil && status.Description != nil {
		// An error.
		for _, p := range status.Description.Parameters {
			buf.WriteByte('(')
			buf.WriteString(p)
			buf.WriteByte(')')
		}
	} else if name == `⚫` {
		// A map element (see the debug.Map case above): its members are the
		// key and value, written as "key:value".
		first := true
		for _, member := range members {
			if first {
				first = false
			} else {
				buf.WriteByte(':')
			}
			writeExpression(buf, member, false, varTable, seen)
		}
	} else {
		// A map, array, slice, channel, or struct.
		// Only structs carry member names here, and a struct is recognizable
		// by its empty Value (containers carry "len = N").
		isStruct := value == ""
		first := true
		buf.WriteByte('{')
		for _, member := range members {
			if first {
				first = false
			} else {
				buf.WriteString(", ")
			}
			writeExpression(buf, member, isStruct, varTable, seen)
		}
		buf.WriteByte('}')
	}
}

const (
	// Error messages for cd.StatusMessage
	messageNotCaptured        = "Not captured"
	messageTruncated          = "Truncated"
	messageUnknownPointerType = "Unknown pointer type"
	messageUnknownType        = "Unknown type"
	// RefersTo values for cd.StatusMessage.
	refersToVariableName = iota
	refersToVariableValue
)

// refersToString contains the strings for each refersTo value.
// See the definition of StatusMessage in the v2/clouddebugger package.
var refersToString = map[int]string{
	refersToVariableName:  "VARIABLE_NAME",
	refersToVariableValue: "VARIABLE_VALUE",
}
golang-google-cloud-0.9.0/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go000066400000000000000000000264241312234511600332570ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package valuecollector

import (
	"fmt"
	"reflect"
	"testing"

	"golang.org/x/debug"
	cd "google.golang.org/api/clouddebugger/v2"
)

const (
	// Some arbitrary type IDs for the test, for use in debug.Var's TypeID field.
	// A TypeID of 0 means the type is unknown, so we start at 1.
int16Type = iota + 1 stringType structType pointerType arrayType int32Type debugStringType mapType channelType sliceType ) func TestValueCollector(t *testing.T) { // Construct the collector. c := NewCollector(&Program{}, 26) // Add some variables of various types, whose values we want the collector to read. variablesToAdd := []debug.LocalVar{ {Name: "a", Var: debug.Var{int16Type, 0x1}}, {Name: "b", Var: debug.Var{stringType, 0x2}}, {Name: "c", Var: debug.Var{structType, 0x3}}, {Name: "d", Var: debug.Var{pointerType, 0x4}}, {Name: "e", Var: debug.Var{arrayType, 0x5}}, {Name: "f", Var: debug.Var{debugStringType, 0x6}}, {Name: "g", Var: debug.Var{mapType, 0x7}}, {Name: "h", Var: debug.Var{channelType, 0x8}}, {Name: "i", Var: debug.Var{sliceType, 0x9}}, } expectedResults := []*cd.Variable{ &cd.Variable{Name: "a", VarTableIndex: 1}, &cd.Variable{Name: "b", VarTableIndex: 2}, &cd.Variable{Name: "c", VarTableIndex: 3}, &cd.Variable{Name: "d", VarTableIndex: 4}, &cd.Variable{Name: "e", VarTableIndex: 5}, &cd.Variable{Name: "f", VarTableIndex: 6}, &cd.Variable{Name: "g", VarTableIndex: 7}, &cd.Variable{Name: "h", VarTableIndex: 8}, &cd.Variable{Name: "i", VarTableIndex: 9}, } for i, v := range variablesToAdd { added := c.AddVariable(v) if !reflect.DeepEqual(added, expectedResults[i]) { t.Errorf("AddVariable: got %+v want %+v", *added, *expectedResults[i]) } } // Read the values, compare the output to what we expect. 
v := c.ReadValues() expectedValues := []*cd.Variable{ &cd.Variable{}, &cd.Variable{Value: "1"}, &cd.Variable{Value: `"hello"`}, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "x", VarTableIndex: 1}, &cd.Variable{Name: "y", VarTableIndex: 2}, }, }, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{VarTableIndex: 1}, }, Value: "0x1", }, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "[0]", VarTableIndex: 10}, &cd.Variable{Name: "[1]", VarTableIndex: 11}, &cd.Variable{Name: "[2]", VarTableIndex: 12}, &cd.Variable{Name: "[3]", VarTableIndex: 13}, }, Value: "len = 4", }, &cd.Variable{Value: `"world"`}, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "⚫", VarTableIndex: 14}, &cd.Variable{Name: "⚫", VarTableIndex: 15}, &cd.Variable{Name: "⚫", VarTableIndex: 16}, }, Value: "len = 3", }, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "[0]", VarTableIndex: 17}, &cd.Variable{Name: "[1]", VarTableIndex: 18}, }, Value: "len = 2", }, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "[0]", VarTableIndex: 19}, &cd.Variable{Name: "[1]", VarTableIndex: 20}, }, Value: "len = 2", }, &cd.Variable{Value: "100"}, &cd.Variable{Value: "104"}, &cd.Variable{Value: "108"}, &cd.Variable{Value: "112"}, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "key", VarTableIndex: 21}, &cd.Variable{Name: "value", VarTableIndex: 22}, }, }, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "key", VarTableIndex: 23}, &cd.Variable{Name: "value", VarTableIndex: 24}, }, }, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "key", VarTableIndex: 25}, &cd.Variable{ Name: "value", Status: &cd.StatusMessage{ Description: &cd.FormatMessage{ Format: "$0", Parameters: []string{"Not captured"}, }, IsError: true, RefersTo: "VARIABLE_NAME", }, }, }, }, &cd.Variable{Value: "246"}, &cd.Variable{Value: "210"}, &cd.Variable{Value: "300"}, &cd.Variable{Value: "304"}, &cd.Variable{Value: "400"}, &cd.Variable{Value: "404"}, &cd.Variable{Value: 
"1400"}, &cd.Variable{Value: "1404"}, &cd.Variable{Value: "2400"}, } if !reflect.DeepEqual(v, expectedValues) { t.Errorf("ReadValues: got %v want %v", v, expectedValues) // Do element-by-element comparisons, for more useful error messages. for i := range v { if i < len(expectedValues) && !reflect.DeepEqual(v[i], expectedValues[i]) { t.Errorf("element %d: got %+v want %+v", i, *v[i], *expectedValues[i]) } } } } // Program implements the similarly-named interface in x/debug. // ValueCollector should only call its Value and MapElement methods. type Program struct { debug.Program } func (p *Program) Value(v debug.Var) (debug.Value, error) { // We determine what to return using v.TypeID. switch v.TypeID { case int16Type: // We use the address as the value, so that we're testing whether the right // address was calculated. return int16(v.Address), nil case stringType: // A string. return "hello", nil case structType: // A struct with two elements. return debug.Struct{ Fields: []debug.StructField{ { Name: "x", Var: debug.Var{int16Type, 0x1}, }, { Name: "y", Var: debug.Var{stringType, 0x2}, }, }, }, nil case pointerType: // A pointer to the first variable above. return debug.Pointer{int16Type, 0x1}, nil case arrayType: // An array of 4 32-bit-wide elements. return debug.Array{ ElementTypeID: int32Type, Address: 0x64, Length: 4, StrideBits: 32, }, nil case debugStringType: return debug.String{ Length: 5, String: "world", }, nil case mapType: return debug.Map{ TypeID: 99, Address: 0x100, Length: 3, }, nil case channelType: return debug.Channel{ ElementTypeID: int32Type, Address: 200, Buffer: 210, Length: 2, Capacity: 10, Stride: 4, BufferStart: 9, }, nil case sliceType: // A slice of 2 32-bit-wide elements. return debug.Slice{ Array: debug.Array{ ElementTypeID: int32Type, Address: 300, Length: 2, StrideBits: 32, }, Capacity: 50, }, nil case int32Type: // We use the address as the value, so that we're testing whether the right // address was calculated. 
return int32(v.Address), nil } return nil, fmt.Errorf("unexpected Value request") } func (p *Program) MapElement(m debug.Map, index uint64) (debug.Var, debug.Var, error) { return debug.Var{TypeID: int16Type, Address: 1000*index + 400}, debug.Var{TypeID: int32Type, Address: 1000*index + 404}, nil } func TestLogString(t *testing.T) { bp := cd.Breakpoint{ Action: "LOG", LogMessageFormat: "$0 hello, $$7world! $1 $2 $3 $4 $5$6 $7 $8", EvaluatedExpressions: []*cd.Variable{ &cd.Variable{Name: "a", VarTableIndex: 1}, &cd.Variable{Name: "b", VarTableIndex: 2}, &cd.Variable{Name: "c", VarTableIndex: 3}, &cd.Variable{Name: "d", VarTableIndex: 4}, &cd.Variable{Name: "e", VarTableIndex: 5}, &cd.Variable{Name: "f", VarTableIndex: 6}, &cd.Variable{Name: "g", VarTableIndex: 7}, &cd.Variable{Name: "h", VarTableIndex: 8}, &cd.Variable{Name: "i", VarTableIndex: 9}, }, } varTable := []*cd.Variable{ &cd.Variable{}, &cd.Variable{Value: "1"}, &cd.Variable{Value: `"hello"`}, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "x", Value: "1"}, &cd.Variable{Name: "y", Value: `"hello"`}, &cd.Variable{Name: "z", VarTableIndex: 3}, }, }, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{VarTableIndex: 1}, }, Value: "0x1", }, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "[0]", VarTableIndex: 10}, &cd.Variable{Name: "[1]", VarTableIndex: 11}, &cd.Variable{Name: "[2]", VarTableIndex: 12}, &cd.Variable{Name: "[3]", VarTableIndex: 13}, }, Value: "len = 4", }, &cd.Variable{Value: `"world"`}, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "⚫", VarTableIndex: 14}, &cd.Variable{Name: "⚫", VarTableIndex: 15}, &cd.Variable{Name: "⚫", VarTableIndex: 16}, }, Value: "len = 3", }, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "[0]", VarTableIndex: 17}, &cd.Variable{Name: "[1]", VarTableIndex: 18}, }, Value: "len = 2", }, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "[0]", VarTableIndex: 19}, &cd.Variable{Name: "[1]", VarTableIndex: 20}, }, Value: "len 
= 2", }, &cd.Variable{Value: "100"}, &cd.Variable{Value: "104"}, &cd.Variable{Value: "108"}, &cd.Variable{Value: "112"}, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "key", VarTableIndex: 21}, &cd.Variable{Name: "value", VarTableIndex: 22}, }, }, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "key", VarTableIndex: 23}, &cd.Variable{Name: "value", VarTableIndex: 24}, }, }, &cd.Variable{ Members: []*cd.Variable{ &cd.Variable{Name: "key", VarTableIndex: 25}, &cd.Variable{ Name: "value", Status: &cd.StatusMessage{ Description: &cd.FormatMessage{ Format: "$0", Parameters: []string{"Not captured"}, }, IsError: true, RefersTo: "VARIABLE_NAME", }, }, }, }, &cd.Variable{Value: "246"}, &cd.Variable{Value: "210"}, &cd.Variable{Value: "300"}, &cd.Variable{Value: "304"}, &cd.Variable{Value: "400"}, &cd.Variable{Value: "404"}, &cd.Variable{Value: "1400"}, &cd.Variable{Value: "1404"}, &cd.Variable{Value: "2400"}, } s := LogString(bp.LogMessageFormat, bp.EvaluatedExpressions, varTable) expected := `LOGPOINT: 1 hello, $7world! 
"hello" {x:1, y:"hello", z:...} ` + `0x1 {100, 104, 108, 112} "world"{400:404, 1400:1404, 2400:(Not captured)} ` + `{246, 210} {300, 304}` if s != expected { t.Errorf("LogString: got %q want %q", s, expected) } } func TestParseToken(t *testing.T) { for _, c := range []struct { s string max int num int n int ok bool }{ {"", 0, 0, 0, false}, {".", 0, 0, 0, false}, {"0", 0, 0, 1, true}, {"0", 1, 0, 1, true}, {"00", 0, 0, 2, true}, {"1.", 1, 1, 1, true}, {"1.", 0, 0, 0, false}, {"10", 10, 10, 2, true}, {"10..", 10, 10, 2, true}, {"10", 11, 10, 2, true}, {"10..", 11, 10, 2, true}, {"10", 9, 0, 0, false}, {"10..", 9, 0, 0, false}, {" 10", 10, 0, 0, false}, {"010", 10, 10, 3, true}, {"123456789", 123456789, 123456789, 9, true}, {"123456789", 123456788, 0, 0, false}, {"123456789123456789123456789", 999999999, 0, 0, false}, } { num, n, ok := parseToken(c.s, c.max) if ok != c.ok { t.Errorf("parseToken(%q, %d): got ok=%t want ok=%t", c.s, c.max, ok, c.ok) continue } if !ok { continue } if num != c.num || n != c.n { t.Errorf("parseToken(%q, %d): got %d,%d,%t want %d,%d,%t", c.s, c.max, num, n, ok, c.num, c.n, c.ok) } } } golang-google-cloud-0.9.0/compute/000077500000000000000000000000001312234511600170175ustar00rootroot00000000000000golang-google-cloud-0.9.0/compute/metadata/000077500000000000000000000000001312234511600205775ustar00rootroot00000000000000golang-google-cloud-0.9.0/compute/metadata/metadata.go000066400000000000000000000310401312234511600227040ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // Package metadata provides access to Google Compute Engine (GCE) // metadata and API service accounts. // // This package is a wrapper around the GCE metadata service, // as documented at https://developers.google.com/compute/docs/metadata. package metadata // import "cloud.google.com/go/compute/metadata" import ( "encoding/json" "fmt" "io/ioutil" "net" "net/http" "net/url" "os" "runtime" "strings" "sync" "time" "golang.org/x/net/context" "golang.org/x/net/context/ctxhttp" ) const ( // metadataIP is the documented metadata server IP address. metadataIP = "169.254.169.254" // metadataHostEnv is the environment variable specifying the // GCE metadata hostname. If empty, the default value of // metadataIP ("169.254.169.254") is used instead. // This is variable name is not defined by any spec, as far as // I know; it was made up for the Go package. metadataHostEnv = "GCE_METADATA_HOST" userAgent = "gcloud-golang/0.1" ) type cachedValue struct { k string trim bool mu sync.Mutex v string } var ( projID = &cachedValue{k: "project/project-id", trim: true} projNum = &cachedValue{k: "project/numeric-project-id", trim: true} instID = &cachedValue{k: "instance/id", trim: true} ) var ( metaClient = &http.Client{ Transport: &http.Transport{ Dial: (&net.Dialer{ Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, ResponseHeaderTimeout: 2 * time.Second, }, } subscribeClient = &http.Client{ Transport: &http.Transport{ Dial: (&net.Dialer{ Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, }, } ) // NotDefinedError is returned when requested metadata is not defined. // // The underlying string is the suffix after "/computeMetadata/v1/". // // This error is not returned if the value is defined to be the empty // string. 
type NotDefinedError string

func (suffix NotDefinedError) Error() string {
	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}

// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
	val, _, err := getETag(metaClient, suffix)
	return val, err
}

// getETag returns a value from the metadata service as well as the associated
// ETag using the provided client. This func is otherwise equivalent to Get.
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
	// Using a fixed IP makes it very difficult to spoof the metadata service in
	// a container, which is an important use-case for local testing of cloud
	// deployments. To enable spoofing of the metadata service, the environment
	// variable GCE_METADATA_HOST is first inspected to decide where metadata
	// requests shall go.
	host := os.Getenv(metadataHostEnv)
	if host == "" {
		// Using 169.254.169.254 instead of "metadata" here because Go
		// binaries built with the "netgo" tag and without cgo won't
		// know the search suffix for "metadata" is
		// ".google.internal", and this IP address is documented as
		// being stable anyway.
		host = metadataIP
	}
	url := "http://" + host + "/computeMetadata/v1/" + suffix
	req, _ := http.NewRequest("GET", url, nil)
	// The Metadata-Flavor header is what distinguishes a genuine metadata
	// request; the server requires it.
	req.Header.Set("Metadata-Flavor", "Google")
	req.Header.Set("User-Agent", userAgent)
	res, err := client.Do(req)
	if err != nil {
		return "", "", err
	}
	defer res.Body.Close()
	// 404 means the key is not defined, which callers distinguish from
	// transport errors via the NotDefinedError type.
	if res.StatusCode == http.StatusNotFound {
		return "", "", NotDefinedError(suffix)
	}
	if res.StatusCode != 200 {
		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
	}
	all, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
	return string(all), res.Header.Get("Etag"), nil
}

// getTrimmed is Get with surrounding whitespace trimmed from the result.
func getTrimmed(suffix string) (s string, err error) {
	s, err = Get(suffix)
	s = strings.TrimSpace(s)
	return
}

// get returns the cached value, fetching (and caching) it on first use.
// Note that a fetch error is not cached; the next call retries.
func (c *cachedValue) get() (v string, err error) {
	// NOTE(review): the deferred Unlock is registered before Lock is taken.
	// Deferred calls only run at function return, so this is correct, but
	// the conventional order is Lock first, then defer Unlock.
	defer c.mu.Unlock()
	c.mu.Lock()
	if c.v != "" {
		return c.v, nil
	}
	if c.trim {
		v, err = getTrimmed(c.k)
	} else {
		v, err = Get(c.k)
	}
	if err == nil {
		c.v = v
	}
	return
}

var (
	onGCEOnce sync.Once
	onGCE     bool
)

// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
	onGCEOnce.Do(initOnGCE)
	return onGCE
}

func initOnGCE() {
	onGCE = testOnGCE()
}

func testOnGCE() bool {
	// The user explicitly said they're on GCE, so trust them.
	if os.Getenv(metadataHostEnv) != "" {
		return true
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Buffered with capacity 2 so that whichever probe finishes second can
	// still send without blocking (and thus without leaking its goroutine).
	resc := make(chan bool, 2)

	// Try two strategies in parallel.
	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
	go func() {
		// Probe 1: HTTP request to the metadata IP, checking the
		// Metadata-Flavor response header.
		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
		req.Header.Set("User-Agent", userAgent)
		res, err := ctxhttp.Do(ctx, metaClient, req)
		if err != nil {
			resc <- false
			return
		}
		defer res.Body.Close()
		resc <- res.Header.Get("Metadata-Flavor") == "Google"
	}()

	go func() {
		// Probe 2: DNS lookup of the metadata hostname, checking that it
		// resolves to the documented metadata IP.
		addrs, err := net.LookupHost("metadata.google.internal")
		if err != nil || len(addrs) == 0 {
			resc <- false
			return
		}
		resc <- strsContains(addrs, metadataIP)
	}()

	tryHarder := systemInfoSuggestsGCE()
	if tryHarder {
		res := <-resc
		if res {
			// The first strategy succeeded, so let's use it.
			return true
		}
		// Wait for either the DNS or metadata server probe to
		// contradict the other one and say we are running on
		// GCE. Give it a lot of time to do so, since the system
		// info already suggests we're running on a GCE BIOS.
		timer := time.NewTimer(5 * time.Second)
		defer timer.Stop()
		select {
		case res = <-resc:
			return res
		case <-timer.C:
			// Too slow. Who knows what this system is.
			return false
		}
	}

	// There's no hint from the system info that we're running on
	// GCE, so use the first probe's result as truth, whether it's
	// true or false. The goal here is to optimize for speed for
	// users who are NOT running on GCE. We can't assume that
	// either a DNS lookup or an HTTP request to a blackholed IP
	// address is fast. Worst case this should return when the
	// metaClient's Transport.ResponseHeaderTimeout or
	// Transport.Dial.Timeout fires (in two seconds).
	return <-resc
}

// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
	if runtime.GOOS != "linux" {
		// We don't have any non-Linux clues available, at least yet.
		return false
	}
	// A read error simply yields an empty name, which fails the comparison
	// below; that is the intended best-effort behavior.
	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
	name := strings.TrimSpace(string(slurp))
	return name == "Google" || name == "Google Compute Engine"
}

// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
	const failedSubscribeSleep = time.Second * 5

	// First check to see if the metadata value exists at all.
	val, lastETag, err := getETag(subscribeClient, suffix)
	if err != nil {
		return err
	}

	if err := fn(val, true); err != nil {
		return err
	}

	ok := true
	// Long-poll: the server holds the request open until the value changes
	// from the ETag we last saw.
	if strings.ContainsRune(suffix, '?') {
		suffix += "&wait_for_change=true&last_etag="
	} else {
		suffix += "?wait_for_change=true&last_etag="
	}
	for {
		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
		if err != nil {
			if _, deleted := err.(NotDefinedError); !deleted {
				time.Sleep(failedSubscribeSleep)
				continue // Retry on other errors.
			}
			ok = false
		}
		lastETag = etag

		if err := fn(val, ok); err != nil || !ok {
			return err
		}
	}
}

// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }

// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }

// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
	return getTrimmed("instance/network-interfaces/0/ip")
}

// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") } // Hostname returns the instance's hostname. This will be of the form // ".c..internal". func Hostname() (string, error) { return getTrimmed("instance/hostname") } // InstanceTags returns the list of user-defined instance tags, // assigned when initially creating a GCE instance. func InstanceTags() ([]string, error) { var s []string j, err := Get("instance/tags") if err != nil { return nil, err } if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { return nil, err } return s, nil } // InstanceID returns the current VM's numeric instance ID. func InstanceID() (string, error) { return instID.get() } // InstanceName returns the current VM's instance ID string. func InstanceName() (string, error) { host, err := Hostname() if err != nil { return "", err } return strings.Split(host, ".")[0], nil } // Zone returns the current VM's zone, such as "us-central1-b". func Zone() (string, error) { zone, err := getTrimmed("instance/zone") // zone is of the form "projects//zones/". if err != nil { return "", err } return zone[strings.LastIndex(zone, "/")+1:], nil } // InstanceAttributes returns the list of user-defined attributes, // assigned when initially creating a GCE VM instance. The value of an // attribute can be obtained with InstanceAttributeValue. func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } // ProjectAttributes returns the list of user-defined attributes // applying to the project as a whole, not just this VM. The value of // an attribute can be obtained with ProjectAttributeValue. 
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } func lines(suffix string) ([]string, error) { j, err := Get(suffix) if err != nil { return nil, err } s := strings.Split(strings.TrimSpace(j), "\n") for i := range s { s[i] = strings.TrimSpace(s[i]) } return s, nil } // InstanceAttributeValue returns the value of the provided VM // instance attribute. // // If the requested attribute is not defined, the returned error will // be of type NotDefinedError. // // InstanceAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. func InstanceAttributeValue(attr string) (string, error) { return Get("instance/attributes/" + attr) } // ProjectAttributeValue returns the value of the provided // project attribute. // // If the requested attribute is not defined, the returned error will // be of type NotDefinedError. // // ProjectAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. func ProjectAttributeValue(attr string) (string, error) { return Get("project/attributes/" + attr) } // Scopes returns the service account scopes for the given account. // The account may be empty or the string "default" to use the instance's // main account. func Scopes(serviceAccount string) ([]string, error) { if serviceAccount == "" { serviceAccount = "default" } return lines("instance/service-accounts/" + serviceAccount + "/scopes") } func strsContains(ss []string, s string) bool { for _, v := range ss { if v == s { return true } } return false } golang-google-cloud-0.9.0/compute/metadata/metadata_test.go000066400000000000000000000023111312234511600237420ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metadata import ( "os" "sync" "testing" ) func TestOnGCE_Stress(t *testing.T) { if testing.Short() { t.Skip("skipping in -short mode") } var last bool for i := 0; i < 100; i++ { onGCEOnce = sync.Once{} now := OnGCE() if i > 0 && now != last { t.Errorf("%d. changed from %v to %v", i, last, now) } last = now } t.Logf("OnGCE() = %v", last) } func TestOnGCE_Force(t *testing.T) { onGCEOnce = sync.Once{} old := os.Getenv(metadataHostEnv) defer os.Setenv(metadataHostEnv, old) os.Setenv(metadataHostEnv, "127.0.0.1") if !OnGCE() { t.Error("OnGCE() = false; want true") } } golang-google-cloud-0.9.0/container/000077500000000000000000000000001312234511600173255ustar00rootroot00000000000000golang-google-cloud-0.9.0/container/container.go000066400000000000000000000175411312234511600216460ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package container contains a deprecated Google Container Engine client. // // Deprecated: Use google.golang.org/api/container instead. 
package container // import "cloud.google.com/go/container"

import (
	"errors"
	"fmt"
	"time"

	"golang.org/x/net/context"
	raw "google.golang.org/api/container/v1"
	"google.golang.org/api/option"
	"google.golang.org/api/transport"
)

// Type is the type of a cluster operation (see Op.Type).
type Type string

const (
	TypeCreate = Type("createCluster")
	TypeDelete = Type("deleteCluster")
)

// Status is the status of a cluster or operation.
type Status string

const (
	StatusDone         = Status("done")
	StatusPending      = Status("pending")
	StatusRunning      = Status("running")
	StatusError        = Status("error")
	StatusProvisioning = Status("provisioning")
	StatusStopping     = Status("stopping")
)

const prodAddr = "https://container.googleapis.com/"
const userAgent = "gcloud-golang-container/20151008"

// Client is a Google Container Engine client, which may be used to manage
// clusters with a project. It must be constructed via NewClient.
type Client struct {
	projectID string
	svc       *raw.Service
}

// NewClient creates a new Google Container Engine client.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
	// Defaults first, so caller-supplied options can override them.
	o := []option.ClientOption{
		option.WithEndpoint(prodAddr),
		option.WithScopes(raw.CloudPlatformScope),
		option.WithUserAgent(userAgent),
	}
	o = append(o, opts...)
	httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
	if err != nil {
		return nil, fmt.Errorf("dialing: %v", err)
	}

	svc, err := raw.New(httpClient)
	if err != nil {
		return nil, fmt.Errorf("constructing container client: %v", err)
	}
	svc.BasePath = endpoint

	c := &Client{
		projectID: projectID,
		svc:       svc,
	}
	return c, nil
}

// Resource is a Google Container Engine cluster resource.
type Resource struct {
	// Name is the name of this cluster. The name must be unique
	// within this project and zone, and can be up to 40 characters.
	Name string

	// Description is the description of the cluster. Optional.
	Description string

	// Zone is the Google Compute Engine zone in which the cluster resides.
	Zone string

	// Status is the current status of the cluster. It could either be
	// StatusError, StatusProvisioning, StatusRunning or StatusStopping.
	Status Status

	// Num is the number of the nodes in this cluster resource.
	Num int64

	// APIVersion is the version of the Kubernetes master and kubelets running
	// in this cluster. Allowed value is 0.4.2, or leave blank to
	// pick up the latest stable release.
	APIVersion string

	// Endpoint is the IP address of this cluster's Kubernetes master.
	// The endpoint can be accessed at https://username:password@endpoint/.
	// See Username and Password fields for the username and password information.
	Endpoint string

	// Username is the username to use when accessing the Kubernetes master endpoint.
	Username string

	// Password is the password to use when accessing the Kubernetes master endpoint.
	Password string

	// ContainerIPv4CIDR is the IP addresses of the container pods in
	// this cluster, in CIDR notation (e.g. 1.2.3.4/29).
	ContainerIPv4CIDR string

	// ServicesIPv4CIDR is the IP addresses of the Kubernetes services in this
	// cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are
	// always in the 10.0.0.0/16 range.
	ServicesIPv4CIDR string

	// MachineType is a Google Compute Engine machine type (e.g. n1-standard-1).
	// If none set, the default type is used while creating a new cluster.
	MachineType string

	// This field is ignored. It was removed from the underlying container API in v1.
	SourceImage string

	// Created is the creation time of this cluster.
	Created time.Time
}

// resourceFromRaw converts a raw API cluster to a *Resource.
// It returns nil for a nil input.
func resourceFromRaw(c *raw.Cluster) *Resource {
	if c == nil {
		return nil
	}
	// NOTE(review): c.MasterAuth and c.NodeConfig are dereferenced below
	// without nil checks; if the API can omit either message this will
	// panic — confirm against the container/v1 response guarantees.
	r := &Resource{
		Name:              c.Name,
		Description:       c.Description,
		Zone:              c.Zone,
		Status:            Status(c.Status),
		Num:               c.CurrentNodeCount,
		APIVersion:        c.InitialClusterVersion,
		Endpoint:          c.Endpoint,
		Username:          c.MasterAuth.Username,
		Password:          c.MasterAuth.Password,
		ContainerIPv4CIDR: c.ClusterIpv4Cidr,
		ServicesIPv4CIDR:  c.ServicesIpv4Cidr,
		MachineType:       c.NodeConfig.MachineType,
	}
	// A parse failure leaves Created as the zero time.
	r.Created, _ = time.Parse(time.RFC3339, c.CreateTime)
	return r
}

// resourcesFromRaw converts a slice of raw API clusters to []*Resource.
func resourcesFromRaw(c []*raw.Cluster) []*Resource {
	r := make([]*Resource, len(c))
	for i, val := range c {
		r[i] = resourceFromRaw(val)
	}
	return r
}

// Op represents a Google Container Engine API operation.
type Op struct {
	// Name is the name of the operation.
	Name string

	// Zone is the Google Compute Engine zone.
	Zone string

	// This field is ignored. It was removed from the underlying container API in v1.
	TargetURL string

	// Type is the operation type. It could be either be TypeCreate or TypeDelete.
	Type Type

	// Status is the current status of this operation. It could be either
	// OpDone or OpPending.
	Status Status
}

// opFromRaw converts a raw API operation to an *Op.
// It returns nil for a nil input.
func opFromRaw(o *raw.Operation) *Op {
	if o == nil {
		return nil
	}
	return &Op{
		Name:   o.Name,
		Zone:   o.Zone,
		Type:   Type(o.OperationType),
		Status: Status(o.Status),
	}
}

// opsFromRaw converts a slice of raw API operations to []*Op.
func opsFromRaw(o []*raw.Operation) []*Op {
	ops := make([]*Op, len(o))
	for i, val := range o {
		ops[i] = opFromRaw(val)
	}
	return ops
}

// Clusters returns a list of cluster resources from the specified zone.
// If no zone is specified, it returns all clusters under the user project.
func (c *Client) Clusters(ctx context.Context, zone string) ([]*Resource, error) {
	// "-" is the API's wildcard for "all zones".
	if zone == "" {
		zone = "-"
	}
	resp, err := c.svc.Projects.Zones.Clusters.List(c.projectID, zone).Do()
	if err != nil {
		return nil, err
	}
	return resourcesFromRaw(resp.Clusters), nil
}

// Cluster returns metadata about the specified cluster.
func (c *Client) Cluster(ctx context.Context, zone, name string) (*Resource, error) { resp, err := c.svc.Projects.Zones.Clusters.Get(c.projectID, zone, name).Do() if err != nil { return nil, err } return resourceFromRaw(resp), nil } // CreateCluster creates a new cluster with the provided metadata // in the specified zone. func (c *Client) CreateCluster(ctx context.Context, zone string, resource *Resource) (*Resource, error) { panic("not implemented") } // DeleteCluster deletes a cluster. func (c *Client) DeleteCluster(ctx context.Context, zone, name string) error { _, err := c.svc.Projects.Zones.Clusters.Delete(c.projectID, zone, name).Do() return err } // Operations returns a list of operations from the specified zone. // If no zone is specified, it looks up for all of the operations // that are running under the user's project. func (c *Client) Operations(ctx context.Context, zone string) ([]*Op, error) { if zone == "" { resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, "-").Do() if err != nil { return nil, err } return opsFromRaw(resp.Operations), nil } resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, zone).Do() if err != nil { return nil, err } return opsFromRaw(resp.Operations), nil } // Operation returns an operation. func (c *Client) Operation(ctx context.Context, zone, name string) (*Op, error) { resp, err := c.svc.Projects.Zones.Operations.Get(c.projectID, zone, name).Do() if err != nil { return nil, err } if resp.StatusMessage != "" { return nil, errors.New(resp.StatusMessage) } return opFromRaw(resp), nil } golang-google-cloud-0.9.0/datastore/000077500000000000000000000000001312234511600173315ustar00rootroot00000000000000golang-google-cloud-0.9.0/datastore/datastore.go000066400000000000000000000446201312234511600216540ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "errors" "fmt" "log" "os" "reflect" "cloud.google.com/go/internal/version" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" pb "google.golang.org/genproto/googleapis/datastore/v1" "google.golang.org/grpc" "google.golang.org/grpc/metadata" ) const ( prodAddr = "datastore.googleapis.com:443" userAgent = "gcloud-golang-datastore/20160401" ) // ScopeDatastore grants permissions to view and/or manage datastore entities const ScopeDatastore = "https://www.googleapis.com/auth/datastore" // resourcePrefixHeader is the name of the metadata header used to indicate // the resource being operated on. const resourcePrefixHeader = "google-cloud-resource-prefix" // protoClient is an interface for *transport.ProtoClient to support injecting // fake clients in tests. type protoClient interface { Call(context.Context, string, proto.Message, proto.Message) error } // datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC // metadata to be sent in each request for server-side traffic management. type datastoreClient struct { // Embed so we still implement the DatastoreClient interface, // if the interface adds more methods. 
pb.DatastoreClient c pb.DatastoreClient md metadata.MD } func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient { return &datastoreClient{ c: pb.NewDatastoreClient(conn), md: metadata.Pairs( resourcePrefixHeader, "projects/"+projectID, "x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)), } } func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) { return dc.c.Lookup(metadata.NewOutgoingContext(ctx, dc.md), in, opts...) } func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) { return dc.c.RunQuery(metadata.NewOutgoingContext(ctx, dc.md), in, opts...) } func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) { return dc.c.BeginTransaction(metadata.NewOutgoingContext(ctx, dc.md), in, opts...) } func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) { return dc.c.Commit(metadata.NewOutgoingContext(ctx, dc.md), in, opts...) } func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) { return dc.c.Rollback(metadata.NewOutgoingContext(ctx, dc.md), in, opts...) } func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) { return dc.c.AllocateIds(metadata.NewOutgoingContext(ctx, dc.md), in, opts...) } // Client is a client for reading and writing data in a datastore dataset. type Client struct { conn *grpc.ClientConn client pb.DatastoreClient endpoint string dataset string // Called dataset by the datastore API, synonym for project ID. } // NewClient creates a new Client for a given dataset. 
// If the project ID is empty, it is derived from the DATASTORE_PROJECT_ID environment variable. // If the DATASTORE_EMULATOR_HOST environment variable is set, client will use its value // to connect to a locally-running datastore emulator. func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { var o []option.ClientOption // Environment variables for gcd emulator: // https://cloud.google.com/datastore/docs/tools/datastore-emulator // If the emulator is available, dial it directly (and don't pass any credentials). if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr != "" { conn, err := grpc.Dial(addr, grpc.WithInsecure()) if err != nil { return nil, fmt.Errorf("grpc.Dial: %v", err) } o = []option.ClientOption{option.WithGRPCConn(conn)} } else { o = []option.ClientOption{ option.WithEndpoint(prodAddr), option.WithScopes(ScopeDatastore), option.WithUserAgent(userAgent), } } // Warn if we see the legacy emulator environment variables. if os.Getenv("DATASTORE_HOST") != "" && os.Getenv("DATASTORE_EMULATOR_HOST") == "" { log.Print("WARNING: legacy environment variable DATASTORE_HOST is ignored. Use DATASTORE_EMULATOR_HOST instead.") } if os.Getenv("DATASTORE_DATASET") != "" && os.Getenv("DATASTORE_PROJECT_ID") == "" { log.Print("WARNING: legacy environment variable DATASTORE_DATASET is ignored. Use DATASTORE_PROJECT_ID instead.") } if projectID == "" { projectID = os.Getenv("DATASTORE_PROJECT_ID") } if projectID == "" { return nil, errors.New("datastore: missing project/dataset id") } o = append(o, opts...) conn, err := transport.DialGRPC(ctx, o...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } return &Client{ conn: conn, client: newDatastoreClient(conn, projectID), dataset: projectID, }, nil } var ( // ErrInvalidEntityType is returned when functions like Get or Next are // passed a dst or src argument of invalid type. 
ErrInvalidEntityType = errors.New("datastore: invalid entity type") // ErrInvalidKey is returned when an invalid key is presented. ErrInvalidKey = errors.New("datastore: invalid key") // ErrNoSuchEntity is returned when no entity was found for a given key. ErrNoSuchEntity = errors.New("datastore: no such entity") ) type multiArgType int const ( multiArgTypeInvalid multiArgType = iota multiArgTypePropertyLoadSaver multiArgTypeStruct multiArgTypeStructPtr multiArgTypeInterface ) // ErrFieldMismatch is returned when a field is to be loaded into a different // type than the one it was stored from, or when a field is missing or // unexported in the destination struct. // StructType is the type of the struct pointed to by the destination argument // passed to Get or to Iterator.Next. type ErrFieldMismatch struct { StructType reflect.Type FieldName string Reason string } func (e *ErrFieldMismatch) Error() string { return fmt.Sprintf("datastore: cannot load field %q into a %q: %s", e.FieldName, e.StructType, e.Reason) } // GeoPoint represents a location as latitude/longitude in degrees. type GeoPoint struct { Lat, Lng float64 } // Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. func (g GeoPoint) Valid() bool { return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 } func keyToProto(k *Key) *pb.Key { if k == nil { return nil } // TODO(jbd): Eliminate unrequired allocations. var path []*pb.Key_PathElement for { el := &pb.Key_PathElement{Kind: k.Kind} if k.ID != 0 { el.IdType = &pb.Key_PathElement_Id{Id: k.ID} } else if k.Name != "" { el.IdType = &pb.Key_PathElement_Name{Name: k.Name} } path = append([]*pb.Key_PathElement{el}, path...) if k.Parent == nil { break } k = k.Parent } key := &pb.Key{Path: path} if k.Namespace != "" { key.PartitionId = &pb.PartitionId{ NamespaceId: k.Namespace, } } return key } // protoToKey decodes a protocol buffer representation of a key into an // equivalent *Key object. 
If the key is invalid, protoToKey will return the // invalid key along with ErrInvalidKey. func protoToKey(p *pb.Key) (*Key, error) { var key *Key var namespace string if partition := p.PartitionId; partition != nil { namespace = partition.NamespaceId } for _, el := range p.Path { key = &Key{ Namespace: namespace, Kind: el.Kind, ID: el.GetId(), Name: el.GetName(), Parent: key, } } if !key.valid() { // Also detects key == nil. return key, ErrInvalidKey } return key, nil } // multiKeyToProto is a batch version of keyToProto. func multiKeyToProto(keys []*Key) []*pb.Key { ret := make([]*pb.Key, len(keys)) for i, k := range keys { ret[i] = keyToProto(k) } return ret } // multiKeyToProto is a batch version of keyToProto. func multiProtoToKey(keys []*pb.Key) ([]*Key, error) { hasErr := false ret := make([]*Key, len(keys)) err := make(MultiError, len(keys)) for i, k := range keys { ret[i], err[i] = protoToKey(k) if err[i] != nil { hasErr = true } } if hasErr { return nil, err } return ret, nil } // multiValid is a batch version of Key.valid. It returns an error, not a // []bool. func multiValid(key []*Key) error { invalid := false for _, k := range key { if !k.valid() { invalid = true break } } if !invalid { return nil } err := make(MultiError, len(key)) for i, k := range key { if !k.valid() { err[i] = ErrInvalidKey } } return err } // checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct // type S, for some interface type I, or some non-interface non-pointer type P // such that P or *P implements PropertyLoadSaver. // // It returns what category the slice's elements are, and the reflect.Type // that represents S, I or P. // // As a special case, PropertyList is an invalid type for v. // // TODO(djd): multiArg is very confusing. Fold this logic into the // relevant Put/Get methods to make the logic less opaque. 
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { if v.Kind() != reflect.Slice { return multiArgTypeInvalid, nil } if v.Type() == typeOfPropertyList { return multiArgTypeInvalid, nil } elemType = v.Type().Elem() if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) { return multiArgTypePropertyLoadSaver, elemType } switch elemType.Kind() { case reflect.Struct: return multiArgTypeStruct, elemType case reflect.Interface: return multiArgTypeInterface, elemType case reflect.Ptr: elemType = elemType.Elem() if elemType.Kind() == reflect.Struct { return multiArgTypeStructPtr, elemType } } return multiArgTypeInvalid, nil } // Close closes the Client. func (c *Client) Close() error { return c.conn.Close() } // Get loads the entity stored for key into dst, which must be a struct pointer // or implement PropertyLoadSaver. If there is no such entity for the key, Get // returns ErrNoSuchEntity. // // The values of dst's unmatched struct fields are not modified, and matching // slice-typed fields are not reset before appending to them. In particular, it // is recommended to pass a pointer to a zero valued struct on each Get call. // // ErrFieldMismatch is returned when a field is to be loaded into a different // type than the one it was stored from, or when a field is missing or // unexported in the destination struct. ErrFieldMismatch is only returned if // dst is a struct pointer. func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error { if dst == nil { // get catches nil interfaces; we need to catch nil ptr here return ErrInvalidEntityType } err := c.get(ctx, []*Key{key}, []interface{}{dst}, nil) if me, ok := err.(MultiError); ok { return me[0] } return err } // GetMulti is a batch version of Get. // // dst must be a []S, []*S, []I or []P, for some struct type S, some interface // type I, or some non-interface non-pointer type P such that P or *P // implements PropertyLoadSaver. 
If an []I, each element must be a valid dst // for Get: it must be a struct pointer or implement PropertyLoadSaver. // // As a special case, PropertyList is an invalid type for dst, even though a // PropertyList is a slice of structs. It is treated as invalid to avoid being // mistakenly passed when []PropertyList was intended. func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) error { return c.get(ctx, keys, dst, nil) } func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error { v := reflect.ValueOf(dst) multiArgType, _ := checkMultiArg(v) // Sanity checks if multiArgType == multiArgTypeInvalid { return errors.New("datastore: dst has invalid type") } if len(keys) != v.Len() { return errors.New("datastore: keys and dst slices have different length") } if len(keys) == 0 { return nil } // Go through keys, validate them, serialize then, and create a dict mapping them to their index multiErr, any := make(MultiError, len(keys)), false keyMap := make(map[string]int) pbKeys := make([]*pb.Key, len(keys)) for i, k := range keys { if !k.valid() { multiErr[i] = ErrInvalidKey any = true } else { keyMap[k.String()] = i pbKeys[i] = keyToProto(k) } } if any { return multiErr } req := &pb.LookupRequest{ ProjectId: c.dataset, Keys: pbKeys, ReadOptions: opts, } resp, err := c.client.Lookup(ctx, req) if err != nil { return err } found := resp.Found missing := resp.Missing // Upper bound 100 iterations to prevent infinite loop. // We choose 100 iterations somewhat logically: // Max number of Entities you can request from Datastore is 1,000. // Max size for a Datastore Entity is 1 MiB. // Max request size is 10 MiB, so we assume max response size is also 10 MiB. // 1,000 / 10 = 100. // Note that if ctx has a deadline, the deadline will probably // be hit before we reach 100 iterations. 
for i := 0; len(resp.Deferred) > 0 && i < 100; i++ { req.Keys = resp.Deferred resp, err = c.client.Lookup(ctx, req) if err != nil { return err } found = append(found, resp.Found...) missing = append(missing, resp.Missing...) } if len(keys) != len(found)+len(missing) { return errors.New("datastore: internal error: server returned the wrong number of entities") } for _, e := range found { k, err := protoToKey(e.Entity.Key) if err != nil { return errors.New("datastore: internal error: server returned an invalid key") } index := keyMap[k.String()] elem := v.Index(index) if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { elem = elem.Addr() } if multiArgType == multiArgTypeStructPtr && elem.IsNil() { elem.Set(reflect.New(elem.Type().Elem())) } if err := loadEntityProto(elem.Interface(), e.Entity); err != nil { multiErr[index] = err any = true } } for _, e := range missing { k, err := protoToKey(e.Entity.Key) if err != nil { return errors.New("datastore: internal error: server returned an invalid key") } multiErr[keyMap[k.String()]] = ErrNoSuchEntity any = true } if any { return multiErr } return nil } // Put saves the entity src into the datastore with key k. src must be a struct // pointer or implement PropertyLoadSaver; if a struct pointer then any // unexported fields of that struct will be skipped. If k is an incomplete key, // the returned key will be a unique key generated by the datastore. func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) { k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src}) if err != nil { if me, ok := err.(MultiError); ok { return nil, me[0] } return nil, err } return k[0], nil } // PutMulti is a batch version of Put. // // src must satisfy the same conditions as the dst argument to GetMulti. 
func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) { mutations, err := putMutations(keys, src) if err != nil { return nil, err } // Make the request. req := &pb.CommitRequest{ ProjectId: c.dataset, Mutations: mutations, Mode: pb.CommitRequest_NON_TRANSACTIONAL, } resp, err := c.client.Commit(ctx, req) if err != nil { return nil, err } // Copy any newly minted keys into the returned keys. ret := make([]*Key, len(keys)) for i, key := range keys { if key.Incomplete() { // This key is in the mutation results. ret[i], err = protoToKey(resp.MutationResults[i].Key) if err != nil { return nil, errors.New("datastore: internal error: server returned an invalid key") } } else { ret[i] = key } } return ret, nil } func putMutations(keys []*Key, src interface{}) ([]*pb.Mutation, error) { v := reflect.ValueOf(src) multiArgType, _ := checkMultiArg(v) if multiArgType == multiArgTypeInvalid { return nil, errors.New("datastore: src has invalid type") } if len(keys) != v.Len() { return nil, errors.New("datastore: key and src slices have different length") } if len(keys) == 0 { return nil, nil } if err := multiValid(keys); err != nil { return nil, err } mutations := make([]*pb.Mutation, 0, len(keys)) multiErr := make(MultiError, len(keys)) hasErr := false for i, k := range keys { elem := v.Index(i) // Two cases where we need to take the address: // 1) multiArgTypePropertyLoadSaver => &elem implements PLS // 2) multiArgTypeStruct => saveEntity needs *struct if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { elem = elem.Addr() } p, err := saveEntity(k, elem.Interface()) if err != nil { multiErr[i] = err hasErr = true } var mut *pb.Mutation if k.Incomplete() { mut = &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: p}} } else { mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: p}} } mutations = append(mutations, mut) } if hasErr { return nil, multiErr } return mutations, nil } // Delete deletes the 
entity for the given key. func (c *Client) Delete(ctx context.Context, key *Key) error { err := c.DeleteMulti(ctx, []*Key{key}) if me, ok := err.(MultiError); ok { return me[0] } return err } // DeleteMulti is a batch version of Delete. func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error { mutations, err := deleteMutations(keys) if err != nil { return err } req := &pb.CommitRequest{ ProjectId: c.dataset, Mutations: mutations, Mode: pb.CommitRequest_NON_TRANSACTIONAL, } _, err = c.client.Commit(ctx, req) return err } func deleteMutations(keys []*Key) ([]*pb.Mutation, error) { mutations := make([]*pb.Mutation, 0, len(keys)) for _, k := range keys { if k.Incomplete() { return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k) } mutations = append(mutations, &pb.Mutation{ Operation: &pb.Mutation_Delete{Delete: keyToProto(k)}, }) } return mutations, nil } golang-google-cloud-0.9.0/datastore/datastore_test.go000066400000000000000000002141311312234511600227070ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package datastore import ( "encoding/json" "errors" "fmt" "reflect" "sort" "strings" "testing" "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/datastore/v1" "google.golang.org/grpc" ) type ( myBlob []byte myByte byte myString string ) func makeMyByteSlice(n int) []myByte { b := make([]myByte, n) for i := range b { b[i] = myByte(i) } return b } func makeInt8Slice(n int) []int8 { b := make([]int8, n) for i := range b { b[i] = int8(i) } return b } func makeUint8Slice(n int) []uint8 { b := make([]uint8, n) for i := range b { b[i] = uint8(i) } return b } func newKey(stringID string, parent *Key) *Key { return NameKey("kind", stringID, parent) } var ( testKey0 = newKey("name0", nil) testKey1a = newKey("name1", nil) testKey1b = newKey("name1", nil) testKey2a = newKey("name2", testKey0) testKey2b = newKey("name2", testKey0) testGeoPt0 = GeoPoint{Lat: 1.2, Lng: 3.4} testGeoPt1 = GeoPoint{Lat: 5, Lng: 10} testBadGeoPt = GeoPoint{Lat: 1000, Lng: 34} ts = time.Unix(1e9, 0).UTC() ) type B0 struct { B []byte `datastore:",noindex"` } type B1 struct { B []int8 } type B2 struct { B myBlob `datastore:",noindex"` } type B3 struct { B []myByte `datastore:",noindex"` } type B4 struct { B [][]byte } type C0 struct { I int C chan int } type C1 struct { I int C *chan int } type C2 struct { I int C []chan int } type C3 struct { C string } type c4 struct { C string } type E struct{} type G0 struct { G GeoPoint } type G1 struct { G []GeoPoint } type K0 struct { K *Key } type K1 struct { K []*Key } type S struct { St string } type NoOmit struct { A string B int `datastore:"Bb"` C bool `datastore:",noindex"` } type OmitAll struct { A string `datastore:",omitempty"` B int `datastore:"Bb,omitempty"` C bool `datastore:",omitempty,noindex"` F []int `datastore:",omitempty"` } type Omit struct { A string `datastore:",omitempty"` B int `datastore:"Bb,omitempty"` C bool `datastore:",omitempty,noindex"` F []int `datastore:",omitempty"` S 
`datastore:",omitempty"` } type NoOmits struct { No []NoOmit `datastore:",omitempty"` S `datastore:",omitempty"` Ss S `datastore:",omitempty"` } type N0 struct { X0 Nonymous X0 Ignore string `datastore:"-"` Other string } type N1 struct { X0 Nonymous []X0 Ignore string `datastore:"-"` Other string } type N2 struct { N1 `datastore:"red"` Green N1 `datastore:"green"` Blue N1 White N1 `datastore:"-"` } type N3 struct { C3 `datastore:"red"` } type N4 struct { c4 } type N5 struct { c4 `datastore:"red"` } type O0 struct { I int64 } type O1 struct { I int32 } type U0 struct { U uint } type U1 struct { U string } type T struct { T time.Time } type X0 struct { S string I int i int } type X1 struct { S myString I int32 J int64 } type X2 struct { Z string i int } type X3 struct { S bool I int } type Y0 struct { B bool F []float64 G []float64 } type Y1 struct { B bool F float64 } type Y2 struct { B bool F []int64 } type Tagged struct { A int `datastore:"a,noindex"` B []int `datastore:"b"` C int `datastore:",noindex"` D int `datastore:""` E int I int `datastore:"-"` J int `datastore:",noindex" json:"j"` Y0 `datastore:"-"` Z chan int `datastore:"-"` } type InvalidTagged1 struct { I int `datastore:"\t"` } type InvalidTagged2 struct { I int J int `datastore:"I"` } type InvalidTagged3 struct { X string `datastore:"-,noindex"` } type InvalidTagged4 struct { X string `datastore:",garbage"` } type Inner1 struct { W int32 X string } type Inner2 struct { Y float64 } type Inner3 struct { Z bool } type Inner5 struct { WW int } type Inner4 struct { X Inner5 } type Outer struct { A int16 I []Inner1 J Inner2 Inner3 } type OuterFlatten struct { A int16 I []Inner1 `datastore:",flatten"` J Inner2 `datastore:",flatten,noindex"` Inner3 `datastore:",flatten"` K Inner4 `datastore:",flatten"` } type OuterEquivalent struct { A int16 IDotW []int32 `datastore:"I.W"` IDotX []string `datastore:"I.X"` JDotY float64 `datastore:"J.Y"` Z bool } type Dotted struct { A DottedA `datastore:"A0.A1.A2"` } type 
DottedA struct { B DottedB `datastore:"B3"` } type DottedB struct { C int `datastore:"C4.C5"` } type SliceOfSlices struct { I int S []struct { J int F []float64 } `datastore:",flatten"` } type Recursive struct { I int R []Recursive } type MutuallyRecursive0 struct { I int R []MutuallyRecursive1 } type MutuallyRecursive1 struct { I int R []MutuallyRecursive0 } type EntityWithKey struct { I int S string K *Key `datastore:"__key__"` } type EntityWithKey2 EntityWithKey type WithNestedEntityWithKey struct { N EntityWithKey } type WithNonKeyField struct { I int K string `datastore:"__key__"` } type NestedWithNonKeyField struct { N WithNonKeyField } type Basic struct { A string } type PtrToStructField struct { B *Basic C *Basic `datastore:"c,noindex"` *Basic D []*Basic } var two int = 2 type PtrToInt struct { I *int } type EmbeddedTime struct { time.Time } type SpecialTime struct { MyTime EmbeddedTime } type Doubler struct { S string I int64 B bool } type Repeat struct { Key string Value []byte } type Repeated struct { Repeats []Repeat } func (d *Doubler) Load(props []Property) error { return LoadStruct(d, props) } func (d *Doubler) Save() ([]Property, error) { // Save the default Property slice to an in-memory buffer (a PropertyList). props, err := SaveStruct(d) if err != nil { return nil, err } var list PropertyList if err := list.Load(props); err != nil { return nil, err } // Edit that PropertyList, and send it on. for i := range list { switch v := list[i].Value.(type) { case string: // + means string concatenation. list[i].Value = v + v case int64: // + means integer addition. 
list[i].Value = v + v } } return list.Save() } var _ PropertyLoadSaver = (*Doubler)(nil) type Deriver struct { S, Derived, Ignored string } func (e *Deriver) Load(props []Property) error { for _, p := range props { if p.Name != "S" { continue } e.S = p.Value.(string) e.Derived = "derived+" + e.S } return nil } func (e *Deriver) Save() ([]Property, error) { return []Property{ { Name: "S", Value: e.S, }, }, nil } var _ PropertyLoadSaver = (*Deriver)(nil) type BadMultiPropEntity struct{} func (e *BadMultiPropEntity) Load(props []Property) error { return errors.New("unimplemented") } func (e *BadMultiPropEntity) Save() ([]Property, error) { // Write multiple properties with the same name "I". var props []Property for i := 0; i < 3; i++ { props = append(props, Property{ Name: "I", Value: int64(i), }) } return props, nil } var _ PropertyLoadSaver = (*BadMultiPropEntity)(nil) type testCase struct { desc string src interface{} want interface{} putErr string getErr string } var testCases = []testCase{ { "chan save fails", &C0{I: -1}, &E{}, "unsupported struct field", "", }, { "*chan save fails", &C1{I: -1}, &E{}, "unsupported struct field", "", }, { "[]chan save fails", &C2{I: -1, C: make([]chan int, 8)}, &E{}, "unsupported struct field", "", }, { "chan load fails", &C3{C: "not a chan"}, &C0{}, "", "type mismatch", }, { "*chan load fails", &C3{C: "not a *chan"}, &C1{}, "", "type mismatch", }, { "[]chan load fails", &C3{C: "not a []chan"}, &C2{}, "", "type mismatch", }, { "empty struct", &E{}, &E{}, "", "", }, { "geopoint", &G0{G: testGeoPt0}, &G0{G: testGeoPt0}, "", "", }, { "geopoint invalid", &G0{G: testBadGeoPt}, &G0{}, "invalid GeoPoint value", "", }, { "geopoint as props", &G0{G: testGeoPt0}, &PropertyList{ Property{Name: "G", Value: testGeoPt0, NoIndex: false}, }, "", "", }, { "geopoint slice", &G1{G: []GeoPoint{testGeoPt0, testGeoPt1}}, &G1{G: []GeoPoint{testGeoPt0, testGeoPt1}}, "", "", }, { "omit empty, all", &OmitAll{}, new(PropertyList), "", "", }, { "omit 
empty", &Omit{}, &PropertyList{ Property{Name: "St", Value: "", NoIndex: false}, }, "", "", }, { "omit empty, fields populated", &Omit{ A: "a", B: 10, C: true, F: []int{11}, }, &PropertyList{ Property{Name: "A", Value: "a", NoIndex: false}, Property{Name: "Bb", Value: int64(10), NoIndex: false}, Property{Name: "C", Value: true, NoIndex: true}, Property{Name: "F", Value: []interface{}{int64(11)}, NoIndex: false}, Property{Name: "St", Value: "", NoIndex: false}, }, "", "", }, { "omit empty, fields populated", &Omit{ A: "a", B: 10, C: true, F: []int{11}, S: S{St: "string"}, }, &PropertyList{ Property{Name: "A", Value: "a", NoIndex: false}, Property{Name: "Bb", Value: int64(10), NoIndex: false}, Property{Name: "C", Value: true, NoIndex: true}, Property{Name: "F", Value: []interface{}{int64(11)}, NoIndex: false}, Property{Name: "St", Value: "string", NoIndex: false}, }, "", "", }, { "omit empty does not propagate", &NoOmits{ No: []NoOmit{ NoOmit{}, }, S: S{}, Ss: S{}, }, &PropertyList{ Property{Name: "No", Value: []interface{}{ &Entity{ Properties: []Property{ Property{Name: "A", Value: "", NoIndex: false}, Property{Name: "Bb", Value: int64(0), NoIndex: false}, Property{Name: "C", Value: false, NoIndex: true}, }, }, }, NoIndex: false}, Property{Name: "Ss", Value: &Entity{ Properties: []Property{ Property{Name: "St", Value: "", NoIndex: false}, }, }, NoIndex: false}, Property{Name: "St", Value: "", NoIndex: false}, }, "", "", }, { "key", &K0{K: testKey1a}, &K0{K: testKey1b}, "", "", }, { "key with parent", &K0{K: testKey2a}, &K0{K: testKey2b}, "", "", }, { "nil key", &K0{}, &K0{}, "", "", }, { "all nil keys in slice", &K1{[]*Key{nil, nil}}, &K1{[]*Key{nil, nil}}, "", "", }, { "some nil keys in slice", &K1{[]*Key{testKey1a, nil, testKey2a}}, &K1{[]*Key{testKey1b, nil, testKey2b}}, "", "", }, { "overflow", &O0{I: 1 << 48}, &O1{}, "", "overflow", }, { "time", &T{T: time.Unix(1e9, 0)}, &T{T: time.Unix(1e9, 0)}, "", "", }, { "time as props", &T{T: time.Unix(1e9, 0)}, 
&PropertyList{ Property{Name: "T", Value: time.Unix(1e9, 0), NoIndex: false}, }, "", "", }, { "uint save", &U0{U: 1}, &U0{}, "unsupported struct field", "", }, { "uint load", &U1{U: "not a uint"}, &U0{}, "", "type mismatch", }, { "zero", &X0{}, &X0{}, "", "", }, { "basic", &X0{S: "one", I: 2, i: 3}, &X0{S: "one", I: 2}, "", "", }, { "save string/int load myString/int32", &X0{S: "one", I: 2, i: 3}, &X1{S: "one", I: 2}, "", "", }, { "missing fields", &X0{S: "one", I: 2, i: 3}, &X2{}, "", "no such struct field", }, { "save string load bool", &X0{S: "one", I: 2, i: 3}, &X3{I: 2}, "", "type mismatch", }, { "basic slice", &Y0{B: true, F: []float64{7, 8, 9}}, &Y0{B: true, F: []float64{7, 8, 9}}, "", "", }, { "save []float64 load float64", &Y0{B: true, F: []float64{7, 8, 9}}, &Y1{B: true}, "", "requires a slice", }, { "save []float64 load []int64", &Y0{B: true, F: []float64{7, 8, 9}}, &Y2{B: true}, "", "type mismatch", }, { "single slice is too long", &Y0{F: make([]float64, maxIndexedProperties+1)}, &Y0{}, "too many indexed properties", "", }, { "two slices are too long", &Y0{F: make([]float64, maxIndexedProperties), G: make([]float64, maxIndexedProperties)}, &Y0{}, "too many indexed properties", "", }, { "one slice and one scalar are too long", &Y0{F: make([]float64, maxIndexedProperties), B: true}, &Y0{}, "too many indexed properties", "", }, { "slice of slices of bytes", &Repeated{ Repeats: []Repeat{ { Key: "key 1", Value: []byte("value 1"), }, { Key: "key 2", Value: []byte("value 2"), }, }, }, &Repeated{ Repeats: []Repeat{ { Key: "key 1", Value: []byte("value 1"), }, { Key: "key 2", Value: []byte("value 2"), }, }, }, "", "", }, { "long blob", &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, "", "", }, { "long []int8 is too long", &B1{B: makeInt8Slice(maxIndexedProperties + 1)}, &B1{}, "too many indexed properties", "", }, { "short []int8", &B1{B: makeInt8Slice(3)}, &B1{B: makeInt8Slice(3)}, "", "", }, { "long myBlob", 
&B2{B: makeUint8Slice(maxIndexedProperties + 1)}, &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, "", "", }, { "short myBlob", &B2{B: makeUint8Slice(3)}, &B2{B: makeUint8Slice(3)}, "", "", }, { "long []myByte", &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, "", "", }, { "short []myByte", &B3{B: makeMyByteSlice(3)}, &B3{B: makeMyByteSlice(3)}, "", "", }, { "slice of blobs", &B4{B: [][]byte{ makeUint8Slice(3), makeUint8Slice(4), makeUint8Slice(5), }}, &B4{B: [][]byte{ makeUint8Slice(3), makeUint8Slice(4), makeUint8Slice(5), }}, "", "", }, { "[]byte must be noindex", &PropertyList{ Property{Name: "B", Value: makeUint8Slice(1501), NoIndex: false}, }, nil, "[]byte property too long to index", "", }, { "string must be noindex", &PropertyList{ Property{Name: "B", Value: strings.Repeat("x", 1501), NoIndex: false}, }, nil, "string property too long to index", "", }, { "slice of []byte must be noindex", &PropertyList{ Property{Name: "B", Value: []interface{}{ []byte("short"), makeUint8Slice(1501), }, NoIndex: false}, }, nil, "[]byte property too long to index", "", }, { "slice of string must be noindex", &PropertyList{ Property{Name: "B", Value: []interface{}{ "short", strings.Repeat("x", 1501), }, NoIndex: false}, }, nil, "string property too long to index", "", }, { "save tagged load props", &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, &PropertyList{ // A and B are renamed to a and b; A and C are noindex, I is ignored. // Order is sorted as per byName. 
Property{Name: "C", Value: int64(3), NoIndex: true}, Property{Name: "D", Value: int64(4), NoIndex: false}, Property{Name: "E", Value: int64(5), NoIndex: false}, Property{Name: "J", Value: int64(7), NoIndex: true}, Property{Name: "a", Value: int64(1), NoIndex: true}, Property{Name: "b", Value: []interface{}{int64(21), int64(22), int64(23)}, NoIndex: false}, }, "", "", }, { "save tagged load tagged", &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, J: 7}, "", "", }, { "invalid tagged1", &InvalidTagged1{I: 1}, &InvalidTagged1{}, "struct tag has invalid property name", "", }, { "invalid tagged2", &InvalidTagged2{I: 1, J: 2}, &InvalidTagged2{J: 2}, "", "", }, { "invalid tagged3", &InvalidTagged3{X: "hello"}, &InvalidTagged3{}, "struct tag has invalid property name: \"-\"", "", }, { "invalid tagged4", &InvalidTagged4{X: "hello"}, &InvalidTagged4{}, "struct tag has invalid option: \"garbage\"", "", }, { "doubler", &Doubler{S: "s", I: 1, B: true}, &Doubler{S: "ss", I: 2, B: true}, "", "", }, { "save struct load props", &X0{S: "s", I: 1}, &PropertyList{ Property{Name: "I", Value: int64(1), NoIndex: false}, Property{Name: "S", Value: "s", NoIndex: false}, }, "", "", }, { "save props load struct", &PropertyList{ Property{Name: "I", Value: int64(1), NoIndex: false}, Property{Name: "S", Value: "s", NoIndex: false}, }, &X0{S: "s", I: 1}, "", "", }, { "nil-value props", &PropertyList{ Property{Name: "I", Value: nil, NoIndex: false}, Property{Name: "B", Value: nil, NoIndex: false}, Property{Name: "S", Value: nil, NoIndex: false}, Property{Name: "F", Value: nil, NoIndex: false}, Property{Name: "K", Value: nil, NoIndex: false}, Property{Name: "T", Value: nil, NoIndex: false}, Property{Name: "J", Value: []interface{}{nil, int64(7), nil}, NoIndex: false}, }, &struct { I int64 B bool S string F float64 K *Key T time.Time J []int64 }{ J: []int64{0, 7, 0}, }, "", "", }, { "save outer load props flatten", 
&OuterFlatten{ A: 1, I: []Inner1{ {10, "ten"}, {20, "twenty"}, {30, "thirty"}, }, J: Inner2{ Y: 3.14, }, Inner3: Inner3{ Z: true, }, K: Inner4{ X: Inner5{ WW: 12, }, }, }, &PropertyList{ Property{Name: "A", Value: int64(1), NoIndex: false}, Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, Property{Name: "J.Y", Value: float64(3.14), NoIndex: true}, Property{Name: "K.X.WW", Value: int64(12), NoIndex: false}, Property{Name: "Z", Value: true, NoIndex: false}, }, "", "", }, { "load outer props flatten", &PropertyList{ Property{Name: "A", Value: int64(1), NoIndex: false}, Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, Property{Name: "J.Y", Value: float64(3.14), NoIndex: true}, Property{Name: "Z", Value: true, NoIndex: false}, }, &OuterFlatten{ A: 1, I: []Inner1{ {10, "ten"}, {20, "twenty"}, {30, "thirty"}, }, J: Inner2{ Y: 3.14, }, Inner3: Inner3{ Z: true, }, }, "", "", }, { "save outer load props", &Outer{ A: 1, I: []Inner1{ {10, "ten"}, {20, "twenty"}, {30, "thirty"}, }, J: Inner2{ Y: 3.14, }, Inner3: Inner3{ Z: true, }, }, &PropertyList{ Property{Name: "A", Value: int64(1), NoIndex: false}, Property{Name: "I", Value: []interface{}{ &Entity{ Properties: []Property{ Property{Name: "W", Value: int64(10), NoIndex: false}, Property{Name: "X", Value: "ten", NoIndex: false}, }, }, &Entity{ Properties: []Property{ Property{Name: "W", Value: int64(20), NoIndex: false}, Property{Name: "X", Value: "twenty", NoIndex: false}, }, }, &Entity{ Properties: []Property{ Property{Name: "W", Value: int64(30), NoIndex: false}, Property{Name: "X", Value: "thirty", NoIndex: false}, }, }, }, NoIndex: false}, Property{Name: "J", Value: &Entity{ Properties: []Property{ Property{Name: "Y", Value: float64(3.14), NoIndex: false}, }, 
}, NoIndex: false}, Property{Name: "Z", Value: true, NoIndex: false}, }, "", "", }, { "save props load outer-equivalent", &PropertyList{ Property{Name: "A", Value: int64(1), NoIndex: false}, Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, Property{Name: "J.Y", Value: float64(3.14), NoIndex: false}, Property{Name: "Z", Value: true, NoIndex: false}, }, &OuterEquivalent{ A: 1, IDotW: []int32{10, 20, 30}, IDotX: []string{"ten", "twenty", "thirty"}, JDotY: 3.14, Z: true, }, "", "", }, { "dotted names save", &Dotted{A: DottedA{B: DottedB{C: 88}}}, &PropertyList{ Property{Name: "A0.A1.A2", Value: &Entity{ Properties: []Property{ Property{Name: "B3", Value: &Entity{ Properties: []Property{ Property{Name: "C4.C5", Value: int64(88), NoIndex: false}, }, }, NoIndex: false}, }, }, NoIndex: false}, }, "", "", }, { "dotted names load", &PropertyList{ Property{Name: "A0.A1.A2", Value: &Entity{ Properties: []Property{ Property{Name: "B3", Value: &Entity{ Properties: []Property{ Property{Name: "C4.C5", Value: 99, NoIndex: false}, }, }, NoIndex: false}, }, }, NoIndex: false}, }, &Dotted{A: DottedA{B: DottedB{C: 99}}}, "", "", }, { "save struct load deriver", &X0{S: "s", I: 1}, &Deriver{S: "s", Derived: "derived+s"}, "", "", }, { "save deriver load struct", &Deriver{S: "s", Derived: "derived+s", Ignored: "ignored"}, &X0{S: "s"}, "", "", }, { "zero time.Time", &T{T: time.Time{}}, &T{T: time.Time{}}, "", "", }, { "time.Time near Unix zero time", &T{T: time.Unix(0, 4e3)}, &T{T: time.Unix(0, 4e3)}, "", "", }, { "time.Time, far in the future", &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, "", "", }, { "time.Time, very far in the past", &T{T: time.Date(-300000, 1, 1, 0, 0, 0, 0, time.UTC)}, &T{}, "time value out of range", "", }, { "time.Time, very far in the future", &T{T: time.Date(294248, 1, 1, 0, 
0, 0, 0, time.UTC)}, &T{}, "time value out of range", "", }, { "structs", &N0{ X0: X0{S: "one", I: 2, i: 3}, Nonymous: X0{S: "four", I: 5, i: 6}, Ignore: "ignore", Other: "other", }, &N0{ X0: X0{S: "one", I: 2}, Nonymous: X0{S: "four", I: 5}, Other: "other", }, "", "", }, { "slice of structs", &N1{ X0: X0{S: "one", I: 2, i: 3}, Nonymous: []X0{ {S: "four", I: 5, i: 6}, {S: "seven", I: 8, i: 9}, {S: "ten", I: 11, i: 12}, {S: "thirteen", I: 14, i: 15}, }, Ignore: "ignore", Other: "other", }, &N1{ X0: X0{S: "one", I: 2}, Nonymous: []X0{ {S: "four", I: 5}, {S: "seven", I: 8}, {S: "ten", I: 11}, {S: "thirteen", I: 14}, }, Other: "other", }, "", "", }, { "structs with slices of structs", &N2{ N1: N1{ X0: X0{S: "rouge"}, Nonymous: []X0{ {S: "rosso0"}, {S: "rosso1"}, }, }, Green: N1{ X0: X0{S: "vert"}, Nonymous: []X0{ {S: "verde0"}, {S: "verde1"}, {S: "verde2"}, }, }, Blue: N1{ X0: X0{S: "bleu"}, Nonymous: []X0{ {S: "blu0"}, {S: "blu1"}, {S: "blu2"}, {S: "blu3"}, }, }, }, &N2{ N1: N1{ X0: X0{S: "rouge"}, Nonymous: []X0{ {S: "rosso0"}, {S: "rosso1"}, }, }, Green: N1{ X0: X0{S: "vert"}, Nonymous: []X0{ {S: "verde0"}, {S: "verde1"}, {S: "verde2"}, }, }, Blue: N1{ X0: X0{S: "bleu"}, Nonymous: []X0{ {S: "blu0"}, {S: "blu1"}, {S: "blu2"}, {S: "blu3"}, }, }, }, "", "", }, { "save structs load props", &N2{ N1: N1{ X0: X0{S: "rouge"}, Nonymous: []X0{ {S: "rosso0"}, {S: "rosso1"}, }, }, Green: N1{ X0: X0{S: "vert"}, Nonymous: []X0{ {S: "verde0"}, {S: "verde1"}, {S: "verde2"}, }, }, Blue: N1{ X0: X0{S: "bleu"}, Nonymous: []X0{ {S: "blu0"}, {S: "blu1"}, {S: "blu2"}, {S: "blu3"}, }, }, }, &PropertyList{ Property{Name: "Blue", Value: &Entity{ Properties: []Property{ Property{Name: "I", Value: int64(0), NoIndex: false}, Property{Name: "Nonymous", Value: []interface{}{ &Entity{ Properties: []Property{ Property{Name: "I", Value: int64(0), NoIndex: false}, Property{Name: "S", Value: "blu0", NoIndex: false}, }, }, &Entity{ Properties: []Property{ Property{Name: "I", Value: int64(0), NoIndex: 
false}, Property{Name: "S", Value: "blu1", NoIndex: false}, }, }, &Entity{ Properties: []Property{ Property{Name: "I", Value: int64(0), NoIndex: false}, Property{Name: "S", Value: "blu2", NoIndex: false}, }, }, &Entity{ Properties: []Property{ Property{Name: "I", Value: int64(0), NoIndex: false}, Property{Name: "S", Value: "blu3", NoIndex: false}, }, }, }, NoIndex: false}, Property{Name: "Other", Value: "", NoIndex: false}, Property{Name: "S", Value: "bleu", NoIndex: false}, }, }, NoIndex: false}, Property{Name: "green", Value: &Entity{ Properties: []Property{ Property{Name: "I", Value: int64(0), NoIndex: false}, Property{Name: "Nonymous", Value: []interface{}{ &Entity{ Properties: []Property{ Property{Name: "I", Value: int64(0), NoIndex: false}, Property{Name: "S", Value: "verde0", NoIndex: false}, }, }, &Entity{ Properties: []Property{ Property{Name: "I", Value: int64(0), NoIndex: false}, Property{Name: "S", Value: "verde1", NoIndex: false}, }, }, &Entity{ Properties: []Property{ Property{Name: "I", Value: int64(0), NoIndex: false}, Property{Name: "S", Value: "verde2", NoIndex: false}, }, }, }, NoIndex: false}, Property{Name: "Other", Value: "", NoIndex: false}, Property{Name: "S", Value: "vert", NoIndex: false}, }, }, NoIndex: false}, Property{Name: "red", Value: &Entity{ Properties: []Property{ Property{Name: "I", Value: int64(0), NoIndex: false}, Property{Name: "Nonymous", Value: []interface{}{ &Entity{ Properties: []Property{ Property{Name: "I", Value: int64(0), NoIndex: false}, Property{Name: "S", Value: "rosso0", NoIndex: false}, }, }, &Entity{ Properties: []Property{ Property{Name: "I", Value: int64(0), NoIndex: false}, Property{Name: "S", Value: "rosso1", NoIndex: false}, }, }, }, NoIndex: false}, Property{Name: "Other", Value: "", NoIndex: false}, Property{Name: "S", Value: "rouge", NoIndex: false}, }, }, NoIndex: false}, }, "", "", }, { "nested entity with key", &WithNestedEntityWithKey{ N: EntityWithKey{ I: 12, S: "abcd", K: testKey0, }, }, 
&WithNestedEntityWithKey{ N: EntityWithKey{ I: 12, S: "abcd", K: testKey0, }, }, "", "", }, { "entity with key at top level", &EntityWithKey{ I: 12, S: "abc", K: testKey0, }, &EntityWithKey{ I: 12, S: "abc", K: testKey0, }, "", "", }, { "entity with key at top level (key is populated on load)", &EntityWithKey{ I: 12, S: "abc", }, &EntityWithKey{ I: 12, S: "abc", K: testKey0, }, "", "", }, { "__key__ field not a *Key", &NestedWithNonKeyField{ N: WithNonKeyField{ I: 12, K: "abcd", }, }, &NestedWithNonKeyField{ N: WithNonKeyField{ I: 12, K: "abcd", }, }, "datastore: __key__ field on struct datastore.WithNonKeyField is not a *datastore.Key", "", }, { "save struct with ptr to struct fields", &PtrToStructField{ &Basic{ A: "b", }, &Basic{ A: "c", }, &Basic{ A: "anon", }, []*Basic{ &Basic{ A: "slice0", }, &Basic{ A: "slice1", }, }, }, &PropertyList{ Property{Name: "A", Value: "anon", NoIndex: false}, Property{Name: "B", Value: &Entity{ Properties: []Property{ Property{Name: "A", Value: "b", NoIndex: false}, }, }}, Property{Name: "D", Value: []interface{}{ &Entity{ Properties: []Property{ Property{Name: "A", Value: "slice0", NoIndex: false}, }, }, &Entity{ Properties: []Property{ Property{Name: "A", Value: "slice1", NoIndex: false}, }, }, }, NoIndex: false}, Property{Name: "c", Value: &Entity{ Properties: []Property{ Property{Name: "A", Value: "c", NoIndex: true}, }, }, NoIndex: true}, }, "", "", }, { "save and load struct with ptr to struct fields", &PtrToStructField{ &Basic{ A: "b", }, &Basic{ A: "c", }, &Basic{ A: "anon", }, []*Basic{ &Basic{ A: "slice0", }, &Basic{ A: "slice1", }, }, }, &PtrToStructField{ &Basic{ A: "b", }, &Basic{ A: "c", }, &Basic{ A: "anon", }, []*Basic{ &Basic{ A: "slice0", }, &Basic{ A: "slice1", }, }, }, "", "", }, { "save struct with pointer to int field", &PtrToInt{ I: &two, }, &PtrToInt{}, "unsupported struct field", "", }, { "struct with nil ptr to struct fields", &PtrToStructField{ nil, nil, nil, nil, }, new(PropertyList), "", "", }, { 
"nested load entity with key", &WithNestedEntityWithKey{ N: EntityWithKey{ I: 12, S: "abcd", K: testKey0, }, }, &PropertyList{ Property{Name: "N", Value: &Entity{ Key: testKey0, Properties: []Property{ Property{Name: "I", Value: int64(12), NoIndex: false}, Property{Name: "S", Value: "abcd", NoIndex: false}, }, }, NoIndex: false}, }, "", "", }, { "nested save entity with key", &PropertyList{ Property{Name: "N", Value: &Entity{ Key: testKey0, Properties: []Property{ Property{Name: "I", Value: int64(12), NoIndex: false}, Property{Name: "S", Value: "abcd", NoIndex: false}, }, }, NoIndex: false}, }, &WithNestedEntityWithKey{ N: EntityWithKey{ I: 12, S: "abcd", K: testKey0, }, }, "", "", }, { "anonymous field with tag", &N3{ C3: C3{C: "s"}, }, &PropertyList{ Property{Name: "red", Value: &Entity{ Properties: []Property{ Property{Name: "C", Value: "s", NoIndex: false}, }, }, NoIndex: false}, }, "", "", }, { "unexported anonymous field", &N4{ c4: c4{C: "s"}, }, &PropertyList{ Property{Name: "C", Value: "s", NoIndex: false}, }, "", "", }, { "unexported anonymous field with tag", &N5{ c4: c4{C: "s"}, }, new(PropertyList), "", "", }, { "save props load structs with ragged fields", &PropertyList{ Property{Name: "red.S", Value: "rot", NoIndex: false}, Property{Name: "green.Nonymous.I", Value: []interface{}{int64(10), int64(11), int64(12), int64(13)}, NoIndex: false}, Property{Name: "Blue.Nonymous.I", Value: []interface{}{int64(20), int64(21)}, NoIndex: false}, Property{Name: "Blue.Nonymous.S", Value: []interface{}{"blau0", "blau1", "blau2"}, NoIndex: false}, }, &N2{ N1: N1{ X0: X0{S: "rot"}, }, Green: N1{ Nonymous: []X0{ {I: 10}, {I: 11}, {I: 12}, {I: 13}, }, }, Blue: N1{ Nonymous: []X0{ {S: "blau0", I: 20}, {S: "blau1", I: 21}, {S: "blau2"}, }, }, }, "", "", }, { "save structs with noindex tags", &struct { A struct { X string `datastore:",noindex"` Y string } `datastore:",noindex"` B struct { X string `datastore:",noindex"` Y string } }{}, &PropertyList{ Property{Name: "A", 
Value: &Entity{ Properties: []Property{ Property{Name: "X", Value: "", NoIndex: true}, Property{Name: "Y", Value: "", NoIndex: true}, }, }, NoIndex: true}, Property{Name: "B", Value: &Entity{ Properties: []Property{ Property{Name: "X", Value: "", NoIndex: true}, Property{Name: "Y", Value: "", NoIndex: false}, }, }, NoIndex: false}, }, "", "", }, { "embedded struct with name override", &struct { Inner1 `datastore:"foo"` }{}, &PropertyList{ Property{Name: "foo", Value: &Entity{ Properties: []Property{ Property{Name: "W", Value: int64(0), NoIndex: false}, Property{Name: "X", Value: "", NoIndex: false}, }, }, NoIndex: false}, }, "", "", }, { "slice of slices", &SliceOfSlices{}, nil, "flattening nested structs leads to a slice of slices", "", }, { "recursive struct", &Recursive{}, &Recursive{}, "", "", }, { "mutually recursive struct", &MutuallyRecursive0{}, &MutuallyRecursive0{}, "", "", }, { "non-exported struct fields", &struct { i, J int64 }{i: 1, J: 2}, &PropertyList{ Property{Name: "J", Value: int64(2), NoIndex: false}, }, "", "", }, { "json.RawMessage", &struct { J json.RawMessage }{ J: json.RawMessage("rawr"), }, &PropertyList{ Property{Name: "J", Value: []byte("rawr"), NoIndex: false}, }, "", "", }, { "json.RawMessage to myBlob", &struct { B json.RawMessage }{ B: json.RawMessage("rawr"), }, &B2{B: myBlob("rawr")}, "", "", }, { "repeated property names", &PropertyList{ Property{Name: "A", Value: ""}, Property{Name: "A", Value: ""}, }, nil, "duplicate Property", "", }, { "embedded time field", &SpecialTime{MyTime: EmbeddedTime{ts}}, &SpecialTime{MyTime: EmbeddedTime{ts}}, "", "", }, { "embedded time load", &PropertyList{ Property{Name: "MyTime.Time", Value: ts}, }, &SpecialTime{MyTime: EmbeddedTime{ts}}, "", "", }, } // checkErr returns the empty string if either both want and err are zero, // or if want is a non-empty substring of err's string representation. 
func checkErr(want string, err error) string { if err != nil { got := err.Error() if want == "" || strings.Index(got, want) == -1 { return got } } else if want != "" { return fmt.Sprintf("want error %q", want) } return "" } func TestRoundTrip(t *testing.T) { for _, tc := range testCases { p, err := saveEntity(testKey0, tc.src) if s := checkErr(tc.putErr, err); s != "" { t.Errorf("%s: save: %s", tc.desc, s) continue } if p == nil { continue } var got interface{} if _, ok := tc.want.(*PropertyList); ok { got = new(PropertyList) } else { got = reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() } err = loadEntityProto(got, p) if s := checkErr(tc.getErr, err); s != "" { t.Errorf("%s: load: %s", tc.desc, s) continue } if pl, ok := got.(*PropertyList); ok { // Sort by name to make sure we have a deterministic order. sortPL(*pl) } equal := false switch v := got.(type) { // Round tripping a time.Time can result in a different time.Location: Local instead of UTC. // We therefore test equality explicitly, instead of relying on reflect.DeepEqual. 
case *T: equal = v.T.Equal(tc.want.(*T).T) case *SpecialTime: equal = v.MyTime.Equal(tc.want.(*SpecialTime).MyTime.Time) default: equal = reflect.DeepEqual(got, tc.want) } if !equal { t.Errorf("%s: compare:\ngot: %+#v\nwant: %+#v", tc.desc, got, tc.want) continue } } } type aPtrPLS struct { Count int } func (pls *aPtrPLS) Load([]Property) error { pls.Count += 1 return nil } func (pls *aPtrPLS) Save() ([]Property, error) { return []Property{{Name: "Count", Value: 4}}, nil } type aValuePLS struct { Count int } func (pls aValuePLS) Load([]Property) error { pls.Count += 2 return nil } func (pls aValuePLS) Save() ([]Property, error) { return []Property{{Name: "Count", Value: 8}}, nil } type aValuePtrPLS struct { Count int } func (pls *aValuePtrPLS) Load([]Property) error { pls.Count = 11 return nil } func (pls *aValuePtrPLS) Save() ([]Property, error) { return []Property{{Name: "Count", Value: 12}}, nil } type aNotPLS struct { Count int } type plsString string func (s *plsString) Load([]Property) error { *s = "LOADED" return nil } func (s *plsString) Save() ([]Property, error) { return []Property{{Name: "SS", Value: "SAVED"}}, nil } func ptrToplsString(s string) *plsString { plsStr := plsString(s) return &plsStr } type aSubPLS struct { Foo string Bar *aPtrPLS Baz aValuePtrPLS S plsString } type aSubNotPLS struct { Foo string Bar *aNotPLS } type aSubPLSErr struct { Foo string Bar aValuePLS } type aSubPLSNoErr struct { Foo string Bar aPtrPLS } type GrandparentFlatten struct { Parent Parent `datastore:",flatten"` } type GrandparentOfPtrFlatten struct { Parent ParentOfPtr `datastore:",flatten"` } type GrandparentOfSlice struct { Parent ParentOfSlice } type GrandparentOfSlicePtrs struct { Parent ParentOfSlicePtrs } type GrandparentOfSliceFlatten struct { Parent ParentOfSlice `datastore:",flatten"` } type GrandparentOfSlicePtrsFlatten struct { Parent ParentOfSlicePtrs `datastore:",flatten"` } type Grandparent struct { Parent Parent } type Parent struct { Child Child String 
plsString } type ParentOfPtr struct { Child *Child String *plsString } type ParentOfSlice struct { Children []Child Strings []plsString } type ParentOfSlicePtrs struct { Children []*Child Strings []*plsString } type Child struct { I int Grandchild Grandchild } type Grandchild struct { S string } func (c *Child) Load(props []Property) error { for _, p := range props { if p.Name == "I" { c.I += 1 } else if p.Name == "Grandchild.S" { c.Grandchild.S = "grandchild loaded" } } return nil } func (c *Child) Save() ([]Property, error) { v := c.I + 1 return []Property{ {Name: "I", Value: v}, {Name: "Grandchild.S", Value: fmt.Sprintf("grandchild saved %d", v)}, }, nil } func TestLoadSavePLS(t *testing.T) { type testCase struct { desc string src interface{} wantSave *pb.Entity wantLoad interface{} saveErr string loadErr string } testCases := []testCase{ { desc: "non-struct implements PLS (top-level)", src: ptrToplsString("hello"), wantSave: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, }, }, wantLoad: ptrToplsString("LOADED"), }, { desc: "substructs do implement PLS", src: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 2}, Baz: aValuePtrPLS{Count: 15}, S: "something"}, wantSave: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}}, "Bar": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 4}}, }, }, }}, "Baz": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 12}}, }, }, }}, "S": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, }, }, }}, }, }, wantLoad: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 1}, Baz: 
aValuePtrPLS{Count: 11}, S: "LOADED"}, }, { desc: "substruct (ptr) does implement PLS, nil valued substruct", src: &aSubPLS{Foo: "foo", S: "something"}, wantSave: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}}, "Baz": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 12}}, }, }, }}, "S": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, }, }, }}, }, }, wantLoad: &aSubPLS{Foo: "foo", Baz: aValuePtrPLS{Count: 11}, S: "LOADED"}, }, { desc: "substruct (ptr) does not implement PLS", src: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}}, wantSave: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}}, "Bar": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, }, }, }}, }, }, wantLoad: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}}, }, { desc: "substruct (value) does implement PLS, error on save", src: &aSubPLSErr{Foo: "foo", Bar: aValuePLS{Count: 2}}, wantSave: (*pb.Entity)(nil), wantLoad: &aSubPLSErr{}, saveErr: "PropertyLoadSaver methods must be implemented on a pointer", }, { desc: "substruct (value) does implement PLS, error on load", src: &aSubPLSNoErr{Foo: "foo", Bar: aPtrPLS{Count: 2}}, wantSave: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "Foo": {ValueType: &pb.Value_StringValue{StringValue: "foo"}}, "Bar": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "Count": {ValueType: &pb.Value_IntegerValue{IntegerValue: 4}}, }, }, }}, }, }, wantLoad: &aSubPLSErr{}, loadErr: "PropertyLoadSaver methods must be implemented 
on a pointer", }, { desc: "parent does not have flatten option, child impl PLS", src: &Grandparent{ Parent: Parent{ Child: Child{ I: 9, Grandchild: Grandchild{ S: "BAD", }, }, String: plsString("something"), }, }, wantSave: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "Parent": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "Child": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}}, }, }, }}, "String": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, }, }, }}, }, }, }}, }, }, wantLoad: &Grandparent{ Parent: Parent{ Child: Child{ I: 1, Grandchild: Grandchild{ S: "grandchild loaded", }, }, String: "LOADED", }, }, }, { desc: "parent has flatten option enabled, child impl PLS", src: &GrandparentFlatten{ Parent: Parent{ Child: Child{ I: 7, Grandchild: Grandchild{ S: "BAD", }, }, String: plsString("something"), }, }, wantSave: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "Parent.Child.I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, "Parent.Child.Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, "Parent.String.SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, }, }, wantLoad: &GrandparentFlatten{ Parent: Parent{ Child: Child{ I: 1, Grandchild: Grandchild{ S: "grandchild loaded", }, }, String: "LOADED", }, }, }, { desc: "parent has flatten option enabled, child (ptr to) impl PLS", src: &GrandparentOfPtrFlatten{ Parent: ParentOfPtr{ Child: &Child{ I: 7, Grandchild: Grandchild{ S: "BAD", }, }, String: ptrToplsString("something"), }, }, wantSave: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ 
"Parent.Child.I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, "Parent.Child.Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, "Parent.String.SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, }, }, wantLoad: &GrandparentOfPtrFlatten{ Parent: ParentOfPtr{ Child: &Child{ I: 1, Grandchild: Grandchild{ S: "grandchild loaded", }, }, String: ptrToplsString("LOADED"), }, }, }, { desc: "children (slice of) impl PLS", src: &GrandparentOfSlice{ Parent: ParentOfSlice{ Children: []Child{ { I: 7, Grandchild: Grandchild{ S: "BAD", }, }, { I: 9, Grandchild: Grandchild{ S: "BAD2", }, }, }, Strings: []plsString{ "something1", "something2", }, }, }, wantSave: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "Parent": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "Children": {ValueType: &pb.Value_ArrayValue{ ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, }, }, }}, {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}}, }, }, }}, }}, }}, "Strings": {ValueType: &pb.Value_ArrayValue{ ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, }, }, }}, {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, }, }, }}, }}, }}, }, }, }}, }, }, wantLoad: &GrandparentOfSlice{ Parent: ParentOfSlice{ 
Children: []Child{ { I: 1, Grandchild: Grandchild{ S: "grandchild loaded", }, }, { I: 1, Grandchild: Grandchild{ S: "grandchild loaded", }, }, }, Strings: []plsString{ "LOADED", "LOADED", }, }, }, }, { desc: "children (slice of ptrs) impl PLS", src: &GrandparentOfSlicePtrs{ Parent: ParentOfSlicePtrs{ Children: []*Child{ { I: 7, Grandchild: Grandchild{ S: "BAD", }, }, { I: 9, Grandchild: Grandchild{ S: "BAD2", }, }, }, Strings: []*plsString{ ptrToplsString("something1"), ptrToplsString("something2"), }, }, }, wantSave: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "Parent": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "Children": {ValueType: &pb.Value_ArrayValue{ ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, }, }, }}, {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, "Grandchild.S": {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}}, }, }, }}, }}, }}, "Strings": {ValueType: &pb.Value_ArrayValue{ ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, }, }, }}, {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "SS": {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, }, }, }}, }}, }}, }, }, }}, }, }, wantLoad: &GrandparentOfSlicePtrs{ Parent: ParentOfSlicePtrs{ Children: []*Child{ { I: 1, Grandchild: Grandchild{ S: "grandchild loaded", }, }, { I: 1, Grandchild: Grandchild{ S: "grandchild loaded", }, }, }, Strings: []*plsString{ 
ptrToplsString("LOADED"), ptrToplsString("LOADED"), }, }, }, }, { desc: "parent has flatten option, children (slice of) impl PLS", src: &GrandparentOfSliceFlatten{ Parent: ParentOfSlice{ Children: []Child{ { I: 7, Grandchild: Grandchild{ S: "BAD", }, }, { I: 9, Grandchild: Grandchild{ S: "BAD2", }, }, }, Strings: []plsString{ "something1", "something2", }, }, }, wantSave: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "Parent.Children.I": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ Values: []*pb.Value{ {ValueType: &pb.Value_IntegerValue{IntegerValue: 8}}, {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, }, }, }}, "Parent.Children.Grandchild.S": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ Values: []*pb.Value{ {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}}, }, }, }}, "Parent.Strings.SS": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ Values: []*pb.Value{ {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, }, }, }}, }, }, wantLoad: &GrandparentOfSliceFlatten{ Parent: ParentOfSlice{ Children: []Child{ { I: 1, Grandchild: Grandchild{ S: "grandchild loaded", }, }, { I: 1, Grandchild: Grandchild{ S: "grandchild loaded", }, }, }, Strings: []plsString{ "LOADED", "LOADED", }, }, }, }, { desc: "parent has flatten option, children (slice of ptrs) impl PLS", src: &GrandparentOfSlicePtrsFlatten{ Parent: ParentOfSlicePtrs{ Children: []*Child{ { I: 7, Grandchild: Grandchild{ S: "BAD", }, }, { I: 9, Grandchild: Grandchild{ S: "BAD2", }, }, }, Strings: []*plsString{ ptrToplsString("something1"), ptrToplsString("something1"), }, }, }, wantSave: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "Parent.Children.I": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ Values: []*pb.Value{ {ValueType: 
&pb.Value_IntegerValue{IntegerValue: 8}}, {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, }, }, }}, "Parent.Children.Grandchild.S": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ Values: []*pb.Value{ {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 8"}}, {ValueType: &pb.Value_StringValue{StringValue: "grandchild saved 10"}}, }, }, }}, "Parent.Strings.SS": {ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{ Values: []*pb.Value{ {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, {ValueType: &pb.Value_StringValue{StringValue: "SAVED"}}, }, }, }}, }, }, wantLoad: &GrandparentOfSlicePtrsFlatten{ Parent: ParentOfSlicePtrs{ Children: []*Child{ { I: 1, Grandchild: Grandchild{ S: "grandchild loaded", }, }, { I: 1, Grandchild: Grandchild{ S: "grandchild loaded", }, }, }, Strings: []*plsString{ ptrToplsString("LOADED"), ptrToplsString("LOADED"), }, }, }, }, } for _, tc := range testCases { e, err := saveEntity(testKey0, tc.src) if tc.saveErr == "" { // Want no error. if err != nil { t.Errorf("%s: save: %v", tc.desc, err) continue } if !reflect.DeepEqual(e, tc.wantSave) { t.Errorf("%s: save: \ngot: %+v\nwant: %+v", tc.desc, e, tc.wantSave) continue } } else { // Want error. if err == nil { t.Errorf("%s: save: want err", tc.desc) continue } if !strings.Contains(err.Error(), tc.saveErr) { t.Errorf("%s: save: \ngot err '%s'\nwant err '%s'", tc.desc, err.Error(), tc.saveErr) } continue } gota := reflect.New(reflect.TypeOf(tc.wantLoad).Elem()).Interface() err = loadEntityProto(gota, e) if tc.loadErr == "" { // Want no error. if err != nil { t.Errorf("%s: load: %v", tc.desc, err) continue } if !reflect.DeepEqual(gota, tc.wantLoad) { t.Errorf("%s: load: \ngot: %+v\nwant: %+v", tc.desc, gota, tc.wantLoad) continue } } else { // Want error. 
if err == nil { t.Errorf("%s: load: want err", tc.desc) continue } if !strings.Contains(err.Error(), tc.loadErr) { t.Errorf("%s: load: \ngot err '%s'\nwant err '%s'", tc.desc, err.Error(), tc.loadErr) } } } } func TestQueryConstruction(t *testing.T) { tests := []struct { q, exp *Query err string }{ { q: NewQuery("Foo"), exp: &Query{ kind: "Foo", limit: -1, }, }, { // Regular filtered query with standard spacing. q: NewQuery("Foo").Filter("foo >", 7), exp: &Query{ kind: "Foo", filter: []filter{ { FieldName: "foo", Op: greaterThan, Value: 7, }, }, limit: -1, }, }, { // Filtered query with no spacing. q: NewQuery("Foo").Filter("foo=", 6), exp: &Query{ kind: "Foo", filter: []filter{ { FieldName: "foo", Op: equal, Value: 6, }, }, limit: -1, }, }, { // Filtered query with funky spacing. q: NewQuery("Foo").Filter(" foo< ", 8), exp: &Query{ kind: "Foo", filter: []filter{ { FieldName: "foo", Op: lessThan, Value: 8, }, }, limit: -1, }, }, { // Filtered query with multicharacter op. q: NewQuery("Foo").Filter("foo >=", 9), exp: &Query{ kind: "Foo", filter: []filter{ { FieldName: "foo", Op: greaterEq, Value: 9, }, }, limit: -1, }, }, { // Query with ordering. q: NewQuery("Foo").Order("bar"), exp: &Query{ kind: "Foo", order: []order{ { FieldName: "bar", Direction: ascending, }, }, limit: -1, }, }, { // Query with reverse ordering, and funky spacing. q: NewQuery("Foo").Order(" - bar"), exp: &Query{ kind: "Foo", order: []order{ { FieldName: "bar", Direction: descending, }, }, limit: -1, }, }, { // Query with an empty ordering. q: NewQuery("Foo").Order(""), err: "empty order", }, { // Query with a + ordering. 
q: NewQuery("Foo").Order("+bar"), err: "invalid order", }, } for i, test := range tests { if test.q.err != nil { got := test.q.err.Error() if !strings.Contains(got, test.err) { t.Errorf("%d: error mismatch: got %q want something containing %q", i, got, test.err) } continue } if !reflect.DeepEqual(test.q, test.exp) { t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp) } } } func TestPutMultiTypes(t *testing.T) { ctx := context.Background() type S struct { A int B string } testCases := []struct { desc string src interface{} wantErr bool }{ // Test cases to check each of the valid input types for src. // Each case has the same elements. { desc: "type []struct", src: []S{ {1, "one"}, {2, "two"}, }, }, { desc: "type []*struct", src: []*S{ {1, "one"}, {2, "two"}, }, }, { desc: "type []interface{} with PLS elems", src: []interface{}{ &PropertyList{Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, &PropertyList{Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, }, }, { desc: "type []interface{} with struct ptr elems", src: []interface{}{ &S{1, "one"}, &S{2, "two"}, }, }, { desc: "type []PropertyLoadSaver{}", src: []PropertyLoadSaver{ &PropertyList{Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, &PropertyList{Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, }, }, { desc: "type []P (non-pointer, *P implements PropertyLoadSaver)", src: []PropertyList{ {Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, {Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, }, }, // Test some invalid cases. 
{ desc: "type []interface{} with struct elems", src: []interface{}{ S{1, "one"}, S{2, "two"}, }, wantErr: true, }, { desc: "PropertyList", src: PropertyList{ Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}, }, wantErr: true, }, { desc: "type []int", src: []int{1, 2}, wantErr: true, }, { desc: "not a slice", src: S{1, "one"}, wantErr: true, }, } // Use the same keys and expected entities for all tests. keys := []*Key{ NameKey("testKind", "first", nil), NameKey("testKind", "second", nil), } want := []*pb.Mutation{ {Operation: &pb.Mutation_Upsert{ Upsert: &pb.Entity{ Key: keyToProto(keys[0]), Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}}, "B": {ValueType: &pb.Value_StringValue{StringValue: "one"}}, }, }}}, {Operation: &pb.Mutation_Upsert{ Upsert: &pb.Entity{ Key: keyToProto(keys[1]), Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, "B": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, }, }}}, } for _, tt := range testCases { // Set up a fake client which captures upserts. var got []*pb.Mutation client := &Client{ client: &fakeClient{ commitFn: func(req *pb.CommitRequest) (*pb.CommitResponse, error) { got = req.Mutations return &pb.CommitResponse{}, nil }, }, } _, err := client.PutMulti(ctx, keys, tt.src) if err != nil { if !tt.wantErr { t.Errorf("%s: error %v", tt.desc, err) } continue } if tt.wantErr { t.Errorf("%s: wanted error, but none returned", tt.desc) continue } if len(got) != len(want) { t.Errorf("%s: got %d entities, want %d", tt.desc, len(got), len(want)) continue } for i, e := range got { if !proto.Equal(e, want[i]) { t.Logf("%s: entity %d doesn't match\ngot: %v\nwant: %v", tt.desc, i, e, want[i]) } } } } func TestNoIndexOnSliceProperties(t *testing.T) { // Check that ExcludeFromIndexes is set on the inner elements, // rather than the top-level ArrayValue value. 
pl := PropertyList{ Property{ Name: "repeated", Value: []interface{}{ 123, false, "short", strings.Repeat("a", 1503), }, NoIndex: true, }, } key := NameKey("dummy", "dummy", nil) entity, err := saveEntity(key, &pl) if err != nil { t.Fatalf("saveEntity: %v", err) } want := &pb.Value{ ValueType: &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{Values: []*pb.Value{ {ValueType: &pb.Value_IntegerValue{IntegerValue: 123}, ExcludeFromIndexes: true}, {ValueType: &pb.Value_BooleanValue{BooleanValue: false}, ExcludeFromIndexes: true}, {ValueType: &pb.Value_StringValue{StringValue: "short"}, ExcludeFromIndexes: true}, {ValueType: &pb.Value_StringValue{StringValue: strings.Repeat("a", 1503)}, ExcludeFromIndexes: true}, }}}, } if got := entity.Properties["repeated"]; !proto.Equal(got, want) { t.Errorf("Entity proto differs\ngot: %v\nwant: %v", got, want) } } type byName PropertyList func (s byName) Len() int { return len(s) } func (s byName) Less(i, j int) bool { return s[i].Name < s[j].Name } func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // sortPL sorts the property list by property name, and // recursively sorts any nested property lists, or nested slices of // property lists. 
func sortPL(pl PropertyList) { sort.Stable(byName(pl)) for _, p := range pl { switch p.Value.(type) { case *Entity: sortPL(p.Value.(*Entity).Properties) case []interface{}: for _, p2 := range p.Value.([]interface{}) { if nent, ok := p2.(*Entity); ok { sortPL(nent.Properties) } } } } } func TestValidGeoPoint(t *testing.T) { testCases := []struct { desc string pt GeoPoint want bool }{ { "valid", GeoPoint{67.21, 13.37}, true, }, { "high lat", GeoPoint{-90.01, 13.37}, false, }, { "low lat", GeoPoint{90.01, 13.37}, false, }, { "high lng", GeoPoint{67.21, 182}, false, }, { "low lng", GeoPoint{67.21, -181}, false, }, } for _, tc := range testCases { if got := tc.pt.Valid(); got != tc.want { t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want) } } } func TestPutInvalidEntity(t *testing.T) { // Test that trying to put an invalid entity always returns the correct error // type. // Fake client that can pretend to start a transaction. fakeClient := &fakeDatastoreClient{ beginTransaction: func(*pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error) { return &pb.BeginTransactionResponse{ Transaction: []byte("deadbeef"), }, nil }, } client := &Client{ client: fakeClient, } ctx := context.Background() key := IncompleteKey("kind", nil) _, err := client.Put(ctx, key, "invalid entity") if err != ErrInvalidEntityType { t.Errorf("client.Put returned err %v, want %v", err, ErrInvalidEntityType) } _, err = client.PutMulti(ctx, []*Key{key}, []interface{}{"invalid entity"}) if me, ok := err.(MultiError); !ok { t.Errorf("client.PutMulti returned err %v, want MultiError type", err) } else if len(me) != 1 || me[0] != ErrInvalidEntityType { t.Errorf("client.PutMulti returned err %v, want MulitError{ErrInvalidEntityType}", err) } client.RunInTransaction(ctx, func(tx *Transaction) error { _, err := tx.Put(key, "invalid entity") if err != ErrInvalidEntityType { t.Errorf("tx.Put returned err %v, want %v", err, ErrInvalidEntityType) } _, err = tx.PutMulti([]*Key{key}, 
[]interface{}{"invalid entity"}) if me, ok := err.(MultiError); !ok { t.Errorf("tx.PutMulti returned err %v, want MultiError type", err) } else if len(me) != 1 || me[0] != ErrInvalidEntityType { t.Errorf("tx.PutMulti returned err %v, want MulitError{ErrInvalidEntityType}", err) } return errors.New("bang!") // Return error: we don't actually want to commit. }) } func TestDeferred(t *testing.T) { type Ent struct { A int B string } keys := []*Key{ NameKey("testKind", "first", nil), NameKey("testKind", "second", nil), } entity1 := &pb.Entity{ Key: keyToProto(keys[0]), Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}}, "B": {ValueType: &pb.Value_StringValue{StringValue: "one"}}, }, } entity2 := &pb.Entity{ Key: keyToProto(keys[1]), Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, "B": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, }, } // count keeps track of the number of times fakeClient.lookup has been // called. var count int // Fake client that will return Deferred keys in resp on the first call. fakeClient := &fakeDatastoreClient{ lookup: func(*pb.LookupRequest) (*pb.LookupResponse, error) { count++ // On the first call, we return deferred keys. if count == 1 { return &pb.LookupResponse{ Found: []*pb.EntityResult{ { Entity: entity1, Version: 1, }, }, Deferred: []*pb.Key{ keyToProto(keys[1]), }, }, nil } // On the second call, we do not return any more deferred keys. return &pb.LookupResponse{ Found: []*pb.EntityResult{ { Entity: entity2, Version: 1, }, }, }, nil }, } client := &Client{ client: fakeClient, } ctx := context.Background() dst := make([]Ent, len(keys)) err := client.GetMulti(ctx, keys, dst) if err != nil { t.Fatalf("client.Get: %v", err) } if count != 2 { t.Fatalf("expected client.lookup to be called 2 times. 
Got %d", count) } if len(dst) != 2 { t.Fatalf("expected 2 entities returned, got %d", len(dst)) } for _, e := range dst { if e.A == 1 { if e.B != "one" { t.Fatalf("unexpected entity %+v", e) } } else if e.A == 2 { if e.B != "two" { t.Fatalf("unexpected entity %+v", e) } } else { t.Fatalf("unexpected entity %+v", e) } } } type KeyLoaderEnt struct { A int K *Key } func (e *KeyLoaderEnt) Load(p []Property) error { e.A = 2 return nil } func (e *KeyLoaderEnt) LoadKey(k *Key) error { e.K = k return nil } func (e *KeyLoaderEnt) Save() ([]Property, error) { return []Property{{Name: "A", Value: int64(3)}}, nil } func TestKeyLoaderEndToEnd(t *testing.T) { keys := []*Key{ NameKey("testKind", "first", nil), NameKey("testKind", "second", nil), } entity1 := &pb.Entity{ Key: keyToProto(keys[0]), Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}}, "B": {ValueType: &pb.Value_StringValue{StringValue: "one"}}, }, } entity2 := &pb.Entity{ Key: keyToProto(keys[1]), Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, "B": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, }, } fakeClient := &fakeDatastoreClient{ lookup: func(*pb.LookupRequest) (*pb.LookupResponse, error) { return &pb.LookupResponse{ Found: []*pb.EntityResult{ { Entity: entity1, Version: 1, }, { Entity: entity2, Version: 1, }, }, }, nil }, } client := &Client{ client: fakeClient, } ctx := context.Background() dst := make([]*KeyLoaderEnt, len(keys)) err := client.GetMulti(ctx, keys, dst) if err != nil { t.Fatalf("client.Get: %v", err) } for i := range dst { if !reflect.DeepEqual(dst[i].K, keys[i]) { t.Fatalf("unexpected entity %d to have key %+v, got %+v", i, keys[i], dst[i].K) } } } func TestDeferredMissing(t *testing.T) { type Ent struct { A int B string } keys := []*Key{ NameKey("testKind", "first", nil), NameKey("testKind", "second", nil), } entity1 := &pb.Entity{ Key: keyToProto(keys[0]), } entity2 := &pb.Entity{ Key: 
keyToProto(keys[1]), } var count int fakeClient := &fakeDatastoreClient{ lookup: func(*pb.LookupRequest) (*pb.LookupResponse, error) { count++ if count == 1 { return &pb.LookupResponse{ Missing: []*pb.EntityResult{ { Entity: entity1, Version: 1, }, }, Deferred: []*pb.Key{ keyToProto(keys[1]), }, }, nil } return &pb.LookupResponse{ Missing: []*pb.EntityResult{ { Entity: entity2, Version: 1, }, }, }, nil }, } client := &Client{ client: fakeClient, } ctx := context.Background() dst := make([]Ent, len(keys)) err := client.GetMulti(ctx, keys, dst) errs, ok := err.(MultiError) if !ok { t.Fatalf("expected error returns to be MultiError; got %v", err) } if len(errs) != 2 { t.Fatalf("expected 2 errors returns, got %d", len(errs)) } if errs[0] != ErrNoSuchEntity { t.Fatalf("expected error to be ErrNoSuchEntity; got %v", errs[0]) } if errs[1] != ErrNoSuchEntity { t.Fatalf("expected error to be ErrNoSuchEntity; got %v", errs[1]) } if count != 2 { t.Fatalf("expected client.lookup to be called 2 times. Got %d", count) } if len(dst) != 2 { t.Fatalf("expected 2 entities returned, got %d", len(dst)) } for _, e := range dst { if e.A != 0 || e.B != "" { t.Fatalf("unexpected entity %+v", e) } } } type fakeDatastoreClient struct { pb.DatastoreClient // Optional handlers for the datastore methods. // Any handlers left undefined will return an error. 
lookup func(*pb.LookupRequest) (*pb.LookupResponse, error) runQuery func(*pb.RunQueryRequest) (*pb.RunQueryResponse, error) beginTransaction func(*pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error) commit func(*pb.CommitRequest) (*pb.CommitResponse, error) rollback func(*pb.RollbackRequest) (*pb.RollbackResponse, error) allocateIds func(*pb.AllocateIdsRequest) (*pb.AllocateIdsResponse, error) } func (c *fakeDatastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) { if c.lookup == nil { return nil, errors.New("no lookup handler defined") } return c.lookup(in) } func (c *fakeDatastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) { if c.runQuery == nil { return nil, errors.New("no runQuery handler defined") } return c.runQuery(in) } func (c *fakeDatastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) { if c.beginTransaction == nil { return nil, errors.New("no beginTransaction handler defined") } return c.beginTransaction(in) } func (c *fakeDatastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) { if c.commit == nil { return nil, errors.New("no commit handler defined") } return c.commit(in) } func (c *fakeDatastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) { if c.rollback == nil { return nil, errors.New("no rollback handler defined") } return c.rollback(in) } func (c *fakeDatastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) { if c.allocateIds == nil { return nil, errors.New("no allocateIds handler defined") } return c.allocateIds(in) } 
golang-google-cloud-0.9.0/datastore/doc.go000066400000000000000000000335211312234511600204310ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package datastore provides a client for Google Cloud Datastore. Note: This package is in beta. Some backwards-incompatible changes may occur. Basic Operations Entities are the unit of storage and are associated with a key. A key consists of an optional parent key, a string application ID, a string kind (also known as an entity type), and either a StringID or an IntID. A StringID is also known as an entity name or key name. It is valid to create a key with a zero StringID and a zero IntID; this is called an incomplete key, and does not refer to any saved entity. Putting an entity into the datastore under an incomplete key will cause a unique key to be generated for that entity, with a non-zero IntID. An entity's contents are a mapping from case-sensitive field names to values. Valid value types are: - signed integers (int, int8, int16, int32 and int64), - bool, - string, - float32 and float64, - []byte (up to 1 megabyte in length), - any type whose underlying type is one of the above predeclared types, - *Key, - GeoPoint, - time.Time (stored with microsecond precision), - structs whose fields are all valid value types, - pointers to structs whose fields are all valid value types, - slices of any of the above. 
Slices of structs are valid, as are structs that contain slices. The Get and Put functions load and save an entity's contents. An entity's contents are typically represented by a struct pointer. Example code: type Entity struct { Value string } func main() { ctx := context.Background() // Create a datastore client. In a typical application, you would create // a single client which is reused for every datastore operation. dsClient, err := datastore.NewClient(ctx, "my-project") if err != nil { // Handle error. } k := datastore.NameKey("Entity", "stringID", nil) e := new(Entity) if err := dsClient.Get(ctx, k, e); err != nil { // Handle error. } old := e.Value e.Value = "Hello World!" if _, err := dsClient.Put(ctx, k, e); err != nil { // Handle error. } fmt.Printf("Updated value from %q to %q\n", old, e.Value) } GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and Delete functions. They take a []*Key instead of a *Key, and may return a datastore.MultiError when encountering partial failure. Properties An entity's contents can be represented by a variety of types. These are typically struct pointers, but can also be any type that implements the PropertyLoadSaver interface. If using a struct pointer, you do not have to explicitly implement the PropertyLoadSaver interface; the datastore will automatically convert via reflection. If a struct pointer does implement that interface then those methods will be used in preference to the default behavior for struct pointers. Struct pointers are more strongly typed and are easier to use; PropertyLoadSavers are more flexible. The actual types passed do not have to match between Get and Put calls or even across different calls to datastore. It is valid to put a *PropertyList and get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1. Conceptually, any entity is saved as a sequence of properties, and is loaded into the destination value on a property-by-property basis. 
When loading into a struct pointer, an entity that cannot be completely represented (such as a missing field) will result in an ErrFieldMismatch error but it is up to the caller whether this error is fatal, recoverable or ignorable. By default, for struct pointers, all properties are potentially indexed, and the property name is the same as the field name (and hence must start with an upper case letter). Fields may have a `datastore:"name,options"` tag. The tag name is the property name, which must be one or more valid Go identifiers joined by ".", but may start with a lower case letter. An empty tag name means to just use the field name. A "-" tag name means that the datastore will ignore that field. The only valid options are "omitempty", "noindex" and "flatten". If the options include "omitempty" and the value of the field is empty, then the field will be omitted on Save. The empty values are false, 0, any nil interface value, and any array, slice, map, or string of length zero. Struct field values will never be empty. If options include "noindex" then the field will not be indexed. All fields are indexed by default. Strings or byte slices longer than 1500 bytes cannot be indexed; fields used to store long strings and byte slices must be tagged with "noindex" or they will cause Put operations to fail. For a nested struct field, the options may also include "flatten". This indicates that the immediate fields and any nested substruct fields of the nested struct should be flattened. See below for examples. To use multiple options together, separate them by a comma. The order does not matter. If the options is "" then the comma may be omitted. Example code: // A and B are renamed to a and b. // A, C and J are not indexed. // D's tag is equivalent to having no tag at all (E). // I is ignored entirely by the datastore. // J has tag information for both the datastore and json packages. 
type TaggedStruct struct { A int `datastore:"a,noindex"` B int `datastore:"b"` C int `datastore:",noindex"` D int `datastore:""` E int I int `datastore:"-"` J int `datastore:",noindex" json:"j"` } Key Field If the struct contains a *datastore.Key field tagged with the name "__key__", its value will be ignored on Put. When reading the Entity back into the Go struct, the field will be populated with the *datastore.Key value used to query for the Entity. Example code: type MyEntity struct { A int K *datastore.Key `datastore:"__key__"` } k := datastore.NameKey("Entity", "stringID", nil) e := MyEntity{A: 12} k, err = dsClient.Put(ctx, k, e) if err != nil { // Handle error. } var entities []MyEntity q := datastore.NewQuery("Entity").Filter("A =", 12).Limit(1) _, err := dsClient.GetAll(ctx, q, &entities) if err != nil { // Handle error } log.Println(entities[0]) // Prints {12 /Entity,stringID} Structured Properties If the struct pointed to contains other structs, then the nested or embedded structs are themselves saved as Entity values. For example, given these definitions: type Inner struct { W int32 X string } type Outer struct { I Inner } then an Outer would have one property, Inner, encoded as an Entity value. If an outer struct is tagged "noindex" then all of its implicit flattened fields are effectively "noindex". If the Inner struct contains a *Key field with the name "__key__", like so: type Inner struct { W int32 X string K *datastore.Key `datastore:"__key__"` } type Outer struct { I Inner } then the value of K will be used as the Key for Inner, represented as an Entity value in datastore. If any nested struct fields should be flattened, instead of encoded as Entity values, the nested struct field should be tagged with the "flatten" option. 
For example, given the following: type Inner1 struct { W int32 X string } type Inner2 struct { Y float64 } type Inner3 struct { Z bool } type Inner4 struct { WW int } type Inner5 struct { X Inner4 } type Outer struct { A int16 I []Inner1 `datastore:",flatten"` J Inner2 `datastore:",flatten"` K Inner5 `datastore:",flatten"` Inner3 `datastore:",flatten"` } an Outer's properties would be equivalent to those of: type OuterEquivalent struct { A int16 IDotW []int32 `datastore:"I.W"` IDotX []string `datastore:"I.X"` JDotY float64 `datastore:"J.Y"` KDotXDotWW int `datastore:"K.X.WW"` Z bool } Note that the "flatten" option cannot be used for Entity value fields. The server will reject any dotted field names for an Entity value. The PropertyLoadSaver Interface An entity's contents can also be represented by any type that implements the PropertyLoadSaver interface. This type may be a struct pointer, but it does not have to be. The datastore package will call Load when getting the entity's contents, and Save when putting the entity's contents. Possible uses include deriving non-stored fields, verifying fields, or indexing a field only if its value is positive. Example code: type CustomPropsExample struct { I, J int // Sum is not stored, but should always be equal to I + J. Sum int `datastore:"-"` } func (x *CustomPropsExample) Load(ps []datastore.Property) error { // Load I and J as usual. if err := datastore.LoadStruct(x, ps); err != nil { return err } // Derive the Sum field. x.Sum = x.I + x.J return nil } func (x *CustomPropsExample) Save() ([]datastore.Property, error) { // Validate the Sum field. if x.Sum != x.I + x.J { return nil, errors.New("CustomPropsExample has inconsistent sum") } // Save I and J as usual. The code below is equivalent to calling // "return datastore.SaveStruct(x)", but is done manually for // demonstration purposes. 
return []datastore.Property{ { Name: "I", Value: int64(x.I), }, { Name: "J", Value: int64(x.J), }, }, nil } The *PropertyList type implements PropertyLoadSaver, and can therefore hold an arbitrary entity's contents. The KeyLoader Interface If a type implements the PropertyLoadSaver interface, it may also want to implement the KeyLoader interface. The KeyLoader interface exists to allow implementations of PropertyLoadSaver to also load an Entity's Key into the Go type. This type may be a struct pointer, but it does not have to be. The datastore package will call LoadKey when getting the entity's contents, after calling Load. Example code: type WithKeyExample struct { I int Key *datastore.Key } func (x *WithKeyExample) LoadKey(k *datastore.Key) error { x.Key = k return nil } func (x *WithKeyExample) Load(ps []datastore.Property) error { // Load I as usual. return datastore.LoadStruct(x, ps) } func (x *WithKeyExample) Save() ([]datastore.Property, error) { // Save I as usual. return datastore.SaveStruct(x) } To load a Key into a struct which does not implement the PropertyLoadSaver interface, see the "Key Field" section above. Queries Queries retrieve entities based on their properties or key's ancestry. Running a query yields an iterator of results: either keys or (key, entity) pairs. Queries are re-usable and it is safe to call Query.Run from concurrent goroutines. Iterators are not safe for concurrent use. Queries are immutable, and are either created by calling NewQuery, or derived from an existing query by calling a method like Filter or Order that returns a new query value. A query is typically constructed by calling NewQuery followed by a chain of zero or more such methods. These methods are: - Ancestor and Filter constrain the entities returned by running a query. - Order affects the order in which they are returned. - Project constrains the fields returned. - Distinct de-duplicates projected entities. 
- KeysOnly makes the iterator return only keys, not (key, entity) pairs. - Start, End, Offset and Limit define which sub-sequence of matching entities to return. Start and End take cursors, Offset and Limit take integers. Start and Offset affect the first result, End and Limit affect the last result. If both Start and Offset are set, then the offset is relative to Start. If both End and Limit are set, then the earliest constraint wins. Limit is relative to Start+Offset, not relative to End. As a special case, a negative limit means unlimited. Example code: type Widget struct { Description string Price int } func printWidgets(ctx context.Context, client *datastore.Client) { q := datastore.NewQuery("Widget"). Filter("Price <", 1000). Order("-Price") for t := client.Run(ctx, q); ; { var x Widget key, err := t.Next(&x) if err == iterator.Done { break } if err != nil { // Handle error. } fmt.Printf("Key=%v\nWidget=%#v\n\n", key, x) } } Transactions Client.RunInTransaction runs a function in a transaction. Example code: type Counter struct { Count int } func incCount(ctx context.Context, client *datastore.Client) { var count int key := datastore.NameKey("Counter", "singleton", nil) _, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { var x Counter if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity { return err } x.Count++ if _, err := tx.Put(key, &x); err != nil { return err } count = x.Count return nil }) if err != nil { // Handle error. } // The value of count is only valid once the transaction is successful // (RunInTransaction has returned nil). fmt.Printf("Count=%d\n", count) } Google Cloud Datastore Emulator This package supports the Cloud Datastore emulator, which is useful for testing and development. Environment variables are used to indicate that datastore traffic should be directed to the emulator instead of the production Datastore service. 
To install and set up the emulator and its environment variables, see the documentation at https://cloud.google.com/datastore/docs/tools/datastore-emulator. Authentication See examples of authorization and authentication at https://godoc.org/cloud.google.com/go#pkg-examples. */ package datastore // import "cloud.google.com/go/datastore" golang-google-cloud-0.9.0/datastore/errors.go000066400000000000000000000023651312234511600212020ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // This file provides error functions for common API failure modes. package datastore import ( "fmt" ) // MultiError is returned by batch operations when there are errors with // particular elements. Errors will be in a one-to-one correspondence with // the input elements; successful elements will have a nil entry. type MultiError []error func (m MultiError) Error() string { s, n := "", 0 for _, e := range m { if e != nil { if n == 0 { s = e.Error() } n++ } } switch n { case 0: return "(0 errors)" case 1: return s case 2: return s + " (and 1 other error)" } return fmt.Sprintf("%s (and %d other errors)", s, n-1) } golang-google-cloud-0.9.0/datastore/example_test.go000066400000000000000000000313001312234511600223470ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore_test import ( "fmt" "log" "time" "cloud.google.com/go/datastore" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func ExampleNewClient() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } _ = client // TODO: Use client. } func ExampleClient_Get() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } type Article struct { Title string Description string Body string `datastore:",noindex"` Author *datastore.Key PublishedAt time.Time } key := datastore.NameKey("Article", "articled1", nil) article := &Article{} if err := client.Get(ctx, key, article); err != nil { // TODO: Handle error. } } func ExampleClient_Put() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } type Article struct { Title string Description string Body string `datastore:",noindex"` Author *datastore.Key PublishedAt time.Time } newKey := datastore.IncompleteKey("Article", nil) _, err = client.Put(ctx, newKey, &Article{ Title: "The title of the article", Description: "The description of the article...", Body: "...", Author: datastore.NameKey("Author", "jbd", nil), PublishedAt: time.Now(), }) if err != nil { // TODO: Handle error. 
} } func ExampleClient_Put_flatten() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { log.Fatal(err) } type Animal struct { Name string Type string Breed string } type Human struct { Name string Height int Pet Animal `datastore:",flatten"` } newKey := datastore.IncompleteKey("Human", nil) _, err = client.Put(ctx, newKey, &Human{ Name: "Susan", Height: 67, Pet: Animal{ Name: "Fluffy", Type: "Cat", Breed: "Sphynx", }, }) if err != nil { log.Fatal(err) } } func ExampleClient_Delete() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } key := datastore.NameKey("Article", "articled1", nil) if err := client.Delete(ctx, key); err != nil { // TODO: Handle error. } } func ExampleClient_DeleteMulti() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } var keys []*datastore.Key for i := 1; i <= 10; i++ { keys = append(keys, datastore.IDKey("Article", int64(i), nil)) } if err := client.DeleteMulti(ctx, keys); err != nil { // TODO: Handle error. } } type Post struct { Title string PublishedAt time.Time Comments int } func ExampleClient_GetMulti() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } keys := []*datastore.Key{ datastore.NameKey("Post", "post1", nil), datastore.NameKey("Post", "post2", nil), datastore.NameKey("Post", "post3", nil), } posts := make([]Post, 3) if err := client.GetMulti(ctx, keys, posts); err != nil { // TODO: Handle error. } } func ExampleClient_PutMulti_slice() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } keys := []*datastore.Key{ datastore.NameKey("Post", "post1", nil), datastore.NameKey("Post", "post2", nil), } // PutMulti with a Post slice. 
posts := []*Post{ {Title: "Post 1", PublishedAt: time.Now()}, {Title: "Post 2", PublishedAt: time.Now()}, } if _, err := client.PutMulti(ctx, keys, posts); err != nil { // TODO: Handle error. } } func ExampleClient_PutMulti_interfaceSlice() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } keys := []*datastore.Key{ datastore.NameKey("Post", "post1", nil), datastore.NameKey("Post", "post2", nil), } // PutMulti with an empty interface slice. posts := []interface{}{ &Post{Title: "Post 1", PublishedAt: time.Now()}, &Post{Title: "Post 2", PublishedAt: time.Now()}, } if _, err := client.PutMulti(ctx, keys, posts); err != nil { // TODO: Handle error. } } func ExampleNewQuery() { // Query for Post entities. q := datastore.NewQuery("Post") _ = q // TODO: Use the query with Client.Run. } func ExampleNewQuery_options() { // Query to order the posts by the number of comments they have recieved. q := datastore.NewQuery("Post").Order("-Comments") // Start listing from an offset and limit the results. q = q.Offset(20).Limit(10) _ = q // TODO: Use the query. } func ExampleClient_Count() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } // Count the number of the post entities. q := datastore.NewQuery("Post") n, err := client.Count(ctx, q) if err != nil { // TODO: Handle error. } fmt.Printf("There are %d posts.", n) } func ExampleClient_Run() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } // List the posts published since yesterday. yesterday := time.Now().Add(-24 * time.Hour) q := datastore.NewQuery("Post").Filter("PublishedAt >", yesterday) it := client.Run(ctx, q) _ = it // TODO: iterate using Next. 
} func ExampleClient_NewTransaction() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } const retries = 3 // Increment a counter. // See https://cloud.google.com/appengine/articles/sharding_counters for // a more scalable solution. type Counter struct { Count int } key := datastore.NameKey("counter", "CounterA", nil) var tx *datastore.Transaction for i := 0; i < retries; i++ { tx, err = client.NewTransaction(ctx) if err != nil { break } var c Counter if err = tx.Get(key, &c); err != nil && err != datastore.ErrNoSuchEntity { break } c.Count++ if _, err = tx.Put(key, &c); err != nil { break } // Attempt to commit the transaction. If there's a conflict, try again. if _, err = tx.Commit(); err != datastore.ErrConcurrentTransaction { break } } if err != nil { // TODO: Handle error. } } func ExampleClient_RunInTransaction() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } // Increment a counter. // See https://cloud.google.com/appengine/articles/sharding_counters for // a more scalable solution. type Counter struct { Count int } var count int key := datastore.NameKey("Counter", "singleton", nil) _, err = client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { var x Counter if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity { return err } x.Count++ if _, err := tx.Put(key, &x); err != nil { return err } count = x.Count return nil }) if err != nil { // TODO: Handle error. } // The value of count is only valid once the transaction is successful // (RunInTransaction has returned nil). fmt.Printf("Count=%d\n", count) } func ExampleClient_AllocateIDs() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. 
} var keys []*datastore.Key for i := 0; i < 10; i++ { keys = append(keys, datastore.IncompleteKey("Article", nil)) } keys, err = client.AllocateIDs(ctx, keys) if err != nil { // TODO: Handle error. } _ = keys // TODO: Use keys. } func ExampleKey_Encode() { key := datastore.IDKey("Article", 1, nil) encoded := key.Encode() fmt.Println(encoded) // Output: EgsKB0FydGljbGUQAQ } func ExampleDecodeKey() { const encoded = "EgsKB0FydGljbGUQAQ" key, err := datastore.DecodeKey(encoded) if err != nil { // TODO: Handle error. } fmt.Println(key) // Output: /Article,1 } func ExampleIDKey() { // Key with numeric ID. k := datastore.IDKey("Article", 1, nil) _ = k // TODO: Use key. } func ExampleNameKey() { // Key with string ID. k := datastore.NameKey("Article", "article8", nil) _ = k // TODO: Use key. } func ExampleIncompleteKey() { k := datastore.IncompleteKey("Article", nil) _ = k // TODO: Use incomplete key. } func ExampleClient_GetAll() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } var posts []*Post keys, err := client.GetAll(ctx, datastore.NewQuery("Post"), &posts) for i, key := range keys { fmt.Println(key) fmt.Println(posts[i]) } } func ExampleCommit_Key() { ctx := context.Background() client, err := datastore.NewClient(ctx, "") if err != nil { // TODO: Handle error. } var pk1, pk2 *datastore.PendingKey // Create two posts in a single transaction. commit, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { var err error pk1, err = tx.Put(datastore.IncompleteKey("Post", nil), &Post{Title: "Post 1", PublishedAt: time.Now()}) if err != nil { return err } pk2, err = tx.Put(datastore.IncompleteKey("Post", nil), &Post{Title: "Post 2", PublishedAt: time.Now()}) if err != nil { return err } return nil }) if err != nil { // TODO: Handle error. } // Now pk1, pk2 are valid PendingKeys. Let's convert them into real keys // using the Commit object. 
k1 := commit.Key(pk1) k2 := commit.Key(pk2) fmt.Println(k1, k2) } func ExampleIterator_Next() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } it := client.Run(ctx, datastore.NewQuery("Post")) for { var p Post key, err := it.Next(&p) if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(key, p) } } func ExampleIterator_Cursor() { ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } it := client.Run(ctx, datastore.NewQuery("Post")) for { var p Post _, err := it.Next(&p) if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(p) cursor, err := it.Cursor() if err != nil { // TODO: Handle error. } // When printed, a cursor will display as a string that can be passed // to datastore.NewCursor. fmt.Printf("to resume with this post, use cursor %s\n", cursor) } } func ExampleDecodeCursor() { // See Query.Start for a fuller example of DecodeCursor. // getCursor represents a function that returns a cursor from a previous // iteration in string form. cursorString := getCursor() cursor, err := datastore.DecodeCursor(cursorString) if err != nil { // TODO: Handle error. } _ = cursor // TODO: Use the cursor with Query.Start or Query.End. } func getCursor() string { return "" } func ExampleQuery_Start() { // This example demonstrates how to use cursors and Query.Start // to resume an iteration. ctx := context.Background() client, err := datastore.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } // getCursor represents a function that returns a cursor from a previous // iteration in string form. cursorString := getCursor() cursor, err := datastore.DecodeCursor(cursorString) if err != nil { // TODO: Handle error. } it := client.Run(ctx, datastore.NewQuery("Post").Start(cursor)) _ = it // TODO: Use iterator. 
} func ExampleLoadStruct() { type Player struct { User string Score int } // Normally LoadStruct would only be used inside a custom implementation of // PropertyLoadSaver; this is for illustrative purposes only. props := []datastore.Property{ {Name: "User", Value: "Alice"}, {Name: "Score", Value: int64(97)}, } var p Player if err := datastore.LoadStruct(&p, props); err != nil { // TODO: Handle error. } fmt.Println(p) // Output: {Alice 97} } func ExampleSaveStruct() { type Player struct { User string Score int } p := &Player{ User: "Alice", Score: 97, } props, err := datastore.SaveStruct(p) if err != nil { // TODO: Handle error. } fmt.Println(props) // TODO(jba): make this output stable: Output: [{User Alice false} {Score 97 false}] } golang-google-cloud-0.9.0/datastore/integration_test.go000066400000000000000000000620341312234511600232470ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "errors" "fmt" "reflect" "sort" "strings" "sync" "testing" "time" "cloud.google.com/go/internal/testutil" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" ) // TODO(djd): Make test entity clean up more robust: some test entities may // be left behind if tests are aborted, the transport fails, etc. // suffix is a timestamp-based suffix which is appended to key names, // particularly for the root keys of entity groups. 
This reduces flakiness // when the tests are run in parallel. var suffix = fmt.Sprintf("-t%d", time.Now().UnixNano()) func newClient(ctx context.Context, t *testing.T) *Client { ts := testutil.TokenSource(ctx, ScopeDatastore) if ts == nil { t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") } client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts)) if err != nil { t.Fatalf("NewClient: %v", err) } return client } func TestBasics(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx, _ := context.WithTimeout(context.Background(), time.Second*20) client := newClient(ctx, t) defer client.Close() type X struct { I int S string T time.Time } x0 := X{66, "99", time.Now().Truncate(time.Millisecond)} k, err := client.Put(ctx, IncompleteKey("BasicsX", nil), &x0) if err != nil { t.Fatalf("client.Put: %v", err) } x1 := X{} err = client.Get(ctx, k, &x1) if err != nil { t.Errorf("client.Get: %v", err) } err = client.Delete(ctx, k) if err != nil { t.Errorf("client.Delete: %v", err) } if !reflect.DeepEqual(x0, x1) { t.Errorf("compare: x0=%v, x1=%v", x0, x1) } } func TestTopLevelKeyLoaded(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx, _ := context.WithTimeout(context.Background(), time.Second*20) client := newClient(ctx, t) defer client.Close() completeKey := NameKey("EntityWithKey", "myent", nil) type EntityWithKey struct { I int S string K *Key `datastore:"__key__"` } in := &EntityWithKey{ I: 12, S: "abcd", } k, err := client.Put(ctx, completeKey, in) if err != nil { t.Fatalf("client.Put: %v", err) } var e EntityWithKey err = client.Get(ctx, k, &e) if err != nil { t.Fatalf("client.Get: %v", err) } // The two keys should be absolutely identical. 
if !reflect.DeepEqual(e.K, k) { t.Fatalf("e.K not equal to k; got %#v, want %#v", e.K, k) } } func TestListValues(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() p0 := PropertyList{ {Name: "L", Value: []interface{}{int64(12), "string", true}}, } k, err := client.Put(ctx, IncompleteKey("ListValue", nil), &p0) if err != nil { t.Fatalf("client.Put: %v", err) } var p1 PropertyList if err := client.Get(ctx, k, &p1); err != nil { t.Errorf("client.Get: %v", err) } if !reflect.DeepEqual(p0, p1) { t.Errorf("compare:\np0=%v\np1=%#v", p0, p1) } if err = client.Delete(ctx, k); err != nil { t.Errorf("client.Delete: %v", err) } } func TestGetMulti(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() type X struct { I int } p := NameKey("X", "x"+suffix, nil) cases := []struct { key *Key put bool }{ {key: NameKey("X", "item1", p), put: true}, {key: NameKey("X", "item2", p), put: false}, {key: NameKey("X", "item3", p), put: false}, {key: NameKey("X", "item4", p), put: true}, } var src, dst []*X var srcKeys, dstKeys []*Key for _, c := range cases { dst = append(dst, &X{}) dstKeys = append(dstKeys, c.key) if c.put { src = append(src, &X{}) srcKeys = append(srcKeys, c.key) } } if _, err := client.PutMulti(ctx, srcKeys, src); err != nil { t.Error(err) } err := client.GetMulti(ctx, dstKeys, dst) if err == nil { t.Errorf("client.GetMulti got %v, expected error", err) } e, ok := err.(MultiError) if !ok { t.Errorf("client.GetMulti got %T, expected MultiError", err) } for i, err := range e { got, want := err, (error)(nil) if !cases[i].put { got, want = err, ErrNoSuchEntity } if got != want { t.Errorf("MultiError[%d] == %v, want %v", i, got, want) } } } type Z struct { S string T string `datastore:",noindex"` P []byte K []byte `datastore:",noindex"` } func (z Z) 
String() string { var lens []string v := reflect.ValueOf(z) for i := 0; i < v.NumField(); i++ { if l := v.Field(i).Len(); l > 0 { lens = append(lens, fmt.Sprintf("len(%s)=%d", v.Type().Field(i).Name, l)) } } return fmt.Sprintf("Z{ %s }", strings.Join(lens, ",")) } func TestUnindexableValues(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() x1500 := strings.Repeat("x", 1500) x1501 := strings.Repeat("x", 1501) testCases := []struct { in Z wantErr bool }{ {in: Z{S: x1500}, wantErr: false}, {in: Z{S: x1501}, wantErr: true}, {in: Z{T: x1500}, wantErr: false}, {in: Z{T: x1501}, wantErr: false}, {in: Z{P: []byte(x1500)}, wantErr: false}, {in: Z{P: []byte(x1501)}, wantErr: true}, {in: Z{K: []byte(x1500)}, wantErr: false}, {in: Z{K: []byte(x1501)}, wantErr: false}, } for _, tt := range testCases { _, err := client.Put(ctx, IncompleteKey("BasicsZ", nil), &tt.in) if (err != nil) != tt.wantErr { t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr) } } } func TestNilKey(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() testCases := []struct { in K0 wantErr bool }{ {in: K0{K: testKey0}, wantErr: false}, {in: K0{}, wantErr: false}, } for _, tt := range testCases { _, err := client.Put(ctx, IncompleteKey("NilKey", nil), &tt.in) if (err != nil) != tt.wantErr { t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr) } } } type SQChild struct { I, J int T, U int64 } type SQTestCase struct { desc string q *Query wantCount int wantSum int } func testSmallQueries(t *testing.T, ctx context.Context, client *Client, parent *Key, children []*SQChild, testCases []SQTestCase, extraTests ...func()) { keys := make([]*Key, len(children)) for i := range keys { keys[i] = IncompleteKey("SQChild", parent) } keys, err := 
client.PutMulti(ctx, keys, children) if err != nil { t.Fatalf("client.PutMulti: %v", err) } defer func() { err := client.DeleteMulti(ctx, keys) if err != nil { t.Errorf("client.DeleteMulti: %v", err) } }() for _, tc := range testCases { count, err := client.Count(ctx, tc.q) if err != nil { t.Errorf("Count %q: %v", tc.desc, err) continue } if count != tc.wantCount { t.Errorf("Count %q: got %d want %d", tc.desc, count, tc.wantCount) continue } } for _, tc := range testCases { var got []SQChild _, err := client.GetAll(ctx, tc.q, &got) if err != nil { t.Errorf("client.GetAll %q: %v", tc.desc, err) continue } sum := 0 for _, c := range got { sum += c.I + c.J } if sum != tc.wantSum { t.Errorf("sum %q: got %d want %d", tc.desc, sum, tc.wantSum) continue } } for _, x := range extraTests { x() } } func TestFilters(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() parent := NameKey("SQParent", "TestFilters"+suffix, nil) now := time.Now().Truncate(time.Millisecond).Unix() children := []*SQChild{ {I: 0, T: now, U: now}, {I: 1, T: now, U: now}, {I: 2, T: now, U: now}, {I: 3, T: now, U: now}, {I: 4, T: now, U: now}, {I: 5, T: now, U: now}, {I: 6, T: now, U: now}, {I: 7, T: now, U: now}, } baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now) testSmallQueries(t, ctx, client, parent, children, []SQTestCase{ { "I>1", baseQuery.Filter("I>", 1), 6, 2 + 3 + 4 + 5 + 6 + 7, }, { "I>2 AND I<=5", baseQuery.Filter("I>", 2).Filter("I<=", 5), 3, 3 + 4 + 5, }, { "I>=3 AND I<3", baseQuery.Filter("I>=", 3).Filter("I<", 3), 0, 0, }, { "I=4", baseQuery.Filter("I=", 4), 1, 4, }, }, func() { got := []*SQChild{} want := []*SQChild{ {I: 0, T: now, U: now}, {I: 1, T: now, U: now}, {I: 2, T: now, U: now}, {I: 3, T: now, U: now}, {I: 4, T: now, U: now}, {I: 5, T: now, U: now}, {I: 6, T: now, U: now}, {I: 7, T: now, U: now}, } _, err := client.GetAll(ctx, baseQuery.Order("I"), 
&got) if err != nil { t.Errorf("client.GetAll: %v", err) } if !reflect.DeepEqual(got, want) { t.Errorf("compare: got=%v, want=%v", got, want) } }, func() { got := []*SQChild{} want := []*SQChild{ {I: 7, T: now, U: now}, {I: 6, T: now, U: now}, {I: 5, T: now, U: now}, {I: 4, T: now, U: now}, {I: 3, T: now, U: now}, {I: 2, T: now, U: now}, {I: 1, T: now, U: now}, {I: 0, T: now, U: now}, } _, err := client.GetAll(ctx, baseQuery.Order("-I"), &got) if err != nil { t.Errorf("client.GetAll: %v", err) } if !reflect.DeepEqual(got, want) { t.Errorf("compare: got=%v, want=%v", got, want) } }) } func TestLargeQuery(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() parent := NameKey("LQParent", "TestFilters"+suffix, nil) now := time.Now().Truncate(time.Millisecond).Unix() // Make a large number of children entities. const n = 800 children := make([]*SQChild, 0, n) keys := make([]*Key, 0, n) for i := 0; i < n; i++ { children = append(children, &SQChild{I: i, T: now, U: now}) keys = append(keys, IncompleteKey("SQChild", parent)) } // Store using PutMulti in batches. const batchSize = 500 for i := 0; i < n; i = i + 500 { j := i + batchSize if j > n { j = n } fullKeys, err := client.PutMulti(ctx, keys[i:j], children[i:j]) if err != nil { t.Fatalf("PutMulti(%d, %d): %v", i, j, err) } defer func() { err := client.DeleteMulti(ctx, fullKeys) if err != nil { t.Errorf("client.DeleteMulti: %v", err) } }() } q := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Order("I") // Wait group to allow us to run query tests in parallel below. var wg sync.WaitGroup // Check we get the expected count and results for various limits/offsets. queryTests := []struct { limit, offset, want int }{ // Just limit. {limit: 0, want: 0}, {limit: 100, want: 100}, {limit: 501, want: 501}, {limit: n, want: n}, {limit: n * 2, want: n}, {limit: -1, want: n}, // Just offset. 
{limit: -1, offset: 100, want: n - 100}, {limit: -1, offset: 500, want: n - 500}, {limit: -1, offset: n, want: 0}, // Limit and offset. {limit: 100, offset: 100, want: 100}, {limit: 1000, offset: 100, want: n - 100}, {limit: 500, offset: 500, want: n - 500}, } for _, tt := range queryTests { q := q.Limit(tt.limit).Offset(tt.offset) wg.Add(1) go func(limit, offset, want int) { defer wg.Done() // Check Count returns the expected number of results. count, err := client.Count(ctx, q) if err != nil { t.Errorf("client.Count(limit=%d offset=%d): %v", limit, offset, err) return } if count != want { t.Errorf("Count(limit=%d offset=%d) returned %d, want %d", limit, offset, count, want) } var got []SQChild _, err = client.GetAll(ctx, q, &got) if err != nil { t.Errorf("client.GetAll(limit=%d offset=%d): %v", limit, offset, err) return } if len(got) != want { t.Errorf("GetAll(limit=%d offset=%d) returned %d, want %d", limit, offset, len(got), want) } for i, child := range got { if got, want := child.I, i+offset; got != want { t.Errorf("GetAll(limit=%d offset=%d) got[%d].I == %d; want %d", limit, offset, i, got, want) break } } }(tt.limit, tt.offset, tt.want) } // Also check iterator cursor behaviour. cursorTests := []struct { limit, offset int // Query limit and offset. count int // The number of times to call "next" want int // The I value of the desired element, -1 for "Done". }{ // No limits. {count: 0, limit: -1, want: 0}, {count: 5, limit: -1, want: 5}, {count: 500, limit: -1, want: 500}, {count: 1000, limit: -1, want: -1}, // No more results. // Limits. {count: 5, limit: 5, want: 5}, {count: 500, limit: 5, want: 5}, {count: 1000, limit: 1000, want: -1}, // No more results. // Offsets. {count: 0, offset: 5, limit: -1, want: 5}, {count: 5, offset: 5, limit: -1, want: 10}, {count: 200, offset: 500, limit: -1, want: 700}, {count: 200, offset: 1000, limit: -1, want: -1}, // No more results. 
} for _, tt := range cursorTests { wg.Add(1) go func(count, limit, offset, want int) { defer wg.Done() // Run iterator through count calls to Next. it := client.Run(ctx, q.Limit(limit).Offset(offset).KeysOnly()) for i := 0; i < count; i++ { _, err := it.Next(nil) if err == iterator.Done { break } if err != nil { t.Errorf("count=%d, limit=%d, offset=%d: it.Next failed at i=%d", count, limit, offset, i) return } } // Grab the cursor. cursor, err := it.Cursor() if err != nil { t.Errorf("count=%d, limit=%d, offset=%d: it.Cursor: %v", count, limit, offset, err) return } // Make a request for the next element. it = client.Run(ctx, q.Limit(1).Start(cursor)) var entity SQChild _, err = it.Next(&entity) switch { case want == -1: if err != iterator.Done { t.Errorf("count=%d, limit=%d, offset=%d: it.Next from cursor %v, want Done", count, limit, offset, err) } case err != nil: t.Errorf("count=%d, limit=%d, offset=%d: it.Next from cursor: %v, want nil", count, limit, offset, err) case entity.I != want: t.Errorf("count=%d, limit=%d, offset=%d: got.I = %d, want %d", count, limit, offset, entity.I, want) } }(tt.count, tt.limit, tt.offset, tt.want) } wg.Wait() } func TestEventualConsistency(t *testing.T) { // TODO(jba): either make this actually test eventual consistency, or // delete it. Currently it behaves the same with or without the // EventualConsistency call. 
if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() parent := NameKey("SQParent", "TestEventualConsistency"+suffix, nil) now := time.Now().Truncate(time.Millisecond).Unix() children := []*SQChild{ {I: 0, T: now, U: now}, {I: 1, T: now, U: now}, {I: 2, T: now, U: now}, } query := NewQuery("SQChild").Ancestor(parent).Filter("T =", now).EventualConsistency() testSmallQueries(t, ctx, client, parent, children, nil, func() { got, err := client.Count(ctx, query) if err != nil { t.Fatalf("Count: %v", err) } if got < 0 || 3 < got { t.Errorf("Count: got %d, want [0,3]", got) } }) } func TestProjection(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() parent := NameKey("SQParent", "TestProjection"+suffix, nil) now := time.Now().Truncate(time.Millisecond).Unix() children := []*SQChild{ {I: 1 << 0, J: 100, T: now, U: now}, {I: 1 << 1, J: 100, T: now, U: now}, {I: 1 << 2, J: 200, T: now, U: now}, {I: 1 << 3, J: 300, T: now, U: now}, {I: 1 << 4, J: 300, T: now, U: now}, } baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Filter("J>", 150) testSmallQueries(t, ctx, client, parent, children, []SQTestCase{ { "project", baseQuery.Project("J"), 3, 200 + 300 + 300, }, { "distinct", baseQuery.Project("J").Distinct(), 2, 200 + 300, }, { "distinct on", baseQuery.Project("J").DistinctOn("J"), 2, 200 + 300, }, { "project on meaningful (GD_WHEN) field", baseQuery.Project("U"), 3, 0, }, }) } func TestAllocateIDs(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() keys := make([]*Key, 5) for i := range keys { keys[i] = IncompleteKey("AllocID", nil) } keys, err := client.AllocateIDs(ctx, keys) if err != nil { t.Errorf("AllocID #0 failed: %v", err) } if 
want := len(keys); want != 5 { t.Errorf("Expected to allocate 5 keys, %d keys are found", want) } for _, k := range keys { if k.Incomplete() { t.Errorf("Unexpeceted incomplete key found: %v", k) } } } func TestGetAllWithFieldMismatch(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() type Fat struct { X, Y int } type Thin struct { X int } // Ancestor queries (those within an entity group) are strongly consistent // by default, which prevents a test from being flaky. // See https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Data_consistency // for more information. parent := NameKey("SQParent", "TestGetAllWithFieldMismatch"+suffix, nil) putKeys := make([]*Key, 3) for i := range putKeys { putKeys[i] = IDKey("GetAllThing", int64(10+i), parent) _, err := client.Put(ctx, putKeys[i], &Fat{X: 20 + i, Y: 30 + i}) if err != nil { t.Fatalf("client.Put: %v", err) } } var got []Thin want := []Thin{ {X: 20}, {X: 21}, {X: 22}, } getKeys, err := client.GetAll(ctx, NewQuery("GetAllThing").Ancestor(parent), &got) if len(getKeys) != 3 && !reflect.DeepEqual(getKeys, putKeys) { t.Errorf("client.GetAll: keys differ\ngetKeys=%v\nputKeys=%v", getKeys, putKeys) } if !reflect.DeepEqual(got, want) { t.Errorf("client.GetAll: entities differ\ngot =%v\nwant=%v", got, want) } if _, ok := err.(*ErrFieldMismatch); !ok { t.Errorf("client.GetAll: got err=%v, want ErrFieldMismatch", err) } } func TestKindlessQueries(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() type Dee struct { I int Why string } type Dum struct { I int Pling string } parent := NameKey("Tweedle", "tweedle"+suffix, nil) keys := []*Key{ NameKey("Dee", "dee0", parent), NameKey("Dum", "dum1", parent), NameKey("Dum", "dum2", parent), NameKey("Dum", "dum3", parent), } src := []interface{}{ 
&Dee{1, "binary0001"}, &Dum{2, "binary0010"}, &Dum{4, "binary0100"}, &Dum{8, "binary1000"}, } keys, err := client.PutMulti(ctx, keys, src) if err != nil { t.Fatalf("put: %v", err) } testCases := []struct { desc string query *Query want []int wantErr string }{ { desc: "Dee", query: NewQuery("Dee"), want: []int{1}, }, { desc: "Doh", query: NewQuery("Doh"), want: nil}, { desc: "Dum", query: NewQuery("Dum"), want: []int{2, 4, 8}, }, { desc: "", query: NewQuery(""), want: []int{1, 2, 4, 8}, }, { desc: "Kindless filter", query: NewQuery("").Filter("__key__ =", keys[2]), want: []int{4}, }, { desc: "Kindless order", query: NewQuery("").Order("__key__"), want: []int{1, 2, 4, 8}, }, { desc: "Kindless bad filter", query: NewQuery("").Filter("I =", 4), wantErr: "kind is required", }, { desc: "Kindless bad order", query: NewQuery("").Order("-__key__"), wantErr: "kind is required for all orders except __key__ ascending", }, } loop: for _, tc := range testCases { q := tc.query.Ancestor(parent) gotCount, err := client.Count(ctx, q) if err != nil { if tc.wantErr == "" || !strings.Contains(err.Error(), tc.wantErr) { t.Errorf("count %q: err %v, want err %q", tc.desc, err, tc.wantErr) } continue } if tc.wantErr != "" { t.Errorf("count %q: want err %q", tc.desc, tc.wantErr) continue } if gotCount != len(tc.want) { t.Errorf("count %q: got %d want %d", tc.desc, gotCount, len(tc.want)) continue } var got []int for iter := client.Run(ctx, q); ; { var dst struct { I int Why, Pling string } _, err := iter.Next(&dst) if err == iterator.Done { break } if err != nil { t.Errorf("iter.Next %q: %v", tc.desc, err) continue loop } got = append(got, dst.I) } sort.Ints(got) if !reflect.DeepEqual(got, tc.want) { t.Errorf("elems %q: got %+v want %+v", tc.desc, got, tc.want) continue } } } func TestTransaction(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() type Counter struct { N int T 
time.Time } bangErr := errors.New("bang") tests := []struct { desc string causeConflict []bool retErr []error want int wantErr error }{ { desc: "3 attempts, no conflicts", causeConflict: []bool{false}, retErr: []error{nil}, want: 11, }, { desc: "1 attempt, user error", causeConflict: []bool{false}, retErr: []error{bangErr}, wantErr: bangErr, }, { desc: "2 attempts, 1 conflict", causeConflict: []bool{true, false}, retErr: []error{nil, nil}, want: 13, // Each conflict increments by 2. }, { desc: "3 attempts, 3 conflicts", causeConflict: []bool{true, true, true}, retErr: []error{nil, nil, nil}, wantErr: ErrConcurrentTransaction, }, } for i, tt := range tests { // Put a new counter. c := &Counter{N: 10, T: time.Now()} key, err := client.Put(ctx, IncompleteKey("TransCounter", nil), c) if err != nil { t.Errorf("%s: client.Put: %v", tt.desc, err) continue } defer client.Delete(ctx, key) // Increment the counter in a transaction. // The test case can manually cause a conflict or return an // error at each attempt. var attempts int _, err = client.RunInTransaction(ctx, func(tx *Transaction) error { attempts++ if attempts > len(tt.causeConflict) { return fmt.Errorf("too many attempts. Got %d, max %d", attempts, len(tt.causeConflict)) } var c Counter if err := tx.Get(key, &c); err != nil { return err } c.N++ if _, err := tx.Put(key, &c); err != nil { return err } if tt.causeConflict[attempts-1] { c.N += 1 if _, err := client.Put(ctx, key, &c); err != nil { return err } } return tt.retErr[attempts-1] }, MaxAttempts(i)) // Check the error returned by RunInTransaction. if err != tt.wantErr { t.Errorf("%s: got err %v, want %v", tt.desc, err, tt.wantErr) continue } if err != nil { continue } // Check the final value of the counter. 
if err := client.Get(ctx, key, c); err != nil { t.Errorf("%s: client.Get: %v", tt.desc, err) continue } if c.N != tt.want { t.Errorf("%s: counter N=%d, want N=%d", tt.desc, c.N, tt.want) } } } func TestNilPointers(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() type X struct { S string } src := []*X{{"zero"}, {"one"}} keys := []*Key{IncompleteKey("NilX", nil), IncompleteKey("NilX", nil)} keys, err := client.PutMulti(ctx, keys, src) if err != nil { t.Fatalf("PutMulti: %v", err) } // It's okay to store into a slice of nil *X. xs := make([]*X, 2) if err := client.GetMulti(ctx, keys, xs); err != nil { t.Errorf("GetMulti: %v", err) } else if !reflect.DeepEqual(xs, src) { t.Errorf("GetMulti fetched %v, want %v", xs, src) } // It isn't okay to store into a single nil *X. var x0 *X if err, want := client.Get(ctx, keys[0], x0), ErrInvalidEntityType; err != want { t.Errorf("Get: err %v; want %v", err, want) } if err := client.DeleteMulti(ctx, keys); err != nil { t.Errorf("Delete: %v", err) } } func TestNestedRepeatedElementNoIndex(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client := newClient(ctx, t) defer client.Close() type Inner struct { Name string Value string `datastore:",noindex"` } type Outer struct { Config []Inner } m := &Outer{ Config: []Inner{ {Name: "short", Value: "a"}, {Name: "long", Value: strings.Repeat("a", 2000)}, }, } key := NameKey("Nested", "Nested"+suffix, nil) if _, err := client.Put(ctx, key, m); err != nil { t.Fatalf("client.Put: %v", err) } if err := client.Delete(ctx, key); err != nil { t.Fatalf("client.Delete: %v", err) } } golang-google-cloud-0.9.0/datastore/key.go000066400000000000000000000152261312234511600204560ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "bytes" "encoding/base64" "encoding/gob" "errors" "strconv" "strings" "github.com/golang/protobuf/proto" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/datastore/v1" ) // Key represents the datastore key for a stored entity. type Key struct { // Kind cannot be empty. Kind string // Either ID or Name must be zero for the Key to be valid. // If both are zero, the Key is incomplete. ID int64 Name string // Parent must either be a complete Key or nil. Parent *Key // Namespace provides the ability to partition your data for multiple // tenants. In most cases, it is not necessary to specify a namespace. // See docs on datastore multitenancy for details: // https://cloud.google.com/datastore/docs/concepts/multitenancy Namespace string } // Incomplete reports whether the key does not refer to a stored entity. func (k *Key) Incomplete() bool { return k.Name == "" && k.ID == 0 } // valid returns whether the key is valid. func (k *Key) valid() bool { if k == nil { return false } for ; k != nil; k = k.Parent { if k.Kind == "" { return false } if k.Name != "" && k.ID != 0 { return false } if k.Parent != nil { if k.Parent.Incomplete() { return false } if k.Parent.Namespace != k.Namespace { return false } } } return true } // Equal reports whether two keys are equal. Two keys are equal if they are // both nil, or if their kinds, IDs, names, namespaces and parents are equal. 
func (k *Key) Equal(o *Key) bool { for { if k == nil || o == nil { return k == o // if either is nil, both must be nil } if k.Namespace != o.Namespace || k.Name != o.Name || k.ID != o.ID || k.Kind != o.Kind { return false } if k.Parent == nil && o.Parent == nil { return true } k = k.Parent o = o.Parent } } // marshal marshals the key's string representation to the buffer. func (k *Key) marshal(b *bytes.Buffer) { if k.Parent != nil { k.Parent.marshal(b) } b.WriteByte('/') b.WriteString(k.Kind) b.WriteByte(',') if k.Name != "" { b.WriteString(k.Name) } else { b.WriteString(strconv.FormatInt(k.ID, 10)) } } // String returns a string representation of the key. func (k *Key) String() string { if k == nil { return "" } b := bytes.NewBuffer(make([]byte, 0, 512)) k.marshal(b) return b.String() } // Note: Fields not renamed compared to appengine gobKey struct // This ensures gobs created by appengine can be read here, and vice/versa type gobKey struct { Kind string StringID string IntID int64 Parent *gobKey AppID string Namespace string } func keyToGobKey(k *Key) *gobKey { if k == nil { return nil } return &gobKey{ Kind: k.Kind, StringID: k.Name, IntID: k.ID, Parent: keyToGobKey(k.Parent), Namespace: k.Namespace, } } func gobKeyToKey(gk *gobKey) *Key { if gk == nil { return nil } return &Key{ Kind: gk.Kind, Name: gk.StringID, ID: gk.IntID, Parent: gobKeyToKey(gk.Parent), Namespace: gk.Namespace, } } // GobEncode marshals the key into a sequence of bytes // using an encoding/gob.Encoder. func (k *Key) GobEncode() ([]byte, error) { buf := new(bytes.Buffer) if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil { return nil, err } return buf.Bytes(), nil } // GobDecode unmarshals a sequence of bytes using an encoding/gob.Decoder. func (k *Key) GobDecode(buf []byte) error { gk := new(gobKey) if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil { return err } *k = *gobKeyToKey(gk) return nil } // MarshalJSON marshals the key into JSON. 
func (k *Key) MarshalJSON() ([]byte, error) { return []byte(`"` + k.Encode() + `"`), nil } // UnmarshalJSON unmarshals a key JSON object into a Key. func (k *Key) UnmarshalJSON(buf []byte) error { if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' { return errors.New("datastore: bad JSON key") } k2, err := DecodeKey(string(buf[1 : len(buf)-1])) if err != nil { return err } *k = *k2 return nil } // Encode returns an opaque representation of the key // suitable for use in HTML and URLs. // This is compatible with the Python and Java runtimes. func (k *Key) Encode() string { pKey := keyToProto(k) b, err := proto.Marshal(pKey) if err != nil { panic(err) } // Trailing padding is stripped. return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") } // DecodeKey decodes a key from the opaque representation returned by Encode. func DecodeKey(encoded string) (*Key, error) { // Re-add padding. if m := len(encoded) % 4; m != 0 { encoded += strings.Repeat("=", 4-m) } b, err := base64.URLEncoding.DecodeString(encoded) if err != nil { return nil, err } pKey := new(pb.Key) if err := proto.Unmarshal(b, pKey); err != nil { return nil, err } return protoToKey(pKey) } // AllocateIDs accepts a slice of incomplete keys and returns a // slice of complete keys that are guaranteed to be valid in the datastore. func (c *Client) AllocateIDs(ctx context.Context, keys []*Key) ([]*Key, error) { if keys == nil { return nil, nil } req := &pb.AllocateIdsRequest{ ProjectId: c.dataset, Keys: multiKeyToProto(keys), } resp, err := c.client.AllocateIds(ctx, req) if err != nil { return nil, err } return multiProtoToKey(resp.Keys) } // IncompleteKey creates a new incomplete key. // The supplied kind cannot be empty. // The namespace of the new key is empty. func IncompleteKey(kind string, parent *Key) *Key { return &Key{ Kind: kind, Parent: parent, } } // NameKey creates a new key with a name. // The supplied kind cannot be empty. 
// The supplied parent must either be a complete key or nil. // The namespace of the new key is empty. func NameKey(kind, name string, parent *Key) *Key { return &Key{ Kind: kind, Name: name, Parent: parent, } } // IDKey creates a new key with an ID. // The supplied kind cannot be empty. // The supplied parent must either be a complete key or nil. // The namespace of the new key is empty. func IDKey(kind string, id int64, parent *Key) *Key { return &Key{ Kind: kind, ID: id, Parent: parent, } } golang-google-cloud-0.9.0/datastore/key_test.go000066400000000000000000000116661312234511600215210ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package datastore import ( "bytes" "encoding/gob" "encoding/json" "testing" ) func TestEqual(t *testing.T) { testCases := []struct { x, y *Key equal bool }{ { x: nil, y: nil, equal: true, }, { x: &Key{Kind: "kindA"}, y: &Key{Kind: "kindA"}, equal: true, }, { x: &Key{Kind: "kindA", Name: "nameA"}, y: &Key{Kind: "kindA", Name: "nameA"}, equal: true, }, { x: &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"}, y: &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"}, equal: true, }, { x: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, y: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, equal: true, }, { x: &Key{Kind: "kindA", Name: "nameA"}, y: &Key{Kind: "kindB", Name: "nameA"}, equal: false, }, { x: &Key{Kind: "kindA", Name: "nameA"}, y: &Key{Kind: "kindA", Name: "nameB"}, equal: false, }, { x: &Key{Kind: "kindA", Name: "nameA"}, y: &Key{Kind: "kindA", ID: 1337}, equal: false, }, { x: &Key{Kind: "kindA", Name: "nameA"}, y: &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"}, equal: false, }, { x: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, y: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindY", Name: "nameX"}}, equal: false, }, { x: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, y: &Key{Kind: "kindA", ID: 1337}, equal: false, }, } for _, tt := range testCases { if got := tt.x.Equal(tt.y); got != tt.equal { t.Errorf("Equal(%v, %v) = %t; want %t", tt.x, tt.y, got, tt.equal) } if got := tt.y.Equal(tt.x); got != tt.equal { t.Errorf("Equal(%v, %v) = %t; want %t", tt.y, tt.x, got, tt.equal) } } } func TestEncoding(t *testing.T) { testCases := []struct { k *Key valid bool }{ { k: nil, valid: false, }, { k: &Key{}, valid: false, }, { k: &Key{Kind: "kindA"}, valid: true, }, { k: &Key{Kind: "kindA", Namespace: "gopherspace"}, valid: true, }, { k: &Key{Kind: "kindA", Name: "nameA"}, valid: true, }, { k: &Key{Kind: "kindA", ID: 1337}, 
valid: true, }, { k: &Key{Kind: "kindA", Name: "nameA", ID: 1337}, valid: false, }, { k: &Key{Kind: "kindA", Parent: &Key{Kind: "kindB", Name: "nameB"}}, valid: true, }, { k: &Key{Kind: "kindA", Parent: &Key{Kind: "kindB"}}, valid: false, }, { k: &Key{Kind: "kindA", Parent: &Key{Kind: "kindB", Name: "nameB", Namespace: "gopherspace"}}, valid: false, }, } for _, tt := range testCases { if got := tt.k.valid(); got != tt.valid { t.Errorf("valid(%v) = %t; want %t", tt.k, got, tt.valid) } // Check encoding/decoding for valid keys. if !tt.valid { continue } enc := tt.k.Encode() dec, err := DecodeKey(enc) if err != nil { t.Errorf("DecodeKey(%q) from %v: %v", enc, tt.k, err) continue } if !tt.k.Equal(dec) { t.Logf("Proto: %s", keyToProto(tt.k)) t.Errorf("Decoded key %v not equal to %v", dec, tt.k) } b, err := json.Marshal(tt.k) if err != nil { t.Errorf("json.Marshal(%v): %v", tt.k, err) continue } key := &Key{} if err := json.Unmarshal(b, key); err != nil { t.Errorf("json.Unmarshal(%s) for key %v: %v", b, tt.k, err) continue } if !tt.k.Equal(key) { t.Errorf("JSON decoded key %v not equal to %v", dec, tt.k) } buf := &bytes.Buffer{} gobEnc := gob.NewEncoder(buf) if err := gobEnc.Encode(tt.k); err != nil { t.Errorf("gobEnc.Encode(%v): %v", tt.k, err) continue } gobDec := gob.NewDecoder(buf) key = &Key{} if err := gobDec.Decode(key); err != nil { t.Errorf("gobDec.Decode() for key %v: %v", tt.k, err) } if !tt.k.Equal(key) { t.Errorf("gob decoded key %v not equal to %v", dec, tt.k) } } } func TestInvalidKeyDecode(t *testing.T) { // Check that decoding an invalid key returns an err and doesn't panic. 
enc := NameKey("Kind", "Foo", nil).Encode() invalid := []string{ "", "Laboratorio", enc + "Junk", enc[:len(enc)-4], } for _, enc := range invalid { key, err := DecodeKey(enc) if err == nil || key != nil { t.Errorf("DecodeKey(%q) = %v, %v; want nil, error", enc, key, err) } } } golang-google-cloud-0.9.0/datastore/load.go000066400000000000000000000316551312234511600206110ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "fmt" "reflect" "strings" "time" "cloud.google.com/go/internal/fields" pb "google.golang.org/genproto/googleapis/datastore/v1" ) var ( typeOfByteSlice = reflect.TypeOf([]byte(nil)) typeOfTime = reflect.TypeOf(time.Time{}) typeOfGeoPoint = reflect.TypeOf(GeoPoint{}) typeOfKeyPtr = reflect.TypeOf(&Key{}) typeOfEntityPtr = reflect.TypeOf(&Entity{}) ) // typeMismatchReason returns a string explaining why the property p could not // be stored in an entity field of type v.Type(). 
func typeMismatchReason(p Property, v reflect.Value) string { entityType := "empty" switch p.Value.(type) { case int64: entityType = "int" case bool: entityType = "bool" case string: entityType = "string" case float64: entityType = "float" case *Key: entityType = "*datastore.Key" case *Entity: entityType = "*datastore.Entity" case GeoPoint: entityType = "GeoPoint" case time.Time: entityType = "time.Time" case []byte: entityType = "[]byte" } return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type()) } type propertyLoader struct { // m holds the number of times a substruct field like "Foo.Bar.Baz" has // been seen so far. The map is constructed lazily. m map[string]int } func (l *propertyLoader) load(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string { sl, ok := p.Value.([]interface{}) if !ok { return l.loadOneElement(codec, structValue, p, prev) } for _, val := range sl { p.Value = val if errStr := l.loadOneElement(codec, structValue, p, prev); errStr != "" { return errStr } } return "" } // loadOneElement loads the value of Property p into structValue based on the provided // codec. codec is used to find the field in structValue into which p should be loaded. // prev is the set of property names already seen for structValue. func (l *propertyLoader) loadOneElement(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string { var sliceOk bool var sliceIndex int var v reflect.Value name := p.Name fieldNames := strings.Split(name, ".") for len(fieldNames) > 0 { var field *fields.Field // Start by trying to find a field with name. If none found, // cut off the last field (delimited by ".") and find its parent // in the codec. // eg. for name "A.B.C.D", split off "A.B.C" and try to // find a field in the codec with this name. // Loop again with "A.B", etc. 
for i := len(fieldNames); i > 0; i-- { parent := strings.Join(fieldNames[:i], ".") field = codec.Match(parent) if field != nil { fieldNames = fieldNames[i:] break } } // If we never found a matching field in the codec, return // error message. if field == nil { return "no such struct field" } v = initField(structValue, field.Index) if !v.IsValid() { return "no such struct field" } if !v.CanSet() { return "cannot set struct field" } // If field implements PLS, we delegate loading to the PLS's Load early, // and stop iterating through fields. ok, err := plsFieldLoad(v, p, fieldNames) if err != nil { return err.Error() } if ok { return "" } if field.Type.Kind() == reflect.Struct { codec, err = structCache.Fields(field.Type) if err != nil { return err.Error() } structValue = v } // If the element is a slice, we need to accommodate it. if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice { if l.m == nil { l.m = make(map[string]int) } sliceIndex = l.m[p.Name] l.m[p.Name] = sliceIndex + 1 for v.Len() <= sliceIndex { v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem())) } structValue = v.Index(sliceIndex) // If structValue implements PLS, we delegate loading to the PLS's // Load early, and stop iterating through fields. 
ok, err := plsFieldLoad(structValue, p, fieldNames) if err != nil { return err.Error() } if ok { return "" } if structValue.Type().Kind() == reflect.Struct { codec, err = structCache.Fields(structValue.Type()) if err != nil { return err.Error() } } sliceOk = true } } var slice reflect.Value if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { slice = v v = reflect.New(v.Type().Elem()).Elem() } else if _, ok := prev[p.Name]; ok && !sliceOk { // Zero the field back out that was set previously, turns out // it's a slice and we don't know what to do with it v.Set(reflect.Zero(v.Type())) return "multiple-valued property requires a slice field type" } prev[p.Name] = struct{}{} if errReason := setVal(v, p); errReason != "" { // Set the slice back to its zero value. if slice.IsValid() { slice.Set(reflect.Zero(slice.Type())) } return errReason } if slice.IsValid() { slice.Index(sliceIndex).Set(v) } return "" } // plsFieldLoad first tries to converts v's value to a PLS, then v's addressed // value to a PLS. If neither succeeds, plsFieldLoad returns false for first return // value. Otherwise, the first return value will be true. // If v is successfully converted to a PLS, plsFieldLoad will then try to Load // the property p into v (by way of the PLS's Load method). // // If the field v has been flattened, the Property's name must be altered // before calling Load to reflect the field v. // For example, if our original field name was "A.B.C.D", // and at this point in iteration we had initialized the field // corresponding to "A" and have moved into the struct, so that now // v corresponds to the field named "B", then we want to let the // PLS handle this field (B)'s subfields ("C", "D"), // so we send the property to the PLS's Load, renamed to "C.D". // // If subfields are present, the field v has been flattened. 
func plsFieldLoad(v reflect.Value, p Property, subfields []string) (ok bool, err error) { vpls, err := plsForLoad(v) if err != nil { return false, err } if vpls == nil { return false, nil } // If Entity, load properties as well as key. if e, ok := p.Value.(*Entity); ok { err = loadEntity(vpls, e) return true, err } // If flattened, we must alter the property's name to reflect // the field v. if len(subfields) > 0 { p.Name = strings.Join(subfields, ".") } return true, vpls.Load([]Property{p}) } // setVal sets 'v' to the value of the Property 'p'. func setVal(v reflect.Value, p Property) string { pValue := p.Value switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: x, ok := pValue.(int64) if !ok && pValue != nil { return typeMismatchReason(p, v) } if v.OverflowInt(x) { return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) } v.SetInt(x) case reflect.Bool: x, ok := pValue.(bool) if !ok && pValue != nil { return typeMismatchReason(p, v) } v.SetBool(x) case reflect.String: x, ok := pValue.(string) if !ok && pValue != nil { return typeMismatchReason(p, v) } v.SetString(x) case reflect.Float32, reflect.Float64: x, ok := pValue.(float64) if !ok && pValue != nil { return typeMismatchReason(p, v) } if v.OverflowFloat(x) { return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) } v.SetFloat(x) case reflect.Ptr: // v must be either a pointer to a Key or Entity. if v.Type() != typeOfKeyPtr && v.Type().Elem().Kind() != reflect.Struct { return typeMismatchReason(p, v) } if pValue == nil { // If v is populated already, set it to nil. 
if !v.IsNil() { v.Set(reflect.New(v.Type()).Elem()) } return "" } switch x := pValue.(type) { case *Key: if _, ok := v.Interface().(*Key); !ok { return typeMismatchReason(p, v) } v.Set(reflect.ValueOf(x)) case *Entity: if v.IsNil() { v.Set(reflect.New(v.Type().Elem())) } err := loadEntity(v.Interface(), x) if err != nil { return err.Error() } default: return typeMismatchReason(p, v) } case reflect.Struct: switch v.Type() { case typeOfTime: x, ok := pValue.(time.Time) if !ok && pValue != nil { return typeMismatchReason(p, v) } v.Set(reflect.ValueOf(x)) case typeOfGeoPoint: x, ok := pValue.(GeoPoint) if !ok && pValue != nil { return typeMismatchReason(p, v) } v.Set(reflect.ValueOf(x)) default: ent, ok := pValue.(*Entity) if !ok { return typeMismatchReason(p, v) } err := loadEntity(v.Addr().Interface(), ent) if err != nil { return err.Error() } } case reflect.Slice: x, ok := pValue.([]byte) if !ok && pValue != nil { return typeMismatchReason(p, v) } if v.Type().Elem().Kind() != reflect.Uint8 { return typeMismatchReason(p, v) } v.SetBytes(x) default: return typeMismatchReason(p, v) } return "" } // initField is similar to reflect's Value.FieldByIndex, in that it // returns the nested struct field corresponding to index, but it // initialises any nil pointers encountered when traversing the structure. func initField(val reflect.Value, index []int) reflect.Value { for _, i := range index[:len(index)-1] { val = val.Field(i) if val.Kind() == reflect.Ptr { if val.IsNil() { val.Set(reflect.New(val.Type().Elem())) } val = val.Elem() } } return val.Field(index[len(index)-1]) } // loadEntityProto loads an EntityProto into PropertyLoadSaver or struct pointer. 
func loadEntityProto(dst interface{}, src *pb.Entity) error { ent, err := protoToEntity(src) if err != nil { return err } return loadEntity(dst, ent) } func loadEntity(dst interface{}, ent *Entity) error { if pls, ok := dst.(PropertyLoadSaver); ok { err := pls.Load(ent.Properties) if err != nil { return err } if e, ok := dst.(KeyLoader); ok { err = e.LoadKey(ent.Key) } return err } return loadEntityToStruct(dst, ent) } func loadEntityToStruct(dst interface{}, ent *Entity) error { pls, err := newStructPLS(dst) if err != nil { return err } // Load properties. err = pls.Load(ent.Properties) if err != nil { return err } // Load key. keyField := pls.codec.Match(keyFieldName) if keyField != nil && ent.Key != nil { pls.v.FieldByIndex(keyField.Index).Set(reflect.ValueOf(ent.Key)) } return nil } func (s structPLS) Load(props []Property) error { var fieldName, errReason string var l propertyLoader prev := make(map[string]struct{}) for _, p := range props { if errStr := l.load(s.codec, s.v, p, prev); errStr != "" { // We don't return early, as we try to load as many properties as possible. // It is valid to load an entity into a struct that cannot fully represent it. // That case returns an error, but the caller is free to ignore it. fieldName, errReason = p.Name, errStr } } if errReason != "" { return &ErrFieldMismatch{ StructType: s.v.Type(), FieldName: fieldName, Reason: errReason, } } return nil } func protoToEntity(src *pb.Entity) (*Entity, error) { props := make([]Property, 0, len(src.Properties)) for name, val := range src.Properties { v, err := propToValue(val) if err != nil { return nil, err } props = append(props, Property{ Name: name, Value: v, NoIndex: val.ExcludeFromIndexes, }) } var key *Key if src.Key != nil { // Ignore any error, since nested entity values // are allowed to have an invalid key. key, _ = protoToKey(src.Key) } return &Entity{key, props}, nil } // propToValue returns a Go value that represents the PropertyValue. 
For // example, a TimestampValue becomes a time.Time. func propToValue(v *pb.Value) (interface{}, error) { switch v := v.ValueType.(type) { case *pb.Value_NullValue: return nil, nil case *pb.Value_BooleanValue: return v.BooleanValue, nil case *pb.Value_IntegerValue: return v.IntegerValue, nil case *pb.Value_DoubleValue: return v.DoubleValue, nil case *pb.Value_TimestampValue: return time.Unix(v.TimestampValue.Seconds, int64(v.TimestampValue.Nanos)), nil case *pb.Value_KeyValue: return protoToKey(v.KeyValue) case *pb.Value_StringValue: return v.StringValue, nil case *pb.Value_BlobValue: return []byte(v.BlobValue), nil case *pb.Value_GeoPointValue: return GeoPoint{Lat: v.GeoPointValue.Latitude, Lng: v.GeoPointValue.Longitude}, nil case *pb.Value_EntityValue: return protoToEntity(v.EntityValue) case *pb.Value_ArrayValue: arr := make([]interface{}, 0, len(v.ArrayValue.Values)) for _, v := range v.ArrayValue.Values { vv, err := propToValue(v) if err != nil { return nil, err } arr = append(arr, vv) } return arr, nil default: return nil, nil } } golang-google-cloud-0.9.0/datastore/load_test.go000066400000000000000000000406241312234511600216440ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package datastore import ( "reflect" "testing" pb "google.golang.org/genproto/googleapis/datastore/v1" ) type Simple struct { I int64 } type SimpleWithTag struct { I int64 `datastore:"II"` } type NestedSimpleWithTag struct { A SimpleWithTag `datastore:"AA"` } type NestedSliceOfSimple struct { A []Simple } type SimpleTwoFields struct { S string SS string } type NestedSimpleAnonymous struct { Simple X string } type NestedSimple struct { A Simple I int } type NestedSimple1 struct { A Simple X string } type NestedSimple2X struct { AA NestedSimple A SimpleTwoFields S string } type BDotB struct { B string `datastore:"B.B"` } type ABDotB struct { A BDotB } type MultiAnonymous struct { Simple SimpleTwoFields X string } func TestLoadEntityNestedLegacy(t *testing.T) { testCases := []struct { desc string src *pb.Entity want interface{} }{ { desc: "nested", src: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, "A.I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, }, }, want: &NestedSimple1{ A: Simple{I: 2}, X: "two", }, }, { desc: "nested with tag", src: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "AA.II": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, }, }, want: &NestedSimpleWithTag{ A: SimpleWithTag{I: 2}, }, }, { desc: "nested with anonymous struct field", src: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, }, }, want: &NestedSimpleAnonymous{ Simple: Simple{I: 2}, X: "two", }, }, { desc: "nested with dotted field tag", src: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "A.B.B": {ValueType: &pb.Value_StringValue{StringValue: "bb"}}, }, }, want: &ABDotB{ A: BDotB{ B: "bb", }, }, }, { desc: "nested with multiple anonymous fields", src: &pb.Entity{ Key: keyToProto(testKey0), 
Properties: map[string]*pb.Value{ "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, "S": {ValueType: &pb.Value_StringValue{StringValue: "S"}}, "SS": {ValueType: &pb.Value_StringValue{StringValue: "s"}}, "X": {ValueType: &pb.Value_StringValue{StringValue: "s"}}, }, }, want: &MultiAnonymous{ Simple: Simple{I: 3}, SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"}, X: "s", }, }, } for _, tc := range testCases { dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() err := loadEntityProto(dst, tc.src) if err != nil { t.Errorf("loadEntityProto: %s: %v", tc.desc, err) continue } if !reflect.DeepEqual(tc.want, dst) { t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want) } } } type WithKey struct { X string I int K *Key `datastore:"__key__"` } type NestedWithKey struct { Y string N WithKey } var ( incompleteKey = newKey("", nil) invalidKey = newKey("s", incompleteKey) ) func TestLoadEntityNested(t *testing.T) { testCases := []struct { desc string src *pb.Entity want interface{} }{ { desc: "nested basic", src: &pb.Entity{ Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, }, }, }}, "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 10}}, }, }, want: &NestedSimple{ A: Simple{I: 3}, I: 10, }, }, { desc: "nested with struct tags", src: &pb.Entity{ Properties: map[string]*pb.Value{ "AA": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "II": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}}, }, }, }}, }, }, want: &NestedSimpleWithTag{ A: SimpleWithTag{I: 1}, }, }, { desc: "nested 2x", src: &pb.Entity{ Properties: map[string]*pb.Value{ "AA": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "I": {ValueType: 
&pb.Value_IntegerValue{IntegerValue: 3}}, }, }, }}, "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 1}}, }, }, }}, "A": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "S": {ValueType: &pb.Value_StringValue{StringValue: "S"}}, "SS": {ValueType: &pb.Value_StringValue{StringValue: "s"}}, }, }, }}, "S": {ValueType: &pb.Value_StringValue{StringValue: "SS"}}, }, }, want: &NestedSimple2X{ AA: NestedSimple{ A: Simple{I: 3}, I: 1, }, A: SimpleTwoFields{S: "S", SS: "s"}, S: "SS", }, }, { desc: "nested anonymous", src: &pb.Entity{ Properties: map[string]*pb.Value{ "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, "X": {ValueType: &pb.Value_StringValue{StringValue: "SomeX"}}, }, }, want: &NestedSimpleAnonymous{ Simple: Simple{I: 3}, X: "SomeX", }, }, { desc: "nested simple with slice", src: &pb.Entity{ Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_ArrayValue{ ArrayValue: &pb.ArrayValue{ Values: []*pb.Value{ {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, }, }, }}, {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 4}}, }, }, }}, }, }, }}, }, }, want: &NestedSliceOfSimple{ A: []Simple{Simple{I: 3}, Simple{I: 4}}, }, }, { desc: "nested with multiple anonymous fields", src: &pb.Entity{ Properties: map[string]*pb.Value{ "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}}, "S": {ValueType: &pb.Value_StringValue{StringValue: "S"}}, "SS": {ValueType: &pb.Value_StringValue{StringValue: "s"}}, "X": {ValueType: &pb.Value_StringValue{StringValue: "ss"}}, }, }, want: &MultiAnonymous{ Simple: Simple{I: 3}, SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"}, X: "ss", }, }, { desc: "nested with dotted field tag", src: &pb.Entity{ Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_EntityValue{ 
EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "B.B": {ValueType: &pb.Value_StringValue{StringValue: "bb"}}, }, }, }}, }, }, want: &ABDotB{ A: BDotB{ B: "bb", }, }, }, { desc: "nested entity with key", src: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}}, "N": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Key: keyToProto(testKey1a), Properties: map[string]*pb.Value{ "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, }, }, }}, }, }, want: &NestedWithKey{ Y: "yyy", N: WithKey{ X: "two", I: 2, K: testKey1a, }, }, }, { desc: "nested entity with invalid key", src: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}}, "N": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Key: keyToProto(invalidKey), Properties: map[string]*pb.Value{ "X": {ValueType: &pb.Value_StringValue{StringValue: "two"}}, "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, }, }, }}, }, }, want: &NestedWithKey{ Y: "yyy", N: WithKey{ X: "two", I: 2, K: invalidKey, }, }, }, } for _, tc := range testCases { dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() err := loadEntityProto(dst, tc.src) if err != nil { t.Errorf("loadEntityProto: %s: %v", tc.desc, err) continue } if !reflect.DeepEqual(tc.want, dst) { t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want) } } } type NestedStructPtrs struct { *SimpleTwoFields Nest *SimpleTwoFields TwiceNest *NestedSimple2 I int } type NestedSimple2 struct { A *Simple I int } func TestAlreadyPopulatedDst(t *testing.T) { testCases := []struct { desc string src *pb.Entity dst interface{} want interface{} }{ { desc: "simple already populated, nil properties", src: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "I": {ValueType: 
&pb.Value_NullValue{}}, }, }, dst: &Simple{ I: 12, }, want: &Simple{}, }, { desc: "nested structs already populated", src: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "SS": {ValueType: &pb.Value_StringValue{StringValue: "world"}}, }, }, dst: &SimpleTwoFields{S: "hello" /* SS: "" */}, want: &SimpleTwoFields{S: "hello", SS: "world"}, }, { desc: "nested structs already populated, pValues nil", src: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "S": {ValueType: &pb.Value_NullValue{}}, "SS": {ValueType: &pb.Value_StringValue{StringValue: "ss hello"}}, "Nest": {ValueType: &pb.Value_NullValue{}}, "TwiceNest": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_NullValue{}}, "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}}, }, }, }}, "I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 5}}, }, }, dst: &NestedStructPtrs{ &SimpleTwoFields{S: "hello" /* SS: "" */}, &SimpleTwoFields{ /* S: "" */ SS: "twice hello"}, &NestedSimple2{ A: &Simple{I: 2}, /* I: 0 */ }, 0, }, want: &NestedStructPtrs{ &SimpleTwoFields{ /* S: "" */ SS: "ss hello"}, nil, &NestedSimple2{ /* A: nil, */ I: 2, }, 5, }, }, } for _, tc := range testCases { err := loadEntityProto(tc.dst, tc.src) if err != nil { t.Errorf("loadEntityProto: %s: %v", tc.desc, err) continue } if !reflect.DeepEqual(tc.want, tc.dst) { t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, tc.dst, tc.want) } } } type PLS0 struct { A string } func (p *PLS0) Load(props []Property) error { for _, pp := range props { if pp.Name == "A" { p.A = pp.Value.(string) } } return nil } func (p *PLS0) Save() (props []Property, err error) { return []Property{{Name: "A", Value: p.A}}, nil } type KeyLoader1 struct { A string K *Key } func (kl *KeyLoader1) Load(props []Property) error { for _, pp := range props { if pp.Name == "A" { kl.A = pp.Value.(string) } } return nil } func (kl *KeyLoader1) Save() (props []Property, 
err error) { return []Property{{Name: "A", Value: kl.A}}, nil } func (kl *KeyLoader1) LoadKey(k *Key) error { kl.K = k return nil } type KeyLoader2 struct { B int Key *Key } func (kl *KeyLoader2) Load(props []Property) error { for _, pp := range props { if pp.Name == "B" { kl.B = int(pp.Value.(int64)) } } return nil } func (kl *KeyLoader2) Save() (props []Property, err error) { return []Property{{Name: "B", Value: int64(kl.B)}}, nil } func (kl *KeyLoader2) LoadKey(k *Key) error { kl.Key = k return nil } type KeyLoader3 struct { C bool K *Key } func (kl *KeyLoader3) Load(props []Property) error { for _, pp := range props { if pp.Name == "C" { kl.C = pp.Value.(bool) } } return nil } func (kl *KeyLoader3) Save() (props []Property, err error) { return []Property{{Name: "C", Value: kl.C}}, nil } func (kl *KeyLoader3) LoadKey(k *Key) error { kl.K = k return nil } type KeyLoader4 struct { PLS0 K *Key } func (kl *KeyLoader4) LoadKey(k *Key) error { kl.K = k return nil } type NotKeyLoader struct { A string K *Key } func (p *NotKeyLoader) Load(props []Property) error { for _, pp := range props { if pp.Name == "A" { p.A = pp.Value.(string) } } return nil } func (p *NotKeyLoader) Save() (props []Property, err error) { return []Property{{Name: "A", Value: p.A}}, nil } type NestedKeyLoaders struct { Two *KeyLoader2 Three []*KeyLoader3 Four *KeyLoader4 PLS *NotKeyLoader } func TestKeyLoader(t *testing.T) { testCases := []struct { desc string src *pb.Entity dst interface{} want interface{} }{ { desc: "simple key loader", src: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_StringValue{StringValue: "hello"}}, }, }, dst: &KeyLoader1{}, want: &KeyLoader1{ A: "hello", K: testKey0, }, }, { desc: "embedded PLS key loader", src: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_StringValue{StringValue: "hello"}}, }, }, dst: &KeyLoader4{}, want: &KeyLoader4{ PLS0: PLS0{A: "hello"}, K: 
testKey0, }, }, { desc: "nested key loaders", src: &pb.Entity{ Key: keyToProto(testKey0), Properties: map[string]*pb.Value{ "Two": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "B": {ValueType: &pb.Value_IntegerValue{IntegerValue: 12}}, }, Key: keyToProto(testKey1a), }, }}, "Three": {ValueType: &pb.Value_ArrayValue{ ArrayValue: &pb.ArrayValue{ Values: []*pb.Value{ {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "C": {ValueType: &pb.Value_BooleanValue{BooleanValue: true}}, }, Key: keyToProto(testKey1b), }, }}, {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "C": {ValueType: &pb.Value_BooleanValue{BooleanValue: false}}, }, Key: keyToProto(testKey0), }, }}, }, }, }}, "Four": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_StringValue{StringValue: "testing"}}, }, Key: keyToProto(testKey2a), }, }}, "PLS": {ValueType: &pb.Value_EntityValue{ EntityValue: &pb.Entity{ Properties: map[string]*pb.Value{ "A": {ValueType: &pb.Value_StringValue{StringValue: "something"}}, }, Key: keyToProto(testKey1a), }, }}, }, }, dst: &NestedKeyLoaders{}, want: &NestedKeyLoaders{ Two: &KeyLoader2{B: 12, Key: testKey1a}, Three: []*KeyLoader3{ { C: true, K: testKey1b, }, { C: false, K: testKey0, }, }, Four: &KeyLoader4{ PLS0: PLS0{A: "testing"}, K: testKey2a, }, PLS: &NotKeyLoader{A: "something"}, }, }, } for _, tc := range testCases { err := loadEntityProto(tc.dst, tc.src) if err != nil { t.Errorf("loadEntityProto: %s: %v", tc.desc, err) continue } if !reflect.DeepEqual(tc.want, tc.dst) { t.Errorf("%s: compare:\ngot: %+v\nwant: %+v", tc.desc, tc.dst, tc.want) } } } golang-google-cloud-0.9.0/datastore/prop.go000066400000000000000000000231561312234511600206470ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "fmt" "reflect" "strings" "unicode" "cloud.google.com/go/internal/fields" ) // Entities with more than this many indexed properties will not be saved. const maxIndexedProperties = 20000 // []byte fields more than 1 megabyte long will not be loaded or saved. const maxBlobLen = 1 << 20 // Property is a name/value pair plus some metadata. A datastore entity's // contents are loaded and saved as a sequence of Properties. Each property // name must be unique within an entity. type Property struct { // Name is the property name. Name string // Value is the property value. The valid types are: // - int64 // - bool // - string // - float64 // - *Key // - time.Time // - GeoPoint // - []byte (up to 1 megabyte in length) // - *Entity (representing a nested struct) // Value can also be: // - []interface{} where each element is one of the above types // This set is smaller than the set of valid struct field types that the // datastore can load and save. A Value's type must be explicitly on // the list above; it is not sufficient for the underlying type to be // on that list. For example, a Value of "type myInt64 int64" is // invalid. Smaller-width integers and floats are also invalid. Again, // this is more restrictive than the set of valid struct field types. // // A Value will have an opaque type when loading entities from an index, // such as via a projection query. 
Load entities into a struct instead // of a PropertyLoadSaver when using a projection query. // // A Value may also be the nil interface value; this is equivalent to // Python's None but not directly representable by a Go struct. Loading // a nil-valued property into a struct will set that field to the zero // value. Value interface{} // NoIndex is whether the datastore cannot index this property. // If NoIndex is set to false, []byte and string values are limited to // 1500 bytes. NoIndex bool } // An Entity is the value type for a nested struct. // This type is only used for a Property's Value. type Entity struct { Key *Key Properties []Property } // PropertyLoadSaver can be converted from and to a slice of Properties. type PropertyLoadSaver interface { Load([]Property) error Save() ([]Property, error) } // KeyLoader can store a Key. type KeyLoader interface { // PropertyLoadSaver is embedded because a KeyLoader // must also always implement PropertyLoadSaver. PropertyLoadSaver LoadKey(k *Key) error } // PropertyList converts a []Property to implement PropertyLoadSaver. type PropertyList []Property var ( typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem() typeOfPropertyList = reflect.TypeOf(PropertyList(nil)) ) // Load loads all of the provided properties into l. // It does not first reset *l to an empty slice. func (l *PropertyList) Load(p []Property) error { *l = append(*l, p...) return nil } // Save saves all of l's properties as a slice of Properties. func (l *PropertyList) Save() ([]Property, error) { return *l, nil } // validPropertyName returns whether name consists of one or more valid Go // identifiers joined by ".". 
func validPropertyName(name string) bool { if name == "" { return false } for _, s := range strings.Split(name, ".") { if s == "" { return false } first := true for _, c := range s { if first { first = false if c != '_' && !unicode.IsLetter(c) { return false } } else { if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) { return false } } } } return true } // parseTag interprets datastore struct field tags func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { s := t.Get("datastore") parts := strings.Split(s, ",") if parts[0] == "-" && len(parts) == 1 { return "", false, nil, nil } if parts[0] != "" && !validPropertyName(parts[0]) { err = fmt.Errorf("datastore: struct tag has invalid property name: %q", parts[0]) return "", false, nil, err } var opts saveOpts if len(parts) > 1 { for _, p := range parts[1:] { switch p { case "flatten": opts.flatten = true case "omitempty": opts.omitEmpty = true case "noindex": opts.noIndex = true default: err = fmt.Errorf("datastore: struct tag has invalid option: %q", p) return "", false, nil, err } } other = opts } return parts[0], true, other, nil } func validateType(t reflect.Type) error { if t.Kind() != reflect.Struct { return fmt.Errorf("datastore: validate called with non-struct type %s", t) } return validateChildType(t, "", false, false, map[reflect.Type]bool{}) } // validateChildType is a recursion helper func for validateType func validateChildType(t reflect.Type, fieldName string, flatten, prevSlice bool, prevTypes map[reflect.Type]bool) error { if prevTypes[t] { return nil } prevTypes[t] = true switch t.Kind() { case reflect.Slice: if flatten && prevSlice { return fmt.Errorf("datastore: flattening nested structs leads to a slice of slices: field %q", fieldName) } return validateChildType(t.Elem(), fieldName, flatten, true, prevTypes) case reflect.Struct: if t == typeOfTime || t == typeOfGeoPoint { return nil } for i := 0; i < t.NumField(); i++ { f := t.Field(i) // If a named 
field is unexported, ignore it. An anonymous // unexported field is processed, because it may contain // exported fields, which are visible. exported := (f.PkgPath == "") if !exported && !f.Anonymous { continue } _, keep, other, err := parseTag(f.Tag) // Handle error from parseTag now instead of later (in cache.Fields call). if err != nil { return err } if !keep { continue } if other != nil { opts := other.(saveOpts) flatten = flatten || opts.flatten } if err := validateChildType(f.Type, f.Name, flatten, prevSlice, prevTypes); err != nil { return err } } case reflect.Ptr: if t == typeOfKeyPtr { return nil } return validateChildType(t.Elem(), fieldName, flatten, prevSlice, prevTypes) } return nil } // isLeafType determines whether or not a type is a 'leaf type' // and should not be recursed into, but considered one field. func isLeafType(t reflect.Type) bool { return t == typeOfTime || t == typeOfGeoPoint } // structCache collects the structs whose fields have already been calculated. var structCache = fields.NewCache(parseTag, validateType, isLeafType) // structPLS adapts a struct to be a PropertyLoadSaver. type structPLS struct { v reflect.Value codec fields.List } // newStructPLS returns a structPLS, which implements the // PropertyLoadSaver interface, for the struct pointer p. func newStructPLS(p interface{}) (*structPLS, error) { v := reflect.ValueOf(p) if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { return nil, ErrInvalidEntityType } v = v.Elem() f, err := structCache.Fields(v.Type()) if err != nil { return nil, err } return &structPLS{v, f}, nil } // LoadStruct loads the properties from p to dst. // dst must be a struct pointer. // // The values of dst's unmatched struct fields are not modified, // and matching slice-typed fields are not reset before appending to // them. In particular, it is recommended to pass a pointer to a zero // valued struct on each LoadStruct call. 
func LoadStruct(dst interface{}, p []Property) error { x, err := newStructPLS(dst) if err != nil { return err } return x.Load(p) } // SaveStruct returns the properties from src as a slice of Properties. // src must be a struct pointer. func SaveStruct(src interface{}) ([]Property, error) { x, err := newStructPLS(src) if err != nil { return nil, err } return x.Save() } // plsForLoad tries to convert v to a PropertyLoadSaver. // If successful, plsForLoad returns a settable v as a PropertyLoadSaver. // // plsForLoad is intended to be used with nested struct fields which // may implement PropertyLoadSaver. // // v must be settable. func plsForLoad(v reflect.Value) (PropertyLoadSaver, error) { var nilPtr bool if v.Kind() == reflect.Ptr && v.IsNil() { nilPtr = true v.Set(reflect.New(v.Type().Elem())) } vpls, err := pls(v) if nilPtr && (vpls == nil || err != nil) { // unset v v.Set(reflect.Zero(v.Type())) } return vpls, err } // plsForSave tries to convert v to a PropertyLoadSaver. // If successful, plsForSave returns v as a PropertyLoadSaver. // // plsForSave is intended to be used with nested struct fields which // may implement PropertyLoadSaver. // // v must be settable. func plsForSave(v reflect.Value) (PropertyLoadSaver, error) { switch v.Kind() { case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface, reflect.Chan, reflect.Func: // If v is nil, return early. v contains no data to save. if v.IsNil() { return nil, nil } } return pls(v) } func pls(v reflect.Value) (PropertyLoadSaver, error) { if v.Kind() != reflect.Ptr { if _, ok := v.Interface().(PropertyLoadSaver); ok { return nil, fmt.Errorf("datastore: PropertyLoadSaver methods must be implemented on a pointer to %T.", v.Interface()) } v = v.Addr() } vpls, _ := v.Interface().(PropertyLoadSaver) return vpls, nil } golang-google-cloud-0.9.0/datastore/query.go000066400000000000000000000541531312234511600210350ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "encoding/base64" "errors" "fmt" "math" "reflect" "strconv" "strings" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "golang.org/x/net/context" "google.golang.org/api/iterator" pb "google.golang.org/genproto/googleapis/datastore/v1" ) type operator int const ( lessThan operator = iota + 1 lessEq equal greaterEq greaterThan keyFieldName = "__key__" ) var operatorToProto = map[operator]pb.PropertyFilter_Operator{ lessThan: pb.PropertyFilter_LESS_THAN, lessEq: pb.PropertyFilter_LESS_THAN_OR_EQUAL, equal: pb.PropertyFilter_EQUAL, greaterEq: pb.PropertyFilter_GREATER_THAN_OR_EQUAL, greaterThan: pb.PropertyFilter_GREATER_THAN, } // filter is a conditional filter on query results. type filter struct { FieldName string Op operator Value interface{} } type sortDirection bool const ( ascending sortDirection = false descending sortDirection = true ) var sortDirectionToProto = map[sortDirection]pb.PropertyOrder_Direction{ ascending: pb.PropertyOrder_ASCENDING, descending: pb.PropertyOrder_DESCENDING, } // order is a sort order on query results. type order struct { FieldName string Direction sortDirection } // NewQuery creates a new Query for a specific entity kind. // // An empty kind means to return all entities, including entities created and // managed by other App Engine features, and is called a kindless query. // Kindless queries cannot include filters or sort orders on property values. 
func NewQuery(kind string) *Query { return &Query{ kind: kind, limit: -1, } } // Query represents a datastore query. type Query struct { kind string ancestor *Key filter []filter order []order projection []string distinct bool distinctOn []string keysOnly bool eventual bool limit int32 offset int32 start []byte end []byte namespace string trans *Transaction err error } func (q *Query) clone() *Query { x := *q // Copy the contents of the slice-typed fields to a new backing store. if len(q.filter) > 0 { x.filter = make([]filter, len(q.filter)) copy(x.filter, q.filter) } if len(q.order) > 0 { x.order = make([]order, len(q.order)) copy(x.order, q.order) } return &x } // Ancestor returns a derivative query with an ancestor filter. // The ancestor should not be nil. func (q *Query) Ancestor(ancestor *Key) *Query { q = q.clone() if ancestor == nil { q.err = errors.New("datastore: nil query ancestor") return q } q.ancestor = ancestor return q } // EventualConsistency returns a derivative query that returns eventually // consistent results. // It only has an effect on ancestor queries. func (q *Query) EventualConsistency() *Query { q = q.clone() q.eventual = true return q } // Namespace returns a derivative query that is associated with the given // namespace. // // A namespace may be used to partition data for multi-tenant applications. // For details, see https://cloud.google.com/datastore/docs/concepts/multitenancy. func (q *Query) Namespace(ns string) *Query { q = q.clone() q.namespace = ns return q } // Transaction returns a derivative query that is associated with the given // transaction. // // All reads performed as part of the transaction will come from a single // consistent snapshot. Furthermore, if the transaction is set to a // serializable isolation level, another transaction cannot concurrently modify // the data that is read or modified by this transaction. 
func (q *Query) Transaction(t *Transaction) *Query { q = q.clone() q.trans = t return q } // Filter returns a derivative query with a field-based filter. // The filterStr argument must be a field name followed by optional space, // followed by an operator, one of ">", "<", ">=", "<=", or "=". // Fields are compared against the provided value using the operator. // Multiple filters are AND'ed together. // Field names which contain spaces, quote marks, or operator characters // should be passed as quoted Go string literals as returned by strconv.Quote // or the fmt package's %q verb. func (q *Query) Filter(filterStr string, value interface{}) *Query { q = q.clone() filterStr = strings.TrimSpace(filterStr) if filterStr == "" { q.err = fmt.Errorf("datastore: invalid filter %q", filterStr) return q } f := filter{ FieldName: strings.TrimRight(filterStr, " ><=!"), Value: value, } switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op { case "<=": f.Op = lessEq case ">=": f.Op = greaterEq case "<": f.Op = lessThan case ">": f.Op = greaterThan case "=": f.Op = equal default: q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr) return q } var err error f.FieldName, err = unquote(f.FieldName) if err != nil { q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", f.FieldName) return q } q.filter = append(q.filter, f) return q } // Order returns a derivative query with a field-based sort order. Orders are // applied in the order they are added. The default order is ascending; to sort // in descending order prefix the fieldName with a minus sign (-). // Field names which contain spaces, quote marks, or the minus sign // should be passed as quoted Go string literals as returned by strconv.Quote // or the fmt package's %q verb. 
func (q *Query) Order(fieldName string) *Query { q = q.clone() fieldName, dir := strings.TrimSpace(fieldName), ascending if strings.HasPrefix(fieldName, "-") { fieldName, dir = strings.TrimSpace(fieldName[1:]), descending } else if strings.HasPrefix(fieldName, "+") { q.err = fmt.Errorf("datastore: invalid order: %q", fieldName) return q } fieldName, err := unquote(fieldName) if err != nil { q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", fieldName) return q } if fieldName == "" { q.err = errors.New("datastore: empty order") return q } q.order = append(q.order, order{ Direction: dir, FieldName: fieldName, }) return q } // unquote optionally interprets s as a double-quoted or backquoted Go // string literal if it begins with the relevant character. func unquote(s string) (string, error) { if s == "" || (s[0] != '`' && s[0] != '"') { return s, nil } return strconv.Unquote(s) } // Project returns a derivative query that yields only the given fields. It // cannot be used with KeysOnly. func (q *Query) Project(fieldNames ...string) *Query { q = q.clone() q.projection = append([]string(nil), fieldNames...) return q } // Distinct returns a derivative query that yields de-duplicated entities with // respect to the set of projected fields. It is only used for projection // queries. Distinct cannot be used with DistinctOn. func (q *Query) Distinct() *Query { q = q.clone() q.distinct = true return q } // DistinctOn returns a derivative query that yields de-duplicated entities with // respect to the set of the specified fields. It is only used for projection // queries. The field list should be a subset of the projected field list. // DistinctOn cannot be used with Distinct. func (q *Query) DistinctOn(fieldNames ...string) *Query { q = q.clone() q.distinctOn = fieldNames return q } // KeysOnly returns a derivative query that yields only keys, not keys and // entities. It cannot be used with projection queries. 
func (q *Query) KeysOnly() *Query { q = q.clone() q.keysOnly = true return q } // Limit returns a derivative query that has a limit on the number of results // returned. A negative value means unlimited. func (q *Query) Limit(limit int) *Query { q = q.clone() if limit < math.MinInt32 || limit > math.MaxInt32 { q.err = errors.New("datastore: query limit overflow") return q } q.limit = int32(limit) return q } // Offset returns a derivative query that has an offset of how many keys to // skip over before returning results. A negative value is invalid. func (q *Query) Offset(offset int) *Query { q = q.clone() if offset < 0 { q.err = errors.New("datastore: negative query offset") return q } if offset > math.MaxInt32 { q.err = errors.New("datastore: query offset overflow") return q } q.offset = int32(offset) return q } // Start returns a derivative query with the given start point. func (q *Query) Start(c Cursor) *Query { q = q.clone() q.start = c.cc return q } // End returns a derivative query with the given end point. func (q *Query) End(c Cursor) *Query { q = q.clone() q.end = c.cc return q } // toProto converts the query to a protocol buffer. 
func (q *Query) toProto(req *pb.RunQueryRequest) error {
	// Projection and keys-only are mutually exclusive: keys-only is itself
	// implemented below as a projection on the key pseudo-field.
	if len(q.projection) != 0 && q.keysOnly {
		return errors.New("datastore: query cannot both project and be keys-only")
	}
	if len(q.distinctOn) != 0 && q.distinct {
		return errors.New("datastore: query cannot be both distinct and distinct-on")
	}
	dst := &pb.Query{}
	if q.kind != "" {
		dst.Kind = []*pb.KindExpression{{Name: q.kind}}
	}
	if q.projection != nil {
		for _, propertyName := range q.projection {
			dst.Projection = append(dst.Projection, &pb.Projection{Property: &pb.PropertyReference{Name: propertyName}})
		}

		for _, propertyName := range q.distinctOn {
			dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName})
		}

		// Distinct() (as opposed to DistinctOn) means de-duplicate on every
		// projected field.
		if q.distinct {
			for _, propertyName := range q.projection {
				dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName})
			}
		}
	}
	if q.keysOnly {
		dst.Projection = []*pb.Projection{{Property: &pb.PropertyReference{Name: keyFieldName}}}
	}

	var filters []*pb.Filter
	for _, qf := range q.filter {
		if qf.FieldName == "" {
			return errors.New("datastore: empty query filter field name")
		}
		v, err := interfaceToProto(reflect.ValueOf(qf.Value).Interface(), false)
		if err != nil {
			return fmt.Errorf("datastore: bad query filter value type: %v", err)
		}
		op, ok := operatorToProto[qf.Op]
		if !ok {
			return errors.New("datastore: unknown query filter operator")
		}
		xf := &pb.PropertyFilter{
			Op:       op,
			Property: &pb.PropertyReference{Name: qf.FieldName},
			Value:    v,
		}
		filters = append(filters, &pb.Filter{
			FilterType: &pb.Filter_PropertyFilter{PropertyFilter: xf},
		})
	}

	// An ancestor constraint is expressed as an extra HAS_ANCESTOR property
	// filter on the key pseudo-field.
	if q.ancestor != nil {
		filters = append(filters, &pb.Filter{
			FilterType: &pb.Filter_PropertyFilter{PropertyFilter: &pb.PropertyFilter{
				Property: &pb.PropertyReference{Name: keyFieldName},
				Op:       pb.PropertyFilter_HAS_ANCESTOR,
				Value:    &pb.Value{ValueType: &pb.Value_KeyValue{KeyValue: keyToProto(q.ancestor)}},
			}}})
	}

	// A single filter is sent bare; multiple filters are ANDed together in a
	// composite filter.
	if len(filters) == 1 {
		dst.Filter = filters[0]
	} else if len(filters) > 1 {
		dst.Filter = &pb.Filter{FilterType: &pb.Filter_CompositeFilter{CompositeFilter: &pb.CompositeFilter{
			Op:      pb.CompositeFilter_AND,
			Filters: filters,
		}}}
	}

	for _, qo := range q.order {
		if qo.FieldName == "" {
			return errors.New("datastore: empty query order field name")
		}
		xo := &pb.PropertyOrder{
			Property:  &pb.PropertyReference{Name: qo.FieldName},
			Direction: sortDirectionToProto[qo.Direction],
		}
		dst.Order = append(dst.Order, xo)
	}

	// A negative limit means unlimited, which is expressed by omitting the
	// Limit wrapper entirely.
	if q.limit >= 0 {
		dst.Limit = &wrapperspb.Int32Value{Value: q.limit}
	}
	dst.Offset = q.offset
	dst.StartCursor = q.start
	dst.EndCursor = q.end

	if t := q.trans; t != nil {
		if t.id == nil {
			return errExpiredTransaction
		}
		if q.eventual {
			return errors.New("datastore: cannot use EventualConsistency query in a transaction")
		}
		req.ReadOptions = &pb.ReadOptions{
			ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id},
		}
	}

	if q.eventual {
		req.ReadOptions = &pb.ReadOptions{ConsistencyType: &pb.ReadOptions_ReadConsistency_{ReadConsistency: pb.ReadOptions_EVENTUAL}}
	}

	req.QueryType = &pb.RunQueryRequest_Query{Query: dst}
	return nil
}

// Count returns the number of results for the given query.
//
// The running time and number of API calls made by Count scale linearly with
// the sum of the query's offset and limit. Unless the result count is
// expected to be small, it is best to specify a limit; otherwise Count will
// continue until it finishes counting or the provided context expires.
func (c *Client) Count(ctx context.Context, q *Query) (int, error) {
	// Check that the query is well-formed.
	if q.err != nil {
		return 0, q.err
	}

	// Create a copy of the query, with keysOnly true (if we're not a projection,
	// since the two are incompatible).
	newQ := q.clone()
	newQ.keysOnly = len(newQ.projection) == 0

	// Create an iterator and use it to walk through the batches of results
	// directly. Only batch sizes are needed, so entities are never decoded.
	it := c.Run(ctx, newQ)
	n := 0
	for {
		err := it.nextBatch()
		if err == iterator.Done {
			return n, nil
		}
		if err != nil {
			return 0, err
		}
		n += len(it.results)
	}
}

// GetAll runs the provided query in the given context and returns all keys
// that match that query, as well as appending the values to dst.
//
// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
//
// As a special case, *PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when *[]PropertyList was intended.
//
// The keys returned by GetAll will be in a 1-1 correspondence with the entities
// added to dst.
//
// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
//
// The running time and number of API calls made by GetAll scale linearly with
// the sum of the query's offset and limit. Unless the result count is
// expected to be small, it is best to specify a limit; otherwise GetAll will
// continue until it finishes collecting results or the provided context
// expires.
func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) ([]*Key, error) {
	var (
		dv               reflect.Value
		mat              multiArgType
		elemType         reflect.Type
		errFieldMismatch error
	)
	// For keys-only queries, dst is ignored and never validated.
	if !q.keysOnly {
		dv = reflect.ValueOf(dst)
		if dv.Kind() != reflect.Ptr || dv.IsNil() {
			return nil, ErrInvalidEntityType
		}
		dv = dv.Elem()
		mat, elemType = checkMultiArg(dv)
		if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
			return nil, ErrInvalidEntityType
		}
	}

	var keys []*Key
	for t := c.Run(ctx, q); ; {
		k, e, err := t.next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return keys, err
		}
		if !q.keysOnly {
			ev := reflect.New(elemType)
			if elemType.Kind() == reflect.Map {
				// This is a special case. The zero values of a map type are
				// not immediately useful; they have to be make'd.
				//
				// Funcs and channels are similar, in that a zero value is not useful,
				// but even a freshly make'd channel isn't useful: there's no fixed
				// channel buffer size that is always going to be large enough, and
				// there's no goroutine to drain the other end. Theoretically, these
				// types could be supported, for example by sniffing for a constructor
				// method or requiring prior registration, but for now it's not a
				// frequent enough concern to be worth it. Programmers can work around
				// it by explicitly using Iterator.Next instead of the Query.GetAll
				// convenience method.
				x := reflect.MakeMap(elemType)
				ev.Elem().Set(x)
			}
			if err = loadEntityProto(ev.Interface(), e); err != nil {
				if _, ok := err.(*ErrFieldMismatch); ok {
					// We continue loading entities even in the face of field mismatch errors.
					// If we encounter any other error, that other error is returned. Otherwise,
					// an ErrFieldMismatch is returned.
					errFieldMismatch = err
				} else {
					return keys, err
				}
			}
			if mat != multiArgTypeStructPtr {
				ev = ev.Elem()
			}
			dv.Set(reflect.Append(dv, ev))
		}
		keys = append(keys, k)
	}
	return keys, errFieldMismatch
}

// Run runs the given query in the given context.
func (c *Client) Run(ctx context.Context, q *Query) *Iterator {
	if q.err != nil {
		return &Iterator{err: q.err}
	}
	t := &Iterator{
		ctx:      ctx,
		client:   c,
		limit:    q.limit,
		offset:   q.offset,
		keysOnly: q.keysOnly,
		// Both cursors start at the query's start cursor; they diverge as
		// batches are fetched and results consumed.
		pageCursor:   q.start,
		entityCursor: q.start,
		req: &pb.RunQueryRequest{
			ProjectId: c.dataset,
		},
	}
	if q.namespace != "" {
		t.req.PartitionId = &pb.PartitionId{
			NamespaceId: q.namespace,
		}
	}
	// A malformed query surfaces through the iterator's error field rather
	// than an error return, so callers see it on the first Next call.
	if err := q.toProto(t.req); err != nil {
		t.err = err
	}
	return t
}

// Iterator is the result of running a query.
type Iterator struct {
	ctx    context.Context
	client *Client
	err    error

	// results is the list of EntityResults still to be iterated over from the
	// most recent API call. It will be nil if no requests have yet been issued.
	results []*pb.EntityResult
	// req is the request to send. It may be modified and used multiple times.
	req *pb.RunQueryRequest

	// limit is the limit on the number of results this iterator should return.
	// The zero value is used to prevent further fetches from the server.
	// A negative value means unlimited.
	limit int32
	// offset is the number of results that still need to be skipped.
	offset int32
	// keysOnly records whether the query was keys-only (skip entity loading).
	keysOnly bool

	// pageCursor is the compiled cursor for the next batch/page of result.
	// TODO(djd): Can we delete this in favour of paging with the last
	// entityCursor from each batch?
	pageCursor []byte
	// entityCursor is the compiled cursor of the next result.
	entityCursor []byte
}

// Next returns the key of the next result. When there are no more results,
// iterator.Done is returned as the error.
//
// If the query is not keys only and dst is non-nil, it also loads the entity
// stored for that key into the struct pointer or PropertyLoadSaver dst, with
// the same semantics and possible errors as for the Get function.
func (t *Iterator) Next(dst interface{}) (*Key, error) {
	k, e, err := t.next()
	if err != nil {
		return nil, err
	}
	if dst != nil && !t.keysOnly {
		err = loadEntityProto(dst, e)
	}
	return k, err
}

// next returns the next result's key and raw entity protobuf, fetching a new
// batch from the server whenever the current one is exhausted. Once t.err is
// set (including iterator.Done) it is sticky and returned on every call.
func (t *Iterator) next() (*Key, *pb.Entity, error) {
	// Fetch additional batches while there are no more results.
	for t.err == nil && len(t.results) == 0 {
		t.err = t.nextBatch()
	}
	if t.err != nil {
		return nil, nil, t.err
	}

	// Extract the next result, update cursors, and parse the entity's key.
	e := t.results[0]
	t.results = t.results[1:]
	t.entityCursor = e.Cursor
	if len(t.results) == 0 {
		t.entityCursor = t.pageCursor // At the end of the batch.
	}
	if e.Entity.Key == nil {
		return nil, nil, errors.New("datastore: internal error: server did not return a key")
	}
	k, err := protoToKey(e.Entity.Key)
	if err != nil || k.Incomplete() {
		return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
	}
	return k, e.Entity, nil
}

// nextBatch makes a single call to the server for a batch of results.
func (t *Iterator) nextBatch() error {
	if t.limit == 0 {
		return iterator.Done // Short-circuits the zero-item response.
	}

	// Adjust the query with the latest start cursor, limit and offset.
	q := t.req.GetQuery()
	q.StartCursor = t.pageCursor
	q.Offset = t.offset
	if t.limit >= 0 {
		q.Limit = &wrapperspb.Int32Value{Value: t.limit}
	} else {
		q.Limit = nil
	}

	// Run the query.
	resp, err := t.client.client.RunQuery(t.ctx, t.req)
	if err != nil {
		return err
	}

	// Adjust any offset from skipped results. The server may skip fewer
	// results per batch than requested, but never more.
	skip := resp.Batch.SkippedResults
	if skip < 0 {
		return errors.New("datastore: internal error: negative number of skipped_results")
	}
	t.offset -= skip
	if t.offset < 0 {
		return errors.New("datastore: internal error: query skipped too many results")
	}
	if t.offset > 0 && len(resp.Batch.EntityResults) > 0 {
		return errors.New("datastore: internal error: query returned results before requested offset")
	}

	// Adjust the limit.
	if t.limit >= 0 {
		t.limit -= int32(len(resp.Batch.EntityResults))
		if t.limit < 0 {
			return errors.New("datastore: internal error: query returned more results than the limit")
		}
	}

	// If there are no more results available, set limit to zero to prevent
	// further fetches. Otherwise, check that there is a next page cursor available.
	if resp.Batch.MoreResults != pb.QueryResultBatch_NOT_FINISHED {
		t.limit = 0
	} else if resp.Batch.EndCursor == nil {
		return errors.New("datastore: internal error: server did not return a cursor")
	}

	// Update cursors.
	// If any results were skipped, use the SkippedCursor as the next entity cursor.
	// Otherwise the next entity is the first of the batch, at its start cursor.
	if skip > 0 {
		t.entityCursor = resp.Batch.SkippedCursor
	} else {
		t.entityCursor = q.StartCursor
	}
	t.pageCursor = resp.Batch.EndCursor

	t.results = resp.Batch.EntityResults
	return nil
}

// Cursor returns a cursor for the iterator's current location.
func (t *Iterator) Cursor() (Cursor, error) {
	// If there is still an offset, we need to the skip those results first.
	for t.err == nil && t.offset > 0 {
		t.err = t.nextBatch()
	}
	if t.err != nil && t.err != iterator.Done {
		return Cursor{}, t.err
	}
	return Cursor{t.entityCursor}, nil
}

// Cursor is an iterator's position. It can be converted to and from an opaque
// string. A cursor can be used from different HTTP requests, but only with a
// query with the same kind, ancestor, filter and order constraints.
//
// The zero Cursor can be used to indicate that there is no start and/or end
// constraint for a query.
type Cursor struct {
	cc []byte
}

// String returns a base-64 string representation of a cursor.
func (c Cursor) String() string {
	if c.cc == nil {
		return ""
	}
	// Padding is stripped; DecodeCursor restores it.
	return strings.TrimRight(base64.URLEncoding.EncodeToString(c.cc), "=")
}

// Decode decodes a cursor from its base-64 string representation.
func DecodeCursor(s string) (Cursor, error) {
	if s == "" {
		return Cursor{}, nil
	}
	// Re-pad the unpadded base-64 produced by Cursor.String.
	if n := len(s) % 4; n != 0 {
		s += strings.Repeat("=", 4-n)
	}
	b, err := base64.URLEncoding.DecodeString(s)
	if err != nil {
		return Cursor{}, err
	}
	return Cursor{b}, nil
}
golang-google-cloud-0.9.0/datastore/query_test.go000066400000000000000000000331771312234511600220750ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "errors" "fmt" "reflect" "sort" "testing" "github.com/golang/protobuf/proto" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/datastore/v1" "google.golang.org/grpc" ) var ( key1 = &pb.Key{ Path: []*pb.Key_PathElement{ { Kind: "Gopher", IdType: &pb.Key_PathElement_Id{Id: 6}, }, }, } key2 = &pb.Key{ Path: []*pb.Key_PathElement{ { Kind: "Gopher", IdType: &pb.Key_PathElement_Id{Id: 6}, }, { Kind: "Gopher", IdType: &pb.Key_PathElement_Id{Id: 8}, }, }, } ) type fakeClient struct { pb.DatastoreClient queryFn func(*pb.RunQueryRequest) (*pb.RunQueryResponse, error) commitFn func(*pb.CommitRequest) (*pb.CommitResponse, error) } func (c *fakeClient) RunQuery(_ context.Context, req *pb.RunQueryRequest, _ ...grpc.CallOption) (*pb.RunQueryResponse, error) { return c.queryFn(req) } func (c *fakeClient) Commit(_ context.Context, req *pb.CommitRequest, _ ...grpc.CallOption) (*pb.CommitResponse, error) { return c.commitFn(req) } func fakeRunQuery(in *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { expectedIn := &pb.RunQueryRequest{ QueryType: &pb.RunQueryRequest_Query{Query: &pb.Query{ Kind: []*pb.KindExpression{{Name: "Gopher"}}, }}, } if !proto.Equal(in, expectedIn) { return nil, fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn) } return &pb.RunQueryResponse{ Batch: &pb.QueryResultBatch{ MoreResults: pb.QueryResultBatch_NO_MORE_RESULTS, EntityResultType: pb.EntityResult_FULL, EntityResults: []*pb.EntityResult{ { Entity: &pb.Entity{ Key: key1, Properties: map[string]*pb.Value{ "Name": {ValueType: 
&pb.Value_StringValue{StringValue: "George"}}, "Height": {ValueType: &pb.Value_IntegerValue{IntegerValue: 32}}, }, }, }, { Entity: &pb.Entity{ Key: key2, Properties: map[string]*pb.Value{ "Name": {ValueType: &pb.Value_StringValue{StringValue: "Rufus"}}, // No height for Rufus. }, }, }, }, }, }, nil } type StructThatImplementsPLS struct{} func (StructThatImplementsPLS) Load(p []Property) error { return nil } func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil } var _ PropertyLoadSaver = StructThatImplementsPLS{} type StructPtrThatImplementsPLS struct{} func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil } func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil } var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{} type PropertyMap map[string]Property func (m PropertyMap) Load(props []Property) error { for _, p := range props { m[p.Name] = p } return nil } func (m PropertyMap) Save() ([]Property, error) { props := make([]Property, 0, len(m)) for _, p := range m { props = append(props, p) } return props, nil } var _ PropertyLoadSaver = PropertyMap{} type Gopher struct { Name string Height int } // typeOfEmptyInterface is the type of interface{}, but we can't use // reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an // interface{}. var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem() func TestCheckMultiArg(t *testing.T) { testCases := []struct { v interface{} mat multiArgType elemType reflect.Type }{ // Invalid cases. {nil, multiArgTypeInvalid, nil}, {Gopher{}, multiArgTypeInvalid, nil}, {&Gopher{}, multiArgTypeInvalid, nil}, {PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case. {PropertyMap{}, multiArgTypeInvalid, nil}, {[]*PropertyList(nil), multiArgTypeInvalid, nil}, {[]*PropertyMap(nil), multiArgTypeInvalid, nil}, {[]**Gopher(nil), multiArgTypeInvalid, nil}, {[]*interface{}(nil), multiArgTypeInvalid, nil}, // Valid cases. 
{ []PropertyList(nil), multiArgTypePropertyLoadSaver, reflect.TypeOf(PropertyList{}), }, { []PropertyMap(nil), multiArgTypePropertyLoadSaver, reflect.TypeOf(PropertyMap{}), }, { []StructThatImplementsPLS(nil), multiArgTypePropertyLoadSaver, reflect.TypeOf(StructThatImplementsPLS{}), }, { []StructPtrThatImplementsPLS(nil), multiArgTypePropertyLoadSaver, reflect.TypeOf(StructPtrThatImplementsPLS{}), }, { []Gopher(nil), multiArgTypeStruct, reflect.TypeOf(Gopher{}), }, { []*Gopher(nil), multiArgTypeStructPtr, reflect.TypeOf(Gopher{}), }, { []interface{}(nil), multiArgTypeInterface, typeOfEmptyInterface, }, } for _, tc := range testCases { mat, elemType := checkMultiArg(reflect.ValueOf(tc.v)) if mat != tc.mat || elemType != tc.elemType { t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v", tc.v, mat, elemType, tc.mat, tc.elemType) } } } func TestSimpleQuery(t *testing.T) { struct1 := Gopher{Name: "George", Height: 32} struct2 := Gopher{Name: "Rufus"} pList1 := PropertyList{ { Name: "Height", Value: int64(32), }, { Name: "Name", Value: "George", }, } pList2 := PropertyList{ { Name: "Name", Value: "Rufus", }, } pMap1 := PropertyMap{ "Name": Property{ Name: "Name", Value: "George", }, "Height": Property{ Name: "Height", Value: int64(32), }, } pMap2 := PropertyMap{ "Name": Property{ Name: "Name", Value: "Rufus", }, } testCases := []struct { dst interface{} want interface{} }{ // The destination must have type *[]P, *[]S or *[]*S, for some non-interface // type P such that *P implements PropertyLoadSaver, or for some struct type S. {new([]Gopher), &[]Gopher{struct1, struct2}}, {new([]*Gopher), &[]*Gopher{&struct1, &struct2}}, {new([]PropertyList), &[]PropertyList{pList1, pList2}}, {new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}}, // Any other destination type is invalid. 
{0, nil}, {Gopher{}, nil}, {PropertyList{}, nil}, {PropertyMap{}, nil}, {[]int{}, nil}, {[]Gopher{}, nil}, {[]PropertyList{}, nil}, {new(int), nil}, {new(Gopher), nil}, {new(PropertyList), nil}, // This is a special case. {new(PropertyMap), nil}, {new([]int), nil}, {new([]map[int]int), nil}, {new([]map[string]Property), nil}, {new([]map[string]interface{}), nil}, {new([]*int), nil}, {new([]*map[int]int), nil}, {new([]*map[string]Property), nil}, {new([]*map[string]interface{}), nil}, {new([]**Gopher), nil}, {new([]*PropertyList), nil}, {new([]*PropertyMap), nil}, } for _, tc := range testCases { nCall := 0 client := &Client{ client: &fakeClient{ queryFn: func(req *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { nCall++ return fakeRunQuery(req) }, }, } ctx := context.Background() var ( expectedErr error expectedNCall int ) if tc.want == nil { expectedErr = ErrInvalidEntityType } else { expectedNCall = 1 } keys, err := client.GetAll(ctx, NewQuery("Gopher"), tc.dst) if err != expectedErr { t.Errorf("dst type %T: got error %v, want %v", tc.dst, err, expectedErr) continue } if nCall != expectedNCall { t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall) continue } if err != nil { continue } key1 := IDKey("Gopher", 6, nil) expectedKeys := []*Key{ key1, IDKey("Gopher", 8, key1), } if l1, l2 := len(keys), len(expectedKeys); l1 != l2 { t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2) continue } for i, key := range keys { if !keysEqual(key, expectedKeys[i]) { t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i]) continue } } // Make sure we sort any PropertyList items (the order is not deterministic). 
if pLists, ok := tc.dst.(*[]PropertyList); ok { for _, p := range *pLists { sort.Sort(byName(p)) } } if !reflect.DeepEqual(tc.dst, tc.want) { t.Errorf("dst type %T: Entities\ngot %+v\nwant %+v", tc.dst, tc.dst, tc.want) continue } } } // keysEqual is like (*Key).Equal, but ignores the App ID. func keysEqual(a, b *Key) bool { for a != nil && b != nil { if a.Kind != b.Kind || a.Name != b.Name || a.ID != b.ID { return false } a, b = a.Parent, b.Parent } return a == b } func TestQueriesAreImmutable(t *testing.T) { // Test that deriving q2 from q1 does not modify q1. q0 := NewQuery("foo") q1 := NewQuery("foo") q2 := q1.Offset(2) if !reflect.DeepEqual(q0, q1) { t.Errorf("q0 and q1 were not equal") } if reflect.DeepEqual(q1, q2) { t.Errorf("q1 and q2 were equal") } // Test that deriving from q4 twice does not conflict, even though // q4 has a long list of order clauses. This tests that the arrays // backed by a query's slice of orders are not shared. f := func() *Query { q := NewQuery("bar") // 47 is an ugly number that is unlikely to be near a re-allocation // point in repeated append calls. For example, it's not near a power // of 2 or a multiple of 10. for i := 0; i < 47; i++ { q = q.Order(fmt.Sprintf("x%d", i)) } return q } q3 := f().Order("y") q4 := f() q5 := q4.Order("y") q6 := q4.Order("z") if !reflect.DeepEqual(q3, q5) { t.Errorf("q3 and q5 were not equal") } if reflect.DeepEqual(q5, q6) { t.Errorf("q5 and q6 were equal") } } func TestFilterParser(t *testing.T) { testCases := []struct { filterStr string wantOK bool wantFieldName string wantOp operator }{ // Supported ops. {"x<", true, "x", lessThan}, {"x <", true, "x", lessThan}, {"x <", true, "x", lessThan}, {" x < ", true, "x", lessThan}, {"x <=", true, "x", lessEq}, {"x =", true, "x", equal}, {"x >=", true, "x", greaterEq}, {"x >", true, "x", greaterThan}, {"in >", true, "in", greaterThan}, {"in>", true, "in", greaterThan}, // Valid but (currently) unsupported ops. 
{"x!=", false, "", 0}, {"x !=", false, "", 0}, {" x != ", false, "", 0}, {"x IN", false, "", 0}, {"x in", false, "", 0}, // Invalid ops. {"x EQ", false, "", 0}, {"x lt", false, "", 0}, {"x <>", false, "", 0}, {"x >>", false, "", 0}, {"x ==", false, "", 0}, {"x =<", false, "", 0}, {"x =>", false, "", 0}, {"x !", false, "", 0}, {"x ", false, "", 0}, {"x", false, "", 0}, // Quoted and interesting field names. {"x > y =", true, "x > y", equal}, {"` x ` =", true, " x ", equal}, {`" x " =`, true, " x ", equal}, {`" \"x " =`, true, ` "x `, equal}, {`" x =`, false, "", 0}, {`" x ="`, false, "", 0}, {"` x \" =", false, "", 0}, } for _, tc := range testCases { q := NewQuery("foo").Filter(tc.filterStr, 42) if ok := q.err == nil; ok != tc.wantOK { t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK) continue } if !tc.wantOK { continue } if len(q.filter) != 1 { t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1) continue } got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42} if got != want { t.Errorf("%q: got %v, want %v", tc.filterStr, got, want) continue } } } func TestNamespaceQuery(t *testing.T) { gotNamespace := make(chan string, 1) ctx := context.Background() client := &Client{ client: &fakeClient{ queryFn: func(req *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { if part := req.PartitionId; part != nil { gotNamespace <- part.NamespaceId } else { gotNamespace <- "" } return nil, errors.New("not implemented") }, }, } var gs []Gopher client.GetAll(ctx, NewQuery("gopher"), &gs) if got, want := <-gotNamespace, ""; got != want { t.Errorf("GetAll: got namespace %q, want %q", got, want) } client.Count(ctx, NewQuery("gopher")) if got, want := <-gotNamespace, ""; got != want { t.Errorf("Count: got namespace %q, want %q", got, want) } const ns = "not_default" client.GetAll(ctx, NewQuery("gopher").Namespace(ns), &gs) if got, want := <-gotNamespace, ns; got != want { t.Errorf("GetAll: got namespace %q, want %q", got, want) } client.Count(ctx, 
NewQuery("gopher").Namespace(ns)) if got, want := <-gotNamespace, ns; got != want { t.Errorf("Count: got namespace %q, want %q", got, want) } } func TestReadOptions(t *testing.T) { tid := []byte{1} for _, test := range []struct { q *Query want *pb.ReadOptions }{ { q: NewQuery(""), want: nil, }, { q: NewQuery("").Transaction(nil), want: nil, }, { q: NewQuery("").Transaction(&Transaction{id: tid}), want: &pb.ReadOptions{ ConsistencyType: &pb.ReadOptions_Transaction{ Transaction: tid, }, }, }, { q: NewQuery("").EventualConsistency(), want: &pb.ReadOptions{ ConsistencyType: &pb.ReadOptions_ReadConsistency_{ ReadConsistency: pb.ReadOptions_EVENTUAL, }, }, }, } { req := &pb.RunQueryRequest{} if err := test.q.toProto(req); err != nil { t.Fatalf("%+v: got %v, want no error", test.q, err) } if got := req.ReadOptions; !proto.Equal(got, test.want) { t.Errorf("%+v:\ngot %+v\nwant %+v", test.q, got, test.want) } } // Test errors. for _, q := range []*Query{ NewQuery("").Transaction(&Transaction{id: nil}), NewQuery("").Transaction(&Transaction{id: tid}).EventualConsistency(), } { req := &pb.RunQueryRequest{} if err := q.toProto(req); err == nil { t.Errorf("%+v: got nil, wanted error", q) } } } golang-google-cloud-0.9.0/datastore/save.go000066400000000000000000000266421312234511600206300ustar00rootroot00000000000000// Copyright 4 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package datastore import ( "errors" "fmt" "reflect" "time" "unicode/utf8" timepb "github.com/golang/protobuf/ptypes/timestamp" pb "google.golang.org/genproto/googleapis/datastore/v1" llpb "google.golang.org/genproto/googleapis/type/latlng" ) type saveOpts struct { noIndex bool flatten bool omitEmpty bool } // saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer. func saveEntity(key *Key, src interface{}) (*pb.Entity, error) { var err error var props []Property if e, ok := src.(PropertyLoadSaver); ok { props, err = e.Save() } else { props, err = SaveStruct(src) } if err != nil { return nil, err } return propertiesToProto(key, props) } // TODO(djd): Convert this and below to return ([]Property, error). func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error { p := Property{ Name: name, NoIndex: opts.noIndex, } if opts.omitEmpty && isEmptyValue(v) { return nil } // First check if field type implements PLS. If so, use PLS to // save. 
ok, err := plsFieldSave(props, p, name, opts, v) if err != nil { return err } if ok { return nil } switch x := v.Interface().(type) { case *Key, time.Time, GeoPoint: p.Value = x default: switch v.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: p.Value = v.Int() case reflect.Bool: p.Value = v.Bool() case reflect.String: p.Value = v.String() case reflect.Float32, reflect.Float64: p.Value = v.Float() case reflect.Slice: if v.Type().Elem().Kind() == reflect.Uint8 { p.Value = v.Bytes() } else { return saveSliceProperty(props, name, opts, v) } case reflect.Ptr: if v.Type().Elem().Kind() != reflect.Struct { return fmt.Errorf("datastore: unsupported struct field type: %s", v.Type()) } if v.IsNil() { return nil } v = v.Elem() fallthrough case reflect.Struct: if !v.CanAddr() { return fmt.Errorf("datastore: unsupported struct field: value is unaddressable") } vi := v.Addr().Interface() sub, err := newStructPLS(vi) if err != nil { return fmt.Errorf("datastore: unsupported struct field: %v", err) } if opts.flatten { return sub.save(props, opts, name+".") } var subProps []Property err = sub.save(&subProps, opts, "") if err != nil { return err } subKey, err := sub.key(v) if err != nil { return err } p.Value = &Entity{ Key: subKey, Properties: subProps, } } } if p.Value == nil { return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type()) } *props = append(*props, p) return nil } // plsFieldSave first tries to converts v's value to a PLS, then v's addressed // value to a PLS. If neither succeeds, plsFieldSave returns false for first return // value. // If v is successfully converted to a PLS, plsFieldSave will then add the // Value to property p by way of the PLS's Save method, and append it to props. // // If the flatten option is present in opts, name must be prepended to each property's // name before it is appended to props. Eg. 
if name were "A" and a subproperty's name // were "B", the resultant name of the property to be appended to props would be "A.B". func plsFieldSave(props *[]Property, p Property, name string, opts saveOpts, v reflect.Value) (ok bool, err error) { vpls, err := plsForSave(v) if err != nil { return false, err } if vpls == nil { return false, nil } subProps, err := vpls.Save() if err != nil { return true, err } if opts.flatten { for _, subp := range subProps { subp.Name = name + "." + subp.Name *props = append(*props, subp) } return true, nil } p.Value = &Entity{Properties: subProps} *props = append(*props, p) return true, nil } // key extracts the *Key struct field from struct v based on the structCodec of s. func (s structPLS) key(v reflect.Value) (*Key, error) { if v.Kind() != reflect.Struct { return nil, errors.New("datastore: cannot save key of non-struct type") } keyField := s.codec.Match(keyFieldName) if keyField == nil { return nil, nil } f := v.FieldByIndex(keyField.Index) k, ok := f.Interface().(*Key) if !ok { return nil, fmt.Errorf("datastore: %s field on struct %T is not a *datastore.Key", keyFieldName, v.Interface()) } return k, nil } func saveSliceProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error { // Easy case: if the slice is empty, we're done. if v.Len() == 0 { return nil } // Work out the properties generated by the first element in the slice. This will // usually be a single property, but will be more if this is a slice of structs. var headProps []Property if err := saveStructProperty(&headProps, name, opts, v.Index(0)); err != nil { return err } // Convert the first element's properties into slice properties, and // keep track of the values in a map. values := make(map[string][]interface{}, len(headProps)) for _, p := range headProps { values[p.Name] = append(make([]interface{}, 0, v.Len()), p.Value) } // Find the elements for the subsequent elements. 
for i := 1; i < v.Len(); i++ { elemProps := make([]Property, 0, len(headProps)) if err := saveStructProperty(&elemProps, name, opts, v.Index(i)); err != nil { return err } for _, p := range elemProps { v, ok := values[p.Name] if !ok { return fmt.Errorf("datastore: unexpected property %q in elem %d of slice", p.Name, i) } values[p.Name] = append(v, p.Value) } } // Convert to the final properties. for _, p := range headProps { p.Value = values[p.Name] *props = append(*props, p) } return nil } func (s structPLS) Save() ([]Property, error) { var props []Property if err := s.save(&props, saveOpts{}, ""); err != nil { return nil, err } return props, nil } func (s structPLS) save(props *[]Property, opts saveOpts, prefix string) error { for _, f := range s.codec { name := prefix + f.Name v := getField(s.v, f.Index) if !v.IsValid() || !v.CanSet() { continue } var tagOpts saveOpts if f.ParsedTag != nil { tagOpts = f.ParsedTag.(saveOpts) } var opts1 saveOpts opts1.noIndex = opts.noIndex || tagOpts.noIndex opts1.flatten = opts.flatten || tagOpts.flatten opts1.omitEmpty = tagOpts.omitEmpty // don't propagate if err := saveStructProperty(props, name, opts1, v); err != nil { return err } } return nil } // getField returns the field from v at the given index path. // If it encounters a nil-valued field in the path, getField // stops and returns a zero-valued reflect.Value, preventing the // panic that would have been caused by reflect's FieldByIndex. 
func getField(v reflect.Value, index []int) reflect.Value { var zero reflect.Value if v.Type().Kind() != reflect.Struct { return zero } for _, i := range index { if v.Kind() == reflect.Ptr && v.Type().Elem().Kind() == reflect.Struct { if v.IsNil() { return zero } v = v.Elem() } v = v.Field(i) } return v } func propertiesToProto(key *Key, props []Property) (*pb.Entity, error) { e := &pb.Entity{ Key: keyToProto(key), Properties: map[string]*pb.Value{}, } indexedProps := 0 for _, p := range props { // Do not send a Key value a a field to datastore. if p.Name == keyFieldName { continue } val, err := interfaceToProto(p.Value, p.NoIndex) if err != nil { return nil, fmt.Errorf("datastore: %v for a Property with Name %q", err, p.Name) } if !p.NoIndex { rVal := reflect.ValueOf(p.Value) if rVal.Kind() == reflect.Slice && rVal.Type().Elem().Kind() != reflect.Uint8 { indexedProps += rVal.Len() } else { indexedProps++ } } if indexedProps > maxIndexedProperties { return nil, errors.New("datastore: too many indexed properties") } if _, ok := e.Properties[p.Name]; ok { return nil, fmt.Errorf("datastore: duplicate Property with Name %q", p.Name) } e.Properties[p.Name] = val } return e, nil } func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) { val := &pb.Value{ExcludeFromIndexes: noIndex} switch v := iv.(type) { case int: val.ValueType = &pb.Value_IntegerValue{IntegerValue: int64(v)} case int32: val.ValueType = &pb.Value_IntegerValue{IntegerValue: int64(v)} case int64: val.ValueType = &pb.Value_IntegerValue{IntegerValue: v} case bool: val.ValueType = &pb.Value_BooleanValue{BooleanValue: v} case string: if len(v) > 1500 && !noIndex { return nil, errors.New("string property too long to index") } if !utf8.ValidString(v) { return nil, fmt.Errorf("string is not valid utf8: %q", v) } val.ValueType = &pb.Value_StringValue{StringValue: v} case float32: val.ValueType = &pb.Value_DoubleValue{DoubleValue: float64(v)} case float64: val.ValueType = 
&pb.Value_DoubleValue{DoubleValue: v} case *Key: if v == nil { val.ValueType = &pb.Value_NullValue{} } else { val.ValueType = &pb.Value_KeyValue{KeyValue: keyToProto(v)} } case GeoPoint: if !v.Valid() { return nil, errors.New("invalid GeoPoint value") } val.ValueType = &pb.Value_GeoPointValue{GeoPointValue: &llpb.LatLng{ Latitude: v.Lat, Longitude: v.Lng, }} case time.Time: if v.Before(minTime) || v.After(maxTime) { return nil, errors.New("time value out of range") } val.ValueType = &pb.Value_TimestampValue{TimestampValue: &timepb.Timestamp{ Seconds: v.Unix(), Nanos: int32(v.Nanosecond()), }} case []byte: if len(v) > 1500 && !noIndex { return nil, errors.New("[]byte property too long to index") } val.ValueType = &pb.Value_BlobValue{BlobValue: v} case *Entity: e, err := propertiesToProto(v.Key, v.Properties) if err != nil { return nil, err } val.ValueType = &pb.Value_EntityValue{EntityValue: e} case []interface{}: arr := make([]*pb.Value, 0, len(v)) for i, v := range v { elem, err := interfaceToProto(v, noIndex) if err != nil { return nil, fmt.Errorf("%v at index %d", err, i) } arr = append(arr, elem) } val.ValueType = &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{Values: arr}} // ArrayValues have ExcludeFromIndexes set on the individual items, rather // than the top-level value. val.ExcludeFromIndexes = false default: if iv != nil { return nil, fmt.Errorf("invalid Value type %t", iv) } val.ValueType = &pb.Value_NullValue{} } // TODO(jbd): Support EntityValue. return val, nil } // isEmptyValue is taken from the encoding/json package in the // standard library. 
func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 case reflect.Bool: return !v.Bool() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 case reflect.Interface, reflect.Ptr: return v.IsNil() } return false } golang-google-cloud-0.9.0/datastore/save_test.go000066400000000000000000000104251312234511600216570ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package datastore

import (
	"reflect"
	"testing"

	pb "google.golang.org/genproto/googleapis/datastore/v1"
)

// TestInterfaceToProtoNilKey checks that a nil *Key encodes as a proto
// null value rather than a key value.
func TestInterfaceToProtoNilKey(t *testing.T) {
	var iv *Key
	pv, err := interfaceToProto(iv, false)
	if err != nil {
		t.Fatalf("nil key: interfaceToProto: %v", err)
	}

	_, ok := pv.ValueType.(*pb.Value_NullValue)
	if !ok {
		t.Errorf("nil key: type:\ngot: %T\nwant: %T", pv.ValueType, &pb.Value_NullValue{})
	}
}

// TestSaveEntityNested exercises saveEntity with nested structs, with and
// without __key__ fields, and with an unexported anonymous struct field.
func TestSaveEntityNested(t *testing.T) {
	type WithKey struct {
		X string
		I int
		K *Key `datastore:"__key__"`
	}

	type NestedWithKey struct {
		Y string
		N WithKey
	}

	type WithoutKey struct {
		X string
		I int
	}

	type NestedWithoutKey struct {
		Y string
		N WithoutKey
	}

	type a struct {
		S string
	}

	type UnexpAnonym struct {
		a
	}

	testCases := []struct {
		desc string
		src  interface{}
		key  *Key
		want *pb.Entity
	}{
		{
			desc: "nested entity with key",
			src: &NestedWithKey{
				Y: "yyy",
				N: WithKey{
					X: "two",
					I: 2,
					K: testKey1a,
				},
			},
			key: testKey0,
			want: &pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}},
					"N": {ValueType: &pb.Value_EntityValue{
						EntityValue: &pb.Entity{
							Key: keyToProto(testKey1a),
							Properties: map[string]*pb.Value{
								"X": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
								"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
							},
						},
					}},
				},
			},
		},
		{
			desc: "nested entity with incomplete key",
			src: &NestedWithKey{
				Y: "yyy",
				N: WithKey{
					X: "two",
					I: 2,
					K: incompleteKey,
				},
			},
			key: testKey0,
			want: &pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}},
					"N": {ValueType: &pb.Value_EntityValue{
						EntityValue: &pb.Entity{
							Key: keyToProto(incompleteKey),
							Properties: map[string]*pb.Value{
								"X": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
								"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
							},
						},
					}},
				},
			},
		},
		{
			desc: "nested entity without key",
			src: &NestedWithoutKey{
				Y: "yyy",
				N: WithoutKey{
					X: "two",
					I: 2,
				},
			},
			key: testKey0,
			want: &pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"Y": {ValueType: &pb.Value_StringValue{StringValue: "yyy"}},
					"N": {ValueType: &pb.Value_EntityValue{
						EntityValue: &pb.Entity{
							Properties: map[string]*pb.Value{
								"X": {ValueType: &pb.Value_StringValue{StringValue: "two"}},
								"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 2}},
							},
						},
					}},
				},
			},
		},
		{
			desc: "key at top level",
			src: &WithKey{
				X: "three",
				I: 3,
				K: testKey0,
			},
			key: testKey0,
			want: &pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"X": {ValueType: &pb.Value_StringValue{StringValue: "three"}},
					"I": {ValueType: &pb.Value_IntegerValue{IntegerValue: 3}},
				},
			},
		},
		{
			desc: "nested unexported anonymous struct field",
			src: &UnexpAnonym{
				a{S: "hello"},
			},
			key: testKey0,
			want: &pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"S": {ValueType: &pb.Value_StringValue{StringValue: "hello"}},
				},
			},
		},
	}

	for _, tc := range testCases {
		got, err := saveEntity(tc.key, tc.src)
		if err != nil {
			t.Errorf("saveEntity: %s: %v", tc.desc, err)
			continue
		}
		if !reflect.DeepEqual(tc.want, got) {
			t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, got, tc.want)
		}
	}
}
golang-google-cloud-0.9.0/datastore/testdata/000077500000000000000000000000001312234511600211425ustar00rootroot00000000000000golang-google-cloud-0.9.0/datastore/testdata/index.yaml000066400000000000000000000007521312234511600231410ustar00rootroot00000000000000indexes: - kind: SQChild ancestor: yes properties: - name: T - name: I - kind: SQChild ancestor: yes properties: - name: T - name: I direction: desc - kind: SQChild ancestor: yes properties: - name: I - name: T - name: U - kind: SQChild ancestor: yes properties: - name: I - name: T - name: U - kind: SQChild ancestor: yes properties: - name: T - name: J - kind: SQChild ancestor: yes properties: - name: T - name: J - name:
Ugolang-google-cloud-0.9.0/datastore/time.go000066400000000000000000000023011312234511600206120ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "math" "time" ) var ( minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3) maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3) ) func toUnixMicro(t time.Time) int64 { // We cannot use t.UnixNano() / 1e3 because we want to handle times more than // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot // be represented in the numerator of a single int64 divide. return t.Unix()*1e6 + int64(t.Nanosecond()/1e3) } func fromUnixMicro(t int64) time.Time { return time.Unix(t/1e6, (t%1e6)*1e3) } golang-google-cloud-0.9.0/datastore/time_test.go000066400000000000000000000046031312234511600216600ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "testing" "time" ) func TestUnixMicro(t *testing.T) { // Test that all these time.Time values survive a round trip to unix micros. testCases := []time.Time{ {}, time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), time.Unix(-1e6, -1000), time.Unix(-1e6, 0), time.Unix(-1e6, +1000), time.Unix(-60, -1000), time.Unix(-60, 0), time.Unix(-60, +1000), time.Unix(-1, -1000), time.Unix(-1, 0), time.Unix(-1, +1000), time.Unix(0, -3000), time.Unix(0, -2000), time.Unix(0, -1000), time.Unix(0, 0), time.Unix(0, +1000), time.Unix(0, +2000), time.Unix(+60, -1000), time.Unix(+60, 0), time.Unix(+60, +1000), time.Unix(+1e6, -1000), time.Unix(+1e6, 0), time.Unix(+1e6, +1000), time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC), time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC), time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC), } for _, tc := range testCases { got := fromUnixMicro(toUnixMicro(tc)) if !got.Equal(tc) { t.Errorf("got %q, want %q", got, tc) } } // Test that a time.Time that isn't an integral number of microseconds // is not perfectly reconstructed after a round trip. t0 := time.Unix(0, 123) t1 := fromUnixMicro(toUnixMicro(t0)) if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 { t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond()) } } golang-google-cloud-0.9.0/datastore/transaction.go000066400000000000000000000226301312234511600222100ustar00rootroot00000000000000// Copyright 2014 Google Inc. 
All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "errors" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" pb "google.golang.org/genproto/googleapis/datastore/v1" ) // ErrConcurrentTransaction is returned when a transaction is rolled back due // to a conflict with a concurrent transaction. var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction") var errExpiredTransaction = errors.New("datastore: transaction expired") type transactionSettings struct { attempts int } // newTransactionSettings creates a transactionSettings with a given TransactionOption slice. // Unconfigured options will be set to default values. func newTransactionSettings(opts []TransactionOption) *transactionSettings { s := &transactionSettings{attempts: 3} for _, o := range opts { o.apply(s) } return s } // TransactionOption configures the way a transaction is executed. type TransactionOption interface { apply(*transactionSettings) } // MaxAttempts returns a TransactionOption that overrides the default 3 attempt times. func MaxAttempts(attempts int) TransactionOption { return maxAttempts(attempts) } type maxAttempts int func (w maxAttempts) apply(s *transactionSettings) { if w > 0 { s.attempts = int(w) } } // Transaction represents a set of datastore operations to be committed atomically. 
// // Operations are enqueued by calling the Put and Delete methods on Transaction // (or their Multi-equivalents). These operations are only committed when the // Commit method is invoked. To ensure consistency, reads must be performed by // using Transaction's Get method or by using the Transaction method when // building a query. // // A Transaction must be committed or rolled back exactly once. type Transaction struct { id []byte client *Client ctx context.Context mutations []*pb.Mutation // The mutations to apply. pending map[int]*PendingKey // Map from mutation index to incomplete keys pending transaction completion. } // NewTransaction starts a new transaction. func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (*Transaction, error) { for _, o := range opts { if _, ok := o.(maxAttempts); ok { return nil, errors.New("datastore: NewTransaction does not accept MaxAttempts option") } } req := &pb.BeginTransactionRequest{ ProjectId: c.dataset, } resp, err := c.client.BeginTransaction(ctx, req) if err != nil { return nil, err } return &Transaction{ id: resp.Transaction, ctx: ctx, client: c, mutations: nil, pending: make(map[int]*PendingKey), }, nil } // RunInTransaction runs f in a transaction. f is invoked with a Transaction // that f should use for all the transaction's datastore operations. // // f must not call Commit or Rollback on the provided Transaction. // // If f returns nil, RunInTransaction commits the transaction, // returning the Commit and a nil error if it succeeds. If the commit fails due // to a conflicting transaction, RunInTransaction retries f with a new // Transaction. It gives up and returns ErrConcurrentTransaction after three // failed attempts (or as configured with MaxAttempts). // // If f returns non-nil, then the transaction will be rolled back and // RunInTransaction will return the same error. The function f is not retried. // // Note that when f returns, the transaction is not committed. 
Calling code // must not assume that any of f's changes have been committed until // RunInTransaction returns nil. // // Since f may be called multiple times, f should usually be idempotent – that // is, it should have the same result when called multiple times. Note that // Transaction.Get will append when unmarshalling slice fields, so it is not // necessarily idempotent. func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (*Commit, error) { settings := newTransactionSettings(opts) for n := 0; n < settings.attempts; n++ { tx, err := c.NewTransaction(ctx) if err != nil { return nil, err } if err := f(tx); err != nil { tx.Rollback() return nil, err } if cmt, err := tx.Commit(); err != ErrConcurrentTransaction { return cmt, err } } return nil, ErrConcurrentTransaction } // Commit applies the enqueued operations atomically. func (t *Transaction) Commit() (*Commit, error) { if t.id == nil { return nil, errExpiredTransaction } req := &pb.CommitRequest{ ProjectId: t.client.dataset, TransactionSelector: &pb.CommitRequest_Transaction{Transaction: t.id}, Mutations: t.mutations, Mode: pb.CommitRequest_TRANSACTIONAL, } t.id = nil resp, err := t.client.client.Commit(t.ctx, req) if err != nil { if grpc.Code(err) == codes.Aborted { return nil, ErrConcurrentTransaction } return nil, err } // Copy any newly minted keys into the returned keys. commit := &Commit{} for i, p := range t.pending { if i >= len(resp.MutationResults) || resp.MutationResults[i].Key == nil { return nil, errors.New("datastore: internal error: server returned the wrong mutation results") } key, err := protoToKey(resp.MutationResults[i].Key) if err != nil { return nil, errors.New("datastore: internal error: server returned an invalid key") } p.key = key p.commit = commit } return commit, nil } // Rollback abandons a pending transaction. 
func (t *Transaction) Rollback() error { if t.id == nil { return errExpiredTransaction } id := t.id t.id = nil _, err := t.client.client.Rollback(t.ctx, &pb.RollbackRequest{ ProjectId: t.client.dataset, Transaction: id, }) return err } // Get is the transaction-specific version of the package function Get. // All reads performed during the transaction will come from a single consistent // snapshot. Furthermore, if the transaction is set to a serializable isolation // level, another transaction cannot concurrently modify the data that is read // or modified by this transaction. func (t *Transaction) Get(key *Key, dst interface{}) error { opts := &pb.ReadOptions{ ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id}, } err := t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts) if me, ok := err.(MultiError); ok { return me[0] } return err } // GetMulti is a batch version of Get. func (t *Transaction) GetMulti(keys []*Key, dst interface{}) error { if t.id == nil { return errExpiredTransaction } opts := &pb.ReadOptions{ ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id}, } return t.client.get(t.ctx, keys, dst, opts) } // Put is the transaction-specific version of the package function Put. // // Put returns a PendingKey which can be resolved into a Key using the // return value from a successful Commit. If key is an incomplete key, the // returned pending key will resolve to a unique key generated by the // datastore. func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) { h, err := t.PutMulti([]*Key{key}, []interface{}{src}) if err != nil { if me, ok := err.(MultiError); ok { return nil, me[0] } return nil, err } return h[0], nil } // PutMulti is a batch version of Put. One PendingKey is returned for each // element of src in the same order. 
func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) { if t.id == nil { return nil, errExpiredTransaction } mutations, err := putMutations(keys, src) if err != nil { return nil, err } origin := len(t.mutations) t.mutations = append(t.mutations, mutations...) // Prepare the returned handles, pre-populating where possible. ret := make([]*PendingKey, len(keys)) for i, key := range keys { p := &PendingKey{} if key.Incomplete() { // This key will be in the final commit result. t.pending[origin+i] = p } else { p.key = key } ret[i] = p } return ret, nil } // Delete is the transaction-specific version of the package function Delete. // Delete enqueues the deletion of the entity for the given key, to be // committed atomically upon calling Commit. func (t *Transaction) Delete(key *Key) error { err := t.DeleteMulti([]*Key{key}) if me, ok := err.(MultiError); ok { return me[0] } return err } // DeleteMulti is a batch version of Delete. func (t *Transaction) DeleteMulti(keys []*Key) error { if t.id == nil { return errExpiredTransaction } mutations, err := deleteMutations(keys) if err != nil { return err } t.mutations = append(t.mutations, mutations...) return nil } // Commit represents the result of a committed transaction. type Commit struct{} // Key resolves a pending key handle into a final key. func (c *Commit) Key(p *PendingKey) *Key { if c != p.commit { panic("PendingKey was not created by corresponding transaction") } return p.key } // PendingKey represents the key for newly-inserted entity. It can be // resolved into a Key by calling the Key method of Commit. 
type PendingKey struct { key *Key commit *Commit } golang-google-cloud-0.9.0/debugger/000077500000000000000000000000001312234511600171275ustar00rootroot00000000000000golang-google-cloud-0.9.0/debugger/apiv2/000077500000000000000000000000001312234511600201505ustar00rootroot00000000000000golang-google-cloud-0.9.0/debugger/apiv2/controller2_client.go000066400000000000000000000212211312234511600243000ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package debugger import ( "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // Controller2CallOptions contains the retry settings for each method of Controller2Client. 
type Controller2CallOptions struct { RegisterDebuggee []gax.CallOption ListActiveBreakpoints []gax.CallOption UpdateActiveBreakpoint []gax.CallOption } func defaultController2ClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("clouddebugger.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultController2CallOptions() *Controller2CallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &Controller2CallOptions{ RegisterDebuggee: retry[[2]string{"default", "non_idempotent"}], ListActiveBreakpoints: retry[[2]string{"default", "idempotent"}], UpdateActiveBreakpoint: retry[[2]string{"default", "idempotent"}], } } // Controller2Client is a client for interacting with Stackdriver Debugger API. type Controller2Client struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. controller2Client clouddebuggerpb.Controller2Client // The call options for this service. CallOptions *Controller2CallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewController2Client creates a new controller2 client. // // The Controller service provides the API for orchestrating a collection of // debugger agents to perform debugging tasks. These agents are each attached // to a process of an application which may include one or more replicas. // // The debugger agents register with the Controller to identify the application // being debugged, the Debuggee. All agents that register with the same data, // represent the same Debuggee, and are assigned the same `debuggee_id`. // // The debugger agents call the Controller to retrieve the list of active // Breakpoints. 
Agents with the same `debuggee_id` get the same breakpoints // list. An agent that can fulfill the breakpoint request updates the // Controller with the breakpoint result. The controller selects the first // result received and discards the rest of the results. // Agents that poll again for active breakpoints will no longer have // the completed breakpoint in the list and should remove that breakpoint from // their attached process. // // The Controller service does not provide a way to retrieve the results of // a completed breakpoint. This functionality is available using the Debugger // service. func NewController2Client(ctx context.Context, opts ...option.ClientOption) (*Controller2Client, error) { conn, err := transport.DialGRPC(ctx, append(defaultController2ClientOptions(), opts...)...) if err != nil { return nil, err } c := &Controller2Client{ conn: conn, CallOptions: defaultController2CallOptions(), controller2Client: clouddebuggerpb.NewController2Client(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *Controller2Client) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *Controller2Client) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // RegisterDebuggee registers the debuggee with the controller service. 
// // All agents attached to the same application should call this method with // the same request content to get back the same stable `debuggee_id`. Agents // should call this method again whenever `google.rpc.Code.NOT_FOUND` is // returned from any controller method. // // This allows the controller service to disable the agent or recover from any // data loss. If the debuggee is disabled by the server, the response will // have `is_disabled` set to `true`. func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...) var resp *clouddebuggerpb.RegisterDebuggeeResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.controller2Client.RegisterDebuggee(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // ListActiveBreakpoints returns the list of all active breakpoints for the debuggee. // // The breakpoint specification (location, condition, and expression // fields) is semantically immutable, although the field values may // change. For example, an agent may update the location line number // to reflect the actual line where the breakpoint was set, but this // doesn't change the breakpoint semantics. // // This means that an agent does not need to check if a breakpoint has changed // when it encounters the same breakpoint on a successive call. // Moreover, an agent should remember the breakpoints that are completed // until the controller removes them from the active list to avoid // setting those breakpoints again. 
func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListActiveBreakpoints[0:len(c.CallOptions.ListActiveBreakpoints):len(c.CallOptions.ListActiveBreakpoints)], opts...) var resp *clouddebuggerpb.ListActiveBreakpointsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.controller2Client.ListActiveBreakpoints(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // UpdateActiveBreakpoint updates the breakpoint state or mutable fields. // The entire Breakpoint message must be sent back to the controller // service. // // Updates to active breakpoint fields are only allowed if the new value // does not change the breakpoint specification. Updates to the `location`, // `condition` and `expression` fields should not alter the breakpoint // semantics. These may only make changes such as canonicalizing a value // or snapping the location to the correct line of code. func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.UpdateActiveBreakpoint[0:len(c.CallOptions.UpdateActiveBreakpoint):len(c.CallOptions.UpdateActiveBreakpoint)], opts...) var resp *clouddebuggerpb.UpdateActiveBreakpointResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.controller2Client.UpdateActiveBreakpoint(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, err } return resp, nil } golang-google-cloud-0.9.0/debugger/apiv2/controller2_client_example_test.go000066400000000000000000000042361312234511600270610ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package debugger_test import ( "cloud.google.com/go/debugger/apiv2" "golang.org/x/net/context" clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" ) func ExampleNewController2Client() { ctx := context.Background() c, err := debugger.NewController2Client(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleController2Client_RegisterDebuggee() { ctx := context.Background() c, err := debugger.NewController2Client(ctx) if err != nil { // TODO: Handle error. } req := &clouddebuggerpb.RegisterDebuggeeRequest{ // TODO: Fill request struct fields. } resp, err := c.RegisterDebuggee(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleController2Client_ListActiveBreakpoints() { ctx := context.Background() c, err := debugger.NewController2Client(ctx) if err != nil { // TODO: Handle error. } req := &clouddebuggerpb.ListActiveBreakpointsRequest{ // TODO: Fill request struct fields. } resp, err := c.ListActiveBreakpoints(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. 
_ = resp } func ExampleController2Client_UpdateActiveBreakpoint() { ctx := context.Background() c, err := debugger.NewController2Client(ctx) if err != nil { // TODO: Handle error. } req := &clouddebuggerpb.UpdateActiveBreakpointRequest{ // TODO: Fill request struct fields. } resp, err := c.UpdateActiveBreakpoint(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } golang-google-cloud-0.9.0/debugger/apiv2/debugger2_client.go000066400000000000000000000176031312234511600237120ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package debugger import ( "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // Debugger2CallOptions contains the retry settings for each method of Debugger2Client. 
type Debugger2CallOptions struct { SetBreakpoint []gax.CallOption GetBreakpoint []gax.CallOption DeleteBreakpoint []gax.CallOption ListBreakpoints []gax.CallOption ListDebuggees []gax.CallOption } func defaultDebugger2ClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("clouddebugger.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultDebugger2CallOptions() *Debugger2CallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &Debugger2CallOptions{ SetBreakpoint: retry[[2]string{"default", "non_idempotent"}], GetBreakpoint: retry[[2]string{"default", "idempotent"}], DeleteBreakpoint: retry[[2]string{"default", "idempotent"}], ListBreakpoints: retry[[2]string{"default", "idempotent"}], ListDebuggees: retry[[2]string{"default", "idempotent"}], } } // Debugger2Client is a client for interacting with Stackdriver Debugger API. type Debugger2Client struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. debugger2Client clouddebuggerpb.Debugger2Client // The call options for this service. CallOptions *Debugger2CallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewDebugger2Client creates a new debugger2 client. // // The Debugger service provides the API that allows users to collect run-time // information from a running application, without stopping or slowing it down // and without modifying its state. An application may include one or // more replicated processes performing the same work. // // The application is represented using the Debuggee concept. The Debugger // service provides a way to query for available Debuggees, but does not // provide a way to create one. 
A debuggee is created using the Controller // service, usually by running a debugger agent with the application. // // The Debugger service enables the client to set one or more Breakpoints on a // Debuggee and collect the results of the set Breakpoints. func NewDebugger2Client(ctx context.Context, opts ...option.ClientOption) (*Debugger2Client, error) { conn, err := transport.DialGRPC(ctx, append(defaultDebugger2ClientOptions(), opts...)...) if err != nil { return nil, err } c := &Debugger2Client{ conn: conn, CallOptions: defaultDebugger2CallOptions(), debugger2Client: clouddebuggerpb.NewDebugger2Client(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *Debugger2Client) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *Debugger2Client) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *Debugger2Client) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // SetBreakpoint sets the breakpoint to the debuggee. func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.SetBreakpointResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.SetBreakpoint[0:len(c.CallOptions.SetBreakpoint):len(c.CallOptions.SetBreakpoint)], opts...) 
var resp *clouddebuggerpb.SetBreakpointResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.debugger2Client.SetBreakpoint(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // GetBreakpoint gets breakpoint information. func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.GetBreakpointResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetBreakpoint[0:len(c.CallOptions.GetBreakpoint):len(c.CallOptions.GetBreakpoint)], opts...) var resp *clouddebuggerpb.GetBreakpointResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.debugger2Client.GetBreakpoint(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // DeleteBreakpoint deletes the breakpoint from the debuggee. func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteBreakpoint[0:len(c.CallOptions.DeleteBreakpoint):len(c.CallOptions.DeleteBreakpoint)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.debugger2Client.DeleteBreakpoint(ctx, req, settings.GRPC...) return err }, opts...) return err } // ListBreakpoints lists all breakpoints for the debuggee. func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListBreakpointsResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListBreakpoints[0:len(c.CallOptions.ListBreakpoints):len(c.CallOptions.ListBreakpoints)], opts...) 
var resp *clouddebuggerpb.ListBreakpointsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.debugger2Client.ListBreakpoints(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // ListDebuggees lists all the debuggees that the user can set breakpoints to. func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...) var resp *clouddebuggerpb.ListDebuggeesResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.debugger2Client.ListDebuggees(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } golang-google-cloud-0.9.0/debugger/apiv2/debugger2_client_example_test.go000066400000000000000000000054421312234511600264620ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package debugger_test import ( "cloud.google.com/go/debugger/apiv2" "golang.org/x/net/context" clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" ) func ExampleNewDebugger2Client() { ctx := context.Background() c, err := debugger.NewDebugger2Client(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleDebugger2Client_SetBreakpoint() { ctx := context.Background() c, err := debugger.NewDebugger2Client(ctx) if err != nil { // TODO: Handle error. } req := &clouddebuggerpb.SetBreakpointRequest{ // TODO: Fill request struct fields. } resp, err := c.SetBreakpoint(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleDebugger2Client_GetBreakpoint() { ctx := context.Background() c, err := debugger.NewDebugger2Client(ctx) if err != nil { // TODO: Handle error. } req := &clouddebuggerpb.GetBreakpointRequest{ // TODO: Fill request struct fields. } resp, err := c.GetBreakpoint(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleDebugger2Client_DeleteBreakpoint() { ctx := context.Background() c, err := debugger.NewDebugger2Client(ctx) if err != nil { // TODO: Handle error. } req := &clouddebuggerpb.DeleteBreakpointRequest{ // TODO: Fill request struct fields. } err = c.DeleteBreakpoint(ctx, req) if err != nil { // TODO: Handle error. } } func ExampleDebugger2Client_ListBreakpoints() { ctx := context.Background() c, err := debugger.NewDebugger2Client(ctx) if err != nil { // TODO: Handle error. } req := &clouddebuggerpb.ListBreakpointsRequest{ // TODO: Fill request struct fields. } resp, err := c.ListBreakpoints(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleDebugger2Client_ListDebuggees() { ctx := context.Background() c, err := debugger.NewDebugger2Client(ctx) if err != nil { // TODO: Handle error. } req := &clouddebuggerpb.ListDebuggeesRequest{ // TODO: Fill request struct fields. 
} resp, err := c.ListDebuggees(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } golang-google-cloud-0.9.0/debugger/apiv2/doc.go000066400000000000000000000030121312234511600212400ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package debugger is an experimental, auto-generated package for the // Stackdriver Debugger API. // // Examines the call stack and variables of a running application // without stopping or slowing it down. // // Use the client at cloud.google.com/go/cmd/go-cloud-debug-agent in preference to this. package debugger // import "cloud.google.com/go/debugger/apiv2" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud_debugger", } } golang-google-cloud-0.9.0/debugger/apiv2/mock_test.go000066400000000000000000000503171312234511600224750ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package debugger import ( emptypb "github.com/golang/protobuf/ptypes/empty" clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockDebugger2Server struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. clouddebuggerpb.Debugger2Server reqs []proto.Message // If set, all calls return this error. 
err error // responses to return if err == nil resps []proto.Message } func (s *mockDebugger2Server) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest) (*clouddebuggerpb.SetBreakpointResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*clouddebuggerpb.SetBreakpointResponse), nil } func (s *mockDebugger2Server) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest) (*clouddebuggerpb.GetBreakpointResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*clouddebuggerpb.GetBreakpointResponse), nil } func (s *mockDebugger2Server) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockDebugger2Server) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest) (*clouddebuggerpb.ListBreakpointsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return 
s.resps[0].(*clouddebuggerpb.ListBreakpointsResponse), nil } func (s *mockDebugger2Server) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest) (*clouddebuggerpb.ListDebuggeesResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*clouddebuggerpb.ListDebuggeesResponse), nil } type mockController2Server struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. clouddebuggerpb.Controller2Server reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockController2Server) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest) (*clouddebuggerpb.RegisterDebuggeeResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*clouddebuggerpb.RegisterDebuggeeResponse), nil } func (s *mockController2Server) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*clouddebuggerpb.ListActiveBreakpointsResponse), nil } func (s *mockController2Server) UpdateActiveBreakpoint(ctx context.Context, req 
*clouddebuggerpb.UpdateActiveBreakpointRequest) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*clouddebuggerpb.UpdateActiveBreakpointResponse), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. var clientOpt option.ClientOption var ( mockDebugger2 mockDebugger2Server mockController2 mockController2Server ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() clouddebuggerpb.RegisterDebugger2Server(serv, &mockDebugger2) clouddebuggerpb.RegisterController2Server(serv, &mockController2) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestDebugger2SetBreakpoint(t *testing.T) { var expectedResponse *clouddebuggerpb.SetBreakpointResponse = &clouddebuggerpb.SetBreakpointResponse{} mockDebugger2.err = nil mockDebugger2.reqs = nil mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) var debuggeeId string = "debuggeeId-997255898" var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} var clientVersion string = "clientVersion-1506231196" var request = &clouddebuggerpb.SetBreakpointRequest{ DebuggeeId: debuggeeId, Breakpoint: breakpoint, ClientVersion: clientVersion, } c, err := NewDebugger2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.SetBreakpoint(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want 
%q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestDebugger2SetBreakpointError(t *testing.T) { errCode := codes.PermissionDenied mockDebugger2.err = gstatus.Error(errCode, "test error") var debuggeeId string = "debuggeeId-997255898" var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} var clientVersion string = "clientVersion-1506231196" var request = &clouddebuggerpb.SetBreakpointRequest{ DebuggeeId: debuggeeId, Breakpoint: breakpoint, ClientVersion: clientVersion, } c, err := NewDebugger2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.SetBreakpoint(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestDebugger2GetBreakpoint(t *testing.T) { var expectedResponse *clouddebuggerpb.GetBreakpointResponse = &clouddebuggerpb.GetBreakpointResponse{} mockDebugger2.err = nil mockDebugger2.reqs = nil mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) var debuggeeId string = "debuggeeId-997255898" var breakpointId string = "breakpointId498424873" var clientVersion string = "clientVersion-1506231196" var request = &clouddebuggerpb.GetBreakpointRequest{ DebuggeeId: debuggeeId, BreakpointId: breakpointId, ClientVersion: clientVersion, } c, err := NewDebugger2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetBreakpoint(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestDebugger2GetBreakpointError(t *testing.T) { errCode := 
codes.PermissionDenied mockDebugger2.err = gstatus.Error(errCode, "test error") var debuggeeId string = "debuggeeId-997255898" var breakpointId string = "breakpointId498424873" var clientVersion string = "clientVersion-1506231196" var request = &clouddebuggerpb.GetBreakpointRequest{ DebuggeeId: debuggeeId, BreakpointId: breakpointId, ClientVersion: clientVersion, } c, err := NewDebugger2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetBreakpoint(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestDebugger2DeleteBreakpoint(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockDebugger2.err = nil mockDebugger2.reqs = nil mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) var debuggeeId string = "debuggeeId-997255898" var breakpointId string = "breakpointId498424873" var clientVersion string = "clientVersion-1506231196" var request = &clouddebuggerpb.DeleteBreakpointRequest{ DebuggeeId: debuggeeId, BreakpointId: breakpointId, ClientVersion: clientVersion, } c, err := NewDebugger2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteBreakpoint(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestDebugger2DeleteBreakpointError(t *testing.T) { errCode := codes.PermissionDenied mockDebugger2.err = gstatus.Error(errCode, "test error") var debuggeeId string = "debuggeeId-997255898" var breakpointId string = "breakpointId498424873" var clientVersion string = "clientVersion-1506231196" var request = &clouddebuggerpb.DeleteBreakpointRequest{ DebuggeeId: debuggeeId, BreakpointId: breakpointId, ClientVersion: clientVersion, } c, err := 
NewDebugger2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteBreakpoint(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestDebugger2ListBreakpoints(t *testing.T) { var nextWaitToken string = "nextWaitToken1006864251" var expectedResponse = &clouddebuggerpb.ListBreakpointsResponse{ NextWaitToken: nextWaitToken, } mockDebugger2.err = nil mockDebugger2.reqs = nil mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) var debuggeeId string = "debuggeeId-997255898" var clientVersion string = "clientVersion-1506231196" var request = &clouddebuggerpb.ListBreakpointsRequest{ DebuggeeId: debuggeeId, ClientVersion: clientVersion, } c, err := NewDebugger2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListBreakpoints(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestDebugger2ListBreakpointsError(t *testing.T) { errCode := codes.PermissionDenied mockDebugger2.err = gstatus.Error(errCode, "test error") var debuggeeId string = "debuggeeId-997255898" var clientVersion string = "clientVersion-1506231196" var request = &clouddebuggerpb.ListBreakpointsRequest{ DebuggeeId: debuggeeId, ClientVersion: clientVersion, } c, err := NewDebugger2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListBreakpoints(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = 
resp } func TestDebugger2ListDebuggees(t *testing.T) { var expectedResponse *clouddebuggerpb.ListDebuggeesResponse = &clouddebuggerpb.ListDebuggeesResponse{} mockDebugger2.err = nil mockDebugger2.reqs = nil mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) var project string = "project-309310695" var clientVersion string = "clientVersion-1506231196" var request = &clouddebuggerpb.ListDebuggeesRequest{ Project: project, ClientVersion: clientVersion, } c, err := NewDebugger2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListDebuggees(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestDebugger2ListDebuggeesError(t *testing.T) { errCode := codes.PermissionDenied mockDebugger2.err = gstatus.Error(errCode, "test error") var project string = "project-309310695" var clientVersion string = "clientVersion-1506231196" var request = &clouddebuggerpb.ListDebuggeesRequest{ Project: project, ClientVersion: clientVersion, } c, err := NewDebugger2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListDebuggees(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestController2RegisterDebuggee(t *testing.T) { var expectedResponse *clouddebuggerpb.RegisterDebuggeeResponse = &clouddebuggerpb.RegisterDebuggeeResponse{} mockController2.err = nil mockController2.reqs = nil mockController2.resps = append(mockController2.resps[:0], expectedResponse) var debuggee *clouddebuggerpb.Debuggee = &clouddebuggerpb.Debuggee{} var request = 
&clouddebuggerpb.RegisterDebuggeeRequest{ Debuggee: debuggee, } c, err := NewController2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.RegisterDebuggee(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockController2.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestController2RegisterDebuggeeError(t *testing.T) { errCode := codes.PermissionDenied mockController2.err = gstatus.Error(errCode, "test error") var debuggee *clouddebuggerpb.Debuggee = &clouddebuggerpb.Debuggee{} var request = &clouddebuggerpb.RegisterDebuggeeRequest{ Debuggee: debuggee, } c, err := NewController2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.RegisterDebuggee(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestController2ListActiveBreakpoints(t *testing.T) { var nextWaitToken string = "nextWaitToken1006864251" var waitExpired bool = false var expectedResponse = &clouddebuggerpb.ListActiveBreakpointsResponse{ NextWaitToken: nextWaitToken, WaitExpired: waitExpired, } mockController2.err = nil mockController2.reqs = nil mockController2.resps = append(mockController2.resps[:0], expectedResponse) var debuggeeId string = "debuggeeId-997255898" var request = &clouddebuggerpb.ListActiveBreakpointsRequest{ DebuggeeId: debuggeeId, } c, err := NewController2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListActiveBreakpoints(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockController2.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", 
got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestController2ListActiveBreakpointsError(t *testing.T) { errCode := codes.PermissionDenied mockController2.err = gstatus.Error(errCode, "test error") var debuggeeId string = "debuggeeId-997255898" var request = &clouddebuggerpb.ListActiveBreakpointsRequest{ DebuggeeId: debuggeeId, } c, err := NewController2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListActiveBreakpoints(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestController2UpdateActiveBreakpoint(t *testing.T) { var expectedResponse *clouddebuggerpb.UpdateActiveBreakpointResponse = &clouddebuggerpb.UpdateActiveBreakpointResponse{} mockController2.err = nil mockController2.reqs = nil mockController2.resps = append(mockController2.resps[:0], expectedResponse) var debuggeeId string = "debuggeeId-997255898" var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} var request = &clouddebuggerpb.UpdateActiveBreakpointRequest{ DebuggeeId: debuggeeId, Breakpoint: breakpoint, } c, err := NewController2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateActiveBreakpoint(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockController2.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestController2UpdateActiveBreakpointError(t *testing.T) { errCode := codes.PermissionDenied mockController2.err = gstatus.Error(errCode, "test error") var debuggeeId string = "debuggeeId-997255898" var breakpoint 
*clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} var request = &clouddebuggerpb.UpdateActiveBreakpointRequest{ DebuggeeId: debuggeeId, Breakpoint: breakpoint, } c, err := NewController2Client(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateActiveBreakpoint(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } golang-google-cloud-0.9.0/errorreporting/000077500000000000000000000000001312234511600204265ustar00rootroot00000000000000golang-google-cloud-0.9.0/errorreporting/apiv1beta1/000077500000000000000000000000001312234511600223635ustar00rootroot00000000000000golang-google-cloud-0.9.0/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go000066400000000000000000000045441312234511600304450ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package errorreporting import ( clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" ) import ( "strconv" "testing" "time" "cloud.google.com/go/internal/testutil" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" ) var _ = iterator.Done var _ = strconv.FormatUint var _ = time.Now func TestReportErrorsServiceSmoke(t *testing.T) { if testing.Short() { t.Skip("skipping smoke test in short mode") } ctx := context.Background() ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) if ts == nil { t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") } projectId := testutil.ProjID() _ = projectId c, err := NewReportErrorsClient(ctx, option.WithTokenSource(ts)) if err != nil { t.Fatal(err) } var formattedProjectName string = ReportErrorsProjectPath(projectId) var message string = "[MESSAGE]" var service string = "[SERVICE]" var serviceContext = &clouderrorreportingpb.ServiceContext{ Service: service, } var filePath string = "path/to/file.lang" var lineNumber int32 = 42 var functionName string = "meaningOfLife" var reportLocation = &clouderrorreportingpb.SourceLocation{ FilePath: filePath, LineNumber: lineNumber, FunctionName: functionName, } var context = &clouderrorreportingpb.ErrorContext{ ReportLocation: reportLocation, } var event = &clouderrorreportingpb.ReportedErrorEvent{ Message: message, ServiceContext: serviceContext, Context: context, } var request = &clouderrorreportingpb.ReportErrorEventRequest{ ProjectName: formattedProjectName, Event: event, } if _, err := c.ReportErrorEvent(ctx, request); err != nil { t.Error(err) } } golang-google-cloud-0.9.0/errorreporting/apiv1beta1/doc.go000066400000000000000000000030161312234511600234570ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package errorreporting is an experimental, auto-generated package for the // Stackdriver Error Reporting API. // // Stackdriver Error Reporting groups and counts similar errors from cloud // services. The Stackdriver Error Reporting API provides a way to report new // errors and read access to error groups and their associated errors. package errorreporting // import "cloud.google.com/go/errorreporting/apiv1beta1" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", } } golang-google-cloud-0.9.0/errorreporting/apiv1beta1/error_group_client.go000066400000000000000000000126201312234511600266160ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package errorreporting import ( "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( errorGroupGroupPathTemplate = gax.MustCompilePathTemplate("projects/{project}/groups/{group}") ) // ErrorGroupCallOptions contains the retry settings for each method of ErrorGroupClient. type ErrorGroupCallOptions struct { GetGroup []gax.CallOption UpdateGroup []gax.CallOption } func defaultErrorGroupClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("clouderrorreporting.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultErrorGroupCallOptions() *ErrorGroupCallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &ErrorGroupCallOptions{ GetGroup: retry[[2]string{"default", "idempotent"}], UpdateGroup: retry[[2]string{"default", "idempotent"}], } } // ErrorGroupClient is a client for interacting with Stackdriver Error Reporting API. type ErrorGroupClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. errorGroupClient clouderrorreportingpb.ErrorGroupServiceClient // The call options for this service. CallOptions *ErrorGroupCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewErrorGroupClient creates a new error group service client. 
// // Service for retrieving and updating individual error groups. func NewErrorGroupClient(ctx context.Context, opts ...option.ClientOption) (*ErrorGroupClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultErrorGroupClientOptions(), opts...)...) if err != nil { return nil, err } c := &ErrorGroupClient{ conn: conn, CallOptions: defaultErrorGroupCallOptions(), errorGroupClient: clouderrorreportingpb.NewErrorGroupServiceClient(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *ErrorGroupClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *ErrorGroupClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // ErrorGroupGroupPath returns the path for the group resource. func ErrorGroupGroupPath(project, group string) string { path, err := errorGroupGroupPathTemplate.Render(map[string]string{ "project": project, "group": group, }) if err != nil { panic(err) } return path } // GetGroup get the specified group. func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...) 
var resp *clouderrorreportingpb.ErrorGroup err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.errorGroupClient.GetGroup(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // UpdateGroup replace the data for the specified group. // Fails if the group does not exist. func (c *ErrorGroupClient) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...) var resp *clouderrorreportingpb.ErrorGroup err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.errorGroupClient.UpdateGroup(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } golang-google-cloud-0.9.0/errorreporting/apiv1beta1/error_group_client_example_test.go000066400000000000000000000034201312234511600313660ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package errorreporting_test import ( "cloud.google.com/go/errorreporting/apiv1beta1" "golang.org/x/net/context" clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" ) func ExampleNewErrorGroupClient() { ctx := context.Background() c, err := errorreporting.NewErrorGroupClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleErrorGroupClient_GetGroup() { ctx := context.Background() c, err := errorreporting.NewErrorGroupClient(ctx) if err != nil { // TODO: Handle error. } req := &clouderrorreportingpb.GetGroupRequest{ // TODO: Fill request struct fields. } resp, err := c.GetGroup(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleErrorGroupClient_UpdateGroup() { ctx := context.Background() c, err := errorreporting.NewErrorGroupClient(ctx) if err != nil { // TODO: Handle error. } req := &clouderrorreportingpb.UpdateGroupRequest{ // TODO: Fill request struct fields. } resp, err := c.UpdateGroup(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } golang-google-cloud-0.9.0/errorreporting/apiv1beta1/error_stats_client.go000066400000000000000000000246261312234511600266310ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package errorreporting import ( "math" "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( errorStatsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") ) // ErrorStatsCallOptions contains the retry settings for each method of ErrorStatsClient. type ErrorStatsCallOptions struct { ListGroupStats []gax.CallOption ListEvents []gax.CallOption DeleteEvents []gax.CallOption } func defaultErrorStatsClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("clouderrorreporting.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultErrorStatsCallOptions() *ErrorStatsCallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &ErrorStatsCallOptions{ ListGroupStats: retry[[2]string{"default", "idempotent"}], ListEvents: retry[[2]string{"default", "idempotent"}], DeleteEvents: retry[[2]string{"default", "idempotent"}], } } // ErrorStatsClient is a client for interacting with Stackdriver Error Reporting API. type ErrorStatsClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. errorStatsClient clouderrorreportingpb.ErrorStatsServiceClient // The call options for this service. CallOptions *ErrorStatsCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewErrorStatsClient creates a new error stats service client. 
// // An API for retrieving and managing error statistics as well as data for // individual events. func NewErrorStatsClient(ctx context.Context, opts ...option.ClientOption) (*ErrorStatsClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultErrorStatsClientOptions(), opts...)...) if err != nil { return nil, err } c := &ErrorStatsClient{ conn: conn, CallOptions: defaultErrorStatsCallOptions(), errorStatsClient: clouderrorreportingpb.NewErrorStatsServiceClient(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *ErrorStatsClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *ErrorStatsClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // ErrorStatsProjectPath returns the path for the project resource. func ErrorStatsProjectPath(project string) string { path, err := errorStatsProjectPathTemplate.Render(map[string]string{ "project": project, }) if err != nil { panic(err) } return path } // ListGroupStats lists the specified groups. func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest, opts ...gax.CallOption) *ErrorGroupStatsIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListGroupStats[0:len(c.CallOptions.ListGroupStats):len(c.CallOptions.ListGroupStats)], opts...) 
it := &ErrorGroupStatsIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorGroupStats, string, error) { var resp *clouderrorreportingpb.ListGroupStatsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.errorStatsClient.ListGroupStats(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.ErrorGroupStats, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // ListEvents lists the specified events. func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest, opts ...gax.CallOption) *ErrorEventIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListEvents[0:len(c.CallOptions.ListEvents):len(c.CallOptions.ListEvents)], opts...) it := &ErrorEventIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorEvent, string, error) { var resp *clouderrorreportingpb.ListEventsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.errorStatsClient.ListEvents(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, "", err } return resp.ErrorEvents, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // DeleteEvents deletes all error events of a given project. func (c *ErrorStatsClient) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest, opts ...gax.CallOption) (*clouderrorreportingpb.DeleteEventsResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteEvents[0:len(c.CallOptions.DeleteEvents):len(c.CallOptions.DeleteEvents)], opts...) var resp *clouderrorreportingpb.DeleteEventsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.errorStatsClient.DeleteEvents(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // ErrorEventIterator manages a stream of *clouderrorreportingpb.ErrorEvent. type ErrorEventIterator struct { items []*clouderrorreportingpb.ErrorEvent pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*clouderrorreportingpb.ErrorEvent, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *ErrorEventIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. 
Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *ErrorEventIterator) Next() (*clouderrorreportingpb.ErrorEvent, error) { var item *clouderrorreportingpb.ErrorEvent if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *ErrorEventIterator) bufLen() int { return len(it.items) } func (it *ErrorEventIterator) takeBuf() interface{} { b := it.items it.items = nil return b } // ErrorGroupStatsIterator manages a stream of *clouderrorreportingpb.ErrorGroupStats. type ErrorGroupStatsIterator struct { items []*clouderrorreportingpb.ErrorGroupStats pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*clouderrorreportingpb.ErrorGroupStats, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *ErrorGroupStatsIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. 
func (it *ErrorGroupStatsIterator) Next() (*clouderrorreportingpb.ErrorGroupStats, error) { var item *clouderrorreportingpb.ErrorGroupStats if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *ErrorGroupStatsIterator) bufLen() int { return len(it.items) } func (it *ErrorGroupStatsIterator) takeBuf() interface{} { b := it.items it.items = nil return b } golang-google-cloud-0.9.0/errorreporting/apiv1beta1/error_stats_client_example_test.go000066400000000000000000000045231312234511600313750ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package errorreporting_test import ( "cloud.google.com/go/errorreporting/apiv1beta1" "golang.org/x/net/context" "google.golang.org/api/iterator" clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" ) func ExampleNewErrorStatsClient() { ctx := context.Background() c, err := errorreporting.NewErrorStatsClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleErrorStatsClient_ListGroupStats() { ctx := context.Background() c, err := errorreporting.NewErrorStatsClient(ctx) if err != nil { // TODO: Handle error. } req := &clouderrorreportingpb.ListGroupStatsRequest{ // TODO: Fill request struct fields. 
} it := c.ListGroupStats(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleErrorStatsClient_ListEvents() { ctx := context.Background() c, err := errorreporting.NewErrorStatsClient(ctx) if err != nil { // TODO: Handle error. } req := &clouderrorreportingpb.ListEventsRequest{ // TODO: Fill request struct fields. } it := c.ListEvents(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleErrorStatsClient_DeleteEvents() { ctx := context.Background() c, err := errorreporting.NewErrorStatsClient(ctx) if err != nil { // TODO: Handle error. } req := &clouderrorreportingpb.DeleteEventsRequest{ // TODO: Fill request struct fields. } resp, err := c.DeleteEvents(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } golang-google-cloud-0.9.0/errorreporting/apiv1beta1/mock_test.go000066400000000000000000000421511312234511600247050ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package errorreporting import ( clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockErrorGroupServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. clouderrorreportingpb.ErrorGroupServiceServer reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockErrorGroupServer) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*clouderrorreportingpb.ErrorGroup), nil } func (s *mockErrorGroupServer) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*clouderrorreportingpb.ErrorGroup), nil } type mockErrorStatsServer struct { // Embed for forward compatibility. 
// Tests will keep working if more methods are added // in the future. clouderrorreportingpb.ErrorStatsServiceServer reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockErrorStatsServer) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest) (*clouderrorreportingpb.ListGroupStatsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*clouderrorreportingpb.ListGroupStatsResponse), nil } func (s *mockErrorStatsServer) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest) (*clouderrorreportingpb.ListEventsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*clouderrorreportingpb.ListEventsResponse), nil } func (s *mockErrorStatsServer) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest) (*clouderrorreportingpb.DeleteEventsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*clouderrorreportingpb.DeleteEventsResponse), nil } type mockReportErrorsServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. 
clouderrorreportingpb.ReportErrorsServiceServer reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockReportErrorsServer) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest) (*clouderrorreportingpb.ReportErrorEventResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*clouderrorreportingpb.ReportErrorEventResponse), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. var clientOpt option.ClientOption var ( mockErrorGroup mockErrorGroupServer mockErrorStats mockErrorStatsServer mockReportErrors mockReportErrorsServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() clouderrorreportingpb.RegisterErrorGroupServiceServer(serv, &mockErrorGroup) clouderrorreportingpb.RegisterErrorStatsServiceServer(serv, &mockErrorStats) clouderrorreportingpb.RegisterReportErrorsServiceServer(serv, &mockReportErrors) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestErrorGroupServiceGetGroup(t *testing.T) { var name string = "name3373707" var groupId string = "groupId506361563" var expectedResponse = &clouderrorreportingpb.ErrorGroup{ Name: name, GroupId: groupId, } mockErrorGroup.err = nil mockErrorGroup.reqs = nil mockErrorGroup.resps = append(mockErrorGroup.resps[:0], expectedResponse) var formattedGroupName string = ErrorGroupGroupPath("[PROJECT]", "[GROUP]") var request = &clouderrorreportingpb.GetGroupRequest{ 
GroupName: formattedGroupName, } c, err := NewErrorGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetGroup(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockErrorGroup.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestErrorGroupServiceGetGroupError(t *testing.T) { errCode := codes.PermissionDenied mockErrorGroup.err = gstatus.Error(errCode, "test error") var formattedGroupName string = ErrorGroupGroupPath("[PROJECT]", "[GROUP]") var request = &clouderrorreportingpb.GetGroupRequest{ GroupName: formattedGroupName, } c, err := NewErrorGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetGroup(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestErrorGroupServiceUpdateGroup(t *testing.T) { var name string = "name3373707" var groupId string = "groupId506361563" var expectedResponse = &clouderrorreportingpb.ErrorGroup{ Name: name, GroupId: groupId, } mockErrorGroup.err = nil mockErrorGroup.reqs = nil mockErrorGroup.resps = append(mockErrorGroup.resps[:0], expectedResponse) var group *clouderrorreportingpb.ErrorGroup = &clouderrorreportingpb.ErrorGroup{} var request = &clouderrorreportingpb.UpdateGroupRequest{ Group: group, } c, err := NewErrorGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateGroup(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockErrorGroup.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { 
t.Errorf("wrong response %q, want %q)", got, want) } } func TestErrorGroupServiceUpdateGroupError(t *testing.T) { errCode := codes.PermissionDenied mockErrorGroup.err = gstatus.Error(errCode, "test error") var group *clouderrorreportingpb.ErrorGroup = &clouderrorreportingpb.ErrorGroup{} var request = &clouderrorreportingpb.UpdateGroupRequest{ Group: group, } c, err := NewErrorGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateGroup(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestErrorStatsServiceListGroupStats(t *testing.T) { var nextPageToken string = "" var errorGroupStatsElement *clouderrorreportingpb.ErrorGroupStats = &clouderrorreportingpb.ErrorGroupStats{} var errorGroupStats = []*clouderrorreportingpb.ErrorGroupStats{errorGroupStatsElement} var expectedResponse = &clouderrorreportingpb.ListGroupStatsResponse{ NextPageToken: nextPageToken, ErrorGroupStats: errorGroupStats, } mockErrorStats.err = nil mockErrorStats.reqs = nil mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse) var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{} var request = &clouderrorreportingpb.ListGroupStatsRequest{ ProjectName: formattedProjectName, TimeRange: timeRange, } c, err := NewErrorStatsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListGroupStats(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockErrorStats.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.ErrorGroupStats[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case 
proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestErrorStatsServiceListGroupStatsError(t *testing.T) { errCode := codes.PermissionDenied mockErrorStats.err = gstatus.Error(errCode, "test error") var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{} var request = &clouderrorreportingpb.ListGroupStatsRequest{ ProjectName: formattedProjectName, TimeRange: timeRange, } c, err := NewErrorStatsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListGroupStats(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestErrorStatsServiceListEvents(t *testing.T) { var nextPageToken string = "" var errorEventsElement *clouderrorreportingpb.ErrorEvent = &clouderrorreportingpb.ErrorEvent{} var errorEvents = []*clouderrorreportingpb.ErrorEvent{errorEventsElement} var expectedResponse = &clouderrorreportingpb.ListEventsResponse{ NextPageToken: nextPageToken, ErrorEvents: errorEvents, } mockErrorStats.err = nil mockErrorStats.reqs = nil mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse) var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") var groupId string = "groupId506361563" var request = &clouderrorreportingpb.ListEventsRequest{ ProjectName: formattedProjectName, GroupId: groupId, } c, err := NewErrorStatsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListEvents(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockErrorStats.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := 
(interface{})(expectedResponse.ErrorEvents[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestErrorStatsServiceListEventsError(t *testing.T) { errCode := codes.PermissionDenied mockErrorStats.err = gstatus.Error(errCode, "test error") var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") var groupId string = "groupId506361563" var request = &clouderrorreportingpb.ListEventsRequest{ ProjectName: formattedProjectName, GroupId: groupId, } c, err := NewErrorStatsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListEvents(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestErrorStatsServiceDeleteEvents(t *testing.T) { var expectedResponse *clouderrorreportingpb.DeleteEventsResponse = &clouderrorreportingpb.DeleteEventsResponse{} mockErrorStats.err = nil mockErrorStats.reqs = nil mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse) var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") var request = &clouderrorreportingpb.DeleteEventsRequest{ ProjectName: formattedProjectName, } c, err := NewErrorStatsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.DeleteEvents(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockErrorStats.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestErrorStatsServiceDeleteEventsError(t *testing.T) { errCode := codes.PermissionDenied 
mockErrorStats.err = gstatus.Error(errCode, "test error") var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") var request = &clouderrorreportingpb.DeleteEventsRequest{ ProjectName: formattedProjectName, } c, err := NewErrorStatsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.DeleteEvents(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestReportErrorsServiceReportErrorEvent(t *testing.T) { var expectedResponse *clouderrorreportingpb.ReportErrorEventResponse = &clouderrorreportingpb.ReportErrorEventResponse{} mockReportErrors.err = nil mockReportErrors.reqs = nil mockReportErrors.resps = append(mockReportErrors.resps[:0], expectedResponse) var formattedProjectName string = ReportErrorsProjectPath("[PROJECT]") var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{} var request = &clouderrorreportingpb.ReportErrorEventRequest{ ProjectName: formattedProjectName, Event: event, } c, err := NewReportErrorsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ReportErrorEvent(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockReportErrors.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestReportErrorsServiceReportErrorEventError(t *testing.T) { errCode := codes.PermissionDenied mockReportErrors.err = gstatus.Error(errCode, "test error") var formattedProjectName string = ReportErrorsProjectPath("[PROJECT]") var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{} var request = 
&clouderrorreportingpb.ReportErrorEventRequest{ ProjectName: formattedProjectName, Event: event, } c, err := NewReportErrorsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ReportErrorEvent(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } golang-google-cloud-0.9.0/errorreporting/apiv1beta1/report_errors_client.go000066400000000000000000000114051312234511600271600ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package errorreporting import ( "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" "google.golang.org/grpc" ) var ( reportErrorsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") ) // ReportErrorsCallOptions contains the retry settings for each method of ReportErrorsClient. 
type ReportErrorsCallOptions struct { ReportErrorEvent []gax.CallOption } func defaultReportErrorsClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("clouderrorreporting.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultReportErrorsCallOptions() *ReportErrorsCallOptions { retry := map[[2]string][]gax.CallOption{} return &ReportErrorsCallOptions{ ReportErrorEvent: retry[[2]string{"default", "non_idempotent"}], } } // ReportErrorsClient is a client for interacting with Stackdriver Error Reporting API. type ReportErrorsClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. reportErrorsClient clouderrorreportingpb.ReportErrorsServiceClient // The call options for this service. CallOptions *ReportErrorsCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewReportErrorsClient creates a new report errors service client. // // An API for reporting error events. func NewReportErrorsClient(ctx context.Context, opts ...option.ClientOption) (*ReportErrorsClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultReportErrorsClientOptions(), opts...)...) if err != nil { return nil, err } c := &ReportErrorsClient{ conn: conn, CallOptions: defaultReportErrorsCallOptions(), reportErrorsClient: clouderrorreportingpb.NewReportErrorsServiceClient(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *ReportErrorsClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *ReportErrorsClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. 
func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // ReportErrorsProjectPath returns the path for the project resource. func ReportErrorsProjectPath(project string) string { path, err := reportErrorsProjectPathTemplate.Render(map[string]string{ "project": project, }) if err != nil { panic(err) } return path } // ReportErrorEvent report an individual error event. // // This endpoint accepts either an OAuth token, // or an // API key // for authentication. To use an API key, append it to the URL as the value of // a `key` parameter. For example: //
POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456
func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...) var resp *clouderrorreportingpb.ReportErrorEventResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.reportErrorsClient.ReportErrorEvent(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } golang-google-cloud-0.9.0/errorreporting/apiv1beta1/report_errors_client_example_test.go000066400000000000000000000026651312234511600317420ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package errorreporting_test import ( "cloud.google.com/go/errorreporting/apiv1beta1" "golang.org/x/net/context" clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" ) func ExampleNewReportErrorsClient() { ctx := context.Background() c, err := errorreporting.NewReportErrorsClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. 
_ = c } func ExampleReportErrorsClient_ReportErrorEvent() { ctx := context.Background() c, err := errorreporting.NewReportErrorsClient(ctx) if err != nil { // TODO: Handle error. } req := &clouderrorreportingpb.ReportErrorEventRequest{ // TODO: Fill request struct fields. } resp, err := c.ReportErrorEvent(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } golang-google-cloud-0.9.0/errors/000077500000000000000000000000001312234511600166575ustar00rootroot00000000000000golang-google-cloud-0.9.0/errors/error_logging_test.go000066400000000000000000000125241312234511600231100ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package errors import ( "bytes" "errors" "log" "strings" "testing" "cloud.google.com/go/logging" "golang.org/x/net/context" "google.golang.org/api/option" ) type fakeLogger struct { entry *logging.Entry fail bool } func (c *fakeLogger) LogSync(ctx context.Context, e logging.Entry) error { if c.fail { return errors.New("request failed") } c.entry = &e return nil } func (c *fakeLogger) Close() error { return nil } func newTestClientUsingLogging(c *fakeLogger) *Client { newLoggerInterface = func(ctx context.Context, project string, opts ...option.ClientOption) (loggerInterface, error) { return c, nil } t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", true) if err != nil { panic(err) } t.RepanicDefault = false return t } func TestCatchNothingUsingLogging(t *testing.T) { fl := &fakeLogger{} c := newTestClientUsingLogging(fl) defer func() { e := fl.entry if e != nil { t.Errorf("got error report, expected none") } }() defer c.Catch(ctx) } func entryMessage(e *logging.Entry) string { return e.Payload.(map[string]interface{})["message"].(string) } func commonLoggingChecks(t *testing.T, e *logging.Entry, panickingFunction string) { if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["service"] != "myservice" { t.Errorf("error report didn't contain service name") } if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["version"] != "v1.000" { t.Errorf("error report didn't contain version name") } if !strings.Contains(entryMessage(e), "hello, error") { t.Errorf("error report didn't contain message") } if !strings.Contains(entryMessage(e), panickingFunction) { t.Errorf("error report didn't contain stack trace") } } func TestCatchPanicUsingLogging(t *testing.T) { fl := &fakeLogger{} c := newTestClientUsingLogging(fl) defer func() { e := fl.entry if e == nil { t.Fatalf("got no error report, expected one") } commonLoggingChecks(t, e, "TestCatchPanic") if !strings.Contains(entryMessage(e), "divide by 
zero") { t.Errorf("error report didn't contain recovered value") } }() defer c.Catch(ctx, WithMessage("hello, error")) var x int x = x / x } func TestCatchPanicNilClientUsingLogging(t *testing.T) { buf := new(bytes.Buffer) log.SetOutput(buf) defer func() { recover() body := buf.String() if !strings.Contains(body, "divide by zero") { t.Errorf("error report didn't contain recovered value") } if !strings.Contains(body, "hello, error") { t.Errorf("error report didn't contain message") } if !strings.Contains(body, "TestCatchPanicNilClient") { t.Errorf("error report didn't contain recovered value") } }() var c *Client defer c.Catch(ctx, WithMessage("hello, error")) var x int x = x / x } func TestLogFailedReportsUsingLogging(t *testing.T) { fl := &fakeLogger{fail: true} c := newTestClientUsingLogging(fl) buf := new(bytes.Buffer) log.SetOutput(buf) defer func() { recover() body := buf.String() if !strings.Contains(body, "hello, error") { t.Errorf("error report didn't contain message") } if !strings.Contains(body, "errors.TestLogFailedReports") { t.Errorf("error report didn't contain stack trace") } if !strings.Contains(body, "divide by zero") { t.Errorf("error report didn't contain recovered value") } }() defer c.Catch(ctx, WithMessage("hello, error")) var x int x = x / x } func TestCatchNilPanicUsingLogging(t *testing.T) { fl := &fakeLogger{} c := newTestClientUsingLogging(fl) defer func() { e := fl.entry if e == nil { t.Fatalf("got no error report, expected one") } commonLoggingChecks(t, e, "TestCatchNilPanic") if !strings.Contains(entryMessage(e), "nil") { t.Errorf("error report didn't contain recovered value") } }() b := true defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b)) panic(nil) } func TestNotCatchNilPanicUsingLogging(t *testing.T) { fl := &fakeLogger{} c := newTestClientUsingLogging(fl) defer func() { e := fl.entry if e != nil { t.Errorf("got error report, expected none") } }() defer c.Catch(ctx, WithMessage("hello, error")) panic(nil) } func 
TestReportUsingLogging(t *testing.T) { fl := &fakeLogger{} c := newTestClientUsingLogging(fl) c.Report(ctx, nil, "hello, ", "error") e := fl.entry if e == nil { t.Fatalf("got no error report, expected one") } commonLoggingChecks(t, e, "TestReport") } func TestReportfUsingLogging(t *testing.T) { fl := &fakeLogger{} c := newTestClientUsingLogging(fl) c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2) e := fl.entry if e == nil { t.Fatalf("got no error report, expected one") } commonLoggingChecks(t, e, "TestReportf") if !strings.Contains(entryMessage(e), "2+2=4") { t.Errorf("error report didn't contain formatted message") } } golang-google-cloud-0.9.0/errors/errors.go000066400000000000000000000322301312234511600205220ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package errors is a Google Stackdriver Error Reporting library. // // This package is still experimental and subject to change. // // See https://cloud.google.com/error-reporting/ for more information. // // To initialize a client, use the NewClient function. Generally you will want // to do this on program initialization. The NewClient function takes as // arguments a context, the project name, a service name, and a version string. // The service name and version string identify the running program, and are // included in error reports. The version string can be left empty. 
NewClient // also takes a bool that indicates whether to report errors using Stackdriver // Logging, which will result in errors appearing in both the logs and the error // dashboard. This is useful if you are already a user of Stackdriver Logging. // // import "cloud.google.com/go/errors" // ... // errorsClient, err = errors.NewClient(ctx, projectID, "myservice", "v1.0", true) // // The client can recover panics in your program and report them as errors. // To use this functionality, defer its Catch method, as you would any other // function for recovering panics. // // func foo(ctx context.Context, ...) { // defer errorsClient.Catch(ctx) // ... // } // // Catch writes an error report containing the recovered value and a stack trace // to Stackdriver Error Reporting. // // There are various options you can add to the call to Catch that modify how // panics are handled. // // WithMessage and WithMessagef add a custom message after the recovered value, // using fmt.Sprint and fmt.Sprintf respectively. // // defer errorsClient.Catch(ctx, errors.WithMessagef("x=%d", x)) // // WithRequest fills in various fields in the error report with information // about an http.Request that's being handled. // // defer errorsClient.Catch(ctx, errors.WithRequest(httpReq)) // // By default, after recovering a panic, Catch will panic again with the // recovered value. You can turn off this behavior with the Repanic option. // // defer errorsClient.Catch(ctx, errors.Repanic(false)) // // You can also change the default behavior for the client by changing the // RepanicDefault field. // // errorsClient.RepanicDefault = false // // It is also possible to write an error report directly without recovering a // panic, using Report or Reportf. 
// // if err != nil { // errorsClient.Reportf(ctx, r, "unexpected error %v", err) // } // // If you try to write an error report with a nil client, or if the client // fails to write the report to the server, the error report is logged using // log.Println. package errors // import "cloud.google.com/go/errors" import ( "bytes" "fmt" "log" "net/http" "runtime" "strings" "time" api "cloud.google.com/go/errorreporting/apiv1beta1" "cloud.google.com/go/internal/version" "cloud.google.com/go/logging" "github.com/golang/protobuf/ptypes/timestamp" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" ) const ( userAgent = `gcloud-golang-errorreporting/20160701` ) type apiInterface interface { ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) Close() error } var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) { client, err := api.NewReportErrorsClient(ctx, opts...) if err != nil { return nil, err } client.SetGoogleClientInfo("gccl", version.Repo) return client, nil } type loggerInterface interface { LogSync(ctx context.Context, e logging.Entry) error Close() error } type logger struct { *logging.Logger c *logging.Client } func (l logger) Close() error { return l.c.Close() } var newLoggerInterface = func(ctx context.Context, projectID string, opts ...option.ClientOption) (loggerInterface, error) { lc, err := logging.NewClient(ctx, projectID, opts...) if err != nil { return nil, fmt.Errorf("creating Logging client: %v", err) } l := lc.Logger("errorreports") return logger{l, lc}, nil } type sender interface { send(ctx context.Context, r *http.Request, message string) close() error } // errorApiSender sends error reports using the Stackdriver Error Reporting API. 
type errorApiSender struct { apiClient apiInterface projectID string serviceContext erpb.ServiceContext } // loggingSender sends error reports using the Stackdriver Logging API. type loggingSender struct { logger loggerInterface projectID string serviceContext map[string]string client *logging.Client } type Client struct { sender // RepanicDefault determines whether Catch will re-panic after recovering a // panic. This behavior can be overridden for an individual call to Catch using // the Repanic option. RepanicDefault bool } func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, useLogging bool, opts ...option.ClientOption) (*Client, error) { if useLogging { l, err := newLoggerInterface(ctx, projectID, opts...) if err != nil { return nil, fmt.Errorf("creating Logging client: %v", err) } sender := &loggingSender{ logger: l, projectID: projectID, serviceContext: map[string]string{ "service": serviceName, }, } if serviceVersion != "" { sender.serviceContext["version"] = serviceVersion } c := &Client{ sender: sender, RepanicDefault: true, } return c, nil } else { a, err := newApiInterface(ctx, opts...) if err != nil { return nil, fmt.Errorf("creating Error Reporting client: %v", err) } c := &Client{ sender: &errorApiSender{ apiClient: a, projectID: "projects/" + projectID, serviceContext: erpb.ServiceContext{ Service: serviceName, Version: serviceVersion, }, }, RepanicDefault: true, } return c, nil } } // Close closes any resources held by the client. // Close should be called when the client is no longer needed. // It need not be called at program exit. func (c *Client) Close() error { err := c.sender.close() c.sender = nil return err } // An Option is an optional argument to Catch. type Option interface { isOption() } // PanicFlag returns an Option that can inform Catch that a panic has occurred. // If *p is true when Catch is called, an error report is made even if recover // returns nil. 
This allows Catch to report an error for panic(nil). // If p is nil, the option is ignored. // // Here is an example of how to use PanicFlag: // // func foo(ctx context.Context, ...) { // hasPanicked := true // defer errorsClient.Catch(ctx, errors.PanicFlag(&hasPanicked)) // ... // ... // // We have reached the end of the function, so we're not panicking. // hasPanicked = false // } func PanicFlag(p *bool) Option { return panicFlag{p} } type panicFlag struct { *bool } func (h panicFlag) isOption() {} // Repanic returns an Option that determines whether Catch will re-panic after // it reports an error. This overrides the default in the client. func Repanic(r bool) Option { return repanic(r) } type repanic bool func (r repanic) isOption() {} // WithRequest returns an Option that informs Catch or Report of an http.Request // that is being handled. Information from the Request is included in the error // report, if one is made. func WithRequest(r *http.Request) Option { return withRequest{r} } type withRequest struct { *http.Request } func (w withRequest) isOption() {} // WithMessage returns an Option that sets a message to be included in the error // report, if one is made. v is converted to a string with fmt.Sprint. func WithMessage(v ...interface{}) Option { return message(v) } type message []interface{} func (m message) isOption() {} // WithMessagef returns an Option that sets a message to be included in the error // report, if one is made. format and v are converted to a string with fmt.Sprintf. func WithMessagef(format string, v ...interface{}) Option { return messagef{format, v} } type messagef struct { format string v []interface{} } func (m messagef) isOption() {} // Catch tries to recover a panic; if it succeeds, it writes an error report. // It should be called by deferring it, like any other function for recovering // panics. // // Catch can be called concurrently with other calls to Catch, Report or Reportf. 
func (c *Client) Catch(ctx context.Context, opt ...Option) { panicked := false for _, o := range opt { switch o := o.(type) { case panicFlag: panicked = panicked || o.bool != nil && *o.bool } } x := recover() if x == nil && !panicked { return } var ( r *http.Request shouldRepanic = true messages = []string{fmt.Sprint(x)} ) if c != nil { shouldRepanic = c.RepanicDefault } for _, o := range opt { switch o := o.(type) { case repanic: shouldRepanic = bool(o) case withRequest: r = o.Request case message: messages = append(messages, fmt.Sprint(o...)) case messagef: messages = append(messages, fmt.Sprintf(o.format, o.v...)) } } c.logInternal(ctx, r, true, strings.Join(messages, " ")) if shouldRepanic { panic(x) } } // Report writes an error report unconditionally, instead of only when a panic // occurs. // If r is non-nil, information from the Request is included in the error report. // // Report can be called concurrently with other calls to Catch, Report or Reportf. func (c *Client) Report(ctx context.Context, r *http.Request, v ...interface{}) { c.logInternal(ctx, r, false, fmt.Sprint(v...)) } // Reportf writes an error report unconditionally, instead of only when a panic // occurs. // If r is non-nil, information from the Request is included in the error report. // // Reportf can be called concurrently with other calls to Catch, Report or Reportf. func (c *Client) Reportf(ctx context.Context, r *http.Request, format string, v ...interface{}) { c.logInternal(ctx, r, false, fmt.Sprintf(format, v...)) } func (c *Client) logInternal(ctx context.Context, r *http.Request, isPanic bool, msg string) { // limit the stack trace to 16k. 
var buf [16384]byte stack := buf[0:runtime.Stack(buf[:], false)] message := msg + "\n" + chopStack(stack, isPanic) if c == nil { log.Println("Error report used nil client:", message) return } c.send(ctx, r, message) } func (s *loggingSender) send(ctx context.Context, r *http.Request, message string) { payload := map[string]interface{}{ "eventTime": time.Now().In(time.UTC).Format(time.RFC3339Nano), "message": message, "serviceContext": s.serviceContext, } if r != nil { payload["context"] = map[string]interface{}{ "httpRequest": map[string]interface{}{ "method": r.Method, "url": r.Host + r.RequestURI, "userAgent": r.UserAgent(), "referrer": r.Referer(), "remoteIp": r.RemoteAddr, }, } } e := logging.Entry{ Severity: logging.Error, Payload: payload, } err := s.logger.LogSync(ctx, e) if err != nil { log.Println("Error writing error report:", err, "report:", payload) } } func (s *loggingSender) close() error { return s.client.Close() } func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) { time := time.Now() var errorContext *erpb.ErrorContext if r != nil { errorContext = &erpb.ErrorContext{ HttpRequest: &erpb.HttpRequestContext{ Method: r.Method, Url: r.Host + r.RequestURI, UserAgent: r.UserAgent(), Referrer: r.Referer(), RemoteIp: r.RemoteAddr, }, } } req := erpb.ReportErrorEventRequest{ ProjectName: s.projectID, Event: &erpb.ReportedErrorEvent{ EventTime: ×tamp.Timestamp{ Seconds: time.Unix(), Nanos: int32(time.Nanosecond()), }, ServiceContext: &s.serviceContext, Message: message, Context: errorContext, }, } _, err := s.apiClient.ReportErrorEvent(ctx, &req) if err != nil { log.Println("Error writing error report:", err, "report:", message) } } func (s *errorApiSender) close() error { return s.apiClient.Close() } // chopStack trims a stack trace so that the function which panics or calls // Report is first. 
func chopStack(s []byte, isPanic bool) string { var f []byte if isPanic { f = []byte("panic(") } else { f = []byte("cloud.google.com/go/errors.(*Client).Report") } lfFirst := bytes.IndexByte(s, '\n') if lfFirst == -1 { return string(s) } stack := s[lfFirst:] panicLine := bytes.Index(stack, f) if panicLine == -1 { return string(s) } stack = stack[panicLine+1:] for i := 0; i < 2; i++ { nextLine := bytes.IndexByte(stack, '\n') if nextLine == -1 { return string(s) } stack = stack[nextLine+1:] } return string(s[:lfFirst+1]) + string(stack) } golang-google-cloud-0.9.0/errors/errors_test.go000066400000000000000000000125711312234511600215670ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package errors import ( "bytes" "errors" "log" "strings" "testing" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" ) const testProjectID = "testproject" type fakeReportErrorsClient struct { req *erpb.ReportErrorEventRequest fail bool } func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, _ ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) { if c.fail { return nil, errors.New("request failed") } c.req = req return &erpb.ReportErrorEventResponse{}, nil } func (c *fakeReportErrorsClient) Close() error { return nil } func newTestClient(c *fakeReportErrorsClient) *Client { newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) { return c, nil } t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", false) if err != nil { panic(err) } t.RepanicDefault = false return t } var ctx context.Context func init() { ctx = context.Background() } func TestCatchNothing(t *testing.T) { fc := &fakeReportErrorsClient{} c := newTestClient(fc) defer func() { r := fc.req if r != nil { t.Errorf("got error report, expected none") } }() defer c.Catch(ctx) } func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, panickingFunction string) { if req.Event.ServiceContext.Service != "myservice" { t.Errorf("error report didn't contain service name") } if req.Event.ServiceContext.Version != "v1.000" { t.Errorf("error report didn't contain version name") } if !strings.Contains(req.Event.Message, "hello, error") { t.Errorf("error report didn't contain message") } if !strings.Contains(req.Event.Message, panickingFunction) { t.Errorf("error report didn't contain stack trace") } } func TestCatchPanic(t *testing.T) { fc := &fakeReportErrorsClient{} c := newTestClient(fc) defer func() { r := fc.req if r == nil { t.Fatalf("got no error report, 
expected one") } commonChecks(t, r, "errors.TestCatchPanic") if !strings.Contains(r.Event.Message, "divide by zero") { t.Errorf("error report didn't contain recovered value") } }() defer c.Catch(ctx, WithMessage("hello, error")) var x int x = x / x } func TestCatchPanicNilClient(t *testing.T) { buf := new(bytes.Buffer) log.SetOutput(buf) defer func() { recover() body := buf.String() if !strings.Contains(body, "divide by zero") { t.Errorf("error report didn't contain recovered value") } if !strings.Contains(body, "hello, error") { t.Errorf("error report didn't contain message") } if !strings.Contains(body, "TestCatchPanicNilClient") { t.Errorf("error report didn't contain recovered value") } }() var c *Client defer c.Catch(ctx, WithMessage("hello, error")) var x int x = x / x } func TestLogFailedReports(t *testing.T) { fc := &fakeReportErrorsClient{fail: true} c := newTestClient(fc) buf := new(bytes.Buffer) log.SetOutput(buf) defer func() { recover() body := buf.String() if !strings.Contains(body, "hello, error") { t.Errorf("error report didn't contain message") } if !strings.Contains(body, "errors.TestLogFailedReports") { t.Errorf("error report didn't contain stack trace") } if !strings.Contains(body, "divide by zero") { t.Errorf("error report didn't contain recovered value") } }() defer c.Catch(ctx, WithMessage("hello, error")) var x int x = x / x } func TestCatchNilPanic(t *testing.T) { fc := &fakeReportErrorsClient{} c := newTestClient(fc) defer func() { r := fc.req if r == nil { t.Fatalf("got no error report, expected one") } commonChecks(t, r, "errors.TestCatchNilPanic") if !strings.Contains(r.Event.Message, "nil") { t.Errorf("error report didn't contain recovered value") } }() b := true defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b)) panic(nil) } func TestNotCatchNilPanic(t *testing.T) { fc := &fakeReportErrorsClient{} c := newTestClient(fc) defer func() { r := fc.req if r != nil { t.Errorf("got error report, expected none") } }() defer 
c.Catch(ctx, WithMessage("hello, error")) panic(nil) } func TestReport(t *testing.T) { fc := &fakeReportErrorsClient{} c := newTestClient(fc) c.Report(ctx, nil, "hello, ", "error") r := fc.req if r == nil { t.Fatalf("got no error report, expected one") } commonChecks(t, r, "errors.TestReport") } func TestReportf(t *testing.T) { fc := &fakeReportErrorsClient{} c := newTestClient(fc) c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2) r := fc.req if r == nil { t.Fatalf("got no error report, expected one") } commonChecks(t, r, "errors.TestReportf") if !strings.Contains(r.Event.Message, "2+2=4") { t.Errorf("error report didn't contain formatted message") } } golang-google-cloud-0.9.0/errors/stack_test.go000066400000000000000000000076451312234511600213660ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package errors import "testing" func TestChopStack(t *testing.T) { for _, test := range []struct { name string in []byte expected string isPanic bool }{ { name: "Catch", in: []byte(`goroutine 20 [running]: runtime/debug.Stack() /gopath/src/runtime/debug/stack.go:24 +0x79 cloud.google.com/go/errors.(*Client).logInternal() /gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b cloud.google.com/go/errors.(*Client).Catch() /gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed panic() /gopath/src/runtime/panic.go:458 +0x243 cloud.google.com/go/errors_test.TestCatchPanic() /gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171 testing.tRunner() /gopath/src/testing/testing.go:610 +0x81 created by testing.(*T).Run /gopath/src/testing/testing.go:646 +0x2ec `), expected: `goroutine 20 [running]: cloud.google.com/go/errors_test.TestCatchPanic() /gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171 testing.tRunner() /gopath/src/testing/testing.go:610 +0x81 created by testing.(*T).Run /gopath/src/testing/testing.go:646 +0x2ec `, isPanic: true, }, { name: "function not found", in: []byte(`goroutine 20 [running]: runtime/debug.Stack() /gopath/src/runtime/debug/stack.go:24 +0x79 cloud.google.com/go/errors.(*Client).logInternal() /gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b cloud.google.com/go/errors.(*Client).Catch() /gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed cloud.google.com/go/errors_test.TestCatchPanic() /gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171 testing.tRunner() /gopath/src/testing/testing.go:610 +0x81 created by testing.(*T).Run /gopath/src/testing/testing.go:646 +0x2ec `), expected: `goroutine 20 [running]: runtime/debug.Stack() /gopath/src/runtime/debug/stack.go:24 +0x79 cloud.google.com/go/errors.(*Client).logInternal() /gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b cloud.google.com/go/errors.(*Client).Catch() /gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed 
cloud.google.com/go/errors_test.TestCatchPanic() /gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171 testing.tRunner() /gopath/src/testing/testing.go:610 +0x81 created by testing.(*T).Run /gopath/src/testing/testing.go:646 +0x2ec `, isPanic: true, }, { name: "Report", in: []byte(` goroutine 39 [running]: runtime/debug.Stack() /gopath/runtime/debug/stack.go:24 +0x79 cloud.google.com/go/errors.(*Client).logInternal() /gopath/cloud.google.com/go/errors/errors.go:259 +0x18b cloud.google.com/go/errors.(*Client).Report() /gopath/cloud.google.com/go/errors/errors.go:248 +0x4ed cloud.google.com/go/errors_test.TestReport() /gopath/cloud.google.com/go/errors/errors_test.go:137 +0x2a1 testing.tRunner() /gopath/testing/testing.go:610 +0x81 created by testing.(*T).Run /gopath/testing/testing.go:646 +0x2ec `), expected: ` goroutine 39 [running]: cloud.google.com/go/errors_test.TestReport() /gopath/cloud.google.com/go/errors/errors_test.go:137 +0x2a1 testing.tRunner() /gopath/testing/testing.go:610 +0x81 created by testing.(*T).Run /gopath/testing/testing.go:646 +0x2ec `, isPanic: false, }, } { out := chopStack(test.in, test.isPanic) if out != test.expected { t.Errorf("case %q: chopStack(%q, %t): got %q want %q", test.name, test.in, test.isPanic, out, test.expected) } } } golang-google-cloud-0.9.0/iam/000077500000000000000000000000001312234511600161115ustar00rootroot00000000000000golang-google-cloud-0.9.0/iam/admin/000077500000000000000000000000001312234511600172015ustar00rootroot00000000000000golang-google-cloud-0.9.0/iam/admin/apiv1/000077500000000000000000000000001312234511600202215ustar00rootroot00000000000000golang-google-cloud-0.9.0/iam/admin/apiv1/doc.go000066400000000000000000000030111312234511600213100ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package admin is an experimental, auto-generated package for the // Google Identity and Access Management (IAM) API. // // Manages identity and access control for Google Cloud Platform resources, // including the creation of service accounts, which you can use to // authenticate to Google and make API calls. package admin // import "cloud.google.com/go/iam/admin/apiv1" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/iam", } } golang-google-cloud-0.9.0/iam/admin/apiv1/iam_client.go000066400000000000000000000454341312234511600226660ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package admin import ( "math" "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" adminpb "google.golang.org/genproto/googleapis/iam/admin/v1" iampb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( iamProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") iamServiceAccountPathTemplate = gax.MustCompilePathTemplate("projects/{project}/serviceAccounts/{service_account}") iamKeyPathTemplate = gax.MustCompilePathTemplate("projects/{project}/serviceAccounts/{service_account}/keys/{key}") ) // IamCallOptions contains the retry settings for each method of IamClient. type IamCallOptions struct { ListServiceAccounts []gax.CallOption GetServiceAccount []gax.CallOption CreateServiceAccount []gax.CallOption UpdateServiceAccount []gax.CallOption DeleteServiceAccount []gax.CallOption ListServiceAccountKeys []gax.CallOption GetServiceAccountKey []gax.CallOption CreateServiceAccountKey []gax.CallOption DeleteServiceAccountKey []gax.CallOption SignBlob []gax.CallOption GetIamPolicy []gax.CallOption SetIamPolicy []gax.CallOption TestIamPermissions []gax.CallOption QueryGrantableRoles []gax.CallOption } func defaultIamClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("iam.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultIamCallOptions() *IamCallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return 
&IamCallOptions{ ListServiceAccounts: retry[[2]string{"default", "idempotent"}], GetServiceAccount: retry[[2]string{"default", "idempotent"}], CreateServiceAccount: retry[[2]string{"default", "non_idempotent"}], UpdateServiceAccount: retry[[2]string{"default", "idempotent"}], DeleteServiceAccount: retry[[2]string{"default", "idempotent"}], ListServiceAccountKeys: retry[[2]string{"default", "idempotent"}], GetServiceAccountKey: retry[[2]string{"default", "idempotent"}], CreateServiceAccountKey: retry[[2]string{"default", "non_idempotent"}], DeleteServiceAccountKey: retry[[2]string{"default", "idempotent"}], SignBlob: retry[[2]string{"default", "non_idempotent"}], GetIamPolicy: retry[[2]string{"default", "non_idempotent"}], SetIamPolicy: retry[[2]string{"default", "non_idempotent"}], TestIamPermissions: retry[[2]string{"default", "non_idempotent"}], QueryGrantableRoles: retry[[2]string{"default", "non_idempotent"}], } } // IamClient is a client for interacting with Google Identity and Access Management (IAM) API. type IamClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. iamClient adminpb.IAMClient // The call options for this service. CallOptions *IamCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewIamClient creates a new iam client. // // Creates and manages service account objects. // // Service account is an account that belongs to your project instead // of to an individual end user. It is used to authenticate calls // to a Google API. // // To create a service account, specify the `project_id` and `account_id` // for the account. The `account_id` is unique within the project, and used // to generate the service account email address and a stable // `unique_id`. // // All other methods can identify accounts using the format // `projects/{project}/serviceAccounts/{account}`. // Using `-` as a wildcard for the project will infer the project from // the account. 
The `account` value can be the `email` address or the // `unique_id` of the service account. func NewIamClient(ctx context.Context, opts ...option.ClientOption) (*IamClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultIamClientOptions(), opts...)...) if err != nil { return nil, err } c := &IamClient{ conn: conn, CallOptions: defaultIamCallOptions(), iamClient: adminpb.NewIAMClient(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *IamClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *IamClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *IamClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // IamProjectPath returns the path for the project resource. func IamProjectPath(project string) string { path, err := iamProjectPathTemplate.Render(map[string]string{ "project": project, }) if err != nil { panic(err) } return path } // IamServiceAccountPath returns the path for the service account resource. func IamServiceAccountPath(project, serviceAccount string) string { path, err := iamServiceAccountPathTemplate.Render(map[string]string{ "project": project, "service_account": serviceAccount, }) if err != nil { panic(err) } return path } // IamKeyPath returns the path for the key resource. 
func IamKeyPath(project, serviceAccount, key string) string { path, err := iamKeyPathTemplate.Render(map[string]string{ "project": project, "service_account": serviceAccount, "key": key, }) if err != nil { panic(err) } return path } // ListServiceAccounts lists [ServiceAccounts][google.iam.admin.v1.ServiceAccount] for a project. func (c *IamClient) ListServiceAccounts(ctx context.Context, req *adminpb.ListServiceAccountsRequest, opts ...gax.CallOption) *ServiceAccountIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListServiceAccounts[0:len(c.CallOptions.ListServiceAccounts):len(c.CallOptions.ListServiceAccounts)], opts...) it := &ServiceAccountIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*adminpb.ServiceAccount, string, error) { var resp *adminpb.ListServiceAccountsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.iamClient.ListServiceAccounts(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.Accounts, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GetServiceAccount gets a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. func (c *IamClient) GetServiceAccount(ctx context.Context, req *adminpb.GetServiceAccountRequest, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetServiceAccount[0:len(c.CallOptions.GetServiceAccount):len(c.CallOptions.GetServiceAccount)], opts...) 
var resp *adminpb.ServiceAccount err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.iamClient.GetServiceAccount(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // CreateServiceAccount creates a [ServiceAccount][google.iam.admin.v1.ServiceAccount] // and returns it. func (c *IamClient) CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.CreateServiceAccount[0:len(c.CallOptions.CreateServiceAccount):len(c.CallOptions.CreateServiceAccount)], opts...) var resp *adminpb.ServiceAccount err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.iamClient.CreateServiceAccount(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // UpdateServiceAccount updates a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. // // Currently, only the following fields are updatable: // `display_name` . // The `etag` is mandatory. func (c *IamClient) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.UpdateServiceAccount[0:len(c.CallOptions.UpdateServiceAccount):len(c.CallOptions.UpdateServiceAccount)], opts...) var resp *adminpb.ServiceAccount err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.iamClient.UpdateServiceAccount(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // DeleteServiceAccount deletes a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. 
func (c *IamClient) DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteServiceAccount[0:len(c.CallOptions.DeleteServiceAccount):len(c.CallOptions.DeleteServiceAccount)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.iamClient.DeleteServiceAccount(ctx, req, settings.GRPC...) return err }, opts...) return err } // ListServiceAccountKeys lists [ServiceAccountKeys][google.iam.admin.v1.ServiceAccountKey]. func (c *IamClient) ListServiceAccountKeys(ctx context.Context, req *adminpb.ListServiceAccountKeysRequest, opts ...gax.CallOption) (*adminpb.ListServiceAccountKeysResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListServiceAccountKeys[0:len(c.CallOptions.ListServiceAccountKeys):len(c.CallOptions.ListServiceAccountKeys)], opts...) var resp *adminpb.ListServiceAccountKeysResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.iamClient.ListServiceAccountKeys(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // GetServiceAccountKey gets the [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] // by key id. func (c *IamClient) GetServiceAccountKey(ctx context.Context, req *adminpb.GetServiceAccountKeyRequest, opts ...gax.CallOption) (*adminpb.ServiceAccountKey, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetServiceAccountKey[0:len(c.CallOptions.GetServiceAccountKey):len(c.CallOptions.GetServiceAccountKey)], opts...) var resp *adminpb.ServiceAccountKey err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.iamClient.GetServiceAccountKey(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, err } return resp, nil } // CreateServiceAccountKey creates a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] // and returns it. func (c *IamClient) CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest, opts ...gax.CallOption) (*adminpb.ServiceAccountKey, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.CreateServiceAccountKey[0:len(c.CallOptions.CreateServiceAccountKey):len(c.CallOptions.CreateServiceAccountKey)], opts...) var resp *adminpb.ServiceAccountKey err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.iamClient.CreateServiceAccountKey(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // DeleteServiceAccountKey deletes a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey]. func (c *IamClient) DeleteServiceAccountKey(ctx context.Context, req *adminpb.DeleteServiceAccountKeyRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteServiceAccountKey[0:len(c.CallOptions.DeleteServiceAccountKey):len(c.CallOptions.DeleteServiceAccountKey)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.iamClient.DeleteServiceAccountKey(ctx, req, settings.GRPC...) return err }, opts...) return err } // SignBlob signs a blob using a service account's system-managed private key. func (c *IamClient) SignBlob(ctx context.Context, req *adminpb.SignBlobRequest, opts ...gax.CallOption) (*adminpb.SignBlobResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.SignBlob[0:len(c.CallOptions.SignBlob):len(c.CallOptions.SignBlob)], opts...) 
var resp *adminpb.SignBlobResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.iamClient.SignBlob(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // getIamPolicy returns the IAM access control policy for a // [ServiceAccount][google.iam.admin.v1.ServiceAccount]. func (c *IamClient) getIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetIamPolicy[0:len(c.CallOptions.GetIamPolicy):len(c.CallOptions.GetIamPolicy)], opts...) var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.iamClient.GetIamPolicy(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // setIamPolicy sets the IAM access control policy for a // [ServiceAccount][google.iam.admin.v1.ServiceAccount]. func (c *IamClient) setIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.SetIamPolicy[0:len(c.CallOptions.SetIamPolicy):len(c.CallOptions.SetIamPolicy)], opts...) var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.iamClient.SetIamPolicy(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // TestIamPermissions tests the specified permissions against the IAM access control policy // for a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. 
func (c *IamClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...) var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.iamClient.TestIamPermissions(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // QueryGrantableRoles queries roles that can be granted on a particular resource. // A role is grantable if it can be used as the role in a binding for a policy // for that resource. func (c *IamClient) QueryGrantableRoles(ctx context.Context, req *adminpb.QueryGrantableRolesRequest, opts ...gax.CallOption) (*adminpb.QueryGrantableRolesResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.QueryGrantableRoles[0:len(c.CallOptions.QueryGrantableRoles):len(c.CallOptions.QueryGrantableRoles)], opts...) var resp *adminpb.QueryGrantableRolesResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.iamClient.QueryGrantableRoles(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // ServiceAccountIterator manages a stream of *adminpb.ServiceAccount. type ServiceAccountIterator struct { items []*adminpb.ServiceAccount pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. 
InternalFetch func(pageSize int, pageToken string) (results []*adminpb.ServiceAccount, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *ServiceAccountIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *ServiceAccountIterator) Next() (*adminpb.ServiceAccount, error) { var item *adminpb.ServiceAccount if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *ServiceAccountIterator) bufLen() int { return len(it.items) } func (it *ServiceAccountIterator) takeBuf() interface{} { b := it.items it.items = nil return b } golang-google-cloud-0.9.0/iam/admin/apiv1/iam_client_example_test.go000066400000000000000000000125071312234511600254330ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package admin_test import ( "cloud.google.com/go/iam/admin/apiv1" "golang.org/x/net/context" "google.golang.org/api/iterator" adminpb "google.golang.org/genproto/googleapis/iam/admin/v1" iampb "google.golang.org/genproto/googleapis/iam/v1" ) func ExampleNewIamClient() { ctx := context.Background() c, err := admin.NewIamClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleIamClient_ListServiceAccounts() { ctx := context.Background() c, err := admin.NewIamClient(ctx) if err != nil { // TODO: Handle error. } req := &adminpb.ListServiceAccountsRequest{ // TODO: Fill request struct fields. } it := c.ListServiceAccounts(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleIamClient_GetServiceAccount() { ctx := context.Background() c, err := admin.NewIamClient(ctx) if err != nil { // TODO: Handle error. } req := &adminpb.GetServiceAccountRequest{ // TODO: Fill request struct fields. } resp, err := c.GetServiceAccount(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleIamClient_CreateServiceAccount() { ctx := context.Background() c, err := admin.NewIamClient(ctx) if err != nil { // TODO: Handle error. } req := &adminpb.CreateServiceAccountRequest{ // TODO: Fill request struct fields. } resp, err := c.CreateServiceAccount(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleIamClient_UpdateServiceAccount() { ctx := context.Background() c, err := admin.NewIamClient(ctx) if err != nil { // TODO: Handle error. } req := &adminpb.ServiceAccount{ // TODO: Fill request struct fields. } resp, err := c.UpdateServiceAccount(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleIamClient_DeleteServiceAccount() { ctx := context.Background() c, err := admin.NewIamClient(ctx) if err != nil { // TODO: Handle error. 
} req := &adminpb.DeleteServiceAccountRequest{ // TODO: Fill request struct fields. } err = c.DeleteServiceAccount(ctx, req) if err != nil { // TODO: Handle error. } } func ExampleIamClient_ListServiceAccountKeys() { ctx := context.Background() c, err := admin.NewIamClient(ctx) if err != nil { // TODO: Handle error. } req := &adminpb.ListServiceAccountKeysRequest{ // TODO: Fill request struct fields. } resp, err := c.ListServiceAccountKeys(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleIamClient_GetServiceAccountKey() { ctx := context.Background() c, err := admin.NewIamClient(ctx) if err != nil { // TODO: Handle error. } req := &adminpb.GetServiceAccountKeyRequest{ // TODO: Fill request struct fields. } resp, err := c.GetServiceAccountKey(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleIamClient_CreateServiceAccountKey() { ctx := context.Background() c, err := admin.NewIamClient(ctx) if err != nil { // TODO: Handle error. } req := &adminpb.CreateServiceAccountKeyRequest{ // TODO: Fill request struct fields. } resp, err := c.CreateServiceAccountKey(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleIamClient_DeleteServiceAccountKey() { ctx := context.Background() c, err := admin.NewIamClient(ctx) if err != nil { // TODO: Handle error. } req := &adminpb.DeleteServiceAccountKeyRequest{ // TODO: Fill request struct fields. } err = c.DeleteServiceAccountKey(ctx, req) if err != nil { // TODO: Handle error. } } func ExampleIamClient_SignBlob() { ctx := context.Background() c, err := admin.NewIamClient(ctx) if err != nil { // TODO: Handle error. } req := &adminpb.SignBlobRequest{ // TODO: Fill request struct fields. } resp, err := c.SignBlob(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. 
_ = resp } func ExampleIamClient_TestIamPermissions() { ctx := context.Background() c, err := admin.NewIamClient(ctx) if err != nil { // TODO: Handle error. } req := &iampb.TestIamPermissionsRequest{ // TODO: Fill request struct fields. } resp, err := c.TestIamPermissions(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleIamClient_QueryGrantableRoles() { ctx := context.Background() c, err := admin.NewIamClient(ctx) if err != nil { // TODO: Handle error. } req := &adminpb.QueryGrantableRolesRequest{ // TODO: Fill request struct fields. } resp, err := c.QueryGrantableRoles(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } golang-google-cloud-0.9.0/iam/admin/apiv1/mock_test.go000066400000000000000000000771761312234511600225620ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package admin import ( emptypb "github.com/golang/protobuf/ptypes/empty" adminpb "google.golang.org/genproto/googleapis/iam/admin/v1" iampb "google.golang.org/genproto/googleapis/iam/v1" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockIamServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. adminpb.IAMServer reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockIamServer) ListServiceAccounts(ctx context.Context, req *adminpb.ListServiceAccountsRequest) (*adminpb.ListServiceAccountsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*adminpb.ListServiceAccountsResponse), nil } func (s *mockIamServer) GetServiceAccount(ctx context.Context, req *adminpb.GetServiceAccountRequest) (*adminpb.ServiceAccount, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*adminpb.ServiceAccount), nil } func (s *mockIamServer) CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest) 
(*adminpb.ServiceAccount, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*adminpb.ServiceAccount), nil } func (s *mockIamServer) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount) (*adminpb.ServiceAccount, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*adminpb.ServiceAccount), nil } func (s *mockIamServer) DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockIamServer) ListServiceAccountKeys(ctx context.Context, req *adminpb.ListServiceAccountKeysRequest) (*adminpb.ListServiceAccountKeysResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*adminpb.ListServiceAccountKeysResponse), nil } func (s *mockIamServer) GetServiceAccountKey(ctx context.Context, req *adminpb.GetServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || 
!strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*adminpb.ServiceAccountKey), nil } func (s *mockIamServer) CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*adminpb.ServiceAccountKey), nil } func (s *mockIamServer) DeleteServiceAccountKey(ctx context.Context, req *adminpb.DeleteServiceAccountKeyRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockIamServer) SignBlob(ctx context.Context, req *adminpb.SignBlobRequest) (*adminpb.SignBlobResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*adminpb.SignBlobResponse), nil } func (s *mockIamServer) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } 
return s.resps[0].(*iampb.Policy), nil } func (s *mockIamServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*iampb.Policy), nil } func (s *mockIamServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*iampb.TestIamPermissionsResponse), nil } func (s *mockIamServer) QueryGrantableRoles(ctx context.Context, req *adminpb.QueryGrantableRolesRequest) (*adminpb.QueryGrantableRolesResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*adminpb.QueryGrantableRolesResponse), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. 
var clientOpt option.ClientOption var ( mockIam mockIamServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() adminpb.RegisterIAMServer(serv, &mockIam) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestIamListServiceAccounts(t *testing.T) { var nextPageToken string = "" var accountsElement *adminpb.ServiceAccount = &adminpb.ServiceAccount{} var accounts = []*adminpb.ServiceAccount{accountsElement} var expectedResponse = &adminpb.ListServiceAccountsResponse{ NextPageToken: nextPageToken, Accounts: accounts, } mockIam.err = nil mockIam.reqs = nil mockIam.resps = append(mockIam.resps[:0], expectedResponse) var formattedName string = IamProjectPath("[PROJECT]") var request = &adminpb.ListServiceAccountsRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListServiceAccounts(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Accounts[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestIamListServiceAccountsError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var formattedName string = IamProjectPath("[PROJECT]") var request = &adminpb.ListServiceAccountsRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListServiceAccounts(context.Background(), request).Next() if st, ok 
:= gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestIamGetServiceAccount(t *testing.T) { var name2 string = "name2-1052831874" var projectId string = "projectId-1969970175" var uniqueId string = "uniqueId-538310583" var email string = "email96619420" var displayName string = "displayName1615086568" var etag []byte = []byte("21") var oauth2ClientId string = "oauth2ClientId-1833466037" var expectedResponse = &adminpb.ServiceAccount{ Name: name2, ProjectId: projectId, UniqueId: uniqueId, Email: email, DisplayName: displayName, Etag: etag, Oauth2ClientId: oauth2ClientId, } mockIam.err = nil mockIam.reqs = nil mockIam.resps = append(mockIam.resps[:0], expectedResponse) var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var request = &adminpb.GetServiceAccountRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetServiceAccount(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestIamGetServiceAccountError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var request = &adminpb.GetServiceAccountRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetServiceAccount(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code 
%q, want %q", c, errCode) } _ = resp } func TestIamCreateServiceAccount(t *testing.T) { var name2 string = "name2-1052831874" var projectId string = "projectId-1969970175" var uniqueId string = "uniqueId-538310583" var email string = "email96619420" var displayName string = "displayName1615086568" var etag []byte = []byte("21") var oauth2ClientId string = "oauth2ClientId-1833466037" var expectedResponse = &adminpb.ServiceAccount{ Name: name2, ProjectId: projectId, UniqueId: uniqueId, Email: email, DisplayName: displayName, Etag: etag, Oauth2ClientId: oauth2ClientId, } mockIam.err = nil mockIam.reqs = nil mockIam.resps = append(mockIam.resps[:0], expectedResponse) var formattedName string = IamProjectPath("[PROJECT]") var accountId string = "accountId-803333011" var request = &adminpb.CreateServiceAccountRequest{ Name: formattedName, AccountId: accountId, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateServiceAccount(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestIamCreateServiceAccountError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var formattedName string = IamProjectPath("[PROJECT]") var accountId string = "accountId-803333011" var request = &adminpb.CreateServiceAccountRequest{ Name: formattedName, AccountId: accountId, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateServiceAccount(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func 
TestIamUpdateServiceAccount(t *testing.T) { var name string = "name3373707" var projectId string = "projectId-1969970175" var uniqueId string = "uniqueId-538310583" var email string = "email96619420" var displayName string = "displayName1615086568" var etag2 []byte = []byte("-120") var oauth2ClientId string = "oauth2ClientId-1833466037" var expectedResponse = &adminpb.ServiceAccount{ Name: name, ProjectId: projectId, UniqueId: uniqueId, Email: email, DisplayName: displayName, Etag: etag2, Oauth2ClientId: oauth2ClientId, } mockIam.err = nil mockIam.reqs = nil mockIam.resps = append(mockIam.resps[:0], expectedResponse) var etag []byte = []byte("21") var request = &adminpb.ServiceAccount{ Etag: etag, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateServiceAccount(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestIamUpdateServiceAccountError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var etag []byte = []byte("21") var request = &adminpb.ServiceAccount{ Etag: etag, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateServiceAccount(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestIamDeleteServiceAccount(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockIam.err = nil mockIam.reqs = nil mockIam.resps = append(mockIam.resps[:0], expectedResponse) var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var 
request = &adminpb.DeleteServiceAccountRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteServiceAccount(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestIamDeleteServiceAccountError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var request = &adminpb.DeleteServiceAccountRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteServiceAccount(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestIamListServiceAccountKeys(t *testing.T) { var expectedResponse *adminpb.ListServiceAccountKeysResponse = &adminpb.ListServiceAccountKeysResponse{} mockIam.err = nil mockIam.reqs = nil mockIam.resps = append(mockIam.resps[:0], expectedResponse) var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var request = &adminpb.ListServiceAccountKeysRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListServiceAccountKeys(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestIamListServiceAccountKeysError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var 
formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var request = &adminpb.ListServiceAccountKeysRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListServiceAccountKeys(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestIamGetServiceAccountKey(t *testing.T) { var name2 string = "name2-1052831874" var privateKeyData []byte = []byte("-58") var publicKeyData []byte = []byte("-96") var expectedResponse = &adminpb.ServiceAccountKey{ Name: name2, PrivateKeyData: privateKeyData, PublicKeyData: publicKeyData, } mockIam.err = nil mockIam.reqs = nil mockIam.resps = append(mockIam.resps[:0], expectedResponse) var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") var request = &adminpb.GetServiceAccountKeyRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetServiceAccountKey(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestIamGetServiceAccountKeyError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") var request = &adminpb.GetServiceAccountKeyRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetServiceAccountKey(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error 
%v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestIamCreateServiceAccountKey(t *testing.T) { var name2 string = "name2-1052831874" var privateKeyData []byte = []byte("-58") var publicKeyData []byte = []byte("-96") var expectedResponse = &adminpb.ServiceAccountKey{ Name: name2, PrivateKeyData: privateKeyData, PublicKeyData: publicKeyData, } mockIam.err = nil mockIam.reqs = nil mockIam.resps = append(mockIam.resps[:0], expectedResponse) var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var request = &adminpb.CreateServiceAccountKeyRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateServiceAccountKey(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestIamCreateServiceAccountKeyError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var request = &adminpb.CreateServiceAccountKeyRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateServiceAccountKey(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestIamDeleteServiceAccountKey(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockIam.err = nil mockIam.reqs = nil mockIam.resps = append(mockIam.resps[:0], expectedResponse) var 
formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") var request = &adminpb.DeleteServiceAccountKeyRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteServiceAccountKey(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestIamDeleteServiceAccountKeyError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") var request = &adminpb.DeleteServiceAccountKeyRequest{ Name: formattedName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteServiceAccountKey(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestIamSignBlob(t *testing.T) { var keyId string = "keyId-1134673157" var signature []byte = []byte("-72") var expectedResponse = &adminpb.SignBlobResponse{ KeyId: keyId, Signature: signature, } mockIam.err = nil mockIam.reqs = nil mockIam.resps = append(mockIam.resps[:0], expectedResponse) var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var bytesToSign []byte = []byte("45") var request = &adminpb.SignBlobRequest{ Name: formattedName, BytesToSign: bytesToSign, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.SignBlob(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want 
%q)", got, want) } } func TestIamSignBlobError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var bytesToSign []byte = []byte("45") var request = &adminpb.SignBlobRequest{ Name: formattedName, BytesToSign: bytesToSign, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.SignBlob(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestIamGetIamPolicy(t *testing.T) { var version int32 = 351608024 var etag []byte = []byte("21") var expectedResponse = &iampb.Policy{ Version: version, Etag: etag, } mockIam.err = nil mockIam.reqs = nil mockIam.resps = append(mockIam.resps[:0], expectedResponse) var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var request = &iampb.GetIamPolicyRequest{ Resource: formattedResource, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.getIamPolicy(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestIamGetIamPolicyError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var request = &iampb.GetIamPolicyRequest{ Resource: formattedResource, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.getIamPolicy(context.Background(), request) if st, ok := 
gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestIamSetIamPolicy(t *testing.T) { var version int32 = 351608024 var etag []byte = []byte("21") var expectedResponse = &iampb.Policy{ Version: version, Etag: etag, } mockIam.err = nil mockIam.reqs = nil mockIam.resps = append(mockIam.resps[:0], expectedResponse) var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var policy *iampb.Policy = &iampb.Policy{} var request = &iampb.SetIamPolicyRequest{ Resource: formattedResource, Policy: policy, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.setIamPolicy(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestIamSetIamPolicyError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var policy *iampb.Policy = &iampb.Policy{} var request = &iampb.SetIamPolicyRequest{ Resource: formattedResource, Policy: policy, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.setIamPolicy(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestIamTestIamPermissions(t *testing.T) { var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} mockIam.err = nil mockIam.reqs = nil mockIam.resps = 
append(mockIam.resps[:0], expectedResponse) var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var permissions []string = nil var request = &iampb.TestIamPermissionsRequest{ Resource: formattedResource, Permissions: permissions, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.TestIamPermissions(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestIamTestIamPermissionsError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") var permissions []string = nil var request = &iampb.TestIamPermissionsRequest{ Resource: formattedResource, Permissions: permissions, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.TestIamPermissions(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestIamQueryGrantableRoles(t *testing.T) { var expectedResponse *adminpb.QueryGrantableRolesResponse = &adminpb.QueryGrantableRolesResponse{} mockIam.err = nil mockIam.reqs = nil mockIam.resps = append(mockIam.resps[:0], expectedResponse) var fullResourceName string = "fullResourceName1300993644" var request = &adminpb.QueryGrantableRolesRequest{ FullResourceName: fullResourceName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.QueryGrantableRoles(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := 
request, mockIam.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestIamQueryGrantableRolesError(t *testing.T) { errCode := codes.PermissionDenied mockIam.err = gstatus.Error(errCode, "test error") var fullResourceName string = "fullResourceName1300993644" var request = &adminpb.QueryGrantableRolesRequest{ FullResourceName: fullResourceName, } c, err := NewIamClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.QueryGrantableRoles(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } golang-google-cloud-0.9.0/iam/admin/apiv1/policy_methods.go000066400000000000000000000033001312234511600235660ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // This is handwritten code. These methods are implemented by hand so they can use // the iam.Policy type. package admin import ( "cloud.google.com/go/iam" "golang.org/x/net/context" iampb "google.golang.org/genproto/googleapis/iam/v1" ) // GetIamPolicy returns the IAM access control policy for a ServiceAccount. 
func (c *IamClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iam.Policy, error) { policy, err := c.getIamPolicy(ctx, req) if err != nil { return nil, err } return &iam.Policy{InternalProto: policy}, nil } // SetIamPolicyRequest is the request type for the SetIamPolicy method. type SetIamPolicyRequest struct { Resource string Policy *iam.Policy } // SetIamPolicy sets the IAM access control policy for a ServiceAccount. func (c *IamClient) SetIamPolicy(ctx context.Context, req *SetIamPolicyRequest) (*iam.Policy, error) { preq := &iampb.SetIamPolicyRequest{ Resource: req.Resource, Policy: req.Policy.InternalProto, } policy, err := c.setIamPolicy(ctx, preq) if err != nil { return nil, err } return &iam.Policy{InternalProto: policy}, nil } golang-google-cloud-0.9.0/iam/iam.go000066400000000000000000000165031312234511600172130ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package iam supports the resource-specific operations of Google Cloud // IAM (Identity and Access Management) for the Google Cloud Libraries. // See https://cloud.google.com/iam for more about IAM. // // Users of the Google Cloud Libraries will typically not use this package // directly. Instead they will begin with some resource that supports IAM, like // a pubsub topic, and call its IAM method to get a Handle for that resource. 
package iam import ( "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/grpc" ) // client abstracts the IAMPolicy API to allow multiple implementations. type client interface { Get(ctx context.Context, resource string) (*pb.Policy, error) Set(ctx context.Context, resource string, p *pb.Policy) error Test(ctx context.Context, resource string, perms []string) ([]string, error) } // grpcClient implements client for the standard gRPC-based IAMPolicy service. type grpcClient struct { c pb.IAMPolicyClient } func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource}) if err != nil { return nil, err } return proto, nil } func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ Resource: resource, Policy: p, }) return err } func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ Resource: resource, Permissions: perms, }) if err != nil { return nil, err } return res.Permissions, nil } // A Handle provides IAM operations for a resource. type Handle struct { c client resource string } // InternalNewHandle is for use by the Google Cloud Libraries only. // // InternalNewHandle returns a Handle for resource. // The conn parameter refers to a server that must support the IAMPolicy service. func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle { return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource) } // InternalNewHandleClient is for use by the Google Cloud Libraries only. // // InternalNewHandleClient returns a Handle for resource using the given // client implementation. 
func InternalNewHandleClient(c client, resource string) *Handle { return &Handle{ c: c, resource: resource, } } // Policy retrieves the IAM policy for the resource. func (h *Handle) Policy(ctx context.Context) (*Policy, error) { proto, err := h.c.Get(ctx, h.resource) if err != nil { return nil, err } return &Policy{InternalProto: proto}, nil } // SetPolicy replaces the resource's current policy with the supplied Policy. // // If policy was created from a prior call to Get, then the modification will // only succeed if the policy has not changed since the Get. func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error { return h.c.Set(ctx, h.resource, policy.InternalProto) } // TestPermissions returns the subset of permissions that the caller has on the resource. func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { return h.c.Test(ctx, h.resource, permissions) } // A RoleName is a name representing a collection of permissions. type RoleName string // Common role names. const ( Owner RoleName = "roles/owner" Editor RoleName = "roles/editor" Viewer RoleName = "roles/viewer" ) const ( // AllUsers is a special member that denotes all users, even unauthenticated ones. AllUsers = "allUsers" // AllAuthenticatedUsers is a special member that denotes all authenticated users. AllAuthenticatedUsers = "allAuthenticatedUsers" ) // A Policy is a list of Bindings representing roles // granted to members. // // The zero Policy is a valid policy with no bindings. type Policy struct { // TODO(jba): when type aliases are available, put Policy into an internal package // and provide an exported alias here. // This field is exported for use by the Google Cloud Libraries only. // It may become unexported in a future release. InternalProto *pb.Policy } // Members returns the list of members with the supplied role. // The return value should not be modified. Use Add and Remove // to modify the members of a role. 
func (p *Policy) Members(r RoleName) []string { b := p.binding(r) if b == nil { return nil } return b.Members } // HasRole reports whether member has role r. func (p *Policy) HasRole(member string, r RoleName) bool { return memberIndex(member, p.binding(r)) >= 0 } // Add adds member member to role r if it is not already present. // A new binding is created if there is no binding for the role. func (p *Policy) Add(member string, r RoleName) { b := p.binding(r) if b == nil { if p.InternalProto == nil { p.InternalProto = &pb.Policy{} } p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ Role: string(r), Members: []string{member}, }) return } if memberIndex(member, b) < 0 { b.Members = append(b.Members, member) return } } // Remove removes member from role r if it is present. func (p *Policy) Remove(member string, r RoleName) { bi := p.bindingIndex(r) if bi < 0 { return } bindings := p.InternalProto.Bindings b := bindings[bi] mi := memberIndex(member, b) if mi < 0 { return } // Order doesn't matter for bindings or members, so to remove, move the last item // into the removed spot and shrink the slice. if len(b.Members) == 1 { // Remove binding. last := len(bindings) - 1 bindings[bi] = bindings[last] bindings[last] = nil p.InternalProto.Bindings = bindings[:last] return } // Remove member. // TODO(jba): worry about multiple copies of m? last := len(b.Members) - 1 b.Members[mi] = b.Members[last] b.Members[last] = "" b.Members = b.Members[:last] } // Roles returns the names of all the roles that appear in the Policy. func (p *Policy) Roles() []RoleName { if p.InternalProto == nil { return nil } var rns []RoleName for _, b := range p.InternalProto.Bindings { rns = append(rns, RoleName(b.Role)) } return rns } // binding returns the Binding for the suppied role, or nil if there isn't one. 
func (p *Policy) binding(r RoleName) *pb.Binding { i := p.bindingIndex(r) if i < 0 { return nil } return p.InternalProto.Bindings[i] } func (p *Policy) bindingIndex(r RoleName) int { if p.InternalProto == nil { return -1 } for i, b := range p.InternalProto.Bindings { if b.Role == string(r) { return i } } return -1 } // memberIndex returns the index of m in b's Members, or -1 if not found. func memberIndex(m string, b *pb.Binding) int { if b == nil { return -1 } for i, mm := range b.Members { if mm == m { return i } } return -1 } golang-google-cloud-0.9.0/iam/iam_test.go000066400000000000000000000044651312234511600202560ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package iam import ( "fmt" "reflect" "sort" "testing" ) func TestPolicy(t *testing.T) { p := &Policy{} add := func(member string, role RoleName) { p.Add(member, role) } remove := func(member string, role RoleName) { p.Remove(member, role) } if msg, ok := checkMembers(p, Owner, nil); !ok { t.Fatal(msg) } add("m1", Owner) if msg, ok := checkMembers(p, Owner, []string{"m1"}); !ok { t.Fatal(msg) } add("m2", Owner) if msg, ok := checkMembers(p, Owner, []string{"m1", "m2"}); !ok { t.Fatal(msg) } add("m1", Owner) // duplicate adds ignored if msg, ok := checkMembers(p, Owner, []string{"m1", "m2"}); !ok { t.Fatal(msg) } // No other roles populated yet. 
if msg, ok := checkMembers(p, Viewer, nil); !ok { t.Fatal(msg) } remove("m1", Owner) if msg, ok := checkMembers(p, Owner, []string{"m2"}); !ok { t.Fatal(msg) } if msg, ok := checkMembers(p, Viewer, nil); !ok { t.Fatal(msg) } remove("m3", Owner) // OK to remove non-existent member. if msg, ok := checkMembers(p, Owner, []string{"m2"}); !ok { t.Fatal(msg) } remove("m2", Owner) if msg, ok := checkMembers(p, Owner, nil); !ok { t.Fatal(msg) } if got, want := p.Roles(), []RoleName(nil); !reflect.DeepEqual(got, want) { t.Fatalf("roles: got %v, want %v", got, want) } } func checkMembers(p *Policy, role RoleName, wantMembers []string) (string, bool) { gotMembers := p.Members(role) sort.Strings(gotMembers) sort.Strings(wantMembers) if !reflect.DeepEqual(gotMembers, wantMembers) { return fmt.Sprintf("got %v, want %v", gotMembers, wantMembers), false } for _, m := range wantMembers { if !p.HasRole(m, role) { return fmt.Sprintf("member %q should have role %s but does not", m, role), false } } return "", true } golang-google-cloud-0.9.0/internal/000077500000000000000000000000001312234511600171575ustar00rootroot00000000000000golang-google-cloud-0.9.0/internal/atomiccache/000077500000000000000000000000001312234511600214175ustar00rootroot00000000000000golang-google-cloud-0.9.0/internal/atomiccache/atomiccache.go000066400000000000000000000033561312234511600242150ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// Package atomiccache provides a map-based cache that supports very fast // reads. package atomiccache import ( "sync" "sync/atomic" ) type mapType map[interface{}]interface{} // Cache is a map-based cache that supports fast reads via use of atomics. // Writes are slow, requiring a copy of the entire cache. // The zero Cache is an empty cache, ready for use. type Cache struct { val atomic.Value // mapType mu sync.Mutex // used only by writers } // Get returns the value of the cache at key. If there is no value, // getter is called to provide one, and the cache is updated. // The getter function may be called concurrently. It should be pure, // returning the same value for every call. func (c *Cache) Get(key interface{}, getter func() interface{}) interface{} { mp, _ := c.val.Load().(mapType) if v, ok := mp[key]; ok { return v } // Compute value without lock. // Might duplicate effort but won't hold other computations back. newV := getter() c.mu.Lock() mp, _ = c.val.Load().(mapType) newM := make(mapType, len(mp)+1) for k, v := range mp { newM[k] = v } newM[key] = newV c.val.Store(newM) c.mu.Unlock() return newV } golang-google-cloud-0.9.0/internal/atomiccache/atomiccache_test.go000066400000000000000000000022331312234511600252450ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package atomiccache import ( "fmt" "testing" ) func TestGet(t *testing.T) { var c Cache called := false get := func(k interface{}) interface{} { return c.Get(k, func() interface{} { called = true return fmt.Sprintf("v%d", k) }) } got := get(1) if want := "v1"; got != want { t.Errorf("got %v, want %v", got, want) } if !called { t.Error("getter not called, expected a call") } called = false got = get(1) if want := "v1"; got != want { t.Errorf("got %v, want %v", got, want) } if called { t.Error("getter unexpectedly called") } } golang-google-cloud-0.9.0/internal/fields/000077500000000000000000000000001312234511600204255ustar00rootroot00000000000000golang-google-cloud-0.9.0/internal/fields/fields.go000066400000000000000000000337571312234511600222410ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package fields provides a view of the fields of a struct that follows the Go // rules, amended to consider tags and case insensitivity. // // Usage // // First define a function that interprets tags: // // func parseTag(st reflect.StructTag) (name string, keep bool, other interface{}, err error) { ... } // // The function's return values describe whether to ignore the field // completely or provide an alternate name, as well as other data from the // parse that is stored to avoid re-parsing. // // Then define a function to validate the type: // // func validate(t reflect.Type) error { ... 
} // // Then, if necessary, define a function to specify leaf types - types // which should be considered one field and not be recursed into: // // func isLeafType(t reflect.Type) bool { ... } // // eg: // // func isLeafType(t reflect.Type) bool { // return t == reflect.TypeOf(time.Time{}) // } // // Next, construct a Cache, passing your functions. As its name suggests, a // Cache remembers validation and field information for a type, so subsequent // calls with the same type are very fast. // // cache := fields.NewCache(parseTag, validate, isLeafType) // // To get the fields of a struct type as determined by the above rules, call // the Fields method: // // fields, err := cache.Fields(reflect.TypeOf(MyStruct{})) // // The return value can be treated as a slice of Fields. // // Given a string, such as a key or column name obtained during unmarshalling, // call Match on the list of fields to find a field whose name is the best // match: // // field := fields.Match(name) // // Match looks for an exact match first, then falls back to a case-insensitive // comparison. package fields import ( "bytes" "reflect" "sort" "cloud.google.com/go/internal/atomiccache" ) // A Field records information about a struct field. type Field struct { Name string // effective field name NameFromTag bool // did Name come from a tag? Type reflect.Type // field type Index []int // index sequence, for reflect.Value.FieldByIndex ParsedTag interface{} // third return value of the parseTag function nameBytes []byte equalFold func(s, t []byte) bool } type ParseTagFunc func(reflect.StructTag) (name string, keep bool, other interface{}, err error) type ValidateFunc func(reflect.Type) error type LeafTypesFunc func(reflect.Type) bool // A Cache records information about the fields of struct types. // // A Cache is safe for use by multiple goroutines. 
type Cache struct { parseTag ParseTagFunc validate ValidateFunc leafTypes LeafTypesFunc cache atomiccache.Cache // from reflect.Type to cacheValue } // NewCache constructs a Cache. // // Its first argument should be a function that accepts // a struct tag and returns four values: an alternative name for the field // extracted from the tag, a boolean saying whether to keep the field or ignore // it, additional data that is stored with the field information to avoid // having to parse the tag again, and an error. // // Its second argument should be a function that accepts a reflect.Type and // returns an error if the struct type is invalid in any way. For example, it // may check that all of the struct field tags are valid, or that all fields // are of an appropriate type. func NewCache(parseTag ParseTagFunc, validate ValidateFunc, leafTypes LeafTypesFunc) *Cache { if parseTag == nil { parseTag = func(reflect.StructTag) (string, bool, interface{}, error) { return "", true, nil, nil } } if validate == nil { validate = func(reflect.Type) error { return nil } } if leafTypes == nil { leafTypes = func(reflect.Type) bool { return false } } return &Cache{ parseTag: parseTag, validate: validate, leafTypes: leafTypes, } } // A fieldScan represents an item on the fieldByNameFunc scan work list. type fieldScan struct { typ reflect.Type index []int } // Fields returns all the exported fields of t, which must be a struct type. It // follows the standard Go rules for embedded fields, modified by the presence // of tags. The result is sorted lexicographically by index. // // These rules apply in the absence of tags: // Anonymous struct fields are treated as if their inner exported fields were // fields in the outer struct (embedding). The result includes all fields that // aren't shadowed by fields at higher level of embedding. If more than one // field with the same name exists at the same level of embedding, it is // excluded. 
An anonymous field that is not of struct type is treated as having // its type as its name. // // Tags modify these rules as follows: // A field's tag is used as its name. // An anonymous struct field with a name given in its tag is treated as // a field having that name, rather than an embedded struct (the struct's // fields will not be returned). // If more than one field with the same name exists at the same level of embedding, // but exactly one of them is tagged, then the tagged field is reported and the others // are ignored. func (c *Cache) Fields(t reflect.Type) (List, error) { if t.Kind() != reflect.Struct { panic("fields: Fields of non-struct type") } return c.cachedTypeFields(t) } // A List is a list of Fields. type List []Field // Match returns the field in the list whose name best matches the supplied // name, nor nil if no field does. If there is a field with the exact name, it // is returned. Otherwise the first field (sorted by index) whose name matches // case-insensitively is returned. func (l List) Match(name string) *Field { return l.MatchBytes([]byte(name)) } // MatchBytes is identical to Match, except that the argument is a byte slice. func (l List) MatchBytes(name []byte) *Field { var f *Field for i := range l { ff := &l[i] if bytes.Equal(ff.nameBytes, name) { return ff } if f == nil && ff.equalFold(ff.nameBytes, name) { f = ff } } return f } type cacheValue struct { fields List err error } // cachedTypeFields is like typeFields but uses a cache to avoid repeated work. // This code has been copied and modified from // https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/encode.go. 
func (c *Cache) cachedTypeFields(t reflect.Type) (List, error) { cv := c.cache.Get(t, func() interface{} { if err := c.validate(t); err != nil { return cacheValue{nil, err} } f, err := c.typeFields(t) return cacheValue{List(f), err} }).(cacheValue) return cv.fields, cv.err } func (c *Cache) typeFields(t reflect.Type) ([]Field, error) { fields, err := c.listFields(t) if err != nil { return nil, err } sort.Sort(byName(fields)) // Delete all fields that are hidden by the Go rules for embedded fields. // The fields are sorted in primary order of name, secondary order of field // index length. So the first field with a given name is the dominant one. var out []Field for advance, i := 0, 0; i < len(fields); i += advance { // One iteration per name. // Find the sequence of fields with the name of this first field. fi := fields[i] name := fi.Name for advance = 1; i+advance < len(fields); advance++ { fj := fields[i+advance] if fj.Name != name { break } } // Find the dominant field, if any, out of all fields that have the same name. dominant, ok := dominantField(fields[i : i+advance]) if ok { out = append(out, dominant) } } sort.Sort(byIndex(out)) return out, nil } func (c *Cache) listFields(t reflect.Type) ([]Field, error) { // This uses the same condition that the Go language does: there must be a unique instance // of the match at a given depth level. If there are multiple instances of a match at the // same depth, they annihilate each other and inhibit any possible match at a lower level. // The algorithm is breadth first search, one depth level at a time. // The current and next slices are work queues: // current lists the fields to visit on this depth level, // and next lists the fields on the next lower level. current := []fieldScan{} next := []fieldScan{{typ: t}} // nextCount records the number of times an embedded type has been // encountered and considered for queueing in the 'next' slice. // We only queue the first one, but we increment the count on each. 
// If a struct type T can be reached more than once at a given depth level, // then it annihilates itself and need not be considered at all when we // process that next depth level. var nextCount map[reflect.Type]int // visited records the structs that have been considered already. // Embedded pointer fields can create cycles in the graph of // reachable embedded types; visited avoids following those cycles. // It also avoids duplicated effort: if we didn't find the field in an // embedded type T at level 2, we won't find it in one at level 4 either. visited := map[reflect.Type]bool{} var fields []Field // Fields found. for len(next) > 0 { current, next = next, current[:0] count := nextCount nextCount = nil // Process all the fields at this depth, now listed in 'current'. // The loop queues embedded fields found in 'next', for processing during the next // iteration. The multiplicity of the 'current' field counts is recorded // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'. for _, scan := range current { t := scan.typ if visited[t] { // We've looked through this type before, at a higher level. // That higher level would shadow the lower level we're now at, // so this one can't be useful to us. Ignore it. continue } visited[t] = true for i := 0; i < t.NumField(); i++ { f := t.Field(i) exported := (f.PkgPath == "") // If a named field is unexported, ignore it. An anonymous // unexported field is processed, because it may contain // exported fields, which are visible. if !exported && !f.Anonymous { continue } // Examine the tag. tagName, keep, other, err := c.parseTag(f.Tag) if err != nil { return nil, err } if !keep { continue } if c.leafTypes(f.Type) { fields = append(fields, newField(f, tagName, other, scan.index, i)) continue } var ntyp reflect.Type if f.Anonymous { // Anonymous field of type T or *T. 
ntyp = f.Type if ntyp.Kind() == reflect.Ptr { ntyp = ntyp.Elem() } } // Record fields with a tag name, non-anonymous fields, or // anonymous non-struct fields. if tagName != "" || ntyp == nil || ntyp.Kind() != reflect.Struct { if !exported { continue } fields = append(fields, newField(f, tagName, other, scan.index, i)) if count[t] > 1 { // If there were multiple instances, add a second, // so that the annihilation code will see a duplicate. fields = append(fields, fields[len(fields)-1]) } continue } // Queue embedded struct fields for processing with next level, // but only if the embedded types haven't already been queued. if nextCount[ntyp] > 0 { nextCount[ntyp] = 2 // exact multiple doesn't matter continue } if nextCount == nil { nextCount = map[reflect.Type]int{} } nextCount[ntyp] = 1 if count[t] > 1 { nextCount[ntyp] = 2 // exact multiple doesn't matter } var index []int index = append(index, scan.index...) index = append(index, i) next = append(next, fieldScan{ntyp, index}) } } } return fields, nil } func newField(f reflect.StructField, tagName string, other interface{}, index []int, i int) Field { name := tagName if name == "" { name = f.Name } sf := Field{ Name: name, NameFromTag: tagName != "", Type: f.Type, ParsedTag: other, nameBytes: []byte(name), } sf.equalFold = foldFunc(sf.nameBytes) sf.Index = append(sf.Index, index...) sf.Index = append(sf.Index, i) return sf } // byName sorts fields using the following criteria, in order: // 1. name // 2. embedding depth // 3. tag presence (preferring a tagged field) // 4. index sequence. 
type byName []Field func (x byName) Len() int { return len(x) } func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x byName) Less(i, j int) bool { if x[i].Name != x[j].Name { return x[i].Name < x[j].Name } if len(x[i].Index) != len(x[j].Index) { return len(x[i].Index) < len(x[j].Index) } if x[i].NameFromTag != x[j].NameFromTag { return x[i].NameFromTag } return byIndex(x).Less(i, j) } // byIndex sorts field by index sequence. type byIndex []Field func (x byIndex) Len() int { return len(x) } func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x byIndex) Less(i, j int) bool { xi := x[i].Index xj := x[j].Index ln := len(xi) if l := len(xj); l < ln { ln = l } for k := 0; k < ln; k++ { if xi[k] != xj[k] { return xi[k] < xj[k] } } return len(xi) < len(xj) } // dominantField looks through the fields, all of which are known to have the // same name, to find the single field that dominates the others using Go's // embedding rules, modified by the presence of tags. If there are multiple // top-level fields, the boolean will be false: This condition is an error in // Go and we skip all the fields. func dominantField(fs []Field) (Field, bool) { // The fields are sorted in increasing index-length order, then by presence of tag. // That means that the first field is the dominant one. We need only check // for error cases: two fields at top level, either both tagged or neither tagged. if len(fs) > 1 && len(fs[0].Index) == len(fs[1].Index) && fs[0].NameFromTag == fs[1].NameFromTag { return Field{}, false } return fs[0], true } golang-google-cloud-0.9.0/internal/fields/fields_test.go000066400000000000000000000315031312234511600232630ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package fields import ( "encoding/json" "errors" "fmt" "reflect" "strings" "testing" "time" ) type embed1 struct { Em1 int Dup int // annihilates with embed2.Dup Shadow int embed3 } type embed2 struct { Dup int embed3 embed4 } type embed3 struct { Em3 int // annihilated because embed3 is in both embed1 and embed2 embed5 } type embed4 struct { Em4 int Dup int // annihilation of Dup in embed1, embed2 hides this Dup *embed1 // ignored because it occurs at a higher level } type embed5 struct { x int } type Anonymous int type S1 struct { Exported int unexported int Shadow int // shadows S1.Shadow embed1 *embed2 Anonymous } type Time struct { time.Time } var intType = reflect.TypeOf(int(0)) func field(name string, tval interface{}, index ...int) *Field { return &Field{ Name: name, Type: reflect.TypeOf(tval), Index: index, } } func tfield(name string, tval interface{}, index ...int) *Field { return &Field{ Name: name, Type: reflect.TypeOf(tval), Index: index, NameFromTag: true, } } func TestFieldsNoTags(t *testing.T) { c := NewCache(nil, nil, nil) got, err := c.Fields(reflect.TypeOf(S1{})) if err != nil { t.Fatal(err) } want := []*Field{ field("Exported", int(0), 0), field("Shadow", int(0), 2), field("Em1", int(0), 3, 0), field("Em4", int(0), 4, 2, 0), field("Anonymous", Anonymous(0), 5), } if msg, ok := compareFields(got, want); !ok { t.Error(msg) } } func TestAgainstJSONEncodingNoTags(t *testing.T) { // Demonstrates that this package produces the same set of fields as encoding/json. 
s1 := S1{ Exported: 1, unexported: 2, Shadow: 3, embed1: embed1{ Em1: 4, Dup: 5, Shadow: 6, embed3: embed3{ Em3: 7, embed5: embed5{x: 8}, }, }, embed2: &embed2{ Dup: 9, embed3: embed3{ Em3: 10, embed5: embed5{x: 11}, }, embed4: embed4{ Em4: 12, Dup: 13, embed1: &embed1{Em1: 14}, }, }, Anonymous: Anonymous(15), } var want S1 jsonRoundTrip(t, s1, &want) var got S1 got.embed2 = &embed2{} // need this because reflection won't create it fields, err := NewCache(nil, nil, nil).Fields(reflect.TypeOf(got)) if err != nil { t.Fatal(err) } setFields(fields, &got, s1) if !reflect.DeepEqual(got, want) { t.Errorf("got\n%+v\nwant\n%+v", got, want) } } // Tests use of LeafTypes parameter to NewCache func TestAgainstJSONEncodingEmbeddedTime(t *testing.T) { timeLeafFn := func(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) } // Demonstrates that this package can produce the same set of // fields as encoding/json for a struct with an embedded time.Time. now := time.Now().UTC() myt := Time{ now, } var want Time jsonRoundTrip(t, myt, &want) var got Time fields, err := NewCache(nil, nil, timeLeafFn).Fields(reflect.TypeOf(got)) if err != nil { t.Fatal(err) } setFields(fields, &got, myt) if !reflect.DeepEqual(got, want) { t.Errorf("got\n%+v\nwant\n%+v", got, want) } } type S2 struct { NoTag int XXX int `json:"tag"` // tag name takes precedence Anonymous `json:"anon"` // anonymous non-structs also get their name from the tag unexported int `json:"tag"` Embed `json:"em"` // embedded structs with tags become fields Tag int YYY int `json:"Tag"` // tag takes precedence over untagged field of the same name Empty int `json:""` // empty tag is noop tEmbed1 tEmbed2 } type Embed struct { Em int } type tEmbed1 struct { Dup int X int `json:"Dup2"` } type tEmbed2 struct { Y int `json:"Dup"` // takes precedence over tEmbed1.Dup because it is tagged Z int `json:"Dup2"` // same name as tEmbed1.X and both tagged, so ignored } func jsonTagParser(t reflect.StructTag) (name string, keep bool, 
other interface{}, err error) { s := t.Get("json") parts := strings.Split(s, ",") if parts[0] == "-" { return "", false, nil, nil } if len(parts) > 1 { other = parts[1:] } return parts[0], true, other, nil } func validateFunc(t reflect.Type) (err error) { if t.Kind() != reflect.Struct { return errors.New("non-struct type used") } for i := 0; i < t.NumField(); i++ { if t.Field(i).Type.Kind() == reflect.Slice { return fmt.Errorf("slice field found at field %s on struct %s", t.Field(i).Name, t.Name()) } } return nil } func TestFieldsWithTags(t *testing.T) { got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S2{})) if err != nil { t.Fatal(err) } want := []*Field{ field("NoTag", int(0), 0), tfield("tag", int(0), 1), tfield("anon", Anonymous(0), 2), tfield("em", Embed{}, 4), tfield("Tag", int(0), 6), field("Empty", int(0), 7), tfield("Dup", int(0), 8, 0), } if msg, ok := compareFields(got, want); !ok { t.Error(msg) } } func TestAgainstJSONEncodingWithTags(t *testing.T) { // Demonstrates that this package produces the same set of fields as encoding/json. s2 := S2{ NoTag: 1, XXX: 2, Anonymous: 3, Embed: Embed{ Em: 4, }, tEmbed1: tEmbed1{ Dup: 5, X: 6, }, tEmbed2: tEmbed2{ Y: 7, Z: 8, }, } var want S2 jsonRoundTrip(t, s2, &want) var got S2 fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(got)) if err != nil { t.Fatal(err) } setFields(fields, &got, s2) if !reflect.DeepEqual(got, want) { t.Errorf("got\n%+v\nwant\n%+v", got, want) } } func TestUnexportedAnonymousNonStruct(t *testing.T) { // An unexported anonymous non-struct field should not be recorded. // This is currently a bug in encoding/json. 
// https://github.com/golang/go/issues/18009 type ( u int v int S struct { u v `json:"x"` int } ) got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) if err != nil { t.Fatal(err) } if len(got) != 0 { t.Errorf("got %d fields, want 0", len(got)) } } func TestUnexportedAnonymousStruct(t *testing.T) { // An unexported anonymous struct with a tag is ignored. // This is currently a bug in encoding/json. // https://github.com/golang/go/issues/18009 type ( s1 struct{ X int } S2 struct { s1 `json:"Y"` } ) got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S2{})) if err != nil { t.Fatal(err) } if len(got) != 0 { t.Errorf("got %d fields, want 0", len(got)) } } func TestDominantField(t *testing.T) { // With fields sorted by index length and then by tag presence, // the dominant field is always the first. Make sure all error // cases are caught. for _, test := range []struct { fields []Field wantOK bool }{ // A single field is OK. {[]Field{{Index: []int{0}}}, true}, {[]Field{{Index: []int{0}, NameFromTag: true}}, true}, // A single field at top level is OK. {[]Field{{Index: []int{0}}, {Index: []int{1, 0}}}, true}, {[]Field{{Index: []int{0}}, {Index: []int{1, 0}, NameFromTag: true}}, true}, {[]Field{{Index: []int{0}, NameFromTag: true}, {Index: []int{1, 0}, NameFromTag: true}}, true}, // A single tagged field is OK. {[]Field{{Index: []int{0}, NameFromTag: true}, {Index: []int{1}}}, true}, // Two untagged fields at the same level is an error. {[]Field{{Index: []int{0}}, {Index: []int{1}}}, false}, // Two tagged fields at the same level is an error. 
{[]Field{{Index: []int{0}, NameFromTag: true}, {Index: []int{1}, NameFromTag: true}}, false}, } { _, gotOK := dominantField(test.fields) if gotOK != test.wantOK { t.Errorf("%v: got %t, want %t", test.fields, gotOK, test.wantOK) } } } func TestIgnore(t *testing.T) { type S struct { X int `json:"-"` } got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) if err != nil { t.Fatal(err) } if len(got) != 0 { t.Errorf("got %d fields, want 0", len(got)) } } func TestParsedTag(t *testing.T) { type S struct { X int `json:"name,omitempty"` } got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) if err != nil { t.Fatal(err) } want := []*Field{ {Name: "name", NameFromTag: true, Type: intType, Index: []int{0}, ParsedTag: []string{"omitempty"}}, } if msg, ok := compareFields(got, want); !ok { t.Error(msg) } } func TestValidateFunc(t *testing.T) { type MyInvalidStruct struct { A string B []int } _, err := NewCache(nil, validateFunc, nil).Fields(reflect.TypeOf(MyInvalidStruct{})) if err == nil { t.Fatal("expected error, got nil") } type MyValidStruct struct { A string B int } _, err = NewCache(nil, validateFunc, nil).Fields(reflect.TypeOf(MyValidStruct{})) if err != nil { t.Fatalf("expected nil, got error: %s\n", err) } } func compareFields(got []Field, want []*Field) (msg string, ok bool) { if len(got) != len(want) { return fmt.Sprintf("got %d fields, want %d", len(got), len(want)), false } for i, g := range got { w := *want[i] if !fieldsEqual(&g, &w) { return fmt.Sprintf("got %+v, want %+v", g, w), false } } return "", true } // Need this because Field contains a function, which cannot be compared even // by reflect.DeepEqual. func fieldsEqual(f1, f2 *Field) bool { if f1 == nil || f2 == nil { return f1 == f2 } return f1.Name == f2.Name && f1.NameFromTag == f2.NameFromTag && f1.Type == f2.Type && reflect.DeepEqual(f1.ParsedTag, f2.ParsedTag) } // Set the fields of dst from those of src. // dst must be a pointer to a struct value. 
// src must be a struct value. func setFields(fields []Field, dst, src interface{}) { vsrc := reflect.ValueOf(src) vdst := reflect.ValueOf(dst).Elem() for _, f := range fields { fdst := vdst.FieldByIndex(f.Index) fsrc := vsrc.FieldByIndex(f.Index) fdst.Set(fsrc) } } func jsonRoundTrip(t *testing.T, in, out interface{}) { bytes, err := json.Marshal(in) if err != nil { t.Fatal(err) } if err := json.Unmarshal(bytes, out); err != nil { t.Fatal(err) } } type S3 struct { S4 Abc int AbC int Tag int X int `json:"Tag"` unexported int } type S4 struct { ABc int Y int `json:"Abc"` // ignored because of top-level Abc } func TestMatchingField(t *testing.T) { fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S3{})) if err != nil { t.Fatal(err) } for _, test := range []struct { name string want *Field }{ // Exact match wins. {"Abc", field("Abc", int(0), 1)}, {"AbC", field("AbC", int(0), 2)}, {"ABc", field("ABc", int(0), 0, 0)}, // If there are multiple matches but no exact match or tag, // the first field wins, lexicographically by index. // Here, "ABc" is at a deeper embedding level, but since S4 appears // first in S3, its index precedes the other fields of S3. {"abc", field("ABc", int(0), 0, 0)}, // Tag name takes precedence over untagged field of the same name. {"Tag", tfield("Tag", int(0), 4)}, // Unexported fields disappear. {"unexported", nil}, // Untagged embedded structs disappear. 
{"S4", nil}, } { if got := fields.Match(test.name); !fieldsEqual(got, test.want) { t.Errorf("match %q:\ngot %+v\nwant %+v", test.name, got, test.want) } } } func TestAgainstJSONMatchingField(t *testing.T) { s3 := S3{ S4: S4{ABc: 1, Y: 2}, Abc: 3, AbC: 4, Tag: 5, X: 6, unexported: 7, } var want S3 jsonRoundTrip(t, s3, &want) v := reflect.ValueOf(want) fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S3{})) if err != nil { t.Fatal(err) } for _, test := range []struct { name string got int }{ {"Abc", 3}, {"AbC", 4}, {"ABc", 1}, {"abc", 1}, {"Tag", 6}, } { f := fields.Match(test.name) if f == nil { t.Fatalf("%s: no match", test.name) } w := v.FieldByIndex(f.Index).Interface() if test.got != w { t.Errorf("%s: got %d, want %d", test.name, test.got, w) } } } func TestTagErrors(t *testing.T) { called := false c := NewCache(func(t reflect.StructTag) (string, bool, interface{}, error) { called = true s := t.Get("f") if s == "bad" { return "", false, nil, errors.New("error") } return s, true, nil, nil }, nil, nil) type T struct { X int `f:"ok"` Y int `f:"bad"` } _, err := c.Fields(reflect.TypeOf(T{})) if !called { t.Fatal("tag parser not called") } if err == nil { t.Error("want error, got nil") } // Second time, we should cache the error. called = false _, err = c.Fields(reflect.TypeOf(T{})) if called { t.Fatal("tag parser called on second time") } if err == nil { t.Error("want error, got nil") } } golang-google-cloud-0.9.0/internal/fields/fold.go000066400000000000000000000077441312234511600217140ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package fields // This file was copied from https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/fold.go. // Only the license and package were changed. import ( "bytes" "unicode/utf8" ) const ( caseMask = ^byte(0x20) // Mask to ignore case in ASCII. kelvin = '\u212a' smallLongEss = '\u017f' ) // foldFunc returns one of four different case folding equivalence // functions, from most general (and slow) to fastest: // // 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 // 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') // 3) asciiEqualFold, no special, but includes non-letters (including _) // 4) simpleLetterEqualFold, no specials, no non-letters. // // The letters S and K are special because they map to 3 runes, not just 2: // * S maps to s and to U+017F 'ſ' Latin small letter long s // * k maps to K and to U+212A 'K' Kelvin sign // See https://play.golang.org/p/tTxjOc0OGo // // The returned function is specialized for matching against s and // should only be given s. It's not curried for performance reasons. func foldFunc(s []byte) func(s, t []byte) bool { nonLetter := false special := false // special letter for _, b := range s { if b >= utf8.RuneSelf { return bytes.EqualFold } upper := b & caseMask if upper < 'A' || upper > 'Z' { nonLetter = true } else if upper == 'K' || upper == 'S' { // See above for why these letters are special. 
special = true } } if special { return equalFoldRight } if nonLetter { return asciiEqualFold } return simpleLetterEqualFold } // equalFoldRight is a specialization of bytes.EqualFold when s is // known to be all ASCII (including punctuation), but contains an 's', // 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. // See comments on foldFunc. func equalFoldRight(s, t []byte) bool { for _, sb := range s { if len(t) == 0 { return false } tb := t[0] if tb < utf8.RuneSelf { if sb != tb { sbUpper := sb & caseMask if 'A' <= sbUpper && sbUpper <= 'Z' { if sbUpper != tb&caseMask { return false } } else { return false } } t = t[1:] continue } // sb is ASCII and t is not. t must be either kelvin // sign or long s; sb must be s, S, k, or K. tr, size := utf8.DecodeRune(t) switch sb { case 's', 'S': if tr != smallLongEss { return false } case 'k', 'K': if tr != kelvin { return false } default: return false } t = t[size:] } if len(t) > 0 { return false } return true } // asciiEqualFold is a specialization of bytes.EqualFold for use when // s is all ASCII (but may contain non-letters) and contains no // special-folding letters. // See comments on foldFunc. func asciiEqualFold(s, t []byte) bool { if len(s) != len(t) { return false } for i, sb := range s { tb := t[i] if sb == tb { continue } if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { if sb&caseMask != tb&caseMask { return false } } else { return false } } return true } // simpleLetterEqualFold is a specialization of bytes.EqualFold for // use when s is all ASCII letters (no underscores, etc) and also // doesn't contain 'k', 'K', 's', or 'S'. // See comments on foldFunc. func simpleLetterEqualFold(s, t []byte) bool { if len(s) != len(t) { return false } for i, b := range s { if b&caseMask != t[i]&caseMask { return false } } return true } golang-google-cloud-0.9.0/internal/fields/fold_test.go000066400000000000000000000067521312234511600227510ustar00rootroot00000000000000// Copyright 2016 Google Inc. 
All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package fields // This file was copied from https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/fold_test.go. // Only the license and package were changed. import ( "bytes" "strings" "testing" "unicode/utf8" ) var foldTests = []struct { fn func(s, t []byte) bool s, t string want bool }{ {equalFoldRight, "", "", true}, {equalFoldRight, "a", "a", true}, {equalFoldRight, "", "a", false}, {equalFoldRight, "a", "", false}, {equalFoldRight, "a", "A", true}, {equalFoldRight, "AB", "ab", true}, {equalFoldRight, "AB", "ac", false}, {equalFoldRight, "sbkKc", "ſbKKc", true}, {equalFoldRight, "SbKkc", "ſbKKc", true}, {equalFoldRight, "SbKkc", "ſbKK", false}, {equalFoldRight, "e", "é", false}, {equalFoldRight, "s", "S", true}, {simpleLetterEqualFold, "", "", true}, {simpleLetterEqualFold, "abc", "abc", true}, {simpleLetterEqualFold, "abc", "ABC", true}, {simpleLetterEqualFold, "abc", "ABCD", false}, {simpleLetterEqualFold, "abc", "xxx", false}, {asciiEqualFold, "a_B", "A_b", true}, {asciiEqualFold, "aa@", "aa`", false}, // verify 0x40 and 0x60 aren't case-equivalent } func TestFold(t *testing.T) { for i, tt := range foldTests { if got := tt.fn([]byte(tt.s), []byte(tt.t)); got != tt.want { t.Errorf("%d. 
%q, %q = %v; want %v", i, tt.s, tt.t, got, tt.want) } truth := strings.EqualFold(tt.s, tt.t) if truth != tt.want { t.Errorf("strings.EqualFold doesn't agree with case %d", i) } } } func TestFoldAgainstUnicode(t *testing.T) { const bufSize = 5 buf1 := make([]byte, 0, bufSize) buf2 := make([]byte, 0, bufSize) var runes []rune for i := 0x20; i <= 0x7f; i++ { runes = append(runes, rune(i)) } runes = append(runes, kelvin, smallLongEss) funcs := []struct { name string fold func(s, t []byte) bool letter bool // must be ASCII letter simple bool // must be simple ASCII letter (not 'S' or 'K') }{ { name: "equalFoldRight", fold: equalFoldRight, }, { name: "asciiEqualFold", fold: asciiEqualFold, simple: true, }, { name: "simpleLetterEqualFold", fold: simpleLetterEqualFold, simple: true, letter: true, }, } for _, ff := range funcs { for _, r := range runes { if r >= utf8.RuneSelf { continue } if ff.letter && !isASCIILetter(byte(r)) { continue } if ff.simple && (r == 's' || r == 'S' || r == 'k' || r == 'K') { continue } for _, r2 := range runes { buf1 := append(buf1[:0], 'x') buf2 := append(buf2[:0], 'x') buf1 = buf1[:1+utf8.EncodeRune(buf1[1:bufSize], r)] buf2 = buf2[:1+utf8.EncodeRune(buf2[1:bufSize], r2)] buf1 = append(buf1, 'x') buf2 = append(buf2, 'x') want := bytes.EqualFold(buf1, buf2) if got := ff.fold(buf1, buf2); got != want { t.Errorf("%s(%q, %q) = %v; want %v", ff.name, buf1, buf2, got, want) } } } } } func isASCIILetter(b byte) bool { return ('A' <= b && b <= 'Z') || ('a' <= b && b <= 'z') } golang-google-cloud-0.9.0/internal/kokoro/000077500000000000000000000000001312234511600204635ustar00rootroot00000000000000golang-google-cloud-0.9.0/internal/kokoro/build.sh000077500000000000000000000023621312234511600221240ustar00rootroot00000000000000#!/bin/bash # Fail on any error set -eo pipefail # Display commands being run set -x # cd to project dir on Kokoro instance cd git/gocloud go version # Set $GOPATH export GOPATH="$HOME/go" 
GOCLOUD_HOME=$GOPATH/src/cloud.google.com/go mkdir -p $GOCLOUD_HOME # Move code into $GOPATH and get dependencies cp -R ./* $GOCLOUD_HOME cd $GOCLOUD_HOME go get -v ./... # # Don't run integration tests until we can protect against code from # # untrusted forks reading and storing our service account key. # cd internal/kokoro # # Don't print out encryption keys, etc # set +x # key=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_key) # iv=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_iv) # pass=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_pass) # openssl aes-256-cbc -K $key -iv $iv -pass pass:$pass -in kokoro-key.json.enc -out key.json -d # set -x # export GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" # export GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" # cd $GOCLOUD_HOME # Run tests and tee output to log file, to be pushed to GCS as artifact. go test -race -v -short ./... 2>&1 | tee $KOKORO_ARTIFACTS_DIR/$KOKORO_GERRIT_REVISION.log # Make sure README.md is up to date. make -C internal/readme test diff golang-google-cloud-0.9.0/internal/kokoro/kokoro-key.json.enc000066400000000000000000000046201312234511600242160ustar00rootroot00000000000000Salted__ң_t3붜&d\IT# B#UcC:r&}$En:6( mV!Cf F20 !LPщT÷~ i"ˈǺ2v5< 9b:R?v㶆zYxl! >X }%,ӧ{ۄ&:oRewWdd#AS`B9G*'>'=zPym?]j޴G3oʨ"@U"?^cϡ&~#ZPx' :mI ~4W+!хr.bǻݨyYc<yvd|-|pސ[CcAY4Bg:G}tev/q8ۨE:7"ށ?WyK>AleuEK)*h6Lgl9=u /r.IJ_`ܤDxV1 Eb Nө`ӝAڗ@L "Y:U qOuiSE3vT[n#1~ݾ"h~Ǎd/,Y S9VpJ1YvX13Ie!Ӽ{ Dܘ7D{#5˕VmN\YsDG0霷zfgͷUjV͖{7f^6)h%VZ #d*v}KTX+}/4Rx:/*IGa 8:3DTwCgARjɟ#3Vj+xOC9 6Fo㸓3IhER.*I’7hPo>!*~'s8"PLp9 0ᨋxlMW9G 9D\R-+T8= 'NeiY9a>kԋ ±0[Qq_hFvp2؎bQ>EyNlvSrFO+^lxJmY*QTq! 
/,ʸIF)`weE8PErV/O8# ~a^O^pd2t 8X¶н4%TPJ0B".9ޘ:A$aK|;J,T}Ps#ɯ j_T1mɣ maxLevel { fmt.Fprintln(w, "pretty: max nested depth exceeded") return } indent := strings.Repeat(Indent, s.level) fmt.Fprintf(w, "%s%s", indent, s.prefix) if isNil(v) { fmt.Fprintf(w, "nil%s", s.suffix) return } if v.Type().Kind() == reflect.Interface { v = v.Elem() } if v.Type() == typeOfTime { fmt.Fprintf(w, "%s%s", v.Interface(), s.suffix) return } for v.Type().Kind() == reflect.Ptr { fmt.Fprintf(w, "&") v = v.Elem() } switch v.Type().Kind() { default: fmt.Fprintf(w, "%s%s", short(v), s.suffix) case reflect.Array: fmt.Fprintf(w, "%s{\n", v.Type()) for i := 0; i < v.Len(); i++ { fprint(w, v.Index(i), state{ level: s.level + 1, prefix: "", suffix: ",", defaults: s.defaults, }) fmt.Fprintln(w) } fmt.Fprintf(w, "%s}", indent) case reflect.Slice: fmt.Fprintf(w, "%s{", v.Type()) if v.Len() > 0 { fmt.Fprintln(w) for i := 0; i < v.Len(); i++ { fprint(w, v.Index(i), state{ level: s.level + 1, prefix: "", suffix: ",", defaults: s.defaults, }) fmt.Fprintln(w) } } fmt.Fprintf(w, "%s}%s", indent, s.suffix) case reflect.Map: fmt.Fprintf(w, "%s{", v.Type()) if v.Len() > 0 { fmt.Fprintln(w) keys := v.MapKeys() maybeSort(keys, v.Type().Key()) for _, key := range keys { val := v.MapIndex(key) if s.defaults || !isDefault(val) { fprint(w, val, state{ level: s.level + 1, prefix: short(key) + ": ", suffix: ",", defaults: s.defaults, }) fmt.Fprintln(w) } } } fmt.Fprintf(w, "%s}%s", indent, s.suffix) case reflect.Struct: t := v.Type() fmt.Fprintf(w, "%s{\n", t) for i := 0; i < t.NumField(); i++ { f := v.Field(i) if s.defaults || !isDefault(f) { fprint(w, f, state{ level: s.level + 1, prefix: t.Field(i).Name + ": ", suffix: ",", defaults: s.defaults, }) fmt.Fprintln(w) } } fmt.Fprintf(w, "%s}%s", indent, s.suffix) } } func isNil(v reflect.Value) bool { if !v.IsValid() { return true } switch v.Type().Kind() { case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: return v.IsNil() 
default: return false } } func isDefault(v reflect.Value) bool { if !v.IsValid() { return true } t := v.Type() switch t.Kind() { case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: return v.IsNil() default: if !v.CanInterface() { return false } return t.Comparable() && v.Interface() == reflect.Zero(t).Interface() } } // short returns a short, one-line string for v. func short(v reflect.Value) string { if !v.IsValid() { return "nil" } if v.Type().Kind() == reflect.String { return fmt.Sprintf("%q", v) } return fmt.Sprintf("%v", v) } func indent(w io.Writer, level int) { for i := 0; i < level; i++ { io.WriteString(w, Indent) // ignore errors } } func maybeSort(vs []reflect.Value, t reflect.Type) { if less := lessFunc(t); less != nil { sort.Sort(&sorter{vs, less}) } } // lessFunc returns a function that implements the "<" operator // for the given type, or nil if the type doesn't support "<" . func lessFunc(t reflect.Type) func(v1, v2 interface{}) bool { switch t.Kind() { case reflect.String: return func(v1, v2 interface{}) bool { return v1.(string) < v2.(string) } case reflect.Int: return func(v1, v2 interface{}) bool { return v1.(int) < v2.(int) } case reflect.Int8: return func(v1, v2 interface{}) bool { return v1.(int8) < v2.(int8) } case reflect.Int16: return func(v1, v2 interface{}) bool { return v1.(int16) < v2.(int16) } case reflect.Int32: return func(v1, v2 interface{}) bool { return v1.(int32) < v2.(int32) } case reflect.Int64: return func(v1, v2 interface{}) bool { return v1.(int64) < v2.(int64) } case reflect.Uint: return func(v1, v2 interface{}) bool { return v1.(uint) < v2.(uint) } case reflect.Uint8: return func(v1, v2 interface{}) bool { return v1.(uint8) < v2.(uint8) } case reflect.Uint16: return func(v1, v2 interface{}) bool { return v1.(uint16) < v2.(uint16) } case reflect.Uint32: return func(v1, v2 interface{}) bool { return v1.(uint32) < v2.(uint32) } case reflect.Uint64: return func(v1, v2 interface{}) bool 
{ return v1.(uint64) < v2.(uint64) } case reflect.Float32: return func(v1, v2 interface{}) bool { return v1.(float32) < v2.(float32) } case reflect.Float64: return func(v1, v2 interface{}) bool { return v1.(float64) < v2.(float64) } default: return nil } } type sorter struct { vs []reflect.Value less func(v1, v2 interface{}) bool } func (s *sorter) Len() int { return len(s.vs) } func (s *sorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } func (s *sorter) Less(i, j int) bool { return s.less(s.vs[i].Interface(), s.vs[j].Interface()) } golang-google-cloud-0.9.0/internal/pretty/pretty_test.go000066400000000000000000000046171312234511600234330ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package pretty import ( "fmt" "strings" "testing" ) type S struct { X int Y bool z *string } func TestSprint(t *testing.T) { Indent = "~" i := 17 for _, test := range []struct { value interface{} want string }{ // primitives and pointer {nil, "nil"}, {3, "3"}, {9.8, "9.8"}, {true, "true"}, {"foo", `"foo"`}, {&i, "&17"}, // array and slice {[3]int{1, 2, 3}, "[3]int{\n~1,\n~2,\n~3,\n}"}, {[]int{1, 2, 3}, "[]int{\n~1,\n~2,\n~3,\n}"}, {[]int{}, "[]int{}"}, {[]string{"foo"}, "[]string{\n~\"foo\",\n}"}, // map {map[int]bool{}, "map[int]bool{}"}, {map[int]bool{1: true, 2: false, 3: true}, "map[int]bool{\n~1: true,\n~3: true,\n}"}, // struct {S{}, "pretty.S{\n}"}, {S{3, true, ptr("foo")}, "pretty.S{\n~X: 3,\n~Y: true,\n~z: &\"foo\",\n}"}, // interface {[]interface{}{&i}, "[]interface {}{\n~&17,\n}"}, // nesting {[]S{{1, false, ptr("a")}, {2, true, ptr("b")}}, `[]pretty.S{ ~pretty.S{ ~~X: 1, ~~z: &"a", ~}, ~pretty.S{ ~~X: 2, ~~Y: true, ~~z: &"b", ~}, }`}, } { got := fmt.Sprintf("%v", Value(test.value)) if got != test.want { t.Errorf("%v: got:\n%q\nwant:\n%q", test.value, got, test.want) } } } func TestWithDefaults(t *testing.T) { Indent = "~" for _, test := range []struct { value interface{} want string }{ {map[int]bool{1: true, 2: false, 3: true}, "map[int]bool{\n~1: true,\n~2: false,\n~3: true,\n}"}, {S{}, "pretty.S{\n~X: 0,\n~Y: false,\n~z: nil,\n}"}, } { got := fmt.Sprintf("%+v", Value(test.value)) if got != test.want { t.Errorf("%v: got:\n%q\nwant:\n%q", test.value, got, test.want) } } } func TestBadVerb(t *testing.T) { got := fmt.Sprintf("%d", Value(8)) want := "%!d(" if !strings.HasPrefix(got, want) { t.Errorf("got %q, want prefix %q", got, want) } } func ptr(s string) *string { return &s } golang-google-cloud-0.9.0/internal/readme/000077500000000000000000000000001312234511600204145ustar00rootroot00000000000000golang-google-cloud-0.9.0/internal/readme/Makefile000066400000000000000000000021261312234511600220550ustar00rootroot00000000000000# Rebuild the README.md file 
at repo root by inserting code samples # from compilable go files. SHELL=/bin/bash GOCLOUD_HOME=$(GOPATH)/src/cloud.google.com/go README=$(GOCLOUD_HOME)/README.md .PHONY: readme test test-good test-bad-go test-bad-md readme: @tmp=$$(mktemp); \ awk -f snipmd.awk snippets.go $(README) > $$tmp; \ mv $$tmp $(README) diff: diff $(README) <(awk -f snipmd.awk snippets.go $(README)) test: test-good test-bad-go test-bad-md @echo PASS test-good: @echo testdata/good.md @cd testdata >& /dev/null; \ diff -u want.md <(awk -f ../snipmd.awk snips.go good.md) @echo "testdata/want.md (round trip)" @cd testdata >& /dev/null; \ diff -u want.md <(awk -f ../snipmd.awk snips.go want.md) test-bad-go: @for f in testdata/bad-*.go; do \ echo $$f; \ if awk -f snipmd.awk $$f >& /dev/null; then \ echo "$f succeeded, want failure"; \ exit 1; \ fi; \ done test-bad-md: @for f in testdata/bad-*.md; do \ echo $$f; \ if awk -f snipmd.awk testdata/snips.go $$f >& /dev/null; then \ echo "$f succeeded, want failure"; \ exit 1; \ fi; \ done golang-google-cloud-0.9.0/internal/readme/snipmd.awk000066400000000000000000000056161312234511600224220ustar00rootroot00000000000000# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # snipmd inserts code snippets from Go source files into a markdown file. 
# # Call with one or more .go files and a .md file: # # awk -f snipmd.awk foo.go bar.go template.md # # In the Go files, start a snippet with # //[ NAME # and end it with # //] # # In the markdown, write # [snip]:# NAME # to insert the snippet NAME just below that line. # If there is already a code block after the [snip]:# line, it will be # replaced, so a previous output can be used as input. # # The following transformations are made to the Go code: # - The first tab of each line is removed. # - Trailing blank lines are removed. # - `ELLIPSIS` and `_ = ELLIPSIS` are replaced by `...` /^[ \t]*\/\/\[/ { # start snippet in Go file if (inGo()) { if ($2 == "") { die("missing snippet name") } curSnip = $2 next } } /^[ \t]*\/\/]/ { # end snippet in Go file if (inGo()) { if (curSnip != "") { # Remove all but one trailing newline. gsub(/\n+$/, "\n", snips[curSnip]) curSnip = "" next } else { die("//] without corresponding //[") } } } ENDFILE { if (curSnip != "") { die("unclosed snippet: " curSnip) } } # Skip code blocks in the input that immediately follow [snip]:# lines, # because we just inserted the snippet. Supports round-tripping. /^```go$/,/^```$/ { if (inMarkdown() && afterSnip) { next } } # Matches every line. { if (curSnip != "") { line = $0 # Remove initial tab, if any. if (line ~ /^\t/) { line = substr(line, 2) } # Replace ELLIPSIS. gsub(/_ = ELLIPSIS/, "...", line) gsub(/ELLIPSIS/, "...", line) snips[curSnip] = snips[curSnip] line "\n" } else if (inMarkdown()) { afterSnip = 0 # Copy .md to output. print } } $1 ~ /\[snip\]:#/ { # Snippet marker in .md file. 
if (inMarkdown()) { # We expect '[snip]:#' to be followed by '(NAME)' if ($2 !~ /\(.*\)/) { die("bad snip spec: " $0) } name = substr($2, 2, length($2)-2) if (snips[name] == "") { die("no snippet named " name) } printf("```go\n%s```\n", snips[name]) afterSnip = 1 } } function inMarkdown() { return match(FILENAME, /\.md$/) } function inGo() { return match(FILENAME, /\.go$/) } function die(msg) { printf("%s:%d: %s\n", FILENAME, FNR, msg) > "/dev/stderr" exit 1 } golang-google-cloud-0.9.0/internal/readme/snippets.go000066400000000000000000000116151312234511600226140ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // This file holds samples that are embedded into README.md. // This file has to compile, but need not execute. // If it fails to compile, fix it, then run `make` to regenerate README.md. 
package readme import ( "fmt" "io/ioutil" "log" "time" "cloud.google.com/go/bigquery" "cloud.google.com/go/datastore" "cloud.google.com/go/logging" "cloud.google.com/go/pubsub" "cloud.google.com/go/spanner" "cloud.google.com/go/storage" "golang.org/x/net/context" "golang.org/x/oauth2" "google.golang.org/api/iterator" "google.golang.org/api/option" ) var ctx context.Context const END = 0 func auth() { //[ auth client, err := storage.NewClient(ctx) //] _ = client _ = err } func auth2() { //[ auth-JSON client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json")) //] _ = client _ = err } func auth3() { var ELLIPSIS oauth2.TokenSource //[ auth-ts tokenSource := ELLIPSIS client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) //] _ = client _ = err } func datastoreSnippets() { //[ datastore-1 client, err := datastore.NewClient(ctx, "my-project-id") if err != nil { log.Fatal(err) } //] //[ datastore-2 type Post struct { Title string Body string `datastore:",noindex"` PublishedAt time.Time } keys := []*datastore.Key{ datastore.NameKey("Post", "post1", nil), datastore.NameKey("Post", "post2", nil), } posts := []*Post{ {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, } if _, err := client.PutMulti(ctx, keys, posts); err != nil { log.Fatal(err) } //] } func storageSnippets() { //[ storage-1 client, err := storage.NewClient(ctx) if err != nil { log.Fatal(err) } //] //[ storage-2 // Read the object1 from bucket. rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) if err != nil { log.Fatal(err) } defer rc.Close() body, err := ioutil.ReadAll(rc) if err != nil { log.Fatal(err) } //] _ = body } func pubsubSnippets() { //[ pubsub-1 client, err := pubsub.NewClient(ctx, "project-id") if err != nil { log.Fatal(err) } //] const ELLIPSIS = 0 //[ pubsub-2 // Publish "hello world" on topic1. 
topic := client.Topic("topic1") res := topic.Publish(ctx, &pubsub.Message{ Data: []byte("hello world"), }) // The publish happens asynchronously. // Later, you can get the result from res: _ = ELLIPSIS msgID, err := res.Get(ctx) if err != nil { log.Fatal(err) } // Use a callback to receive messages via subscription1. sub := client.Subscription("subscription1") err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { fmt.Println(m.Data) m.Ack() // Acknowledge that we've consumed the message. }) if err != nil { log.Println(err) } //] _ = msgID } func bqSnippets() { //[ bq-1 c, err := bigquery.NewClient(ctx, "my-project-ID") if err != nil { // TODO: Handle error. } //] //[ bq-2 // Construct a query. q := c.Query(` SELECT year, SUM(number) FROM [bigquery-public-data:usa_names.usa_1910_2013] WHERE name = "William" GROUP BY year ORDER BY year `) // Execute the query. it, err := q.Read(ctx) if err != nil { // TODO: Handle error. } // Iterate through the results. for { var values []bigquery.Value err := it.Next(&values) if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(values) } //] } func loggingSnippets() { //[ logging-1 ctx := context.Background() client, err := logging.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } //] //[ logging-2 logger := client.Logger("my-log") logger.Log(logging.Entry{Payload: "something happened!"}) //] //[ logging-3 err = client.Close() if err != nil { // TODO: Handle error. 
} //] } func spannerSnippets() { //[ spanner-1 client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") if err != nil { log.Fatal(err) } //] //[ spanner-2 // Simple Reads And Writes _, err = client.Apply(ctx, []*spanner.Mutation{ spanner.Insert("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"})}) if err != nil { log.Fatal(err) } row, err := client.Single().ReadRow(ctx, "Users", spanner.Key{"alice"}, []string{"email"}) if err != nil { log.Fatal(err) } //] _ = row } golang-google-cloud-0.9.0/internal/readme/testdata/000077500000000000000000000000001312234511600222255ustar00rootroot00000000000000golang-google-cloud-0.9.0/internal/readme/testdata/bad-no-name.go000066400000000000000000000012471312234511600246360ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package readme import "fmt" func f() { //[ fmt.Println() //] } golang-google-cloud-0.9.0/internal/readme/testdata/bad-no-open.go000066400000000000000000000012051312234511600246510ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package readme func f() { //] } golang-google-cloud-0.9.0/internal/readme/testdata/bad-nosnip.md000066400000000000000000000000241312234511600245750ustar00rootroot00000000000000[snip]:# (unknown) golang-google-cloud-0.9.0/internal/readme/testdata/bad-spec.md000066400000000000000000000000301312234511600242160ustar00rootroot00000000000000[snip]:# missing-parens golang-google-cloud-0.9.0/internal/readme/testdata/bad-unclosed.go000066400000000000000000000012341312234511600251140ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package readme // unclosed snippet func f() { //[ X } golang-google-cloud-0.9.0/internal/readme/testdata/good.md000066400000000000000000000003551312234511600235020ustar00rootroot00000000000000This template is for testing snipmd.awk. Put the first snippet here. [snip]:# (first) And now the second. [snip]:# (second) A top-level snippet. [snip]:# (top-level) ```go // A code block that is not included. ``` And we're done. 
golang-google-cloud-0.9.0/internal/readme/testdata/snips.go000066400000000000000000000015041312234511600237100ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package readme import ( "errors" "fmt" ) func f() { ELLIPSIS := 3 //[ first fmt.Println("hello") x := ELLIPSIS //] //[ second if x > 2 { _ = ELLIPSIS } //] } //[ top-level var ErrBad = errors.New("bad") //] golang-google-cloud-0.9.0/internal/readme/testdata/want.md000066400000000000000000000005321312234511600235200ustar00rootroot00000000000000This template is for testing snipmd.awk. Put the first snippet here. [snip]:# (first) ```go fmt.Println("hello") x := ... ``` And now the second. [snip]:# (second) ```go if x > 2 { ... } ``` A top-level snippet. [snip]:# (top-level) ```go var ErrBad = errors.New("bad") ``` ```go // A code block that is not included. ``` And we're done. golang-google-cloud-0.9.0/internal/retry.go000066400000000000000000000033461312234511600206610ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "fmt" "time" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" ) // Retry calls the supplied function f repeatedly according to the provided // backoff parameters. It returns when one of the following occurs: // When f's first return value is true, Retry immediately returns with f's second // return value. // When the provided context is done, Retry returns with an error that // includes both ctx.Error() and the last error returned by f. func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { return retry(ctx, bo, f, gax.Sleep) } func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), sleep func(context.Context, time.Duration) error) error { var lastErr error for { stop, err := f() if stop { return err } // Remember the last "real" error from f. if err != nil && err != context.Canceled && err != context.DeadlineExceeded { lastErr = err } p := bo.Pause() if cerr := sleep(ctx, p); cerr != nil { if lastErr != nil { return fmt.Errorf("%v; last function err: %v", cerr, lastErr) } return cerr } } } golang-google-cloud-0.9.0/internal/retry_test.go000066400000000000000000000031601312234511600217120ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "errors" "testing" "time" "golang.org/x/net/context" gax "github.com/googleapis/gax-go" ) func TestRetry(t *testing.T) { ctx := context.Background() // Without a context deadline, retry will run until the function // says not to retry any more. n := 0 endRetry := errors.New("end retry") err := retry(ctx, gax.Backoff{}, func() (bool, error) { n++ if n < 10 { return false, nil } return true, endRetry }, func(context.Context, time.Duration) error { return nil }) if got, want := err, endRetry; got != want { t.Errorf("got %v, want %v", err, endRetry) } if n != 10 { t.Errorf("n: got %d, want %d", n, 10) } // If the context has a deadline, sleep will return an error // and end the function. n = 0 err = retry(ctx, gax.Backoff{}, func() (bool, error) { return false, nil }, func(context.Context, time.Duration) error { n++ if n < 10 { return nil } return context.DeadlineExceeded }) if err == nil { t.Error("got nil, want error") } } golang-google-cloud-0.9.0/internal/rpcreplay/000077500000000000000000000000001312234511600211605ustar00rootroot00000000000000golang-google-cloud-0.9.0/internal/rpcreplay/Makefile000066400000000000000000000020121312234511600226130ustar00rootroot00000000000000# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Makefile for building Go files from protos. # Change these to match your environment. PROTOC=$(HOME)/bin/protoc PROTOC_GO_PLUGIN_DIR=$(GOPATH)/bin PROTOBUF_REPO=$(HOME)/git-repos/protobuf gen-protos: sync-protobuf for d in proto/*; do \ PATH=$(PATH):$(PROTOC_GO_PLUGIN_DIR) \ $(PROTOC) --go_out=plugins=grpc:$$d \ -I $$d -I $(PROTOBUF_REPO)/src $$d/*.proto; \ done sync-protobuf: cd $(PROTOBUF_REPO); git pull golang-google-cloud-0.9.0/internal/rpcreplay/doc.go000066400000000000000000000076671312234511600222740ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package rpcreplay supports the capture and replay of gRPC calls. Its main goal is to improve testing. Once one captures the calls of a test that runs against a real service, one has an "automatic mock" that can be replayed against the same test, yielding a unit test that is fast and flake-free. 
Recording To record a sequence of gRPC calls to a file, create a Recorder and pass its DialOptions to grpc.Dial: rec, err := rpcreplay.NewRecorder("service.replay", nil) if err != nil { ... } defer func() { if err := rec.Close(); err != nil { ... } }() conn, err := grpc.Dial(serverAddress, rec.DialOptions()...) It's essential to close the Recorder when the interaction is finished. There is also a NewRecorderWriter function for capturing to an arbitrary io.Writer. Replaying Replaying a captured file looks almost identical: create a Replayer and use its DialOptions. (Since we're reading the file and not writing it, we don't have to be as careful about the error returned from Close). rep, err := rpcreplay.NewReplayer("service.replay") if err != nil { ... } defer rep.Close() conn, err := grpc.Dial(serverAddress, rep.DialOptions()...) Initial State A test might use random or time-sensitive values, for instance to create unique resources for isolation from other tests. The test therefore has initial values -- the current time, a random seed -- that differ from run to run. You must record this initial state and re-establish it on replay. To record the initial state, serialize it into a []byte and pass it as the second argument to NewRecorder: timeNow := time.Now() b, err := timeNow.MarshalBinary() if err != nil { ... } rec, err := rpcreplay.NewRecorder("service.replay", b) On replay, get the bytes from Replayer.Initial: rep, err := rpcreplay.NewReplayer("service.replay") if err != nil { ... } defer rep.Close() err = timeNow.UnmarshalBinary(rep.Initial()) if err != nil { ... } Nondeterminism A nondeterministic program may invoke RPCs in a different order each time it is run. The order in which RPCs are called during recording may differ from the order during replay. The replayer matches incoming to recorded requests by method name and request contents, so nondeterminism is only a concern for identical requests that result in different responses. 
A nondeterministic program whose behavior differs depending on the order of such RPCs probably has a race condition: since both the recorded sequence of RPCs and the sequence during replay are valid orderings, the program should behave the same under both. Other Replayer Differences Besides the differences in replay mentioned above, other differences may cause issues for some programs. We list them here. The Replayer delivers a response to an RPC immediately, without waiting for other incoming RPCs. This can violate causality. For example, in a Pub/Sub program where one goroutine publishes and another subscribes, during replay the Subscribe call may finish before the Publish call begins. For streaming RPCs, the Replayer delivers the result of Send and Recv calls in the order they were recorded. No attempt is made to match message contents. At present, this package does not record or replay stream headers and trailers, or the result of the CloseSend method. */ package rpcreplay // import "cloud.google.com/go/internal/rpcreplay" golang-google-cloud-0.9.0/internal/rpcreplay/example_test.go000066400000000000000000000025651312234511600242110ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rpcreplay_test var serverAddress string // func Example_NewRecorder() { // rec, err := rpcreplay.NewRecorder("service.replay", nil) // if err != nil { // // TODO: Handle error. 
// } // defer func() { // if err := rec.Close(); err != nil { // // TODO: Handle error. // } // }() // conn, err := grpc.Dial(serverAddress, rec.DialOptions()...) // if err != nil { // // TODO: Handle error. // } // _ = conn // TODO: use connection // } // func Example_NewReplayer() { // rep, err := rpcreplay.NewReplayer("service.replay") // if err != nil { // // TODO: Handle error. // } // defer rep.Close() // conn, err := grpc.Dial(serverAddress, rep.DialOptions()...) // if err != nil { // // TODO: Handle error. // } // _ = conn // TODO: use connection // } golang-google-cloud-0.9.0/internal/rpcreplay/fake_test.go000066400000000000000000000036171312234511600234630ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rpcreplay import ( "log" "net" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" pb "cloud.google.com/go/internal/rpcreplay/proto/intstore" ) // intStoreServer is an in-memory implementation of IntStore. 
type intStoreServer struct { pb.IntStoreServer Addr string l net.Listener gsrv *grpc.Server items map[string]int32 } func newIntStoreServer() *intStoreServer { l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { log.Fatal(err) } s := &intStoreServer{ Addr: l.Addr().String(), l: l, gsrv: grpc.NewServer(), } pb.RegisterIntStoreServer(s.gsrv, s) go s.gsrv.Serve(s.l) return s } func (s *intStoreServer) stop() { s.gsrv.Stop() s.l.Close() } func (s *intStoreServer) Set(_ context.Context, item *pb.Item) (*pb.SetResponse, error) { old := s.setItem(item) return &pb.SetResponse{PrevValue: old}, nil } func (s *intStoreServer) setItem(item *pb.Item) int32 { if s.items == nil { s.items = map[string]int32{} } old := s.items[item.Name] s.items[item.Name] = item.Value return old } func (s *intStoreServer) Get(_ context.Context, req *pb.GetRequest) (*pb.Item, error) { val, ok := s.items[req.Name] if !ok { return nil, grpc.Errorf(codes.NotFound, "%q", req.Name) } return &pb.Item{Name: req.Name, Value: val}, nil } golang-google-cloud-0.9.0/internal/rpcreplay/proto/000077500000000000000000000000001312234511600223235ustar00rootroot00000000000000golang-google-cloud-0.9.0/internal/rpcreplay/proto/intstore/000077500000000000000000000000001312234511600241725ustar00rootroot00000000000000golang-google-cloud-0.9.0/internal/rpcreplay/proto/intstore/intstore.pb.go000066400000000000000000000324731312234511600270010ustar00rootroot00000000000000// Code generated by protoc-gen-go. // source: intstore.proto // DO NOT EDIT! /* Package intstore is a generated protocol buffer package. It is generated from these files: intstore.proto It has these top-level messages: Item SetResponse GetRequest Summary ListItemsRequest */ package intstore import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Item struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Value int32 `protobuf:"varint,2,opt,name=value" json:"value,omitempty"` } func (m *Item) Reset() { *m = Item{} } func (m *Item) String() string { return proto.CompactTextString(m) } func (*Item) ProtoMessage() {} func (*Item) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *Item) GetName() string { if m != nil { return m.Name } return "" } func (m *Item) GetValue() int32 { if m != nil { return m.Value } return 0 } type SetResponse struct { PrevValue int32 `protobuf:"varint,1,opt,name=prev_value,json=prevValue" json:"prev_value,omitempty"` } func (m *SetResponse) Reset() { *m = SetResponse{} } func (m *SetResponse) String() string { return proto.CompactTextString(m) } func (*SetResponse) ProtoMessage() {} func (*SetResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *SetResponse) GetPrevValue() int32 { if m != nil { return m.PrevValue } return 0 } type GetRequest struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *GetRequest) Reset() { *m = GetRequest{} } func (m *GetRequest) String() string { return proto.CompactTextString(m) } func (*GetRequest) ProtoMessage() {} func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func (m *GetRequest) GetName() string { if m != nil { return m.Name } return "" } type Summary struct { Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` } func (m *Summary) Reset() { *m = Summary{} } func (m *Summary) String() string { return 
proto.CompactTextString(m) } func (*Summary) ProtoMessage() {} func (*Summary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func (m *Summary) GetCount() int32 { if m != nil { return m.Count } return 0 } type ListItemsRequest struct { } func (m *ListItemsRequest) Reset() { *m = ListItemsRequest{} } func (m *ListItemsRequest) String() string { return proto.CompactTextString(m) } func (*ListItemsRequest) ProtoMessage() {} func (*ListItemsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func init() { proto.RegisterType((*Item)(nil), "intstore.Item") proto.RegisterType((*SetResponse)(nil), "intstore.SetResponse") proto.RegisterType((*GetRequest)(nil), "intstore.GetRequest") proto.RegisterType((*Summary)(nil), "intstore.Summary") proto.RegisterType((*ListItemsRequest)(nil), "intstore.ListItemsRequest") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for IntStore service type IntStoreClient interface { Set(ctx context.Context, in *Item, opts ...grpc.CallOption) (*SetResponse, error) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*Item, error) // A server-to-client streaming RPC. ListItems(ctx context.Context, in *ListItemsRequest, opts ...grpc.CallOption) (IntStore_ListItemsClient, error) // A client-to-server streaming RPC. SetStream(ctx context.Context, opts ...grpc.CallOption) (IntStore_SetStreamClient, error) // A Bidirectional streaming RPC. 
StreamChat(ctx context.Context, opts ...grpc.CallOption) (IntStore_StreamChatClient, error) } type intStoreClient struct { cc *grpc.ClientConn } func NewIntStoreClient(cc *grpc.ClientConn) IntStoreClient { return &intStoreClient{cc} } func (c *intStoreClient) Set(ctx context.Context, in *Item, opts ...grpc.CallOption) (*SetResponse, error) { out := new(SetResponse) err := grpc.Invoke(ctx, "/intstore.IntStore/Set", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *intStoreClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*Item, error) { out := new(Item) err := grpc.Invoke(ctx, "/intstore.IntStore/Get", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *intStoreClient) ListItems(ctx context.Context, in *ListItemsRequest, opts ...grpc.CallOption) (IntStore_ListItemsClient, error) { stream, err := grpc.NewClientStream(ctx, &_IntStore_serviceDesc.Streams[0], c.cc, "/intstore.IntStore/ListItems", opts...) if err != nil { return nil, err } x := &intStoreListItemsClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type IntStore_ListItemsClient interface { Recv() (*Item, error) grpc.ClientStream } type intStoreListItemsClient struct { grpc.ClientStream } func (x *intStoreListItemsClient) Recv() (*Item, error) { m := new(Item) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *intStoreClient) SetStream(ctx context.Context, opts ...grpc.CallOption) (IntStore_SetStreamClient, error) { stream, err := grpc.NewClientStream(ctx, &_IntStore_serviceDesc.Streams[1], c.cc, "/intstore.IntStore/SetStream", opts...) 
if err != nil { return nil, err } x := &intStoreSetStreamClient{stream} return x, nil } type IntStore_SetStreamClient interface { Send(*Item) error CloseAndRecv() (*Summary, error) grpc.ClientStream } type intStoreSetStreamClient struct { grpc.ClientStream } func (x *intStoreSetStreamClient) Send(m *Item) error { return x.ClientStream.SendMsg(m) } func (x *intStoreSetStreamClient) CloseAndRecv() (*Summary, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(Summary) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *intStoreClient) StreamChat(ctx context.Context, opts ...grpc.CallOption) (IntStore_StreamChatClient, error) { stream, err := grpc.NewClientStream(ctx, &_IntStore_serviceDesc.Streams[2], c.cc, "/intstore.IntStore/StreamChat", opts...) if err != nil { return nil, err } x := &intStoreStreamChatClient{stream} return x, nil } type IntStore_StreamChatClient interface { Send(*Item) error Recv() (*Item, error) grpc.ClientStream } type intStoreStreamChatClient struct { grpc.ClientStream } func (x *intStoreStreamChatClient) Send(m *Item) error { return x.ClientStream.SendMsg(m) } func (x *intStoreStreamChatClient) Recv() (*Item, error) { m := new(Item) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for IntStore service type IntStoreServer interface { Set(context.Context, *Item) (*SetResponse, error) Get(context.Context, *GetRequest) (*Item, error) // A server-to-client streaming RPC. ListItems(*ListItemsRequest, IntStore_ListItemsServer) error // A client-to-server streaming RPC. SetStream(IntStore_SetStreamServer) error // A Bidirectional streaming RPC. 
StreamChat(IntStore_StreamChatServer) error } func RegisterIntStoreServer(s *grpc.Server, srv IntStoreServer) { s.RegisterService(&_IntStore_serviceDesc, srv) } func _IntStore_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Item) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(IntStoreServer).Set(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/intstore.IntStore/Set", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(IntStoreServer).Set(ctx, req.(*Item)) } return interceptor(ctx, in, info, handler) } func _IntStore_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(IntStoreServer).Get(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/intstore.IntStore/Get", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(IntStoreServer).Get(ctx, req.(*GetRequest)) } return interceptor(ctx, in, info, handler) } func _IntStore_ListItems_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(ListItemsRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(IntStoreServer).ListItems(m, &intStoreListItemsServer{stream}) } type IntStore_ListItemsServer interface { Send(*Item) error grpc.ServerStream } type intStoreListItemsServer struct { grpc.ServerStream } func (x *intStoreListItemsServer) Send(m *Item) error { return x.ServerStream.SendMsg(m) } func _IntStore_SetStream_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(IntStoreServer).SetStream(&intStoreSetStreamServer{stream}) } type IntStore_SetStreamServer interface { SendAndClose(*Summary) error Recv() (*Item, error) 
grpc.ServerStream } type intStoreSetStreamServer struct { grpc.ServerStream } func (x *intStoreSetStreamServer) SendAndClose(m *Summary) error { return x.ServerStream.SendMsg(m) } func (x *intStoreSetStreamServer) Recv() (*Item, error) { m := new(Item) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _IntStore_StreamChat_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(IntStoreServer).StreamChat(&intStoreStreamChatServer{stream}) } type IntStore_StreamChatServer interface { Send(*Item) error Recv() (*Item, error) grpc.ServerStream } type intStoreStreamChatServer struct { grpc.ServerStream } func (x *intStoreStreamChatServer) Send(m *Item) error { return x.ServerStream.SendMsg(m) } func (x *intStoreStreamChatServer) Recv() (*Item, error) { m := new(Item) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _IntStore_serviceDesc = grpc.ServiceDesc{ ServiceName: "intstore.IntStore", HandlerType: (*IntStoreServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Set", Handler: _IntStore_Set_Handler, }, { MethodName: "Get", Handler: _IntStore_Get_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "ListItems", Handler: _IntStore_ListItems_Handler, ServerStreams: true, }, { StreamName: "SetStream", Handler: _IntStore_SetStream_Handler, ClientStreams: true, }, { StreamName: "StreamChat", Handler: _IntStore_StreamChat_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "intstore.proto", } func init() { proto.RegisterFile("intstore.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 273 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x91, 0x4f, 0x4b, 0xc3, 0x40, 0x10, 0xc5, 0xb3, 0xfd, 0xa3, 0xcd, 0x08, 0x45, 0x87, 0x0a, 0x25, 0x20, 0x86, 0x3d, 0xe5, 0xa0, 0x21, 0xd4, 0xa3, 0x47, 0x0f, 0xa5, 0xe0, 0x29, 0x0b, 0x5e, 0x25, 0xca, 0x80, 0x05, 0xb3, 0x1b, 0x77, 0x27, 0x05, 0xbf, 0x84, 0x9f, 0x59, 
0x36, 0x5b, 0x9b, 0xd2, 0x78, 0xdb, 0xb7, 0xf3, 0x66, 0xde, 0x6f, 0x76, 0x61, 0xbe, 0xd5, 0xec, 0xd8, 0x58, 0xca, 0x1b, 0x6b, 0xd8, 0xe0, 0xec, 0x4f, 0xcb, 0x02, 0x26, 0x1b, 0xa6, 0x1a, 0x11, 0x26, 0xba, 0xaa, 0x69, 0x29, 0x52, 0x91, 0xc5, 0x65, 0x77, 0xc6, 0x05, 0x4c, 0x77, 0xd5, 0x67, 0x4b, 0xcb, 0x51, 0x2a, 0xb2, 0x69, 0x19, 0x84, 0xbc, 0x83, 0x0b, 0x45, 0x5c, 0x92, 0x6b, 0x8c, 0x76, 0x84, 0x37, 0x00, 0x8d, 0xa5, 0xdd, 0x6b, 0x70, 0x8a, 0xce, 0x19, 0xfb, 0x9b, 0x97, 0xce, 0x9d, 0x02, 0xac, 0xbd, 0xfb, 0xab, 0x25, 0xc7, 0xff, 0xa5, 0xc8, 0x5b, 0x38, 0x57, 0x6d, 0x5d, 0x57, 0xf6, 0xdb, 0x07, 0xbe, 0x9b, 0x56, 0xf3, 0x7e, 0x4c, 0x10, 0x12, 0xe1, 0xf2, 0x79, 0xeb, 0xd8, 0x63, 0xba, 0xfd, 0xa0, 0xd5, 0xcf, 0x08, 0x66, 0x1b, 0xcd, 0xca, 0xef, 0x80, 0x39, 0x8c, 0x15, 0x31, 0xce, 0xf3, 0xc3, 0x96, 0xde, 0x9b, 0x5c, 0xf7, 0xfa, 0x08, 0x58, 0x46, 0x78, 0x0f, 0xe3, 0x35, 0x31, 0x2e, 0xfa, 0x7a, 0x8f, 0x98, 0x9c, 0x4c, 0x91, 0x11, 0x3e, 0x42, 0x7c, 0xc8, 0xc7, 0xa4, 0x2f, 0x9f, 0x42, 0x0d, 0x5b, 0x0b, 0x81, 0x2b, 0x88, 0x15, 0xb1, 0x62, 0x4b, 0x55, 0x3d, 0x20, 0xbc, 0x3a, 0x22, 0x0c, 0x4f, 0x20, 0xa3, 0xcc, 0xf7, 0x40, 0x68, 0x78, 0xfa, 0xa8, 0x86, 0x6b, 0x0d, 0x52, 0x32, 0x51, 0x88, 0xb7, 0xb3, 0xee, 0x63, 0x1f, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x22, 0x28, 0xa0, 0x49, 0xea, 0x01, 0x00, 0x00, } golang-google-cloud-0.9.0/internal/rpcreplay/proto/intstore/intstore.proto000066400000000000000000000025201312234511600271250ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // IntStore is a service for testing the rpcreplay package. // It is a simple key-value store for integers. syntax = "proto3"; package intstore; service IntStore { rpc Set(Item) returns (SetResponse) {} rpc Get(GetRequest) returns (Item) {} // A server-to-client streaming RPC. rpc ListItems(ListItemsRequest) returns (stream Item) {} // A client-to-server streaming RPC. rpc SetStream(stream Item) returns (Summary) {} // A Bidirectional streaming RPC. rpc StreamChat(stream Item) returns (stream Item) {} } message Item { string name = 1; int32 value = 2; } message SetResponse { int32 prev_value = 1; } message GetRequest { string name = 1; } message Summary { int32 count = 1; } message ListItemsRequest {} golang-google-cloud-0.9.0/internal/rpcreplay/proto/rpcreplay/000077500000000000000000000000001312234511600243245ustar00rootroot00000000000000golang-google-cloud-0.9.0/internal/rpcreplay/proto/rpcreplay/rpcreplay.pb.go000066400000000000000000000136011312234511600272550ustar00rootroot00000000000000// Code generated by protoc-gen-go. // source: rpcreplay.proto // DO NOT EDIT! /* Package rpcreplay is a generated protocol buffer package. It is generated from these files: rpcreplay.proto It has these top-level messages: Entry */ package rpcreplay import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import google_protobuf "github.com/golang/protobuf/ptypes/any" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Entry_Kind int32 const ( Entry_TYPE_UNSPECIFIED Entry_Kind = 0 // A unary request. // method: the full name of the method // message: the request proto // is_error: false // ref_index: 0 Entry_REQUEST Entry_Kind = 1 // A unary response. // method: the full name of the method // message: // if is_error: a google.rpc.Status proto // else: the response proto // ref_index: index in the sequence of Entries of matching request (1-based) Entry_RESPONSE Entry_Kind = 2 // A method that creates a stream. // method: the full name of the method // message: // if is_error: a google.rpc.Status proto // else: nil // ref_index: 0 Entry_CREATE_STREAM Entry_Kind = 3 // A call to Send on the client returned by a stream-creating method. // method: unset // message: the proto being sent // is_error: false // ref_index: index of matching CREATE_STREAM entry (1-based) Entry_SEND Entry_Kind = 4 // A call to Recv on the client returned by a stream-creating method. // method: unset // message: // if is_error: a google.rpc.Status proto, or nil on EOF // else: the received message // ref_index: index of matching CREATE_STREAM entry Entry_RECV Entry_Kind = 5 ) var Entry_Kind_name = map[int32]string{ 0: "TYPE_UNSPECIFIED", 1: "REQUEST", 2: "RESPONSE", 3: "CREATE_STREAM", 4: "SEND", 5: "RECV", } var Entry_Kind_value = map[string]int32{ "TYPE_UNSPECIFIED": 0, "REQUEST": 1, "RESPONSE": 2, "CREATE_STREAM": 3, "SEND": 4, "RECV": 5, } func (x Entry_Kind) String() string { return proto.EnumName(Entry_Kind_name, int32(x)) } func (Entry_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } // An Entry represents a single RPC activity, typically a request or response. 
type Entry struct { Kind Entry_Kind `protobuf:"varint,1,opt,name=kind,enum=rpcreplay.Entry_Kind" json:"kind,omitempty"` Method string `protobuf:"bytes,2,opt,name=method" json:"method,omitempty"` Message *google_protobuf.Any `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` IsError bool `protobuf:"varint,4,opt,name=is_error,json=isError" json:"is_error,omitempty"` RefIndex int32 `protobuf:"varint,5,opt,name=ref_index,json=refIndex" json:"ref_index,omitempty"` } func (m *Entry) Reset() { *m = Entry{} } func (m *Entry) String() string { return proto.CompactTextString(m) } func (*Entry) ProtoMessage() {} func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *Entry) GetKind() Entry_Kind { if m != nil { return m.Kind } return Entry_TYPE_UNSPECIFIED } func (m *Entry) GetMethod() string { if m != nil { return m.Method } return "" } func (m *Entry) GetMessage() *google_protobuf.Any { if m != nil { return m.Message } return nil } func (m *Entry) GetIsError() bool { if m != nil { return m.IsError } return false } func (m *Entry) GetRefIndex() int32 { if m != nil { return m.RefIndex } return 0 } func init() { proto.RegisterType((*Entry)(nil), "rpcreplay.Entry") proto.RegisterEnum("rpcreplay.Entry_Kind", Entry_Kind_name, Entry_Kind_value) } func init() { proto.RegisterFile("rpcreplay.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 289 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x44, 0x8e, 0xdf, 0x4e, 0xc2, 0x30, 0x14, 0xc6, 0x2d, 0x6c, 0x30, 0x0e, 0xfe, 0xa9, 0x0d, 0x9a, 0xa1, 0x37, 0x0b, 0x57, 0xf3, 0xa6, 0x24, 0xf8, 0x04, 0x04, 0x8e, 0x09, 0x31, 0x22, 0xb6, 0xc3, 0xc4, 0x1b, 0x17, 0x70, 0x05, 0x17, 0xa1, 0x25, 0xdd, 0x4c, 0xdc, 0x6b, 0xf8, 0xc4, 0x66, 0x13, 0xf4, 0xae, 0xbf, 0x7e, 0xbf, 0x9c, 0xef, 0x83, 0x33, 0xbb, 0x7b, 0xb3, 0x6a, 0xb7, 0x59, 0x14, 0x7c, 0x67, 0x4d, 0x6e, 0x58, 0xeb, 0xef, 0xe3, 0xaa, 0xbb, 0x36, 0x66, 0xbd, 0x51, 0xfd, 0x2a, 0x58, 0x7e, 0xae, 
0xfa, 0x0b, 0xbd, 0xb7, 0x7a, 0xdf, 0x35, 0x70, 0x51, 0xe7, 0xb6, 0x60, 0x37, 0xe0, 0x7c, 0xa4, 0x3a, 0xf1, 0x49, 0x40, 0xc2, 0xd3, 0xc1, 0x05, 0xff, 0xbf, 0x57, 0xe5, 0xfc, 0x3e, 0xd5, 0x89, 0xa8, 0x14, 0x76, 0x09, 0x8d, 0xad, 0xca, 0xdf, 0x4d, 0xe2, 0xd7, 0x02, 0x12, 0xb6, 0xc4, 0x9e, 0x18, 0x87, 0xe6, 0x56, 0x65, 0xd9, 0x62, 0xad, 0xfc, 0x7a, 0x40, 0xc2, 0xf6, 0xa0, 0xc3, 0x7f, 0x9b, 0xf9, 0xa1, 0x99, 0x0f, 0x75, 0x21, 0x0e, 0x12, 0xeb, 0x82, 0x97, 0x66, 0xb1, 0xb2, 0xd6, 0x58, 0xdf, 0x09, 0x48, 0xe8, 0x89, 0x66, 0x9a, 0x61, 0x89, 0xec, 0x1a, 0x5a, 0x56, 0xad, 0xe2, 0x54, 0x27, 0xea, 0xcb, 0x77, 0x03, 0x12, 0xba, 0xc2, 0xb3, 0x6a, 0x35, 0x29, 0xb9, 0xf7, 0x0a, 0x4e, 0xb9, 0x86, 0x75, 0x80, 0x46, 0x2f, 0x33, 0x8c, 0xe7, 0x53, 0x39, 0xc3, 0xd1, 0xe4, 0x6e, 0x82, 0x63, 0x7a, 0xc4, 0xda, 0xd0, 0x14, 0xf8, 0x34, 0x47, 0x19, 0x51, 0xc2, 0x8e, 0xc1, 0x13, 0x28, 0x67, 0x8f, 0x53, 0x89, 0xb4, 0xc6, 0xce, 0xe1, 0x64, 0x24, 0x70, 0x18, 0x61, 0x2c, 0x23, 0x81, 0xc3, 0x07, 0x5a, 0x67, 0x1e, 0x38, 0x12, 0xa7, 0x63, 0xea, 0x94, 0x2f, 0x81, 0xa3, 0x67, 0xea, 0x2e, 0x1b, 0xd5, 0xdc, 0xdb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x9b, 0x9d, 0x4f, 0x54, 0x01, 0x00, 0x00, } golang-google-cloud-0.9.0/internal/rpcreplay/proto/rpcreplay/rpcreplay.proto000066400000000000000000000045731312234511600274230ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
syntax = "proto3"; package rpcreplay; import "google/protobuf/any.proto"; // An Entry represents a single RPC activity, typically a request or response. message Entry { enum Kind { TYPE_UNSPECIFIED = 0; // A unary request. // method: the full name of the method // message: the request proto // is_error: false // ref_index: 0 REQUEST = 1; // A unary response. // method: the full name of the method // message: // if is_error: a google.rpc.Status proto // else: the response proto // ref_index: index in the sequence of Entries of matching request (1-based) RESPONSE = 2; // A method that creates a stream. // method: the full name of the method // message: // if is_error: a google.rpc.Status proto // else: nil // ref_index: 0 CREATE_STREAM = 3; // A call to Send on the client returned by a stream-creating method. // method: unset // message: the proto being sent // is_error: false // ref_index: index of matching CREATE_STREAM entry (1-based) SEND = 4; // message sent on stream // A call to Recv on the client returned by a stream-creating method. // method: unset // message: // if is_error: a google.rpc.Status proto, or nil on EOF // else: the received message // ref_index: index of matching CREATE_STREAM entry RECV = 5; // message received from stream } Kind kind = 1; string method = 2; // method name google.protobuf.Any message = 3; // request, response or error status bool is_error = 4; // was response an error? int32 ref_index = 5; // for RESPONSE, index of matching request; // for SEND/RECV, index of CREATE_STREAM } golang-google-cloud-0.9.0/internal/rpcreplay/rpcreplay.go000066400000000000000000000300031312234511600235040ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rpcreplay import ( "bufio" "encoding/binary" "errors" "fmt" "io" "os" "sync" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/status" pb "cloud.google.com/go/internal/rpcreplay/proto/rpcreplay" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/any" spb "google.golang.org/genproto/googleapis/rpc/status" ) // A Recorder records RPCs for later playback. type Recorder struct { mu sync.Mutex w *bufio.Writer f *os.File next int err error } // NewRecorder creates a recorder that writes to filename. The file will // also store the initial bytes for retrieval during replay. // // You must call Close on the Recorder to ensure that all data is written. func NewRecorder(filename string, initial []byte) (*Recorder, error) { f, err := os.Create(filename) if err != nil { return nil, err } rec, err := NewRecorderWriter(f, initial) if err != nil { _ = f.Close() return nil, err } rec.f = f return rec, nil } // NewRecorderWriter creates a recorder that writes to w. The initial // bytes will also be written to w for retrieval during replay. // // You must call Close on the Recorder to ensure that all data is written. func NewRecorderWriter(w io.Writer, initial []byte) (*Recorder, error) { bw := bufio.NewWriter(w) if err := writeHeader(bw, initial); err != nil { return nil, err } return &Recorder{w: bw, next: 1}, nil } // DialOptions returns the options that must be passed to grpc.Dial // to enable recording. 
func (r *Recorder) DialOptions() []grpc.DialOption { return []grpc.DialOption{ grpc.WithUnaryInterceptor(r.interceptUnary), } } // Close saves any unwritten information. func (r *Recorder) Close() error { r.mu.Lock() defer r.mu.Unlock() if r.err != nil { return r.err } err := r.w.Flush() if r.f != nil { if err2 := r.f.Close(); err == nil { err = err2 } } return err } // Intercepts all unary (non-stream) RPCs. func (r *Recorder) interceptUnary(ctx context.Context, method string, req, res interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { ereq := &entry{ kind: pb.Entry_REQUEST, method: method, msg: message{msg: req.(proto.Message)}, } refIndex, err := r.writeEntry(ereq) if err != nil { return err } ierr := invoker(ctx, method, req, res, cc, opts...) eres := &entry{ kind: pb.Entry_RESPONSE, refIndex: refIndex, } // If the error is not a gRPC status, then something more // serious is wrong. More significantly, we have no way // of serializing an arbitrary error. So just return it // without recording the response. if _, ok := status.FromError(ierr); !ok { r.mu.Lock() r.err = fmt.Errorf("saw non-status error in %s response: %v (%T)", method, ierr, ierr) r.mu.Unlock() return ierr } eres.msg.set(res, ierr) if _, err := r.writeEntry(eres); err != nil { return err } return ierr } func (r *Recorder) writeEntry(e *entry) (int, error) { r.mu.Lock() defer r.mu.Unlock() if r.err != nil { return 0, r.err } err := writeEntry(r.w, e) if err != nil { r.err = err return 0, err } n := r.next r.next++ return n, nil } // A Replayer replays a set of RPCs saved by a Recorder. type Replayer struct { initial []byte // initial state log func(format string, v ...interface{}) // for debugging mu sync.Mutex calls []*call } // A call represents a unary RPC, with a request and response (or error). type call struct { method string request proto.Message response message } // NewReplayer creates a Replayer that reads from filename. 
func NewReplayer(filename string) (*Replayer, error) { f, err := os.Open(filename) if err != nil { return nil, err } defer f.Close() return NewReplayerReader(f) } // NewReplayerReader creates a Replayer that reads from r. func NewReplayerReader(r io.Reader) (*Replayer, error) { rep := &Replayer{ log: func(string, ...interface{}) {}, } if err := rep.read(r); err != nil { return nil, err } return rep, nil } // read reads the stream of recorded entries. // It matches requests with responses, with each pair grouped // into a call struct. func (rep *Replayer) read(r io.Reader) error { r = bufio.NewReader(r) bytes, err := readHeader(r) if err != nil { return err } rep.initial = bytes callsByIndex := map[int]*call{} for i := 1; ; i++ { e, err := readEntry(r) if err != nil { return err } if e == nil { break } switch e.kind { case pb.Entry_REQUEST: callsByIndex[i] = &call{ method: e.method, request: e.msg.msg, } case pb.Entry_RESPONSE: call := callsByIndex[e.refIndex] if call == nil { return fmt.Errorf("replayer: no request for response #%d", i) } delete(callsByIndex, e.refIndex) call.response = e.msg rep.calls = append(rep.calls, call) default: return fmt.Errorf("replayer: unknown kind %s", e.kind) } } if len(callsByIndex) > 0 { return fmt.Errorf("replayer: %d unmatched requests", len(callsByIndex)) } return nil } // DialOptions returns the options that must be passed to grpc.Dial // to enable replaying. func (r *Replayer) DialOptions() []grpc.DialOption { return []grpc.DialOption{ // On replay, we make no RPCs, which means the connection may be closed // before the normally async Dial completes. Making the Dial synchronous // fixes that. grpc.WithBlock(), grpc.WithUnaryInterceptor(r.interceptUnary), } } // Initial returns the initial state saved by the Recorder. func (r *Replayer) Initial() []byte { return r.initial } // SetLogFunc sets a function to be used for debug logging. The function // should be safe to be called from multiple goroutines. 
func (r *Replayer) SetLogFunc(f func(format string, v ...interface{})) { r.log = f } // Close closes the Replayer. func (r *Replayer) Close() error { return nil } func (r *Replayer) interceptUnary(_ context.Context, method string, req, res interface{}, _ *grpc.ClientConn, _ grpc.UnaryInvoker, _ ...grpc.CallOption) error { mreq := req.(proto.Message) r.log("request %s (%s)", method, req) call := r.extractCall(method, mreq) if call == nil { return fmt.Errorf("replayer: request not found: %s", mreq) } r.log("returning %v", call.response) if call.response.err != nil { return call.response.err } proto.Merge(res.(proto.Message), call.response.msg) // copy msg into res return nil } // extractCall finds the first call in the list with the same method // and request. It returns nil if it can't find such a call. func (r *Replayer) extractCall(method string, req proto.Message) *call { r.mu.Lock() defer r.mu.Unlock() for i, call := range r.calls { if call == nil { continue } if method == call.method && proto.Equal(req, call.request) { r.calls[i] = nil // nil out this call so we don't reuse it return call } } return nil } // Fprint reads the entries from filename and writes them to w in human-readable form. // It is intended for debugging. func Fprint(w io.Writer, filename string) error { f, err := os.Open(filename) if err != nil { return err } defer f.Close() return FprintReader(w, f) } // FprintReader reads the entries from r and writes them to w in human-readable form. // It is intended for debugging. 
func FprintReader(w io.Writer, r io.Reader) error { initial, err := readHeader(r) if err != nil { return err } fmt.Fprintf(w, "initial state: %q\n", string(initial)) for i := 1; ; i++ { e, err := readEntry(r) if err != nil { return err } if e == nil { return nil } s := "message" if e.msg.err != nil { s = "error" } fmt.Fprintf(w, "#%d: kind: %s, method: %s, ref index: %d, %s:\n", i, e.kind, e.method, e.refIndex, s) if e.msg.err == nil { if err := proto.MarshalText(w, e.msg.msg); err != nil { return err } } else { fmt.Fprintf(w, "%v\n", e.msg.err) } } } // An entry holds one gRPC action (request, response, etc.). type entry struct { kind pb.Entry_Kind method string msg message refIndex int // index of corresponding request or create-stream } func (e1 *entry) equal(e2 *entry) bool { if e1 == nil && e2 == nil { return true } if e1 == nil || e2 == nil { return false } return e1.kind == e2.kind && e1.method == e2.method && proto.Equal(e1.msg.msg, e2.msg.msg) && errEqual(e1.msg.err, e2.msg.err) && e1.refIndex == e2.refIndex } func errEqual(e1, e2 error) bool { if e1 == e2 { return true } s1, ok1 := status.FromError(e1) s2, ok2 := status.FromError(e2) if !ok1 || !ok2 { return false } return proto.Equal(s1.Proto(), s2.Proto()) } // message holds either a single proto.Message or an error. 
type message struct { msg proto.Message err error } func (m *message) set(msg interface{}, err error) { if msg != nil { m.msg = msg.(proto.Message) } m.err = err } // File format: // header // sequence of Entry protos // // Header format: // magic string // a record containing the bytes of the initial state const magic = "RPCReplay" func writeHeader(w io.Writer, initial []byte) error { if _, err := io.WriteString(w, magic); err != nil { return err } return writeRecord(w, initial) } func readHeader(r io.Reader) ([]byte, error) { var buf [len(magic)]byte if _, err := io.ReadFull(r, buf[:]); err != nil { if err == io.EOF { err = errors.New("rpcreplay: empty replay file") } return nil, err } if string(buf[:]) != magic { return nil, errors.New("rpcreplay: not a replay file (does not begin with magic string)") } bytes, err := readRecord(r) if err == io.EOF { err = errors.New("rpcreplay: missing initial state") } return bytes, err } func writeEntry(w io.Writer, e *entry) error { var m proto.Message if e.msg.err != nil && e.msg.err != io.EOF { s, ok := status.FromError(e.msg.err) if !ok { return fmt.Errorf("rpcreplay: error %v is not a Status", e.msg.err) } m = s.Proto() } else { m = e.msg.msg } var a *any.Any var err error if m != nil { a, err = ptypes.MarshalAny(m) if err != nil { return err } } pe := &pb.Entry{ Kind: e.kind, Method: e.method, Message: a, IsError: e.msg.err != nil, RefIndex: int32(e.refIndex), } bytes, err := proto.Marshal(pe) if err != nil { return err } return writeRecord(w, bytes) } func readEntry(r io.Reader) (*entry, error) { buf, err := readRecord(r) if err == io.EOF { return nil, nil } if err != nil { return nil, err } var pe pb.Entry if err := proto.Unmarshal(buf, &pe); err != nil { return nil, err } var msg message if pe.Message != nil { var any ptypes.DynamicAny if err := ptypes.UnmarshalAny(pe.Message, &any); err != nil { return nil, err } if pe.IsError { msg.err = status.ErrorProto(any.Message.(*spb.Status)) } else { msg.msg = any.Message } } 
else if pe.IsError { msg.err = io.EOF } else { return nil, errors.New("rpcreplay: entry with nil message and false is_error") } return &entry{ kind: pe.Kind, method: pe.Method, msg: msg, refIndex: int(pe.RefIndex), }, nil } // A record consists of an unsigned 32-bit little-endian length L followed by L // bytes. func writeRecord(w io.Writer, data []byte) error { if err := binary.Write(w, binary.LittleEndian, uint32(len(data))); err != nil { return err } _, err := w.Write(data) return err } func readRecord(r io.Reader) ([]byte, error) { var size uint32 if err := binary.Read(r, binary.LittleEndian, &size); err != nil { return nil, err } buf := make([]byte, size) if _, err := io.ReadFull(r, buf); err != nil { return nil, err } return buf, nil } golang-google-cloud-0.9.0/internal/rpcreplay/rpcreplay_test.go000066400000000000000000000123541312234511600245540ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package rpcreplay import ( "bytes" "io" "reflect" "testing" ipb "cloud.google.com/go/internal/rpcreplay/proto/intstore" rpb "cloud.google.com/go/internal/rpcreplay/proto/rpcreplay" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) func TestRecordIO(t *testing.T) { buf := &bytes.Buffer{} want := []byte{1, 2, 3} if err := writeRecord(buf, want); err != nil { t.Fatal(err) } got, err := readRecord(buf) if err != nil { t.Fatal(err) } if !bytes.Equal(got, want) { t.Errorf("got %v, want %v", got, want) } } func TestHeaderIO(t *testing.T) { buf := &bytes.Buffer{} want := []byte{1, 2, 3} if err := writeHeader(buf, want); err != nil { t.Fatal(err) } got, err := readHeader(buf) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(got, want) { t.Errorf("got %v, want %v", got, want) } // readHeader errors for _, contents := range []string{"", "badmagic", "gRPCReplay"} { if _, err := readHeader(bytes.NewBufferString(contents)); err == nil { t.Errorf("%q: got nil, want error", contents) } } } func TestEntryIO(t *testing.T) { for i, want := range []*entry{ { kind: rpb.Entry_REQUEST, method: "method", msg: message{msg: &rpb.Entry{}}, refIndex: 7, }, { kind: rpb.Entry_RESPONSE, method: "method", msg: message{err: status.Error(codes.NotFound, "not found")}, refIndex: 8, }, { kind: rpb.Entry_RECV, method: "method", msg: message{err: io.EOF}, refIndex: 3, }, } { buf := &bytes.Buffer{} if err := writeEntry(buf, want); err != nil { t.Fatal(err) } got, err := readEntry(buf) if err != nil { t.Fatal(err) } if !got.equal(want) { t.Errorf("#%d: got %v, want %v", i, got, want) } } } var initialState = []byte{1, 2, 3} func TestRecord(t *testing.T) { srv := newIntStoreServer() defer srv.stop() buf := record(t, srv) gotIstate, err := readHeader(buf) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(gotIstate, initialState) { t.Fatalf("got %v, want %v", gotIstate, initialState) } item := 
&ipb.Item{Name: "a", Value: 1} wantEntries := []*entry{ // Set { kind: rpb.Entry_REQUEST, method: "/intstore.IntStore/Set", msg: message{msg: item}, }, { kind: rpb.Entry_RESPONSE, msg: message{msg: &ipb.SetResponse{PrevValue: 0}}, refIndex: 1, }, // Get { kind: rpb.Entry_REQUEST, method: "/intstore.IntStore/Get", msg: message{msg: &ipb.GetRequest{Name: "a"}}, }, { kind: rpb.Entry_RESPONSE, msg: message{msg: item}, refIndex: 3, }, { kind: rpb.Entry_REQUEST, method: "/intstore.IntStore/Get", msg: message{msg: &ipb.GetRequest{Name: "x"}}, }, { kind: rpb.Entry_RESPONSE, msg: message{err: status.Error(codes.NotFound, `"x"`)}, refIndex: 5, }, } for i, w := range wantEntries { g, err := readEntry(buf) if err != nil { t.Fatal(err) } if !g.equal(w) { t.Errorf("#%d:\ngot %+v\nwant %+v", i+1, g, w) } } g, err := readEntry(buf) if err != nil { t.Fatal(err) } if g != nil { t.Errorf("\ngot %+v\nwant nil", g) } } func TestReplay(t *testing.T) { srv := newIntStoreServer() defer srv.stop() buf := record(t, srv) rep, err := NewReplayerReader(buf) if err != nil { t.Fatal(err) } if got, want := rep.Initial(), initialState; !reflect.DeepEqual(got, want) { t.Fatalf("got %v, want %v", got, want) } // Replay the test. testService(t, srv.Addr, rep.DialOptions()) } func record(t *testing.T, srv *intStoreServer) *bytes.Buffer { buf := &bytes.Buffer{} rec, err := NewRecorderWriter(buf, initialState) if err != nil { t.Fatal(err) } testService(t, srv.Addr, rec.DialOptions()) if err := rec.Close(); err != nil { t.Fatal(err) } return buf } func testService(t *testing.T, addr string, opts []grpc.DialOption) { conn, err := grpc.Dial(addr, append([]grpc.DialOption{grpc.WithInsecure()}, opts...)...) 
if err != nil { t.Fatal(err) } defer conn.Close() client := ipb.NewIntStoreClient(conn) ctx := context.Background() item := &ipb.Item{Name: "a", Value: 1} res, err := client.Set(ctx, item) if err != nil { t.Fatal(err) } if res.PrevValue != 0 { t.Errorf("got %d, want 0", res.PrevValue) } got, err := client.Get(ctx, &ipb.GetRequest{Name: "a"}) if err != nil { t.Fatal(err) } if !proto.Equal(got, item) { t.Errorf("got %v, want %v", got, item) } _, err = client.Get(ctx, &ipb.GetRequest{Name: "x"}) if err == nil { t.Fatal("got nil, want error") } if _, ok := status.FromError(err); !ok { t.Errorf("got error type %T, want a grpc/status.Status", err) } } golang-google-cloud-0.9.0/internal/testutil/000077500000000000000000000000001312234511600210345ustar00rootroot00000000000000golang-google-cloud-0.9.0/internal/testutil/context.go000066400000000000000000000040511312234511600230470ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package testutil contains helper functions for writing tests. package testutil import ( "io/ioutil" "log" "os" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" ) const ( envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" ) // ProjID returns the project ID to use in integration tests, or the empty // string if none is configured. 
func ProjID() string { projID := os.Getenv(envProjID) if projID == "" { return "" } return projID } // TokenSource returns the OAuth2 token source to use in integration tests, // or nil if none is configured. If the environment variable is unset, // TokenSource will try to find 'Application Default Credentials'. Else, // TokenSource will return nil. // TokenSource will log.Fatal if the token source is specified but missing or invalid. func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource { key := os.Getenv(envPrivateKey) if key == "" { // Try for application default credentials. ts, err := google.DefaultTokenSource(ctx, scopes...) if err != nil { log.Println("No 'Application Default Credentials' found.") return nil } return ts } jsonKey, err := ioutil.ReadFile(key) if err != nil { log.Fatalf("Cannot read the JSON key file, err: %v", err) } conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) if err != nil { log.Fatalf("google.JWTConfigFromJSON: %v", err) } return conf.TokenSource(ctx) } golang-google-cloud-0.9.0/internal/testutil/server.go000066400000000000000000000035451312234511600227000ustar00rootroot00000000000000/* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package testutil import ( "net" grpc "google.golang.org/grpc" ) // A Server is an in-process gRPC server, listening on a system-chosen port on // the local loopback interface. Servers are for testing only and are not // intended to be used in production code. 
// // To create a server, make a new Server, register your handlers, then call // Start: // // srv, err := NewServer() // ... // mypb.RegisterMyServiceServer(srv.Gsrv, &myHandler) // .... // srv.Start() // // Clients should connect to the server with no security: // // conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) // ... type Server struct { Addr string l net.Listener Gsrv *grpc.Server } // NewServer creates a new Server. The Server will be listening for gRPC connections // at the address named by the Addr field, without TLS. func NewServer(opts ...grpc.ServerOption) (*Server, error) { l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, err } s := &Server{ Addr: l.Addr().String(), l: l, Gsrv: grpc.NewServer(opts...), } return s, nil } // Start causes the server to start accepting incoming connections. // Call Start after registering handlers. func (s *Server) Start() { go s.Gsrv.Serve(s.l) } // Close shuts down the server. func (s *Server) Close() { s.Gsrv.Stop() s.l.Close() } golang-google-cloud-0.9.0/internal/testutil/server_test.go000066400000000000000000000016171312234511600237350ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package testutil import ( "testing" grpc "google.golang.org/grpc" ) func TestNewServer(t *testing.T) { srv, err := NewServer() if err != nil { t.Fatal(err) } srv.Start() conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) if err != nil { t.Fatal(err) } conn.Close() srv.Close() } golang-google-cloud-0.9.0/internal/testutil/unique.go000066400000000000000000000056421312234511600227000ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // This file supports generating unique IDs so that multiple test executions // don't interfere with each other, and cleaning up old entities that may // remain if tests exit early. package testutil import ( "fmt" "regexp" "strconv" "sync" "time" ) var startTime = time.Now().UTC() // A UIDSpace manages a set of unique IDs distinguished by a prefix. type UIDSpace struct { Prefix string re *regexp.Regexp mu sync.Mutex count int } func NewUIDSpace(prefix string) *UIDSpace { return &UIDSpace{ Prefix: prefix, re: regexp.MustCompile("^" + regexp.QuoteMeta(prefix) + `-(\d{4})(\d{2})(\d{2})-(\d+)-\d+$`), } } // New generates a new unique ID . The ID consists of the UIDSpace's prefix, a // timestamp, and a counter value. All unique IDs generated in the same test // execution will have the same timestamp. // // Aside from the characters in the prefix, IDs contain only letters, numbers // and hyphens. 
func (s *UIDSpace) New() string { return s.newID(startTime) } func (s *UIDSpace) newID(t time.Time) string { s.mu.Lock() c := s.count s.count++ s.mu.Unlock() // Write the time as a date followed by nanoseconds from midnight of that date. // That makes it easier to see the approximate time of the ID when it is displayed. y, m, d := t.Date() ns := t.Sub(time.Date(y, m, d, 0, 0, 0, 0, time.UTC)) // Zero-pad the counter for lexical sort order for IDs with the same timestamp. return fmt.Sprintf("%s-%04d%02d%02d-%d-%04d", s.Prefix, y, m, d, ns, c) } // Timestamp extracts the timestamp of uid, which must have been generated by // s. The second return value is true on success, false if there was a problem. func (s *UIDSpace) Timestamp(uid string) (time.Time, bool) { subs := s.re.FindStringSubmatch(uid) if subs == nil { return time.Time{}, false } y, err1 := strconv.Atoi(subs[1]) m, err2 := strconv.Atoi(subs[2]) d, err3 := strconv.Atoi(subs[3]) ns, err4 := strconv.Atoi(subs[4]) if err1 != nil || err2 != nil || err3 != nil || err4 != nil { return time.Time{}, false } return time.Date(y, time.Month(m), d, 0, 0, 0, ns, time.UTC), true } // Older reports whether uid was created by m and has a timestamp older than // the current time by at least d. func (s *UIDSpace) Older(uid string, d time.Duration) bool { ts, ok := s.Timestamp(uid) if !ok { return false } return time.Since(ts) > d } golang-google-cloud-0.9.0/internal/testutil/unique_test.go000066400000000000000000000032331312234511600237310ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package testutil import ( "testing" "time" ) func TestNew(t *testing.T) { s := NewUIDSpace("prefix") tm := time.Date(2017, 1, 6, 0, 0, 0, 21, time.UTC) got := s.newID(tm) want := "prefix-20170106-21-0000" if got != want { t.Errorf("got %q, want %q", got, want) } } func TestTimestamp(t *testing.T) { s := NewUIDSpace("unique-ID") uid := s.New() got, ok := s.Timestamp(uid) if !ok { t.Fatal("got ok = false, want true") } if !startTime.Equal(got) { t.Errorf("got %s, want %s", got, startTime) } got, ok = s.Timestamp("unique-ID-20160308-123-8") if !ok { t.Fatal("got false, want true") } if want := time.Date(2016, 3, 8, 0, 0, 0, 123, time.UTC); !want.Equal(got) { t.Errorf("got %s, want %s", got, want) } if _, ok = s.Timestamp("invalid-time-1234"); ok { t.Error("got true, want false") } } func TestOlder(t *testing.T) { s := NewUIDSpace("uid") // A non-matching ID returns false. id2 := NewUIDSpace("different-prefix").New() if got, want := s.Older(id2, time.Second), false; got != want { t.Errorf("got %t, want %t", got, want) } } golang-google-cloud-0.9.0/internal/tracecontext/000077500000000000000000000000001312234511600216625ustar00rootroot00000000000000golang-google-cloud-0.9.0/internal/tracecontext/tracecontext.go000066400000000000000000000046011312234511600247150ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package tracecontext provides encoders and decoders for Stackdriver Trace contexts.
package tracecontext

import "encoding/binary"

const (
	versionID    = 0
	traceIDField = 0
	spanIDField  = 1
	optsField    = 2

	traceIDLen = 16
	spanIDLen  = 8
	optsLen    = 1

	// Len represents the length of trace context.
	Len = 1 + 1 + traceIDLen + 1 + spanIDLen + 1 + optsLen
)

// Encode encodes trace ID, span ID and options into dst. The number of bytes
// written will be returned. If len(dst) isn't big enough to fit the trace
// context, or traceID is not exactly traceIDLen bytes long, a negative number
// is returned and dst is left unmodified.
func Encode(dst []byte, traceID []byte, spanID uint64, opts byte) (n int) {
	if len(dst) < Len {
		return -1
	}
	// The wire format fixes the trace ID at traceIDLen bytes. A shorter or
	// longer slice would silently produce a malformed context that Decode
	// cannot parse, so reject it up front.
	if len(traceID) != traceIDLen {
		return -1
	}
	var offset = 0
	putByte := func(b byte) { dst[offset] = b; offset++ }
	putUint64 := func(u uint64) { binary.LittleEndian.PutUint64(dst[offset:], u); offset += 8 }

	putByte(versionID)
	putByte(traceIDField)
	for _, b := range traceID {
		putByte(b)
	}
	putByte(spanIDField)
	putUint64(spanID)
	putByte(optsField)
	putByte(opts)
	return offset
}

// Decode decodes the src into a trace ID, span ID and options. If src doesn't
// contain a valid trace context, ok = false is returned.
func Decode(src []byte) (traceID []byte, spanID uint64, opts byte, ok bool) { if len(src) < Len { return traceID, spanID, 0, false } var offset = 0 readByte := func() byte { b := src[offset]; offset++; return b } readUint64 := func() uint64 { v := binary.LittleEndian.Uint64(src[offset:]); offset += 8; return v } if readByte() != versionID { return traceID, spanID, 0, false } for offset < len(src) { switch readByte() { case traceIDField: traceID = src[offset : offset+traceIDLen] offset += traceIDLen case spanIDField: spanID = readUint64() case optsField: opts = readByte() } } return traceID, spanID, opts, true } golang-google-cloud-0.9.0/internal/tracecontext/tracecontext_test.go000066400000000000000000000067451312234511600257670ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package tracecontext import ( "reflect" "testing" ) var validData = []byte{0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, 98, 99, 100, 101, 102, 103, 104, 2, 1} func TestDecode(t *testing.T) { tests := []struct { name string data []byte wantTraceID []byte wantSpanID uint64 wantOpts byte wantOk bool }{ { name: "nil data", data: nil, wantTraceID: nil, wantSpanID: 0, wantOpts: 0, wantOk: false, }, { name: "short data", data: []byte{0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77}, wantTraceID: nil, wantSpanID: 0, wantOpts: 0, wantOk: false, }, { name: "wrong field number", data: []byte{0, 1, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77}, wantTraceID: nil, wantSpanID: 0, wantOpts: 0, wantOk: false, }, { name: "valid data", data: validData, wantTraceID: []byte{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}, wantSpanID: 0x6867666564636261, wantOpts: 1, wantOk: true, }, } for _, tt := range tests { gotTraceID, gotSpanID, gotOpts, gotOk := Decode(tt.data) if !reflect.DeepEqual(gotTraceID, tt.wantTraceID) { t.Errorf("%s: Decode() gotTraceID = %v, want %v", tt.name, gotTraceID, tt.wantTraceID) } if gotSpanID != tt.wantSpanID { t.Errorf("%s: Decode() gotSpanID = %v, want %v", tt.name, gotSpanID, tt.wantSpanID) } if gotOpts != tt.wantOpts { t.Errorf("%s: Decode() gotOpts = %v, want %v", tt.name, gotOpts, tt.wantOpts) } if gotOk != tt.wantOk { t.Errorf("%s: Decode() gotOk = %v, want %v", tt.name, gotOk, tt.wantOk) } } } func TestEncode(t *testing.T) { tests := []struct { name string dst []byte traceID []byte spanID uint64 opts byte wantN int wantData []byte }{ { name: "short data", dst: make([]byte, 0), traceID: []byte("00112233445566"), spanID: 0x6867666564636261, opts: 1, wantN: -1, wantData: make([]byte, 0), }, { name: "valid data", dst: make([]byte, Len), traceID: []byte{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}, spanID: 0x6867666564636261, opts: 1, wantN: Len, wantData: validData, 
}, } for _, tt := range tests { gotN := Encode(tt.dst, tt.traceID, tt.spanID, tt.opts) if gotN != tt.wantN { t.Errorf("%s: n = %v, want %v", tt.name, gotN, tt.wantN) } if gotData := tt.dst; !reflect.DeepEqual(gotData, tt.wantData) { t.Errorf("%s: dst = %v, want %v", tt.name, gotData, tt.wantData) } } } func BenchmarkDecode(b *testing.B) { for i := 0; i < b.N; i++ { Decode(validData) } } func BenchmarkEncode(b *testing.B) { for i := 0; i < b.N; i++ { traceID := make([]byte, 16) var opts byte Encode(validData, traceID, 0, opts) } } golang-google-cloud-0.9.0/internal/version/000077500000000000000000000000001312234511600206445ustar00rootroot00000000000000golang-google-cloud-0.9.0/internal/version/update_version.sh000077500000000000000000000001611312234511600242300ustar00rootroot00000000000000#!/bin/bash today=$(date +%Y%m%d) sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE golang-google-cloud-0.9.0/internal/version/version.go000066400000000000000000000033631312234511600226650ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:generate ./update_version.sh // Package version contains version information for Google Cloud Client // Libraries for Go, as reported in request headers. package version import ( "runtime" "strings" "unicode" ) // Repo is the current version of the client libraries in this // repo. It should be a date in YYYYMMDD format. 
const Repo = "20170621" // Go returns the Go runtime version. The returned string // has no whitespace. func Go() string { return goVersion } var goVersion = goVer(runtime.Version()) const develPrefix = "devel +" func goVer(s string) string { if strings.HasPrefix(s, develPrefix) { s = s[len(develPrefix):] if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { s = s[:p] } return s } if strings.HasPrefix(s, "go1") { s = s[2:] var prerelease string if p := strings.IndexFunc(s, notSemverRune); p >= 0 { s, prerelease = s[:p], s[p:] } if strings.HasSuffix(s, ".") { s += "0" } else if strings.Count(s, ".") < 2 { s += ".0" } if prerelease != "" { s += "-" + prerelease } return s } return "" } func notSemverRune(r rune) bool { return strings.IndexRune("0123456789.", r) < 0 } golang-google-cloud-0.9.0/internal/version/version_test.go000066400000000000000000000021161312234511600237170ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package version import "testing" func TestGoVer(t *testing.T) { for _, tst := range []struct { in, want string }{ {"go1.8", "1.8.0"}, {"go1.7.3", "1.7.3"}, {"go1.8.typealias", "1.8.0-typealias"}, {"go1.8beta1", "1.8.0-beta1"}, {"go1.8rc2", "1.8.0-rc2"}, {"devel +824f981dd4b7 Tue Apr 29 21:41:54 2014 -0400", "824f981dd4b7"}, {"foo bar zipzap", ""}, } { if got := goVer(tst.in); got != tst.want { t.Errorf("goVer(%q) = %q, want %q", tst.in, got, tst.want) } } } golang-google-cloud-0.9.0/key.json.enc000066400000000000000000000046001312234511600175720ustar00rootroot00000000000000-{_z ohJG_=`B#3xBpK9w)9ۄ>@:ZOxڸ]Jj~V:nf+cۅk5??6E+S$7pNNsK7 xɘ(J3*d٩E6MCÜ+\e|8 {p"s|IWA`}dBOtbZn-I`@Fahzҫ l<뛏}Rv&=Vq&c;9Bt1Ɲ'ҙ5hAZt&0g?hPKOkFxW{ᵪv"S%f8j`jHX=ެޠ&e};Nl/v̈갛2k}H0yq!8JoO7M%5M_Bѵ _A+VI*;*{R쟨iᅳ^}{l4Rʓzی 2 Æ!X n +UJH>}f|<3qgp>yc޲Jk䇌W`>Ӱ EZQ&رg|,.cvJj)d6u\w*"}(6vvG2j6+J!I1bA2NNqa#,^r9mn__6MhU1SI(T3%Yz9pSKq<䉂Cc/Q7\0 qʗ쬼^/e7rc&7rgLZ,sUeayuo hL)Cgo?=PU4K#Nԣ$*Lr~_䘕S(nQG*i+ W{HaOoļ c|:&JJ>YvIԊ^EtAo%NW 3}2uͶcMvt4I~HKECEvA=gx4"n+uFvaԶ~ pj*T+$ubkuG@r||F58Y5 W>te73B1- 2h#tI6zUuoHNʰS{w^6Z}x)mbeAUC%Dۻy@`/jJal_*-[Rm98<㿏rreU8qLWҨöLmQ7hksĜ)D'P'0=쀛%+#!A"&AoAI"tt\po#r|ALw{NG:(9&YIi{; QТ+-ө|r1 (|RtUNXP\6RBU~.V' }넂j ܛEZ(_{zIՃ̟)Yp$~lƶ5!g/*}CPL-UU}&@ȋ[:⢵ 8m;.j;XܬbLhI,t>Uave MQV= /ZU<4yh u)]y ex$*n@_^/au-_F oز)Em=[r[%NHk攰 ` dir?۶l~ p]YaV7al Czu ma+Gѡ!dh[P6$WJ } `EkNF֑ɒåja* am^ڊLJ|mQB]x@E-/QgmxY޴G;79R[fR^~?x햛t^fa-cSxEYY,H&숝XX SL3H:8U8/AI"u7)#~ 3N caCum嘭4DC00Bf\80e[EeB<|[<Ë4 ty~ݢEDŽfՌZ ^ 6Is5Z%'d 3-N'X:zT1H -'2zXuAg浝G\%= BS _q}Prõn\V! !?9E~ 4d/0En[X'9golang-google-cloud-0.9.0/language/000077500000000000000000000000001312234511600171265ustar00rootroot00000000000000golang-google-cloud-0.9.0/language/apiv1/000077500000000000000000000000001312234511600201465ustar00rootroot00000000000000golang-google-cloud-0.9.0/language/apiv1/doc.go000066400000000000000000000027271312234511600212520ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package language is an experimental, auto-generated package for the // Google Cloud Natural Language API. // // Google Cloud Natural Language API provides natural language understanding // technologies to developers. Examples include sentiment analysis, entity // recognition, and text annotations. package language // import "cloud.google.com/go/language/apiv1" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", } } golang-google-cloud-0.9.0/language/apiv1/language_client.go000066400000000000000000000151671312234511600236300ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package language import ( "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" languagepb "google.golang.org/genproto/googleapis/cloud/language/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // CallOptions contains the retry settings for each method of Client. type CallOptions struct { AnalyzeSentiment []gax.CallOption AnalyzeEntities []gax.CallOption AnalyzeSyntax []gax.CallOption AnnotateText []gax.CallOption } func defaultClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("language.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultCallOptions() *CallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &CallOptions{ AnalyzeSentiment: retry[[2]string{"default", "idempotent"}], AnalyzeEntities: retry[[2]string{"default", "idempotent"}], AnalyzeSyntax: retry[[2]string{"default", "idempotent"}], AnnotateText: retry[[2]string{"default", "idempotent"}], } } // Client is a client for interacting with Google Cloud Natural Language API. type Client struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. 
client languagepb.LanguageServiceClient // The call options for this service. CallOptions *CallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewClient creates a new language service client. // // Provides text analysis operations such as sentiment analysis and entity // recognition. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) if err != nil { return nil, err } c := &Client{ conn: conn, CallOptions: defaultCallOptions(), client: languagepb.NewLanguageServiceClient(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *Client) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *Client) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *Client) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // AnalyzeSentiment analyzes the sentiment of the provided text. func (c *Client) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSentimentResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.AnalyzeSentiment[0:len(c.CallOptions.AnalyzeSentiment):len(c.CallOptions.AnalyzeSentiment)], opts...) 
var resp *languagepb.AnalyzeSentimentResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.AnalyzeSentiment(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // AnalyzeEntities finds named entities (currently proper names and common nouns) in the text // along with entity types, salience, mentions for each entity, and // other properties. func (c *Client) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitiesResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.AnalyzeEntities[0:len(c.CallOptions.AnalyzeEntities):len(c.CallOptions.AnalyzeEntities)], opts...) var resp *languagepb.AnalyzeEntitiesResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.AnalyzeEntities(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // AnalyzeSyntax analyzes the syntax of the text and provides sentence boundaries and // tokenization along with part of speech tags, dependency trees, and other // properties. func (c *Client) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSyntaxResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.AnalyzeSyntax[0:len(c.CallOptions.AnalyzeSyntax):len(c.CallOptions.AnalyzeSyntax)], opts...) var resp *languagepb.AnalyzeSyntaxResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.AnalyzeSyntax(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, err } return resp, nil } // AnnotateText a convenience method that provides all the features that analyzeSentiment, // analyzeEntities, and analyzeSyntax provide in one call. func (c *Client) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest, opts ...gax.CallOption) (*languagepb.AnnotateTextResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.AnnotateText[0:len(c.CallOptions.AnnotateText):len(c.CallOptions.AnnotateText)], opts...) var resp *languagepb.AnnotateTextResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.AnnotateText(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } golang-google-cloud-0.9.0/language/apiv1/language_client_example_test.go000066400000000000000000000045261312234511600263770ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package language_test import ( "cloud.google.com/go/language/apiv1" "golang.org/x/net/context" languagepb "google.golang.org/genproto/googleapis/cloud/language/v1" ) func ExampleNewClient() { ctx := context.Background() c, err := language.NewClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. 
_ = c } func ExampleClient_AnalyzeSentiment() { ctx := context.Background() c, err := language.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &languagepb.AnalyzeSentimentRequest{ // TODO: Fill request struct fields. } resp, err := c.AnalyzeSentiment(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleClient_AnalyzeEntities() { ctx := context.Background() c, err := language.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &languagepb.AnalyzeEntitiesRequest{ // TODO: Fill request struct fields. } resp, err := c.AnalyzeEntities(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleClient_AnalyzeSyntax() { ctx := context.Background() c, err := language.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &languagepb.AnalyzeSyntaxRequest{ // TODO: Fill request struct fields. } resp, err := c.AnalyzeSyntax(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleClient_AnnotateText() { ctx := context.Background() c, err := language.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &languagepb.AnnotateTextRequest{ // TODO: Fill request struct fields. } resp, err := c.AnnotateText(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } golang-google-cloud-0.9.0/language/apiv1/mock_test.go000066400000000000000000000256121312234511600224730ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package language import ( languagepb "google.golang.org/genproto/googleapis/cloud/language/v1" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockLanguageServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. languagepb.LanguageServiceServer reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockLanguageServer) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest) (*languagepb.AnalyzeSentimentResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*languagepb.AnalyzeSentimentResponse), nil } func (s *mockLanguageServer) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest) (*languagepb.AnalyzeEntitiesResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*languagepb.AnalyzeEntitiesResponse), nil } func (s *mockLanguageServer) 
AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest) (*languagepb.AnalyzeSyntaxResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*languagepb.AnalyzeSyntaxResponse), nil } func (s *mockLanguageServer) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest) (*languagepb.AnnotateTextResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*languagepb.AnnotateTextResponse), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. 
var clientOpt option.ClientOption var ( mockLanguage mockLanguageServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() languagepb.RegisterLanguageServiceServer(serv, &mockLanguage) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestLanguageServiceAnalyzeSentiment(t *testing.T) { var language string = "language-1613589672" var expectedResponse = &languagepb.AnalyzeSentimentResponse{ Language: language, } mockLanguage.err = nil mockLanguage.reqs = nil mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) var document *languagepb.Document = &languagepb.Document{} var request = &languagepb.AnalyzeSentimentRequest{ Document: document, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnalyzeSentiment(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestLanguageServiceAnalyzeSentimentError(t *testing.T) { errCode := codes.PermissionDenied mockLanguage.err = gstatus.Error(errCode, "test error") var document *languagepb.Document = &languagepb.Document{} var request = &languagepb.AnalyzeSentimentRequest{ Document: document, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnalyzeSentiment(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestLanguageServiceAnalyzeEntities(t *testing.T) { 
var language string = "language-1613589672" var expectedResponse = &languagepb.AnalyzeEntitiesResponse{ Language: language, } mockLanguage.err = nil mockLanguage.reqs = nil mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) var document *languagepb.Document = &languagepb.Document{} var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeEntitiesRequest{ Document: document, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnalyzeEntities(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestLanguageServiceAnalyzeEntitiesError(t *testing.T) { errCode := codes.PermissionDenied mockLanguage.err = gstatus.Error(errCode, "test error") var document *languagepb.Document = &languagepb.Document{} var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeEntitiesRequest{ Document: document, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnalyzeEntities(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestLanguageServiceAnalyzeSyntax(t *testing.T) { var language string = "language-1613589672" var expectedResponse = &languagepb.AnalyzeSyntaxResponse{ Language: language, } mockLanguage.err = nil mockLanguage.reqs = nil mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) var document *languagepb.Document = &languagepb.Document{} var encodingType 
languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeSyntaxRequest{ Document: document, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnalyzeSyntax(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestLanguageServiceAnalyzeSyntaxError(t *testing.T) { errCode := codes.PermissionDenied mockLanguage.err = gstatus.Error(errCode, "test error") var document *languagepb.Document = &languagepb.Document{} var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeSyntaxRequest{ Document: document, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnalyzeSyntax(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestLanguageServiceAnnotateText(t *testing.T) { var language string = "language-1613589672" var expectedResponse = &languagepb.AnnotateTextResponse{ Language: language, } mockLanguage.err = nil mockLanguage.reqs = nil mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) var document *languagepb.Document = &languagepb.Document{} var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{} var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnnotateTextRequest{ Document: document, Features: features, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != 
nil { t.Fatal(err) } resp, err := c.AnnotateText(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestLanguageServiceAnnotateTextError(t *testing.T) { errCode := codes.PermissionDenied mockLanguage.err = gstatus.Error(errCode, "test error") var document *languagepb.Document = &languagepb.Document{} var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{} var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnnotateTextRequest{ Document: document, Features: features, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnnotateText(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } golang-google-cloud-0.9.0/language/apiv1beta2/000077500000000000000000000000001312234511600210645ustar00rootroot00000000000000golang-google-cloud-0.9.0/language/apiv1beta2/doc.go000066400000000000000000000027341312234511600221660ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package language is an experimental, auto-generated package for the // Google Cloud Natural Language API. // // Google Cloud Natural Language API provides natural language understanding // technologies to developers. Examples include sentiment analysis, entity // recognition, and text annotations. package language // import "cloud.google.com/go/language/apiv1beta2" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", } } golang-google-cloud-0.9.0/language/apiv1beta2/language_client.go000066400000000000000000000171731312234511600245450ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package language import ( "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // CallOptions contains the retry settings for each method of Client. type CallOptions struct { AnalyzeSentiment []gax.CallOption AnalyzeEntities []gax.CallOption AnalyzeEntitySentiment []gax.CallOption AnalyzeSyntax []gax.CallOption AnnotateText []gax.CallOption } func defaultClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("language.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultCallOptions() *CallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &CallOptions{ AnalyzeSentiment: retry[[2]string{"default", "idempotent"}], AnalyzeEntities: retry[[2]string{"default", "idempotent"}], AnalyzeEntitySentiment: retry[[2]string{"default", "idempotent"}], AnalyzeSyntax: retry[[2]string{"default", "idempotent"}], AnnotateText: retry[[2]string{"default", "idempotent"}], } } // Client is a client for interacting with Google Cloud Natural Language API. type Client struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. client languagepb.LanguageServiceClient // The call options for this service. CallOptions *CallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewClient creates a new language service client. // // Provides text analysis operations such as sentiment analysis and entity // recognition. 
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) if err != nil { return nil, err } c := &Client{ conn: conn, CallOptions: defaultCallOptions(), client: languagepb.NewLanguageServiceClient(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *Client) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *Client) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *Client) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // AnalyzeSentiment analyzes the sentiment of the provided text. func (c *Client) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSentimentResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.AnalyzeSentiment[0:len(c.CallOptions.AnalyzeSentiment):len(c.CallOptions.AnalyzeSentiment)], opts...) var resp *languagepb.AnalyzeSentimentResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.AnalyzeSentiment(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // AnalyzeEntities finds named entities (currently proper names and common nouns) in the text // along with entity types, salience, mentions for each entity, and // other properties. 
func (c *Client) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitiesResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.AnalyzeEntities[0:len(c.CallOptions.AnalyzeEntities):len(c.CallOptions.AnalyzeEntities)], opts...) var resp *languagepb.AnalyzeEntitiesResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.AnalyzeEntities(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // AnalyzeEntitySentiment finds entities, similar to [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] in the text and analyzes // sentiment associated with each entity and its mentions. func (c *Client) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.AnalyzeEntitySentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitySentimentResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.AnalyzeEntitySentiment[0:len(c.CallOptions.AnalyzeEntitySentiment):len(c.CallOptions.AnalyzeEntitySentiment)], opts...) var resp *languagepb.AnalyzeEntitySentimentResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.AnalyzeEntitySentiment(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // AnalyzeSyntax analyzes the syntax of the text and provides sentence boundaries and // tokenization along with part of speech tags, dependency trees, and other // properties. func (c *Client) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest, opts ...gax.CallOption) (*languagepb.AnalyzeSyntaxResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.AnalyzeSyntax[0:len(c.CallOptions.AnalyzeSyntax):len(c.CallOptions.AnalyzeSyntax)], opts...) 
var resp *languagepb.AnalyzeSyntaxResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.AnalyzeSyntax(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // AnnotateText a convenience method that provides all syntax, sentiment, and entity // features in one call. func (c *Client) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest, opts ...gax.CallOption) (*languagepb.AnnotateTextResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.AnnotateText[0:len(c.CallOptions.AnnotateText):len(c.CallOptions.AnnotateText)], opts...) var resp *languagepb.AnnotateTextResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.AnnotateText(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } golang-google-cloud-0.9.0/language/apiv1beta2/language_client_example_test.go000066400000000000000000000053271312234511600273150ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package language_test import ( "cloud.google.com/go/language/apiv1beta2" "golang.org/x/net/context" languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2" ) func ExampleNewClient() { ctx := context.Background() c, err := language.NewClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleClient_AnalyzeSentiment() { ctx := context.Background() c, err := language.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &languagepb.AnalyzeSentimentRequest{ // TODO: Fill request struct fields. } resp, err := c.AnalyzeSentiment(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleClient_AnalyzeEntities() { ctx := context.Background() c, err := language.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &languagepb.AnalyzeEntitiesRequest{ // TODO: Fill request struct fields. } resp, err := c.AnalyzeEntities(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleClient_AnalyzeEntitySentiment() { ctx := context.Background() c, err := language.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &languagepb.AnalyzeEntitySentimentRequest{ // TODO: Fill request struct fields. } resp, err := c.AnalyzeEntitySentiment(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleClient_AnalyzeSyntax() { ctx := context.Background() c, err := language.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &languagepb.AnalyzeSyntaxRequest{ // TODO: Fill request struct fields. } resp, err := c.AnalyzeSyntax(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleClient_AnnotateText() { ctx := context.Background() c, err := language.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &languagepb.AnnotateTextRequest{ // TODO: Fill request struct fields. } resp, err := c.AnnotateText(ctx, req) if err != nil { // TODO: Handle error. 
} // TODO: Use resp. _ = resp } golang-google-cloud-0.9.0/language/apiv1beta2/mock_test.go000066400000000000000000000322561312234511600234130ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package language import ( languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockLanguageServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. languagepb.LanguageServiceServer reqs []proto.Message // If set, all calls return this error. 
err error // responses to return if err == nil resps []proto.Message } func (s *mockLanguageServer) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest) (*languagepb.AnalyzeSentimentResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*languagepb.AnalyzeSentimentResponse), nil } func (s *mockLanguageServer) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest) (*languagepb.AnalyzeEntitiesResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*languagepb.AnalyzeEntitiesResponse), nil } func (s *mockLanguageServer) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.AnalyzeEntitySentimentRequest) (*languagepb.AnalyzeEntitySentimentResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*languagepb.AnalyzeEntitySentimentResponse), nil } func (s *mockLanguageServer) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest) (*languagepb.AnalyzeSyntaxResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return 
s.resps[0].(*languagepb.AnalyzeSyntaxResponse), nil } func (s *mockLanguageServer) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest) (*languagepb.AnnotateTextResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*languagepb.AnnotateTextResponse), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. var clientOpt option.ClientOption var ( mockLanguage mockLanguageServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() languagepb.RegisterLanguageServiceServer(serv, &mockLanguage) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestLanguageServiceAnalyzeSentiment(t *testing.T) { var language string = "language-1613589672" var expectedResponse = &languagepb.AnalyzeSentimentResponse{ Language: language, } mockLanguage.err = nil mockLanguage.reqs = nil mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) var document *languagepb.Document = &languagepb.Document{} var request = &languagepb.AnalyzeSentimentRequest{ Document: document, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnalyzeSentiment(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestLanguageServiceAnalyzeSentimentError(t 
*testing.T) { errCode := codes.PermissionDenied mockLanguage.err = gstatus.Error(errCode, "test error") var document *languagepb.Document = &languagepb.Document{} var request = &languagepb.AnalyzeSentimentRequest{ Document: document, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnalyzeSentiment(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestLanguageServiceAnalyzeEntities(t *testing.T) { var language string = "language-1613589672" var expectedResponse = &languagepb.AnalyzeEntitiesResponse{ Language: language, } mockLanguage.err = nil mockLanguage.reqs = nil mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) var document *languagepb.Document = &languagepb.Document{} var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeEntitiesRequest{ Document: document, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnalyzeEntities(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestLanguageServiceAnalyzeEntitiesError(t *testing.T) { errCode := codes.PermissionDenied mockLanguage.err = gstatus.Error(errCode, "test error") var document *languagepb.Document = &languagepb.Document{} var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeEntitiesRequest{ Document: document, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } 
resp, err := c.AnalyzeEntities(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestLanguageServiceAnalyzeEntitySentiment(t *testing.T) { var language string = "language-1613589672" var expectedResponse = &languagepb.AnalyzeEntitySentimentResponse{ Language: language, } mockLanguage.err = nil mockLanguage.reqs = nil mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) var document *languagepb.Document = &languagepb.Document{} var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeEntitySentimentRequest{ Document: document, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnalyzeEntitySentiment(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestLanguageServiceAnalyzeEntitySentimentError(t *testing.T) { errCode := codes.PermissionDenied mockLanguage.err = gstatus.Error(errCode, "test error") var document *languagepb.Document = &languagepb.Document{} var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeEntitySentimentRequest{ Document: document, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnalyzeEntitySentiment(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func 
TestLanguageServiceAnalyzeSyntax(t *testing.T) { var language string = "language-1613589672" var expectedResponse = &languagepb.AnalyzeSyntaxResponse{ Language: language, } mockLanguage.err = nil mockLanguage.reqs = nil mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) var document *languagepb.Document = &languagepb.Document{} var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeSyntaxRequest{ Document: document, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnalyzeSyntax(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestLanguageServiceAnalyzeSyntaxError(t *testing.T) { errCode := codes.PermissionDenied mockLanguage.err = gstatus.Error(errCode, "test error") var document *languagepb.Document = &languagepb.Document{} var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnalyzeSyntaxRequest{ Document: document, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnalyzeSyntax(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestLanguageServiceAnnotateText(t *testing.T) { var language string = "language-1613589672" var expectedResponse = &languagepb.AnnotateTextResponse{ Language: language, } mockLanguage.err = nil mockLanguage.reqs = nil mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) var document *languagepb.Document = 
&languagepb.Document{} var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{} var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnnotateTextRequest{ Document: document, Features: features, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnnotateText(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestLanguageServiceAnnotateTextError(t *testing.T) { errCode := codes.PermissionDenied mockLanguage.err = gstatus.Error(errCode, "test error") var document *languagepb.Document = &languagepb.Document{} var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{} var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE var request = &languagepb.AnnotateTextRequest{ Document: document, Features: features, EncodingType: encodingType, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.AnnotateText(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } golang-google-cloud-0.9.0/license_test.go000066400000000000000000000033021312234511600203510ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cloud import ( "bytes" "io/ioutil" "os" "path/filepath" "strings" "testing" ) var sentinels = []string{ "Copyright", "Google Inc", `Licensed under the Apache License, Version 2.0 (the "License");`, } func TestLicense(t *testing.T) { err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error { if err != nil { return err } if ext := filepath.Ext(path); ext != ".go" && ext != ".proto" { return nil } if strings.HasSuffix(path, ".pb.go") { // .pb.go files are generated from the proto files. // .proto files must have license headers. return nil } if path == "bigtable/cmd/cbt/cbtdoc.go" { // Automatically generated. return nil } src, err := ioutil.ReadFile(path) if err != nil { return nil } src = src[:140] // Ensure all of the sentinel values are at the top of the file. // Find license for _, sentinel := range sentinels { if !bytes.Contains(src, []byte(sentinel)) { t.Errorf("%v: license header not present. want %q", path, sentinel) return nil } } return nil }) if err != nil { t.Fatal(err) } } golang-google-cloud-0.9.0/logging/000077500000000000000000000000001312234511600167715ustar00rootroot00000000000000golang-google-cloud-0.9.0/logging/apiv2/000077500000000000000000000000001312234511600200125ustar00rootroot00000000000000golang-google-cloud-0.9.0/logging/apiv2/README.md000066400000000000000000000004741312234511600212760ustar00rootroot00000000000000Auto-generated logging v2 clients ================================= This package includes auto-generated clients for the logging v2 API. 
Use the handwritten logging client (in the parent directory, cloud.google.com/go/logging) in preference to this. This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. golang-google-cloud-0.9.0/logging/apiv2/WriteLogEntries_smoke_test.go000066400000000000000000000033251312234511600256670ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package logging import ( loggingpb "google.golang.org/genproto/googleapis/logging/v2" ) import ( "strconv" "testing" "time" "cloud.google.com/go/internal/testutil" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" ) var _ = iterator.Done var _ = strconv.FormatUint var _ = time.Now func TestLoggingServiceV2Smoke(t *testing.T) { if testing.Short() { t.Skip("skipping smoke test in short mode") } ctx := context.Background() ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) if ts == nil { t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") } projectId := testutil.ProjID() _ = projectId c, err := NewClient(ctx, option.WithTokenSource(ts)) if err != nil { t.Fatal(err) } var entries []*loggingpb.LogEntry = nil var formattedLogName string = LogPath(projectId, "test-"+strconv.FormatInt(time.Now().UnixNano(), 10)+"") var request = &loggingpb.WriteLogEntriesRequest{ Entries: entries, LogName: formattedLogName, } if _, err := c.WriteLogEntries(ctx, request); err != nil { t.Error(err) } } golang-google-cloud-0.9.0/logging/apiv2/config_client.go000066400000000000000000000242731312234511600231540ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package logging import ( "math" "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" loggingpb "google.golang.org/genproto/googleapis/logging/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( configProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") configSinkPathTemplate = gax.MustCompilePathTemplate("projects/{project}/sinks/{sink}") ) // ConfigCallOptions contains the retry settings for each method of ConfigClient. 
type ConfigCallOptions struct { ListSinks []gax.CallOption GetSink []gax.CallOption CreateSink []gax.CallOption UpdateSink []gax.CallOption DeleteSink []gax.CallOption } func defaultConfigClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("logging.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultConfigCallOptions() *ConfigCallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Internal, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 1000 * time.Millisecond, Multiplier: 1.2, }) }), }, } return &ConfigCallOptions{ ListSinks: retry[[2]string{"default", "idempotent"}], GetSink: retry[[2]string{"default", "idempotent"}], CreateSink: retry[[2]string{"default", "non_idempotent"}], UpdateSink: retry[[2]string{"default", "non_idempotent"}], DeleteSink: retry[[2]string{"default", "idempotent"}], } } // ConfigClient is a client for interacting with Stackdriver Logging API. type ConfigClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. configClient loggingpb.ConfigServiceV2Client // The call options for this service. CallOptions *ConfigCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewConfigClient creates a new config service v2 client. // // Service for configuring sinks used to export log entries outside of // Stackdriver Logging. func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultConfigClientOptions(), opts...)...) if err != nil { return nil, err } c := &ConfigClient{ conn: conn, CallOptions: defaultConfigCallOptions(), configClient: loggingpb.NewConfigServiceV2Client(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. 
func (c *ConfigClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *ConfigClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *ConfigClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // ConfigProjectPath returns the path for the project resource. func ConfigProjectPath(project string) string { path, err := configProjectPathTemplate.Render(map[string]string{ "project": project, }) if err != nil { panic(err) } return path } // ConfigSinkPath returns the path for the sink resource. func ConfigSinkPath(project, sink string) string { path, err := configSinkPathTemplate.Render(map[string]string{ "project": project, "sink": sink, }) if err != nil { panic(err) } return path } // ListSinks lists sinks. func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest, opts ...gax.CallOption) *LogSinkIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListSinks[0:len(c.CallOptions.ListSinks):len(c.CallOptions.ListSinks)], opts...) it := &LogSinkIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogSink, string, error) { var resp *loggingpb.ListSinksResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.configClient.ListSinks(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, "", err } return resp.Sinks, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GetSink gets a sink. func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetSink[0:len(c.CallOptions.GetSink):len(c.CallOptions.GetSink)], opts...) var resp *loggingpb.LogSink err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.configClient.GetSink(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // CreateSink creates a sink that exports specified log entries to a destination. The // export of newly-ingested log entries begins immediately, unless the current // time is outside the sink's start and end times or the sink's // `writer_identity` is not permitted to write to the destination. A sink can // export log entries only from the resource owning the sink. func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.CreateSink[0:len(c.CallOptions.CreateSink):len(c.CallOptions.CreateSink)], opts...) var resp *loggingpb.LogSink err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.configClient.CreateSink(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // UpdateSink updates a sink. 
If the named sink doesn't exist, then this method is // identical to // [sinks.create](/logging/docs/api/reference/rest/v2/projects.sinks/create). // If the named sink does exist, then this method replaces the following // fields in the existing sink with values from the new sink: `destination`, // `filter`, `output_version_format`, `start_time`, and `end_time`. // The updated filter might also have a new `writer_identity`; see the // `unique_writer_identity` field. func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...) var resp *loggingpb.LogSink err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.configClient.UpdateSink(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // DeleteSink deletes a sink. If the sink has a unique `writer_identity`, then that // service account is also deleted. func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteSink[0:len(c.CallOptions.DeleteSink):len(c.CallOptions.DeleteSink)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.configClient.DeleteSink(ctx, req, settings.GRPC...) return err }, opts...) return err } // LogSinkIterator manages a stream of *loggingpb.LogSink. type LogSinkIterator struct { items []*loggingpb.LogSink pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. 
// The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogSink, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *LogSinkIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *LogSinkIterator) Next() (*loggingpb.LogSink, error) { var item *loggingpb.LogSink if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *LogSinkIterator) bufLen() int { return len(it.items) } func (it *LogSinkIterator) takeBuf() interface{} { b := it.items it.items = nil return b } golang-google-cloud-0.9.0/logging/apiv2/config_client_example_test.go000066400000000000000000000053471312234511600257270ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package logging_test import ( "cloud.google.com/go/logging/apiv2" "golang.org/x/net/context" "google.golang.org/api/iterator" loggingpb "google.golang.org/genproto/googleapis/logging/v2" ) func ExampleNewConfigClient() { ctx := context.Background() c, err := logging.NewConfigClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleConfigClient_ListSinks() { ctx := context.Background() c, err := logging.NewConfigClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.ListSinksRequest{ // TODO: Fill request struct fields. } it := c.ListSinks(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleConfigClient_GetSink() { ctx := context.Background() c, err := logging.NewConfigClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.GetSinkRequest{ // TODO: Fill request struct fields. } resp, err := c.GetSink(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleConfigClient_CreateSink() { ctx := context.Background() c, err := logging.NewConfigClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.CreateSinkRequest{ // TODO: Fill request struct fields. } resp, err := c.CreateSink(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleConfigClient_UpdateSink() { ctx := context.Background() c, err := logging.NewConfigClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.UpdateSinkRequest{ // TODO: Fill request struct fields. } resp, err := c.UpdateSink(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleConfigClient_DeleteSink() { ctx := context.Background() c, err := logging.NewConfigClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.DeleteSinkRequest{ // TODO: Fill request struct fields. 
} err = c.DeleteSink(ctx, req) if err != nil { // TODO: Handle error. } } golang-google-cloud-0.9.0/logging/apiv2/doc.go000066400000000000000000000032421312234511600211070ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package logging is an experimental, auto-generated package for the // Stackdriver Logging API. // // The Stackdriver Logging API lets you write log entries and manage your // logs, log sinks and logs-based metrics. // // Use the client at cloud.google.com/go/logging in preference to this. package logging // import "cloud.google.com/go/logging/apiv2" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. 
func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only", "https://www.googleapis.com/auth/logging.admin", "https://www.googleapis.com/auth/logging.read", "https://www.googleapis.com/auth/logging.write", } } golang-google-cloud-0.9.0/logging/apiv2/logging_client.go000066400000000000000000000350461312234511600233350ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package logging import ( "math" "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" loggingpb "google.golang.org/genproto/googleapis/logging/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( loggingProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") loggingLogPathTemplate = gax.MustCompilePathTemplate("projects/{project}/logs/{log}") ) // CallOptions contains the retry settings for each method of Client. 
type CallOptions struct { DeleteLog []gax.CallOption WriteLogEntries []gax.CallOption ListLogEntries []gax.CallOption ListMonitoredResourceDescriptors []gax.CallOption ListLogs []gax.CallOption } func defaultClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("logging.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultCallOptions() *CallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Internal, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 1000 * time.Millisecond, Multiplier: 1.2, }) }), }, {"list", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Internal, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 1000 * time.Millisecond, Multiplier: 1.2, }) }), }, } return &CallOptions{ DeleteLog: retry[[2]string{"default", "idempotent"}], WriteLogEntries: retry[[2]string{"default", "non_idempotent"}], ListLogEntries: retry[[2]string{"list", "idempotent"}], ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], ListLogs: retry[[2]string{"default", "idempotent"}], } } // Client is a client for interacting with Stackdriver Logging API. type Client struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. client loggingpb.LoggingServiceV2Client // The call options for this service. CallOptions *CallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewClient creates a new logging service v2 client. // // Service for ingesting and querying logs. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) 
if err != nil { return nil, err } c := &Client{ conn: conn, CallOptions: defaultCallOptions(), client: loggingpb.NewLoggingServiceV2Client(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *Client) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *Client) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *Client) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // ProjectPath returns the path for the project resource. func ProjectPath(project string) string { path, err := loggingProjectPathTemplate.Render(map[string]string{ "project": project, }) if err != nil { panic(err) } return path } // LogPath returns the path for the log resource. func LogPath(project, log string) string { path, err := loggingLogPathTemplate.Render(map[string]string{ "project": project, "log": log, }) if err != nil { panic(err) } return path } // DeleteLog deletes all the log entries in a log. // The log reappears if it receives new entries. // Log entries written shortly before the delete operation might not be // deleted. func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteLog[0:len(c.CallOptions.DeleteLog):len(c.CallOptions.DeleteLog)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.client.DeleteLog(ctx, req, settings.GRPC...) 
return err }, opts...) return err } // WriteLogEntries writes log entries to Stackdriver Logging. func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest, opts ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.WriteLogEntries[0:len(c.CallOptions.WriteLogEntries):len(c.CallOptions.WriteLogEntries)], opts...) var resp *loggingpb.WriteLogEntriesResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.WriteLogEntries(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // ListLogEntries lists log entries. Use this method to retrieve log entries from // Stackdriver Logging. For ways to export log entries, see // [Exporting Logs](/logging/docs/export). func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...) it := &LogEntryIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogEntry, string, error) { var resp *loggingpb.ListLogEntriesResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.ListLogEntries(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.Entries, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) 
return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Stackdriver // Logging. func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...) it := &MonitoredResourceDescriptorIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { var resp *loggingpb.ListMonitoredResourceDescriptorsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.ResourceDescriptors, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // ListLogs lists the logs in projects, organizations, folders, or billing accounts. // Only logs that have entries are listed. 
func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, opts ...gax.CallOption) *StringIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListLogs[0:len(c.CallOptions.ListLogs):len(c.CallOptions.ListLogs)], opts...) it := &StringIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { var resp *loggingpb.ListLogsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.ListLogs(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.LogNames, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // LogEntryIterator manages a stream of *loggingpb.LogEntry. type LogEntryIterator struct { items []*loggingpb.LogEntry pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogEntry, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *LogEntryIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. 
Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *LogEntryIterator) Next() (*loggingpb.LogEntry, error) { var item *loggingpb.LogEntry if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *LogEntryIterator) bufLen() int { return len(it.items) } func (it *LogEntryIterator) takeBuf() interface{} { b := it.items it.items = nil return b } // MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. type MonitoredResourceDescriptorIterator struct { items []*monitoredrespb.MonitoredResourceDescriptor pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. 
func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { var item *monitoredrespb.MonitoredResourceDescriptor if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *MonitoredResourceDescriptorIterator) bufLen() int { return len(it.items) } func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { b := it.items it.items = nil return b } // StringIterator manages a stream of string. type StringIterator struct { items []string pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *StringIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *StringIterator) Next() (string, error) { var item string if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *StringIterator) bufLen() int { return len(it.items) } func (it *StringIterator) takeBuf() interface{} { b := it.items it.items = nil return b } golang-google-cloud-0.9.0/logging/apiv2/logging_client_example_test.go000066400000000000000000000056231312234511600261050ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package logging_test import ( "cloud.google.com/go/logging/apiv2" "golang.org/x/net/context" "google.golang.org/api/iterator" loggingpb "google.golang.org/genproto/googleapis/logging/v2" ) func ExampleNewClient() { ctx := context.Background() c, err := logging.NewClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleClient_DeleteLog() { ctx := context.Background() c, err := logging.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.DeleteLogRequest{ // TODO: Fill request struct fields. } err = c.DeleteLog(ctx, req) if err != nil { // TODO: Handle error. } } func ExampleClient_WriteLogEntries() { ctx := context.Background() c, err := logging.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.WriteLogEntriesRequest{ // TODO: Fill request struct fields. } resp, err := c.WriteLogEntries(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleClient_ListLogEntries() { ctx := context.Background() c, err := logging.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.ListLogEntriesRequest{ // TODO: Fill request struct fields. } it := c.ListLogEntries(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. 
_ = resp } } func ExampleClient_ListMonitoredResourceDescriptors() { ctx := context.Background() c, err := logging.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.ListMonitoredResourceDescriptorsRequest{ // TODO: Fill request struct fields. } it := c.ListMonitoredResourceDescriptors(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleClient_ListLogs() { ctx := context.Background() c, err := logging.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.ListLogsRequest{ // TODO: Fill request struct fields. } it := c.ListLogs(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } golang-google-cloud-0.9.0/logging/apiv2/metrics_client.go000066400000000000000000000232331312234511600233500ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package logging import ( "math" "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" loggingpb "google.golang.org/genproto/googleapis/logging/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( metricsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") metricsMetricPathTemplate = gax.MustCompilePathTemplate("projects/{project}/metrics/{metric}") ) // MetricsCallOptions contains the retry settings for each method of MetricsClient. type MetricsCallOptions struct { ListLogMetrics []gax.CallOption GetLogMetric []gax.CallOption CreateLogMetric []gax.CallOption UpdateLogMetric []gax.CallOption DeleteLogMetric []gax.CallOption } func defaultMetricsClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("logging.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultMetricsCallOptions() *MetricsCallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Internal, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 1000 * time.Millisecond, Multiplier: 1.2, }) }), }, } return &MetricsCallOptions{ ListLogMetrics: retry[[2]string{"default", "idempotent"}], GetLogMetric: retry[[2]string{"default", "idempotent"}], CreateLogMetric: retry[[2]string{"default", "non_idempotent"}], UpdateLogMetric: retry[[2]string{"default", "non_idempotent"}], DeleteLogMetric: retry[[2]string{"default", "idempotent"}], } } // MetricsClient is a client for interacting with Stackdriver Logging API. type MetricsClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. metricsClient loggingpb.MetricsServiceV2Client // The call options for this service. 
CallOptions *MetricsCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewMetricsClient creates a new metrics service v2 client. // // Service for configuring logs-based metrics. func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*MetricsClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultMetricsClientOptions(), opts...)...) if err != nil { return nil, err } c := &MetricsClient{ conn: conn, CallOptions: defaultMetricsCallOptions(), metricsClient: loggingpb.NewMetricsServiceV2Client(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *MetricsClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *MetricsClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *MetricsClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // MetricsProjectPath returns the path for the project resource. func MetricsProjectPath(project string) string { path, err := metricsProjectPathTemplate.Render(map[string]string{ "project": project, }) if err != nil { panic(err) } return path } // MetricsMetricPath returns the path for the metric resource. func MetricsMetricPath(project, metric string) string { path, err := metricsMetricPathTemplate.Render(map[string]string{ "project": project, "metric": metric, }) if err != nil { panic(err) } return path } // ListLogMetrics lists logs-based metrics. 
func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest, opts ...gax.CallOption) *LogMetricIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListLogMetrics[0:len(c.CallOptions.ListLogMetrics):len(c.CallOptions.ListLogMetrics)], opts...) it := &LogMetricIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogMetric, string, error) { var resp *loggingpb.ListLogMetricsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.metricsClient.ListLogMetrics(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.Metrics, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GetLogMetric gets a logs-based metric. func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetLogMetric[0:len(c.CallOptions.GetLogMetric):len(c.CallOptions.GetLogMetric)], opts...) var resp *loggingpb.LogMetric err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.metricsClient.GetLogMetric(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // CreateLogMetric creates a logs-based metric. 
func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.CreateLogMetric[0:len(c.CallOptions.CreateLogMetric):len(c.CallOptions.CreateLogMetric)], opts...) var resp *loggingpb.LogMetric err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.metricsClient.CreateLogMetric(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // UpdateLogMetric creates or updates a logs-based metric. func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.UpdateLogMetric[0:len(c.CallOptions.UpdateLogMetric):len(c.CallOptions.UpdateLogMetric)], opts...) var resp *loggingpb.LogMetric err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.metricsClient.UpdateLogMetric(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // DeleteLogMetric deletes a logs-based metric. func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteLogMetric[0:len(c.CallOptions.DeleteLogMetric):len(c.CallOptions.DeleteLogMetric)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.metricsClient.DeleteLogMetric(ctx, req, settings.GRPC...) return err }, opts...) return err } // LogMetricIterator manages a stream of *loggingpb.LogMetric. 
type LogMetricIterator struct { items []*loggingpb.LogMetric pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogMetric, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *LogMetricIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *LogMetricIterator) Next() (*loggingpb.LogMetric, error) { var item *loggingpb.LogMetric if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *LogMetricIterator) bufLen() int { return len(it.items) } func (it *LogMetricIterator) takeBuf() interface{} { b := it.items it.items = nil return b } golang-google-cloud-0.9.0/logging/apiv2/metrics_client_example_test.go000066400000000000000000000054761312234511600261330ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package logging_test import ( "cloud.google.com/go/logging/apiv2" "golang.org/x/net/context" "google.golang.org/api/iterator" loggingpb "google.golang.org/genproto/googleapis/logging/v2" ) func ExampleNewMetricsClient() { ctx := context.Background() c, err := logging.NewMetricsClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleMetricsClient_ListLogMetrics() { ctx := context.Background() c, err := logging.NewMetricsClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.ListLogMetricsRequest{ // TODO: Fill request struct fields. } it := c.ListLogMetrics(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleMetricsClient_GetLogMetric() { ctx := context.Background() c, err := logging.NewMetricsClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.GetLogMetricRequest{ // TODO: Fill request struct fields. } resp, err := c.GetLogMetric(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleMetricsClient_CreateLogMetric() { ctx := context.Background() c, err := logging.NewMetricsClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.CreateLogMetricRequest{ // TODO: Fill request struct fields. } resp, err := c.CreateLogMetric(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleMetricsClient_UpdateLogMetric() { ctx := context.Background() c, err := logging.NewMetricsClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.UpdateLogMetricRequest{ // TODO: Fill request struct fields. } resp, err := c.UpdateLogMetric(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. 
_ = resp } func ExampleMetricsClient_DeleteLogMetric() { ctx := context.Background() c, err := logging.NewMetricsClient(ctx) if err != nil { // TODO: Handle error. } req := &loggingpb.DeleteLogMetricRequest{ // TODO: Fill request struct fields. } err = c.DeleteLogMetric(ctx, req) if err != nil { // TODO: Handle error. } } golang-google-cloud-0.9.0/logging/apiv2/mock_test.go000066400000000000000000001064451312234511600223430ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package logging import ( emptypb "github.com/golang/protobuf/ptypes/empty" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" loggingpb "google.golang.org/genproto/googleapis/logging/v2" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockLoggingServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. loggingpb.LoggingServiceV2Server reqs []proto.Message // If set, all calls return this error. 
err error // responses to return if err == nil resps []proto.Message } func (s *mockLoggingServer) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockLoggingServer) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest) (*loggingpb.WriteLogEntriesResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*loggingpb.WriteLogEntriesResponse), nil } func (s *mockLoggingServer) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest) (*loggingpb.ListLogEntriesResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*loggingpb.ListLogEntriesResponse), nil } func (s *mockLoggingServer) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest) (*loggingpb.ListMonitoredResourceDescriptorsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return 
s.resps[0].(*loggingpb.ListMonitoredResourceDescriptorsResponse), nil } func (s *mockLoggingServer) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest) (*loggingpb.ListLogsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*loggingpb.ListLogsResponse), nil } type mockConfigServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. loggingpb.ConfigServiceV2Server reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockConfigServer) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest) (*loggingpb.ListSinksResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*loggingpb.ListSinksResponse), nil } func (s *mockConfigServer) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest) (*loggingpb.LogSink, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*loggingpb.LogSink), nil } func (s *mockConfigServer) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest) (*loggingpb.LogSink, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, 
fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*loggingpb.LogSink), nil } func (s *mockConfigServer) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest) (*loggingpb.LogSink, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*loggingpb.LogSink), nil } func (s *mockConfigServer) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } type mockMetricsServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. loggingpb.MetricsServiceV2Server reqs []proto.Message // If set, all calls return this error. 
err error // responses to return if err == nil resps []proto.Message } func (s *mockMetricsServer) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest) (*loggingpb.ListLogMetricsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*loggingpb.ListLogMetricsResponse), nil } func (s *mockMetricsServer) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest) (*loggingpb.LogMetric, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*loggingpb.LogMetric), nil } func (s *mockMetricsServer) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest) (*loggingpb.LogMetric, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*loggingpb.LogMetric), nil } func (s *mockMetricsServer) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest) (*loggingpb.LogMetric, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*loggingpb.LogMetric), nil } func (s *mockMetricsServer) DeleteLogMetric(ctx context.Context, req 
*loggingpb.DeleteLogMetricRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. var clientOpt option.ClientOption var ( mockLogging mockLoggingServer mockConfig mockConfigServer mockMetrics mockMetricsServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() loggingpb.RegisterLoggingServiceV2Server(serv, &mockLogging) loggingpb.RegisterConfigServiceV2Server(serv, &mockConfig) loggingpb.RegisterMetricsServiceV2Server(serv, &mockMetrics) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestLoggingServiceV2DeleteLog(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockLogging.err = nil mockLogging.reqs = nil mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) var formattedLogName string = LogPath("[PROJECT]", "[LOG]") var request = &loggingpb.DeleteLogRequest{ LogName: formattedLogName, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteLog(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestLoggingServiceV2DeleteLogError(t *testing.T) { errCode := codes.PermissionDenied mockLogging.err = gstatus.Error(errCode, "test error") var formattedLogName string = LogPath("[PROJECT]", "[LOG]") var request = 
&loggingpb.DeleteLogRequest{ LogName: formattedLogName, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteLog(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestLoggingServiceV2WriteLogEntries(t *testing.T) { var expectedResponse *loggingpb.WriteLogEntriesResponse = &loggingpb.WriteLogEntriesResponse{} mockLogging.err = nil mockLogging.reqs = nil mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) var entries []*loggingpb.LogEntry = nil var request = &loggingpb.WriteLogEntriesRequest{ Entries: entries, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.WriteLogEntries(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestLoggingServiceV2WriteLogEntriesError(t *testing.T) { errCode := codes.PermissionDenied mockLogging.err = gstatus.Error(errCode, "test error") var entries []*loggingpb.LogEntry = nil var request = &loggingpb.WriteLogEntriesRequest{ Entries: entries, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.WriteLogEntries(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestLoggingServiceV2ListLogEntries(t *testing.T) { var nextPageToken string = "" var entriesElement *loggingpb.LogEntry = &loggingpb.LogEntry{} var entries = []*loggingpb.LogEntry{entriesElement} var 
expectedResponse = &loggingpb.ListLogEntriesResponse{ NextPageToken: nextPageToken, Entries: entries, } mockLogging.err = nil mockLogging.reqs = nil mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) var resourceNames []string = nil var request = &loggingpb.ListLogEntriesRequest{ ResourceNames: resourceNames, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListLogEntries(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Entries[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestLoggingServiceV2ListLogEntriesError(t *testing.T) { errCode := codes.PermissionDenied mockLogging.err = gstatus.Error(errCode, "test error") var resourceNames []string = nil var request = &loggingpb.ListLogEntriesRequest{ ResourceNames: resourceNames, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListLogEntries(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestLoggingServiceV2ListMonitoredResourceDescriptors(t *testing.T) { var nextPageToken string = "" var resourceDescriptorsElement *monitoredrespb.MonitoredResourceDescriptor = &monitoredrespb.MonitoredResourceDescriptor{} var resourceDescriptors = []*monitoredrespb.MonitoredResourceDescriptor{resourceDescriptorsElement} var expectedResponse = &loggingpb.ListMonitoredResourceDescriptorsResponse{ NextPageToken: nextPageToken, ResourceDescriptors: 
resourceDescriptors, } mockLogging.err = nil mockLogging.reqs = nil mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) var request *loggingpb.ListMonitoredResourceDescriptorsRequest = &loggingpb.ListMonitoredResourceDescriptorsRequest{} c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.ResourceDescriptors[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestLoggingServiceV2ListMonitoredResourceDescriptorsError(t *testing.T) { errCode := codes.PermissionDenied mockLogging.err = gstatus.Error(errCode, "test error") var request *loggingpb.ListMonitoredResourceDescriptorsRequest = &loggingpb.ListMonitoredResourceDescriptorsRequest{} c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestLoggingServiceV2ListLogs(t *testing.T) { var nextPageToken string = "" var logNamesElement string = "logNamesElement-1079688374" var logNames = []string{logNamesElement} var expectedResponse = &loggingpb.ListLogsResponse{ NextPageToken: nextPageToken, LogNames: logNames, } mockLogging.err = nil mockLogging.reqs = nil mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) var formattedParent string = ProjectPath("[PROJECT]") var request = 
&loggingpb.ListLogsRequest{ Parent: formattedParent, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListLogs(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.LogNames[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestLoggingServiceV2ListLogsError(t *testing.T) { errCode := codes.PermissionDenied mockLogging.err = gstatus.Error(errCode, "test error") var formattedParent string = ProjectPath("[PROJECT]") var request = &loggingpb.ListLogsRequest{ Parent: formattedParent, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListLogs(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestConfigServiceV2ListSinks(t *testing.T) { var nextPageToken string = "" var sinksElement *loggingpb.LogSink = &loggingpb.LogSink{} var sinks = []*loggingpb.LogSink{sinksElement} var expectedResponse = &loggingpb.ListSinksResponse{ NextPageToken: nextPageToken, Sinks: sinks, } mockConfig.err = nil mockConfig.reqs = nil mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) var formattedParent string = ConfigProjectPath("[PROJECT]") var request = &loggingpb.ListSinksRequest{ Parent: formattedParent, } c, err := NewConfigClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListSinks(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockConfig.reqs[0]; 
!proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Sinks[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestConfigServiceV2ListSinksError(t *testing.T) { errCode := codes.PermissionDenied mockConfig.err = gstatus.Error(errCode, "test error") var formattedParent string = ConfigProjectPath("[PROJECT]") var request = &loggingpb.ListSinksRequest{ Parent: formattedParent, } c, err := NewConfigClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListSinks(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestConfigServiceV2GetSink(t *testing.T) { var name string = "name3373707" var destination string = "destination-1429847026" var filter string = "filter-1274492040" var writerIdentity string = "writerIdentity775638794" var includeChildren bool = true var expectedResponse = &loggingpb.LogSink{ Name: name, Destination: destination, Filter: filter, WriterIdentity: writerIdentity, IncludeChildren: includeChildren, } mockConfig.err = nil mockConfig.reqs = nil mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") var request = &loggingpb.GetSinkRequest{ SinkName: formattedSinkName, } c, err := NewConfigClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetSink(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; 
!proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestConfigServiceV2GetSinkError(t *testing.T) { errCode := codes.PermissionDenied mockConfig.err = gstatus.Error(errCode, "test error") var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") var request = &loggingpb.GetSinkRequest{ SinkName: formattedSinkName, } c, err := NewConfigClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetSink(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestConfigServiceV2CreateSink(t *testing.T) { var name string = "name3373707" var destination string = "destination-1429847026" var filter string = "filter-1274492040" var writerIdentity string = "writerIdentity775638794" var includeChildren bool = true var expectedResponse = &loggingpb.LogSink{ Name: name, Destination: destination, Filter: filter, WriterIdentity: writerIdentity, IncludeChildren: includeChildren, } mockConfig.err = nil mockConfig.reqs = nil mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) var formattedParent string = ConfigProjectPath("[PROJECT]") var sink *loggingpb.LogSink = &loggingpb.LogSink{} var request = &loggingpb.CreateSinkRequest{ Parent: formattedParent, Sink: sink, } c, err := NewConfigClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateSink(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestConfigServiceV2CreateSinkError(t *testing.T) { errCode := codes.PermissionDenied mockConfig.err = gstatus.Error(errCode, "test error") 
var formattedParent string = ConfigProjectPath("[PROJECT]") var sink *loggingpb.LogSink = &loggingpb.LogSink{} var request = &loggingpb.CreateSinkRequest{ Parent: formattedParent, Sink: sink, } c, err := NewConfigClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateSink(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestConfigServiceV2UpdateSink(t *testing.T) { var name string = "name3373707" var destination string = "destination-1429847026" var filter string = "filter-1274492040" var writerIdentity string = "writerIdentity775638794" var includeChildren bool = true var expectedResponse = &loggingpb.LogSink{ Name: name, Destination: destination, Filter: filter, WriterIdentity: writerIdentity, IncludeChildren: includeChildren, } mockConfig.err = nil mockConfig.reqs = nil mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") var sink *loggingpb.LogSink = &loggingpb.LogSink{} var request = &loggingpb.UpdateSinkRequest{ SinkName: formattedSinkName, Sink: sink, } c, err := NewConfigClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateSink(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestConfigServiceV2UpdateSinkError(t *testing.T) { errCode := codes.PermissionDenied mockConfig.err = gstatus.Error(errCode, "test error") var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") var sink *loggingpb.LogSink = &loggingpb.LogSink{} var request = 
&loggingpb.UpdateSinkRequest{ SinkName: formattedSinkName, Sink: sink, } c, err := NewConfigClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateSink(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestConfigServiceV2DeleteSink(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockConfig.err = nil mockConfig.reqs = nil mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") var request = &loggingpb.DeleteSinkRequest{ SinkName: formattedSinkName, } c, err := NewConfigClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteSink(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestConfigServiceV2DeleteSinkError(t *testing.T) { errCode := codes.PermissionDenied mockConfig.err = gstatus.Error(errCode, "test error") var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") var request = &loggingpb.DeleteSinkRequest{ SinkName: formattedSinkName, } c, err := NewConfigClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteSink(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestMetricsServiceV2ListLogMetrics(t *testing.T) { var nextPageToken string = "" var metricsElement *loggingpb.LogMetric = &loggingpb.LogMetric{} var metrics = []*loggingpb.LogMetric{metricsElement} var expectedResponse = &loggingpb.ListLogMetricsResponse{ NextPageToken: nextPageToken, Metrics: 
metrics, } mockMetrics.err = nil mockMetrics.reqs = nil mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) var formattedParent string = MetricsProjectPath("[PROJECT]") var request = &loggingpb.ListLogMetricsRequest{ Parent: formattedParent, } c, err := NewMetricsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListLogMetrics(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Metrics[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestMetricsServiceV2ListLogMetricsError(t *testing.T) { errCode := codes.PermissionDenied mockMetrics.err = gstatus.Error(errCode, "test error") var formattedParent string = MetricsProjectPath("[PROJECT]") var request = &loggingpb.ListLogMetricsRequest{ Parent: formattedParent, } c, err := NewMetricsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListLogMetrics(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestMetricsServiceV2GetLogMetric(t *testing.T) { var name string = "name3373707" var description string = "description-1724546052" var filter string = "filter-1274492040" var expectedResponse = &loggingpb.LogMetric{ Name: name, Description: description, Filter: filter, } mockMetrics.err = nil mockMetrics.reqs = nil mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") var request = 
&loggingpb.GetLogMetricRequest{ MetricName: formattedMetricName, } c, err := NewMetricsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetLogMetric(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestMetricsServiceV2GetLogMetricError(t *testing.T) { errCode := codes.PermissionDenied mockMetrics.err = gstatus.Error(errCode, "test error") var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") var request = &loggingpb.GetLogMetricRequest{ MetricName: formattedMetricName, } c, err := NewMetricsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetLogMetric(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestMetricsServiceV2CreateLogMetric(t *testing.T) { var name string = "name3373707" var description string = "description-1724546052" var filter string = "filter-1274492040" var expectedResponse = &loggingpb.LogMetric{ Name: name, Description: description, Filter: filter, } mockMetrics.err = nil mockMetrics.reqs = nil mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) var formattedParent string = MetricsProjectPath("[PROJECT]") var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} var request = &loggingpb.CreateLogMetricRequest{ Parent: formattedParent, Metric: metric, } c, err := NewMetricsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateLogMetric(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, 
got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestMetricsServiceV2CreateLogMetricError(t *testing.T) { errCode := codes.PermissionDenied mockMetrics.err = gstatus.Error(errCode, "test error") var formattedParent string = MetricsProjectPath("[PROJECT]") var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} var request = &loggingpb.CreateLogMetricRequest{ Parent: formattedParent, Metric: metric, } c, err := NewMetricsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateLogMetric(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestMetricsServiceV2UpdateLogMetric(t *testing.T) { var name string = "name3373707" var description string = "description-1724546052" var filter string = "filter-1274492040" var expectedResponse = &loggingpb.LogMetric{ Name: name, Description: description, Filter: filter, } mockMetrics.err = nil mockMetrics.reqs = nil mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} var request = &loggingpb.UpdateLogMetricRequest{ MetricName: formattedMetricName, Metric: metric, } c, err := NewMetricsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateLogMetric(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestMetricsServiceV2UpdateLogMetricError(t 
*testing.T) { errCode := codes.PermissionDenied mockMetrics.err = gstatus.Error(errCode, "test error") var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} var request = &loggingpb.UpdateLogMetricRequest{ MetricName: formattedMetricName, Metric: metric, } c, err := NewMetricsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateLogMetric(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestMetricsServiceV2DeleteLogMetric(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockMetrics.err = nil mockMetrics.reqs = nil mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") var request = &loggingpb.DeleteLogMetricRequest{ MetricName: formattedMetricName, } c, err := NewMetricsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteLogMetric(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestMetricsServiceV2DeleteLogMetricError(t *testing.T) { errCode := codes.PermissionDenied mockMetrics.err = gstatus.Error(errCode, "test error") var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") var request = &loggingpb.DeleteLogMetricRequest{ MetricName: formattedMetricName, } c, err := NewMetricsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteLogMetric(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, 
want %q", c, errCode) } } golang-google-cloud-0.9.0/logging/doc.go000066400000000000000000000047371312234511600201000ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package logging contains a Stackdriver Logging client suitable for writing logs. For reading logs, and working with sinks, metrics and monitored resources, see package cloud.google.com/go/logging/logadmin. This client uses Logging API v2. See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API. Note: This package is in beta. Some backwards-incompatible changes may occur. Creating a Client Use a Client to interact with the Stackdriver Logging API. // Create a Client ctx := context.Background() client, err := logging.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } Basic Usage For most use-cases, you'll want to add log entries to a buffer to be periodically flushed (automatically and asynchronously) to the Stackdriver Logging service. // Initialize a logger lg := client.Logger("my-log") // Add entry to log buffer lg.Log(logging.Entry{Payload: "something happened!"}) Closing your Client You should call Client.Close before your program exits to flush any buffered log entries to the Stackdriver Logging service. // Close the client when finished. err = client.Close() if err != nil { // TODO: Handle error. 
} Synchronous Logging For critical errors, you may want to send your log entries immediately. LogSync is slow and will block until the log entry has been sent, so it is not recommended for basic use. lg.LogSync(ctx, logging.Entry{Payload: "ALERT! Something critical happened!"}) The Standard Logger Interface You may want use a standard log.Logger in your program. // stdlg implements log.Logger stdlg := lg.StandardLogger(logging.Info) stdlg.Println("some info") Log Levels An Entry may have one of a number of severity levels associated with it. logging.Entry{ Payload: "something terrible happened!", Severity: logging.Critical, } */ package logging // import "cloud.google.com/go/logging" golang-google-cloud-0.9.0/logging/examples_test.go000066400000000000000000000060241312234511600221770ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logging_test import ( "fmt" "os" "cloud.google.com/go/logging" "golang.org/x/net/context" ) func ExampleNewClient() { ctx := context.Background() client, err := logging.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } // Use client to manage logs, metrics and sinks. // Close the client when finished. if err := client.Close(); err != nil { // TODO: Handle error. } } func ExampleClient_Ping() { ctx := context.Background() client, err := logging.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. 
} if err := client.Ping(ctx); err != nil { // TODO: Handle error. } } func ExampleNewClient_errorFunc() { ctx := context.Background() client, err := logging.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } // Print all errors to stdout. client.OnError = func(e error) { fmt.Fprintf(os.Stdout, "logging: %v", e) } // Use client to manage logs, metrics and sinks. // Close the client when finished. if err := client.Close(); err != nil { // TODO: Handle error. } } func ExampleClient_Logger() { ctx := context.Background() client, err := logging.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } lg := client.Logger("my-log") _ = lg // TODO: use the Logger. } func ExampleLogger_LogSync() { ctx := context.Background() client, err := logging.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } lg := client.Logger("my-log") err = lg.LogSync(ctx, logging.Entry{Payload: "red alert"}) if err != nil { // TODO: Handle error. } } func ExampleLogger_Log() { ctx := context.Background() client, err := logging.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } lg := client.Logger("my-log") lg.Log(logging.Entry{Payload: "something happened"}) } func ExampleLogger_Flush() { ctx := context.Background() client, err := logging.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } lg := client.Logger("my-log") lg.Log(logging.Entry{Payload: "something happened"}) lg.Flush() } func ExampleLogger_StandardLogger() { ctx := context.Background() client, err := logging.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. 
const (
	// ProdAddr is the production address of the Stackdriver Logging service.
	ProdAddr = "logging.googleapis.com:443"
	// Version is the version of this client library.
	Version = "0.2.0"
)

// LogPath returns the fully-qualified resource name for a log, given its
// parent resource name (e.g. "projects/P") and a log ID. Any "/" in the log
// ID is percent-encoded as "%2F", per the Logging API's resource-name rules.
func LogPath(parent, logID string) string {
	logID = strings.Replace(logID, "/", "%2F", -1)
	return fmt.Sprintf("%s/logs/%s", parent, logID)
}

// LogIDFromPath returns the log ID encoded in a log resource name, inverting
// LogPath. It returns "" if path is not of the form "<parent>/logs/<id>".
//
// Fixed: the previous implementation sliced by length alone, so any path at
// least as long as parent+"/logs/" was accepted even when it did not start
// with that prefix, yielding a garbage log ID instead of "".
func LogIDFromPath(parent, path string) string {
	prefix := parent + "/logs/"
	if !strings.HasPrefix(path, prefix) {
		return ""
	}
	return strings.Replace(path[len(prefix):], "%2F", "/", -1)
}
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package testing import ( "fmt" "github.com/golang/protobuf/proto" ) // Compare two payloads, assuming they are both proto.Messages // or both strings. func PayloadEqual(a, b interface{}) bool { if a == nil && b == nil { return true } if a == nil || b == nil { return false } switch a := a.(type) { case proto.Message: return proto.Equal(a, b.(proto.Message)) case string: return a == b.(string) default: panic(fmt.Sprintf("payloadEqual: unexpected type %T", a)) } } golang-google-cloud-0.9.0/logging/internal/testing/fake.go000066400000000000000000000310471312234511600235240ustar00rootroot00000000000000/* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package testing provides support for testing the logging client. 
package testing import ( "errors" "fmt" "regexp" "sort" "strconv" "strings" "sync" "time" emptypb "github.com/golang/protobuf/ptypes/empty" tspb "github.com/golang/protobuf/ptypes/timestamp" "cloud.google.com/go/internal/testutil" context "golang.org/x/net/context" lpb "google.golang.org/genproto/googleapis/api/label" mrpb "google.golang.org/genproto/googleapis/api/monitoredres" logpb "google.golang.org/genproto/googleapis/logging/v2" ) type loggingHandler struct { logpb.LoggingServiceV2Server mu sync.Mutex logs map[string][]*logpb.LogEntry // indexed by log name } type configHandler struct { logpb.ConfigServiceV2Server mu sync.Mutex sinks map[string]*logpb.LogSink // indexed by (full) sink name } type metricHandler struct { logpb.MetricsServiceV2Server mu sync.Mutex metrics map[string]*logpb.LogMetric // indexed by (full) metric name } // NewServer creates a new in-memory fake server implementing the logging service. // It returns the address of the server. func NewServer() (string, error) { srv, err := testutil.NewServer() if err != nil { return "", err } logpb.RegisterLoggingServiceV2Server(srv.Gsrv, &loggingHandler{ logs: make(map[string][]*logpb.LogEntry), }) logpb.RegisterConfigServiceV2Server(srv.Gsrv, &configHandler{ sinks: make(map[string]*logpb.LogSink), }) logpb.RegisterMetricsServiceV2Server(srv.Gsrv, &metricHandler{ metrics: make(map[string]*logpb.LogMetric), }) srv.Start() return srv.Addr, nil } // DeleteLog deletes a log and all its log entries. The log will reappear if it // receives new entries. func (h *loggingHandler) DeleteLog(_ context.Context, req *logpb.DeleteLogRequest) (*emptypb.Empty, error) { // TODO(jba): return NotFound if log isn't there? h.mu.Lock() defer h.mu.Unlock() delete(h.logs, req.LogName) return &emptypb.Empty{}, nil } // The only project ID that WriteLogEntries will accept. // Important for testing Ping. const validProjectID = "PROJECT_ID" // WriteLogEntries writes log entries to Stackdriver Logging. 
All log entries in // Stackdriver Logging are written by this method. func (h *loggingHandler) WriteLogEntries(_ context.Context, req *logpb.WriteLogEntriesRequest) (*logpb.WriteLogEntriesResponse, error) { if !strings.HasPrefix(req.LogName, "projects/"+validProjectID+"/") { return nil, fmt.Errorf("bad project ID: %q", req.LogName) } // TODO(jba): support insertId? h.mu.Lock() defer h.mu.Unlock() for _, e := range req.Entries { // Assign timestamp if missing. if e.Timestamp == nil { e.Timestamp = &tspb.Timestamp{Seconds: time.Now().Unix(), Nanos: 0} } // Fill from common fields in request. if e.LogName == "" { e.LogName = req.LogName } if e.Resource == nil { // TODO(jba): use a global one if nil? e.Resource = req.Resource } for k, v := range req.Labels { if _, ok := e.Labels[k]; !ok { e.Labels[k] = v } } // Store by log name. h.logs[e.LogName] = append(h.logs[e.LogName], e) } return &logpb.WriteLogEntriesResponse{}, nil } // ListLogEntries lists log entries. Use this method to retrieve log entries // from Stackdriver Logging. // // This fake implementation ignores project IDs. It does not support full filtering, only // expressions of the form "logName = NAME". func (h *loggingHandler) ListLogEntries(_ context.Context, req *logpb.ListLogEntriesRequest) (*logpb.ListLogEntriesResponse, error) { h.mu.Lock() defer h.mu.Unlock() entries, err := h.filterEntries(req.Filter) if err != nil { return nil, err } if err = sortEntries(entries, req.OrderBy); err != nil { return nil, err } from, to, nextPageToken, err := getPage(int(req.PageSize), req.PageToken, len(entries)) if err != nil { return nil, err } return &logpb.ListLogEntriesResponse{ Entries: entries[from:to], NextPageToken: nextPageToken, }, nil } // getPage converts an incoming page size and token from an RPC request into // slice bounds and the outgoing next-page token. // // getPage assumes that the complete, unpaginated list of items exists as a // single slice. 
In addition to the page size and token, getPage needs the // length of that slice. // // getPage's first two return values should be used to construct a sub-slice of // the complete, unpaginated slice. E.g. if the complete slice is s, then // s[from:to] is the desired page. Its third return value should be set as the // NextPageToken field of the RPC response. func getPage(pageSize int, pageToken string, length int) (from, to int, nextPageToken string, err error) { from, to = 0, length if pageToken != "" { from, err = strconv.Atoi(pageToken) if err != nil { return 0, 0, "", invalidArgument("bad page token") } if from >= length { return length, length, "", nil } } if pageSize > 0 && from+pageSize < length { to = from + pageSize nextPageToken = strconv.Itoa(to) } return from, to, nextPageToken, nil } func (h *loggingHandler) filterEntries(filter string) ([]*logpb.LogEntry, error) { logName, err := parseFilter(filter) if err != nil { return nil, err } if logName != "" { return h.logs[logName], nil } var entries []*logpb.LogEntry for _, es := range h.logs { entries = append(entries, es...) 
} return entries, nil } var filterRegexp = regexp.MustCompile(`^logName\s*=\s*"?([-_/.%\w]+)"?$`) // returns the log name, or "" for the empty filter func parseFilter(filter string) (string, error) { if filter == "" { return "", nil } subs := filterRegexp.FindStringSubmatch(filter) if subs == nil { return "", invalidArgument("bad filter") } return subs[1], nil // cannot panic by construction of regexp } func sortEntries(entries []*logpb.LogEntry, orderBy string) error { switch orderBy { case "", "timestamp asc": sort.Sort(byTimestamp(entries)) return nil case "timestamp desc": sort.Sort(sort.Reverse(byTimestamp(entries))) return nil default: return invalidArgument("bad order_by") } } type byTimestamp []*logpb.LogEntry func (s byTimestamp) Len() int { return len(s) } func (s byTimestamp) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s byTimestamp) Less(i, j int) bool { c := compareTimestamps(s[i].Timestamp, s[j].Timestamp) switch { case c < 0: return true case c > 0: return false default: return s[i].InsertId < s[j].InsertId } } func compareTimestamps(ts1, ts2 *tspb.Timestamp) int64 { if ts1.Seconds != ts2.Seconds { return ts1.Seconds - ts2.Seconds } return int64(ts1.Nanos - ts2.Nanos) } // Lists monitored resource descriptors that are used by Stackdriver Logging. func (h *loggingHandler) ListMonitoredResourceDescriptors(context.Context, *logpb.ListMonitoredResourceDescriptorsRequest) (*logpb.ListMonitoredResourceDescriptorsResponse, error) { return &logpb.ListMonitoredResourceDescriptorsResponse{ ResourceDescriptors: []*mrpb.MonitoredResourceDescriptor{ { Type: "global", DisplayName: "Global", Description: "... a log is not associated with any specific resource.", Labels: []*lpb.LabelDescriptor{ {Key: "project_id", Description: "The identifier of the GCP project..."}, }, }, }, }, nil } // Lists logs. func (h *loggingHandler) ListLogs(_ context.Context, req *logpb.ListLogsRequest) (*logpb.ListLogsResponse, error) { // Return fixed, fake response. 
logNames := []string{"a", "b", "c"} from, to, npt, err := getPage(int(req.PageSize), req.PageToken, len(logNames)) if err != nil { return nil, err } return &logpb.ListLogsResponse{ LogNames: logNames[from:to], NextPageToken: npt, }, nil } // Gets a sink. func (h *configHandler) GetSink(_ context.Context, req *logpb.GetSinkRequest) (*logpb.LogSink, error) { h.mu.Lock() defer h.mu.Unlock() if s, ok := h.sinks[req.SinkName]; ok { return s, nil } // TODO(jba): use error codes return nil, fmt.Errorf("sink %q not found", req.SinkName) } // Creates a sink. func (h *configHandler) CreateSink(_ context.Context, req *logpb.CreateSinkRequest) (*logpb.LogSink, error) { h.mu.Lock() defer h.mu.Unlock() fullName := fmt.Sprintf("%s/sinks/%s", req.Parent, req.Sink.Name) if _, ok := h.sinks[fullName]; ok { return nil, fmt.Errorf("sink with name %q already exists", fullName) } h.sinks[fullName] = req.Sink return req.Sink, nil } // Creates or updates a sink. func (h *configHandler) UpdateSink(_ context.Context, req *logpb.UpdateSinkRequest) (*logpb.LogSink, error) { h.mu.Lock() defer h.mu.Unlock() // Update of a non-existent sink will create it. h.sinks[req.SinkName] = req.Sink return req.Sink, nil } // Deletes a sink. func (h *configHandler) DeleteSink(_ context.Context, req *logpb.DeleteSinkRequest) (*emptypb.Empty, error) { h.mu.Lock() defer h.mu.Unlock() delete(h.sinks, req.SinkName) return &emptypb.Empty{}, nil } // Lists sinks. This fake implementation ignores the Parent field of // ListSinksRequest. All sinks are listed, regardless of their project. func (h *configHandler) ListSinks(_ context.Context, req *logpb.ListSinksRequest) (*logpb.ListSinksResponse, error) { h.mu.Lock() var sinks []*logpb.LogSink for _, s := range h.sinks { sinks = append(sinks, s) } h.mu.Unlock() // safe because no *logpb.LogSink is ever modified // Since map iteration varies, sort the sinks. 
sort.Sort(sinksByName(sinks)) from, to, nextPageToken, err := getPage(int(req.PageSize), req.PageToken, len(sinks)) if err != nil { return nil, err } return &logpb.ListSinksResponse{ Sinks: sinks[from:to], NextPageToken: nextPageToken, }, nil } type sinksByName []*logpb.LogSink func (s sinksByName) Len() int { return len(s) } func (s sinksByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s sinksByName) Less(i, j int) bool { return s[i].Name < s[j].Name } // Gets a metric. func (h *metricHandler) GetLogMetric(_ context.Context, req *logpb.GetLogMetricRequest) (*logpb.LogMetric, error) { h.mu.Lock() defer h.mu.Unlock() if s, ok := h.metrics[req.MetricName]; ok { return s, nil } // TODO(jba): use error codes return nil, fmt.Errorf("metric %q not found", req.MetricName) } // Creates a metric. func (h *metricHandler) CreateLogMetric(_ context.Context, req *logpb.CreateLogMetricRequest) (*logpb.LogMetric, error) { h.mu.Lock() defer h.mu.Unlock() fullName := fmt.Sprintf("%s/metrics/%s", req.Parent, req.Metric.Name) if _, ok := h.metrics[fullName]; ok { return nil, fmt.Errorf("metric with name %q already exists", fullName) } h.metrics[fullName] = req.Metric return req.Metric, nil } // Creates or updates a metric. func (h *metricHandler) UpdateLogMetric(_ context.Context, req *logpb.UpdateLogMetricRequest) (*logpb.LogMetric, error) { h.mu.Lock() defer h.mu.Unlock() // Update of a non-existent metric will create it. h.metrics[req.MetricName] = req.Metric return req.Metric, nil } // Deletes a metric. func (h *metricHandler) DeleteLogMetric(_ context.Context, req *logpb.DeleteLogMetricRequest) (*emptypb.Empty, error) { h.mu.Lock() defer h.mu.Unlock() delete(h.metrics, req.MetricName) return &emptypb.Empty{}, nil } // Lists metrics. This fake implementation ignores the Parent field of // ListMetricsRequest. All metrics are listed, regardless of their project. 
func (h *metricHandler) ListLogMetrics(_ context.Context, req *logpb.ListLogMetricsRequest) (*logpb.ListLogMetricsResponse, error) { h.mu.Lock() var metrics []*logpb.LogMetric for _, s := range h.metrics { metrics = append(metrics, s) } h.mu.Unlock() // safe because no *logpb.LogMetric is ever modified // Since map iteration varies, sort the metrics. sort.Sort(metricsByName(metrics)) from, to, nextPageToken, err := getPage(int(req.PageSize), req.PageToken, len(metrics)) if err != nil { return nil, err } return &logpb.ListLogMetricsResponse{ Metrics: metrics[from:to], NextPageToken: nextPageToken, }, nil } type metricsByName []*logpb.LogMetric func (s metricsByName) Len() int { return len(s) } func (s metricsByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s metricsByName) Less(i, j int) bool { return s[i].Name < s[j].Name } func invalidArgument(msg string) error { // TODO(jba): status codes return errors.New(msg) } golang-google-cloud-0.9.0/logging/internal/testing/fake_test.go000066400000000000000000000061771312234511600245710ustar00rootroot00000000000000/* Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file contains only basic checks. The fake is effectively tested by the // logging client unit tests. 
package testing import ( "testing" "time" "github.com/golang/protobuf/proto" tspb "github.com/golang/protobuf/ptypes/timestamp" logpb "google.golang.org/genproto/googleapis/logging/v2" grpc "google.golang.org/grpc" ) func TestNewServer(t *testing.T) { // Confirm that we can create and use a working gRPC server. addr, err := NewServer() if err != nil { t.Fatal(err) } conn, err := grpc.Dial(addr, grpc.WithInsecure()) if err != nil { t.Fatal(err) } // Avoid "connection is closing; please retry" message from gRPC. time.Sleep(300 * time.Millisecond) conn.Close() } func TestParseFilter(t *testing.T) { for _, test := range []struct { filter string want string wantErr bool }{ {"", "", false}, {"logName = syslog", "syslog", false}, {"logname = syslog", "", true}, {"logName = 'syslog'", "", true}, {"logName == syslog", "", true}, } { got, err := parseFilter(test.filter) if err != nil { if !test.wantErr { t.Errorf("%q: got %v, want no error", test.filter, err) } continue } if test.wantErr { t.Errorf("%q: got no error, want one", test.filter) continue } if got != test.want { t.Errorf("%q: got %q, want %q", test.filter, got, test.want) } } } func TestSortEntries(t *testing.T) { entries := []*logpb.LogEntry{ /* 0 */ {Timestamp: &tspb.Timestamp{Seconds: 30}}, /* 1 */ {Timestamp: &tspb.Timestamp{Seconds: 10}}, /* 2 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "b"}, /* 3 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "a"}, /* 4 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "c"}, } for _, test := range []struct { orderBy string want []int // slice of index into entries; nil == error }{ {"", []int{1, 3, 2, 4, 0}}, {"timestamp asc", []int{1, 3, 2, 4, 0}}, {"timestamp desc", []int{0, 4, 2, 3, 1}}, {"something else", nil}, } { got := make([]*logpb.LogEntry, len(entries)) copy(got, entries) err := sortEntries(got, test.orderBy) if err != nil { if test.want != nil { t.Errorf("%q: got %v, want nil error", test.orderBy, err) } continue } want := 
make([]*logpb.LogEntry, len(entries)) for i, j := range test.want { want[i] = entries[j] } if !logEntriesEqual(got, want) { t.Errorf("%q: got %v, want %v", test.orderBy, got, want) } } } func logEntriesEqual(a, b []*logpb.LogEntry) bool { if len(a) != len(b) { return false } for i, aa := range a { if !proto.Equal(aa, b[i]) { return false } } return true } golang-google-cloud-0.9.0/logging/logadmin/000077500000000000000000000000001312234511600205635ustar00rootroot00000000000000golang-google-cloud-0.9.0/logging/logadmin/example_entry_iterator_test.go000066400000000000000000000036001312234511600267350ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logadmin_test import ( "fmt" "time" "cloud.google.com/go/logging/logadmin" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func ExampleClient_Entries() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } it := client.Entries(ctx, logadmin.Filter(`logName = "projects/my-project/logs/my-log"`)) _ = it // TODO: iterate using Next or iterator.Pager. } func ExampleFilter_timestamp() { // This example demonstrates how to list the last 24 hours of log entries. ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. 
} oneDayAgo := time.Now().Add(-24 * time.Hour) t := oneDayAgo.Format(time.RFC3339) // Logging API wants timestamps in RFC 3339 format. it := client.Entries(ctx, logadmin.Filter(fmt.Sprintf(`timestamp > "%s"`, t))) _ = it // TODO: iterate using Next or iterator.Pager. } func ExampleEntryIterator_Next() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } it := client.Entries(ctx) for { entry, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(entry) } } golang-google-cloud-0.9.0/logging/logadmin/example_metric_iterator_test.go000066400000000000000000000024751312234511600270700ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logadmin_test import ( "fmt" "cloud.google.com/go/logging/logadmin" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func ExampleClient_Metrics() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } it := client.Metrics(ctx) _ = it // TODO: iterate using Next or iterator.Pager. } func ExampleMetricIterator_Next() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. 
} it := client.Metrics(ctx) for { metric, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(metric) } } golang-google-cloud-0.9.0/logging/logadmin/example_paging_test.go000066400000000000000000000052331312234511600251340ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logadmin_test import ( "bytes" "flag" "fmt" "html/template" "log" "net/http" "cloud.google.com/go/logging" "cloud.google.com/go/logging/logadmin" "golang.org/x/net/context" "google.golang.org/api/iterator" ) var ( client *logadmin.Client projectID = flag.String("project-id", "", "ID of the project to use") ) func ExampleClient_Entries_pagination() { // This example demonstrates how to iterate through items a page at a time // even if each successive page is fetched by a different process. It is a // complete web server that displays pages of log entries. To run it as a // standalone program, rename both the package and this function to "main". ctx := context.Background() flag.Parse() if *projectID == "" { log.Fatal("-project-id missing") } var err error client, err = logadmin.NewClient(ctx, *projectID) if err != nil { log.Fatalf("creating logging client: %v", err) } http.HandleFunc("/entries", handleEntries) log.Print("listening on 8080") log.Fatal(http.ListenAndServe(":8080", nil)) } var pageTemplate = template.Must(template.New("").Parse(`
{{range .Entries}} {{end}}
{{.}}
{{if .Next}} Next Page {{end}} `)) func handleEntries(w http.ResponseWriter, r *http.Request) { ctx := context.Background() filter := fmt.Sprintf(`logName = "projects/%s/logs/testlog"`, *projectID) it := client.Entries(ctx, logadmin.Filter(filter)) var entries []*logging.Entry nextTok, err := iterator.NewPager(it, 5, r.URL.Query().Get("pageToken")).NextPage(&entries) if err != nil { http.Error(w, fmt.Sprintf("problem getting the next page: %v", err), http.StatusInternalServerError) return } data := struct { Entries []*logging.Entry Next string }{ entries, nextTok, } var buf bytes.Buffer if err := pageTemplate.Execute(&buf, data); err != nil { http.Error(w, fmt.Sprintf("problem executing page template: %v", err), http.StatusInternalServerError) } if _, err := buf.WriteTo(w); err != nil { log.Printf("writing response: %v", err) } } golang-google-cloud-0.9.0/logging/logadmin/example_resource_iterator_test.go000066400000000000000000000025531312234511600274310ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logadmin_test import ( "fmt" "cloud.google.com/go/logging/logadmin" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func ExampleClient_ResourceDescriptors() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } it := client.ResourceDescriptors(ctx) _ = it // TODO: iterate using Next or iterator.Pager. 
} func ExampleResourceDescriptorIterator_Next() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } it := client.ResourceDescriptors(ctx) for { rdesc, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(rdesc) } } golang-google-cloud-0.9.0/logging/logadmin/example_sink_iterator_test.go000066400000000000000000000024611312234511600265440ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logadmin_test import ( "fmt" "cloud.google.com/go/logging/logadmin" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func ExampleClient_Sinks() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } it := client.Sinks(ctx) _ = it // TODO: iterate using Next or iterator.Pager. } func ExampleSinkIterator_Next() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } it := client.Sinks(ctx) for { sink, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(sink) } } golang-google-cloud-0.9.0/logging/logadmin/examples_test.go000066400000000000000000000075521312234511600240000ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logadmin_test import ( "fmt" "cloud.google.com/go/logging/logadmin" "golang.org/x/net/context" ) func ExampleNewClient() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } // Use client to manage logs, metrics and sinks. // Close the client when finished. if err := client.Close(); err != nil { // TODO: Handle error. } } func ExampleClient_DeleteLog() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } err = client.DeleteLog(ctx, "my-log") if err != nil { // TODO: Handle error. } } func ExampleClient_CreateMetric() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } err = client.CreateMetric(ctx, &logadmin.Metric{ ID: "severe-errors", Description: "entries at ERROR or higher severities", Filter: "severity >= ERROR", }) if err != nil { // TODO: Handle error. } } func ExampleClient_DeleteMetric() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } if err := client.DeleteMetric(ctx, "severe-errors"); err != nil { // TODO: Handle error. } } func ExampleClient_Metric() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. 
} m, err := client.Metric(ctx, "severe-errors") if err != nil { // TODO: Handle error. } fmt.Println(m) } func ExampleClient_UpdateMetric() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } err = client.UpdateMetric(ctx, &logadmin.Metric{ ID: "severe-errors", Description: "entries at high severities", Filter: "severity > ERROR", }) if err != nil { // TODO: Handle error. } } func ExampleClient_CreateSink() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } sink, err := client.CreateSink(ctx, &logadmin.Sink{ ID: "severe-errors-to-gcs", Destination: "storage.googleapis.com/my-bucket", Filter: "severity >= ERROR", }) if err != nil { // TODO: Handle error. } fmt.Println(sink) } func ExampleClient_DeleteSink() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } if err := client.DeleteSink(ctx, "severe-errors-to-gcs"); err != nil { // TODO: Handle error. } } func ExampleClient_Sink() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } s, err := client.Sink(ctx, "severe-errors-to-gcs") if err != nil { // TODO: Handle error. } fmt.Println(s) } func ExampleClient_UpdateSink() { ctx := context.Background() client, err := logadmin.NewClient(ctx, "my-project") if err != nil { // TODO: Handle error. } sink, err := client.UpdateSink(ctx, &logadmin.Sink{ ID: "severe-errors-to-gcs", Destination: "storage.googleapis.com/my-other-bucket", Filter: "severity >= ERROR", }) if err != nil { // TODO: Handle error. } fmt.Println(sink) } golang-google-cloud-0.9.0/logging/logadmin/logadmin.go000066400000000000000000000313541312234511600227120ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // These features are missing now, but will likely be added: // - There is no way to specify CallOptions. // Package logadmin contains a Stackdriver Logging client that can be used // for reading logs and working with sinks, metrics and monitored resources. // For a client that can write logs, see package cloud.google.com/go/logging. // // The client uses Logging API v2. // See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API. // // Note: This package is in beta. Some backwards-incompatible changes may occur. package logadmin // import "cloud.google.com/go/logging/logadmin" import ( "errors" "fmt" "math" "net/http" "net/url" "strings" "time" "cloud.google.com/go/internal/version" "cloud.google.com/go/logging" vkit "cloud.google.com/go/logging/apiv2" "cloud.google.com/go/logging/internal" "github.com/golang/protobuf/ptypes" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" logtypepb "google.golang.org/genproto/googleapis/logging/type" logpb "google.golang.org/genproto/googleapis/logging/v2" "google.golang.org/grpc/codes" // Import the following so EntryIterator can unmarshal log protos. _ "google.golang.org/genproto/googleapis/cloud/audit" ) // Client is a Logging client. A Client is associated with a single Cloud project. 
type Client struct { lClient *vkit.Client // logging client sClient *vkit.ConfigClient // sink client mClient *vkit.MetricsClient // metric client projectID string closed bool } // NewClient returns a new logging client associated with the provided project ID. // // By default NewClient uses AdminScope. To use a different scope, call // NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes). func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { // Check for '/' in project ID to reserve the ability to support various owning resources, // in the form "{Collection}/{Name}", for instance "organizations/my-org". if strings.ContainsRune(projectID, '/') { return nil, errors.New("logging: project ID contains '/'") } opts = append([]option.ClientOption{ option.WithEndpoint(internal.ProdAddr), option.WithScopes(logging.AdminScope), }, opts...) lc, err := vkit.NewClient(ctx, opts...) if err != nil { return nil, err } // TODO(jba): pass along any client options that should be provided to all clients. sc, err := vkit.NewConfigClient(ctx, option.WithGRPCConn(lc.Connection())) if err != nil { return nil, err } mc, err := vkit.NewMetricsClient(ctx, option.WithGRPCConn(lc.Connection())) if err != nil { return nil, err } // Retry some non-idempotent methods on INTERNAL, because it happens sometimes // and in all observed cases the operation did not complete. 
retryerOnInternal := func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Internal, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 1000 * time.Millisecond, Multiplier: 1.2, }) } mc.CallOptions.CreateLogMetric = []gax.CallOption{gax.WithRetry(retryerOnInternal)} mc.CallOptions.UpdateLogMetric = []gax.CallOption{gax.WithRetry(retryerOnInternal)} lc.SetGoogleClientInfo("gccl", version.Repo) sc.SetGoogleClientInfo("gccl", version.Repo) mc.SetGoogleClientInfo("gccl", version.Repo) client := &Client{ lClient: lc, sClient: sc, mClient: mc, projectID: projectID, } return client, nil } // parent returns the string used in many RPCs to denote the parent resource of the log. func (c *Client) parent() string { return "projects/" + c.projectID } // Close closes the client. func (c *Client) Close() error { if c.closed { return nil } // Return only the first error. Since all clients share an underlying connection, // Closes after the first always report a "connection is closing" error. err := c.lClient.Close() _ = c.sClient.Close() _ = c.mClient.Close() c.closed = true return err } // DeleteLog deletes a log and all its log entries. The log will reappear if it receives new entries. // logID identifies the log within the project. An example log ID is "syslog". Requires AdminScope. 
func (c *Client) DeleteLog(ctx context.Context, logID string) error { return c.lClient.DeleteLog(ctx, &logpb.DeleteLogRequest{ LogName: internal.LogPath(c.parent(), logID), }) } func toHTTPRequest(p *logtypepb.HttpRequest) (*logging.HTTPRequest, error) { if p == nil { return nil, nil } u, err := url.Parse(p.RequestUrl) if err != nil { return nil, err } var dur time.Duration if p.Latency != nil { dur, err = ptypes.Duration(p.Latency) if err != nil { return nil, err } } hr := &http.Request{ Method: p.RequestMethod, URL: u, Header: map[string][]string{}, } if p.UserAgent != "" { hr.Header.Set("User-Agent", p.UserAgent) } if p.Referer != "" { hr.Header.Set("Referer", p.Referer) } return &logging.HTTPRequest{ Request: hr, RequestSize: p.RequestSize, Status: int(p.Status), ResponseSize: p.ResponseSize, Latency: dur, RemoteIP: p.RemoteIp, CacheHit: p.CacheHit, CacheValidatedWithOriginServer: p.CacheValidatedWithOriginServer, }, nil } // An EntriesOption is an option for listing log entries. type EntriesOption interface { set(*logpb.ListLogEntriesRequest) } // ProjectIDs sets the project IDs or project numbers from which to retrieve // log entries. Examples of a project ID: "my-project-1A", "1234567890". func ProjectIDs(pids []string) EntriesOption { return projectIDs(pids) } type projectIDs []string func (p projectIDs) set(r *logpb.ListLogEntriesRequest) { r.ResourceNames = make([]string, len(p)) for i, v := range p { r.ResourceNames[i] = fmt.Sprintf("projects/%s", v) } } // Filter sets an advanced logs filter for listing log entries (see // https://cloud.google.com/logging/docs/view/advanced_filters). The filter is // compared against all log entries in the projects specified by ProjectIDs. // Only entries that match the filter are retrieved. An empty filter (the // default) matches all log entries. // // In the filter string, log names must be written in their full form, as // "projects/PROJECT-ID/logs/LOG-ID". 
Forward slashes in LOG-ID must be // replaced by %2F before calling Filter. // // Timestamps in the filter string must be written in RFC 3339 format. See the // timestamp example. func Filter(f string) EntriesOption { return filter(f) } type filter string func (f filter) set(r *logpb.ListLogEntriesRequest) { r.Filter = string(f) } // NewestFirst causes log entries to be listed from most recent (newest) to // least recent (oldest). By default, they are listed from oldest to newest. func NewestFirst() EntriesOption { return newestFirst{} } type newestFirst struct{} func (newestFirst) set(r *logpb.ListLogEntriesRequest) { r.OrderBy = "timestamp desc" } // Entries returns an EntryIterator for iterating over log entries. By default, // the log entries will be restricted to those from the project passed to // NewClient. This may be overridden by passing a ProjectIDs option. Requires ReadScope or AdminScope. func (c *Client) Entries(ctx context.Context, opts ...EntriesOption) *EntryIterator { it := &EntryIterator{ it: c.lClient.ListLogEntries(ctx, listLogEntriesRequest(c.projectID, opts)), } it.pageInfo, it.nextFunc = iterator.NewPageInfo( it.fetch, func() int { return len(it.items) }, func() interface{} { b := it.items; it.items = nil; return b }) return it } func listLogEntriesRequest(projectID string, opts []EntriesOption) *logpb.ListLogEntriesRequest { req := &logpb.ListLogEntriesRequest{ ResourceNames: []string{"projects/" + projectID}, } for _, opt := range opts { opt.set(req) } return req } // An EntryIterator iterates over log entries. type EntryIterator struct { it *vkit.LogEntryIterator pageInfo *iterator.PageInfo nextFunc func() error items []*logging.Entry } // PageInfo supports pagination. See https://godoc.org/google.golang.org/api/iterator package for details. func (it *EntryIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. 
Its second return value is iterator.Done // (https://godoc.org/google.golang.org/api/iterator) if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *EntryIterator) Next() (*logging.Entry, error) { if err := it.nextFunc(); err != nil { return nil, err } item := it.items[0] it.items = it.items[1:] return item, nil } func (it *EntryIterator) fetch(pageSize int, pageToken string) (string, error) { return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { item, err := it.it.Next() if err != nil { return err } e, err := fromLogEntry(item) if err != nil { return err } it.items = append(it.items, e) return nil }) } func trunc32(i int) int32 { if i > math.MaxInt32 { i = math.MaxInt32 } return int32(i) } var slashUnescaper = strings.NewReplacer("%2F", "/", "%2f", "/") func fromLogEntry(le *logpb.LogEntry) (*logging.Entry, error) { time, err := ptypes.Timestamp(le.Timestamp) if err != nil { return nil, err } var payload interface{} switch x := le.Payload.(type) { case *logpb.LogEntry_TextPayload: payload = x.TextPayload case *logpb.LogEntry_ProtoPayload: var d ptypes.DynamicAny if err := ptypes.UnmarshalAny(x.ProtoPayload, &d); err != nil { return nil, fmt.Errorf("logging: unmarshalling proto payload: %v", err) } payload = d.Message case *logpb.LogEntry_JsonPayload: // Leave this as a Struct. // TODO(jba): convert to map[string]interface{}? payload = x.JsonPayload default: return nil, fmt.Errorf("logging: unknown payload type: %T", le.Payload) } hr, err := toHTTPRequest(le.HttpRequest) if err != nil { return nil, err } return &logging.Entry{ Timestamp: time, Severity: logging.Severity(le.Severity), Payload: payload, Labels: le.Labels, InsertID: le.InsertId, HTTPRequest: hr, Operation: le.Operation, LogName: slashUnescaper.Replace(le.LogName), Resource: le.Resource, Trace: le.Trace, }, nil } // Logs lists the logs owned by the parent resource of the client. 
func (c *Client) Logs(ctx context.Context) *LogIterator { it := &LogIterator{ parentResource: c.parent(), it: c.lClient.ListLogs(ctx, &logpb.ListLogsRequest{Parent: c.parent()}), } it.pageInfo, it.nextFunc = iterator.NewPageInfo( it.fetch, func() int { return len(it.items) }, func() interface{} { b := it.items; it.items = nil; return b }) return it } // A LogIterator iterates over logs. type LogIterator struct { parentResource string it *vkit.StringIterator pageInfo *iterator.PageInfo nextFunc func() error items []string } // PageInfo supports pagination. See https://godoc.org/google.golang.org/api/iterator package for details. func (it *LogIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done // (https://godoc.org/google.golang.org/api/iterator) if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *LogIterator) Next() (string, error) { if err := it.nextFunc(); err != nil { return "", err } item := it.items[0] it.items = it.items[1:] return item, nil } func (it *LogIterator) fetch(pageSize int, pageToken string) (string, error) { return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { logPath, err := it.it.Next() if err != nil { return err } logID := internal.LogIDFromPath(it.parentResource, logPath) it.items = append(it.items, logID) return nil }) } // Common fetch code for iterators that are backed by vkit iterators. func iterFetch(pageSize int, pageToken string, pi *iterator.PageInfo, next func() error) (string, error) { pi.MaxSize = pageSize pi.Token = pageToken // Get one item, which will fill the buffer. if err := next(); err != nil { return "", err } // Collect the rest of the buffer. 
for pi.Remaining() > 0 { if err := next(); err != nil { return "", err } } return pi.Token, nil } golang-google-cloud-0.9.0/logging/logadmin/logadmin_test.go000066400000000000000000000200701312234511600237420ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // TODO(jba): test that OnError is getting called appropriately. package logadmin import ( "flag" "log" "net/http" "net/url" "os" "reflect" "testing" "time" "cloud.google.com/go/internal/testutil" "cloud.google.com/go/logging" ltesting "cloud.google.com/go/logging/internal/testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" durpb "github.com/golang/protobuf/ptypes/duration" structpb "github.com/golang/protobuf/ptypes/struct" "golang.org/x/net/context" "google.golang.org/api/option" mrpb "google.golang.org/genproto/googleapis/api/monitoredres" audit "google.golang.org/genproto/googleapis/cloud/audit" logtypepb "google.golang.org/genproto/googleapis/logging/type" logpb "google.golang.org/genproto/googleapis/logging/v2" "google.golang.org/grpc" ) var ( client *Client testProjectID string ) var ( // If true, this test is using the production service, not a fake. 
integrationTest bool newClient func(ctx context.Context, projectID string) *Client ) func TestMain(m *testing.M) { flag.Parse() // needed for testing.Short() ctx := context.Background() testProjectID = testutil.ProjID() if testProjectID == "" || testing.Short() { integrationTest = false if testProjectID != "" { log.Print("Integration tests skipped in short mode (using fake instead)") } testProjectID = "PROJECT_ID" addr, err := ltesting.NewServer() if err != nil { log.Fatalf("creating fake server: %v", err) } newClient = func(ctx context.Context, projectID string) *Client { conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { log.Fatalf("dialing %q: %v", addr, err) } c, err := NewClient(ctx, projectID, option.WithGRPCConn(conn)) if err != nil { log.Fatalf("creating client for fake at %q: %v", addr, err) } return c } } else { integrationTest = true ts := testutil.TokenSource(ctx, logging.AdminScope) if ts == nil { log.Fatal("The project key must be set. See CONTRIBUTING.md for details") } log.Printf("running integration tests with project %s", testProjectID) newClient = func(ctx context.Context, projectID string) *Client { c, err := NewClient(ctx, projectID, option.WithTokenSource(ts), option.WithGRPCDialOption(grpc.WithBlock())) if err != nil { log.Fatalf("creating prod client: %v", err) } return c } } client = newClient(ctx, testProjectID) initMetrics(ctx) cleanup := initSinks(ctx) exit := m.Run() cleanup() client.Close() os.Exit(exit) } // EntryIterator and DeleteLog are tested in the logging package. 
func TestClientClose(t *testing.T) { c := newClient(context.Background(), testProjectID) if err := c.Close(); err != nil { t.Errorf("want got %v, want nil", err) } } func TestFromLogEntry(t *testing.T) { now := time.Now() res := &mrpb.MonitoredResource{Type: "global"} ts, err := ptypes.TimestampProto(now) if err != nil { t.Fatal(err) } logEntry := logpb.LogEntry{ LogName: "projects/PROJECT_ID/logs/LOG_ID", Resource: res, Payload: &logpb.LogEntry_TextPayload{TextPayload: "hello"}, Timestamp: ts, Severity: logtypepb.LogSeverity_INFO, InsertId: "123", HttpRequest: &logtypepb.HttpRequest{ RequestMethod: "GET", RequestUrl: "http:://example.com/path?q=1", RequestSize: 100, Status: 200, ResponseSize: 25, Latency: &durpb.Duration{Seconds: 100}, UserAgent: "user-agent", RemoteIp: "127.0.0.1", Referer: "referer", CacheHit: true, CacheValidatedWithOriginServer: true, }, Labels: map[string]string{ "a": "1", "b": "two", "c": "true", }, } u, err := url.Parse("http:://example.com/path?q=1") if err != nil { t.Fatal(err) } want := &logging.Entry{ LogName: "projects/PROJECT_ID/logs/LOG_ID", Resource: res, Timestamp: now.In(time.UTC), Severity: logging.Info, Payload: "hello", Labels: map[string]string{ "a": "1", "b": "two", "c": "true", }, InsertID: "123", HTTPRequest: &logging.HTTPRequest{ Request: &http.Request{ Method: "GET", URL: u, Header: map[string][]string{ "User-Agent": []string{"user-agent"}, "Referer": []string{"referer"}, }, }, RequestSize: 100, Status: 200, ResponseSize: 25, Latency: 100 * time.Second, RemoteIP: "127.0.0.1", CacheHit: true, CacheValidatedWithOriginServer: true, }, } got, err := fromLogEntry(&logEntry) if err != nil { t.Fatal(err) } // Test sub-values separately because %+v and %#v do not follow pointers. // TODO(jba): use a differ or pretty-printer. 
if !reflect.DeepEqual(got.HTTPRequest.Request, want.HTTPRequest.Request) { t.Fatalf("HTTPRequest.Request:\ngot %+v\nwant %+v", got.HTTPRequest.Request, want.HTTPRequest.Request) } if !reflect.DeepEqual(got.HTTPRequest, want.HTTPRequest) { t.Fatalf("HTTPRequest:\ngot %+v\nwant %+v", got.HTTPRequest, want.HTTPRequest) } if !reflect.DeepEqual(got, want) { t.Errorf("FullEntry:\ngot %+v\nwant %+v", got, want) } // Proto payload. alog := &audit.AuditLog{ ServiceName: "svc", MethodName: "method", ResourceName: "shelves/S/books/B", } any, err := ptypes.MarshalAny(alog) if err != nil { t.Fatal(err) } logEntry = logpb.LogEntry{ LogName: "projects/PROJECT_ID/logs/LOG_ID", Resource: res, Timestamp: ts, Payload: &logpb.LogEntry_ProtoPayload{ProtoPayload: any}, } got, err = fromLogEntry(&logEntry) if err != nil { t.Fatal(err) } if !ltesting.PayloadEqual(got.Payload, alog) { t.Errorf("got %+v, want %+v", got.Payload, alog) } // JSON payload. jstruct := &structpb.Struct{Fields: map[string]*structpb.Value{ "f": &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: 3.1}}, }} logEntry = logpb.LogEntry{ LogName: "projects/PROJECT_ID/logs/LOG_ID", Resource: res, Timestamp: ts, Payload: &logpb.LogEntry_JsonPayload{JsonPayload: jstruct}, } got, err = fromLogEntry(&logEntry) if err != nil { t.Fatal(err) } if !ltesting.PayloadEqual(got.Payload, jstruct) { t.Errorf("got %+v, want %+v", got.Payload, jstruct) } } func TestListLogEntriesRequest(t *testing.T) { for _, test := range []struct { opts []EntriesOption projectIDs []string filter string orderBy string }{ // Default is client's project ID, empty filter and orderBy. 
{nil, []string{"PROJECT_ID"}, "", ""}, {[]EntriesOption{NewestFirst(), Filter("f")}, []string{"PROJECT_ID"}, "f", "timestamp desc"}, {[]EntriesOption{ProjectIDs([]string{"foo"})}, []string{"foo"}, "", ""}, {[]EntriesOption{NewestFirst(), Filter("f"), ProjectIDs([]string{"foo"})}, []string{"foo"}, "f", "timestamp desc"}, {[]EntriesOption{NewestFirst(), Filter("f"), ProjectIDs([]string{"foo"})}, []string{"foo"}, "f", "timestamp desc"}, // If there are repeats, last one wins. {[]EntriesOption{NewestFirst(), Filter("no"), ProjectIDs([]string{"foo"}), Filter("f")}, []string{"foo"}, "f", "timestamp desc"}, } { got := listLogEntriesRequest("PROJECT_ID", test.opts) want := &logpb.ListLogEntriesRequest{ ResourceNames: []string{"projects/" + test.projectIDs[0]}, Filter: test.filter, OrderBy: test.orderBy, } if !proto.Equal(got, want) { t.Errorf("%v:\ngot %v\nwant %v", test.opts, got, want) } } } golang-google-cloud-0.9.0/logging/logadmin/metrics.go000066400000000000000000000115231312234511600225620ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logadmin import ( "fmt" vkit "cloud.google.com/go/logging/apiv2" "golang.org/x/net/context" "google.golang.org/api/iterator" logpb "google.golang.org/genproto/googleapis/logging/v2" ) // Metric describes a logs-based metric. The value of the metric is the // number of log entries that match a logs filter. 
// // Metrics are a feature of Stackdriver Monitoring. // See https://cloud.google.com/monitoring/api/v3/metrics for more about them. type Metric struct { // ID is a client-assigned metric identifier. Example: // "severe_errors". Metric identifiers are limited to 1000 // characters and can include only the following characters: A-Z, // a-z, 0-9, and the special characters _-.,+!*',()%/\. The // forward-slash character (/) denotes a hierarchy of name pieces, // and it cannot be the first character of the name. ID string // Description describes this metric. It is used in documentation. Description string // Filter is an advanced logs filter (see // https://cloud.google.com/logging/docs/view/advanced_filters). // Example: "logName:syslog AND severity>=ERROR". Filter string } // CreateMetric creates a logs-based metric. func (c *Client) CreateMetric(ctx context.Context, m *Metric) error { _, err := c.mClient.CreateLogMetric(ctx, &logpb.CreateLogMetricRequest{ Parent: c.parent(), Metric: toLogMetric(m), }) return err } // DeleteMetric deletes a log-based metric. // The provided metric ID is the metric identifier. For example, "severe_errors". func (c *Client) DeleteMetric(ctx context.Context, metricID string) error { return c.mClient.DeleteLogMetric(ctx, &logpb.DeleteLogMetricRequest{ MetricName: c.metricPath(metricID), }) } // Metric gets a logs-based metric. // The provided metric ID is the metric identifier. For example, "severe_errors". // Requires ReadScope or AdminScope. func (c *Client) Metric(ctx context.Context, metricID string) (*Metric, error) { lm, err := c.mClient.GetLogMetric(ctx, &logpb.GetLogMetricRequest{ MetricName: c.metricPath(metricID), }) if err != nil { return nil, err } return fromLogMetric(lm), nil } // UpdateMetric creates a logs-based metric if it does not exist, or updates an // existing one. 
func (c *Client) UpdateMetric(ctx context.Context, m *Metric) error { _, err := c.mClient.UpdateLogMetric(ctx, &logpb.UpdateLogMetricRequest{ MetricName: c.metricPath(m.ID), Metric: toLogMetric(m), }) return err } func (c *Client) metricPath(metricID string) string { return fmt.Sprintf("%s/metrics/%s", c.parent(), metricID) } // Metrics returns a MetricIterator for iterating over all Metrics in the Client's project. // Requires ReadScope or AdminScope. func (c *Client) Metrics(ctx context.Context) *MetricIterator { it := &MetricIterator{ it: c.mClient.ListLogMetrics(ctx, &logpb.ListLogMetricsRequest{Parent: c.parent()}), } it.pageInfo, it.nextFunc = iterator.NewPageInfo( it.fetch, func() int { return len(it.items) }, func() interface{} { b := it.items; it.items = nil; return b }) return it } // A MetricIterator iterates over Metrics. type MetricIterator struct { it *vkit.LogMetricIterator pageInfo *iterator.PageInfo nextFunc func() error items []*Metric } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *MetricIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is Done if there are // no more results. Once Next returns Done, all subsequent calls will return // Done. 
func (it *MetricIterator) Next() (*Metric, error) { if err := it.nextFunc(); err != nil { return nil, err } item := it.items[0] it.items = it.items[1:] return item, nil } func (it *MetricIterator) fetch(pageSize int, pageToken string) (string, error) { return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { item, err := it.it.Next() if err != nil { return err } it.items = append(it.items, fromLogMetric(item)) return nil }) } func toLogMetric(m *Metric) *logpb.LogMetric { return &logpb.LogMetric{ Name: m.ID, Description: m.Description, Filter: m.Filter, } } func fromLogMetric(lm *logpb.LogMetric) *Metric { return &Metric{ ID: lm.Name, Description: lm.Description, Filter: lm.Filter, } } golang-google-cloud-0.9.0/logging/logadmin/metrics_test.go000066400000000000000000000070601312234511600236220ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logadmin import ( "log" "reflect" "testing" "time" "cloud.google.com/go/internal/testutil" "golang.org/x/net/context" "google.golang.org/api/iterator" ) var metricIDs = testutil.NewUIDSpace("GO-CLIENT-TEST-METRIC") // Initializes the tests before they run. func initMetrics(ctx context.Context) { // Clean up from aborted tests. 
it := client.Metrics(ctx) loop: for { m, err := it.Next() switch err { case nil: if metricIDs.Older(m.ID, 24*time.Hour) { client.DeleteMetric(ctx, m.ID) } case iterator.Done: break loop default: log.Printf("cleanupMetrics: %v", err) return } } } func TestCreateDeleteMetric(t *testing.T) { ctx := context.Background() metric := &Metric{ ID: metricIDs.New(), Description: "DESC", Filter: "FILTER", } if err := client.CreateMetric(ctx, metric); err != nil { t.Fatal(err) } defer client.DeleteMetric(ctx, metric.ID) got, err := client.Metric(ctx, metric.ID) if err != nil { t.Fatal(err) } if want := metric; !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } if err := client.DeleteMetric(ctx, metric.ID); err != nil { t.Fatal(err) } if _, err := client.Metric(ctx, metric.ID); err == nil { t.Fatal("got no error, expected one") } } func TestUpdateMetric(t *testing.T) { ctx := context.Background() metric := &Metric{ ID: metricIDs.New(), Description: "DESC", Filter: "FILTER", } // Updating a non-existent metric creates a new one. if err := client.UpdateMetric(ctx, metric); err != nil { t.Fatal(err) } defer client.DeleteMetric(ctx, metric.ID) got, err := client.Metric(ctx, metric.ID) if err != nil { t.Fatal(err) } if want := metric; !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } // Updating an existing metric changes it. 
metric.Description = "CHANGED" if err := client.UpdateMetric(ctx, metric); err != nil { t.Fatal(err) } got, err = client.Metric(ctx, metric.ID) if err != nil { t.Fatal(err) } if want := metric; !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } } func TestListMetrics(t *testing.T) { ctx := context.Background() var metrics []*Metric want := map[string]*Metric{} for i := 0; i < 10; i++ { m := &Metric{ ID: metricIDs.New(), Description: "DESC", Filter: "FILTER", } metrics = append(metrics, m) want[m.ID] = m } for _, m := range metrics { if err := client.CreateMetric(ctx, m); err != nil { t.Fatalf("Create(%q): %v", m.ID, err) } defer client.DeleteMetric(ctx, m.ID) } got := map[string]*Metric{} it := client.Metrics(ctx) for { m, err := it.Next() if err == iterator.Done { break } if err != nil { t.Fatal(err) } // If tests run simultaneously, we may have more metrics than we // created. So only check for our own. if _, ok := want[m.ID]; ok { got[m.ID] = m } } if !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } } golang-google-cloud-0.9.0/logging/logadmin/resources.go000066400000000000000000000053251312234511600231310ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package logadmin import ( vkit "cloud.google.com/go/logging/apiv2" "golang.org/x/net/context" "google.golang.org/api/iterator" mrpb "google.golang.org/genproto/googleapis/api/monitoredres" logpb "google.golang.org/genproto/googleapis/logging/v2" ) // ResourceDescriptors returns a ResourceDescriptorIterator // for iterating over MonitoredResourceDescriptors. Requires ReadScope or AdminScope. // See https://cloud.google.com/logging/docs/api/v2/#monitored-resources for an explanation of // monitored resources. // See https://cloud.google.com/logging/docs/api/v2/resource-list for a list of monitored resources. func (c *Client) ResourceDescriptors(ctx context.Context) *ResourceDescriptorIterator { it := &ResourceDescriptorIterator{ it: c.lClient.ListMonitoredResourceDescriptors(ctx, &logpb.ListMonitoredResourceDescriptorsRequest{}), } it.pageInfo, it.nextFunc = iterator.NewPageInfo( it.fetch, func() int { return len(it.items) }, func() interface{} { b := it.items; it.items = nil; return b }) return it } // ResourceDescriptorIterator is an iterator over MonitoredResourceDescriptors. type ResourceDescriptorIterator struct { it *vkit.MonitoredResourceDescriptorIterator pageInfo *iterator.PageInfo nextFunc func() error items []*mrpb.MonitoredResourceDescriptor } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *ResourceDescriptorIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is Done if there are // no more results. Once Next returns Done, all subsequent calls will return // Done. 
func (it *ResourceDescriptorIterator) Next() (*mrpb.MonitoredResourceDescriptor, error) { if err := it.nextFunc(); err != nil { return nil, err } item := it.items[0] it.items = it.items[1:] return item, nil } func (it *ResourceDescriptorIterator) fetch(pageSize int, pageToken string) (string, error) { return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { item, err := it.it.Next() if err != nil { return err } it.items = append(it.items, item) return nil }) } golang-google-cloud-0.9.0/logging/logadmin/resources_test.go000066400000000000000000000023571312234511600241720ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logadmin import ( "testing" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func TestMonitoredResourceDescriptors(t *testing.T) { // We can't create MonitoredResourceDescriptors, and there is no guarantee // about what the service will return. So we just check that the result is // non-empty. it := client.ResourceDescriptors(context.Background()) n := 0 loop: for { _, err := it.Next() switch err { case nil: n++ case iterator.Done: break loop default: t.Fatal(err) } } if n == 0 { t.Fatal("Next: got no MetricResourceDescriptors, expected at least one") } // TODO(jba) test pagination. } golang-google-cloud-0.9.0/logging/logadmin/sinks.go000066400000000000000000000124701312234511600222450ustar00rootroot00000000000000// Copyright 2016 Google Inc. 
All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package logadmin import ( "fmt" vkit "cloud.google.com/go/logging/apiv2" "golang.org/x/net/context" "google.golang.org/api/iterator" logpb "google.golang.org/genproto/googleapis/logging/v2" ) // Sink describes a sink used to export log entries outside Stackdriver // Logging. Incoming log entries matching a filter are exported to a // destination (a Cloud Storage bucket, BigQuery dataset or Cloud Pub/Sub // topic). // // For more information, see https://cloud.google.com/logging/docs/export/using_exported_logs. // (The Sinks in this package are what the documentation refers to as "project sinks".) type Sink struct { // ID is a client-assigned sink identifier. Example: // "my-severe-errors-to-pubsub". // Sink identifiers are limited to 1000 characters // and can include only the following characters: A-Z, a-z, // 0-9, and the special characters "_-.". ID string // Destination is the export destination. See // https://cloud.google.com/logging/docs/api/tasks/exporting-logs. // Examples: "storage.googleapis.com/a-bucket", // "bigquery.googleapis.com/projects/a-project-id/datasets/a-dataset". Destination string // Filter optionally specifies an advanced logs filter (see // https://cloud.google.com/logging/docs/view/advanced_filters) that // defines the log entries to be exported. Example: "logName:syslog AND // severity>=ERROR". If omitted, all entries are returned. 
Filter string } // CreateSink creates a Sink. It returns an error if the Sink already exists. // Requires AdminScope. func (c *Client) CreateSink(ctx context.Context, sink *Sink) (*Sink, error) { ls, err := c.sClient.CreateSink(ctx, &logpb.CreateSinkRequest{ Parent: c.parent(), Sink: toLogSink(sink), }) if err != nil { fmt.Printf("Sink: %+v\n", toLogSink(sink)) return nil, err } return fromLogSink(ls), nil } // DeleteSink deletes a sink. The provided sinkID is the sink's identifier, such as // "my-severe-errors-to-pubsub". // Requires AdminScope. func (c *Client) DeleteSink(ctx context.Context, sinkID string) error { return c.sClient.DeleteSink(ctx, &logpb.DeleteSinkRequest{ SinkName: c.sinkPath(sinkID), }) } // Sink gets a sink. The provided sinkID is the sink's identifier, such as // "my-severe-errors-to-pubsub". // Requires ReadScope or AdminScope. func (c *Client) Sink(ctx context.Context, sinkID string) (*Sink, error) { ls, err := c.sClient.GetSink(ctx, &logpb.GetSinkRequest{ SinkName: c.sinkPath(sinkID), }) if err != nil { return nil, err } return fromLogSink(ls), nil } // UpdateSink updates an existing Sink, or creates a new one if the Sink doesn't exist. // Requires AdminScope. func (c *Client) UpdateSink(ctx context.Context, sink *Sink) (*Sink, error) { ls, err := c.sClient.UpdateSink(ctx, &logpb.UpdateSinkRequest{ SinkName: c.sinkPath(sink.ID), Sink: toLogSink(sink), }) if err != nil { return nil, err } return fromLogSink(ls), err } func (c *Client) sinkPath(sinkID string) string { return fmt.Sprintf("%s/sinks/%s", c.parent(), sinkID) } // Sinks returns a SinkIterator for iterating over all Sinks in the Client's project. // Requires ReadScope or AdminScope. 
func (c *Client) Sinks(ctx context.Context) *SinkIterator { it := &SinkIterator{ it: c.sClient.ListSinks(ctx, &logpb.ListSinksRequest{Parent: c.parent()}), } it.pageInfo, it.nextFunc = iterator.NewPageInfo( it.fetch, func() int { return len(it.items) }, func() interface{} { b := it.items; it.items = nil; return b }) return it } // A SinkIterator iterates over Sinks. type SinkIterator struct { it *vkit.LogSinkIterator pageInfo *iterator.PageInfo nextFunc func() error items []*Sink } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *SinkIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is Done if there are // no more results. Once Next returns Done, all subsequent calls will return // Done. func (it *SinkIterator) Next() (*Sink, error) { if err := it.nextFunc(); err != nil { return nil, err } item := it.items[0] it.items = it.items[1:] return item, nil } func (it *SinkIterator) fetch(pageSize int, pageToken string) (string, error) { return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { item, err := it.it.Next() if err != nil { return err } it.items = append(it.items, fromLogSink(item)) return nil }) } func toLogSink(s *Sink) *logpb.LogSink { return &logpb.LogSink{ Name: s.ID, Destination: s.Destination, Filter: s.Filter, OutputVersionFormat: logpb.LogSink_V2, } } func fromLogSink(ls *logpb.LogSink) *Sink { return &Sink{ ID: ls.Name, Destination: ls.Destination, Filter: ls.Filter, } } golang-google-cloud-0.9.0/logging/logadmin/sinks_test.go000066400000000000000000000137131312234511600233050ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // TODO(jba): document in CONTRIBUTING.md that service account must be given "Logs Configuration Writer" IAM role for sink tests to pass. // TODO(jba): [cont] (1) From top left menu, go to IAM & Admin. (2) In Roles dropdown for acct, select Logging > Logs Configuration Writer. (3) Save. // TODO(jba): Also, cloud-logs@google.com must have Owner permission on the GCS bucket named for the test project. package logadmin import ( "log" "reflect" "testing" "time" "cloud.google.com/go/internal/testutil" "cloud.google.com/go/storage" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" ) var sinkIDs = testutil.NewUIDSpace("GO-CLIENT-TEST-SINK") const testFilter = "" var testSinkDestination string // Called just before TestMain calls m.Run. // Returns a cleanup function to be called after the tests finish. func initSinks(ctx context.Context) func() { // Create a unique GCS bucket so concurrent tests don't interfere with each other. bucketIDs := testutil.NewUIDSpace(testProjectID + "-log-sink") testBucket := bucketIDs.New() testSinkDestination = "storage.googleapis.com/" + testBucket var storageClient *storage.Client if integrationTest { // Create a unique bucket as a sink destination, and give the cloud logging account // owner right. 
ts := testutil.TokenSource(ctx, storage.ScopeFullControl) var err error storageClient, err = storage.NewClient(ctx, option.WithTokenSource(ts)) if err != nil { log.Fatalf("new storage client: %v", err) } bucket := storageClient.Bucket(testBucket) if err := bucket.Create(ctx, testProjectID, nil); err != nil { log.Fatalf("creating storage bucket %q: %v", testBucket, err) } if err := bucket.ACL().Set(ctx, "group-cloud-logs@google.com", storage.RoleOwner); err != nil { log.Fatalf("setting owner role: %v", err) } } // Clean up from aborted tests. it := client.Sinks(ctx) for { s, err := it.Next() if err == iterator.Done { break } if err != nil { log.Printf("listing sinks: %v", err) break } if sinkIDs.Older(s.ID, 24*time.Hour) { client.DeleteSink(ctx, s.ID) // ignore error } } if integrationTest { for _, bn := range bucketNames(ctx, storageClient) { if bucketIDs.Older(bn, 24*time.Hour) { storageClient.Bucket(bn).Delete(ctx) // ignore error } } return func() { if err := storageClient.Bucket(testBucket).Delete(ctx); err != nil { log.Printf("deleting %q: %v", testBucket, err) } storageClient.Close() } } return func() {} } // Collect the name of all buckets for the test project. 
func bucketNames(ctx context.Context, client *storage.Client) []string { var names []string it := client.Buckets(ctx, testProjectID) loop: for { b, err := it.Next() switch err { case nil: names = append(names, b.Name) case iterator.Done: break loop default: log.Printf("listing buckets: %v", err) break loop } } return names } func TestCreateDeleteSink(t *testing.T) { ctx := context.Background() sink := &Sink{ ID: sinkIDs.New(), Destination: testSinkDestination, Filter: testFilter, } got, err := client.CreateSink(ctx, sink) if err != nil { t.Fatal(err) } defer client.DeleteSink(ctx, sink.ID) if want := sink; !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } got, err = client.Sink(ctx, sink.ID) if err != nil { t.Fatal(err) } if want := sink; !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } if err := client.DeleteSink(ctx, sink.ID); err != nil { t.Fatal(err) } if _, err := client.Sink(ctx, sink.ID); err == nil { t.Fatal("got no error, expected one") } } func TestUpdateSink(t *testing.T) { ctx := context.Background() sink := &Sink{ ID: sinkIDs.New(), Destination: testSinkDestination, Filter: testFilter, } // Updating a non-existent sink creates a new one. got, err := client.UpdateSink(ctx, sink) if err != nil { t.Fatal(err) } defer client.DeleteSink(ctx, sink.ID) if want := sink; !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } got, err = client.Sink(ctx, sink.ID) if err != nil { t.Fatal(err) } if want := sink; !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } // Updating an existing sink changes it. 
sink.Filter = "" if _, err := client.UpdateSink(ctx, sink); err != nil { t.Fatal(err) } got, err = client.Sink(ctx, sink.ID) if err != nil { t.Fatal(err) } if want := sink; !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } } func TestListSinks(t *testing.T) { ctx := context.Background() var sinks []*Sink want := map[string]*Sink{} for i := 0; i < 4; i++ { s := &Sink{ ID: sinkIDs.New(), Destination: testSinkDestination, Filter: testFilter, } sinks = append(sinks, s) want[s.ID] = s } for _, s := range sinks { if _, err := client.CreateSink(ctx, s); err != nil { t.Fatalf("Create(%q): %v", s.ID, err) } defer client.DeleteSink(ctx, s.ID) } got := map[string]*Sink{} it := client.Sinks(ctx) for { s, err := it.Next() if err == iterator.Done { break } if err != nil { t.Fatal(err) } // If tests run simultaneously, we may have more sinks than we // created. So only check for our own. if _, ok := want[s.ID]; ok { got[s.ID] = s } } if !reflect.DeepEqual(got, want) { t.Errorf("got %+v, want %+v", got, want) } } golang-google-cloud-0.9.0/logging/logging.go000066400000000000000000000613601312234511600207540ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // API/gRPC features intentionally missing from this client: // - You cannot have the server pick the time of the entry. This client // always sends a time. // - There is no way to provide a protocol buffer payload. 
// - No support for the "partial success" feature when writing log entries. // TODO(jba): test whether forward-slash characters in the log ID must be URL-encoded. // These features are missing now, but will likely be added: // - There is no way to specify CallOptions. package logging import ( "encoding/json" "errors" "fmt" "log" "math" "net/http" "strconv" "strings" "sync" "time" "cloud.google.com/go/compute/metadata" "cloud.google.com/go/internal/version" vkit "cloud.google.com/go/logging/apiv2" "cloud.google.com/go/logging/internal" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" structpb "github.com/golang/protobuf/ptypes/struct" tspb "github.com/golang/protobuf/ptypes/timestamp" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/support/bundler" mrpb "google.golang.org/genproto/googleapis/api/monitoredres" logtypepb "google.golang.org/genproto/googleapis/logging/type" logpb "google.golang.org/genproto/googleapis/logging/v2" ) const ( // Scope for reading from the logging service. ReadScope = "https://www.googleapis.com/auth/logging.read" // Scope for writing to the logging service. WriteScope = "https://www.googleapis.com/auth/logging.write" // Scope for administrative actions on the logging service. AdminScope = "https://www.googleapis.com/auth/logging.admin" ) const ( // defaultErrorCapacity is the capacity of the channel used to deliver // errors to the OnError function. defaultErrorCapacity = 10 // DefaultDelayThreshold is the default value for the DelayThreshold LoggerOption. DefaultDelayThreshold = time.Second // DefaultEntryCountThreshold is the default value for the EntryCountThreshold LoggerOption. DefaultEntryCountThreshold = 1000 // DefaultEntryByteThreshold is the default value for the EntryByteThreshold LoggerOption. DefaultEntryByteThreshold = 1 << 20 // 1MiB // DefaultBufferedByteLimit is the default value for the BufferedByteLimit LoggerOption. 
DefaultBufferedByteLimit = 1 << 30 // 1GiB ) // For testing: var now = time.Now // ErrOverflow signals that the number of buffered entries for a Logger // exceeds its BufferLimit. var ErrOverflow = errors.New("logging: log entry overflowed buffer limits") // Client is a Logging client. A Client is associated with a single Cloud project. type Client struct { client *vkit.Client // client for the logging service projectID string errc chan error // should be buffered to minimize dropped errors donec chan struct{} // closed on Client.Close to close Logger bundlers loggers sync.WaitGroup // so we can wait for loggers to close closed bool // OnError is called when an error occurs in a call to Log or Flush. The // error may be due to an invalid Entry, an overflow because BufferLimit // was reached (in which case the error will be ErrOverflow) or an error // communicating with the logging service. OnError is called with errors // from all Loggers. It is never called concurrently. OnError is expected // to return quickly; if errors occur while OnError is running, some may // not be reported. The default behavior is to call log.Printf. // // This field should be set only once, before any method of Client is called. OnError func(err error) } // NewClient returns a new logging client associated with the provided project ID. // // By default NewClient uses WriteScope. To use a different scope, call // NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes). func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { // Check for '/' in project ID to reserve the ability to support various owning resources, // in the form "{Collection}/{Name}", for instance "organizations/my-org". 
if strings.ContainsRune(projectID, '/') { return nil, errors.New("logging: project ID contains '/'") } opts = append([]option.ClientOption{ option.WithEndpoint(internal.ProdAddr), option.WithScopes(WriteScope), }, opts...) c, err := vkit.NewClient(ctx, opts...) if err != nil { return nil, err } c.SetGoogleClientInfo("gccl", version.Repo) client := &Client{ client: c, projectID: projectID, errc: make(chan error, defaultErrorCapacity), // create a small buffer for errors donec: make(chan struct{}), OnError: func(e error) { log.Printf("logging client: %v", e) }, } // Call the user's function synchronously, to make life easier for them. go func() { for err := range client.errc { // This reference to OnError is memory-safe if the user sets OnError before // calling any client methods. The reference happens before the first read from // client.errc, which happens before the first write to client.errc, which // happens before any call, which happens before the user sets OnError. if fn := client.OnError; fn != nil { fn(err) } else { log.Printf("logging (project ID %q): %v", projectID, err) } } }() return client, nil } // parent returns the string used in many RPCs to denote the parent resource of the log. func (c *Client) parent() string { return "projects/" + c.projectID } var unixZeroTimestamp *tspb.Timestamp func init() { var err error unixZeroTimestamp, err = ptypes.TimestampProto(time.Unix(0, 0)) if err != nil { panic(err) } } // Ping reports whether the client's connection to the logging service and the // authentication configuration are valid. To accomplish this, Ping writes a // log entry "ping" to a log named "ping". func (c *Client) Ping(ctx context.Context) error { ent := &logpb.LogEntry{ Payload: &logpb.LogEntry_TextPayload{TextPayload: "ping"}, Timestamp: unixZeroTimestamp, // Identical timestamps and insert IDs are both InsertId: "ping", // necessary for the service to dedup these entries. 
} _, err := c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{ LogName: internal.LogPath(c.parent(), "ping"), Resource: globalResource(c.projectID), Entries: []*logpb.LogEntry{ent}, }) return err } // A Logger is used to write log messages to a single log. It can be configured // with a log ID, common monitored resource, and a set of common labels. type Logger struct { client *Client logName string // "projects/{projectID}/logs/{logID}" stdLoggers map[Severity]*log.Logger bundler *bundler.Bundler // Options commonResource *mrpb.MonitoredResource commonLabels map[string]string } // A LoggerOption is a configuration option for a Logger. type LoggerOption interface { set(*Logger) } // CommonResource sets the monitored resource associated with all log entries // written from a Logger. If not provided, the resource is automatically // detected based on the running environment. This value can be overridden // per-entry by setting an Entry's Resource field. func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} } type commonResource struct{ *mrpb.MonitoredResource } func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource } var detectedResource struct { pb *mrpb.MonitoredResource once sync.Once } func detectResource() *mrpb.MonitoredResource { detectedResource.once.Do(func() { if !metadata.OnGCE() { return } projectID, err := metadata.ProjectID() if err != nil { return } id, err := metadata.InstanceID() if err != nil { return } zone, err := metadata.Zone() if err != nil { return } detectedResource.pb = &mrpb.MonitoredResource{ Type: "gce_instance", Labels: map[string]string{ "project_id": projectID, "instance_id": id, "zone": zone, }, } }) return detectedResource.pb } func globalResource(projectID string) *mrpb.MonitoredResource { return &mrpb.MonitoredResource{ Type: "global", Labels: map[string]string{ "project_id": projectID, }, } } // CommonLabels are labels that apply to all log entries written from a 
Logger, // so that you don't have to repeat them in each log entry's Labels field. If // any of the log entries contains a (key, value) with the same key that is in // CommonLabels, then the entry's (key, value) overrides the one in // CommonLabels. func CommonLabels(m map[string]string) LoggerOption { return commonLabels(m) } type commonLabels map[string]string func (c commonLabels) set(l *Logger) { l.commonLabels = c } // DelayThreshold is the maximum amount of time that an entry should remain // buffered in memory before a call to the logging service is triggered. Larger // values of DelayThreshold will generally result in fewer calls to the logging // service, while increasing the risk that log entries will be lost if the // process crashes. // The default is DefaultDelayThreshold. func DelayThreshold(d time.Duration) LoggerOption { return delayThreshold(d) } type delayThreshold time.Duration func (d delayThreshold) set(l *Logger) { l.bundler.DelayThreshold = time.Duration(d) } // EntryCountThreshold is the maximum number of entries that will be buffered // in memory before a call to the logging service is triggered. Larger values // will generally result in fewer calls to the logging service, while // increasing both memory consumption and the risk that log entries will be // lost if the process crashes. // The default is DefaultEntryCountThreshold. func EntryCountThreshold(n int) LoggerOption { return entryCountThreshold(n) } type entryCountThreshold int func (e entryCountThreshold) set(l *Logger) { l.bundler.BundleCountThreshold = int(e) } // EntryByteThreshold is the maximum number of bytes of entries that will be // buffered in memory before a call to the logging service is triggered. See // EntryCountThreshold for a discussion of the tradeoffs involved in setting // this option. // The default is DefaultEntryByteThreshold. 
func EntryByteThreshold(n int) LoggerOption { return entryByteThreshold(n) } type entryByteThreshold int func (e entryByteThreshold) set(l *Logger) { l.bundler.BundleByteThreshold = int(e) } // EntryByteLimit is the maximum number of bytes of entries that will be sent // in a single call to the logging service. This option limits the size of a // single RPC payload, to account for network or service issues with large // RPCs. If EntryByteLimit is smaller than EntryByteThreshold, the latter has // no effect. // The default is zero, meaning there is no limit. func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) } type entryByteLimit int func (e entryByteLimit) set(l *Logger) { l.bundler.BundleByteLimit = int(e) } // BufferedByteLimit is the maximum number of bytes that the Logger will keep // in memory before returning ErrOverflow. This option limits the total memory // consumption of the Logger (but note that each Logger has its own, separate // limit). It is possible to reach BufferedByteLimit even if it is larger than // EntryByteThreshold or EntryByteLimit, because calls triggered by the latter // two options may be enqueued (and hence occupying memory) while new log // entries are being added. // The default is DefaultBufferedByteLimit. func BufferedByteLimit(n int) LoggerOption { return bufferedByteLimit(n) } type bufferedByteLimit int func (b bufferedByteLimit) set(l *Logger) { l.bundler.BufferedByteLimit = int(b) } // Logger returns a Logger that will write entries with the given log ID, such as // "syslog". A log ID must be less than 512 characters long and can only // include the following characters: upper and lower case alphanumeric // characters: [A-Za-z0-9]; and punctuation characters: forward-slash, // underscore, hyphen, and period. 
func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { r := detectResource() if r == nil { r = globalResource(c.projectID) } l := &Logger{ client: c, logName: internal.LogPath(c.parent(), logID), commonResource: r, } // TODO(jba): determine the right context for the bundle handler. ctx := context.TODO() l.bundler = bundler.NewBundler(&logpb.LogEntry{}, func(entries interface{}) { l.writeLogEntries(ctx, entries.([]*logpb.LogEntry)) }) l.bundler.DelayThreshold = DefaultDelayThreshold l.bundler.BundleCountThreshold = DefaultEntryCountThreshold l.bundler.BundleByteThreshold = DefaultEntryByteThreshold l.bundler.BufferedByteLimit = DefaultBufferedByteLimit for _, opt := range opts { opt.set(l) } l.stdLoggers = map[Severity]*log.Logger{} for s := range severityName { l.stdLoggers[s] = log.New(severityWriter{l, s}, "", 0) } c.loggers.Add(1) go func() { defer c.loggers.Done() <-c.donec l.bundler.Flush() }() return l } type severityWriter struct { l *Logger s Severity } func (w severityWriter) Write(p []byte) (n int, err error) { w.l.Log(Entry{ Severity: w.s, Payload: string(p), }) return len(p), nil } // Close waits for all opened loggers to be flushed and closes the client. func (c *Client) Close() error { if c.closed { return nil } close(c.donec) // close Logger bundlers c.loggers.Wait() // wait for all bundlers to flush and close // Now there can be no more errors. close(c.errc) // terminate error goroutine // Return only the first error. Since all clients share an underlying connection, // Closes after the first always report a "connection is closing" error. err := c.client.Close() c.closed = true return err } // Severity is the severity of the event described in a log entry. These // guideline severity levels are ordered, with numerically smaller levels // treated as less severe than numerically larger levels. type Severity int const ( // Default means the log entry has no assigned severity level. 
Default = Severity(logtypepb.LogSeverity_DEFAULT) // Debug means debug or trace information. Debug = Severity(logtypepb.LogSeverity_DEBUG) // Info means routine information, such as ongoing status or performance. Info = Severity(logtypepb.LogSeverity_INFO) // Notice means normal but significant events, such as start up, shut down, or configuration. Notice = Severity(logtypepb.LogSeverity_NOTICE) // Warning means events that might cause problems. Warning = Severity(logtypepb.LogSeverity_WARNING) // Error means events that are likely to cause problems. Error = Severity(logtypepb.LogSeverity_ERROR) // Critical means events that cause more severe problems or brief outages. Critical = Severity(logtypepb.LogSeverity_CRITICAL) // Alert means a person must take an action immediately. Alert = Severity(logtypepb.LogSeverity_ALERT) // Emergency means one or more systems are unusable. Emergency = Severity(logtypepb.LogSeverity_EMERGENCY) ) var severityName = map[Severity]string{ Default: "Default", Debug: "Debug", Info: "Info", Notice: "Notice", Warning: "Warning", Error: "Error", Critical: "Critical", Alert: "Alert", Emergency: "Emergency", } // String converts a severity level to a string. func (v Severity) String() string { // same as proto.EnumName s, ok := severityName[v] if ok { return s } return strconv.Itoa(int(v)) } // ParseSeverity returns the Severity whose name equals s, ignoring case. It // returns Default if no Severity matches. func ParseSeverity(s string) Severity { sl := strings.ToLower(s) for sev, name := range severityName { if strings.ToLower(name) == sl { return sev } } return Default } // Entry is a log entry. // See https://cloud.google.com/logging/docs/view/logs_index for more about entries. type Entry struct { // Timestamp is the time of the entry. If zero, the current time is used. Timestamp time.Time // Severity is the entry's severity level. // The zero value is Default. 
Severity Severity // Payload must be either a string or something that // marshals via the encoding/json package to a JSON object // (and not any other type of JSON value). Payload interface{} // Labels optionally specifies key/value labels for the log entry. // The Logger.Log method takes ownership of this map. See Logger.CommonLabels // for more about labels. Labels map[string]string // InsertID is a unique ID for the log entry. If you provide this field, // the logging service considers other log entries in the same log with the // same ID as duplicates which can be removed. If omitted, the logging // service will generate a unique ID for this log entry. Note that because // this client retries RPCs automatically, it is possible (though unlikely) // that an Entry without an InsertID will be written more than once. InsertID string // HTTPRequest optionally specifies metadata about the HTTP request // associated with this log entry, if applicable. It is optional. HTTPRequest *HTTPRequest // Operation optionally provides information about an operation associated // with the log entry, if applicable. Operation *logpb.LogEntryOperation // LogName is the full log name, in the form // "projects/{ProjectID}/logs/{LogID}". It is set by the client when // reading entries. It is an error to set it when writing entries. LogName string // Resource is the monitored resource associated with the entry. It is set // by the client when reading entries. It is an error to set it when // writing entries. Resource *mrpb.MonitoredResource // Trace is the resource name of the trace associated with the log entry, // if any. If it contains a relative resource name, the name is assumed to // be relative to //tracing.googleapis.com. Trace string } // HTTPRequest contains an http.Request as well as additional // information about the request and its response. type HTTPRequest struct { // Request is the http.Request passed to the handler. 
Request *http.Request // RequestSize is the size of the HTTP request message in bytes, including // the request headers and the request body. RequestSize int64 // Status is the response code indicating the status of the response. // Examples: 200, 404. Status int // ResponseSize is the size of the HTTP response message sent back to the client, in bytes, // including the response headers and the response body. ResponseSize int64 // Latency is the request processing latency on the server, from the time the request was // received until the response was sent. Latency time.Duration // LocalIP is the IP address (IPv4 or IPv6) of the origin server that the request // was sent to. LocalIP string // RemoteIP is the IP address (IPv4 or IPv6) of the client that issued the // HTTP request. Examples: "192.168.1.1", "FE80::0202:B3FF:FE1E:8329". RemoteIP string // CacheHit reports whether an entity was served from cache (with or without // validation). CacheHit bool // CacheValidatedWithOriginServer reports whether the response was // validated with the origin server before being served from cache. This // field is only meaningful if CacheHit is true. CacheValidatedWithOriginServer bool } func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest { if r == nil { return nil } if r.Request == nil { panic("HTTPRequest must have a non-nil Request") } u := *r.Request.URL u.Fragment = "" pb := &logtypepb.HttpRequest{ RequestMethod: r.Request.Method, RequestUrl: u.String(), RequestSize: r.RequestSize, Status: int32(r.Status), ResponseSize: r.ResponseSize, UserAgent: r.Request.UserAgent(), ServerIp: r.LocalIP, RemoteIp: r.RemoteIP, // TODO(jba): attempt to parse http.Request.RemoteAddr? Referer: r.Request.Referer(), CacheHit: r.CacheHit, CacheValidatedWithOriginServer: r.CacheValidatedWithOriginServer, } if r.Latency != 0 { pb.Latency = ptypes.DurationProto(r.Latency) } return pb } // toProtoStruct converts v, which must marshal into a JSON object, // into a Google Struct proto. 
func toProtoStruct(v interface{}) (*structpb.Struct, error) { // Fast path: if v is already a *structpb.Struct, nothing to do. if s, ok := v.(*structpb.Struct); ok { return s, nil } // v is a Go struct that supports JSON marshalling. We want a Struct // protobuf. Some day we may have a more direct way to get there, but right // now the only way is to marshal the Go struct to JSON, unmarshal into a // map, and then build the Struct proto from the map. jb, err := json.Marshal(v) if err != nil { return nil, fmt.Errorf("logging: json.Marshal: %v", err) } var m map[string]interface{} err = json.Unmarshal(jb, &m) if err != nil { return nil, fmt.Errorf("logging: json.Unmarshal: %v", err) } return jsonMapToProtoStruct(m), nil } func jsonMapToProtoStruct(m map[string]interface{}) *structpb.Struct { fields := map[string]*structpb.Value{} for k, v := range m { fields[k] = jsonValueToStructValue(v) } return &structpb.Struct{Fields: fields} } func jsonValueToStructValue(v interface{}) *structpb.Value { switch x := v.(type) { case bool: return &structpb.Value{Kind: &structpb.Value_BoolValue{BoolValue: x}} case float64: return &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: x}} case string: return &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: x}} case nil: return &structpb.Value{Kind: &structpb.Value_NullValue{}} case map[string]interface{}: return &structpb.Value{Kind: &structpb.Value_StructValue{StructValue: jsonMapToProtoStruct(x)}} case []interface{}: var vals []*structpb.Value for _, e := range x { vals = append(vals, jsonValueToStructValue(e)) } return &structpb.Value{Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{Values: vals}}} default: panic(fmt.Sprintf("bad type %T for JSON value", v)) } } // LogSync logs the Entry synchronously without any buffering. Because LogSync is slow // and will block, it is intended primarily for debugging or critical errors. // Prefer Log for most uses. 
// TODO(jba): come up with a better name (LogNow?) or eliminate. func (l *Logger) LogSync(ctx context.Context, e Entry) error { ent, err := toLogEntry(e) if err != nil { return err } _, err = l.client.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{ LogName: l.logName, Resource: l.commonResource, Labels: l.commonLabels, Entries: []*logpb.LogEntry{ent}, }) return err } // Log buffers the Entry for output to the logging service. It never blocks. func (l *Logger) Log(e Entry) { ent, err := toLogEntry(e) if err != nil { l.error(err) return } if err := l.bundler.Add(ent, proto.Size(ent)); err != nil { l.error(err) } } // Flush blocks until all currently buffered log entries are sent. func (l *Logger) Flush() { l.bundler.Flush() } func (l *Logger) writeLogEntries(ctx context.Context, entries []*logpb.LogEntry) { req := &logpb.WriteLogEntriesRequest{ LogName: l.logName, Resource: l.commonResource, Labels: l.commonLabels, Entries: entries, } _, err := l.client.client.WriteLogEntries(ctx, req) if err != nil { l.error(err) } } // error puts the error on the client's error channel // without blocking. func (l *Logger) error(err error) { select { case l.client.errc <- err: default: } } // StandardLogger returns a *log.Logger for the provided severity. // // This method is cheap. A single log.Logger is pre-allocated for each // severity level in each Logger. Callers may mutate the returned log.Logger // (for example by calling SetFlags or SetPrefix). 
func (l *Logger) StandardLogger(s Severity) *log.Logger { return l.stdLoggers[s] } func trunc32(i int) int32 { if i > math.MaxInt32 { i = math.MaxInt32 } return int32(i) } func toLogEntry(e Entry) (*logpb.LogEntry, error) { if e.LogName != "" { return nil, errors.New("logging: Entry.LogName should be not be set when writing") } t := e.Timestamp if t.IsZero() { t = now() } ts, err := ptypes.TimestampProto(t) if err != nil { return nil, err } ent := &logpb.LogEntry{ Timestamp: ts, Severity: logtypepb.LogSeverity(e.Severity), InsertId: e.InsertID, HttpRequest: fromHTTPRequest(e.HTTPRequest), Operation: e.Operation, Labels: e.Labels, Trace: e.Trace, } switch p := e.Payload.(type) { case string: ent.Payload = &logpb.LogEntry_TextPayload{TextPayload: p} default: s, err := toProtoStruct(p) if err != nil { return nil, err } ent.Payload = &logpb.LogEntry_JsonPayload{JsonPayload: s} } return ent, nil } golang-google-cloud-0.9.0/logging/logging_test.go000066400000000000000000000316461312234511600220170ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // TODO(jba): test that OnError is getting called appropriately. 
package logging_test import ( "flag" "fmt" "log" "os" "reflect" "strings" "testing" "time" gax "github.com/googleapis/gax-go" cinternal "cloud.google.com/go/internal" "cloud.google.com/go/internal/testutil" "cloud.google.com/go/logging" ltesting "cloud.google.com/go/logging/internal/testing" "cloud.google.com/go/logging/logadmin" "golang.org/x/net/context" "golang.org/x/oauth2" "google.golang.org/api/iterator" "google.golang.org/api/option" mrpb "google.golang.org/genproto/googleapis/api/monitoredres" "google.golang.org/grpc" ) const testLogIDPrefix = "GO-LOGGING-CLIENT/TEST-LOG" var uids = testutil.NewUIDSpace(testLogIDPrefix) var ( client *logging.Client aclient *logadmin.Client testProjectID string testLogID string testFilter string errorc chan error ctx context.Context // Adjust the fields of a FullEntry received from the production service // before comparing it with the expected result. We can't correctly // compare certain fields, like times or server-generated IDs. clean func(*logging.Entry) // Create a new client with the given project ID. newClients func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) ) func testNow() time.Time { return time.Unix(1000, 0) } // If true, this test is using the production service, not a fake. var integrationTest bool func TestMain(m *testing.M) { flag.Parse() // needed for testing.Short() ctx = context.Background() testProjectID = testutil.ProjID() errorc = make(chan error, 100) if testProjectID == "" || testing.Short() { integrationTest = false if testProjectID != "" { log.Print("Integration tests skipped in short mode (using fake instead)") } testProjectID = "PROJECT_ID" clean = func(e *logging.Entry) { // Remove the insert ID for consistency with the integration test. 
e.InsertID = "" } addr, err := ltesting.NewServer() if err != nil { log.Fatalf("creating fake server: %v", err) } logging.SetNow(testNow) newClients = func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) { conn, err := grpc.Dial(addr, grpc.WithInsecure()) if err != nil { log.Fatalf("dialing %q: %v", addr, err) } c, err := logging.NewClient(ctx, projectID, option.WithGRPCConn(conn)) if err != nil { log.Fatalf("creating client for fake at %q: %v", addr, err) } ac, err := logadmin.NewClient(ctx, projectID, option.WithGRPCConn(conn)) if err != nil { log.Fatalf("creating client for fake at %q: %v", addr, err) } return c, ac } } else { integrationTest = true clean = func(e *logging.Entry) { // We cannot compare timestamps, so set them to the test time. // Also, remove the insert ID added by the service. e.Timestamp = testNow().UTC() e.InsertID = "" } ts := testutil.TokenSource(ctx, logging.AdminScope) if ts == nil { log.Fatal("The project key must be set. See CONTRIBUTING.md for details") } log.Printf("running integration tests with project %s", testProjectID) newClients = func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) { c, err := logging.NewClient(ctx, projectID, option.WithTokenSource(ts)) if err != nil { log.Fatalf("creating prod client: %v", err) } ac, err := logadmin.NewClient(ctx, projectID, option.WithTokenSource(ts)) if err != nil { log.Fatalf("creating prod client: %v", err) } return c, ac } } client, aclient = newClients(ctx, testProjectID) client.OnError = func(e error) { errorc <- e } exit := m.Run() client.Close() os.Exit(exit) } func initLogs(ctx context.Context) { testLogID = uids.New() testFilter = fmt.Sprintf(`logName = "projects/%s/logs/%s"`, testProjectID, strings.Replace(testLogID, "/", "%2F", -1)) } // Testing of Logger.Log is done in logadmin_test.go, TestEntries. 
func TestLogSync(t *testing.T) { initLogs(ctx) // Generate new testLogID ctx := context.Background() lg := client.Logger(testLogID) err := lg.LogSync(ctx, logging.Entry{Payload: "hello"}) if err != nil { t.Fatal(err) } err = lg.LogSync(ctx, logging.Entry{Payload: "goodbye"}) if err != nil { t.Fatal(err) } // Allow overriding the MonitoredResource. err = lg.LogSync(ctx, logging.Entry{Payload: "mr", Resource: &mrpb.MonitoredResource{Type: "global"}}) if err != nil { t.Fatal(err) } want := []*logging.Entry{ entryForTesting("hello"), entryForTesting("goodbye"), entryForTesting("mr"), } var got []*logging.Entry ok := waitFor(func() bool { got, err = allTestLogEntries(ctx) if err != nil { t.Log("fetching log entries: ", err) return false } return len(got) == len(want) }) if !ok { t.Fatalf("timed out; got: %d, want: %d\n", len(got), len(want)) } if msg, ok := compareEntries(got, want); !ok { t.Error(msg) } } func TestLogAndEntries(t *testing.T) { initLogs(ctx) // Generate new testLogID ctx := context.Background() payloads := []string{"p1", "p2", "p3", "p4", "p5"} lg := client.Logger(testLogID) for _, p := range payloads { // Use the insert ID to guarantee iteration order. lg.Log(logging.Entry{Payload: p, InsertID: p}) } lg.Flush() var want []*logging.Entry for _, p := range payloads { want = append(want, entryForTesting(p)) } var got []*logging.Entry ok := waitFor(func() bool { var err error got, err = allTestLogEntries(ctx) if err != nil { t.Log("fetching log entries: ", err) return false } return len(got) == len(want) }) if !ok { t.Fatalf("timed out; got: %d, want: %d\n", len(got), len(want)) } if msg, ok := compareEntries(got, want); !ok { t.Error(msg) } } // compareEntries compares most fields list of Entries against expected. 
compareEntries does not compare: // - HTTPRequest // - Operation // - Resource func compareEntries(got, want []*logging.Entry) (string, bool) { if len(got) != len(want) { return fmt.Sprintf("got %d entries, want %d", len(got), len(want)), false } for i := range got { if !compareEntry(got[i], want[i]) { return fmt.Sprintf("#%d:\ngot %+v\nwant %+v", i, got[i], want[i]), false } } return "", true } func compareEntry(got, want *logging.Entry) bool { if got.Timestamp.Unix() != want.Timestamp.Unix() { return false } if got.Severity != want.Severity { return false } if !ltesting.PayloadEqual(got.Payload, want.Payload) { return false } if !reflect.DeepEqual(got.Labels, want.Labels) { return false } if got.InsertID != want.InsertID { return false } if got.LogName != want.LogName { return false } return true } func entryForTesting(payload interface{}) *logging.Entry { return &logging.Entry{ Timestamp: testNow().UTC(), Payload: payload, LogName: "projects/" + testProjectID + "/logs/" + testLogID, Resource: &mrpb.MonitoredResource{Type: "global", Labels: map[string]string{"project_id": testProjectID}}, } } func countLogEntries(ctx context.Context, filter string) int { it := aclient.Entries(ctx, logadmin.Filter(filter)) n := 0 for { _, err := it.Next() if err == iterator.Done { return n } if err != nil { log.Fatalf("counting log entries: %v", err) } n++ } } func allTestLogEntries(ctx context.Context) ([]*logging.Entry, error) { var es []*logging.Entry it := aclient.Entries(ctx, logadmin.Filter(testFilter)) for { e, err := cleanNext(it) switch err { case nil: es = append(es, e) case iterator.Done: return es, nil default: return nil, err } } } func cleanNext(it *logadmin.EntryIterator) (*logging.Entry, error) { e, err := it.Next() if err != nil { return nil, err } clean(e) return e, nil } func TestStandardLogger(t *testing.T) { initLogs(ctx) // Generate new testLogID ctx := context.Background() lg := client.Logger(testLogID) slg := lg.StandardLogger(logging.Info) if slg != 
lg.StandardLogger(logging.Info) { t.Error("There should be only one standard logger at each severity.") } if slg == lg.StandardLogger(logging.Debug) { t.Error("There should be a different standard logger for each severity.") } slg.Print("info") lg.Flush() var got []*logging.Entry ok := waitFor(func() bool { var err error got, err = allTestLogEntries(ctx) if err != nil { t.Log("fetching log entries: ", err) return false } return len(got) == 1 }) if !ok { t.Fatalf("timed out; got: %d, want: %d\n", len(got), 1) } if len(got) != 1 { t.Fatalf("expected non-nil request with one entry; got:\n%+v", got) } if got, want := got[0].Payload.(string), "info\n"; got != want { t.Errorf("payload: got %q, want %q", got, want) } if got, want := logging.Severity(got[0].Severity), logging.Info; got != want { t.Errorf("severity: got %s, want %s", got, want) } } func TestSeverity(t *testing.T) { if got, want := logging.Info.String(), "Info"; got != want { t.Errorf("got %q, want %q", got, want) } if got, want := logging.Severity(-99).String(), "-99"; got != want { t.Errorf("got %q, want %q", got, want) } } func TestParseSeverity(t *testing.T) { for _, test := range []struct { in string want logging.Severity }{ {"", logging.Default}, {"whatever", logging.Default}, {"Default", logging.Default}, {"ERROR", logging.Error}, {"Error", logging.Error}, {"error", logging.Error}, } { got := logging.ParseSeverity(test.in) if got != test.want { t.Errorf("%q: got %s, want %s\n", test.in, got, test.want) } } } func TestErrors(t *testing.T) { initLogs(ctx) // Generate new testLogID // Drain errors already seen. loop: for { select { case <-errorc: default: break loop } } // Try to log something that can't be JSON-marshalled. lg := client.Logger(testLogID) lg.Log(logging.Entry{Payload: func() {}}) // Expect an error. 
select { case <-errorc: // pass case <-time.After(100 * time.Millisecond): t.Fatal("expected an error but timed out") } } type badTokenSource struct{} func (badTokenSource) Token() (*oauth2.Token, error) { return &oauth2.Token{}, nil } func TestPing(t *testing.T) { // Ping twice, in case the service's InsertID logic messes with the error code. ctx := context.Background() // The global client should be valid. if err := client.Ping(ctx); err != nil { t.Errorf("project %s: got %v, expected nil", testProjectID, err) } if err := client.Ping(ctx); err != nil { t.Errorf("project %s, #2: got %v, expected nil", testProjectID, err) } // nonexistent project c, _ := newClients(ctx, testProjectID+"-BAD") if err := c.Ping(ctx); err == nil { t.Errorf("nonexistent project: want error pinging logging api, got nil") } if err := c.Ping(ctx); err == nil { t.Errorf("nonexistent project, #2: want error pinging logging api, got nil") } // Bad creds. We cannot test this with the fake, since it doesn't do auth. if integrationTest { c, err := logging.NewClient(ctx, testProjectID, option.WithTokenSource(badTokenSource{})) if err != nil { t.Fatal(err) } if err := c.Ping(ctx); err == nil { t.Errorf("bad creds: want error pinging logging api, got nil") } if err := c.Ping(ctx); err == nil { t.Errorf("bad creds, #2: want error pinging logging api, got nil") } if err := c.Close(); err != nil { t.Fatalf("error closing client: %v", err) } } } func TestLogsAndDelete(t *testing.T) { // This function tests both the Logs and DeleteLog methods. We only try to // delete those logs that we can observe and that were generated by this // test. This may not include the logs generated from the current test run, // because the logging service is only eventually consistent. It's // therefore possible that on some runs, this test will do nothing. 
ctx := context.Background() it := aclient.Logs(ctx) nDeleted := 0 for { logID, err := it.Next() if err == iterator.Done { break } if err != nil { t.Fatal(err) } if strings.HasPrefix(logID, testLogIDPrefix) { if err := aclient.DeleteLog(ctx, logID); err != nil { t.Fatalf("deleting %q: %v", logID, err) } nDeleted++ } } t.Logf("deleted %d logs", nDeleted) } // waitFor calls f repeatedly with exponential backoff, blocking until it returns true. // It returns false after a while (if it times out). func waitFor(f func() bool) bool { // TODO(shadams): Find a better way to deflake these tests. ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) defer cancel() err := cinternal.Retry(ctx, gax.Backoff{Initial: time.Second, Multiplier: 2}, func() (bool, error) { return f(), nil }) return err == nil } golang-google-cloud-0.9.0/logging/logging_unexported_test.go000066400000000000000000000156431312234511600242730ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Tests that require access to unexported names of the logging package. 
package logging import ( "net/http" "net/url" "reflect" "testing" "time" "github.com/golang/protobuf/proto" durpb "github.com/golang/protobuf/ptypes/duration" structpb "github.com/golang/protobuf/ptypes/struct" "google.golang.org/api/support/bundler" mrpb "google.golang.org/genproto/googleapis/api/monitoredres" logtypepb "google.golang.org/genproto/googleapis/logging/type" ) func TestLoggerCreation(t *testing.T) { const logID = "testing" c := &Client{projectID: "PROJECT_ID"} customResource := &mrpb.MonitoredResource{ Type: "global", Labels: map[string]string{ "project_id": "ANOTHER_PROJECT", }, } defaultBundler := &bundler.Bundler{ DelayThreshold: DefaultDelayThreshold, BundleCountThreshold: DefaultEntryCountThreshold, BundleByteThreshold: DefaultEntryByteThreshold, BundleByteLimit: 0, BufferedByteLimit: DefaultBufferedByteLimit, } for _, test := range []struct { options []LoggerOption wantLogger *Logger defaultResource bool wantBundler *bundler.Bundler }{ { options: nil, wantLogger: &Logger{}, defaultResource: true, wantBundler: defaultBundler, }, { options: []LoggerOption{ CommonResource(nil), CommonLabels(map[string]string{"a": "1"}), }, wantLogger: &Logger{ commonResource: nil, commonLabels: map[string]string{"a": "1"}, }, wantBundler: defaultBundler, }, { options: []LoggerOption{CommonResource(customResource)}, wantLogger: &Logger{commonResource: customResource}, wantBundler: defaultBundler, }, { options: []LoggerOption{ DelayThreshold(time.Minute), EntryCountThreshold(99), EntryByteThreshold(17), EntryByteLimit(18), BufferedByteLimit(19), }, wantLogger: &Logger{}, defaultResource: true, wantBundler: &bundler.Bundler{ DelayThreshold: time.Minute, BundleCountThreshold: 99, BundleByteThreshold: 17, BundleByteLimit: 18, BufferedByteLimit: 19, }, }, } { gotLogger := c.Logger(logID, test.options...) 
if got, want := gotLogger.commonResource, test.wantLogger.commonResource; !test.defaultResource && !proto.Equal(got, want) { t.Errorf("%v: resource: got %v, want %v", test.options, got, want) } if got, want := gotLogger.commonLabels, test.wantLogger.commonLabels; !reflect.DeepEqual(got, want) { t.Errorf("%v: commonLabels: got %v, want %v", test.options, got, want) } if got, want := gotLogger.bundler.DelayThreshold, test.wantBundler.DelayThreshold; got != want { t.Errorf("%v: DelayThreshold: got %v, want %v", test.options, got, want) } if got, want := gotLogger.bundler.BundleCountThreshold, test.wantBundler.BundleCountThreshold; got != want { t.Errorf("%v: BundleCountThreshold: got %v, want %v", test.options, got, want) } if got, want := gotLogger.bundler.BundleByteThreshold, test.wantBundler.BundleByteThreshold; got != want { t.Errorf("%v: BundleByteThreshold: got %v, want %v", test.options, got, want) } if got, want := gotLogger.bundler.BundleByteLimit, test.wantBundler.BundleByteLimit; got != want { t.Errorf("%v: BundleByteLimit: got %v, want %v", test.options, got, want) } if got, want := gotLogger.bundler.BufferedByteLimit, test.wantBundler.BufferedByteLimit; got != want { t.Errorf("%v: BufferedByteLimit: got %v, want %v", test.options, got, want) } } } func TestToProtoStruct(t *testing.T) { v := struct { Foo string `json:"foo"` Bar int `json:"bar,omitempty"` Baz []float64 `json:"baz"` Moo map[string]interface{} `json:"moo"` }{ Foo: "foovalue", Baz: []float64{1.1}, Moo: map[string]interface{}{ "a": 1, "b": "two", "c": true, }, } got, err := toProtoStruct(v) if err != nil { t.Fatal(err) } want := &structpb.Struct{ Fields: map[string]*structpb.Value{ "foo": {Kind: &structpb.Value_StringValue{StringValue: v.Foo}}, "baz": {Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{Values: []*structpb.Value{ {Kind: &structpb.Value_NumberValue{NumberValue: 1.1}}, }}}}, "moo": {Kind: &structpb.Value_StructValue{ StructValue: &structpb.Struct{ Fields: 
map[string]*structpb.Value{ "a": {Kind: &structpb.Value_NumberValue{NumberValue: 1}}, "b": {Kind: &structpb.Value_StringValue{StringValue: "two"}}, "c": {Kind: &structpb.Value_BoolValue{BoolValue: true}}, }, }, }}, }, } if !proto.Equal(got, want) { t.Errorf("got %+v\nwant %+v", got, want) } // Non-structs should fail to convert. for v := range []interface{}{3, "foo", []int{1, 2, 3}} { _, err := toProtoStruct(v) if err == nil { t.Errorf("%v: got nil, want error", v) } } // Test fast path. got, err = toProtoStruct(want) if err != nil { t.Fatal(err) } if got != want { t.Error("got and want should be identical, but are not") } } func TestFromHTTPRequest(t *testing.T) { const testURL = "http:://example.com/path?q=1" u, err := url.Parse(testURL) if err != nil { t.Fatal(err) } req := &HTTPRequest{ Request: &http.Request{ Method: "GET", URL: u, Header: map[string][]string{ "User-Agent": []string{"user-agent"}, "Referer": []string{"referer"}, }, }, RequestSize: 100, Status: 200, ResponseSize: 25, Latency: 100 * time.Second, LocalIP: "127.0.0.1", RemoteIP: "10.0.1.1", CacheHit: true, CacheValidatedWithOriginServer: true, } got := fromHTTPRequest(req) want := &logtypepb.HttpRequest{ RequestMethod: "GET", RequestUrl: testURL, RequestSize: 100, Status: 200, ResponseSize: 25, Latency: &durpb.Duration{Seconds: 100}, UserAgent: "user-agent", ServerIp: "127.0.0.1", RemoteIp: "10.0.1.1", Referer: "referer", CacheHit: true, CacheValidatedWithOriginServer: true, } if !proto.Equal(got, want) { t.Errorf("got %+v\nwant %+v", got, want) } } // Used by the tests in logging_test. 
func SetNow(f func() time.Time) { now = f } golang-google-cloud-0.9.0/longrunning/000077500000000000000000000000001312234511600177035ustar00rootroot00000000000000golang-google-cloud-0.9.0/longrunning/autogen/000077500000000000000000000000001312234511600213455ustar00rootroot00000000000000golang-google-cloud-0.9.0/longrunning/autogen/doc.go000066400000000000000000000024761312234511600224520ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package longrunning is an experimental, auto-generated package for the // Google Long Running Operations API. // // // Use the client at cloud.google.com/go/longrunning in preference to this. package longrunning // import "cloud.google.com/go/longrunning/autogen" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. func DefaultAuthScopes() []string { return []string{} } golang-google-cloud-0.9.0/longrunning/autogen/from_conn.go000066400000000000000000000021631312234511600236560ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package longrunning import ( longrunningpb "google.golang.org/genproto/googleapis/longrunning" "google.golang.org/grpc" ) // InternalFromConn is for use by the google Cloud Libraries only. // // InternalFromConn creates OperationsClient from available connection. func InternalFromConn(conn *grpc.ClientConn) *OperationsClient { c := &OperationsClient{ conn: conn, CallOptions: defaultOperationsCallOptions(), operationsClient: longrunningpb.NewOperationsClient(conn), } c.SetGoogleClientInfo() return c } golang-google-cloud-0.9.0/longrunning/autogen/mock_test.go000066400000000000000000000236001312234511600236650ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package longrunning import ( emptypb "github.com/golang/protobuf/ptypes/empty" longrunningpb "google.golang.org/genproto/googleapis/longrunning" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockOperationsServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. longrunningpb.OperationsServer reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockOperationsServer) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest) (*longrunningpb.ListOperationsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*longrunningpb.ListOperationsResponse), nil } func (s *mockOperationsServer) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest) (*longrunningpb.Operation, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*longrunningpb.Operation), nil } func (s *mockOperationsServer) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest) (*emptypb.Empty, error) { md, _ 
:= metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockOperationsServer) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. var clientOpt option.ClientOption var ( mockOperations mockOperationsServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() longrunningpb.RegisterOperationsServer(serv, &mockOperations) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestOperationsGetOperation(t *testing.T) { var name2 string = "name2-1052831874" var done bool = true var expectedResponse = &longrunningpb.Operation{ Name: name2, Done: done, } mockOperations.err = nil mockOperations.reqs = nil mockOperations.resps = append(mockOperations.resps[:0], expectedResponse) var name string = "name3373707" var request = &longrunningpb.GetOperationRequest{ Name: name, } c, err := NewOperationsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetOperation(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockOperations.reqs[0]; !proto.Equal(want, got) { 
t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestOperationsGetOperationError(t *testing.T) { errCode := codes.PermissionDenied mockOperations.err = gstatus.Error(errCode, "test error") var name string = "name3373707" var request = &longrunningpb.GetOperationRequest{ Name: name, } c, err := NewOperationsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetOperation(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestOperationsListOperations(t *testing.T) { var nextPageToken string = "" var operationsElement *longrunningpb.Operation = &longrunningpb.Operation{} var operations = []*longrunningpb.Operation{operationsElement} var expectedResponse = &longrunningpb.ListOperationsResponse{ NextPageToken: nextPageToken, Operations: operations, } mockOperations.err = nil mockOperations.reqs = nil mockOperations.resps = append(mockOperations.resps[:0], expectedResponse) var name string = "name3373707" var filter string = "filter-1274492040" var request = &longrunningpb.ListOperationsRequest{ Name: name, Filter: filter, } c, err := NewOperationsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListOperations(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockOperations.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Operations[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func 
TestOperationsListOperationsError(t *testing.T) { errCode := codes.PermissionDenied mockOperations.err = gstatus.Error(errCode, "test error") var name string = "name3373707" var filter string = "filter-1274492040" var request = &longrunningpb.ListOperationsRequest{ Name: name, Filter: filter, } c, err := NewOperationsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListOperations(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestOperationsCancelOperation(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockOperations.err = nil mockOperations.reqs = nil mockOperations.resps = append(mockOperations.resps[:0], expectedResponse) var name string = "name3373707" var request = &longrunningpb.CancelOperationRequest{ Name: name, } c, err := NewOperationsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.CancelOperation(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockOperations.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestOperationsCancelOperationError(t *testing.T) { errCode := codes.PermissionDenied mockOperations.err = gstatus.Error(errCode, "test error") var name string = "name3373707" var request = &longrunningpb.CancelOperationRequest{ Name: name, } c, err := NewOperationsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.CancelOperation(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestOperationsDeleteOperation(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} 
mockOperations.err = nil mockOperations.reqs = nil mockOperations.resps = append(mockOperations.resps[:0], expectedResponse) var name string = "name3373707" var request = &longrunningpb.DeleteOperationRequest{ Name: name, } c, err := NewOperationsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteOperation(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockOperations.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestOperationsDeleteOperationError(t *testing.T) { errCode := codes.PermissionDenied mockOperations.err = gstatus.Error(errCode, "test error") var name string = "name3373707" var request = &longrunningpb.DeleteOperationRequest{ Name: name, } c, err := NewOperationsClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteOperation(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } golang-google-cloud-0.9.0/longrunning/autogen/operations_client.go000066400000000000000000000240621312234511600254210ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package longrunning import ( "math" "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" longrunningpb "google.golang.org/genproto/googleapis/longrunning" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // OperationsCallOptions contains the retry settings for each method of OperationsClient. type OperationsCallOptions struct { GetOperation []gax.CallOption ListOperations []gax.CallOption CancelOperation []gax.CallOption DeleteOperation []gax.CallOption } func defaultOperationsClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("longrunning.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultOperationsCallOptions() *OperationsCallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &OperationsCallOptions{ GetOperation: retry[[2]string{"default", "idempotent"}], ListOperations: retry[[2]string{"default", "idempotent"}], CancelOperation: retry[[2]string{"default", "idempotent"}], DeleteOperation: retry[[2]string{"default", "idempotent"}], } } // OperationsClient is a client for interacting with Google Long Running Operations API. type OperationsClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. operationsClient longrunningpb.OperationsClient // The call options for this service. CallOptions *OperationsCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewOperationsClient creates a new operations client. // // Manages long-running operations with an API service. 
// // When an API method normally takes long time to complete, it can be designed // to return [Operation][google.longrunning.Operation] to the client, and the client can use this // interface to receive the real response asynchronously by polling the // operation resource, or pass the operation resource to another API (such as // Google Cloud Pub/Sub API) to receive the response. Any API service that // returns long-running operations should implement the `Operations` interface // so developers can have a consistent client experience. func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultOperationsClientOptions(), opts...)...) if err != nil { return nil, err } c := &OperationsClient{ conn: conn, CallOptions: defaultOperationsCallOptions(), operationsClient: longrunningpb.NewOperationsClient(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *OperationsClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *OperationsClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *OperationsClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // GetOperation gets the latest state of a long-running operation. Clients can use this // method to poll the operation result at intervals as recommended by the API // service. 
func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...) var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // ListOperations lists operations that match the specified filter in the request. If the // server doesn't support this method, it returns `UNIMPLEMENTED`. // // NOTE: the `name` binding below allows API services to override the binding // to use different resource name schemes, such as `users/*/operations`. func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...) it := &OperationIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) { var resp *longrunningpb.ListOperationsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.operationsClient.ListOperations(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, "", err } return resp.Operations, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // CancelOperation starts asynchronous cancellation on a long-running operation. The server // makes a best effort to cancel the operation, but success is not // guaranteed. If the server doesn't support this method, it returns // `google.rpc.Code.UNIMPLEMENTED`. Clients can use // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or // other methods to check whether the cancellation succeeded or whether the // operation completed despite cancellation. On successful cancellation, // the operation is not deleted; instead, it becomes an operation with // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, // corresponding to `Code.CANCELLED`. func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.operationsClient.CancelOperation(ctx, req, settings.GRPC...) return err }, opts...) return err } // DeleteOperation deletes a long-running operation. This method indicates that the client is // no longer interested in the operation result. It does not cancel the // operation. If the server doesn't support this method, it returns // `google.rpc.Code.UNIMPLEMENTED`. 
func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteOperation[0:len(c.CallOptions.DeleteOperation):len(c.CallOptions.DeleteOperation)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.operationsClient.DeleteOperation(ctx, req, settings.GRPC...) return err }, opts...) return err } // OperationIterator manages a stream of *longrunningpb.Operation. type OperationIterator struct { items []*longrunningpb.Operation pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*longrunningpb.Operation, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *OperationIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. 
func (it *OperationIterator) Next() (*longrunningpb.Operation, error) { var item *longrunningpb.Operation if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *OperationIterator) bufLen() int { return len(it.items) } func (it *OperationIterator) takeBuf() interface{} { b := it.items it.items = nil return b } golang-google-cloud-0.9.0/longrunning/autogen/operations_client_example_test.go000066400000000000000000000047741312234511600302030ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package longrunning_test import ( "cloud.google.com/go/longrunning/autogen" "golang.org/x/net/context" "google.golang.org/api/iterator" longrunningpb "google.golang.org/genproto/googleapis/longrunning" ) func ExampleNewOperationsClient() { ctx := context.Background() c, err := longrunning.NewOperationsClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleOperationsClient_GetOperation() { ctx := context.Background() c, err := longrunning.NewOperationsClient(ctx) if err != nil { // TODO: Handle error. } req := &longrunningpb.GetOperationRequest{ // TODO: Fill request struct fields. } resp, err := c.GetOperation(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. 
_ = resp } func ExampleOperationsClient_ListOperations() { ctx := context.Background() c, err := longrunning.NewOperationsClient(ctx) if err != nil { // TODO: Handle error. } req := &longrunningpb.ListOperationsRequest{ // TODO: Fill request struct fields. } it := c.ListOperations(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleOperationsClient_CancelOperation() { ctx := context.Background() c, err := longrunning.NewOperationsClient(ctx) if err != nil { // TODO: Handle error. } req := &longrunningpb.CancelOperationRequest{ // TODO: Fill request struct fields. } err = c.CancelOperation(ctx, req) if err != nil { // TODO: Handle error. } } func ExampleOperationsClient_DeleteOperation() { ctx := context.Background() c, err := longrunning.NewOperationsClient(ctx) if err != nil { // TODO: Handle error. } req := &longrunningpb.DeleteOperationRequest{ // TODO: Fill request struct fields. } err = c.DeleteOperation(ctx, req) if err != nil { // TODO: Handle error. } } golang-google-cloud-0.9.0/longrunning/example_test.go000066400000000000000000000055541312234511600227350ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package longrunning import ( "fmt" "time" "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/duration" "github.com/golang/protobuf/ptypes/timestamp" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/longrunning" ) func bestMomentInHistory() (*Operation, error) { t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", "2009-11-10 23:00:00 +0000 UTC") if err != nil { return nil, err } resp, err := ptypes.TimestampProto(t) if err != nil { return nil, err } respAny, err := ptypes.MarshalAny(resp) if err != nil { return nil, err } metaAny, err := ptypes.MarshalAny(ptypes.DurationProto(1 * time.Hour)) return &Operation{ proto: &pb.Operation{ Name: "best-moment", Done: true, Metadata: metaAny, Result: &pb.Operation_Response{ Response: respAny, }, }, }, err } func ExampleOperation_Wait() { // Complex computation, might take a long time. op, err := bestMomentInHistory() if err != nil { // TODO: Handle err. } var ts timestamp.Timestamp err = op.Wait(context.TODO(), &ts) if err != nil && !op.Done() { fmt.Println("failed to fetch operation status", err) } else if err != nil && op.Done() { fmt.Println("operation completed with error", err) } else { fmt.Println(ptypes.TimestampString(&ts)) } // Output: // 2009-11-10T23:00:00Z } func ExampleOperation_Metadata() { op, err := bestMomentInHistory() if err != nil { // TODO: Handle err. } // The operation might contain metadata. // In this example, the metadata contains the estimated length of time // the operation might take to complete. var meta duration.Duration if err := op.Metadata(&meta); err != nil { // TODO: Handle err. } d, err := ptypes.Duration(&meta) if err == ErrNoMetadata { fmt.Println("no metadata") } else if err != nil { // TODO: Handle err. } else { fmt.Println(d) } // Output: // 1h0m0s } func ExampleOperation_Cancel() { op, err := bestMomentInHistory() if err != nil { // TODO: Handle err. } if err := op.Cancel(context.Background()); err != nil { // TODO: Handle err. 
} } func ExampleOperation_Delete() { op, err := bestMomentInHistory() if err != nil { // TODO: Handle err. } if err := op.Delete(context.Background()); err != nil { // TODO: Handle err. } } golang-google-cloud-0.9.0/longrunning/longrunning.go000066400000000000000000000140251312234511600225740ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package longrunning supports Long Running Operations for the Google Cloud Libraries. // See google.golang.org/genproto/googleapis/longrunning for its service definition. // // Users of the Google Cloud Libraries will typically not use this package directly. // Instead they will call functions returning Operations and call their methods. // // This package is still experimental and subject to change. package longrunning // import "cloud.google.com/go/longrunning" import ( "errors" "fmt" "time" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" autogen "cloud.google.com/go/longrunning/autogen" pb "google.golang.org/genproto/googleapis/longrunning" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // ErrNoMetadata is the error returned by Metadata if the operation contains no metadata. var ErrNoMetadata = errors.New("operation contains no metadata") // Operation represents the result of an API call that may not be ready yet. 
type Operation struct { c operationsClient proto *pb.Operation } type operationsClient interface { GetOperation(context.Context, *pb.GetOperationRequest, ...gax.CallOption) (*pb.Operation, error) CancelOperation(context.Context, *pb.CancelOperationRequest, ...gax.CallOption) error DeleteOperation(context.Context, *pb.DeleteOperationRequest, ...gax.CallOption) error } // InternalNewOperation is for use by the google Cloud Libraries only. // // InternalNewOperation returns an long-running operation, abstracting the raw pb.Operation. // The conn parameter refers to a server that proto was received from. func InternalNewOperation(inner *autogen.OperationsClient, proto *pb.Operation) *Operation { return &Operation{ c: inner, proto: proto, } } // Name returns the name of the long-running operation. // The name is assigned by the server and is unique within the service // from which the operation is created. func (op *Operation) Name() string { return op.proto.Name } // Done reports whether the long-running operation has completed. func (op *Operation) Done() bool { return op.proto.Done } // Metadata unmarshals op's metadata into meta. // If op does not contain any metadata, Metadata returns ErrNoMetadata and meta is unmodified. func (op *Operation) Metadata(meta proto.Message) error { if m := op.proto.Metadata; m != nil { return ptypes.UnmarshalAny(m, meta) } return ErrNoMetadata } // Poll fetches the latest state of a long-running operation. // // If Poll fails, the error is returned and op is unmodified. // If Poll succeeds and the operation has completed with failure, // the error is returned and op.Done will return true. // If Poll succeeds and the operation has completed successfully, // op.Done will return true; if resp != nil, the response of the operation // is stored in resp. 
func (op *Operation) Poll(ctx context.Context, resp proto.Message, opts ...gax.CallOption) error { if !op.Done() { p, err := op.c.GetOperation(ctx, &pb.GetOperationRequest{Name: op.Name()}, opts...) if err != nil { return err } op.proto = p } if !op.Done() { return nil } switch r := op.proto.Result.(type) { case *pb.Operation_Error: // TODO (pongad): r.Details may contain further information return grpc.Errorf(codes.Code(r.Error.Code), "%s", r.Error.Message) case *pb.Operation_Response: if resp == nil { return nil } return ptypes.UnmarshalAny(r.Response, resp) default: return fmt.Errorf("unsupported result type %[1]T: %[1]v", r) } } // Wait blocks until the operation is completed. // If resp != nil, Wait stores the response in resp. // // See documentation of Poll for error-handling information. func (op *Operation) Wait(ctx context.Context, resp proto.Message, opts ...gax.CallOption) error { bo := gax.Backoff{ Initial: 100 * time.Millisecond, Max: 10 * time.Second, } return op.wait(ctx, resp, &bo, gax.Sleep, opts...) } type sleeper func(context.Context, time.Duration) error // wait implements Wait, taking exponentialBackoff and sleeper arguments for testing. func (op *Operation) wait(ctx context.Context, resp proto.Message, bo *gax.Backoff, sl sleeper, opts ...gax.CallOption) error { for { if err := op.Poll(ctx, resp, opts...); err != nil { return err } if op.Done() { return nil } if err := sl(ctx, bo.Pause()); err != nil { return err } } } // Cancel starts asynchronous cancellation on a long-running operation. The server // makes a best effort to cancel the operation, but success is not // guaranteed. If the server doesn't support this method, it returns // grpc.Code(error) == codes.Unimplemented. Clients can use // Poll or other methods to check whether the cancellation succeeded or whether the // operation completed despite cancellation. On successful cancellation, // the operation is not deleted; instead, op.Poll returns an error // with code Canceled. 
func (op *Operation) Cancel(ctx context.Context, opts ...gax.CallOption) error { return op.c.CancelOperation(ctx, &pb.CancelOperationRequest{Name: op.Name()}, opts...) } // Delete deletes a long-running operation. This method indicates that the client is // no longer interested in the operation result. It does not cancel the // operation. If the server doesn't support this method, grpc.Code(error) == codes.Unimplemented. func (op *Operation) Delete(ctx context.Context, opts ...gax.CallOption) error { return op.c.DeleteOperation(ctx, &pb.DeleteOperationRequest{Name: op.Name()}, opts...) } golang-google-cloud-0.9.0/longrunning/longrunning_test.go000066400000000000000000000122641312234511600236360ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package lro supports Long Running Operations for the Google Cloud Libraries. // // This package is still experimental and subject to change. package longrunning import ( "errors" "testing" "time" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/duration" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/longrunning" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) type getterService struct { operationsClient // clock represents the fake current time of the service. 
// It is the running sum of the of the duration we have slept. clock time.Duration // getTimes records the the times at which GetOperation is called. getTimes []time.Duration // results are the fake results that GetOperation should return. results []*pb.Operation } func (s *getterService) GetOperation(context.Context, *pb.GetOperationRequest, ...gax.CallOption) (*pb.Operation, error) { i := len(s.getTimes) s.getTimes = append(s.getTimes, s.clock) if i >= len(s.results) { return nil, errors.New("unexpected call") } return s.results[i], nil } func (s *getterService) sleeper() sleeper { return func(_ context.Context, d time.Duration) error { s.clock += d return nil } } func TestWait(t *testing.T) { responseDur := ptypes.DurationProto(42 * time.Second) responseAny, err := ptypes.MarshalAny(responseDur) if err != nil { t.Fatal(err) } s := &getterService{ results: []*pb.Operation{ {Name: "foo"}, {Name: "foo"}, {Name: "foo"}, {Name: "foo"}, {Name: "foo"}, { Name: "foo", Done: true, Result: &pb.Operation_Response{ Response: responseAny, }, }, }, } op := &Operation{ c: s, proto: &pb.Operation{Name: "foo"}, } if op.Done() { t.Fatal("operation should not have completed yet") } var resp duration.Duration bo := gax.Backoff{ Initial: 1 * time.Second, Max: 3 * time.Second, } if err := op.wait(context.Background(), &resp, &bo, s.sleeper()); err != nil { t.Fatal(err) } if !proto.Equal(&resp, responseDur) { t.Errorf("response, got %v, want %v", resp, responseDur) } if !op.Done() { t.Errorf("operation should have completed") } maxWait := []time.Duration{ 1 * time.Second, 2 * time.Second, 3 * time.Second, 3 * time.Second, 3 * time.Second, } for i := 0; i < len(s.getTimes)-1; i++ { w := s.getTimes[i+1] - s.getTimes[i] if mw := maxWait[i]; w > mw { t.Errorf("backoff, waited %s, max %s", w, mw) } } } func TestPollRequestError(t *testing.T) { const opName = "foo" // All calls error. 
s := &getterService{} op := &Operation{ c: s, proto: &pb.Operation{Name: opName}, } if err := op.Poll(context.Background(), nil); err == nil { t.Fatalf("Poll should error") } if n := op.Name(); n != opName { t.Errorf("operation name, got %q, want %q", n, opName) } if op.Done() { t.Errorf("operation should not have completed; we failed to fetch state") } } func TestPollErrorResult(t *testing.T) { const ( errCode = codes.NotFound errMsg = "my error" ) op := &Operation{ proto: &pb.Operation{ Name: "foo", Done: true, Result: &pb.Operation_Error{ Error: &status.Status{ Code: int32(errCode), Message: errMsg, }, }, }, } err := op.Poll(context.Background(), nil) if got := grpc.Code(err); got != errCode { t.Errorf("error code, want %s, got %s", errCode, got) } if got := grpc.ErrorDesc(err); got != errMsg { t.Errorf("error code, want %s, got %s", errMsg, got) } if !op.Done() { t.Errorf("operation should have completed") } } type errService struct { operationsClient errCancel, errDelete error } func (s *errService) CancelOperation(context.Context, *pb.CancelOperationRequest, ...gax.CallOption) error { return s.errCancel } func (s *errService) DeleteOperation(context.Context, *pb.DeleteOperationRequest, ...gax.CallOption) error { return s.errDelete } func TestCancelReturnsError(t *testing.T) { s := &errService{ errCancel: errors.New("cancel error"), } op := &Operation{ c: s, proto: &pb.Operation{Name: "foo"}, } if got, want := op.Cancel(context.Background()), s.errCancel; got != want { t.Errorf("cancel, got error %s, want %s", got, want) } } func TestDeleteReturnsError(t *testing.T) { s := &errService{ errDelete: errors.New("delete error"), } op := &Operation{ c: s, proto: &pb.Operation{Name: "foo"}, } if got, want := op.Delete(context.Background()), s.errDelete; got != want { t.Errorf("cancel, got error %s, want %s", got, want) } } 
golang-google-cloud-0.9.0/monitoring/000077500000000000000000000000001312234511600175305ustar00rootroot00000000000000golang-google-cloud-0.9.0/monitoring/apiv3/000077500000000000000000000000001312234511600205525ustar00rootroot00000000000000golang-google-cloud-0.9.0/monitoring/apiv3/ListMonitoredResourceDescriptors_smoke_test.go000066400000000000000000000032761312234511600320740ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package monitoring import ( monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" ) import ( "strconv" "testing" "time" "cloud.google.com/go/internal/testutil" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" ) var _ = iterator.Done var _ = strconv.FormatUint var _ = time.Now func TestMetricServiceSmoke(t *testing.T) { if testing.Short() { t.Skip("skipping smoke test in short mode") } ctx := context.Background() ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) if ts == nil { t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") } projectId := testutil.ProjID() _ = projectId c, err := NewMetricClient(ctx, option.WithTokenSource(ts)) if err != nil { t.Fatal(err) } var formattedName string = MetricProjectPath(projectId) var request = &monitoringpb.ListMonitoredResourceDescriptorsRequest{ Name: formattedName, } iter := c.ListMonitoredResourceDescriptors(ctx, request) if _, err := iter.Next(); err != nil && err != iterator.Done { t.Error(err) } } golang-google-cloud-0.9.0/monitoring/apiv3/doc.go000066400000000000000000000031651312234511600216530ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package monitoring is an experimental, auto-generated package for the // Stackdriver Monitoring API. // // Manages your Stackdriver Monitoring data and configurations. Most projects // must be associated with a Stackdriver account, with a few exceptions as // noted on the individual method pages. package monitoring // import "cloud.google.com/go/monitoring/apiv3" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. 
func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/monitoring", "https://www.googleapis.com/auth/monitoring.read", "https://www.googleapis.com/auth/monitoring.write", } } golang-google-cloud-0.9.0/monitoring/apiv3/group_client.go000066400000000000000000000317551312234511600236060ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package monitoring import ( "math" "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( groupProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") groupGroupPathTemplate = gax.MustCompilePathTemplate("projects/{project}/groups/{group}") ) // GroupCallOptions contains the retry settings for each method of GroupClient. 
type GroupCallOptions struct { ListGroups []gax.CallOption GetGroup []gax.CallOption CreateGroup []gax.CallOption UpdateGroup []gax.CallOption DeleteGroup []gax.CallOption ListGroupMembers []gax.CallOption } func defaultGroupClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("monitoring.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultGroupCallOptions() *GroupCallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &GroupCallOptions{ ListGroups: retry[[2]string{"default", "idempotent"}], GetGroup: retry[[2]string{"default", "idempotent"}], CreateGroup: retry[[2]string{"default", "non_idempotent"}], UpdateGroup: retry[[2]string{"default", "idempotent"}], DeleteGroup: retry[[2]string{"default", "idempotent"}], ListGroupMembers: retry[[2]string{"default", "idempotent"}], } } // GroupClient is a client for interacting with Stackdriver Monitoring API. type GroupClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. groupClient monitoringpb.GroupServiceClient // The call options for this service. CallOptions *GroupCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewGroupClient creates a new group service client. // // The Group API lets you inspect and manage your // [groups](google.monitoring.v3.Group). // // A group is a named filter that is used to identify // a collection of monitored resources. Groups are typically used to // mirror the physical and/or logical topology of the environment. // Because group membership is computed dynamically, monitored // resources that are started in the future are automatically placed // in matching groups. 
By using a group to name monitored resources in, // for example, an alert policy, the target of that alert policy is // updated automatically as monitored resources are added and removed // from the infrastructure. func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultGroupClientOptions(), opts...)...) if err != nil { return nil, err } c := &GroupClient{ conn: conn, CallOptions: defaultGroupCallOptions(), groupClient: monitoringpb.NewGroupServiceClient(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *GroupClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *GroupClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *GroupClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // GroupProjectPath returns the path for the project resource. func GroupProjectPath(project string) string { path, err := groupProjectPathTemplate.Render(map[string]string{ "project": project, }) if err != nil { panic(err) } return path } // GroupGroupPath returns the path for the group resource. func GroupGroupPath(project, group string) string { path, err := groupGroupPathTemplate.Render(map[string]string{ "project": project, "group": group, }) if err != nil { panic(err) } return path } // ListGroups lists the existing groups. 
func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListGroups[0:len(c.CallOptions.ListGroups):len(c.CallOptions.ListGroups)], opts...) it := &GroupIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) { var resp *monitoringpb.ListGroupsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.groupClient.ListGroups(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.Group, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GetGroup gets a single group. func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...) var resp *monitoringpb.Group err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.groupClient.GetGroup(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // CreateGroup creates a new group. 
func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.CreateGroup[0:len(c.CallOptions.CreateGroup):len(c.CallOptions.CreateGroup)], opts...) var resp *monitoringpb.Group err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.groupClient.CreateGroup(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // UpdateGroup updates an existing group. // You can change any group attributes except `name`. func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...) var resp *monitoringpb.Group err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.groupClient.UpdateGroup(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // DeleteGroup deletes an existing group. func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteGroup[0:len(c.CallOptions.DeleteGroup):len(c.CallOptions.DeleteGroup)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.groupClient.DeleteGroup(ctx, req, settings.GRPC...) return err }, opts...) return err } // ListGroupMembers lists the monitored resources that are members of a group. 
func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListGroupMembers[0:len(c.CallOptions.ListGroupMembers):len(c.CallOptions.ListGroupMembers)], opts...) it := &MonitoredResourceIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) { var resp *monitoringpb.ListGroupMembersResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.groupClient.ListGroupMembers(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.Members, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GroupIterator manages a stream of *monitoringpb.Group. type GroupIterator struct { items []*monitoringpb.Group pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
func (it *GroupIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *GroupIterator) Next() (*monitoringpb.Group, error) { var item *monitoringpb.Group if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *GroupIterator) bufLen() int { return len(it.items) } func (it *GroupIterator) takeBuf() interface{} { b := it.items it.items = nil return b } // MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource. type MonitoredResourceIterator struct { items []*monitoredrespb.MonitoredResource pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. 
func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) { var item *monitoredrespb.MonitoredResource if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *MonitoredResourceIterator) bufLen() int { return len(it.items) } func (it *MonitoredResourceIterator) takeBuf() interface{} { b := it.items it.items = nil return b } golang-google-cloud-0.9.0/monitoring/apiv3/group_client_example_test.go000066400000000000000000000063241312234511600263520ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package monitoring_test import ( "cloud.google.com/go/monitoring/apiv3" "golang.org/x/net/context" "google.golang.org/api/iterator" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" ) func ExampleNewGroupClient() { ctx := context.Background() c, err := monitoring.NewGroupClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleGroupClient_ListGroups() { ctx := context.Background() c, err := monitoring.NewGroupClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.ListGroupsRequest{ // TODO: Fill request struct fields. } it := c.ListGroups(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. 
_ = resp } } func ExampleGroupClient_GetGroup() { ctx := context.Background() c, err := monitoring.NewGroupClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.GetGroupRequest{ // TODO: Fill request struct fields. } resp, err := c.GetGroup(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleGroupClient_CreateGroup() { ctx := context.Background() c, err := monitoring.NewGroupClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.CreateGroupRequest{ // TODO: Fill request struct fields. } resp, err := c.CreateGroup(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleGroupClient_UpdateGroup() { ctx := context.Background() c, err := monitoring.NewGroupClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.UpdateGroupRequest{ // TODO: Fill request struct fields. } resp, err := c.UpdateGroup(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleGroupClient_DeleteGroup() { ctx := context.Background() c, err := monitoring.NewGroupClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.DeleteGroupRequest{ // TODO: Fill request struct fields. } err = c.DeleteGroup(ctx, req) if err != nil { // TODO: Handle error. } } func ExampleGroupClient_ListGroupMembers() { ctx := context.Background() c, err := monitoring.NewGroupClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.ListGroupMembersRequest{ // TODO: Fill request struct fields. } it := c.ListGroupMembers(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } golang-google-cloud-0.9.0/monitoring/apiv3/metric_client.go000066400000000000000000000450011312234511600237220ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package monitoring import ( "math" "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" metricpb "google.golang.org/genproto/googleapis/api/metric" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( metricProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") metricMetricDescriptorPathTemplate = gax.MustCompilePathTemplate("projects/{project}/metricDescriptors/{metric_descriptor=**}") metricMonitoredResourceDescriptorPathTemplate = gax.MustCompilePathTemplate("projects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}") ) // MetricCallOptions contains the retry settings for each method of MetricClient. 
type MetricCallOptions struct { ListMonitoredResourceDescriptors []gax.CallOption GetMonitoredResourceDescriptor []gax.CallOption ListMetricDescriptors []gax.CallOption GetMetricDescriptor []gax.CallOption CreateMetricDescriptor []gax.CallOption DeleteMetricDescriptor []gax.CallOption ListTimeSeries []gax.CallOption CreateTimeSeries []gax.CallOption } func defaultMetricClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("monitoring.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultMetricCallOptions() *MetricCallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &MetricCallOptions{ ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], GetMonitoredResourceDescriptor: retry[[2]string{"default", "idempotent"}], ListMetricDescriptors: retry[[2]string{"default", "idempotent"}], GetMetricDescriptor: retry[[2]string{"default", "idempotent"}], CreateMetricDescriptor: retry[[2]string{"default", "non_idempotent"}], DeleteMetricDescriptor: retry[[2]string{"default", "idempotent"}], ListTimeSeries: retry[[2]string{"default", "idempotent"}], CreateTimeSeries: retry[[2]string{"default", "non_idempotent"}], } } // MetricClient is a client for interacting with Stackdriver Monitoring API. type MetricClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. metricClient monitoringpb.MetricServiceClient // The call options for this service. CallOptions *MetricCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewMetricClient creates a new metric service client. // // Manages metric descriptors, monitored resource descriptors, and // time series data. 
func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultMetricClientOptions(), opts...)...) if err != nil { return nil, err } c := &MetricClient{ conn: conn, CallOptions: defaultMetricCallOptions(), metricClient: monitoringpb.NewMetricServiceClient(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *MetricClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *MetricClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *MetricClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // MetricProjectPath returns the path for the project resource. func MetricProjectPath(project string) string { path, err := metricProjectPathTemplate.Render(map[string]string{ "project": project, }) if err != nil { panic(err) } return path } // MetricMetricDescriptorPath returns the path for the metric descriptor resource. func MetricMetricDescriptorPath(project, metricDescriptor string) string { path, err := metricMetricDescriptorPathTemplate.Render(map[string]string{ "project": project, "metric_descriptor": metricDescriptor, }) if err != nil { panic(err) } return path } // MetricMonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource. 
func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor string) string { path, err := metricMonitoredResourceDescriptorPathTemplate.Render(map[string]string{ "project": project, "monitored_resource_descriptor": monitoredResourceDescriptor, }) if err != nil { panic(err) } return path } // ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...) it := &MonitoredResourceDescriptorIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { var resp *monitoringpb.ListMonitoredResourceDescriptorsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.ResourceDescriptors, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a Stackdriver account. 
func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetMonitoredResourceDescriptor[0:len(c.CallOptions.GetMonitoredResourceDescriptor):len(c.CallOptions.GetMonitoredResourceDescriptor)], opts...) var resp *monitoredrespb.MonitoredResourceDescriptor err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // ListMetricDescriptors lists metric descriptors that match a filter. This method does not require a Stackdriver account. func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListMetricDescriptors[0:len(c.CallOptions.ListMetricDescriptors):len(c.CallOptions.ListMetricDescriptors)], opts...) it := &MetricDescriptorIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) { var resp *monitoringpb.ListMetricDescriptorsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.metricClient.ListMetricDescriptors(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, "", err } return resp.MetricDescriptors, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GetMetricDescriptor gets a single metric descriptor. This method does not require a Stackdriver account. func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetMetricDescriptor[0:len(c.CallOptions.GetMetricDescriptor):len(c.CallOptions.GetMetricDescriptor)], opts...) var resp *metricpb.MetricDescriptor err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.metricClient.GetMetricDescriptor(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // CreateMetricDescriptor creates a new metric descriptor. // User-created metric descriptors define // [custom metrics](/monitoring/custom-metrics). func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.CreateMetricDescriptor[0:len(c.CallOptions.CreateMetricDescriptor):len(c.CallOptions.CreateMetricDescriptor)], opts...) var resp *metricpb.MetricDescriptor err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.metricClient.CreateMetricDescriptor(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, err } return resp, nil } // DeleteMetricDescriptor deletes a metric descriptor. Only user-created // [custom metrics](/monitoring/custom-metrics) can be deleted. func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteMetricDescriptor[0:len(c.CallOptions.DeleteMetricDescriptor):len(c.CallOptions.DeleteMetricDescriptor)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.metricClient.DeleteMetricDescriptor(ctx, req, settings.GRPC...) return err }, opts...) return err } // ListTimeSeries lists time series that match a filter. This method does not require a Stackdriver account. func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListTimeSeries[0:len(c.CallOptions.ListTimeSeries):len(c.CallOptions.ListTimeSeries)], opts...) it := &TimeSeriesIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) { var resp *monitoringpb.ListTimeSeriesResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.metricClient.ListTimeSeries(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.TimeSeries, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) 
return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // CreateTimeSeries creates or adds data to one or more time series. // The response is empty if all time series in the request were written. // If any time series could not be written, a corresponding failure message is // included in the error response. func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.CreateTimeSeries[0:len(c.CallOptions.CreateTimeSeries):len(c.CallOptions.CreateTimeSeries)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.metricClient.CreateTimeSeries(ctx, req, settings.GRPC...) return err }, opts...) return err } // MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor. type MetricDescriptorIterator struct { items []*metricpb.MetricDescriptor pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. 
func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) { var item *metricpb.MetricDescriptor if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *MetricDescriptorIterator) bufLen() int { return len(it.items) } func (it *MetricDescriptorIterator) takeBuf() interface{} { b := it.items it.items = nil return b } // MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. type MonitoredResourceDescriptorIterator struct { items []*monitoredrespb.MonitoredResourceDescriptor pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. 
func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { var item *monitoredrespb.MonitoredResourceDescriptor if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *MonitoredResourceDescriptorIterator) bufLen() int { return len(it.items) } func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { b := it.items it.items = nil return b } // TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries. type TimeSeriesIterator struct { items []*monitoringpb.TimeSeries pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. 
func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) { var item *monitoringpb.TimeSeries if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *TimeSeriesIterator) bufLen() int { return len(it.items) } func (it *TimeSeriesIterator) takeBuf() interface{} { b := it.items it.items = nil return b } golang-google-cloud-0.9.0/monitoring/apiv3/metric_client_example_test.go000066400000000000000000000103301312234511600264710ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package monitoring_test import ( "cloud.google.com/go/monitoring/apiv3" "golang.org/x/net/context" "google.golang.org/api/iterator" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" ) func ExampleNewMetricClient() { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleMetricClient_ListMonitoredResourceDescriptors() { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.ListMonitoredResourceDescriptorsRequest{ // TODO: Fill request struct fields. } it := c.ListMonitoredResourceDescriptors(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. 
} // TODO: Use resp. _ = resp } } func ExampleMetricClient_GetMonitoredResourceDescriptor() { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.GetMonitoredResourceDescriptorRequest{ // TODO: Fill request struct fields. } resp, err := c.GetMonitoredResourceDescriptor(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleMetricClient_ListMetricDescriptors() { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.ListMetricDescriptorsRequest{ // TODO: Fill request struct fields. } it := c.ListMetricDescriptors(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleMetricClient_GetMetricDescriptor() { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.GetMetricDescriptorRequest{ // TODO: Fill request struct fields. } resp, err := c.GetMetricDescriptor(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleMetricClient_CreateMetricDescriptor() { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.CreateMetricDescriptorRequest{ // TODO: Fill request struct fields. } resp, err := c.CreateMetricDescriptor(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleMetricClient_DeleteMetricDescriptor() { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.DeleteMetricDescriptorRequest{ // TODO: Fill request struct fields. } err = c.DeleteMetricDescriptor(ctx, req) if err != nil { // TODO: Handle error. 
} } func ExampleMetricClient_ListTimeSeries() { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.ListTimeSeriesRequest{ // TODO: Fill request struct fields. } it := c.ListTimeSeries(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleMetricClient_CreateTimeSeries() { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { // TODO: Handle error. } req := &monitoringpb.CreateTimeSeriesRequest{ // TODO: Fill request struct fields. } err = c.CreateTimeSeries(ctx, req) if err != nil { // TODO: Handle error. } } golang-google-cloud-0.9.0/monitoring/apiv3/mock_test.go000066400000000000000000001052331312234511600230750ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package monitoring import ( emptypb "github.com/golang/protobuf/ptypes/empty" metricpb "google.golang.org/genproto/googleapis/api/metric" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockGroupServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. monitoringpb.GroupServiceServer reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockGroupServer) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest) (*monitoringpb.ListGroupsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*monitoringpb.ListGroupsResponse), nil } func (s *mockGroupServer) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest) (*monitoringpb.Group, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*monitoringpb.Group), nil } func (s *mockGroupServer) CreateGroup(ctx context.Context, req 
*monitoringpb.CreateGroupRequest) (*monitoringpb.Group, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*monitoringpb.Group), nil } func (s *mockGroupServer) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest) (*monitoringpb.Group, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*monitoringpb.Group), nil } func (s *mockGroupServer) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockGroupServer) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest) (*monitoringpb.ListGroupMembersResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*monitoringpb.ListGroupMembersResponse), nil } type mockMetricServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. monitoringpb.MetricServiceServer reqs []proto.Message // If set, all calls return this error. 
err error // responses to return if err == nil resps []proto.Message } func (s *mockMetricServer) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest) (*monitoringpb.ListMonitoredResourceDescriptorsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*monitoringpb.ListMonitoredResourceDescriptorsResponse), nil } func (s *mockMetricServer) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest) (*monitoredrespb.MonitoredResourceDescriptor, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*monitoredrespb.MonitoredResourceDescriptor), nil } func (s *mockMetricServer) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (*monitoringpb.ListMetricDescriptorsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*monitoringpb.ListMetricDescriptorsResponse), nil } func (s *mockMetricServer) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, 
fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*metricpb.MetricDescriptor), nil } func (s *mockMetricServer) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*metricpb.MetricDescriptor), nil } func (s *mockMetricServer) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockMetricServer) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (*monitoringpb.ListTimeSeriesResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*monitoringpb.ListTimeSeriesResponse), nil } func (s *mockMetricServer) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil 
{ return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. var clientOpt option.ClientOption var ( mockGroup mockGroupServer mockMetric mockMetricServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() monitoringpb.RegisterGroupServiceServer(serv, &mockGroup) monitoringpb.RegisterMetricServiceServer(serv, &mockMetric) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestGroupServiceListGroups(t *testing.T) { var nextPageToken string = "" var groupElement *monitoringpb.Group = &monitoringpb.Group{} var group = []*monitoringpb.Group{groupElement} var expectedResponse = &monitoringpb.ListGroupsResponse{ NextPageToken: nextPageToken, Group: group, } mockGroup.err = nil mockGroup.reqs = nil mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) var formattedName string = GroupProjectPath("[PROJECT]") var request = &monitoringpb.ListGroupsRequest{ Name: formattedName, } c, err := NewGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListGroups(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Group[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestGroupServiceListGroupsError(t *testing.T) { errCode := codes.PermissionDenied mockGroup.err = gstatus.Error(errCode, "test error") var formattedName string = 
GroupProjectPath("[PROJECT]") var request = &monitoringpb.ListGroupsRequest{ Name: formattedName, } c, err := NewGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListGroups(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestGroupServiceGetGroup(t *testing.T) { var name2 string = "name2-1052831874" var displayName string = "displayName1615086568" var parentName string = "parentName1015022848" var filter string = "filter-1274492040" var isCluster bool = false var expectedResponse = &monitoringpb.Group{ Name: name2, DisplayName: displayName, ParentName: parentName, Filter: filter, IsCluster: isCluster, } mockGroup.err = nil mockGroup.reqs = nil mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") var request = &monitoringpb.GetGroupRequest{ Name: formattedName, } c, err := NewGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetGroup(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestGroupServiceGetGroupError(t *testing.T) { errCode := codes.PermissionDenied mockGroup.err = gstatus.Error(errCode, "test error") var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") var request = &monitoringpb.GetGroupRequest{ Name: formattedName, } c, err := NewGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetGroup(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc 
error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestGroupServiceCreateGroup(t *testing.T) { var name2 string = "name2-1052831874" var displayName string = "displayName1615086568" var parentName string = "parentName1015022848" var filter string = "filter-1274492040" var isCluster bool = false var expectedResponse = &monitoringpb.Group{ Name: name2, DisplayName: displayName, ParentName: parentName, Filter: filter, IsCluster: isCluster, } mockGroup.err = nil mockGroup.reqs = nil mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) var formattedName string = GroupProjectPath("[PROJECT]") var group *monitoringpb.Group = &monitoringpb.Group{} var request = &monitoringpb.CreateGroupRequest{ Name: formattedName, Group: group, } c, err := NewGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateGroup(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestGroupServiceCreateGroupError(t *testing.T) { errCode := codes.PermissionDenied mockGroup.err = gstatus.Error(errCode, "test error") var formattedName string = GroupProjectPath("[PROJECT]") var group *monitoringpb.Group = &monitoringpb.Group{} var request = &monitoringpb.CreateGroupRequest{ Name: formattedName, Group: group, } c, err := NewGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateGroup(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestGroupServiceUpdateGroup(t *testing.T) { var name string = "name3373707" 
var displayName string = "displayName1615086568" var parentName string = "parentName1015022848" var filter string = "filter-1274492040" var isCluster bool = false var expectedResponse = &monitoringpb.Group{ Name: name, DisplayName: displayName, ParentName: parentName, Filter: filter, IsCluster: isCluster, } mockGroup.err = nil mockGroup.reqs = nil mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) var group *monitoringpb.Group = &monitoringpb.Group{} var request = &monitoringpb.UpdateGroupRequest{ Group: group, } c, err := NewGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateGroup(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestGroupServiceUpdateGroupError(t *testing.T) { errCode := codes.PermissionDenied mockGroup.err = gstatus.Error(errCode, "test error") var group *monitoringpb.Group = &monitoringpb.Group{} var request = &monitoringpb.UpdateGroupRequest{ Group: group, } c, err := NewGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateGroup(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestGroupServiceDeleteGroup(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockGroup.err = nil mockGroup.reqs = nil mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") var request = &monitoringpb.DeleteGroupRequest{ Name: formattedName, } c, err := NewGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = 
c.DeleteGroup(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestGroupServiceDeleteGroupError(t *testing.T) { errCode := codes.PermissionDenied mockGroup.err = gstatus.Error(errCode, "test error") var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") var request = &monitoringpb.DeleteGroupRequest{ Name: formattedName, } c, err := NewGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteGroup(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestGroupServiceListGroupMembers(t *testing.T) { var nextPageToken string = "" var totalSize int32 = -705419236 var membersElement *monitoredrespb.MonitoredResource = &monitoredrespb.MonitoredResource{} var members = []*monitoredrespb.MonitoredResource{membersElement} var expectedResponse = &monitoringpb.ListGroupMembersResponse{ NextPageToken: nextPageToken, TotalSize: totalSize, Members: members, } mockGroup.err = nil mockGroup.reqs = nil mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") var request = &monitoringpb.ListGroupMembersRequest{ Name: formattedName, } c, err := NewGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListGroupMembers(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Members[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { 
t.Errorf("wrong response %q, want %q)", got, want) } } func TestGroupServiceListGroupMembersError(t *testing.T) { errCode := codes.PermissionDenied mockGroup.err = gstatus.Error(errCode, "test error") var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") var request = &monitoringpb.ListGroupMembersRequest{ Name: formattedName, } c, err := NewGroupClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListGroupMembers(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestMetricServiceListMonitoredResourceDescriptors(t *testing.T) { var nextPageToken string = "" var resourceDescriptorsElement *monitoredrespb.MonitoredResourceDescriptor = &monitoredrespb.MonitoredResourceDescriptor{} var resourceDescriptors = []*monitoredrespb.MonitoredResourceDescriptor{resourceDescriptorsElement} var expectedResponse = &monitoringpb.ListMonitoredResourceDescriptorsResponse{ NextPageToken: nextPageToken, ResourceDescriptors: resourceDescriptors, } mockMetric.err = nil mockMetric.reqs = nil mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) var formattedName string = MetricProjectPath("[PROJECT]") var request = &monitoringpb.ListMonitoredResourceDescriptorsRequest{ Name: formattedName, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.ResourceDescriptors[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { 
t.Errorf("wrong response %q, want %q)", got, want) } } func TestMetricServiceListMonitoredResourceDescriptorsError(t *testing.T) { errCode := codes.PermissionDenied mockMetric.err = gstatus.Error(errCode, "test error") var formattedName string = MetricProjectPath("[PROJECT]") var request = &monitoringpb.ListMonitoredResourceDescriptorsRequest{ Name: formattedName, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestMetricServiceGetMonitoredResourceDescriptor(t *testing.T) { var name2 string = "name2-1052831874" var type_ string = "type3575610" var displayName string = "displayName1615086568" var description string = "description-1724546052" var expectedResponse = &monitoredrespb.MonitoredResourceDescriptor{ Name: name2, Type: type_, DisplayName: displayName, Description: description, } mockMetric.err = nil mockMetric.reqs = nil mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) var formattedName string = MetricMonitoredResourceDescriptorPath("[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]") var request = &monitoringpb.GetMonitoredResourceDescriptorRequest{ Name: formattedName, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetMonitoredResourceDescriptor(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestMetricServiceGetMonitoredResourceDescriptorError(t *testing.T) { errCode := codes.PermissionDenied 
mockMetric.err = gstatus.Error(errCode, "test error") var formattedName string = MetricMonitoredResourceDescriptorPath("[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]") var request = &monitoringpb.GetMonitoredResourceDescriptorRequest{ Name: formattedName, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetMonitoredResourceDescriptor(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestMetricServiceListMetricDescriptors(t *testing.T) { var nextPageToken string = "" var metricDescriptorsElement *metricpb.MetricDescriptor = &metricpb.MetricDescriptor{} var metricDescriptors = []*metricpb.MetricDescriptor{metricDescriptorsElement} var expectedResponse = &monitoringpb.ListMetricDescriptorsResponse{ NextPageToken: nextPageToken, MetricDescriptors: metricDescriptors, } mockMetric.err = nil mockMetric.reqs = nil mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) var formattedName string = MetricProjectPath("[PROJECT]") var request = &monitoringpb.ListMetricDescriptorsRequest{ Name: formattedName, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListMetricDescriptors(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.MetricDescriptors[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestMetricServiceListMetricDescriptorsError(t *testing.T) { errCode := codes.PermissionDenied mockMetric.err = 
gstatus.Error(errCode, "test error") var formattedName string = MetricProjectPath("[PROJECT]") var request = &monitoringpb.ListMetricDescriptorsRequest{ Name: formattedName, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListMetricDescriptors(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestMetricServiceGetMetricDescriptor(t *testing.T) { var name2 string = "name2-1052831874" var type_ string = "type3575610" var unit string = "unit3594628" var description string = "description-1724546052" var displayName string = "displayName1615086568" var expectedResponse = &metricpb.MetricDescriptor{ Name: name2, Type: type_, Unit: unit, Description: description, DisplayName: displayName, } mockMetric.err = nil mockMetric.reqs = nil mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) var formattedName string = MetricMetricDescriptorPath("[PROJECT]", "[METRIC_DESCRIPTOR]") var request = &monitoringpb.GetMetricDescriptorRequest{ Name: formattedName, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetMetricDescriptor(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestMetricServiceGetMetricDescriptorError(t *testing.T) { errCode := codes.PermissionDenied mockMetric.err = gstatus.Error(errCode, "test error") var formattedName string = MetricMetricDescriptorPath("[PROJECT]", "[METRIC_DESCRIPTOR]") var request = &monitoringpb.GetMetricDescriptorRequest{ Name: formattedName, } c, err := 
NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetMetricDescriptor(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestMetricServiceCreateMetricDescriptor(t *testing.T) { var name2 string = "name2-1052831874" var type_ string = "type3575610" var unit string = "unit3594628" var description string = "description-1724546052" var displayName string = "displayName1615086568" var expectedResponse = &metricpb.MetricDescriptor{ Name: name2, Type: type_, Unit: unit, Description: description, DisplayName: displayName, } mockMetric.err = nil mockMetric.reqs = nil mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) var formattedName string = MetricProjectPath("[PROJECT]") var metricDescriptor *metricpb.MetricDescriptor = &metricpb.MetricDescriptor{} var request = &monitoringpb.CreateMetricDescriptorRequest{ Name: formattedName, MetricDescriptor: metricDescriptor, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateMetricDescriptor(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestMetricServiceCreateMetricDescriptorError(t *testing.T) { errCode := codes.PermissionDenied mockMetric.err = gstatus.Error(errCode, "test error") var formattedName string = MetricProjectPath("[PROJECT]") var metricDescriptor *metricpb.MetricDescriptor = &metricpb.MetricDescriptor{} var request = &monitoringpb.CreateMetricDescriptorRequest{ Name: formattedName, MetricDescriptor: metricDescriptor, } c, err := 
NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateMetricDescriptor(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestMetricServiceDeleteMetricDescriptor(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockMetric.err = nil mockMetric.reqs = nil mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) var formattedName string = MetricMetricDescriptorPath("[PROJECT]", "[METRIC_DESCRIPTOR]") var request = &monitoringpb.DeleteMetricDescriptorRequest{ Name: formattedName, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteMetricDescriptor(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestMetricServiceDeleteMetricDescriptorError(t *testing.T) { errCode := codes.PermissionDenied mockMetric.err = gstatus.Error(errCode, "test error") var formattedName string = MetricMetricDescriptorPath("[PROJECT]", "[METRIC_DESCRIPTOR]") var request = &monitoringpb.DeleteMetricDescriptorRequest{ Name: formattedName, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteMetricDescriptor(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestMetricServiceListTimeSeries(t *testing.T) { var nextPageToken string = "" var timeSeriesElement *monitoringpb.TimeSeries = &monitoringpb.TimeSeries{} var timeSeries = []*monitoringpb.TimeSeries{timeSeriesElement} var expectedResponse = 
&monitoringpb.ListTimeSeriesResponse{ NextPageToken: nextPageToken, TimeSeries: timeSeries, } mockMetric.err = nil mockMetric.reqs = nil mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) var formattedName string = MetricProjectPath("[PROJECT]") var filter string = "filter-1274492040" var interval *monitoringpb.TimeInterval = &monitoringpb.TimeInterval{} var view monitoringpb.ListTimeSeriesRequest_TimeSeriesView = monitoringpb.ListTimeSeriesRequest_FULL var request = &monitoringpb.ListTimeSeriesRequest{ Name: formattedName, Filter: filter, Interval: interval, View: view, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListTimeSeries(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.TimeSeries[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestMetricServiceListTimeSeriesError(t *testing.T) { errCode := codes.PermissionDenied mockMetric.err = gstatus.Error(errCode, "test error") var formattedName string = MetricProjectPath("[PROJECT]") var filter string = "filter-1274492040" var interval *monitoringpb.TimeInterval = &monitoringpb.TimeInterval{} var view monitoringpb.ListTimeSeriesRequest_TimeSeriesView = monitoringpb.ListTimeSeriesRequest_FULL var request = &monitoringpb.ListTimeSeriesRequest{ Name: formattedName, Filter: filter, Interval: interval, View: view, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListTimeSeries(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := 
st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestMetricServiceCreateTimeSeries(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockMetric.err = nil mockMetric.reqs = nil mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) var formattedName string = MetricProjectPath("[PROJECT]") var timeSeries []*monitoringpb.TimeSeries = nil var request = &monitoringpb.CreateTimeSeriesRequest{ Name: formattedName, TimeSeries: timeSeries, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.CreateTimeSeries(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestMetricServiceCreateTimeSeriesError(t *testing.T) { errCode := codes.PermissionDenied mockMetric.err = gstatus.Error(errCode, "test error") var formattedName string = MetricProjectPath("[PROJECT]") var timeSeries []*monitoringpb.TimeSeries = nil var request = &monitoringpb.CreateTimeSeriesRequest{ Name: formattedName, TimeSeries: timeSeries, } c, err := NewMetricClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.CreateTimeSeries(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } golang-google-cloud-0.9.0/old-news.md000066400000000000000000000264251312234511600174260ustar00rootroot00000000000000_December 12, 2016_ Beta release of BigQuery, DataStore, Logging and Storage. See the [blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html). Also, BigQuery now supports structs. Read a row directly into a struct with `RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`. You can also use field tags. 
See the [package documentation][cloud-bigquery-ref] for details. _December 5, 2016_ More changes to BigQuery: * The `ValueList` type was removed. It is no longer necessary. Instead of ```go var v ValueList ... it.Next(&v) .. ``` use ```go var v []Value ... it.Next(&v) ... ``` * Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or `ValueList` would append to the slice. Now each call resets the size to zero first. * Schema inference will infer the SQL type BYTES for a struct field of type []byte. Previously it inferred STRING. * The types `uint`, `uint64` and `uintptr` are no longer supported in schema inference. BigQuery's integer type is INT64, and those types may hold values that are not correctly represented in a 64-bit signed integer. * The SQL types DATE, TIME and DATETIME are now supported. They correspond to the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil` package. _November 17, 2016_ Change to BigQuery: values from INTEGER columns will now be returned as int64, not int. This will avoid errors arising from large values on 32-bit systems. _November 8, 2016_ New datastore feature: datastore now encodes your nested Go structs as Entity values, instead of a flattened list of the embedded struct's fields. This means that you may now have twice-nested slices, eg. ```go type State struct { Cities []struct{ Populations []int } } ``` See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for more details. _November 8, 2016_ Breaking changes to datastore: contexts no longer hold namespaces; instead you must set a key's namespace explicitly. Also, key functions have been changed and renamed. * The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method: ```go q := datastore.NewQuery("Kind").Namespace("ns") ``` * All the fields of Key are exported. 
That means you can construct any Key with a struct literal: ```go k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"} ``` * As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed. * `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace ```go NewIncompleteKey(ctx, kind, parent) ``` with ```go IncompleteKey(kind, parent) ``` and if you do use namespaces, make sure you set the namespace on the returned key. * `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace ```go NewKey(ctx, kind, name, 0, parent) NewKey(ctx, kind, "", id, parent) ``` with ```go NameKey(kind, name, parent) IDKey(kind, id, parent) ``` and if you do use namespaces, make sure you set the namespace on the returned key. * The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`. * The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection. See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for more details. _October 27, 2016_ Breaking change to bigquery: `NewGCSReference` is now a function, not a method on `Client`. New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling loading data into a table from a file or any `io.Reader`. _October 21, 2016_ Breaking change to pubsub: removed `pubsub.Done`. Use `iterator.Done` instead, where `iterator` is the package `google.golang.org/api/iterator`. _October 19, 2016_ Breaking changes to cloud.google.com/go/bigquery: * Client.Table and Client.OpenTable have been removed. Replace ```go client.OpenTable("project", "dataset", "table") ``` with ```go client.DatasetInProject("project", "dataset").Table("table") ``` * Client.CreateTable has been removed. 
Replace ```go client.CreateTable(ctx, "project", "dataset", "table") ``` with ```go client.DatasetInProject("project", "dataset").Table("table").Create(ctx) ``` * Dataset.ListTables have been replaced with Dataset.Tables. Replace ```go tables, err := ds.ListTables(ctx) ``` with ```go it := ds.Tables(ctx) for { table, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: use table. } ``` * Client.Read has been replaced with Job.Read, Table.Read and Query.Read. Replace ```go it, err := client.Read(ctx, job) ``` with ```go it, err := job.Read(ctx) ``` and similarly for reading from tables or queries. * The iterator returned from the Read methods is now named RowIterator. Its behavior is closer to the other iterators in these libraries. It no longer supports the Schema method; see the next item. Replace ```go for it.Next(ctx) { var vals ValueList if err := it.Get(&vals); err != nil { // TODO: Handle error. } // TODO: use vals. } if err := it.Err(); err != nil { // TODO: Handle error. } ``` with ``` for { var vals ValueList err := it.Next(&vals) if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: use vals. } ``` Instead of the `RecordsPerRequest(n)` option, write ```go it.PageInfo().MaxSize = n ``` Instead of the `StartIndex(i)` option, write ```go it.StartIndex = i ``` * ValueLoader.Load now takes a Schema in addition to a slice of Values. Replace ```go func (vl *myValueLoader) Load(v []bigquery.Value) ``` with ```go func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema) ``` * Table.Patch is replace by Table.Update. Replace ```go p := table.Patch() p.Description("new description") metadata, err := p.Apply(ctx) ``` with ```go metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{ Description: "new description", }) ``` * Client.Copy is replaced by separate methods for each of its four functions. All options have been replaced by struct fields. 
* To load data from Google Cloud Storage into a table, use Table.LoaderFrom. Replace ```go client.Copy(ctx, table, gcsRef) ``` with ```go table.LoaderFrom(gcsRef).Run(ctx) ``` Instead of passing options to Copy, set fields on the Loader: ```go loader := table.LoaderFrom(gcsRef) loader.WriteDisposition = bigquery.WriteTruncate ``` * To extract data from a table into Google Cloud Storage, use Table.ExtractorTo. Set fields on the returned Extractor instead of passing options. Replace ```go client.Copy(ctx, gcsRef, table) ``` with ```go table.ExtractorTo(gcsRef).Run(ctx) ``` * To copy data into a table from one or more other tables, use Table.CopierFrom. Set fields on the returned Copier instead of passing options. Replace ```go client.Copy(ctx, dstTable, srcTable) ``` with ```go dst.Table.CopierFrom(srcTable).Run(ctx) ``` * To start a query job, create a Query and call its Run method. Set fields on the query instead of passing options. Replace ```go client.Copy(ctx, table, query) ``` with ```go query.Run(ctx) ``` * Table.NewUploader has been renamed to Table.Uploader. Instead of options, configure an Uploader by setting its fields. Replace ```go u := table.NewUploader(bigquery.UploadIgnoreUnknownValues()) ``` with ```go u := table.NewUploader(bigquery.UploadIgnoreUnknownValues()) u.IgnoreUnknownValues = true ``` _October 10, 2016_ Breaking changes to cloud.google.com/go/storage: * AdminClient replaced by methods on Client. Replace ```go adminClient.CreateBucket(ctx, bucketName, attrs) ``` with ```go client.Bucket(bucketName).Create(ctx, projectID, attrs) ``` * BucketHandle.List replaced by BucketHandle.Objects. Replace ```go for query != nil { objs, err := bucket.List(d.ctx, query) if err != nil { ... } query = objs.Next for _, obj := range objs.Results { fmt.Println(obj) } } ``` with ```go iter := bucket.Objects(d.ctx, query) for { obj, err := iter.Next() if err == iterator.Done { break } if err != nil { ... 
} fmt.Println(obj) } ``` (The `iterator` package is at `google.golang.org/api/iterator`.) Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`. Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`. * ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom. Replace ```go attrs, err := src.CopyTo(ctx, dst, nil) ``` with ```go attrs, err := dst.CopierFrom(src).Run(ctx) ``` Replace ```go attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContextType: "text/html"}) ``` with ```go c := dst.CopierFrom(src) c.ContextType = "text/html" attrs, err := c.Run(ctx) ``` * ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom. Replace ```go attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil) ``` with ```go attrs, err := dst.ComposerFrom(src1, src2).Run(ctx) ``` * ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate. Replace ```go attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContextType: "text/html"}) ``` with ```go attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContextType: "text/html"}) ``` * ObjectHandle.WithConditions replaced by ObjectHandle.If. Replace ```go obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen)) ``` with ```go obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen}) ``` Replace ```go obj.WithConditions(storage.IfGenerationMatch(0)) ``` with ```go obj.If(storage.Conditions{DoesNotExist: true}) ``` * `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`). _October 6, 2016_ Package preview/logging deleted. Use logging instead. _September 27, 2016_ Logging client replaced with preview version (see below). _September 8, 2016_ * New clients for some of Google's Machine Learning APIs: Vision, Speech, and Natural Language. * Preview version of a new [Stackdriver Logging][cloud-logging] client in [`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging). 
This client uses gRPC as its transport layer, and supports log reading, sinks and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly. golang-google-cloud-0.9.0/profiler/000077500000000000000000000000001312234511600171655ustar00rootroot00000000000000golang-google-cloud-0.9.0/profiler/mocks/000077500000000000000000000000001312234511600203015ustar00rootroot00000000000000golang-google-cloud-0.9.0/profiler/mocks/mock_profiler_client.go000066400000000000000000000054451312234511600250310ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Automatically generated by MockGen. DO NOT EDIT! 
// Source: google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2 (interfaces: ProfilerServiceClient) package mocks import ( gomock "github.com/golang/mock/gomock" context "golang.org/x/net/context" v2 "google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2" grpc "google.golang.org/grpc" ) // Mock of ProfilerServiceClient interface type MockProfilerServiceClient struct { ctrl *gomock.Controller recorder *_MockProfilerServiceClientRecorder } // Recorder for MockProfilerServiceClient (not exported) type _MockProfilerServiceClientRecorder struct { mock *MockProfilerServiceClient } func NewMockProfilerServiceClient(ctrl *gomock.Controller) *MockProfilerServiceClient { mock := &MockProfilerServiceClient{ctrl: ctrl} mock.recorder = &_MockProfilerServiceClientRecorder{mock} return mock } func (_m *MockProfilerServiceClient) EXPECT() *_MockProfilerServiceClientRecorder { return _m.recorder } func (_m *MockProfilerServiceClient) CreateProfile(_param0 context.Context, _param1 *v2.CreateProfileRequest, _param2 ...grpc.CallOption) (*v2.Profile, error) { _s := []interface{}{_param0, _param1} for _, _x := range _param2 { _s = append(_s, _x) } ret := _m.ctrl.Call(_m, "CreateProfile", _s...) ret0, _ := ret[0].(*v2.Profile) ret1, _ := ret[1].(error) return ret0, ret1 } func (_mr *_MockProfilerServiceClientRecorder) CreateProfile(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { _s := append([]interface{}{arg0, arg1}, arg2...) return _mr.mock.ctrl.RecordCall(_mr.mock, "CreateProfile", _s...) } func (_m *MockProfilerServiceClient) UpdateProfile(_param0 context.Context, _param1 *v2.UpdateProfileRequest, _param2 ...grpc.CallOption) (*v2.Profile, error) { _s := []interface{}{_param0, _param1} for _, _x := range _param2 { _s = append(_s, _x) } ret := _m.ctrl.Call(_m, "UpdateProfile", _s...) 
ret0, _ := ret[0].(*v2.Profile) ret1, _ := ret[1].(error) return ret0, ret1 } func (_mr *_MockProfilerServiceClientRecorder) UpdateProfile(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { _s := append([]interface{}{arg0, arg1}, arg2...) return _mr.mock.ctrl.RecordCall(_mr.mock, "UpdateProfile", _s...) } golang-google-cloud-0.9.0/profiler/profiler.go000066400000000000000000000254341312234511600213460ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package profiler is a client for the Google Cloud Profiler service. // // This package is still experimental and subject to change. // // Calling Start will start a goroutine to collect profiles and // upload to Cloud Profiler server, at the rhythm specified by // the server. // // The caller should provide the target string in the config so Cloud // Profiler knows how to group the profile data. Otherwise the target // string is set to "unknown". // // Optionally DebugLogging can be set in the config to enable detailed // logging from profiler. // // Start should only be called once. The first call will start // the profiling goroutine. Any additional calls will be ignored. 
package profiler import ( "bytes" "errors" "fmt" "log" "runtime/pprof" "sort" "strings" "sync" "time" gcemd "cloud.google.com/go/compute/metadata" "cloud.google.com/go/internal/version" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/option" "google.golang.org/api/transport" pb "google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2" edpb "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" grpcmd "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) var ( config = &Config{} startOnce sync.Once // getProjectID, getInstanceName, getZone, startCPUProfile, stopCPUProfile, // writeHeapProfile and sleep are overrideable for testing. getProjectID = gcemd.ProjectID getInstanceName = gcemd.InstanceName getZone = gcemd.Zone startCPUProfile = pprof.StartCPUProfile stopCPUProfile = pprof.StopCPUProfile writeHeapProfile = pprof.WriteHeapProfile sleep = gax.Sleep ) const ( apiAddress = "cloudprofiler.googleapis.com:443" xGoogAPIMetadata = "x-goog-api-client" deploymentKeyMetadata = "x-profiler-deployment-key-bin" zoneNameLabel = "zone" instanceLabel = "instance" scope = "https://www.googleapis.com/auth/monitoring.write" initialBackoff = time.Second // Ensure the agent will recover within 1 hour. maxBackoff = time.Hour backoffMultiplier = 1.3 // Backoff envelope increases by this factor on each retry. retryInfoMetadata = "google.rpc.retryinfo-bin" ) // Config is the profiler configuration. type Config struct { // Target groups related deployments together, defaults to "unknown". Target string // DebugLogging enables detailed debug logging from profiler. DebugLogging bool // ProjectID is the ID of the cloud project to use instead of // the one read from the VM metadata server. Typically for testing. 
ProjectID string // InstanceName is the name of the VM instance to use instead of // the one read from the VM metadata server. Typically for testing. InstanceName string // ZoneName is the name of the zone to use instead of // the one read from the VM metadata server. Typically for testing. ZoneName string // APIAddr is the HTTP endpoint to use to connect to the profiler // agent API. Defaults to the production environment, overridable // for testing. APIAddr string } // Start starts a goroutine to collect and upload profiles. // See package level documentation for details. func Start(cfg *Config) error { var err error startOnce.Do(func() { initializeConfig(cfg) ctx := context.Background() var ts oauth2.TokenSource ts, err = google.DefaultTokenSource(ctx, scope) if err != nil { debugLog("failed to get application default credentials: %v", err) return } opts := []option.ClientOption{ option.WithEndpoint(config.APIAddr), option.WithTokenSource(ts), option.WithScopes(scope), } var conn *grpc.ClientConn conn, err = transport.DialGRPC(ctx, opts...) if err != nil { debugLog("failed to dial GRPC: %v", err) return } var d *pb.Deployment d, err = initializeDeployment() if err != nil { debugLog("failed to initialize deployment: %v", err) return } a, ctx := initializeResources(ctx, conn, d) go pollProfilerService(ctx, a) }) return err } func debugLog(format string, e ...interface{}) { if config.DebugLogging { log.Printf(format, e...) } } // agent polls Cloud Profiler server for instructions on behalf of // a task, and collects and uploads profiles as requested. type agent struct { client *client deployment *pb.Deployment creationErrorCount int64 } // abortedBackoffDuration retrieves the retry duration from gRPC trailing // metadata, which is set by Cloud Profiler server. 
func abortedBackoffDuration(md grpcmd.MD) (time.Duration, error) { elem := md[retryInfoMetadata] if len(elem) <= 0 { return 0, errors.New("no retry info") } var retryInfo edpb.RetryInfo if err := proto.Unmarshal([]byte(elem[0]), &retryInfo); err != nil { return 0, err } else if time, err := ptypes.Duration(retryInfo.RetryDelay); err != nil { return 0, err } else { if time < 0 { return 0, errors.New("negative retry duration") } return time, nil } } type retryer struct { backoff gax.Backoff md grpcmd.MD } func (r *retryer) Retry(err error) (time.Duration, bool) { st, _ := status.FromError(err) if st != nil && st.Code() == codes.Aborted { dur, err := abortedBackoffDuration(r.md) if err == nil { return dur, true } debugLog("failed to get backoff duration: %v", err) } return r.backoff.Pause(), true } // createProfile talks to Cloud Profiler server to create profile. In // case of error, the goroutine will sleep and retry. Sleep duration may // be specified by the server. Otherwise it will be an exponentially // increasing value, bounded by maxBackoff. 
func (a *agent) createProfile(ctx context.Context) *pb.Profile { req := pb.CreateProfileRequest{ Deployment: a.deployment, ProfileType: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP}, } var p *pb.Profile md := grpcmd.New(map[string]string{}) gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error p, err = a.client.client.CreateProfile(ctx, &req, grpc.Trailer(&md)) return err }, gax.WithRetry(func() gax.Retryer { return &retryer{ backoff: gax.Backoff{ Initial: initialBackoff, Max: maxBackoff, Multiplier: backoffMultiplier, }, md: md, } })) return p } func (a *agent) profileAndUpload(ctx context.Context, p *pb.Profile) { var prof bytes.Buffer pt := p.GetProfileType() switch pt { case pb.ProfileType_CPU: duration, err := ptypes.Duration(p.Duration) if err != nil { debugLog("failed to get profile duration: %v", err) return } if err := startCPUProfile(&prof); err != nil { debugLog("failed to start CPU profile: %v", err) return } sleep(ctx, duration) stopCPUProfile() case pb.ProfileType_HEAP: if err := writeHeapProfile(&prof); err != nil { debugLog("failed to write heap profile: %v", err) return } default: debugLog("unexpected profile type: %v", pt) return } p.ProfileBytes = prof.Bytes() p.Labels = a.deployment.Labels req := pb.UpdateProfileRequest{Profile: p} // Upload profile, discard profile in case of error. _, err := a.client.client.UpdateProfile(ctx, &req) if err != nil { debugLog("failed to upload profile: %v", err) } } // client is a client for interacting with Cloud Profiler API. type client struct { // gRPC API client. client pb.ProfilerServiceClient // Metadata for google API to be sent with each request. xGoogHeader []string // Metadata for Cloud Profiler API to be sent with each request. profilerHeader []string } // setProfilerHeader sets the unique key string for a deployment target in // the `x-profiler-deployment-key-bin` header passed on each request. // Intended for use by Cloud Profiler agents. 
func (c *client) setProfilerHeader(d *pb.Deployment) { labels := make([]string, 0, len(d.Labels)) for k, v := range d.Labels { labels = append(labels, fmt.Sprintf("%s|%s", k, v)) } sort.Strings(labels) key := d.ProjectId + "##" + d.Target + "##" + strings.Join(labels, "#") c.profilerHeader = []string{key} } // setXGoogHeader sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *client) setXGoogHeader(keyval ...string) { kv := append([]string{"gl-go", version.Go(), "gccl", version.Repo}, keyval...) kv = append(kv, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } func (c *client) insertMetadata(ctx context.Context) context.Context { md, _ := grpcmd.FromOutgoingContext(ctx) md = md.Copy() md[xGoogAPIMetadata] = c.xGoogHeader md[deploymentKeyMetadata] = c.profilerHeader return grpcmd.NewOutgoingContext(ctx, md) } func initializeDeployment() (*pb.Deployment, error) { var projectID, instance, zone string var err error if config.ProjectID != "" { projectID = config.ProjectID } else { projectID, err = getProjectID() if err != nil { return nil, err } } if config.InstanceName != "" { instance = config.InstanceName } else { instance, err = getInstanceName() if err != nil { return nil, err } } if config.ZoneName != "" { zone = config.ZoneName } else { zone, err = getZone() if err != nil { return nil, err } } labels := make(map[string]string) labels[zoneNameLabel] = zone labels[instanceLabel] = instance return &pb.Deployment{ ProjectId: projectID, Target: config.Target, Labels: labels, }, nil } func initializeResources(ctx context.Context, conn *grpc.ClientConn, d *pb.Deployment) (*agent, context.Context) { c := &client{ client: pb.NewProfilerServiceClient(conn), } c.setXGoogHeader() c.setProfilerHeader(d) ctx = c.insertMetadata(ctx) return &agent{ client: c, deployment: d, }, ctx } func initializeConfig(cfg *Config) { *config = 
*cfg if config.Target == "" { config.Target = "unknown" } if config.APIAddr == "" { config.APIAddr = apiAddress } } // pollProfilerService starts an endless loop to poll Cloud Profiler // server for instructions, and collects and uploads profiles as // requested. func pollProfilerService(ctx context.Context, a *agent) { for { p := a.createProfile(ctx) a.profileAndUpload(ctx, p) } } golang-google-cloud-0.9.0/profiler/profiler_example_test.go000066400000000000000000000020751312234511600241140ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package profiler_test import ( "cloud.google.com/go/profiler" ) func ExampleStart() { // The caller should provide the target string in the config so Cloud // Profiler knows how to group the profile data. Otherwise the target // string is set to "unknown". // // Optionally DebugLogging can be set in the config to enable detailed // logging from profiler. err := profiler.Start(&profiler.Config{Target: "my-target"}) if err != nil { //TODO: Handle error. } } golang-google-cloud-0.9.0/profiler/profiler_test.go000066400000000000000000000227461312234511600224100ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package profiler import ( "errors" "io" "reflect" "strings" "testing" "time" "cloud.google.com/go/profiler/mocks" "github.com/golang/mock/gomock" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2" edpb "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc/codes" grpcmd "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) const ( testProjectID = "test-project-ID" testInstanceName = "test-instance-name" testZoneName = "test-zone-name" testTarget = "test-target" ) func createTestDeployment() *pb.Deployment { labels := make(map[string]string) labels[zoneNameLabel] = testZoneName labels[instanceLabel] = testInstanceName return &pb.Deployment{ ProjectId: testProjectID, Target: testTarget, Labels: labels, } } func createTestAgent(psc pb.ProfilerServiceClient) *agent { c := &client{client: psc} a := &agent{ client: c, deployment: createTestDeployment(), } return a } func createTrailers(dur time.Duration) map[string]string { b, _ := proto.Marshal(&edpb.RetryInfo{ RetryDelay: ptypes.DurationProto(dur), }) return map[string]string{ retryInfoMetadata: string(b), } } func TestCreateProfile(t *testing.T) { ctx := context.Background() ctrl := gomock.NewController(t) defer ctrl.Finish() mpc := mocks.NewMockProfilerServiceClient(ctrl) a := createTestAgent(mpc) p := &pb.Profile{Name: "test_profile"} wantRequest := pb.CreateProfileRequest{ Deployment: a.deployment, ProfileType: 
[]pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP}, } mpc.EXPECT().CreateProfile(ctx, gomock.Eq(&wantRequest), gomock.Any()).Times(1).Return(p, nil) gotP := a.createProfile(ctx) if !reflect.DeepEqual(gotP, p) { t.Errorf("CreateProfile() got wrong profile, got %v, want %v", gotP, p) } } func TestProfileAndUpload(t *testing.T) { ctx := context.Background() ctrl := gomock.NewController(t) defer ctrl.Finish() errFunc := func(io.Writer) error { return errors.New("") } testDuration := time.Second * 5 tests := []struct { profileType pb.ProfileType duration *time.Duration startCPUProfileFunc func(io.Writer) error writeHeapProfileFunc func(io.Writer) error wantBytes []byte }{ { profileType: pb.ProfileType_CPU, duration: &testDuration, startCPUProfileFunc: func(w io.Writer) error { w.Write([]byte{1}) return nil }, writeHeapProfileFunc: errFunc, wantBytes: []byte{1}, }, { profileType: pb.ProfileType_CPU, startCPUProfileFunc: errFunc, writeHeapProfileFunc: errFunc, }, { profileType: pb.ProfileType_CPU, duration: &testDuration, startCPUProfileFunc: func(w io.Writer) error { w.Write([]byte{2}) return nil }, writeHeapProfileFunc: func(w io.Writer) error { w.Write([]byte{3}) return nil }, wantBytes: []byte{2}, }, { profileType: pb.ProfileType_HEAP, startCPUProfileFunc: errFunc, writeHeapProfileFunc: func(w io.Writer) error { w.Write([]byte{4}) return nil }, wantBytes: []byte{4}, }, { profileType: pb.ProfileType_HEAP, startCPUProfileFunc: errFunc, writeHeapProfileFunc: errFunc, }, { profileType: pb.ProfileType_HEAP, startCPUProfileFunc: func(w io.Writer) error { w.Write([]byte{5}) return nil }, writeHeapProfileFunc: func(w io.Writer) error { w.Write([]byte{6}) return nil }, wantBytes: []byte{6}, }, { profileType: pb.ProfileType_PROFILE_TYPE_UNSPECIFIED, startCPUProfileFunc: func(w io.Writer) error { w.Write([]byte{7}) return nil }, writeHeapProfileFunc: func(w io.Writer) error { w.Write([]byte{8}) return nil }, }, } for _, tt := range tests { mpc := 
mocks.NewMockProfilerServiceClient(ctrl) a := createTestAgent(mpc) startCPUProfile = tt.startCPUProfileFunc stopCPUProfile = func() {} writeHeapProfile = tt.writeHeapProfileFunc var gotSleep *time.Duration sleep = func(ctx context.Context, d time.Duration) error { gotSleep = &d return nil } p := &pb.Profile{ProfileType: tt.profileType} if tt.duration != nil { p.Duration = ptypes.DurationProto(*tt.duration) } if tt.wantBytes != nil { wantProfile := &pb.Profile{ ProfileType: p.ProfileType, Duration: p.Duration, } wantProfile.Labels = a.deployment.Labels wantProfile.ProfileBytes = tt.wantBytes wantRequest := pb.UpdateProfileRequest{ Profile: wantProfile, } mpc.EXPECT().UpdateProfile(ctx, gomock.Eq(&wantRequest)).Times(1) } else { mpc.EXPECT().UpdateProfile(gomock.Any(), gomock.Any()).MaxTimes(0) } a.profileAndUpload(ctx, p) if tt.duration == nil { if gotSleep != nil { t.Errorf("profileAndUpload(%v) slept for: %v, want no sleep", p, gotSleep) } } else { if gotSleep == nil { t.Errorf("profileAndUpload(%v) didn't sleep, want sleep for: %v", p, tt.duration) } else if *gotSleep != *tt.duration { t.Errorf("profileAndUpload(%v) slept for wrong duration, got: %v, want: %v", p, gotSleep, tt.duration) } } } } func TestRetry(t *testing.T) { normalDuration := time.Second * 3 negativeDuration := time.Second * -3 tests := []struct { trailers map[string]string wantPause *time.Duration }{ { createTrailers(normalDuration), &normalDuration, }, { createTrailers(negativeDuration), nil, }, { map[string]string{retryInfoMetadata: "wrong format"}, nil, }, { map[string]string{}, nil, }, } for _, tt := range tests { md := grpcmd.New(tt.trailers) r := &retryer{ backoff: gax.Backoff{ Initial: initialBackoff, Max: maxBackoff, Multiplier: backoffMultiplier, }, md: md, } pause, shouldRetry := r.Retry(status.Error(codes.Aborted, "")) if !shouldRetry { t.Error("retryer.Retry() returned shouldRetry false, want true") } if tt.wantPause != nil { if pause != *tt.wantPause { t.Errorf("retryer.Retry() 
returned wrong pause, got: %v, want: %v", pause, tt.wantPause) } } else { if pause > initialBackoff { t.Errorf("retryer.Retry() returned wrong pause, got: %v, want: < %v", pause, initialBackoff) } } } md := grpcmd.New(map[string]string{}) r := &retryer{ backoff: gax.Backoff{ Initial: initialBackoff, Max: maxBackoff, Multiplier: backoffMultiplier, }, md: md, } for i := 0; i < 100; i++ { pause, shouldRetry := r.Retry(errors.New("")) if !shouldRetry { t.Errorf("retryer.Retry() called %v times, returned shouldRetry false, want true", i) } if pause > maxBackoff { t.Errorf("retryer.Retry() called %v times, returned wrong pause, got: %v, want: < %v", i, pause, maxBackoff) } } } func TestInitializeResources(t *testing.T) { d := createTestDeployment() ctx := context.Background() a, ctx := initializeResources(ctx, nil, d) if xg := a.client.xGoogHeader; len(xg) == 0 { t.Errorf("initializeResources() sets empty xGoogHeader") } else { if !strings.Contains(xg[0], "gl-go/") { t.Errorf("initializeResources() sets wrong xGoogHeader, got: %v, want gl-go key", xg[0]) } if !strings.Contains(xg[0], "gccl/") { t.Errorf("initializeResources() sets wrong xGoogHeader, got: %v, want gccl key", xg[0]) } if !strings.Contains(xg[0], "gax/") { t.Errorf("initializeResources() sets wrong xGoogHeader, got: %v, want gax key", xg[0]) } if !strings.Contains(xg[0], "grpc/") { t.Errorf("initializeResources() sets wrong xGoogHeader, got: %v, want grpc key", xg[0]) } } wantPH := "test-project-ID##test-target##instance|test-instance-name#zone|test-zone-name" if ph := a.client.profilerHeader; len(ph) == 0 { t.Errorf("initializeResources() sets empty profilerHeader") } else if ph[0] != wantPH { t.Errorf("initializeResources() sets wrong profilerHeader, got: %v, want: %v", ph[0], wantPH) } md, _ := grpcmd.FromOutgoingContext(ctx) if !reflect.DeepEqual(md[xGoogAPIMetadata], a.client.xGoogHeader) { t.Errorf("md[%v] = %v, want equal xGoogHeader = %v", xGoogAPIMetadata, md[xGoogAPIMetadata], 
a.client.xGoogHeader) } if !reflect.DeepEqual(md[deploymentKeyMetadata], a.client.profilerHeader) { t.Errorf("md[%v] = %v, want equal profilerHeader = %v", deploymentKeyMetadata, md[deploymentKeyMetadata], a.client.profilerHeader) } } func TestInitializeDeployment(t *testing.T) { getProjectID = func() (string, error) { return testProjectID, nil } getInstanceName = func() (string, error) { return testInstanceName, nil } getZone = func() (string, error) { return testZoneName, nil } config = &Config{Target: testTarget} d, err := initializeDeployment() if err != nil { t.Errorf("initializeDeployment() got error: %v, want no error", err) } want := createTestDeployment() if !reflect.DeepEqual(d, want) { t.Errorf("initializeDeployment() got wrong deployment, got: %v, want %v", d, want) } } golang-google-cloud-0.9.0/pubsub/000077500000000000000000000000001312234511600166435ustar00rootroot00000000000000golang-google-cloud-0.9.0/pubsub/acker.go000066400000000000000000000100021312234511600202500ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "sync" "time" "golang.org/x/net/context" ) // ackBuffer stores the pending ack IDs and notifies the Dirty channel when it becomes non-empty. type ackBuffer struct { Dirty chan struct{} // Close done when ackBuffer is no longer needed. Done chan struct{} mu sync.Mutex pending []string send bool } // Add adds ackID to the buffer. 
func (buf *ackBuffer) Add(ackID string) { buf.mu.Lock() defer buf.mu.Unlock() buf.pending = append(buf.pending, ackID) // If we are transitioning into a non-empty notification state. if buf.send && len(buf.pending) == 1 { buf.notify() } } // RemoveAll removes all ackIDs from the buffer and returns them. func (buf *ackBuffer) RemoveAll() []string { buf.mu.Lock() defer buf.mu.Unlock() ret := buf.pending buf.pending = nil return ret } // SendNotifications enables sending dirty notification on empty -> non-empty transitions. // If the buffer is already non-empty, a notification will be sent immediately. func (buf *ackBuffer) SendNotifications() { buf.mu.Lock() defer buf.mu.Unlock() buf.send = true // If we are transitioning into a non-empty notification state. if len(buf.pending) > 0 { buf.notify() } } func (buf *ackBuffer) notify() { go func() { select { case buf.Dirty <- struct{}{}: case <-buf.Done: } }() } // acker acks messages in batches. type acker struct { s service Ctx context.Context // The context to use when acknowledging messages. Sub string // The full name of the subscription. AckTick <-chan time.Time // AckTick supplies the frequency with which to make ack requests. // Notify is called with an ack ID after the message with that ack ID // has been processed. An ackID is considered to have been processed // if at least one attempt has been made to acknowledge it. Notify func(string) ackBuffer wg sync.WaitGroup done chan struct{} } // Start intiates processing of ackIDs which are added via Add. // Notify is called with each ackID once it has been processed. func (a *acker) Start() { a.done = make(chan struct{}) a.ackBuffer.Dirty = make(chan struct{}) a.ackBuffer.Done = a.done a.wg.Add(1) go func() { defer a.wg.Done() for { select { case <-a.ackBuffer.Dirty: a.ack(a.ackBuffer.RemoveAll()) case <-a.AckTick: a.ack(a.ackBuffer.RemoveAll()) case <-a.done: return } } }() } // Ack adds an ack id to be acked in the next batch. 
func (a *acker) Ack(ackID string) { a.ackBuffer.Add(ackID) } // FastMode switches acker into a mode which acks messages as they arrive, rather than waiting // for a.AckTick. func (a *acker) FastMode() { a.ackBuffer.SendNotifications() } // Stop drops all pending messages, and releases resources before returning. func (a *acker) Stop() { close(a.done) a.wg.Wait() } const maxAckAttempts = 2 // ack acknowledges the supplied ackIDs. // After the acknowledgement request has completed (regardless of its success // or failure), ids will be passed to a.Notify. func (a *acker) ack(ids []string) { head, tail := a.s.splitAckIDs(ids) for len(head) > 0 { for i := 0; i < maxAckAttempts; i++ { if a.s.acknowledge(a.Ctx, a.Sub, head) == nil { break } } // NOTE: if retry gives up and returns an error, we simply drop // those ack IDs. The messages will be redelivered and this is // a documented behaviour of the API. head, tail = a.s.splitAckIDs(tail) } for _, id := range ids { a.Notify(id) } } golang-google-cloud-0.9.0/pubsub/acker_test.go000066400000000000000000000135211312234511600213200ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package pubsub import ( "errors" "reflect" "sort" "testing" "time" "golang.org/x/net/context" ) func TestAcker(t *testing.T) { tick := make(chan time.Time) s := &testService{acknowledgeCalled: make(chan acknowledgeCall)} processed := make(chan string, 10) acker := &acker{ s: s, Ctx: context.Background(), Sub: "subname", AckTick: tick, Notify: func(ackID string) { processed <- ackID }, } acker.Start() checkAckProcessed := func(ackIDs []string) { got := <-s.acknowledgeCalled sort.Strings(got.ackIDs) want := acknowledgeCall{ subName: "subname", ackIDs: ackIDs, } if !reflect.DeepEqual(got, want) { t.Errorf("acknowledge: got:\n%v\nwant:\n%v", got, want) } } acker.Ack("a") acker.Ack("b") tick <- time.Time{} checkAckProcessed([]string{"a", "b"}) acker.Ack("c") tick <- time.Time{} checkAckProcessed([]string{"c"}) acker.Stop() // all IDS should have been sent to processed. close(processed) processedIDs := []string{} for id := range processed { processedIDs = append(processedIDs, id) } sort.Strings(processedIDs) want := []string{"a", "b", "c"} if !reflect.DeepEqual(processedIDs, want) { t.Errorf("acker processed: got:\n%v\nwant:\n%v", processedIDs, want) } } func TestAckerFastMode(t *testing.T) { tick := make(chan time.Time) s := &testService{acknowledgeCalled: make(chan acknowledgeCall)} processed := make(chan string, 10) acker := &acker{ s: s, Ctx: context.Background(), Sub: "subname", AckTick: tick, Notify: func(ackID string) { processed <- ackID }, } acker.Start() checkAckProcessed := func(ackIDs []string) { got := <-s.acknowledgeCalled sort.Strings(got.ackIDs) want := acknowledgeCall{ subName: "subname", ackIDs: ackIDs, } if !reflect.DeepEqual(got, want) { t.Errorf("acknowledge: got:\n%v\nwant:\n%v", got, want) } } // No ticks are sent; fast mode doesn't need them. acker.Ack("a") acker.Ack("b") acker.FastMode() checkAckProcessed([]string{"a", "b"}) acker.Ack("c") checkAckProcessed([]string{"c"}) acker.Stop() // all IDS should have been sent to processed. 
close(processed) processedIDs := []string{} for id := range processed { processedIDs = append(processedIDs, id) } sort.Strings(processedIDs) want := []string{"a", "b", "c"} if !reflect.DeepEqual(processedIDs, want) { t.Errorf("acker processed: got:\n%v\nwant:\n%v", processedIDs, want) } } // TestAckerStop checks that Stop returns immediately. func TestAckerStop(t *testing.T) { tick := make(chan time.Time) s := &testService{acknowledgeCalled: make(chan acknowledgeCall, 10)} processed := make(chan string) acker := &acker{ s: s, Ctx: context.Background(), Sub: "subname", AckTick: tick, Notify: func(ackID string) { processed <- ackID }, } acker.Start() stopped := make(chan struct{}) acker.Ack("a") go func() { acker.Stop() stopped <- struct{}{} }() // Stopped should have been written to by the time this sleep completes. time.Sleep(time.Millisecond) // Receiving from processed should cause Stop to subsequently return, // so it should never be possible to read from stopped before // processed. select { case <-stopped: case <-processed: t.Errorf("acker.Stop processed an ack id before returning") case <-time.After(time.Millisecond): t.Errorf("acker.Stop never returned") } } type ackCallResult struct { ackIDs []string err error } type ackService struct { service calls []ackCallResult t *testing.T // used for error logging. } func (as *ackService) acknowledge(ctx context.Context, subName string, ackIDs []string) error { if len(as.calls) == 0 { as.t.Fatalf("unexpected call to acknowledge: ackIDs: %v", ackIDs) } call := as.calls[0] as.calls = as.calls[1:] if got, want := ackIDs, call.ackIDs; !reflect.DeepEqual(got, want) { as.t.Errorf("unexpected arguments to acknowledge: got: %v ; want: %v", got, want) } return call.err } // Test implementation returns the first 2 elements as head, and the rest as tail. 
func (as *ackService) splitAckIDs(ids []string) ([]string, []string) { if len(ids) < 2 { return ids, nil } return ids[:2], ids[2:] } func TestAckerSplitsBatches(t *testing.T) { type testCase struct { calls []ackCallResult } for _, tc := range []testCase{ { calls: []ackCallResult{ { ackIDs: []string{"a", "b"}, }, { ackIDs: []string{"c", "d"}, }, { ackIDs: []string{"e", "f"}, }, }, }, { calls: []ackCallResult{ { ackIDs: []string{"a", "b"}, err: errors.New("bang"), }, // On error we retry once. { ackIDs: []string{"a", "b"}, err: errors.New("bang"), }, // We give up after failing twice, so we move on to the next set, "c" and "d" { ackIDs: []string{"c", "d"}, err: errors.New("bang"), }, // Again, we retry once. { ackIDs: []string{"c", "d"}, }, { ackIDs: []string{"e", "f"}, }, }, }, } { s := &ackService{ t: t, calls: tc.calls, } acker := &acker{ s: s, Ctx: context.Background(), Sub: "subname", Notify: func(string) {}, } acker.ack([]string{"a", "b", "c", "d", "e", "f"}) if len(s.calls) != 0 { t.Errorf("expected ack calls did not occur: %v", s.calls) } } } golang-google-cloud-0.9.0/pubsub/apiv1/000077500000000000000000000000001312234511600176635ustar00rootroot00000000000000golang-google-cloud-0.9.0/pubsub/apiv1/ListTopics_smoke_test.go000066400000000000000000000032151312234511600245450ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package pubsub import ( pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" ) import ( "strconv" "testing" "time" "cloud.google.com/go/internal/testutil" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" ) var _ = iterator.Done var _ = strconv.FormatUint var _ = time.Now func TestPublisherSmoke(t *testing.T) { if testing.Short() { t.Skip("skipping smoke test in short mode") } ctx := context.Background() ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) if ts == nil { t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") } projectId := testutil.ProjID() _ = projectId c, err := NewPublisherClient(ctx, option.WithTokenSource(ts)) if err != nil { t.Fatal(err) } var formattedProject string = PublisherProjectPath(projectId) var request = &pubsubpb.ListTopicsRequest{ Project: formattedProject, } iter := c.ListTopics(ctx, request) if _, err := iter.Next(); err != nil && err != iterator.Done { t.Error(err) } } golang-google-cloud-0.9.0/pubsub/apiv1/README.md000066400000000000000000000004571312234511600211500ustar00rootroot00000000000000Auto-generated pubsub v1 clients ================================= This package includes auto-generated clients for the pubsub v1 API. Use the handwritten client (in the parent directory, cloud.google.com/go/pubsub) in preference to this. This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. golang-google-cloud-0.9.0/pubsub/apiv1/doc.go000066400000000000000000000027241312234511600207640ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package pubsub is an experimental, auto-generated package for the // Google Cloud Pub/Sub API. // // Provides reliable, many-to-many, asynchronous messaging between // applications. // // Use the client at cloud.google.com/go/pubsub in preference to this. package pubsub // import "cloud.google.com/go/pubsub/apiv1" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/pubsub", } } golang-google-cloud-0.9.0/pubsub/apiv1/mock_test.go000066400000000000000000001377131312234511600222160ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package pubsub import ( emptypb "github.com/golang/protobuf/ptypes/empty" iampb "google.golang.org/genproto/googleapis/iam/v1" pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" field_maskpb "google.golang.org/genproto/protobuf/field_mask" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockPublisherServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. pubsubpb.PublisherServer reqs []proto.Message // If set, all calls return this error. 
err error // responses to return if err == nil resps []proto.Message } func (s *mockPublisherServer) CreateTopic(ctx context.Context, req *pubsubpb.Topic) (*pubsubpb.Topic, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*pubsubpb.Topic), nil } func (s *mockPublisherServer) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*pubsubpb.PublishResponse), nil } func (s *mockPublisherServer) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest) (*pubsubpb.Topic, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*pubsubpb.Topic), nil } func (s *mockPublisherServer) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest) (*pubsubpb.ListTopicsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*pubsubpb.ListTopicsResponse), nil } func (s *mockPublisherServer) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest) 
(*pubsubpb.ListTopicSubscriptionsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*pubsubpb.ListTopicSubscriptionsResponse), nil } func (s *mockPublisherServer) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } type mockIamPolicyServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. iampb.IAMPolicyServer reqs []proto.Message // If set, all calls return this error. 
err error // responses to return if err == nil resps []proto.Message } func (s *mockIamPolicyServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*iampb.Policy), nil } func (s *mockIamPolicyServer) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*iampb.Policy), nil } func (s *mockIamPolicyServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*iampb.TestIamPermissionsResponse), nil } type mockSubscriberServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. pubsubpb.SubscriberServer reqs []proto.Message // If set, all calls return this error. 
err error // responses to return if err == nil resps []proto.Message } func (s *mockSubscriberServer) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription) (*pubsubpb.Subscription, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*pubsubpb.Subscription), nil } func (s *mockSubscriberServer) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest) (*pubsubpb.Subscription, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*pubsubpb.Subscription), nil } func (s *mockSubscriberServer) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest) (*pubsubpb.Subscription, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*pubsubpb.Subscription), nil } func (s *mockSubscriberServer) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest) (*pubsubpb.ListSubscriptionsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*pubsubpb.ListSubscriptionsResponse), nil } func (s *mockSubscriberServer) 
DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockSubscriberServer) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockSubscriberServer) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockSubscriberServer) Pull(ctx context.Context, req *pubsubpb.PullRequest) (*pubsubpb.PullResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*pubsubpb.PullResponse), nil } func (s *mockSubscriberServer) StreamingPull(stream pubsubpb.Subscriber_StreamingPullServer) error { md, _ := metadata.FromIncomingContext(stream.Context()) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return 
fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } for { if req, err := stream.Recv(); err == io.EOF { break } else if err != nil { return err } else { s.reqs = append(s.reqs, req) } } if s.err != nil { return s.err } for _, v := range s.resps { if err := stream.Send(v.(*pubsubpb.StreamingPullResponse)); err != nil { return err } } return nil } func (s *mockSubscriberServer) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockSubscriberServer) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest) (*pubsubpb.ListSnapshotsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*pubsubpb.ListSnapshotsResponse), nil } func (s *mockSubscriberServer) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest) (*pubsubpb.Snapshot, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*pubsubpb.Snapshot), nil } func (s *mockSubscriberServer) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { 
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockSubscriberServer) Seek(ctx context.Context, req *pubsubpb.SeekRequest) (*pubsubpb.SeekResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*pubsubpb.SeekResponse), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. var clientOpt option.ClientOption var ( mockPublisher mockPublisherServer mockIamPolicy mockIamPolicyServer mockSubscriber mockSubscriberServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() pubsubpb.RegisterPublisherServer(serv, &mockPublisher) iampb.RegisterIAMPolicyServer(serv, &mockIamPolicy) pubsubpb.RegisterSubscriberServer(serv, &mockSubscriber) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestPublisherCreateTopic(t *testing.T) { var name2 string = "name2-1052831874" var expectedResponse = &pubsubpb.Topic{ Name: name2, } mockPublisher.err = nil mockPublisher.reqs = nil mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) var formattedName string = PublisherTopicPath("[PROJECT]", "[TOPIC]") var request = &pubsubpb.Topic{ Name: formattedName, } c, err := NewPublisherClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateTopic(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, 
got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestPublisherCreateTopicError(t *testing.T) { errCode := codes.PermissionDenied mockPublisher.err = gstatus.Error(errCode, "test error") var formattedName string = PublisherTopicPath("[PROJECT]", "[TOPIC]") var request = &pubsubpb.Topic{ Name: formattedName, } c, err := NewPublisherClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateTopic(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestPublisherPublish(t *testing.T) { var messageIdsElement string = "messageIdsElement-744837059" var messageIds = []string{messageIdsElement} var expectedResponse = &pubsubpb.PublishResponse{ MessageIds: messageIds, } mockPublisher.err = nil mockPublisher.reqs = nil mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") var data []byte = []byte("-86") var messagesElement = &pubsubpb.PubsubMessage{ Data: data, } var messages = []*pubsubpb.PubsubMessage{messagesElement} var request = &pubsubpb.PublishRequest{ Topic: formattedTopic, Messages: messages, } c, err := NewPublisherClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.Publish(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestPublisherPublishError(t *testing.T) { errCode := codes.PermissionDenied mockPublisher.err = gstatus.Error(errCode, 
"test error") var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") var data []byte = []byte("-86") var messagesElement = &pubsubpb.PubsubMessage{ Data: data, } var messages = []*pubsubpb.PubsubMessage{messagesElement} var request = &pubsubpb.PublishRequest{ Topic: formattedTopic, Messages: messages, } c, err := NewPublisherClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.Publish(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestPublisherGetTopic(t *testing.T) { var name string = "name3373707" var expectedResponse = &pubsubpb.Topic{ Name: name, } mockPublisher.err = nil mockPublisher.reqs = nil mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") var request = &pubsubpb.GetTopicRequest{ Topic: formattedTopic, } c, err := NewPublisherClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetTopic(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestPublisherGetTopicError(t *testing.T) { errCode := codes.PermissionDenied mockPublisher.err = gstatus.Error(errCode, "test error") var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") var request = &pubsubpb.GetTopicRequest{ Topic: formattedTopic, } c, err := NewPublisherClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetTopic(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } 
else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestPublisherListTopics(t *testing.T) { var nextPageToken string = "" var topicsElement *pubsubpb.Topic = &pubsubpb.Topic{} var topics = []*pubsubpb.Topic{topicsElement} var expectedResponse = &pubsubpb.ListTopicsResponse{ NextPageToken: nextPageToken, Topics: topics, } mockPublisher.err = nil mockPublisher.reqs = nil mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) var formattedProject string = PublisherProjectPath("[PROJECT]") var request = &pubsubpb.ListTopicsRequest{ Project: formattedProject, } c, err := NewPublisherClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListTopics(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Topics[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestPublisherListTopicsError(t *testing.T) { errCode := codes.PermissionDenied mockPublisher.err = gstatus.Error(errCode, "test error") var formattedProject string = PublisherProjectPath("[PROJECT]") var request = &pubsubpb.ListTopicsRequest{ Project: formattedProject, } c, err := NewPublisherClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListTopics(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestPublisherListTopicSubscriptions(t *testing.T) { var nextPageToken string = "" var subscriptionsElement string = 
"subscriptionsElement1698708147" var subscriptions = []string{subscriptionsElement} var expectedResponse = &pubsubpb.ListTopicSubscriptionsResponse{ NextPageToken: nextPageToken, Subscriptions: subscriptions, } mockPublisher.err = nil mockPublisher.reqs = nil mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") var request = &pubsubpb.ListTopicSubscriptionsRequest{ Topic: formattedTopic, } c, err := NewPublisherClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListTopicSubscriptions(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Subscriptions[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestPublisherListTopicSubscriptionsError(t *testing.T) { errCode := codes.PermissionDenied mockPublisher.err = gstatus.Error(errCode, "test error") var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") var request = &pubsubpb.ListTopicSubscriptionsRequest{ Topic: formattedTopic, } c, err := NewPublisherClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListTopicSubscriptions(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestPublisherDeleteTopic(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockPublisher.err = nil mockPublisher.reqs = nil mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) var 
formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") var request = &pubsubpb.DeleteTopicRequest{ Topic: formattedTopic, } c, err := NewPublisherClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteTopic(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestPublisherDeleteTopicError(t *testing.T) { errCode := codes.PermissionDenied mockPublisher.err = gstatus.Error(errCode, "test error") var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") var request = &pubsubpb.DeleteTopicRequest{ Topic: formattedTopic, } c, err := NewPublisherClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteTopic(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestSubscriberCreateSubscription(t *testing.T) { var name2 string = "name2-1052831874" var topic2 string = "topic2-1139259102" var ackDeadlineSeconds int32 = 2135351438 var retainAckedMessages bool = false var expectedResponse = &pubsubpb.Subscription{ Name: name2, Topic: topic2, AckDeadlineSeconds: ackDeadlineSeconds, RetainAckedMessages: retainAckedMessages, } mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var formattedName string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var formattedTopic string = SubscriberTopicPath("[PROJECT]", "[TOPIC]") var request = &pubsubpb.Subscription{ Name: formattedName, Topic: formattedTopic, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateSubscription(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := 
request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestSubscriberCreateSubscriptionError(t *testing.T) { errCode := codes.PermissionDenied mockSubscriber.err = gstatus.Error(errCode, "test error") var formattedName string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var formattedTopic string = SubscriberTopicPath("[PROJECT]", "[TOPIC]") var request = &pubsubpb.Subscription{ Name: formattedName, Topic: formattedTopic, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateSubscription(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestSubscriberGetSubscription(t *testing.T) { var name string = "name3373707" var topic string = "topic110546223" var ackDeadlineSeconds int32 = 2135351438 var retainAckedMessages bool = false var expectedResponse = &pubsubpb.Subscription{ Name: name, Topic: topic, AckDeadlineSeconds: ackDeadlineSeconds, RetainAckedMessages: retainAckedMessages, } mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var request = &pubsubpb.GetSubscriptionRequest{ Subscription: formattedSubscription, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetSubscription(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; 
!proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestSubscriberGetSubscriptionError(t *testing.T) { errCode := codes.PermissionDenied mockSubscriber.err = gstatus.Error(errCode, "test error") var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var request = &pubsubpb.GetSubscriptionRequest{ Subscription: formattedSubscription, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetSubscription(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestSubscriberUpdateSubscription(t *testing.T) { var name string = "name3373707" var topic string = "topic110546223" var ackDeadlineSeconds int32 = 2135351438 var retainAckedMessages bool = false var expectedResponse = &pubsubpb.Subscription{ Name: name, Topic: topic, AckDeadlineSeconds: ackDeadlineSeconds, RetainAckedMessages: retainAckedMessages, } mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var subscription *pubsubpb.Subscription = &pubsubpb.Subscription{} var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} var request = &pubsubpb.UpdateSubscriptionRequest{ Subscription: subscription, UpdateMask: updateMask, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateSubscription(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestSubscriberUpdateSubscriptionError(t *testing.T) { errCode := 
codes.PermissionDenied mockSubscriber.err = gstatus.Error(errCode, "test error") var subscription *pubsubpb.Subscription = &pubsubpb.Subscription{} var updateMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} var request = &pubsubpb.UpdateSubscriptionRequest{ Subscription: subscription, UpdateMask: updateMask, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.UpdateSubscription(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestSubscriberListSubscriptions(t *testing.T) { var nextPageToken string = "" var subscriptionsElement *pubsubpb.Subscription = &pubsubpb.Subscription{} var subscriptions = []*pubsubpb.Subscription{subscriptionsElement} var expectedResponse = &pubsubpb.ListSubscriptionsResponse{ NextPageToken: nextPageToken, Subscriptions: subscriptions, } mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var formattedProject string = SubscriberProjectPath("[PROJECT]") var request = &pubsubpb.ListSubscriptionsRequest{ Project: formattedProject, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListSubscriptions(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Subscriptions[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestSubscriberListSubscriptionsError(t *testing.T) { errCode := codes.PermissionDenied 
mockSubscriber.err = gstatus.Error(errCode, "test error") var formattedProject string = SubscriberProjectPath("[PROJECT]") var request = &pubsubpb.ListSubscriptionsRequest{ Project: formattedProject, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListSubscriptions(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestSubscriberDeleteSubscription(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var request = &pubsubpb.DeleteSubscriptionRequest{ Subscription: formattedSubscription, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteSubscription(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestSubscriberDeleteSubscriptionError(t *testing.T) { errCode := codes.PermissionDenied mockSubscriber.err = gstatus.Error(errCode, "test error") var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var request = &pubsubpb.DeleteSubscriptionRequest{ Subscription: formattedSubscription, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteSubscription(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func 
TestSubscriberModifyAckDeadline(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var ackIds []string = nil var ackDeadlineSeconds int32 = 2135351438 var request = &pubsubpb.ModifyAckDeadlineRequest{ Subscription: formattedSubscription, AckIds: ackIds, AckDeadlineSeconds: ackDeadlineSeconds, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.ModifyAckDeadline(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestSubscriberModifyAckDeadlineError(t *testing.T) { errCode := codes.PermissionDenied mockSubscriber.err = gstatus.Error(errCode, "test error") var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var ackIds []string = nil var ackDeadlineSeconds int32 = 2135351438 var request = &pubsubpb.ModifyAckDeadlineRequest{ Subscription: formattedSubscription, AckIds: ackIds, AckDeadlineSeconds: ackDeadlineSeconds, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.ModifyAckDeadline(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestSubscriberAcknowledge(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var ackIds []string = nil var request 
= &pubsubpb.AcknowledgeRequest{ Subscription: formattedSubscription, AckIds: ackIds, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.Acknowledge(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestSubscriberAcknowledgeError(t *testing.T) { errCode := codes.PermissionDenied mockSubscriber.err = gstatus.Error(errCode, "test error") var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var ackIds []string = nil var request = &pubsubpb.AcknowledgeRequest{ Subscription: formattedSubscription, AckIds: ackIds, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.Acknowledge(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestSubscriberPull(t *testing.T) { var expectedResponse *pubsubpb.PullResponse = &pubsubpb.PullResponse{} mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var maxMessages int32 = 496131527 var request = &pubsubpb.PullRequest{ Subscription: formattedSubscription, MaxMessages: maxMessages, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.Pull(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func 
TestSubscriberPullError(t *testing.T) { errCode := codes.PermissionDenied mockSubscriber.err = gstatus.Error(errCode, "test error") var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var maxMessages int32 = 496131527 var request = &pubsubpb.PullRequest{ Subscription: formattedSubscription, MaxMessages: maxMessages, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.Pull(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestSubscriberStreamingPull(t *testing.T) { var receivedMessagesElement *pubsubpb.ReceivedMessage = &pubsubpb.ReceivedMessage{} var receivedMessages = []*pubsubpb.ReceivedMessage{receivedMessagesElement} var expectedResponse = &pubsubpb.StreamingPullResponse{ ReceivedMessages: receivedMessages, } mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var streamAckDeadlineSeconds int32 = 1875467245 var request = &pubsubpb.StreamingPullRequest{ Subscription: formattedSubscription, StreamAckDeadlineSeconds: streamAckDeadlineSeconds, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } stream, err := c.StreamingPull(context.Background()) if err != nil { t.Fatal(err) } if err := stream.Send(request); err != nil { t.Fatal(err) } if err := stream.CloseSend(); err != nil { t.Fatal(err) } resp, err := stream.Recv() if err != nil { t.Fatal(err) } if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", 
got, want) } } func TestSubscriberStreamingPullError(t *testing.T) { errCode := codes.PermissionDenied mockSubscriber.err = gstatus.Error(errCode, "test error") var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var streamAckDeadlineSeconds int32 = 1875467245 var request = &pubsubpb.StreamingPullRequest{ Subscription: formattedSubscription, StreamAckDeadlineSeconds: streamAckDeadlineSeconds, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } stream, err := c.StreamingPull(context.Background()) if err != nil { t.Fatal(err) } if err := stream.Send(request); err != nil { t.Fatal(err) } if err := stream.CloseSend(); err != nil { t.Fatal(err) } resp, err := stream.Recv() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestSubscriberModifyPushConfig(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var pushConfig *pubsubpb.PushConfig = &pubsubpb.PushConfig{} var request = &pubsubpb.ModifyPushConfigRequest{ Subscription: formattedSubscription, PushConfig: pushConfig, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.ModifyPushConfig(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestSubscriberModifyPushConfigError(t *testing.T) { errCode := codes.PermissionDenied mockSubscriber.err = gstatus.Error(errCode, "test error") var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", 
"[SUBSCRIPTION]") var pushConfig *pubsubpb.PushConfig = &pubsubpb.PushConfig{} var request = &pubsubpb.ModifyPushConfigRequest{ Subscription: formattedSubscription, PushConfig: pushConfig, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.ModifyPushConfig(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestSubscriberListSnapshots(t *testing.T) { var nextPageToken string = "" var snapshotsElement *pubsubpb.Snapshot = &pubsubpb.Snapshot{} var snapshots = []*pubsubpb.Snapshot{snapshotsElement} var expectedResponse = &pubsubpb.ListSnapshotsResponse{ NextPageToken: nextPageToken, Snapshots: snapshots, } mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var formattedProject string = SubscriberProjectPath("[PROJECT]") var request = &pubsubpb.ListSnapshotsRequest{ Project: formattedProject, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListSnapshots(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Snapshots[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestSubscriberListSnapshotsError(t *testing.T) { errCode := codes.PermissionDenied mockSubscriber.err = gstatus.Error(errCode, "test error") var formattedProject string = SubscriberProjectPath("[PROJECT]") var request = &pubsubpb.ListSnapshotsRequest{ Project: formattedProject, } c, err := 
NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListSnapshots(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestSubscriberCreateSnapshot(t *testing.T) { var name2 string = "name2-1052831874" var topic string = "topic110546223" var expectedResponse = &pubsubpb.Snapshot{ Name: name2, Topic: topic, } mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var formattedName string = SubscriberSnapshotPath("[PROJECT]", "[SNAPSHOT]") var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var request = &pubsubpb.CreateSnapshotRequest{ Name: formattedName, Subscription: formattedSubscription, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateSnapshot(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestSubscriberCreateSnapshotError(t *testing.T) { errCode := codes.PermissionDenied mockSubscriber.err = gstatus.Error(errCode, "test error") var formattedName string = SubscriberSnapshotPath("[PROJECT]", "[SNAPSHOT]") var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var request = &pubsubpb.CreateSnapshotRequest{ Name: formattedName, Subscription: formattedSubscription, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.CreateSnapshot(context.Background(), request) if st, ok := gstatus.FromError(err); 
!ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestSubscriberDeleteSnapshot(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var formattedSnapshot string = SubscriberSnapshotPath("[PROJECT]", "[SNAPSHOT]") var request = &pubsubpb.DeleteSnapshotRequest{ Snapshot: formattedSnapshot, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteSnapshot(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestSubscriberDeleteSnapshotError(t *testing.T) { errCode := codes.PermissionDenied mockSubscriber.err = gstatus.Error(errCode, "test error") var formattedSnapshot string = SubscriberSnapshotPath("[PROJECT]", "[SNAPSHOT]") var request = &pubsubpb.DeleteSnapshotRequest{ Snapshot: formattedSnapshot, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteSnapshot(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestSubscriberSeek(t *testing.T) { var expectedResponse *pubsubpb.SeekResponse = &pubsubpb.SeekResponse{} mockSubscriber.err = nil mockSubscriber.reqs = nil mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var request = &pubsubpb.SeekRequest{ Subscription: formattedSubscription, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) 
} resp, err := c.Seek(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestSubscriberSeekError(t *testing.T) { errCode := codes.PermissionDenied mockSubscriber.err = gstatus.Error(errCode, "test error") var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") var request = &pubsubpb.SeekRequest{ Subscription: formattedSubscription, } c, err := NewSubscriberClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.Seek(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } golang-google-cloud-0.9.0/pubsub/apiv1/publisher_client.go000066400000000000000000000327621312234511600235570ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package pubsub import ( "math" "time" "cloud.google.com/go/iam" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( publisherProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") publisherTopicPathTemplate = gax.MustCompilePathTemplate("projects/{project}/topics/{topic}") ) // PublisherCallOptions contains the retry settings for each method of PublisherClient. type PublisherCallOptions struct { CreateTopic []gax.CallOption Publish []gax.CallOption GetTopic []gax.CallOption ListTopics []gax.CallOption ListTopicSubscriptions []gax.CallOption DeleteTopic []gax.CallOption } func defaultPublisherClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("pubsub.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultPublisherCallOptions() *PublisherCallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, {"messaging", "one_plus_delivery"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.Canceled, codes.Unknown, codes.DeadlineExceeded, codes.ResourceExhausted, codes.Aborted, codes.Internal, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &PublisherCallOptions{ CreateTopic: retry[[2]string{"default", "idempotent"}], Publish: retry[[2]string{"messaging", "one_plus_delivery"}], GetTopic: retry[[2]string{"default", "idempotent"}], ListTopics: retry[[2]string{"default", "idempotent"}], 
ListTopicSubscriptions: retry[[2]string{"default", "idempotent"}], DeleteTopic: retry[[2]string{"default", "idempotent"}], } } // PublisherClient is a client for interacting with Google Cloud Pub/Sub API. type PublisherClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. publisherClient pubsubpb.PublisherClient // The call options for this service. CallOptions *PublisherCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewPublisherClient creates a new publisher client. // // The service that an application uses to manipulate topics, and to send // messages to a topic. func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*PublisherClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultPublisherClientOptions(), opts...)...) if err != nil { return nil, err } c := &PublisherClient{ conn: conn, CallOptions: defaultPublisherCallOptions(), publisherClient: pubsubpb.NewPublisherClient(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *PublisherClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *PublisherClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // PublisherProjectPath returns the path for the project resource. 
func PublisherProjectPath(project string) string { path, err := publisherProjectPathTemplate.Render(map[string]string{ "project": project, }) if err != nil { panic(err) } return path } // PublisherTopicPath returns the path for the topic resource. func PublisherTopicPath(project, topic string) string { path, err := publisherTopicPathTemplate.Render(map[string]string{ "project": project, "topic": topic, }) if err != nil { panic(err) } return path } func (c *PublisherClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle { return iam.InternalNewHandle(c.Connection(), subscription.Name) } func (c *PublisherClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle { return iam.InternalNewHandle(c.Connection(), topic.Name) } // CreateTopic creates the given topic with the given name. func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.CreateTopic[0:len(c.CallOptions.CreateTopic):len(c.CallOptions.CreateTopic)], opts...) var resp *pubsubpb.Topic err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.publisherClient.CreateTopic(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // Publish adds one or more messages to the topic. Returns `NOT_FOUND` if the topic // does not exist. The message payload must not be empty; it must contain // either a non-empty data field, or at least one attribute. func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...) 
var resp *pubsubpb.PublishResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.publisherClient.Publish(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // GetTopic gets the configuration of a topic. func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetTopic[0:len(c.CallOptions.GetTopic):len(c.CallOptions.GetTopic)], opts...) var resp *pubsubpb.Topic err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.publisherClient.GetTopic(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // ListTopics lists matching topics. func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListTopics[0:len(c.CallOptions.ListTopics):len(c.CallOptions.ListTopics)], opts...) it := &TopicIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) { var resp *pubsubpb.ListTopicsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.publisherClient.ListTopics(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.Topics, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) 
return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // ListTopicSubscriptions lists the name of the subscriptions for this topic. func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListTopicSubscriptions[0:len(c.CallOptions.ListTopicSubscriptions):len(c.CallOptions.ListTopicSubscriptions)], opts...) it := &StringIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { var resp *pubsubpb.ListTopicSubscriptionsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.publisherClient.ListTopicSubscriptions(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.Subscriptions, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // DeleteTopic deletes the topic with the given name. Returns `NOT_FOUND` if the topic // does not exist. After a topic is deleted, a new topic may be created with // the same name; this is an entirely new topic with none of the old // configuration or subscriptions. Existing subscriptions to this topic are // not deleted, but their `topic` field is set to `_deleted-topic_`. 
func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.publisherClient.DeleteTopic(ctx, req, settings.GRPC...) return err }, opts...) return err } // StringIterator manages a stream of string. type StringIterator struct { items []string pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *StringIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *StringIterator) Next() (string, error) { var item string if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *StringIterator) bufLen() int { return len(it.items) } func (it *StringIterator) takeBuf() interface{} { b := it.items it.items = nil return b } // TopicIterator manages a stream of *pubsubpb.Topic. type TopicIterator struct { items []*pubsubpb.Topic pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. 
// It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Topic, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *TopicIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *TopicIterator) Next() (*pubsubpb.Topic, error) { var item *pubsubpb.Topic if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *TopicIterator) bufLen() int { return len(it.items) } func (it *TopicIterator) takeBuf() interface{} { b := it.items it.items = nil return b } golang-google-cloud-0.9.0/pubsub/apiv1/publisher_client_example_test.go000066400000000000000000000075641312234511600263330ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package pubsub_test import ( "cloud.google.com/go/pubsub/apiv1" "golang.org/x/net/context" "google.golang.org/api/iterator" pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" ) func ExampleNewPublisherClient() { ctx := context.Background() c, err := pubsub.NewPublisherClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExamplePublisherClient_SubscriptionIAM() { ctx := context.Background() c, err := pubsub.NewPublisherClient(ctx) if err != nil { // TODO: Handle error. } subscription := &pubsubpb.Subscription{} h := c.SubscriptionIAM(subscription) policy, err := h.Policy(ctx) if err != nil { // TODO: Handle error. } //TODO: Use the IAM policy _ = policy } func ExamplePublisherClient_TopicIAM() { ctx := context.Background() c, err := pubsub.NewPublisherClient(ctx) if err != nil { // TODO: Handle error. } topic := &pubsubpb.Topic{} h := c.TopicIAM(topic) policy, err := h.Policy(ctx) if err != nil { // TODO: Handle error. } //TODO: Use the IAM policy _ = policy } func ExamplePublisherClient_CreateTopic() { ctx := context.Background() c, err := pubsub.NewPublisherClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.Topic{ // TODO: Fill request struct fields. } resp, err := c.CreateTopic(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExamplePublisherClient_Publish() { ctx := context.Background() c, err := pubsub.NewPublisherClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.PublishRequest{ // TODO: Fill request struct fields. } resp, err := c.Publish(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExamplePublisherClient_GetTopic() { ctx := context.Background() c, err := pubsub.NewPublisherClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.GetTopicRequest{ // TODO: Fill request struct fields. } resp, err := c.GetTopic(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. 
_ = resp } func ExamplePublisherClient_ListTopics() { ctx := context.Background() c, err := pubsub.NewPublisherClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.ListTopicsRequest{ // TODO: Fill request struct fields. } it := c.ListTopics(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExamplePublisherClient_ListTopicSubscriptions() { ctx := context.Background() c, err := pubsub.NewPublisherClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.ListTopicSubscriptionsRequest{ // TODO: Fill request struct fields. } it := c.ListTopicSubscriptions(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExamplePublisherClient_DeleteTopic() { ctx := context.Background() c, err := pubsub.NewPublisherClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.DeleteTopicRequest{ // TODO: Fill request struct fields. } err = c.DeleteTopic(ctx, req) if err != nil { // TODO: Handle error. } } golang-google-cloud-0.9.0/pubsub/apiv1/subscriber_client.go000066400000000000000000000566421312234511600237300ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package pubsub import ( "math" "time" "cloud.google.com/go/iam" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( subscriberProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") subscriberSnapshotPathTemplate = gax.MustCompilePathTemplate("projects/{project}/snapshots/{snapshot}") subscriberSubscriptionPathTemplate = gax.MustCompilePathTemplate("projects/{project}/subscriptions/{subscription}") subscriberTopicPathTemplate = gax.MustCompilePathTemplate("projects/{project}/topics/{topic}") ) // SubscriberCallOptions contains the retry settings for each method of SubscriberClient. type SubscriberCallOptions struct { CreateSubscription []gax.CallOption GetSubscription []gax.CallOption UpdateSubscription []gax.CallOption ListSubscriptions []gax.CallOption DeleteSubscription []gax.CallOption ModifyAckDeadline []gax.CallOption Acknowledge []gax.CallOption Pull []gax.CallOption StreamingPull []gax.CallOption ModifyPushConfig []gax.CallOption ListSnapshots []gax.CallOption CreateSnapshot []gax.CallOption DeleteSnapshot []gax.CallOption Seek []gax.CallOption } func defaultSubscriberClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("pubsub.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultSubscriberCallOptions() *SubscriberCallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, {"messaging", "pull"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ 
codes.Canceled, codes.DeadlineExceeded, codes.ResourceExhausted, codes.Internal, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &SubscriberCallOptions{ CreateSubscription: retry[[2]string{"default", "idempotent"}], GetSubscription: retry[[2]string{"default", "idempotent"}], UpdateSubscription: retry[[2]string{"default", "idempotent"}], ListSubscriptions: retry[[2]string{"default", "idempotent"}], DeleteSubscription: retry[[2]string{"default", "idempotent"}], ModifyAckDeadline: retry[[2]string{"default", "non_idempotent"}], Acknowledge: retry[[2]string{"messaging", "non_idempotent"}], Pull: retry[[2]string{"messaging", "pull"}], StreamingPull: retry[[2]string{"messaging", "pull"}], ModifyPushConfig: retry[[2]string{"default", "non_idempotent"}], ListSnapshots: retry[[2]string{"default", "idempotent"}], CreateSnapshot: retry[[2]string{"default", "idempotent"}], DeleteSnapshot: retry[[2]string{"default", "idempotent"}], Seek: retry[[2]string{"default", "non_idempotent"}], } } // SubscriberClient is a client for interacting with Google Cloud Pub/Sub API. type SubscriberClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. subscriberClient pubsubpb.SubscriberClient // The call options for this service. CallOptions *SubscriberCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewSubscriberClient creates a new subscriber client. // // The service that an application uses to manipulate subscriptions and to // consume messages from a subscription via the `Pull` method. func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultSubscriberClientOptions(), opts...)...) 
if err != nil { return nil, err } c := &SubscriberClient{ conn: conn, CallOptions: defaultSubscriberCallOptions(), subscriberClient: pubsubpb.NewSubscriberClient(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *SubscriberClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *SubscriberClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // SubscriberProjectPath returns the path for the project resource. func SubscriberProjectPath(project string) string { path, err := subscriberProjectPathTemplate.Render(map[string]string{ "project": project, }) if err != nil { panic(err) } return path } // SubscriberSnapshotPath returns the path for the snapshot resource. func SubscriberSnapshotPath(project, snapshot string) string { path, err := subscriberSnapshotPathTemplate.Render(map[string]string{ "project": project, "snapshot": snapshot, }) if err != nil { panic(err) } return path } // SubscriberSubscriptionPath returns the path for the subscription resource. func SubscriberSubscriptionPath(project, subscription string) string { path, err := subscriberSubscriptionPathTemplate.Render(map[string]string{ "project": project, "subscription": subscription, }) if err != nil { panic(err) } return path } // SubscriberTopicPath returns the path for the topic resource. 
func SubscriberTopicPath(project, topic string) string { path, err := subscriberTopicPathTemplate.Render(map[string]string{ "project": project, "topic": topic, }) if err != nil { panic(err) } return path } func (c *SubscriberClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle { return iam.InternalNewHandle(c.Connection(), subscription.Name) } func (c *SubscriberClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle { return iam.InternalNewHandle(c.Connection(), topic.Name) } // CreateSubscription creates a subscription to a given topic. // If the subscription already exists, returns `ALREADY_EXISTS`. // If the corresponding topic doesn't exist, returns `NOT_FOUND`. // // If the name is not provided in the request, the server will assign a random // name for this subscription on the same project as the topic, conforming // to the // [resource name format](https://cloud.google.com/pubsub/docs/overview#names). // The generated name is populated in the returned Subscription object. // Note that for REST API requests, you must specify a name in the request. func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.CreateSubscription[0:len(c.CallOptions.CreateSubscription):len(c.CallOptions.CreateSubscription)], opts...) var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.subscriberClient.CreateSubscription(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // GetSubscription gets the configuration details of a subscription. 
func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetSubscription[0:len(c.CallOptions.GetSubscription):len(c.CallOptions.GetSubscription)], opts...) var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.subscriberClient.GetSubscription(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // UpdateSubscription updates an existing subscription. Note that certain properties of a // subscription, such as its topic, are not modifiable. func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...) var resp *pubsubpb.Subscription err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.subscriberClient.UpdateSubscription(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // ListSubscriptions lists matching subscriptions. func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListSubscriptions[0:len(c.CallOptions.ListSubscriptions):len(c.CallOptions.ListSubscriptions)], opts...) 
it := &SubscriptionIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) { var resp *pubsubpb.ListSubscriptionsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.subscriberClient.ListSubscriptions(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.Subscriptions, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // DeleteSubscription deletes an existing subscription. All messages retained in the subscription // are immediately dropped. Calls to `Pull` after deletion will return // `NOT_FOUND`. After a subscription is deleted, a new one may be created with // the same name, but the new one has no association with the old // subscription or its topic unless the same topic is specified. func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteSubscription[0:len(c.CallOptions.DeleteSubscription):len(c.CallOptions.DeleteSubscription)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.DeleteSubscription(ctx, req, settings.GRPC...) return err }, opts...) return err } // ModifyAckDeadline modifies the ack deadline for a specific message. 
This method is useful // to indicate that more time is needed to process a message by the // subscriber, or to make the message available for redelivery if the // processing was interrupted. Note that this does not modify the // subscription-level `ackDeadlineSeconds` used for subsequent messages. func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.ModifyAckDeadline(ctx, req, settings.GRPC...) return err }, opts...) return err } // Acknowledge acknowledges the messages associated with the `ack_ids` in the // `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages // from the subscription. // // Acknowledging a message whose ack deadline has expired may succeed, // but such a message may be redelivered later. Acknowledging a message more // than once will not result in an error. func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.Acknowledge[0:len(c.CallOptions.Acknowledge):len(c.CallOptions.Acknowledge)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.Acknowledge(ctx, req, settings.GRPC...) return err }, opts...) return err } // Pull pulls messages from the server. Returns an empty list if there are no // messages available in the backlog. The server may return `UNAVAILABLE` if // there are too many concurrent pull requests pending for the given // subscription. 
func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.Pull[0:len(c.CallOptions.Pull):len(c.CallOptions.Pull)], opts...) var resp *pubsubpb.PullResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.subscriberClient.Pull(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // StreamingPull (EXPERIMENTAL) StreamingPull is an experimental feature. This RPC will // respond with UNIMPLEMENTED errors unless you have been invited to test // this feature. Contact cloud-pubsub@google.com with any questions. // // Establishes a stream with the server, which sends messages down to the // client. The client streams acknowledgements and ack deadline modifications // back to the server. The server will close the stream and return the status // on any error. The server may close the stream with status `OK` to reassign // server-side resources, in which case, the client should re-establish the // stream. `UNAVAILABLE` may also be returned in the case of a transient error // (e.g., a server restart). These should also be retried by the client. Flow // control can be achieved by configuring the underlying RPC channel. func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.StreamingPull[0:len(c.CallOptions.StreamingPull):len(c.CallOptions.StreamingPull)], opts...) var resp pubsubpb.Subscriber_StreamingPullClient err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.subscriberClient.StreamingPull(ctx, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, err } return resp, nil } // ModifyPushConfig modifies the `PushConfig` for a specified subscription. // // This may be used to change a push subscription to a pull one (signified by // an empty `PushConfig`) or vice versa, or change the endpoint URL and other // attributes of a push subscription. Messages will accumulate for delivery // continuously through the call regardless of changes to the `PushConfig`. func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.ModifyPushConfig(ctx, req, settings.GRPC...) return err }, opts...) return err } // ListSnapshots lists the existing snapshots. func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListSnapshots[0:len(c.CallOptions.ListSnapshots):len(c.CallOptions.ListSnapshots)], opts...) it := &SnapshotIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Snapshot, string, error) { var resp *pubsubpb.ListSnapshotsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.subscriberClient.ListSnapshots(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, "", err } return resp.Snapshots, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // CreateSnapshot creates a snapshot from the requested subscription. // If the snapshot already exists, returns `ALREADY_EXISTS`. // If the requested subscription doesn't exist, returns `NOT_FOUND`. // // If the name is not provided in the request, the server will assign a random // name for this snapshot on the same project as the subscription, conforming // to the // [resource name format](https://cloud.google.com/pubsub/docs/overview#names). // The generated name is populated in the returned Snapshot object. // Note that for REST API requests, you must specify a name in the request. func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.CreateSnapshot[0:len(c.CallOptions.CreateSnapshot):len(c.CallOptions.CreateSnapshot)], opts...) var resp *pubsubpb.Snapshot err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.subscriberClient.CreateSnapshot(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // DeleteSnapshot removes an existing snapshot. All messages retained in the snapshot // are immediately dropped. After a snapshot is deleted, a new one may be // created with the same name, but the new one has no association with the old // snapshot or its subscription, unless the same subscription is specified. 
func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteSnapshot[0:len(c.CallOptions.DeleteSnapshot):len(c.CallOptions.DeleteSnapshot)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.subscriberClient.DeleteSnapshot(ctx, req, settings.GRPC...) return err }, opts...) return err } // Seek seeks an existing subscription to a point in time or to a given snapshot, // whichever is provided in the request. func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.Seek[0:len(c.CallOptions.Seek):len(c.CallOptions.Seek)], opts...) var resp *pubsubpb.SeekResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.subscriberClient.Seek(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // SnapshotIterator manages a stream of *pubsubpb.Snapshot. type SnapshotIterator struct { items []*pubsubpb.Snapshot pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Snapshot, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *SnapshotIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. 
Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *SnapshotIterator) Next() (*pubsubpb.Snapshot, error) { var item *pubsubpb.Snapshot if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *SnapshotIterator) bufLen() int { return len(it.items) } func (it *SnapshotIterator) takeBuf() interface{} { b := it.items it.items = nil return b } // SubscriptionIterator manages a stream of *pubsubpb.Subscription. type SubscriptionIterator struct { items []*pubsubpb.Subscription pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Subscription, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *SubscriptionIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. 
func (it *SubscriptionIterator) Next() (*pubsubpb.Subscription, error) { var item *pubsubpb.Subscription if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *SubscriptionIterator) bufLen() int { return len(it.items) } func (it *SubscriptionIterator) takeBuf() interface{} { b := it.items it.items = nil return b } golang-google-cloud-0.9.0/pubsub/apiv1/subscriber_client_example_test.go000066400000000000000000000156301312234511600264720ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package pubsub_test import ( "io" "cloud.google.com/go/pubsub/apiv1" "golang.org/x/net/context" "google.golang.org/api/iterator" pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" ) func ExampleNewSubscriberClient() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleSubscriberClient_SubscriptionIAM() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } subscription := &pubsubpb.Subscription{} h := c.SubscriptionIAM(subscription) policy, err := h.Policy(ctx) if err != nil { // TODO: Handle error. 
} //TODO: Use the IAM policy _ = policy } func ExampleSubscriberClient_TopicIAM() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } topic := &pubsubpb.Topic{} h := c.TopicIAM(topic) policy, err := h.Policy(ctx) if err != nil { // TODO: Handle error. } //TODO: Use the IAM policy _ = policy } func ExampleSubscriberClient_CreateSubscription() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.Subscription{ // TODO: Fill request struct fields. } resp, err := c.CreateSubscription(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleSubscriberClient_GetSubscription() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.GetSubscriptionRequest{ // TODO: Fill request struct fields. } resp, err := c.GetSubscription(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleSubscriberClient_UpdateSubscription() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.UpdateSubscriptionRequest{ // TODO: Fill request struct fields. } resp, err := c.UpdateSubscription(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleSubscriberClient_ListSubscriptions() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.ListSubscriptionsRequest{ // TODO: Fill request struct fields. } it := c.ListSubscriptions(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleSubscriberClient_DeleteSubscription() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. 
} req := &pubsubpb.DeleteSubscriptionRequest{ // TODO: Fill request struct fields. } err = c.DeleteSubscription(ctx, req) if err != nil { // TODO: Handle error. } } func ExampleSubscriberClient_ModifyAckDeadline() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.ModifyAckDeadlineRequest{ // TODO: Fill request struct fields. } err = c.ModifyAckDeadline(ctx, req) if err != nil { // TODO: Handle error. } } func ExampleSubscriberClient_Acknowledge() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.AcknowledgeRequest{ // TODO: Fill request struct fields. } err = c.Acknowledge(ctx, req) if err != nil { // TODO: Handle error. } } func ExampleSubscriberClient_Pull() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.PullRequest{ // TODO: Fill request struct fields. } resp, err := c.Pull(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleSubscriberClient_StreamingPull() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } stream, err := c.StreamingPull(ctx) if err != nil { // TODO: Handle error. } go func() { reqs := []*pubsubpb.StreamingPullRequest{ // TODO: Create requests. } for _, req := range reqs { if err := stream.Send(req); err != nil { // TODO: Handle error. } } stream.CloseSend() }() for { resp, err := stream.Recv() if err == io.EOF { break } if err != nil { // TODO: handle error. } // TODO: Use resp. _ = resp } } func ExampleSubscriberClient_ModifyPushConfig() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.ModifyPushConfigRequest{ // TODO: Fill request struct fields. } err = c.ModifyPushConfig(ctx, req) if err != nil { // TODO: Handle error. 
} } func ExampleSubscriberClient_ListSnapshots() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.ListSnapshotsRequest{ // TODO: Fill request struct fields. } it := c.ListSnapshots(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleSubscriberClient_CreateSnapshot() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.CreateSnapshotRequest{ // TODO: Fill request struct fields. } resp, err := c.CreateSnapshot(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleSubscriberClient_DeleteSnapshot() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.DeleteSnapshotRequest{ // TODO: Fill request struct fields. } err = c.DeleteSnapshot(ctx, req) if err != nil { // TODO: Handle error. } } func ExampleSubscriberClient_Seek() { ctx := context.Background() c, err := pubsub.NewSubscriberClient(ctx) if err != nil { // TODO: Handle error. } req := &pubsubpb.SeekRequest{ // TODO: Fill request struct fields. } resp, err := c.Seek(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } golang-google-cloud-0.9.0/pubsub/doc.go000066400000000000000000000117371312234511600177500ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub messages, hiding the the details of the underlying server RPCs. Google Cloud Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders and receivers. Note: This package is experimental and may make backwards-incompatible changes. More information about Google Cloud Pub/Sub is available at https://cloud.google.com/pubsub/docs Publishing Google Cloud Pub/Sub messages are published to topics. Topics may be created using the pubsub package like so: topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name") Messages may then be published to a topic: res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")}) Publish queues the message for publishing and returns immediately. When enough messages have accumulated, or enough time has elapsed, the batch of messages is sent to the Pub/Sub service. Publish returns a PublishResult, which behaves like a future: its Get method blocks until the message has been sent to the service. The first time you call Publish on a topic, goroutines are started in the background. To clean up these goroutines, call Stop: topic.Stop() Receiving To receive messages published to a topic, clients create subscriptions to the topic. There may be more than one subscription per topic; each message that is published to the topic will be delivered to all of its subscriptions. 
Subsciptions may be created like so: sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name", topic, 0, nil) Messages are then consumed from a subscription via callback. err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) { log.Printf("Got message: %s", m.Data) m.Ack() }) if err != nil { // Handle error. } The callback is invoked concurrently by multiple goroutines, maximizing throughput. To terminate a call to Receive, cancel its context. Once client code has processed the message, it must call Message.Ack, otherwise the message will eventually be redelivered. As an optimization, if the client cannot or doesn't want to process the message, it can call Message.Nack to speed redelivery. For more information and configuration options, see "Deadlines" below. Note: It is possible for Messages to be redelivered, even if Message.Ack has been called. Client code must be robust to multiple deliveries of messages. Deadlines The default pubsub deadlines are suitable for most use cases, but may be overridden. This section describes the tradeoffs that should be considered when overriding the defaults. Behind the scenes, each message returned by the Pub/Sub server has an associated lease, known as an "ACK deadline". Unless a message is acknowledged within the ACK deadline, or the client requests that the ACK deadline be extended, the message will become elegible for redelivery. As a convenience, the pubsub package will automatically extend deadlines until either: * Message.Ack or Message.Nack is called, or * the "MaxExtension" period elapses from the time the message is fetched from the server. The initial ACK deadline given to each messages defaults to 10 seconds, but may be overridden during subscription creation. Selecting an ACK deadline is a tradeoff between message redelivery latency and RPC volume. If the pubsub package fails to acknowledge or extend a message (e.g. 
due to unexpected termination of the process), a shorter ACK deadline will generally result in faster message redelivery by the Pub/Sub system. However, a short ACK deadline may also increase the number of deadline extension RPCs that the pubsub package sends to the server. The default max extension period is DefaultReceiveSettings.MaxExtension, and can be overridden by setting Subscription.ReceiveSettings.MaxExtension. Selecting a max extension period is a tradeoff between the speed at which client code must process messages, and the redelivery delay if messages fail to be acknowledged (e.g. because client code neglects to do so). Using a large MaxExtension increases the available time for client code to process messages. However, if the client code neglects to call Message.Ack/Nack, a large MaxExtension will increase the delay before the message is redelivered. Authentication See examples of authorization and authentication at https://godoc.org/cloud.google.com/go#pkg-examples. */ package pubsub // import "cloud.google.com/go/pubsub" golang-google-cloud-0.9.0/pubsub/endtoend_test.go000066400000000000000000000150141312234511600220320ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package pubsub import ( "bytes" "fmt" "log" "math/rand" "os" "reflect" "sync" "testing" "time" "golang.org/x/net/context" "cloud.google.com/go/internal/testutil" "google.golang.org/api/option" ) const timeout = time.Minute * 10 const ackDeadline = time.Second * 10 const nMessages = 1e4 // Buffer log messages to debug failures. var logBuf bytes.Buffer // TestEndToEnd pumps many messages into a topic and tests that they are all // delivered to each subscription for the topic. It also tests that messages // are not unexpectedly redelivered. func TestEndToEnd(t *testing.T) { t.Parallel() if testing.Short() { t.Skip("Integration tests skipped in short mode") } log.SetOutput(&logBuf) ctx := context.Background() ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform) if ts == nil { t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") } now := time.Now() topicName := fmt.Sprintf("endtoend-%d", now.UnixNano()) subPrefix := fmt.Sprintf("endtoend-%d", now.UnixNano()) client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts)) if err != nil { t.Fatalf("Creating client error: %v", err) } var topic *Topic if topic, err = client.CreateTopic(ctx, topicName); err != nil { t.Fatalf("CreateTopic error: %v", err) } defer topic.Delete(ctx) // Two subscriptions to the same topic. var subs [2]*Subscription for i := 0; i < len(subs); i++ { subs[i], err = client.CreateSubscription(ctx, fmt.Sprintf("%s-%d", subPrefix, i), SubscriptionConfig{ Topic: topic, AckDeadline: ackDeadline, }) if err != nil { t.Fatalf("CreateSub error: %v", err) } defer subs[i].Delete(ctx) } ids, err := publish(ctx, topic, nMessages) if err != nil { t.Fatalf("publish: %v", err) } wantCounts := make(map[string]int) for _, id := range ids { wantCounts[id] = 1 } // recv provides an indication that messages are still arriving. recv := make(chan struct{}) // We have two subscriptions to our topic. // Each subscription will get a copy of each published message. 
var wg sync.WaitGroup cctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() consumers := []*consumer{ {counts: make(map[string]int), recv: recv, durations: []time.Duration{time.Hour}}, {counts: make(map[string]int), recv: recv, durations: []time.Duration{ackDeadline, ackDeadline, ackDeadline / 2, ackDeadline / 2, time.Hour}}, } for i, con := range consumers { con := con sub := subs[i] wg.Add(1) go func() { defer wg.Done() con.consume(t, cctx, sub) }() } // Wait for a while after the last message before declaring quiescence. // We wait a multiple of the ack deadline, for two reasons: // 1. To detect if messages are redelivered after having their ack // deadline extended. // 2. To wait for redelivery of messages that were en route when a Receive // is canceled. This can take considerably longer than the ack deadline. quiescenceDur := ackDeadline * 6 quiescenceTimer := time.NewTimer(quiescenceDur) loop: for { select { case <-recv: // Reset timer so we wait quiescenceDur after the last message. // See https://godoc.org/time#Timer.Reset for why the Stop // and channel drain are necessary. if !quiescenceTimer.Stop() { <-quiescenceTimer.C } quiescenceTimer.Reset(quiescenceDur) case <-quiescenceTimer.C: cancel() log.Println("quiesced") break loop case <-cctx.Done(): t.Fatal("timed out") } } wg.Wait() ok := true for i, con := range consumers { if got, want := con.counts, wantCounts; !reflect.DeepEqual(got, want) { t.Errorf("%d: message counts: %v\n", i, diff(got, want)) ok = false } } if !ok { logBuf.WriteTo(os.Stdout) } } // publish publishes n messages to topic, and returns the published message IDs. 
func publish(ctx context.Context, topic *Topic, n int) ([]string, error) { var rs []*PublishResult for i := 0; i < n; i++ { m := &Message{Data: []byte(fmt.Sprintf("msg %d", i))} rs = append(rs, topic.Publish(ctx, m)) } var ids []string for _, r := range rs { id, err := r.Get(ctx) if err != nil { return nil, err } ids = append(ids, id) } return ids, nil } // consumer consumes messages according to its configuration. type consumer struct { durations []time.Duration // A value is sent to recv each time Inc is called. recv chan struct{} mu sync.Mutex counts map[string]int total int } // consume reads messages from a subscription, and keeps track of what it receives in mc. // After consume returns, the caller should wait on wg to ensure that no more updates to mc will be made. func (c *consumer) consume(t *testing.T, ctx context.Context, sub *Subscription) { for _, dur := range c.durations { ctx2, cancel := context.WithTimeout(ctx, dur) defer cancel() id := sub.name[len(sub.name)-2:] log.Printf("%s: start receive", id) prev := c.total err := sub.Receive(ctx2, c.process) log.Printf("%s: end receive; read %d", id, c.total-prev) if err != nil { t.Errorf("error from Receive: %v", err) return } select { case <-ctx.Done(): return default: } } } // process handles a message and records it in mc. func (c *consumer) process(_ context.Context, m *Message) { c.mu.Lock() c.counts[m.ID] += 1 c.total++ c.mu.Unlock() c.recv <- struct{}{} // Simulate time taken to process m, while continuing to process more messages. // Some messages will need to have their ack deadline extended due to this delay. delay := rand.Intn(int(ackDeadline * 3)) time.AfterFunc(time.Duration(delay), m.Ack) } // diff returns counts of the differences between got and want. 
func diff(got, want map[string]int) map[string]int { ids := make(map[string]struct{}) for k := range got { ids[k] = struct{}{} } for k := range want { ids[k] = struct{}{} } gotWantCount := make(map[string]int) for k := range ids { if got[k] == want[k] { continue } desc := fmt.Sprintf("", got[k], want[k]) gotWantCount[desc] += 1 } return gotWantCount } golang-google-cloud-0.9.0/pubsub/example_subscription_iterator_test.go000066400000000000000000000026031312234511600264020ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub_test import ( "fmt" "cloud.google.com/go/pubsub" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func ExampleClient_Subscriptions() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } // List all subscriptions of the project. it := client.Subscriptions(ctx) _ = it // TODO: iterate using Next. } func ExampleSubscriptionIterator_Next() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } // List all subscriptions of the project. it := client.Subscriptions(ctx) for { sub, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. 
} fmt.Println(sub) } } golang-google-cloud-0.9.0/pubsub/example_test.go000066400000000000000000000150351312234511600216700ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub_test import ( "fmt" "time" "cloud.google.com/go/pubsub" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func ExampleNewClient() { ctx := context.Background() _, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } // See the other examples to learn how to use the Client. } func ExampleClient_CreateTopic() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } // Create a new topic with the given name. topic, err := client.CreateTopic(ctx, "topicName") if err != nil { // TODO: Handle error. } _ = topic // TODO: use the topic. } func ExampleClient_CreateSubscription() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } // Create a new topic with the given name. topic, err := client.CreateTopic(ctx, "topicName") if err != nil { // TODO: Handle error. } // Create a new subscription to the previously created topic // with the given name. sub, err := client.CreateSubscription(ctx, "subName", pubsub.SubscriptionConfig{ Topic: topic, AckDeadline: 10 * time.Second, }) if err != nil { // TODO: Handle error. 
} _ = sub // TODO: use the subscription. } func ExampleTopic_Delete() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } topic := client.Topic("topicName") if err := topic.Delete(ctx); err != nil { // TODO: Handle error. } } func ExampleTopic_Exists() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } topic := client.Topic("topicName") ok, err := topic.Exists(ctx) if err != nil { // TODO: Handle error. } if !ok { // Topic doesn't exist. } } func ExampleTopic_Publish() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } topic := client.Topic("topicName") defer topic.Stop() var results []*pubsub.PublishResult r := topic.Publish(ctx, &pubsub.Message{ Data: []byte("hello world"), }) results = append(results, r) // Do other work ... for _, r := range results { id, err := r.Get(ctx) if err != nil { // TODO: Handle error. } fmt.Printf("Published a message with a message ID: %s\n", id) } } func ExampleTopic_Subscriptions() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } topic := client.Topic("topic-name") // List all subscriptions of the topic (maybe of multiple projects). for subs := topic.Subscriptions(ctx); ; { sub, err := subs.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } _ = sub // TODO: use the subscription. } } func ExampleSubscription_Delete() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } sub := client.Subscription("subName") if err := sub.Delete(ctx); err != nil { // TODO: Handle error. } } func ExampleSubscription_Exists() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. 
} sub := client.Subscription("subName") ok, err := sub.Exists(ctx) if err != nil { // TODO: Handle error. } if !ok { // Subscription doesn't exist. } } func ExampleSubscription_Config() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } sub := client.Subscription("subName") config, err := sub.Config(ctx) if err != nil { // TODO: Handle error. } fmt.Println(config) } func ExampleSubscription_Receive() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } sub := client.Subscription("subName") err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { // TODO: Handle message. // NOTE: May be called concurrently; synchronize access to shared memory. m.Ack() }) if err != context.Canceled { // TODO: Handle error. } } // This example shows how to configure keepalive so that unacknoweldged messages // expire quickly, allowing other subscribers to take them. func ExampleSubscription_Receive_maxExtension() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } sub := client.Subscription("subName") // This program is expected to process and acknowledge messages in 30 seconds. If // not, the Pub/Sub API will assume the message is not acknowledged. sub.ReceiveSettings.MaxExtension = 30 * time.Second err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { // TODO: Handle message. m.Ack() }) if err != context.Canceled { // TODO: Handle error. } } // This example shows how to throttle Subscription.Receive, which aims for high // throughput by default. By limiting the number of messages and/or bytes being // processed at once, you can bound your program's resource consumption. func ExampleSubscription_Receive_maxOutstanding() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. 
} sub := client.Subscription("subName") sub.ReceiveSettings.MaxOutstandingMessages = 5 sub.ReceiveSettings.MaxOutstandingBytes = 10e6 err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { // TODO: Handle message. m.Ack() }) if err != context.Canceled { // TODO: Handle error. } } func ExampleSubscription_ModifyPushConfig() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } sub := client.Subscription("subName") if err := sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"}); err != nil { // TODO: Handle error. } } golang-google-cloud-0.9.0/pubsub/example_topic_iterator_test.go000066400000000000000000000024421312234511600247750ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub_test import ( "fmt" "cloud.google.com/go/pubsub" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func ExampleClient_Topics() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } it := client.Topics(ctx) _ = it // TODO: iterate using Next. } func ExampleTopicIterator_Next() { ctx := context.Background() client, err := pubsub.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } // List all topics. 
it := client.Topics(ctx) for { t, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(t) } } golang-google-cloud-0.9.0/pubsub/fake_test.go000066400000000000000000000072311312234511600211420ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub // This file provides a fake/mock in-memory pubsub server. // (Really just a mock at the moment, but we hope to turn it into // more of a fake.) import ( "io" "sync" "time" "golang.org/x/net/context" "cloud.google.com/go/internal/testutil" pb "google.golang.org/genproto/googleapis/pubsub/v1" ) type fakeServer struct { pb.PublisherServer pb.SubscriberServer Addr string Acked map[string]bool // acked message IDs Deadlines map[string]int32 // deadlines by message ID pullResponses []*pullResponse wg sync.WaitGroup } type pullResponse struct { msgs []*pb.ReceivedMessage err error } func newFakeServer() (*fakeServer, error) { srv, err := testutil.NewServer() if err != nil { return nil, err } fake := &fakeServer{ Addr: srv.Addr, Acked: map[string]bool{}, Deadlines: map[string]int32{}, } pb.RegisterPublisherServer(srv.Gsrv, fake) pb.RegisterSubscriberServer(srv.Gsrv, fake) srv.Start() return fake, nil } // Each call to addStreamingPullMessages results in one StreamingPullResponse. 
func (s *fakeServer) addStreamingPullMessages(msgs []*pb.ReceivedMessage) { s.pullResponses = append(s.pullResponses, &pullResponse{msgs, nil}) } func (s *fakeServer) addStreamingPullError(err error) { s.pullResponses = append(s.pullResponses, &pullResponse{nil, err}) } func (s *fakeServer) wait() { s.wg.Wait() } func (s *fakeServer) StreamingPull(stream pb.Subscriber_StreamingPullServer) error { // Receive initial request. _, err := stream.Recv() if err == io.EOF { return nil } if err != nil { return err } // Consume and ignore subsequent requests. errc := make(chan error, 1) s.wg.Add(1) go func() { defer s.wg.Done() for { req, err := stream.Recv() if err != nil { errc <- err return } for _, id := range req.AckIds { s.Acked[id] = true } for i, id := range req.ModifyDeadlineAckIds { s.Deadlines[id] = req.ModifyDeadlineSeconds[i] } } }() // Send responses. for { if len(s.pullResponses) == 0 { // Nothing to send, so wait for the client to shut down the stream. err := <-errc // a real error, or at least EOF if err == io.EOF { return nil } return err } pr := s.pullResponses[0] s.pullResponses = s.pullResponses[1:] if pr.err != nil { // Add a slight delay to ensure the server receives any // messages en route from the client before shutting down the stream. // This reduces flakiness of tests involving retry. time.Sleep(100 * time.Millisecond) } if pr.err == io.EOF { return nil } if pr.err != nil { return pr.err } // Return any error from Recv. 
select { case err := <-errc: return err default: } res := &pb.StreamingPullResponse{ReceivedMessages: pr.msgs} if err := stream.Send(res); err != nil { return err } } } func (s *fakeServer) GetSubscription(ctx context.Context, req *pb.GetSubscriptionRequest) (*pb.Subscription, error) { return &pb.Subscription{ Name: req.Subscription, AckDeadlineSeconds: 10, PushConfig: &pb.PushConfig{}, }, nil } golang-google-cloud-0.9.0/pubsub/flow_controller.go000066400000000000000000000057041312234511600224120ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "golang.org/x/net/context" "golang.org/x/sync/semaphore" ) // flowController implements flow control for Subscription.Receive. type flowController struct { maxSize int // max total size of messages semCount, semSize *semaphore.Weighted // enforces max number and size of messages } // newFlowController creates a new flowController that ensures no more than // maxCount messages or maxSize bytes are outstanding at once. If maxCount or // maxSize is < 1, then an unlimited number of messages or bytes is permitted, // respectively. 
func newFlowController(maxCount, maxSize int) *flowController { fc := &flowController{ maxSize: maxSize, semCount: nil, semSize: nil, } if maxCount > 0 { fc.semCount = semaphore.NewWeighted(int64(maxCount)) } if maxSize > 0 { fc.semSize = semaphore.NewWeighted(int64(maxSize)) } return fc } // acquire blocks until one message of size bytes can proceed or ctx is done. // It returns nil in the first case, or ctx.Err() in the second. // // acquire allows large messages to proceed by treating a size greater than maxSize // as if it were equal to maxSize. func (f *flowController) acquire(ctx context.Context, size int) error { if f.semCount != nil { if err := f.semCount.Acquire(ctx, 1); err != nil { return err } } if f.semSize != nil { if err := f.semSize.Acquire(ctx, f.bound(size)); err != nil { if f.semCount != nil { f.semCount.Release(1) } return err } } return nil } // tryAcquire returns false if acquire would block. Otherwise, it behaves like // acquire and returns true. // // tryAcquire allows large messages to proceed by treating a size greater than // maxSize as if it were equal to maxSize. func (f *flowController) tryAcquire(size int) bool { if f.semCount != nil { if !f.semCount.TryAcquire(1) { return false } } if f.semSize != nil { if !f.semSize.TryAcquire(f.bound(size)) { if f.semCount != nil { f.semCount.Release(1) } return false } } return true } // release notes that one message of size bytes is no longer outstanding. func (f *flowController) release(size int) { if f.semCount != nil { f.semCount.Release(1) } if f.semSize != nil { f.semSize.Release(f.bound(size)) } } func (f *flowController) bound(size int) int64 { if size > f.maxSize { return int64(f.maxSize) } return int64(size) } golang-google-cloud-0.9.0/pubsub/flow_controller_test.go000066400000000000000000000137701312234511600234530ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "errors" "fmt" "sync/atomic" "testing" "time" "golang.org/x/net/context" "golang.org/x/sync/errgroup" ) func TestFlowControllerCancel(t *testing.T) { // Test canceling a flow controller's context. t.Parallel() fc := newFlowController(3, 10) if err := fc.acquire(context.Background(), 5); err != nil { t.Fatal(err) } // Experiment: a context that times out should always return an error. ctx, cancel := context.WithTimeout(context.Background(), 5*time.Millisecond) defer cancel() if err := fc.acquire(ctx, 6); err != context.DeadlineExceeded { t.Fatalf("got %v, expected DeadlineExceeded", err) } // Control: a context that is not done should always return nil. go func() { time.Sleep(5 * time.Millisecond) fc.release(5) }() if err := fc.acquire(context.Background(), 6); err != nil { t.Errorf("got %v, expected nil", err) } } func TestFlowControllerLargeRequest(t *testing.T) { // Large requests succeed, consuming the entire allotment. t.Parallel() fc := newFlowController(3, 10) err := fc.acquire(context.Background(), 11) if err != nil { t.Fatal(err) } } func TestFlowControllerNoStarve(t *testing.T) { // A large request won't starve, because the flowController is // (best-effort) FIFO. 
t.Parallel() ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() fc := newFlowController(10, 10) first := make(chan int) for i := 0; i < 20; i++ { go func() { for { if err := fc.acquire(ctx, 1); err != nil { if err != context.Canceled { t.Error(err) } return } select { case first <- 1: default: } fc.release(1) } }() } <-first // Wait until the flowController's state is non-zero. if err := fc.acquire(ctx, 11); err != nil { t.Errorf("got %v, want nil", err) } } func TestFlowControllerSaturation(t *testing.T) { t.Parallel() const ( maxCount = 6 maxSize = 10 ) for _, test := range []struct { acquireSize int wantCount, wantSize int64 }{ { // Many small acquires cause the flow controller to reach its max count. acquireSize: 1, wantCount: 6, wantSize: 6, }, { // Five acquires of size 2 will cause the flow controller to reach its max size, // but not its max count. acquireSize: 2, wantCount: 5, wantSize: 10, }, { // If the requests are the right size (relatively prime to maxSize), // the flow controller will not saturate on size. (In this case, not on count either.) acquireSize: 3, wantCount: 3, wantSize: 9, }, } { fc := newFlowController(maxCount, maxSize) // Atomically track flow controller state. var curCount, curSize int64 success := errors.New("") // Time out if wantSize or wantCount is never reached. ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() g, ctx := errgroup.WithContext(ctx) for i := 0; i < 10; i++ { g.Go(func() error { var hitCount, hitSize bool // Run at least until we hit the expected values, and at least // for enough iterations to exceed them if the flow controller // is broken. 
for i := 0; i < 100 || !hitCount || !hitSize; i++ { select { case <-ctx.Done(): return ctx.Err() default: } if err := fc.acquire(ctx, test.acquireSize); err != nil { return err } c := atomic.AddInt64(&curCount, 1) if c > test.wantCount { return fmt.Errorf("count %d exceeds want %d", c, test.wantCount) } if c == test.wantCount { hitCount = true } s := atomic.AddInt64(&curSize, int64(test.acquireSize)) if s > test.wantSize { return fmt.Errorf("size %d exceeds want %d", s, test.wantSize) } if s == test.wantSize { hitSize = true } time.Sleep(5 * time.Millisecond) // Let other goroutines make progress. if atomic.AddInt64(&curCount, -1) < 0 { return errors.New("negative count") } if atomic.AddInt64(&curSize, -int64(test.acquireSize)) < 0 { return errors.New("negative size") } fc.release(test.acquireSize) } return success }) } if err := g.Wait(); err != success { t.Errorf("%+v: %v", test, err) continue } } } func TestFlowControllerTryAcquire(t *testing.T) { fc := newFlowController(3, 10) // Successfully tryAcquire 4 bytes. if !fc.tryAcquire(4) { t.Error("got false, wanted true") } // Fail to tryAcquire 7 bytes. if fc.tryAcquire(7) { t.Error("got true, wanted false") } // Successfully tryAcquire 6 byte. if !fc.tryAcquire(6) { t.Error("got false, wanted true") } } func TestFlowControllerUnboundedCount(t *testing.T) { ctx := context.Background() fc := newFlowController(0, 10) // Successfully acquire 4 bytes. if err := fc.acquire(ctx, 4); err != nil { t.Errorf("got %v, wanted no error", err) } // Successfully tryAcquire 4 bytes. if !fc.tryAcquire(4) { t.Error("got false, wanted true") } // Fail to tryAcquire 3 bytes. if fc.tryAcquire(3) { t.Error("got true, wanted false") } } func TestFlowControllerUnboundedBytes(t *testing.T) { ctx := context.Background() fc := newFlowController(2, 0) // Successfully acquire 4GB. if err := fc.acquire(ctx, 4e9); err != nil { t.Errorf("got %v, wanted no error", err) } // Successfully tryAcquire 4GB bytes. 
if !fc.tryAcquire(4e9) { t.Error("got false, wanted true") } // Fail to tryAcquire a third message. if fc.tryAcquire(3) { t.Error("got true, wanted false") } } golang-google-cloud-0.9.0/pubsub/integration_test.go000066400000000000000000000172171312234511600225640ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "fmt" "reflect" "testing" "time" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "cloud.google.com/go/iam" "cloud.google.com/go/internal" "cloud.google.com/go/internal/testutil" "google.golang.org/api/iterator" "google.golang.org/api/option" ) // messageData is used to hold the contents of a message so that it can be compared against the contents // of another message without regard to irrelevant fields. type messageData struct { ID string Data []byte Attributes map[string]string } func extractMessageData(m *Message) *messageData { return &messageData{ ID: m.ID, Data: m.Data, Attributes: m.Attributes, } } func TestAll(t *testing.T) { t.Parallel() if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform) if ts == nil { t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") } now := time.Now() topicName := fmt.Sprintf("topic-%d", now.Unix()) subName := fmt.Sprintf("subscription-%d", now.Unix()) client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts)) if err != nil { t.Fatalf("Creating client error: %v", err) } defer client.Close() var topic *Topic if topic, err = client.CreateTopic(ctx, topicName); err != nil { t.Errorf("CreateTopic error: %v", err) } defer topic.Stop() var sub *Subscription if sub, err = client.CreateSubscription(ctx, subName, SubscriptionConfig{Topic: topic}); err != nil { t.Errorf("CreateSub error: %v", err) } exists, err := topic.Exists(ctx) if err != nil { t.Fatalf("TopicExists error: %v", err) } if !exists { t.Errorf("topic %s should exist, but it doesn't", topic) } exists, err = sub.Exists(ctx) if err != nil { t.Fatalf("SubExists error: %v", err) } if !exists { t.Errorf("subscription %s should exist, but it doesn't", subName) } msgs := []*Message{} for i := 0; i < 10; i++ { text := fmt.Sprintf("a message with an index %d", i) attrs := make(map[string]string) attrs["foo"] = "bar" msgs = append(msgs, &Message{ Data: []byte(text), Attributes: attrs, }) } // Publish the messages. type pubResult struct { m *Message r *PublishResult } var rs []pubResult for _, m := range msgs { r := topic.Publish(ctx, m) rs = append(rs, pubResult{m, r}) } want := make(map[string]*messageData) for _, res := range rs { id, err := res.r.Get(ctx) if err != nil { t.Fatal(err) } md := extractMessageData(res.m) md.ID = id want[md.ID] = md } // Use a timeout to ensure that Pull does not block indefinitely if there are unexpectedly few messages available. 
timeoutCtx, _ := context.WithTimeout(ctx, time.Minute) gotMsgs, err := pullN(timeoutCtx, sub, len(want), func(ctx context.Context, m *Message) { m.Ack() }) if err != nil { t.Fatalf("Pull: %v", err) } got := make(map[string]*messageData) for _, m := range gotMsgs { md := extractMessageData(m) got[md.ID] = md } if !reflect.DeepEqual(got, want) { t.Errorf("messages: got: %v ; want: %v", got, want) } if msg, ok := testIAM(ctx, topic.IAM(), "pubsub.topics.get"); !ok { t.Errorf("topic IAM: %s", msg) } if msg, ok := testIAM(ctx, sub.IAM(), "pubsub.subscriptions.get"); !ok { t.Errorf("sub IAM: %s", msg) } snap, err := sub.createSnapshot(ctx, "") if err != nil { t.Fatalf("CreateSnapshot error: %v", err) } timeoutCtx, _ = context.WithTimeout(ctx, time.Minute) err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) { snapIt := client.snapshots(timeoutCtx) for { s, err := snapIt.Next() if err == nil && s.name == snap.name { return true, nil } if err == iterator.Done { return false, fmt.Errorf("cannot find snapshot: %q", snap.name) } if err != nil { return false, err } } }) if err != nil { t.Error(err) } err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) { err := sub.seekToSnapshot(timeoutCtx, snap.snapshot) return err == nil, err }) if err != nil { t.Error(err) } err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) { err := sub.seekToTime(timeoutCtx, time.Now()) return err == nil, err }) if err != nil { t.Error(err) } err = internal.Retry(timeoutCtx, gax.Backoff{}, func() (bool, error) { snapHandle := client.snapshot(snap.ID()) err := snapHandle.delete(timeoutCtx) return err == nil, err }) if err != nil { t.Error(err) } if err := sub.Delete(ctx); err != nil { t.Errorf("DeleteSub error: %v", err) } if err := topic.Delete(ctx); err != nil { t.Errorf("DeleteTopic error: %v", err) } } // IAM tests. // NOTE: for these to succeed, the test runner identity must have the Pub/Sub Admin or Owner roles. 
// To set, visit https://console.developers.google.com, select "IAM & Admin" from the top-left // menu, choose the account, click the Roles dropdown, and select "Pub/Sub > Pub/Sub Admin". // TODO(jba): move this to a testing package within cloud.google.com/iam, so we can re-use it. func testIAM(ctx context.Context, h *iam.Handle, permission string) (msg string, ok bool) { // Attempting to add an non-existent identity (e.g. "alice@example.com") causes the service // to return an internal error, so use a real identity. const member = "domain:google.com" var policy *iam.Policy var err error if policy, err = h.Policy(ctx); err != nil { return fmt.Sprintf("Policy: %v", err), false } // The resource is new, so the policy should be empty. if got := policy.Roles(); len(got) > 0 { return fmt.Sprintf("initially: got roles %v, want none", got), false } // Add a member, set the policy, then check that the member is present. policy.Add(member, iam.Viewer) if err := h.SetPolicy(ctx, policy); err != nil { return fmt.Sprintf("SetPolicy: %v", err), false } if policy, err = h.Policy(ctx); err != nil { return fmt.Sprintf("Policy: %v", err), false } if got, want := policy.Members(iam.Viewer), []string{member}; !reflect.DeepEqual(got, want) { return fmt.Sprintf("after Add: got %v, want %v", got, want), false } // Now remove that member, set the policy, and check that it's empty again. policy.Remove(member, iam.Viewer) if err := h.SetPolicy(ctx, policy); err != nil { return fmt.Sprintf("SetPolicy: %v", err), false } if policy, err = h.Policy(ctx); err != nil { return fmt.Sprintf("Policy: %v", err), false } if got := policy.Roles(); len(got) > 0 { return fmt.Sprintf("after Remove: got roles %v, want none", got), false } // Call TestPermissions. // Because this user is an admin, it has all the permissions on the // resource type. Note: the service fails if we ask for inapplicable // permissions (e.g. 
a subscription permission on a topic, or a topic // create permission on a topic rather than its parent). wantPerms := []string{permission} gotPerms, err := h.TestPermissions(ctx, wantPerms) if err != nil { return fmt.Sprintf("TestPermissions: %v", err), false } if !reflect.DeepEqual(gotPerms, wantPerms) { return fmt.Sprintf("TestPermissions: got %v, want %v", gotPerms, wantPerms), false } return "", true } golang-google-cloud-0.9.0/pubsub/iterator.go000066400000000000000000000351441312234511600210320ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "log" "sync" "time" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/support/bundler" pb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) type messageIterator struct { impl interface { next() (*Message, error) stop() } } type pollingMessageIterator struct { // kaTicker controls how often we send an ack deadline extension request. kaTicker *time.Ticker // ackTicker controls how often we acknowledge a batch of messages. ackTicker *time.Ticker ka *keepAlive acker *acker nacker *bundler.Bundler puller *puller // mu ensures that cleanup only happens once, and concurrent Stop // invocations block until cleanup completes. mu sync.Mutex // closed is used to signal that Stop has been called. 
closed chan struct{} } var useStreamingPull = false // newMessageIterator starts a new messageIterator. Stop must be called on the messageIterator // when it is no longer needed. // subName is the full name of the subscription to pull messages from. // ctx is the context to use for acking messages and extending message deadlines. func newMessageIterator(ctx context.Context, s service, subName string, po *pullOptions) *messageIterator { if !useStreamingPull { return &messageIterator{ impl: newPollingMessageIterator(ctx, s, subName, po), } } sp := s.newStreamingPuller(ctx, subName, int32(po.ackDeadline.Seconds())) err := sp.open() if grpc.Code(err) == codes.Unimplemented { log.Println("pubsub: streaming pull unimplemented; falling back to legacy pull") return &messageIterator{ impl: newPollingMessageIterator(ctx, s, subName, po), } } // TODO(jba): handle other non-nil error? log.Println("using streaming pull") return &messageIterator{ impl: newStreamingMessageIterator(ctx, sp, po), } } func newPollingMessageIterator(ctx context.Context, s service, subName string, po *pullOptions) *pollingMessageIterator { // TODO: make kaTicker frequency more configurable. // (ackDeadline - 5s) is a reasonable default for now, because the minimum ack period is 10s. This gives us 5s grace. keepAlivePeriod := po.ackDeadline - 5*time.Second kaTicker := time.NewTicker(keepAlivePeriod) // Stopped in it.Stop // TODO: make ackTicker more configurable. Something less than // kaTicker is a reasonable default (there's no point extending // messages when they could be acked instead). 
ackTicker := time.NewTicker(keepAlivePeriod / 2) // Stopped in it.Stop ka := &keepAlive{ s: s, Ctx: ctx, Sub: subName, ExtensionTick: kaTicker.C, Deadline: po.ackDeadline, MaxExtension: po.maxExtension, } ack := &acker{ s: s, Ctx: ctx, Sub: subName, AckTick: ackTicker.C, Notify: ka.Remove, } nacker := bundler.NewBundler("", func(ackIDs interface{}) { // NACK by setting the ack deadline to zero, to make the message // immediately available for redelivery. // // If the RPC fails, nothing we can do about it. In the worst case, the // deadline for these messages will expire and they will still get // redelivered. _ = s.modifyAckDeadline(ctx, subName, 0, ackIDs.([]string)) }) nacker.DelayThreshold = keepAlivePeriod / 10 // nack promptly nacker.BundleCountThreshold = 10 pull := newPuller(s, subName, ctx, po.maxPrefetch, ka.Add, ka.Remove) ka.Start() ack.Start() return &pollingMessageIterator{ kaTicker: kaTicker, ackTicker: ackTicker, ka: ka, acker: ack, nacker: nacker, puller: pull, closed: make(chan struct{}), } } // Next returns the next Message to be processed. The caller must call // Message.Done when finished with it. // Once Stop has been called, calls to Next will return iterator.Done. func (it *messageIterator) Next() (*Message, error) { return it.impl.next() } func (it *pollingMessageIterator) next() (*Message, error) { m, err := it.puller.Next() if err == nil { m.doneFunc = it.done return m, nil } select { // If Stop has been called, we return Done regardless the value of err. case <-it.closed: return nil, iterator.Done default: return nil, err } } // Client code must call Stop on a messageIterator when finished with it. // Stop will block until Done has been called on all Messages that have been // returned by Next, or until the context with which the messageIterator was created // is cancelled or exceeds its deadline. // Stop need only be called once, but may be called multiple times from // multiple goroutines. 
func (it *messageIterator) Stop() { it.impl.stop() } func (it *pollingMessageIterator) stop() { it.mu.Lock() defer it.mu.Unlock() select { case <-it.closed: // Cleanup has already been performed. return default: } // We close this channel before calling it.puller.Stop to ensure that we // reliably return iterator.Done from Next. close(it.closed) // Stop the puller. Once this completes, no more messages will be added // to it.ka. it.puller.Stop() // Start acking messages as they arrive, ignoring ackTicker. This will // result in it.ka.Stop, below, returning as soon as possible. it.acker.FastMode() // This will block until // (a) it.ka.Ctx is done, or // (b) all messages have been removed from keepAlive. // (b) will happen once all outstanding messages have been either ACKed or NACKed. it.ka.Stop() // There are no more live messages, so kill off the acker. it.acker.Stop() it.nacker.Flush() it.kaTicker.Stop() it.ackTicker.Stop() } func (it *pollingMessageIterator) done(ackID string, ack bool) { if ack { it.acker.Ack(ackID) // There's no need to call it.ka.Remove here, as acker will // call it via its Notify function. 
} else { it.ka.Remove(ackID) _ = it.nacker.Add(ackID, len(ackID)) // ignore error; this is just an optimization } } type streamingMessageIterator struct { ctx context.Context po *pullOptions sp *streamingPuller kaTicker *time.Ticker // keep-alive (deadline extensions) ackTicker *time.Ticker // message acks nackTicker *time.Ticker // message nacks (more frequent than acks) failed chan struct{} // closed on stream error stopped chan struct{} // closed when Stop is called drained chan struct{} // closed when stopped && no more pending messages msgc chan *Message wg sync.WaitGroup mu sync.Mutex keepAliveDeadlines map[string]time.Time pendingReq *pb.StreamingPullRequest err error // error from stream failure } func newStreamingMessageIterator(ctx context.Context, sp *streamingPuller, po *pullOptions) *streamingMessageIterator { // TODO: make kaTicker frequency more configurable. (ackDeadline - 5s) is a // reasonable default for now, because the minimum ack period is 10s. This // gives us 5s grace. keepAlivePeriod := po.ackDeadline - 5*time.Second kaTicker := time.NewTicker(keepAlivePeriod) // TODO: make ackTicker more configurable. Something less than // kaTicker is a reasonable default (there's no point extending // messages when they could be acked instead). ackTicker := time.NewTicker(keepAlivePeriod / 2) nackTicker := time.NewTicker(keepAlivePeriod / 10) it := &streamingMessageIterator{ ctx: ctx, sp: sp, po: po, kaTicker: kaTicker, ackTicker: ackTicker, nackTicker: nackTicker, failed: make(chan struct{}), stopped: make(chan struct{}), drained: make(chan struct{}), // use maxPrefetch as the channel's buffer size. 
msgc: make(chan *Message, po.maxPrefetch), keepAliveDeadlines: map[string]time.Time{}, pendingReq: &pb.StreamingPullRequest{}, } it.wg.Add(2) go it.receiver() go it.sender() return it } func (it *streamingMessageIterator) next() (*Message, error) { // If ctx has been cancelled or the iterator is done, return straight // away (even if there are buffered messages available). select { case <-it.ctx.Done(): return nil, it.ctx.Err() case <-it.failed: break case <-it.stopped: break default: // Wait for a message, but also for one of the above conditions. select { case msg := <-it.msgc: // Since active select cases are chosen at random, this can return // nil (from the channel close) even if it.failed or it.stopped is // closed. if msg == nil { break } msg.doneFunc = it.done return msg, nil case <-it.ctx.Done(): return nil, it.ctx.Err() case <-it.failed: break case <-it.stopped: break } } // Here if the iterator is done. it.mu.Lock() defer it.mu.Unlock() return nil, it.err } func (it *streamingMessageIterator) stop() { it.mu.Lock() select { case <-it.stopped: it.mu.Unlock() it.wg.Wait() return default: close(it.stopped) } if it.err == nil { it.err = iterator.Done } // Before reading from the channel, see if we're already drained. it.checkDrained() it.mu.Unlock() // Nack all the pending messages. // Grab the lock separately for each message to allow the receiver // and sender goroutines to make progress. // Why this will eventually terminate: // - If the receiver is not blocked on a stream Recv, then // it will write all the messages it has received to the channel, // then exit, closing the channel. // - If the receiver is blocked, then this loop will eventually // nack all the messages in the channel. Once done is called // on the remaining messages, the iterator will be marked as drained, // which will trigger the sender to terminate. When it does, it // performs a CloseSend on the stream, which will result in the blocked // stream Recv returning. 
for m := range it.msgc { it.mu.Lock() delete(it.keepAliveDeadlines, m.ackID) it.addDeadlineMod(m.ackID, 0) it.checkDrained() it.mu.Unlock() } it.wg.Wait() } // checkDrained closes the drained channel if the iterator has been stopped and all // pending messages have either been n/acked or expired. // // Called with the lock held. func (it *streamingMessageIterator) checkDrained() { select { case <-it.drained: return default: } select { case <-it.stopped: if len(it.keepAliveDeadlines) == 0 { close(it.drained) } default: } } // Called when a message is acked/nacked. func (it *streamingMessageIterator) done(ackID string, ack bool) { it.mu.Lock() defer it.mu.Unlock() delete(it.keepAliveDeadlines, ackID) if ack { it.pendingReq.AckIds = append(it.pendingReq.AckIds, ackID) } else { it.addDeadlineMod(ackID, 0) // Nack indicated by modifying the deadline to zero. } it.checkDrained() } // addDeadlineMod adds the ack ID to the pending request with the given deadline. // // Called with the lock held. func (it *streamingMessageIterator) addDeadlineMod(ackID string, deadlineSecs int32) { pr := it.pendingReq pr.ModifyDeadlineAckIds = append(pr.ModifyDeadlineAckIds, ackID) pr.ModifyDeadlineSeconds = append(pr.ModifyDeadlineSeconds, deadlineSecs) } // fail is called when a stream method returns a permanent error. func (it *streamingMessageIterator) fail(err error) { it.mu.Lock() if it.err == nil { it.err = err close(it.failed) } it.mu.Unlock() } // receiver runs in a goroutine and handles all receives from the stream. func (it *streamingMessageIterator) receiver() { defer it.wg.Done() defer close(it.msgc) for { // Stop retrieving messages if the context is done, the stream // failed, or the iterator's Stop method was called. select { case <-it.ctx.Done(): return case <-it.failed: return case <-it.stopped: return default: } // Receive messages from stream. This may block indefinitely. 
msgs, err := it.sp.fetchMessages() // The streamingPuller handles retries, so any error here // is fatal to the iterator. if err != nil { it.fail(err) return } // We received some messages. Remember them so we can // keep them alive. deadline := time.Now().Add(it.po.maxExtension) it.mu.Lock() for _, m := range msgs { it.keepAliveDeadlines[m.ackID] = deadline } it.mu.Unlock() // Deliver the messages to the channel. for _, m := range msgs { select { case <-it.ctx.Done(): return case <-it.failed: return // Don't return if stopped. We want to send the remaining // messages on the channel, where they will be nacked. case it.msgc <- m: } } } } // sender runs in a goroutine and handles all sends to the stream. func (it *streamingMessageIterator) sender() { defer it.wg.Done() defer it.kaTicker.Stop() defer it.ackTicker.Stop() defer it.nackTicker.Stop() defer it.sp.closeSend() done := false for !done { send := false select { case <-it.ctx.Done(): // Context canceled or timed out: stop immediately, without // another RPC. return case <-it.failed: // Stream failed: nothing to do, so stop immediately. return case <-it.drained: // All outstanding messages have been marked done: // nothing left to do except send the final request. it.mu.Lock() send = (len(it.pendingReq.AckIds) > 0 || len(it.pendingReq.ModifyDeadlineAckIds) > 0) done = true case <-it.kaTicker.C: it.mu.Lock() send = it.handleKeepAlives() case <-it.nackTicker.C: it.mu.Lock() send = (len(it.pendingReq.ModifyDeadlineAckIds) > 0) case <-it.ackTicker.C: it.mu.Lock() send = (len(it.pendingReq.AckIds) > 0) } // Lock is held here. if send { req := it.pendingReq it.pendingReq = &pb.StreamingPullRequest{} it.mu.Unlock() err := it.sp.send(req) if err != nil { // The streamingPuller handles retries, so any error here // is fatal to the iterator. it.fail(err) return } } else { it.mu.Unlock() } } } // handleKeepAlives modifies the pending request to include deadline extensions // for live messages. 
It also purges expired messages. It reports whether // there were any live messages. // // Called with the lock held. func (it *streamingMessageIterator) handleKeepAlives() bool { live, expired := getKeepAliveAckIDs(it.keepAliveDeadlines) for _, e := range expired { delete(it.keepAliveDeadlines, e) } dl := trunc32(int64(it.po.ackDeadline.Seconds())) for _, m := range live { it.addDeadlineMod(m, dl) } it.checkDrained() return len(live) > 0 } golang-google-cloud-0.9.0/pubsub/iterator_test.go000066400000000000000000000214521312234511600220660ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package pubsub import ( "fmt" "reflect" "testing" "time" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func TestReturnsDoneOnStop(t *testing.T) { if useStreamingPull { t.Skip("iterator tests are for polling pull only") } type testCase struct { abort func(*messageIterator, context.CancelFunc) want error } for _, tc := range []testCase{ { abort: func(it *messageIterator, cancel context.CancelFunc) { it.Stop() }, want: iterator.Done, }, { abort: func(it *messageIterator, cancel context.CancelFunc) { cancel() }, want: context.Canceled, }, { abort: func(it *messageIterator, cancel context.CancelFunc) { it.Stop() cancel() }, want: iterator.Done, }, { abort: func(it *messageIterator, cancel context.CancelFunc) { cancel() it.Stop() }, want: iterator.Done, }, } { s := &blockingFetch{} ctx, cancel := context.WithCancel(context.Background()) it := newMessageIterator(ctx, s, "subname", &pullOptions{ackDeadline: time.Second * 10, maxExtension: time.Hour}) defer it.Stop() tc.abort(it, cancel) _, err := it.Next() if err != tc.want { t.Errorf("iterator Next error after abort: got:\n%v\nwant:\n%v", err, tc.want) } } } // blockingFetch implements message fetching by not returning until its context is cancelled. type blockingFetch struct { service } func (s *blockingFetch) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) { <-ctx.Done() return nil, ctx.Err() } func (s *blockingFetch) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller { return nil } // justInTimeFetch simulates the situation where the iterator is aborted just after the fetch RPC // succeeds, so the rest of puller.Next will continue to execute and return sucessfully. 
type justInTimeFetch struct { service } func (s *justInTimeFetch) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) { <-ctx.Done() // The context was cancelled, but let's pretend that this happend just after our RPC returned. var result []*Message for i := 0; i < int(maxMessages); i++ { val := fmt.Sprintf("msg%v", i) result = append(result, &Message{Data: []byte(val), ackID: val}) } return result, nil } func (s *justInTimeFetch) splitAckIDs(ids []string) ([]string, []string) { return nil, nil } func (s *justInTimeFetch) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error { return nil } func (s *justInTimeFetch) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller { return nil } func TestAfterAbortReturnsNoMoreThanOneMessage(t *testing.T) { // Each test case is excercised by making two concurrent blocking calls on a // messageIterator, and then aborting the iterator. // The result should be one call to Next returning a message, and the other returning an error. t.Skip(`This test has subtle timing dependencies, making it flaky. It is not worth fixing because iterators will be removed shortly.`) type testCase struct { abort func(*messageIterator, context.CancelFunc) // want is the error that should be returned from one Next invocation. 
want error } for n := 1; n < 3; n++ { for _, tc := range []testCase{ { abort: func(it *messageIterator, cancel context.CancelFunc) { it.Stop() }, want: iterator.Done, }, { abort: func(it *messageIterator, cancel context.CancelFunc) { cancel() }, want: context.Canceled, }, { abort: func(it *messageIterator, cancel context.CancelFunc) { it.Stop() cancel() }, want: iterator.Done, }, { abort: func(it *messageIterator, cancel context.CancelFunc) { cancel() it.Stop() }, want: iterator.Done, }, } { s := &justInTimeFetch{} ctx, cancel := context.WithCancel(context.Background()) // if maxPrefetch == 1, there will be no messages in the puller buffer when Next is invoked the second time. // if maxPrefetch == 2, there will be 1 message in the puller buffer when Next is invoked the second time. po := &pullOptions{ ackDeadline: time.Second * 10, maxExtension: time.Hour, maxPrefetch: int32(n), } it := newMessageIterator(ctx, s, "subname", po) defer it.Stop() type result struct { m *Message err error } results := make(chan *result, 2) for i := 0; i < 2; i++ { go func() { m, err := it.Next() results <- &result{m, err} if err == nil { m.Nack() } }() } // Wait for goroutines to block on it.Next(). time.Sleep(50 * time.Millisecond) tc.abort(it, cancel) result1 := <-results result2 := <-results // There should be one error result, and one non-error result. // Make result1 be the non-error result. 
if result1.err != nil { result1, result2 = result2, result1 } if string(result1.m.Data) != "msg0" { t.Errorf("After abort, got message: %v, want %v", result1.m.Data, "msg0") } if result1.err != nil { t.Errorf("After abort, got : %v, want nil", result1.err) } if result2.m != nil { t.Errorf("After abort, got message: %v, want nil", result2.m) } if result2.err != tc.want { t.Errorf("After abort, got err: %v, want %v", result2.err, tc.want) } } } } type fetcherServiceWithModifyAckDeadline struct { fetcherService events chan string } func (f *fetcherServiceWithModifyAckDeadline) modifyAckDeadline(_ context.Context, _ string, d time.Duration, ids []string) error { // Different versions of Go use different representations for time.Duration(0). var ds string if d == 0 { ds = "0s" } else { ds = d.String() } f.events <- fmt.Sprintf("modAck(%v, %s)", ids, ds) return nil } func (f *fetcherServiceWithModifyAckDeadline) splitAckIDs(ackIDs []string) ([]string, []string) { return ackIDs, nil } func (f *fetcherServiceWithModifyAckDeadline) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller { return nil } func TestMultipleStopCallsBlockUntilMessageDone(t *testing.T) { t.Skip(`This test has subtle timing dependencies, making it flaky. 
It is not worth fixing because iterators will be removed shortly.`) events := make(chan string, 3) s := &fetcherServiceWithModifyAckDeadline{ fetcherService{ results: []fetchResult{ { msgs: []*Message{{ackID: "a"}, {ackID: "b"}}, }, }, }, events, } ctx := context.Background() it := newMessageIterator(ctx, s, "subname", &pullOptions{ackDeadline: time.Second * 10, maxExtension: 0}) m, err := it.Next() if err != nil { t.Errorf("error calling Next: %v", err) } go func() { it.Stop() events <- "stopped" }() go func() { it.Stop() events <- "stopped" }() select { case <-events: t.Fatal("Stop is not blocked") case <-time.After(100 * time.Millisecond): } m.Nack() got := []string{<-events, <-events, <-events} want := []string{"modAck([a], 0s)", "stopped", "stopped"} if !reflect.DeepEqual(got, want) { t.Errorf("stopping iterator, got: %v ; want: %v", got, want) } // The iterator is stopped, so should not return another message. m, err = it.Next() if m != nil { t.Errorf("message got: %v ; want: nil", m) } if err != iterator.Done { t.Errorf("err got: %v ; want: %v", err, iterator.Done) } } func TestFastNack(t *testing.T) { if useStreamingPull { t.Skip("iterator tests are for polling pull only") } events := make(chan string, 3) s := &fetcherServiceWithModifyAckDeadline{ fetcherService{ results: []fetchResult{ { msgs: []*Message{{ackID: "a"}, {ackID: "b"}}, }, }, }, events, } ctx := context.Background() it := newMessageIterator(ctx, s, "subname", &pullOptions{ ackDeadline: time.Second * 6, maxExtension: time.Second * 10, }) // Get both messages. _, err := it.Next() if err != nil { t.Errorf("error calling Next: %v", err) } m2, err := it.Next() if err != nil { t.Errorf("error calling Next: %v", err) } // Ignore the first, nack the second. m2.Nack() got := []string{<-events, <-events} // The nack should happen before the deadline extension. 
want := []string{"modAck([b], 0s)", "modAck([a], 6s)"} if !reflect.DeepEqual(got, want) { t.Errorf("got: %v ; want: %v", got, want) } } golang-google-cloud-0.9.0/pubsub/keepalive.go000066400000000000000000000122421312234511600211400ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "sync" "time" "golang.org/x/net/context" ) // keepAlive keeps track of which Messages need to have their deadline extended, and // periodically extends them. // Messages are tracked by Ack ID. type keepAlive struct { s service Ctx context.Context // The context to use when extending deadlines. Sub string // The full name of the subscription. ExtensionTick <-chan time.Time // ExtensionTick supplies the frequency with which to make extension requests. Deadline time.Duration // How long to extend messages for each time they are extended. Should be greater than ExtensionTick frequency. MaxExtension time.Duration // How long to keep extending each message's ack deadline before automatically removing it. mu sync.Mutex // key: ackID; value: time at which ack deadline extension should cease. items map[string]time.Time dr drain wg sync.WaitGroup } // Start initiates the deadline extension loop. Stop must be called once keepAlive is no longer needed. 
func (ka *keepAlive) Start() { ka.items = make(map[string]time.Time) ka.dr = drain{Drained: make(chan struct{})} ka.wg.Add(1) go func() { defer ka.wg.Done() for { select { case <-ka.Ctx.Done(): // Don't bother waiting for items to be removed: we can't extend them any more. return case <-ka.dr.Drained: return case <-ka.ExtensionTick: live, expired := ka.getAckIDs() ka.wg.Add(1) go func() { defer ka.wg.Done() ka.extendDeadlines(live) }() for _, id := range expired { ka.Remove(id) } } } }() } // Add adds an ack id to be kept alive. // It should not be called after Stop. func (ka *keepAlive) Add(ackID string) { ka.mu.Lock() defer ka.mu.Unlock() ka.items[ackID] = time.Now().Add(ka.MaxExtension) ka.dr.SetPending(true) } // Remove removes ackID from the list to be kept alive. func (ka *keepAlive) Remove(ackID string) { ka.mu.Lock() defer ka.mu.Unlock() // Note: If users NACKs a message after it has been removed due to // expiring, Remove will be called twice with same ack id. This is OK. delete(ka.items, ackID) ka.dr.SetPending(len(ka.items) != 0) } // Stop waits until all added ackIDs have been removed, and cleans up resources. // Stop may only be called once. func (ka *keepAlive) Stop() { ka.mu.Lock() ka.dr.Drain() ka.mu.Unlock() ka.wg.Wait() } // getAckIDs returns the set of ackIDs that are being kept alive. // The set is divided into two lists: one with IDs that should continue to be kept alive, // and the other with IDs that should be dropped. 
func (ka *keepAlive) getAckIDs() (live, expired []string) { ka.mu.Lock() defer ka.mu.Unlock() return getKeepAliveAckIDs(ka.items) } func getKeepAliveAckIDs(items map[string]time.Time) (live, expired []string) { now := time.Now() for id, expiry := range items { if expiry.Before(now) { expired = append(expired, id) } else { live = append(live, id) } } return live, expired } const maxExtensionAttempts = 2 func (ka *keepAlive) extendDeadlines(ackIDs []string) { head, tail := ka.s.splitAckIDs(ackIDs) for len(head) > 0 { for i := 0; i < maxExtensionAttempts; i++ { if ka.s.modifyAckDeadline(ka.Ctx, ka.Sub, ka.Deadline, head) == nil { break } } // NOTE: Messages whose deadlines we fail to extend will // eventually be redelivered and this is a documented behaviour // of the API. // // NOTE: If we fail to extend deadlines here, this // implementation will continue to attempt extending the // deadlines for those ack IDs the next time the extension // ticker ticks. By then the deadline will have expired. // Re-extending them is harmless, however. // // TODO: call Remove for ids which fail to be extended. head, tail = ka.s.splitAckIDs(tail) } } // A drain (once started) indicates via a channel when there is no work pending. type drain struct { started bool pending bool // Drained is closed once there are no items outstanding if Drain has been called. Drained chan struct{} } // Drain starts the drain process. This cannot be undone. func (d *drain) Drain() { d.started = true d.closeIfDrained() } // SetPending sets whether there is work pending or not. It may be called multiple times before or after Drain. func (d *drain) SetPending(pending bool) { d.pending = pending d.closeIfDrained() } func (d *drain) closeIfDrained() { if !d.pending && d.started { // Check to see if d.Drained is closed before closing it. // This allows SetPending(false) to be safely called multiple times. 
select { case <-d.Drained: default: close(d.Drained) } } } golang-google-cloud-0.9.0/pubsub/keepalive_test.go000066400000000000000000000167221312234511600222060ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "errors" "reflect" "sort" "testing" "time" "golang.org/x/net/context" ) func TestKeepAliveExtendsDeadline(t *testing.T) { ticker := make(chan time.Time) deadline := time.Nanosecond * 15 s := &testService{modDeadlineCalled: make(chan modDeadlineCall)} checkModDeadlineCall := func(ackIDs []string) { got := <-s.modDeadlineCalled sort.Strings(got.ackIDs) want := modDeadlineCall{ subName: "subname", deadline: deadline, ackIDs: ackIDs, } if !reflect.DeepEqual(got, want) { t.Errorf("keepalive: got:\n%v\nwant:\n%v", got, want) } } ka := &keepAlive{ s: s, Ctx: context.Background(), Sub: "subname", ExtensionTick: ticker, Deadline: deadline, MaxExtension: time.Hour, } ka.Start() ka.Add("a") ka.Add("b") ticker <- time.Time{} checkModDeadlineCall([]string{"a", "b"}) ka.Add("c") ka.Remove("b") ticker <- time.Time{} checkModDeadlineCall([]string{"a", "c"}) ka.Remove("a") ka.Remove("c") ka.Add("d") ticker <- time.Time{} checkModDeadlineCall([]string{"d"}) ka.Remove("d") ka.Stop() } func TestKeepAliveStopsWhenNoItem(t *testing.T) { ticker := make(chan time.Time) stopped := make(chan bool) s := &testService{modDeadlineCalled: make(chan modDeadlineCall, 3)} ka := &keepAlive{ s: s, 
Ctx: context.Background(), ExtensionTick: ticker, } ka.Start() // There should be no call to modifyAckDeadline since there is no item. ticker <- time.Time{} go func() { ka.Stop() // No items; should not block if len(s.modDeadlineCalled) > 0 { t.Errorf("unexpected extension to non-existent items: %v", <-s.modDeadlineCalled) } close(stopped) }() select { case <-stopped: case <-time.After(time.Second): t.Errorf("keepAlive timed out waiting for stop") } } func TestKeepAliveStopsWhenItemsExpired(t *testing.T) { ticker := make(chan time.Time) stopped := make(chan bool) s := &testService{modDeadlineCalled: make(chan modDeadlineCall, 2)} ka := &keepAlive{ s: s, Ctx: context.Background(), ExtensionTick: ticker, MaxExtension: time.Duration(0), // Should expire items at the first tick. } ka.Start() ka.Add("a") ka.Add("b") // Wait until the clock advances. Without this loop, this test fails on // Windows because the clock doesn't advance at all between ka.Add and the // expiration check after the tick is received. begin := time.Now() for time.Now().Equal(begin) { time.Sleep(time.Millisecond) } // There should be no call to modifyAckDeadline since both items are expired. ticker <- time.Time{} go func() { ka.Stop() // No live items; should not block. if len(s.modDeadlineCalled) > 0 { t.Errorf("unexpected extension to expired items") } close(stopped) }() select { case <-stopped: case <-time.After(time.Second): t.Errorf("timed out waiting for stop") } } func TestKeepAliveBlocksUntilAllItemsRemoved(t *testing.T) { ticker := make(chan time.Time) eventc := make(chan string, 3) s := &testService{modDeadlineCalled: make(chan modDeadlineCall)} ka := &keepAlive{ s: s, Ctx: context.Background(), ExtensionTick: ticker, MaxExtension: time.Hour, // Should not expire. } ka.Start() ka.Add("a") ka.Add("b") go func() { ticker <- time.Time{} // We expect a call since both items should be extended. 
select { case args := <-s.modDeadlineCalled: sort.Strings(args.ackIDs) got := args.ackIDs want := []string{"a", "b"} if !reflect.DeepEqual(got, want) { t.Errorf("mismatching IDs:\ngot %v\nwant %v", got, want) } case <-time.After(time.Second): t.Errorf("timed out waiting for deadline extend call") } time.Sleep(10 * time.Millisecond) eventc <- "pre-remove-b" // Remove one item, Stop should still be waiting. ka.Remove("b") ticker <- time.Time{} // We expect a call since the item is still alive. select { case args := <-s.modDeadlineCalled: got := args.ackIDs want := []string{"a"} if !reflect.DeepEqual(got, want) { t.Errorf("mismatching IDs:\ngot %v\nwant %v", got, want) } case <-time.After(time.Second): t.Errorf("timed out waiting for deadline extend call") } time.Sleep(10 * time.Millisecond) eventc <- "pre-remove-a" // Remove the last item so that Stop can proceed. ka.Remove("a") }() go func() { ka.Stop() // Should block all item are removed. eventc <- "post-stop" }() for i, want := range []string{"pre-remove-b", "pre-remove-a", "post-stop"} { select { case got := <-eventc: if got != want { t.Errorf("event #%d:\ngot %v\nwant %v", i, got, want) } case <-time.After(time.Second): t.Errorf("time out waiting for #%d event: want %v", i, want) } } } // extendCallResult contains a list of ackIDs which are expected in an ackID // extension request, along with the result that should be returned. type extendCallResult struct { ackIDs []string err error } // extendService implements modifyAckDeadline using a hard-coded list of extendCallResults. type extendService struct { service calls []extendCallResult t *testing.T // used for error logging. 
} func (es *extendService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error { if len(es.calls) == 0 { es.t.Fatalf("unexpected call to modifyAckDeadline: ackIDs: %v", ackIDs) } call := es.calls[0] es.calls = es.calls[1:] if got, want := ackIDs, call.ackIDs; !reflect.DeepEqual(got, want) { es.t.Errorf("unexpected arguments to modifyAckDeadline: got: %v ; want: %v", got, want) } return call.err } // Test implementation returns the first 2 elements as head, and the rest as tail. func (es *extendService) splitAckIDs(ids []string) ([]string, []string) { if len(ids) < 2 { return ids, nil } return ids[:2], ids[2:] } func TestKeepAliveSplitsBatches(t *testing.T) { type testCase struct { calls []extendCallResult } for _, tc := range []testCase{ { calls: []extendCallResult{ { ackIDs: []string{"a", "b"}, }, { ackIDs: []string{"c", "d"}, }, { ackIDs: []string{"e", "f"}, }, }, }, { calls: []extendCallResult{ { ackIDs: []string{"a", "b"}, err: errors.New("bang"), }, // On error we retry once. { ackIDs: []string{"a", "b"}, err: errors.New("bang"), }, // We give up after failing twice, so we move on to the next set, "c" and "d". { ackIDs: []string{"c", "d"}, err: errors.New("bang"), }, // Again, we retry once. { ackIDs: []string{"c", "d"}, }, { ackIDs: []string{"e", "f"}, }, }, }, } { s := &extendService{ t: t, calls: tc.calls, } ka := &keepAlive{ s: s, Ctx: context.Background(), Sub: "subname", } ka.extendDeadlines([]string{"a", "b", "c", "d", "e", "f"}) if len(s.calls) != 0 { t.Errorf("expected extend calls did not occur: %v", s.calls) } } } golang-google-cloud-0.9.0/pubsub/loadtest/000077500000000000000000000000001312234511600204625ustar00rootroot00000000000000golang-google-cloud-0.9.0/pubsub/loadtest/benchmark_test.go000066400000000000000000000113021312234511600237770ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package loadtest // Performance benchmarks for pubsub. // Run with // go test -bench . -cpu 1 import ( "log" "sync" "sync/atomic" "testing" "time" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/grpc" "cloud.google.com/go/internal/testutil" "cloud.google.com/go/pubsub" "google.golang.org/api/transport" pb "google.golang.org/genproto/googleapis/pubsub/v1" ) // These constants are designed to match the "throughput" test in // https://github.com/GoogleCloudPlatform/pubsub/blob/master/load-test-framework/run.py // and // https://github.com/GoogleCloudPlatform/pubsub/blob/master/load-test-framework/src/main/java/com/google/pubsub/clients/experimental/CPSPublisherTask.java const ( nMessages = 1e5 messageSize = 10000 // size of msg data in bytes batchSize = 10 batchDuration = 50 * time.Millisecond serverDelay = 200 * time.Millisecond maxOutstandingPublishes = 1600 // max_outstanding_messages in run.py ) func BenchmarkPublishThroughput(b *testing.B) { b.SetBytes(nMessages * messageSize) client := perfClient(serverDelay, 1, b) lts := &PubServer{ID: "xxx"} lts.init(client, "t", messageSize, batchSize, batchDuration) b.ResetTimer() for i := 0; i < b.N; i++ { runOnce(lts) } } func runOnce(lts *PubServer) { nRequests := int64(nMessages / batchSize) var nPublished int64 var wg sync.WaitGroup // The Java loadtest framework is rate-limited to 1 billion Execute calls a // second (each Execute call 
corresponding to a publishBatch call here), // but we can ignore this because of the following. // The framework runs 10,000 threads, each calling Execute in a loop, but // we can ignore this too. // The framework caps the number of outstanding calls to Execute at // maxOutstandingPublishes. That is what we simulate here. for i := 0; i < maxOutstandingPublishes; i++ { wg.Add(1) go func() { defer wg.Done() for atomic.AddInt64(&nRequests, -1) >= 0 { latencies, err := lts.publishBatch() if err != nil { log.Fatalf("publishBatch: %v", err) } atomic.AddInt64(&nPublished, int64(len(latencies))) } }() } wg.Wait() sent := atomic.LoadInt64(&nPublished) if sent != nMessages { log.Fatalf("sent %d messages, expected %d", sent, int(nMessages)) } } func perfClient(pubDelay time.Duration, nConns int, f interface { Fatal(...interface{}) }) *pubsub.Client { ctx := context.Background() srv, err := newPerfServer(pubDelay) if err != nil { f.Fatal(err) } conn, err := transport.DialGRPCInsecure(ctx, option.WithEndpoint(srv.Addr), option.WithGRPCConnectionPool(nConns)) if err != nil { f.Fatal(err) } client, err := pubsub.NewClient(ctx, "projectID", option.WithGRPCConn(conn)) if err != nil { f.Fatal(err) } return client } type perfServer struct { pb.PublisherServer pb.SubscriberServer Addr string pubDelay time.Duration mu sync.Mutex activePubs int maxActivePubs int } func newPerfServer(pubDelay time.Duration) (*perfServer, error) { srv, err := testutil.NewServer(grpc.MaxMsgSize(pubsub.MaxPublishRequestBytes)) if err != nil { return nil, err } perf := &perfServer{Addr: srv.Addr, pubDelay: pubDelay} pb.RegisterPublisherServer(srv.Gsrv, perf) pb.RegisterSubscriberServer(srv.Gsrv, perf) srv.Start() return perf, nil } var doLog = false func (p *perfServer) incActivePubs(n int) (int, bool) { p.mu.Lock() defer p.mu.Unlock() p.activePubs += n newMax := false if p.activePubs > p.maxActivePubs { p.maxActivePubs = p.activePubs newMax = true } return p.activePubs, newMax } func (p *perfServer) 
Publish(ctx context.Context, req *pb.PublishRequest) (*pb.PublishResponse, error) { a, newMax := p.incActivePubs(1) defer p.incActivePubs(-1) if newMax && doLog { log.Printf("max %d active publish calls", a) } if doLog { log.Printf("%p -> Publish %d", p, len(req.Messages)) } res := &pb.PublishResponse{MessageIds: make([]string, len(req.Messages))} for i := range res.MessageIds { res.MessageIds[i] = "x" } time.Sleep(p.pubDelay) if doLog { log.Printf("%p <- Publish %d", p, len(req.Messages)) } return res, nil } golang-google-cloud-0.9.0/pubsub/loadtest/cmd/000077500000000000000000000000001312234511600212255ustar00rootroot00000000000000golang-google-cloud-0.9.0/pubsub/loadtest/cmd/loadtest.go000066400000000000000000000025331312234511600233760ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package main import ( "flag" "fmt" "log" "net" "strconv" "math/rand" "cloud.google.com/go/pubsub/loadtest" pb "cloud.google.com/go/pubsub/loadtest/pb" "google.golang.org/grpc" ) func main() { port := flag.Uint("worker_port", 6000, "port to bind worker to") role := flag.String("r", "", "role: pub/sub") flag.Parse() var lts pb.LoadtestWorkerServer switch *role { case "pub": lts = &loadtest.PubServer{ID: strconv.Itoa(rand.Int())} case "sub": lts = &loadtest.SubServer{} default: log.Fatalf("unknown role: %q", *role) } serv := grpc.NewServer() pb.RegisterLoadtestWorkerServer(serv, lts) lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } serv.Serve(lis) } golang-google-cloud-0.9.0/pubsub/loadtest/loadtest.go000066400000000000000000000130531312234511600226320ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package loadtest implements load testing for pubsub, // following the interface defined in https://github.com/GoogleCloudPlatform/pubsub/tree/master/load-test-framework/ . // // This package is experimental. 
package loadtest import ( "bytes" "errors" "log" "runtime" "strconv" "sync" "sync/atomic" "time" "golang.org/x/net/context" "golang.org/x/time/rate" "github.com/golang/protobuf/ptypes" "cloud.google.com/go/pubsub" pb "cloud.google.com/go/pubsub/loadtest/pb" ) type pubServerConfig struct { topic *pubsub.Topic msgData []byte batchSize int32 } type PubServer struct { ID string cfg atomic.Value seqNum int32 } func (l *PubServer) Start(ctx context.Context, req *pb.StartRequest) (*pb.StartResponse, error) { log.Println("received start") c, err := pubsub.NewClient(ctx, req.Project) if err != nil { return nil, err } dur, err := ptypes.Duration(req.PublishBatchDuration) if err != nil { return nil, err } l.init(c, req.Topic, req.MessageSize, req.PublishBatchSize, dur) log.Println("started") return &pb.StartResponse{}, nil } func (l *PubServer) init(c *pubsub.Client, topicName string, msgSize, batchSize int32, batchDur time.Duration) { topic := c.Topic(topicName) topic.PublishSettings = pubsub.PublishSettings{ DelayThreshold: batchDur, CountThreshold: 950, ByteThreshold: 9500000, } l.cfg.Store(pubServerConfig{ topic: topic, msgData: bytes.Repeat([]byte{'A'}, int(msgSize)), batchSize: batchSize, }) } func (l *PubServer) Execute(ctx context.Context, _ *pb.ExecuteRequest) (*pb.ExecuteResponse, error) { latencies, err := l.publishBatch() if err != nil { log.Printf("error: %v", err) return nil, err } return &pb.ExecuteResponse{Latencies: latencies}, nil } func (l *PubServer) publishBatch() ([]int64, error) { var cfg pubServerConfig if c, ok := l.cfg.Load().(pubServerConfig); ok { cfg = c } else { return nil, errors.New("config not loaded") } start := time.Now() latencies := make([]int64, cfg.batchSize) startStr := strconv.FormatInt(start.UnixNano()/1e6, 10) seqNum := atomic.AddInt32(&l.seqNum, cfg.batchSize) - cfg.batchSize rs := make([]*pubsub.PublishResult, cfg.batchSize) for i := int32(0); i < cfg.batchSize; i++ { rs[i] = cfg.topic.Publish(context.TODO(), &pubsub.Message{ Data: 
cfg.msgData, Attributes: map[string]string{ "sendTime": startStr, "clientId": l.ID, "sequenceNumber": strconv.Itoa(int(seqNum + i)), }, }) } for i, r := range rs { _, err := r.Get(context.Background()) if err != nil { return nil, err } // TODO(jba,pongad): fix latencies // Later values will be skewed by earlier ones, since we wait for the // results in order. (On the other hand, it may not matter much, since // messages are added to bundles in order and bundles get sent more or // less in order.) If we want more accurate values, we can either start // a goroutine for each result (similar to the original code using a // callback), or call reflect.Select with the Ready channels of the // results. latencies[i] = time.Since(start).Nanoseconds() / 1e6 } return latencies, nil } type SubServer struct { lim *rate.Limiter mu sync.Mutex idents []*pb.MessageIdentifier latencies []int64 } func (s *SubServer) Start(ctx context.Context, req *pb.StartRequest) (*pb.StartResponse, error) { log.Println("received start") s.lim = rate.NewLimiter(rate.Every(time.Second), 1) c, err := pubsub.NewClient(ctx, req.Project) if err != nil { return nil, err } // Load test API doesn't define any way to stop right now. 
for i := 0; i < 30*runtime.GOMAXPROCS(0); i++ { go func() { sub := c.Subscription(req.GetPubsubOptions().Subscription) err := sub.Receive(context.Background(), s.callback) log.Fatal(err) }() } log.Println("started") return &pb.StartResponse{}, nil } func (s *SubServer) callback(_ context.Context, m *pubsub.Message) { id, err := strconv.ParseInt(m.Attributes["clientId"], 10, 64) if err != nil { log.Println(err) m.Nack() return } seqNum, err := strconv.ParseInt(m.Attributes["sequenceNumber"], 10, 32) if err != nil { log.Println(err) m.Nack() return } sendTimeMillis, err := strconv.ParseInt(m.Attributes["sendTime"], 10, 64) if err != nil { log.Println(err) m.Nack() return } latency := time.Now().UnixNano()/1e6 - sendTimeMillis ident := &pb.MessageIdentifier{ PublisherClientId: id, SequenceNumber: int32(seqNum), } s.mu.Lock() s.idents = append(s.idents, ident) s.latencies = append(s.latencies, latency) s.mu.Unlock() m.Ack() } func (s *SubServer) Execute(ctx context.Context, _ *pb.ExecuteRequest) (*pb.ExecuteResponse, error) { // Throttle so the load tester doesn't spam us and consume all our CPU. if err := s.lim.Wait(ctx); err != nil { return nil, err } s.mu.Lock() idents := s.idents s.idents = nil latencies := s.latencies s.latencies = nil s.mu.Unlock() return &pb.ExecuteResponse{ Latencies: latencies, ReceivedMessages: idents, }, nil } golang-google-cloud-0.9.0/pubsub/loadtest/pb/000077500000000000000000000000001312234511600210635ustar00rootroot00000000000000golang-google-cloud-0.9.0/pubsub/loadtest/pb/loadtest.pb.go000066400000000000000000000724221312234511600236400ustar00rootroot00000000000000// Code generated by protoc-gen-go. // source: loadtest.proto // DO NOT EDIT! /* Package google_pubsub_loadtest is a generated protocol buffer package. 
It is generated from these files: loadtest.proto It has these top-level messages: StartRequest StartResponse PubsubOptions KafkaOptions MessageIdentifier CheckRequest CheckResponse ExecuteRequest ExecuteResponse */ package google_pubsub_loadtest import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import google_protobuf "github.com/golang/protobuf/ptypes/duration" import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type StartRequest struct { // The GCP project. This must be set even for Kafka, as we use it to export metrics. Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"` // The Pub/Sub or Kafka topic name. Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"` // The number of requests that can be made, each second, per client. RequestRate int32 `protobuf:"varint,3,opt,name=request_rate,json=requestRate" json:"request_rate,omitempty"` // The size of each user message to publish MessageSize int32 `protobuf:"varint,4,opt,name=message_size,json=messageSize" json:"message_size,omitempty"` // The maximum outstanding requests, per client. MaxOutstandingRequests int32 `protobuf:"varint,5,opt,name=max_outstanding_requests,json=maxOutstandingRequests" json:"max_outstanding_requests,omitempty"` // The time at which the load test should start. If this is less than the current time, we start immediately. 
StartTime *google_protobuf1.Timestamp `protobuf:"bytes,6,opt,name=start_time,json=startTime" json:"start_time,omitempty"` // The burn-in duration, before which results should not be reported. BurnInDuration *google_protobuf.Duration `protobuf:"bytes,12,opt,name=burn_in_duration,json=burnInDuration" json:"burn_in_duration,omitempty"` // The number of user messages of size message_size to publish together. PublishBatchSize int32 `protobuf:"varint,11,opt,name=publish_batch_size,json=publishBatchSize" json:"publish_batch_size,omitempty"` // The max duration for coalescing a batch of published messages. PublishBatchDuration *google_protobuf.Duration `protobuf:"bytes,13,opt,name=publish_batch_duration,json=publishBatchDuration" json:"publish_batch_duration,omitempty"` // Types that are valid to be assigned to StopConditions: // *StartRequest_TestDuration // *StartRequest_NumberOfMessages StopConditions isStartRequest_StopConditions `protobuf_oneof:"stop_conditions"` // Types that are valid to be assigned to Options: // *StartRequest_PubsubOptions // *StartRequest_KafkaOptions Options isStartRequest_Options `protobuf_oneof:"options"` } func (m *StartRequest) Reset() { *m = StartRequest{} } func (m *StartRequest) String() string { return proto.CompactTextString(m) } func (*StartRequest) ProtoMessage() {} func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } type isStartRequest_StopConditions interface { isStartRequest_StopConditions() } type isStartRequest_Options interface { isStartRequest_Options() } type StartRequest_TestDuration struct { TestDuration *google_protobuf.Duration `protobuf:"bytes,7,opt,name=test_duration,json=testDuration,oneof"` } type StartRequest_NumberOfMessages struct { NumberOfMessages int32 `protobuf:"varint,8,opt,name=number_of_messages,json=numberOfMessages,oneof"` } type StartRequest_PubsubOptions struct { PubsubOptions *PubsubOptions `protobuf:"bytes,9,opt,name=pubsub_options,json=pubsubOptions,oneof"` } type 
StartRequest_KafkaOptions struct { KafkaOptions *KafkaOptions `protobuf:"bytes,10,opt,name=kafka_options,json=kafkaOptions,oneof"` } func (*StartRequest_TestDuration) isStartRequest_StopConditions() {} func (*StartRequest_NumberOfMessages) isStartRequest_StopConditions() {} func (*StartRequest_PubsubOptions) isStartRequest_Options() {} func (*StartRequest_KafkaOptions) isStartRequest_Options() {} func (m *StartRequest) GetStopConditions() isStartRequest_StopConditions { if m != nil { return m.StopConditions } return nil } func (m *StartRequest) GetOptions() isStartRequest_Options { if m != nil { return m.Options } return nil } func (m *StartRequest) GetProject() string { if m != nil { return m.Project } return "" } func (m *StartRequest) GetTopic() string { if m != nil { return m.Topic } return "" } func (m *StartRequest) GetRequestRate() int32 { if m != nil { return m.RequestRate } return 0 } func (m *StartRequest) GetMessageSize() int32 { if m != nil { return m.MessageSize } return 0 } func (m *StartRequest) GetMaxOutstandingRequests() int32 { if m != nil { return m.MaxOutstandingRequests } return 0 } func (m *StartRequest) GetStartTime() *google_protobuf1.Timestamp { if m != nil { return m.StartTime } return nil } func (m *StartRequest) GetBurnInDuration() *google_protobuf.Duration { if m != nil { return m.BurnInDuration } return nil } func (m *StartRequest) GetPublishBatchSize() int32 { if m != nil { return m.PublishBatchSize } return 0 } func (m *StartRequest) GetPublishBatchDuration() *google_protobuf.Duration { if m != nil { return m.PublishBatchDuration } return nil } func (m *StartRequest) GetTestDuration() *google_protobuf.Duration { if x, ok := m.GetStopConditions().(*StartRequest_TestDuration); ok { return x.TestDuration } return nil } func (m *StartRequest) GetNumberOfMessages() int32 { if x, ok := m.GetStopConditions().(*StartRequest_NumberOfMessages); ok { return x.NumberOfMessages } return 0 } func (m *StartRequest) GetPubsubOptions() *PubsubOptions 
{ if x, ok := m.GetOptions().(*StartRequest_PubsubOptions); ok { return x.PubsubOptions } return nil } func (m *StartRequest) GetKafkaOptions() *KafkaOptions { if x, ok := m.GetOptions().(*StartRequest_KafkaOptions); ok { return x.KafkaOptions } return nil } // XXX_OneofFuncs is for the internal use of the proto package. func (*StartRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _StartRequest_OneofMarshaler, _StartRequest_OneofUnmarshaler, _StartRequest_OneofSizer, []interface{}{ (*StartRequest_TestDuration)(nil), (*StartRequest_NumberOfMessages)(nil), (*StartRequest_PubsubOptions)(nil), (*StartRequest_KafkaOptions)(nil), } } func _StartRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { m := msg.(*StartRequest) // stop_conditions switch x := m.StopConditions.(type) { case *StartRequest_TestDuration: b.EncodeVarint(7<<3 | proto.WireBytes) if err := b.EncodeMessage(x.TestDuration); err != nil { return err } case *StartRequest_NumberOfMessages: b.EncodeVarint(8<<3 | proto.WireVarint) b.EncodeVarint(uint64(x.NumberOfMessages)) case nil: default: return fmt.Errorf("StartRequest.StopConditions has unexpected type %T", x) } // options switch x := m.Options.(type) { case *StartRequest_PubsubOptions: b.EncodeVarint(9<<3 | proto.WireBytes) if err := b.EncodeMessage(x.PubsubOptions); err != nil { return err } case *StartRequest_KafkaOptions: b.EncodeVarint(10<<3 | proto.WireBytes) if err := b.EncodeMessage(x.KafkaOptions); err != nil { return err } case nil: default: return fmt.Errorf("StartRequest.Options has unexpected type %T", x) } return nil } func _StartRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { m := msg.(*StartRequest) switch tag { case 7: // stop_conditions.test_duration if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } msg := 
new(google_protobuf.Duration) err := b.DecodeMessage(msg) m.StopConditions = &StartRequest_TestDuration{msg} return true, err case 8: // stop_conditions.number_of_messages if wire != proto.WireVarint { return true, proto.ErrInternalBadWireType } x, err := b.DecodeVarint() m.StopConditions = &StartRequest_NumberOfMessages{int32(x)} return true, err case 9: // options.pubsub_options if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } msg := new(PubsubOptions) err := b.DecodeMessage(msg) m.Options = &StartRequest_PubsubOptions{msg} return true, err case 10: // options.kafka_options if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } msg := new(KafkaOptions) err := b.DecodeMessage(msg) m.Options = &StartRequest_KafkaOptions{msg} return true, err default: return false, nil } } func _StartRequest_OneofSizer(msg proto.Message) (n int) { m := msg.(*StartRequest) // stop_conditions switch x := m.StopConditions.(type) { case *StartRequest_TestDuration: s := proto.Size(x.TestDuration) n += proto.SizeVarint(7<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *StartRequest_NumberOfMessages: n += proto.SizeVarint(8<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.NumberOfMessages)) case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } // options switch x := m.Options.(type) { case *StartRequest_PubsubOptions: s := proto.Size(x.PubsubOptions) n += proto.SizeVarint(9<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *StartRequest_KafkaOptions: s := proto.Size(x.KafkaOptions) n += proto.SizeVarint(10<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } return n } type StartResponse struct { } func (m *StartResponse) Reset() { *m = StartResponse{} } func (m *StartResponse) String() string { return proto.CompactTextString(m) } func (*StartResponse) ProtoMessage() {} func (*StartResponse) 
Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } type PubsubOptions struct { // The Cloud Pub/Sub subscription name Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"` // The maximum number of messages to pull which each request. MaxMessagesPerPull int32 `protobuf:"varint,2,opt,name=max_messages_per_pull,json=maxMessagesPerPull" json:"max_messages_per_pull,omitempty"` } func (m *PubsubOptions) Reset() { *m = PubsubOptions{} } func (m *PubsubOptions) String() string { return proto.CompactTextString(m) } func (*PubsubOptions) ProtoMessage() {} func (*PubsubOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func (m *PubsubOptions) GetSubscription() string { if m != nil { return m.Subscription } return "" } func (m *PubsubOptions) GetMaxMessagesPerPull() int32 { if m != nil { return m.MaxMessagesPerPull } return 0 } type KafkaOptions struct { // The network address of the Kafka broker. Broker string `protobuf:"bytes,1,opt,name=broker" json:"broker,omitempty"` // The length of time to poll for. PollDuration *google_protobuf.Duration `protobuf:"bytes,2,opt,name=poll_duration,json=pollDuration" json:"poll_duration,omitempty"` } func (m *KafkaOptions) Reset() { *m = KafkaOptions{} } func (m *KafkaOptions) String() string { return proto.CompactTextString(m) } func (*KafkaOptions) ProtoMessage() {} func (*KafkaOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func (m *KafkaOptions) GetBroker() string { if m != nil { return m.Broker } return "" } func (m *KafkaOptions) GetPollDuration() *google_protobuf.Duration { if m != nil { return m.PollDuration } return nil } type MessageIdentifier struct { // The unique id of the client that published the message. PublisherClientId int64 `protobuf:"varint,1,opt,name=publisher_client_id,json=publisherClientId" json:"publisher_client_id,omitempty"` // Sequence number of the published message with the given publish_client_id. 
SequenceNumber int32 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` } func (m *MessageIdentifier) Reset() { *m = MessageIdentifier{} } func (m *MessageIdentifier) String() string { return proto.CompactTextString(m) } func (*MessageIdentifier) ProtoMessage() {} func (*MessageIdentifier) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *MessageIdentifier) GetPublisherClientId() int64 { if m != nil { return m.PublisherClientId } return 0 } func (m *MessageIdentifier) GetSequenceNumber() int32 { if m != nil { return m.SequenceNumber } return 0 } type CheckRequest struct { // Duplicate messages that should not be reported for throughput and latency. Duplicates []*MessageIdentifier `protobuf:"bytes,1,rep,name=duplicates" json:"duplicates,omitempty"` } func (m *CheckRequest) Reset() { *m = CheckRequest{} } func (m *CheckRequest) String() string { return proto.CompactTextString(m) } func (*CheckRequest) ProtoMessage() {} func (*CheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *CheckRequest) GetDuplicates() []*MessageIdentifier { if m != nil { return m.Duplicates } return nil } type CheckResponse struct { // Histogram of latencies, each one a delta from the previous CheckResponse sent. BucketValues []int64 `protobuf:"varint,1,rep,packed,name=bucket_values,json=bucketValues" json:"bucket_values,omitempty"` // The duration from the start of the loadtest to its completion or now if is_finished is false. RunningDuration *google_protobuf.Duration `protobuf:"bytes,2,opt,name=running_duration,json=runningDuration" json:"running_duration,omitempty"` // True if the load test has finished running. 
IsFinished bool `protobuf:"varint,3,opt,name=is_finished,json=isFinished" json:"is_finished,omitempty"` // MessageIdentifiers of all received messages since the last Check ReceivedMessages []*MessageIdentifier `protobuf:"bytes,4,rep,name=received_messages,json=receivedMessages" json:"received_messages,omitempty"` } func (m *CheckResponse) Reset() { *m = CheckResponse{} } func (m *CheckResponse) String() string { return proto.CompactTextString(m) } func (*CheckResponse) ProtoMessage() {} func (*CheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *CheckResponse) GetBucketValues() []int64 { if m != nil { return m.BucketValues } return nil } func (m *CheckResponse) GetRunningDuration() *google_protobuf.Duration { if m != nil { return m.RunningDuration } return nil } func (m *CheckResponse) GetIsFinished() bool { if m != nil { return m.IsFinished } return false } func (m *CheckResponse) GetReceivedMessages() []*MessageIdentifier { if m != nil { return m.ReceivedMessages } return nil } type ExecuteRequest struct { } func (m *ExecuteRequest) Reset() { *m = ExecuteRequest{} } func (m *ExecuteRequest) String() string { return proto.CompactTextString(m) } func (*ExecuteRequest) ProtoMessage() {} func (*ExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } type ExecuteResponse struct { // Latencies of the completed operations Latencies []int64 `protobuf:"varint,1,rep,packed,name=latencies" json:"latencies,omitempty"` // MessageIdentifiers of all received messages since the last Execute ReceivedMessages []*MessageIdentifier `protobuf:"bytes,2,rep,name=received_messages,json=receivedMessages" json:"received_messages,omitempty"` } func (m *ExecuteResponse) Reset() { *m = ExecuteResponse{} } func (m *ExecuteResponse) String() string { return proto.CompactTextString(m) } func (*ExecuteResponse) ProtoMessage() {} func (*ExecuteResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m 
*ExecuteResponse) GetLatencies() []int64 { if m != nil { return m.Latencies } return nil } func (m *ExecuteResponse) GetReceivedMessages() []*MessageIdentifier { if m != nil { return m.ReceivedMessages } return nil } func init() { proto.RegisterType((*StartRequest)(nil), "google.pubsub.loadtest.StartRequest") proto.RegisterType((*StartResponse)(nil), "google.pubsub.loadtest.StartResponse") proto.RegisterType((*PubsubOptions)(nil), "google.pubsub.loadtest.PubsubOptions") proto.RegisterType((*KafkaOptions)(nil), "google.pubsub.loadtest.KafkaOptions") proto.RegisterType((*MessageIdentifier)(nil), "google.pubsub.loadtest.MessageIdentifier") proto.RegisterType((*CheckRequest)(nil), "google.pubsub.loadtest.CheckRequest") proto.RegisterType((*CheckResponse)(nil), "google.pubsub.loadtest.CheckResponse") proto.RegisterType((*ExecuteRequest)(nil), "google.pubsub.loadtest.ExecuteRequest") proto.RegisterType((*ExecuteResponse)(nil), "google.pubsub.loadtest.ExecuteResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for Loadtest service type LoadtestClient interface { // Starts a load test Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) // Checks the status of a load test Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) } type loadtestClient struct { cc *grpc.ClientConn } func NewLoadtestClient(cc *grpc.ClientConn) LoadtestClient { return &loadtestClient{cc} } func (c *loadtestClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) { out := new(StartResponse) err := grpc.Invoke(ctx, "/google.pubsub.loadtest.Loadtest/Start", in, out, c.cc, opts...) 
if err != nil { return nil, err } return out, nil } func (c *loadtestClient) Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) { out := new(CheckResponse) err := grpc.Invoke(ctx, "/google.pubsub.loadtest.Loadtest/Check", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } // Server API for Loadtest service type LoadtestServer interface { // Starts a load test Start(context.Context, *StartRequest) (*StartResponse, error) // Checks the status of a load test Check(context.Context, *CheckRequest) (*CheckResponse, error) } func RegisterLoadtestServer(s *grpc.Server, srv LoadtestServer) { s.RegisterService(&_Loadtest_serviceDesc, srv) } func _Loadtest_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StartRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(LoadtestServer).Start(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/google.pubsub.loadtest.Loadtest/Start", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(LoadtestServer).Start(ctx, req.(*StartRequest)) } return interceptor(ctx, in, info, handler) } func _Loadtest_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CheckRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(LoadtestServer).Check(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/google.pubsub.loadtest.Loadtest/Check", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(LoadtestServer).Check(ctx, req.(*CheckRequest)) } return interceptor(ctx, in, info, handler) } var _Loadtest_serviceDesc = grpc.ServiceDesc{ ServiceName: "google.pubsub.loadtest.Loadtest", HandlerType: 
(*LoadtestServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Start", Handler: _Loadtest_Start_Handler, }, { MethodName: "Check", Handler: _Loadtest_Check_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "loadtest.proto", } // Client API for LoadtestWorker service type LoadtestWorkerClient interface { // Starts a worker Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) // Executes a command on the worker, returning the latencies of the operations. Since some // commands consist of multiple operations (i.e. pulls contain many received messages with // different end to end latencies) a single command can have multiple latencies returned. Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*ExecuteResponse, error) } type loadtestWorkerClient struct { cc *grpc.ClientConn } func NewLoadtestWorkerClient(cc *grpc.ClientConn) LoadtestWorkerClient { return &loadtestWorkerClient{cc} } func (c *loadtestWorkerClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) { out := new(StartResponse) err := grpc.Invoke(ctx, "/google.pubsub.loadtest.LoadtestWorker/Start", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *loadtestWorkerClient) Execute(ctx context.Context, in *ExecuteRequest, opts ...grpc.CallOption) (*ExecuteResponse, error) { out := new(ExecuteResponse) err := grpc.Invoke(ctx, "/google.pubsub.loadtest.LoadtestWorker/Execute", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } // Server API for LoadtestWorker service type LoadtestWorkerServer interface { // Starts a worker Start(context.Context, *StartRequest) (*StartResponse, error) // Executes a command on the worker, returning the latencies of the operations. Since some // commands consist of multiple operations (i.e. 
pulls contain many received messages with // different end to end latencies) a single command can have multiple latencies returned. Execute(context.Context, *ExecuteRequest) (*ExecuteResponse, error) } func RegisterLoadtestWorkerServer(s *grpc.Server, srv LoadtestWorkerServer) { s.RegisterService(&_LoadtestWorker_serviceDesc, srv) } func _LoadtestWorker_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StartRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(LoadtestWorkerServer).Start(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/google.pubsub.loadtest.LoadtestWorker/Start", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(LoadtestWorkerServer).Start(ctx, req.(*StartRequest)) } return interceptor(ctx, in, info, handler) } func _LoadtestWorker_Execute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ExecuteRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(LoadtestWorkerServer).Execute(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/google.pubsub.loadtest.LoadtestWorker/Execute", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(LoadtestWorkerServer).Execute(ctx, req.(*ExecuteRequest)) } return interceptor(ctx, in, info, handler) } var _LoadtestWorker_serviceDesc = grpc.ServiceDesc{ ServiceName: "google.pubsub.loadtest.LoadtestWorker", HandlerType: (*LoadtestWorkerServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Start", Handler: _LoadtestWorker_Start_Handler, }, { MethodName: "Execute", Handler: _LoadtestWorker_Execute_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "loadtest.proto", } func init() { proto.RegisterFile("loadtest.proto", 
fileDescriptor0) } var fileDescriptor0 = []byte{ // 847 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xdd, 0x6e, 0xdc, 0x44, 0x14, 0xae, 0x93, 0x6e, 0x92, 0x3d, 0x6b, 0xef, 0x6e, 0x86, 0x12, 0x99, 0x15, 0xd0, 0x60, 0x28, 0x0d, 0x12, 0x72, 0x45, 0xb8, 0x81, 0x1b, 0x84, 0x92, 0x82, 0x12, 0x15, 0x9a, 0xc8, 0x8d, 0x8a, 0xe0, 0x66, 0x34, 0xb6, 0x67, 0x93, 0x61, 0xed, 0x19, 0x33, 0x3f, 0x55, 0xd4, 0x17, 0xe0, 0x8d, 0x78, 0x00, 0x1e, 0x87, 0x5b, 0x5e, 0x00, 0xcd, 0x78, 0xbc, 0x3f, 0x6d, 0x57, 0x0b, 0x42, 0xbd, 0x3c, 0xdf, 0xf9, 0xce, 0x37, 0xe7, 0xd7, 0x86, 0x61, 0x25, 0x48, 0xa9, 0xa9, 0xd2, 0x69, 0x23, 0x85, 0x16, 0xe8, 0xe0, 0x5a, 0x88, 0xeb, 0x8a, 0xa6, 0x8d, 0xc9, 0x95, 0xc9, 0xd3, 0xce, 0x3b, 0xf9, 0xb0, 0xc5, 0x1f, 0x39, 0x56, 0x6e, 0xa6, 0x8f, 0x4a, 0x23, 0x89, 0x66, 0x82, 0xb7, 0x71, 0x93, 0xfb, 0xaf, 0xfa, 0x35, 0xab, 0xa9, 0xd2, 0xa4, 0x6e, 0x5a, 0x42, 0xf2, 0x57, 0x0f, 0xc2, 0x67, 0x9a, 0x48, 0x9d, 0xd1, 0xdf, 0x0c, 0x55, 0x1a, 0xc5, 0xb0, 0xdb, 0x48, 0xf1, 0x2b, 0x2d, 0x74, 0x1c, 0x1c, 0x06, 0x47, 0xfd, 0xac, 0x33, 0xd1, 0x3d, 0xe8, 0x69, 0xd1, 0xb0, 0x22, 0xde, 0x72, 0x78, 0x6b, 0xa0, 0x8f, 0x20, 0x94, 0x6d, 0x28, 0x96, 0x44, 0xd3, 0x78, 0xfb, 0x30, 0x38, 0xea, 0x65, 0x03, 0x8f, 0x65, 0x44, 0x53, 0x4b, 0xa9, 0xa9, 0x52, 0xe4, 0x9a, 0x62, 0xc5, 0x5e, 0xd2, 0xf8, 0x6e, 0x4b, 0xf1, 0xd8, 0x33, 0xf6, 0x92, 0xa2, 0xaf, 0x20, 0xae, 0xc9, 0x2d, 0x16, 0x46, 0x2b, 0x4d, 0x78, 0xc9, 0xf8, 0x35, 0xf6, 0x0a, 0x2a, 0xee, 0x39, 0xfa, 0x41, 0x4d, 0x6e, 0x2f, 0x16, 0x6e, 0x9f, 0xae, 0x42, 0x5f, 0x03, 0x28, 0x9b, 0x3f, 0xb6, 0x95, 0xc5, 0x3b, 0x87, 0xc1, 0xd1, 0xe0, 0x78, 0x92, 0x76, 0xed, 0xf2, 0x65, 0xa7, 0x57, 0x5d, 0xd9, 0x59, 0xdf, 0xb1, 0xad, 0x8d, 0x4e, 0x61, 0x9c, 0x1b, 0xc9, 0x31, 0xe3, 0xb8, 0x6b, 0x5b, 0x1c, 0x3a, 0x81, 0xf7, 0x5e, 0x13, 0x78, 0xec, 0x09, 0xd9, 0xd0, 0x86, 0x9c, 0xf3, 0xce, 0x46, 0x9f, 0x03, 0x6a, 0x4c, 0x5e, 0x31, 0x75, 0x83, 0x73, 0xa2, 0x8b, 0x9b, 0xb6, 0xc4, 0x81, 0xcb, 0x79, 
0xec, 0x3d, 0x27, 0xd6, 0xe1, 0xea, 0xbc, 0x80, 0x83, 0x55, 0xf6, 0xfc, 0xe1, 0x68, 0xd3, 0xc3, 0xf7, 0x96, 0xc5, 0xe6, 0xcf, 0x7f, 0x0b, 0x91, 0x5d, 0x84, 0x85, 0xce, 0xee, 0x06, 0x9d, 0xb3, 0x3b, 0x59, 0x68, 0x23, 0xe6, 0x0a, 0x29, 0x20, 0x6e, 0xea, 0x9c, 0x4a, 0x2c, 0xa6, 0xd8, 0xcf, 0x44, 0xc5, 0x7b, 0xb6, 0x80, 0xb3, 0x3b, 0xd9, 0xb8, 0xf5, 0x5d, 0x4c, 0x7f, 0xf4, 0x1e, 0xf4, 0x14, 0x86, 0xed, 0x16, 0x62, 0xd1, 0x58, 0x01, 0x15, 0xf7, 0xdd, 0x93, 0x0f, 0xd2, 0x37, 0xef, 0x68, 0x7a, 0xe9, 0xec, 0x8b, 0x96, 0x7c, 0x16, 0x64, 0x51, 0xb3, 0x0c, 0xa0, 0x27, 0x10, 0xcd, 0xc8, 0x74, 0x46, 0xe6, 0x72, 0xe0, 0xe4, 0x3e, 0x59, 0x27, 0xf7, 0xc4, 0x92, 0x17, 0x6a, 0xe1, 0x6c, 0xc9, 0x3e, 0xd9, 0x87, 0x91, 0xd2, 0xa2, 0xc1, 0x85, 0xe0, 0x25, 0x6b, 0xa1, 0x3e, 0xec, 0x7a, 0xe5, 0x64, 0x04, 0x91, 0xdf, 0x75, 0xd5, 0x08, 0xae, 0x68, 0x32, 0x85, 0x68, 0x25, 0x3b, 0x94, 0x40, 0xa8, 0x4c, 0xae, 0x0a, 0xc9, 0x1c, 0xe0, 0x4f, 0x60, 0x05, 0x43, 0x5f, 0xc0, 0xbb, 0x76, 0x57, 0xbb, 0x56, 0xe1, 0x86, 0x4a, 0xdc, 0x98, 0xaa, 0x72, 0x77, 0xd1, 0xcb, 0x50, 0x4d, 0x6e, 0xbb, 0x66, 0x5d, 0x52, 0x79, 0x69, 0xaa, 0x2a, 0x99, 0x42, 0xb8, 0x9c, 0x36, 0x3a, 0x80, 0x9d, 0x5c, 0x8a, 0x19, 0x95, 0xfe, 0x01, 0x6f, 0xa1, 0x6f, 0x20, 0x6a, 0x44, 0x55, 0x2d, 0xa6, 0xb9, 0xb5, 0x69, 0x2b, 0x42, 0xcb, 0xef, 0xac, 0xa4, 0x82, 0x7d, 0xff, 0xf4, 0x79, 0x49, 0xb9, 0x66, 0x53, 0x46, 0x25, 0x4a, 0xe1, 0x1d, 0xbf, 0x3a, 0x54, 0xe2, 0xa2, 0x62, 0x94, 0x6b, 0xcc, 0x4a, 0xf7, 0xf2, 0x76, 0xb6, 0x3f, 0x77, 0x9d, 0x3a, 0xcf, 0x79, 0x89, 0x1e, 0xc2, 0x48, 0xd9, 0xeb, 0xe2, 0x05, 0xc5, 0xed, 0xf4, 0x7d, 0x65, 0xc3, 0x0e, 0x7e, 0xea, 0xd0, 0xe4, 0x67, 0x08, 0x4f, 0x6f, 0x68, 0x31, 0xeb, 0x3e, 0x1d, 0xe7, 0x00, 0xa5, 0x69, 0x2a, 0x56, 0x10, 0x4d, 0x55, 0x1c, 0x1c, 0x6e, 0x1f, 0x0d, 0x8e, 0x3f, 0x5b, 0x37, 0xc6, 0xd7, 0xf2, 0xcc, 0x96, 0x82, 0x93, 0xbf, 0x03, 0x88, 0xbc, 0x76, 0x3b, 0x2a, 0xf4, 0x31, 0x44, 0xb9, 0x29, 0x66, 0x54, 0xe3, 0x17, 0xa4, 0x32, 0x5e, 0x7f, 0x3b, 0x0b, 0x5b, 0xf0, 0xb9, 0xc3, 0xd0, 0x63, 0x18, 
0x4b, 0xc3, 0xb9, 0xfd, 0x7c, 0xfc, 0xfb, 0x16, 0x8e, 0x7c, 0xc8, 0xfc, 0x22, 0xee, 0xc3, 0x80, 0x29, 0x3c, 0x65, 0xdc, 0xf6, 0xa5, 0x74, 0x5f, 0xb4, 0xbd, 0x0c, 0x98, 0xfa, 0xde, 0x23, 0xe8, 0x39, 0xec, 0x4b, 0x5a, 0x50, 0xf6, 0x82, 0x96, 0x8b, 0x8b, 0xb9, 0xfb, 0x5f, 0xeb, 0x1d, 0x77, 0x1a, 0xdd, 0xb6, 0x24, 0x63, 0x18, 0x7e, 0x77, 0x4b, 0x0b, 0xa3, 0xa9, 0x6f, 0x69, 0xf2, 0x7b, 0x00, 0xa3, 0x39, 0xe4, 0x3b, 0xf1, 0x3e, 0xf4, 0x2b, 0xa2, 0x29, 0x2f, 0xd8, 0xbc, 0x0b, 0x0b, 0xe0, 0xcd, 0xb9, 0x6d, 0xfd, 0xef, 0xdc, 0x8e, 0xff, 0x08, 0x60, 0xef, 0x07, 0x1f, 0x80, 0xae, 0xa0, 0xe7, 0x0e, 0x09, 0xad, 0xbd, 0xd2, 0xe5, 0x7f, 0xca, 0xe4, 0xc1, 0x06, 0x96, 0x2f, 0xec, 0x0a, 0x7a, 0x6e, 0xe6, 0xeb, 0x55, 0x97, 0xd7, 0x6d, 0xbd, 0xea, 0xca, 0xe2, 0x1c, 0xff, 0x19, 0xc0, 0xb0, 0x4b, 0xfc, 0x27, 0x21, 0xed, 0x99, 0xbd, 0x9d, 0xf4, 0x7f, 0x81, 0x5d, 0x3f, 0x2a, 0xf4, 0xe9, 0xba, 0x88, 0xd5, 0xf1, 0x4e, 0x1e, 0x6e, 0xe4, 0xb5, 0xda, 0x27, 0x29, 0x7c, 0x50, 0x88, 0xfa, 0x15, 0xf6, 0xb4, 0x62, 0x45, 0x5a, 0x88, 0xba, 0x16, 0xfc, 0x24, 0xea, 0x4a, 0xbc, 0x74, 0xfb, 0xbd, 0xe3, 0xd6, 0xfc, 0xcb, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc4, 0xfc, 0xdc, 0x27, 0x48, 0x08, 0x00, 0x00, } golang-google-cloud-0.9.0/pubsub/message.go000066400000000000000000000056611312234511600206260ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package pubsub

import (
	"time"

	"github.com/golang/protobuf/ptypes"

	pb "google.golang.org/genproto/googleapis/pubsub/v1"
)

// Message represents a Pub/Sub message.
type Message struct {
	// ID identifies this message.
	// This ID is assigned by the server and is populated for Messages obtained from a subscription.
	// This field is read-only.
	ID string

	// Data is the actual data in the message.
	Data []byte

	// Attributes represents the key-value pairs the current message
	// is labelled with.
	Attributes map[string]string

	// ackID is the identifier to acknowledge this message.
	ackID string

	// The time at which the message was published.
	// This is populated by the server for Messages obtained from a subscription.
	// This field is read-only.
	PublishTime time.Time

	// size is the approximate size of the message's data and attributes.
	size int

	// calledDone records whether done has already run (via Ack or Nack),
	// making later calls no-ops.
	calledDone bool

	// The done method of the iterator that created this Message.
	doneFunc func(string, bool)
}

// toMessage converts a ReceivedMessage proto into a Message.
// A response carrying no Message payload yields a Message holding only
// the ack ID; otherwise the publish time is converted from its proto
// timestamp, and a conversion failure is returned as an error.
func toMessage(resp *pb.ReceivedMessage) (*Message, error) {
	if resp.Message == nil {
		return &Message{ackID: resp.AckId}, nil
	}
	pubTime, err := ptypes.Timestamp(resp.Message.PublishTime)
	if err != nil {
		return nil, err
	}
	return &Message{
		ackID:       resp.AckId,
		Data:        resp.Message.Data,
		Attributes:  resp.Message.Attributes,
		ID:          resp.Message.MessageId,
		PublishTime: pubTime,
	}, nil
}

// Ack indicates successful processing of a Message passed to the Subscriber.Receive callback.
// It should not be called on any other Message value.
// If message acknowledgement fails, the Message will be redelivered.
// Client code must call Ack or Nack when finished for each received Message.
// Calls to Ack or Nack have no effect after the first call.
func (m *Message) Ack() {
	m.done(true)
}

// Nack indicates that the client will not or cannot process a Message passed to the Subscriber.Receive callback.
// It should not be called on any other Message value.
// Nack will result in the Message being redelivered more quickly than if it were allowed to expire.
// Client code must call Ack or Nack when finished for each received Message.
// Calls to Ack or Nack have no effect after the first call.
func (m *Message) Nack() {
	m.done(false)
}

// done reports the ack/nack decision for this message to doneFunc.
// Only the first call has any effect.
// NOTE(review): calledDone is read and written without synchronization;
// this presumes Ack/Nack are invoked from a single goroutine — confirm.
func (m *Message) done(ack bool) {
	if m.calledDone {
		return
	}
	m.calledDone = true
	m.doneFunc(m.ackID, ack)
}
golang-google-cloud-0.9.0/pubsub/pubsub.go000066400000000000000000000073401312234511600204760ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub // import "cloud.google.com/go/pubsub"

import (
	"fmt"
	"os"
	"runtime"

	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
	"google.golang.org/grpc"

	"golang.org/x/net/context"
)

const (
	// ScopePubSub grants permissions to view and manage Pub/Sub
	// topics and subscriptions.
	ScopePubSub = "https://www.googleapis.com/auth/pubsub"

	// ScopeCloudPlatform grants permissions to view and manage your data
	// across Google Cloud Platform services.
	ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform"
)

const prodAddr = "https://pubsub.googleapis.com/"

// Client is a Google Pub/Sub client scoped to a single project.
//
// Clients should be reused rather than being created as needed.
// A Client may be shared by multiple goroutines.
type Client struct {
	projectID string
	s         service
}

// NewClient creates a new PubSub client.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
	var o []option.ClientOption
	// Environment variables for gcloud emulator:
	// https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/
	if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" {
		// Emulator traffic uses a single insecure connection.
		conn, err := grpc.Dial(addr, grpc.WithInsecure())
		if err != nil {
			return nil, fmt.Errorf("grpc.Dial: %v", err)
		}
		o = []option.ClientOption{option.WithGRPCConn(conn)}
	} else {
		o = []option.ClientOption{
			// Create multiple connections to increase throughput.
			option.WithGRPCConnectionPool(runtime.GOMAXPROCS(0)),
		}
	}
	// Caller-supplied options come last so they can override the defaults above.
	o = append(o, opts...)
	s, err := newPubSubService(ctx, o)
	if err != nil {
		return nil, fmt.Errorf("constructing pubsub client: %v", err)
	}
	c := &Client{
		projectID: projectID,
		s:         s,
	}
	return c, nil
}

// Close closes any resources held by the client.
//
// Close need not be called at program exit.
func (c *Client) Close() error {
	return c.s.close()
}

// fullyQualifiedProjectName returns the project's resource name,
// e.g. "projects/<projectID>".
func (c *Client) fullyQualifiedProjectName() string {
	return fmt.Sprintf("projects/%s", c.projectID)
}

// pageToken stores the next page token for a server response which is split over multiple pages.
type pageToken struct {
	tok      string // token to send on the next fetch; "" initially and after the last page
	explicit bool   // true once set has recorded at least one server response
}

// set records the token returned by the most recent fetch and marks the
// token as explicitly supplied by the server.
func (pt *pageToken) set(tok string) {
	pt.tok = tok
	pt.explicit = true
}

// get returns the stored token ("" before the first fetch).
func (pt *pageToken) get() string {
	return pt.tok
}

// more returns whether further pages should be fetched from the server.
func (pt *pageToken) more() bool {
	return pt.tok != "" || !pt.explicit
}

// stringsIterator provides an iterator API for a sequence of API page fetches that return lists of strings.
type stringsIterator struct {
	ctx     context.Context
	strings []string
	token   pageToken
	fetch   func(ctx context.Context, tok string) (*stringsPage, error)
}

// Next returns the next string. If there are no more strings, iterator.Done will be returned.
func (si *stringsIterator) Next() (string, error) {
	// Refill the buffer from the server until at least one string is
	// available or no further pages remain.
	for len(si.strings) == 0 && si.token.more() {
		page, err := si.fetch(si.ctx, si.token.get())
		if err != nil {
			return "", err
		}
		si.token.set(page.tok)
		si.strings = page.strings
	}
	if len(si.strings) == 0 {
		return "", iterator.Done
	}
	s := si.strings[0]
	si.strings = si.strings[1:]
	return s, nil
}
golang-google-cloud-0.9.0/pubsub/puller.go000066400000000000000000000064531312234511600205030ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pubsub

import (
	"sync"

	"golang.org/x/net/context"
)

// puller fetches messages from the server in a batch.
type puller struct {
	ctx    context.Context
	cancel context.CancelFunc

	// keepAlive takes ownership of the lifetime of the message identified
	// by ackID, ensuring that its ack deadline does not expire. It should
	// be called each time a new message is fetched from the server, even
	// if it is not yet returned from Next.
	keepAlive func(ackID string)

	// abandon should be called for each message which has previously been
	// passed to keepAlive, but will never be returned by Next.
	abandon func(ackID string)

	// fetch fetches a batch of messages from the server.
	fetch func() ([]*Message, error)

	mu  sync.Mutex
	buf []*Message
}

// newPuller constructs a new puller.
// batchSize is the maximum number of messages to fetch at once.
// No more than batchSize messages will be outstanding at any time.
func newPuller(s service, subName string, ctx context.Context, batchSize int32, keepAlive, abandon func(ackID string)) *puller { ctx, cancel := context.WithCancel(ctx) return &puller{ cancel: cancel, keepAlive: keepAlive, abandon: abandon, ctx: ctx, fetch: func() ([]*Message, error) { return s.fetchMessages(ctx, subName, batchSize) }, } } const maxPullAttempts = 2 // Next returns the next message from the server, fetching a new batch if necessary. // keepAlive is called with the ackIDs of newly fetched messages. // If p.Ctx has already been cancelled before Next is called, no new messages // will be fetched. func (p *puller) Next() (*Message, error) { p.mu.Lock() defer p.mu.Unlock() // If ctx has been cancelled, return straight away (even if there are buffered messages available). select { case <-p.ctx.Done(): return nil, p.ctx.Err() default: } for len(p.buf) == 0 { var buf []*Message var err error for i := 0; i < maxPullAttempts; i++ { // Once Stop has completed, all future calls to Next will immediately fail at this point. buf, err = p.fetch() if err == nil || err == context.Canceled || err == context.DeadlineExceeded { break } } if err != nil { return nil, err } for _, m := range buf { p.keepAlive(m.ackID) } p.buf = buf } m := p.buf[0] p.buf = p.buf[1:] return m, nil } // Stop aborts any pending calls to Next, and prevents any future ones from succeeding. // Stop also abandons any messages that have been pre-fetched. // Once Stop completes, no calls to Next will succeed. func (p *puller) Stop() { // Next may be executing in another goroutine. Cancel it, and then wait until it terminates. p.cancel() p.mu.Lock() defer p.mu.Unlock() for _, m := range p.buf { p.abandon(m.ackID) } p.buf = nil } golang-google-cloud-0.9.0/pubsub/puller_test.go000066400000000000000000000067101312234511600215400ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "errors" "reflect" "testing" "golang.org/x/net/context" ) type fetchResult struct { msgs []*Message err error } type fetcherService struct { service results []fetchResult unexpectedCall bool } func (s *fetcherService) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) { if len(s.results) == 0 { s.unexpectedCall = true return nil, errors.New("bang") } ret := s.results[0] s.results = s.results[1:] return ret.msgs, ret.err } func TestPuller(t *testing.T) { s := &fetcherService{ results: []fetchResult{ { msgs: []*Message{{ackID: "a"}, {ackID: "b"}}, }, {}, { msgs: []*Message{{ackID: "c"}, {ackID: "d"}}, }, { msgs: []*Message{{ackID: "e"}}, }, }, } pulled := make(chan string, 10) pull := newPuller(s, "subname", context.Background(), 2, func(ackID string) { pulled <- ackID }, func(string) {}) got := []string{} for i := 0; i < 5; i++ { m, err := pull.Next() got = append(got, m.ackID) if err != nil { t.Errorf("unexpected err from pull.Next: %v", err) } } _, err := pull.Next() if err == nil { t.Errorf("unexpected err from pull.Next: %v", err) } want := []string{"a", "b", "c", "d", "e"} if !reflect.DeepEqual(got, want) { t.Errorf("pulled ack ids: got: %v ; want: %v", got, want) } } func TestPullerAddsToKeepAlive(t *testing.T) { s := &fetcherService{ results: []fetchResult{ { msgs: []*Message{{ackID: "a"}, {ackID: "b"}}, }, { msgs: []*Message{{ackID: "c"}, {ackID: "d"}}, 
}, }, } pulled := make(chan string, 10) pull := newPuller(s, "subname", context.Background(), 2, func(ackID string) { pulled <- ackID }, func(string) {}) got := []string{} for i := 0; i < 3; i++ { m, err := pull.Next() got = append(got, m.ackID) if err != nil { t.Errorf("unexpected err from pull.Next: %v", err) } } want := []string{"a", "b", "c"} if !reflect.DeepEqual(got, want) { t.Errorf("pulled ack ids: got: %v ; want: %v", got, want) } close(pulled) // We should have seen "d" written to the channel too, even though it hasn't been returned yet. pulledIDs := []string{} for id := range pulled { pulledIDs = append(pulledIDs, id) } want = append(want, "d") if !reflect.DeepEqual(pulledIDs, want) { t.Errorf("pulled ack ids: got: %v ; want: %v", pulledIDs, want) } } func TestPullerRetriesOnce(t *testing.T) { bang := errors.New("bang") s := &fetcherService{ results: []fetchResult{ { err: bang, }, { err: bang, }, }, } pull := newPuller(s, "subname", context.Background(), 2, func(string) {}, func(string) {}) _, err := pull.Next() if err != bang { t.Errorf("pull.Next err got: %v, want: %v", err, bang) } if s.unexpectedCall { t.Errorf("unexpected retry") } if len(s.results) != 0 { t.Errorf("outstanding calls: got: %v, want: 0", len(s.results)) } } golang-google-cloud-0.9.0/pubsub/service.go000066400000000000000000000416111312234511600206350ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package pubsub import ( "fmt" "io" "math" "sync" "time" "github.com/golang/protobuf/ptypes" "cloud.google.com/go/iam" "cloud.google.com/go/internal/version" vkit "cloud.google.com/go/pubsub/apiv1" durpb "github.com/golang/protobuf/ptypes/duration" "golang.org/x/net/context" "google.golang.org/api/option" pb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) type nextStringFunc func() (string, error) type nextSnapshotFunc func() (*snapshotConfig, error) // service provides an internal abstraction to isolate the generated // PubSub API; most of this package uses this interface instead. // The single implementation, *apiService, contains all the knowledge // of the generated PubSub API (except for that present in legacy code). type service interface { createSubscription(ctx context.Context, subName string, cfg SubscriptionConfig) error getSubscriptionConfig(ctx context.Context, subName string) (SubscriptionConfig, string, error) listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc deleteSubscription(ctx context.Context, name string) error subscriptionExists(ctx context.Context, name string) (bool, error) modifyPushConfig(ctx context.Context, subName string, conf PushConfig) error createTopic(ctx context.Context, name string) error deleteTopic(ctx context.Context, name string) error topicExists(ctx context.Context, name string) (bool, error) listProjectTopics(ctx context.Context, projName string) nextStringFunc listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error) // splitAckIDs divides ackIDs into // * a batch of a size which is suitable for passing to acknowledge or // modifyAckDeadline, 
and // * the rest. splitAckIDs(ackIDs []string) ([]string, []string) // acknowledge ACKs the IDs in ackIDs. acknowledge(ctx context.Context, subName string, ackIDs []string) error iamHandle(resourceName string) *iam.Handle newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller createSnapshot(ctx context.Context, snapName, subName string) (*snapshotConfig, error) deleteSnapshot(ctx context.Context, snapName string) error listProjectSnapshots(ctx context.Context, projName string) nextSnapshotFunc // TODO(pongad): Raw proto returns an empty SeekResponse; figure out if we want to return it before GA. seekToTime(ctx context.Context, subName string, t time.Time) error seekToSnapshot(ctx context.Context, subName, snapName string) error close() error } type apiService struct { pubc *vkit.PublisherClient subc *vkit.SubscriberClient } func newPubSubService(ctx context.Context, opts []option.ClientOption) (*apiService, error) { pubc, err := vkit.NewPublisherClient(ctx, opts...) if err != nil { return nil, err } subc, err := vkit.NewSubscriberClient(ctx, option.WithGRPCConn(pubc.Connection())) if err != nil { _ = pubc.Close() // ignore error return nil, err } pubc.SetGoogleClientInfo("gccl", version.Repo) subc.SetGoogleClientInfo("gccl", version.Repo) return &apiService{pubc: pubc, subc: subc}, nil } func (s *apiService) close() error { // Return the first error, because the first call closes the connection. 
err := s.pubc.Close() _ = s.subc.Close() return err } func (s *apiService) createSubscription(ctx context.Context, subName string, cfg SubscriptionConfig) error { var rawPushConfig *pb.PushConfig if cfg.PushConfig.Endpoint != "" || len(cfg.PushConfig.Attributes) != 0 { rawPushConfig = &pb.PushConfig{ Attributes: cfg.PushConfig.Attributes, PushEndpoint: cfg.PushConfig.Endpoint, } } var retentionDuration *durpb.Duration if cfg.retentionDuration != 0 { retentionDuration = ptypes.DurationProto(cfg.retentionDuration) } _, err := s.subc.CreateSubscription(ctx, &pb.Subscription{ Name: subName, Topic: cfg.Topic.name, PushConfig: rawPushConfig, AckDeadlineSeconds: trunc32(int64(cfg.AckDeadline.Seconds())), RetainAckedMessages: cfg.retainAckedMessages, MessageRetentionDuration: retentionDuration, }) return err } func (s *apiService) getSubscriptionConfig(ctx context.Context, subName string) (SubscriptionConfig, string, error) { rawSub, err := s.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: subName}) if err != nil { return SubscriptionConfig{}, "", err } var rd time.Duration // TODO(pongad): Remove nil-check after white list is removed. if rawSub.MessageRetentionDuration != nil { if rd, err = ptypes.Duration(rawSub.MessageRetentionDuration); err != nil { return SubscriptionConfig{}, "", err } } sub := SubscriptionConfig{ AckDeadline: time.Second * time.Duration(rawSub.AckDeadlineSeconds), PushConfig: PushConfig{ Endpoint: rawSub.PushConfig.PushEndpoint, Attributes: rawSub.PushConfig.Attributes, }, retainAckedMessages: rawSub.RetainAckedMessages, retentionDuration: rd, } return sub, rawSub.Topic, nil } // stringsPage contains a list of strings and a token for fetching the next page. 
type stringsPage struct { strings []string tok string } func (s *apiService) listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc { it := s.subc.ListSubscriptions(ctx, &pb.ListSubscriptionsRequest{ Project: projName, }) return func() (string, error) { sub, err := it.Next() if err != nil { return "", err } return sub.Name, nil } } func (s *apiService) deleteSubscription(ctx context.Context, name string) error { return s.subc.DeleteSubscription(ctx, &pb.DeleteSubscriptionRequest{Subscription: name}) } func (s *apiService) subscriptionExists(ctx context.Context, name string) (bool, error) { _, err := s.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: name}) if err == nil { return true, nil } if grpc.Code(err) == codes.NotFound { return false, nil } return false, err } func (s *apiService) createTopic(ctx context.Context, name string) error { _, err := s.pubc.CreateTopic(ctx, &pb.Topic{Name: name}) return err } func (s *apiService) listProjectTopics(ctx context.Context, projName string) nextStringFunc { it := s.pubc.ListTopics(ctx, &pb.ListTopicsRequest{ Project: projName, }) return func() (string, error) { topic, err := it.Next() if err != nil { return "", err } return topic.Name, nil } } func (s *apiService) deleteTopic(ctx context.Context, name string) error { return s.pubc.DeleteTopic(ctx, &pb.DeleteTopicRequest{Topic: name}) } func (s *apiService) topicExists(ctx context.Context, name string) (bool, error) { _, err := s.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: name}) if err == nil { return true, nil } if grpc.Code(err) == codes.NotFound { return false, nil } return false, err } func (s *apiService) listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc { it := s.pubc.ListTopicSubscriptions(ctx, &pb.ListTopicSubscriptionsRequest{ Topic: topicName, }) return it.Next } func (s *apiService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error { return 
s.subc.ModifyAckDeadline(ctx, &pb.ModifyAckDeadlineRequest{ Subscription: subName, AckIds: ackIDs, AckDeadlineSeconds: trunc32(int64(deadline.Seconds())), }) } // maxPayload is the maximum number of bytes to devote to actual ids in // acknowledgement or modifyAckDeadline requests. A serialized // AcknowledgeRequest proto has a small constant overhead, plus the size of the // subscription name, plus 3 bytes per ID (a tag byte and two size bytes). A // ModifyAckDeadlineRequest has an additional few bytes for the deadline. We // don't know the subscription name here, so we just assume the size exclusive // of ids is 100 bytes. // // With gRPC there is no way for the client to know the server's max message size (it is // configurable on the server). We know from experience that it // it 512K. const ( maxPayload = 512 * 1024 reqFixedOverhead = 100 overheadPerID = 3 ) // splitAckIDs splits ids into two slices, the first of which contains at most maxPayload bytes of ackID data. func (s *apiService) splitAckIDs(ids []string) ([]string, []string) { total := reqFixedOverhead for i, id := range ids { total += len(id) + overheadPerID if total > maxPayload { return ids[:i], ids[i:] } } return ids, nil } func (s *apiService) acknowledge(ctx context.Context, subName string, ackIDs []string) error { return s.subc.Acknowledge(ctx, &pb.AcknowledgeRequest{ Subscription: subName, AckIds: ackIDs, }) } func (s *apiService) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) { resp, err := s.subc.Pull(ctx, &pb.PullRequest{ Subscription: subName, MaxMessages: maxMessages, }) if err != nil { return nil, err } return convertMessages(resp.ReceivedMessages) } func convertMessages(rms []*pb.ReceivedMessage) ([]*Message, error) { msgs := make([]*Message, 0, len(rms)) for i, m := range rms { msg, err := toMessage(m) if err != nil { return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m) } msgs = append(msgs, 
msg) } return msgs, nil } func (s *apiService) publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error) { rawMsgs := make([]*pb.PubsubMessage, len(msgs)) for i, msg := range msgs { rawMsgs[i] = &pb.PubsubMessage{ Data: msg.Data, Attributes: msg.Attributes, } } resp, err := s.pubc.Publish(ctx, &pb.PublishRequest{ Topic: topicName, Messages: rawMsgs, }) if err != nil { return nil, err } return resp.MessageIds, nil } func (s *apiService) modifyPushConfig(ctx context.Context, subName string, conf PushConfig) error { return s.subc.ModifyPushConfig(ctx, &pb.ModifyPushConfigRequest{ Subscription: subName, PushConfig: &pb.PushConfig{ Attributes: conf.Attributes, PushEndpoint: conf.Endpoint, }, }) } func (s *apiService) iamHandle(resourceName string) *iam.Handle { return iam.InternalNewHandle(s.pubc.Connection(), resourceName) } func trunc32(i int64) int32 { if i > math.MaxInt32 { i = math.MaxInt32 } return int32(i) } func (s *apiService) newStreamingPuller(ctx context.Context, subName string, ackDeadlineSecs int32) *streamingPuller { p := &streamingPuller{ ctx: ctx, subName: subName, ackDeadlineSecs: ackDeadlineSecs, subc: s.subc, } p.c = sync.NewCond(&p.mu) return p } type streamingPuller struct { ctx context.Context subName string ackDeadlineSecs int32 subc *vkit.SubscriberClient mu sync.Mutex c *sync.Cond inFlight bool closed bool // set after CloseSend called spc pb.Subscriber_StreamingPullClient err error } // open establishes (or re-establishes) a stream for pulling messages. // It takes care that only one RPC is in flight at a time. func (p *streamingPuller) open() error { p.c.L.Lock() defer p.c.L.Unlock() p.openLocked() return p.err } func (p *streamingPuller) openLocked() { if p.inFlight { // Another goroutine is opening; wait for it. for p.inFlight { p.c.Wait() } return } // No opens in flight; start one. 
p.inFlight = true p.c.L.Unlock() spc, err := p.subc.StreamingPull(p.ctx) if err == nil { err = spc.Send(&pb.StreamingPullRequest{ Subscription: p.subName, StreamAckDeadlineSeconds: p.ackDeadlineSecs, }) } p.c.L.Lock() p.spc = spc p.err = err p.inFlight = false p.c.Broadcast() } func (p *streamingPuller) call(f func(pb.Subscriber_StreamingPullClient) error) error { p.c.L.Lock() defer p.c.L.Unlock() // Wait for an open in flight. for p.inFlight { p.c.Wait() } // TODO(jba): better retry strategy. var err error for i := 0; i < 3; i++ { if p.err != nil { return p.err } spc := p.spc // Do not call f with the lock held. Only one goroutine calls Send // (streamingMessageIterator.sender) and only one calls Recv // (streamingMessageIterator.receiver). If we locked, then a // blocked Recv would prevent a Send from happening. p.c.L.Unlock() err = f(spc) p.c.L.Lock() if !p.closed && (err == io.EOF || grpc.Code(err) == codes.Unavailable) { time.Sleep(500 * time.Millisecond) p.openLocked() continue } // Not a retry-able error; fail permanently. // TODO(jba): for some errors, should we retry f (the Send or Recv) // but not re-open the stream? 
p.err = err return err } p.err = fmt.Errorf("retry exceeded; last error was %v", err) return p.err } func (p *streamingPuller) fetchMessages() ([]*Message, error) { var res *pb.StreamingPullResponse err := p.call(func(spc pb.Subscriber_StreamingPullClient) error { var err error res, err = spc.Recv() return err }) if err != nil { return nil, err } return convertMessages(res.ReceivedMessages) } func (p *streamingPuller) send(req *pb.StreamingPullRequest) error { // Note: len(modAckIDs) == len(modSecs) var rest *pb.StreamingPullRequest for len(req.AckIds) > 0 || len(req.ModifyDeadlineAckIds) > 0 { req, rest = splitRequest(req, maxPayload) err := p.call(func(spc pb.Subscriber_StreamingPullClient) error { x := spc.Send(req) return x }) if err != nil { return err } req = rest } return nil } func (p *streamingPuller) closeSend() { p.mu.Lock() p.closed = true p.mu.Unlock() p.spc.CloseSend() } // Split req into a prefix that is smaller than maxSize, and a remainder. func splitRequest(req *pb.StreamingPullRequest, maxSize int) (prefix, remainder *pb.StreamingPullRequest) { const int32Bytes = 4 // Copy all fields before splitting the variable-sized ones. remainder = &pb.StreamingPullRequest{} *remainder = *req // Split message so it isn't too big. 
size := reqFixedOverhead i := 0 for size < maxSize && (i < len(req.AckIds) || i < len(req.ModifyDeadlineAckIds)) { if i < len(req.AckIds) { size += overheadPerID + len(req.AckIds[i]) } if i < len(req.ModifyDeadlineAckIds) { size += overheadPerID + len(req.ModifyDeadlineAckIds[i]) + int32Bytes } i++ } min := func(a, b int) int { if a < b { return a } return b } j := i if size > maxSize { j-- } k := min(j, len(req.AckIds)) remainder.AckIds = req.AckIds[k:] req.AckIds = req.AckIds[:k] k = min(j, len(req.ModifyDeadlineAckIds)) remainder.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[k:] remainder.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[k:] req.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[:k] req.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[:k] return req, remainder } func (s *apiService) createSnapshot(ctx context.Context, snapName, subName string) (*snapshotConfig, error) { snap, err := s.subc.CreateSnapshot(ctx, &pb.CreateSnapshotRequest{ Name: snapName, Subscription: subName, }) if err != nil { return nil, err } return s.toSnapshotConfig(snap) } func (s *apiService) deleteSnapshot(ctx context.Context, snapName string) error { return s.subc.DeleteSnapshot(ctx, &pb.DeleteSnapshotRequest{Snapshot: snapName}) } func (s *apiService) listProjectSnapshots(ctx context.Context, projName string) nextSnapshotFunc { it := s.subc.ListSnapshots(ctx, &pb.ListSnapshotsRequest{ Project: projName, }) return func() (*snapshotConfig, error) { snap, err := it.Next() if err != nil { return nil, err } return s.toSnapshotConfig(snap) } } func (s *apiService) toSnapshotConfig(snap *pb.Snapshot) (*snapshotConfig, error) { exp, err := ptypes.Timestamp(snap.ExpireTime) if err != nil { return nil, err } return &snapshotConfig{ snapshot: &snapshot{ s: s, name: snap.Name, }, Topic: newTopic(s, snap.Topic), Expiration: exp, }, nil } func (s *apiService) seekToTime(ctx context.Context, subName string, t time.Time) error { ts, err := ptypes.TimestampProto(t) if err != nil { return 
err } _, err = s.subc.Seek(ctx, &pb.SeekRequest{ Subscription: subName, Target: &pb.SeekRequest_Time{ts}, }) return err } func (s *apiService) seekToSnapshot(ctx context.Context, subName, snapName string) error { _, err := s.subc.Seek(ctx, &pb.SeekRequest{ Subscription: subName, Target: &pb.SeekRequest_Snapshot{snapName}, }) return err } golang-google-cloud-0.9.0/pubsub/service_test.go000066400000000000000000000037661312234511600217050ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package pubsub import ( "reflect" "testing" pb "google.golang.org/genproto/googleapis/pubsub/v1" ) func TestSplitRequest(t *testing.T) { split := func(a []string, i int) ([]string, []string) { if len(a) < i { return a, nil } return a[:i], a[i:] } ackIDs := []string{"aaaa", "bbbb", "cccc", "dddd", "eeee"} modDeadlines := []int32{1, 2, 3, 4, 5} for i, test := range []struct { ackIDs []string modAckIDs []string splitIndex int }{ {ackIDs, ackIDs, 2}, {nil, ackIDs, 3}, {ackIDs, nil, 5}, {nil, ackIDs[:1], 1}, } { req := &pb.StreamingPullRequest{ AckIds: test.ackIDs, ModifyDeadlineAckIds: test.modAckIDs, ModifyDeadlineSeconds: modDeadlines[:len(test.modAckIDs)], } a1, a2 := split(test.ackIDs, test.splitIndex) m1, m2 := split(test.modAckIDs, test.splitIndex) want1 := &pb.StreamingPullRequest{ AckIds: a1, ModifyDeadlineAckIds: m1, ModifyDeadlineSeconds: modDeadlines[:len(m1)], } want2 := &pb.StreamingPullRequest{ AckIds: a2, ModifyDeadlineAckIds: m2, ModifyDeadlineSeconds: modDeadlines[len(m1) : len(m1)+len(m2)], } got1, got2 := splitRequest(req, reqFixedOverhead+40) if !reflect.DeepEqual(got1, want1) { t.Errorf("#%d: first:\ngot %+v\nwant %+v", i, got1, want1) } if !reflect.DeepEqual(got2, want2) { t.Errorf("#%d: second:\ngot %+v\nwant %+v", i, got2, want2) } } } golang-google-cloud-0.9.0/pubsub/snapshot.go000066400000000000000000000102701312234511600210310ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "strings" "time" vkit "cloud.google.com/go/pubsub/apiv1" "golang.org/x/net/context" ) // Snapshot is a reference to a PubSub snapshot. type snapshot struct { s service // The fully qualified identifier for the snapshot, in the format "projects//snapshots/" name string } // ID returns the unique identifier of the snapshot within its project. func (s *snapshot) ID() string { slash := strings.LastIndex(s.name, "/") if slash == -1 { // name is not a fully-qualified name. panic("bad snapshot name") } return s.name[slash+1:] } // SnapshotConfig contains the details of a Snapshot. type snapshotConfig struct { *snapshot Topic *Topic Expiration time.Time } // Snapshot creates a reference to a snapshot. func (c *Client) snapshot(id string) *snapshot { return &snapshot{ s: c.s, name: vkit.SubscriberSnapshotPath(c.projectID, id), } } // Snapshots returns an iterator which returns snapshots for this project. func (c *Client) snapshots(ctx context.Context) *snapshotConfigIterator { return &snapshotConfigIterator{ next: c.s.listProjectSnapshots(ctx, c.fullyQualifiedProjectName()), } } // SnapshotConfigIterator is an iterator that returns a series of snapshots. type snapshotConfigIterator struct { next nextSnapshotFunc } // Next returns the next SnapshotConfig. Its second return value is iterator.Done if there are no more results. // Once Next returns iterator.Done, all subsequent calls will return iterator.Done. func (snaps *snapshotConfigIterator) Next() (*snapshotConfig, error) { return snaps.next() } // Delete deletes a snapshot. func (snap *snapshot) delete(ctx context.Context) error { return snap.s.deleteSnapshot(ctx, snap.name) } // SeekTime seeks the subscription to a point in time. 
// // Messages retained in the subscription that were published before this // time are marked as acknowledged, and messages retained in the // subscription that were published after this time are marked as // unacknowledged. Note that this operation affects only those messages // retained in the subscription (configured by SnapshotConfig). For example, // if `time` corresponds to a point before the message retention // window (or to a point before the system's notion of the subscription // creation time), only retained messages will be marked as unacknowledged, // and already-expunged messages will not be restored. func (s *Subscription) seekToTime(ctx context.Context, t time.Time) error { return s.s.seekToTime(ctx, s.name, t) } // Snapshot creates a new snapshot from this subscription. // The snapshot will be for the topic this subscription is subscribed to. // If the name is empty string, a unique name is assigned. // // The created snapshot is guaranteed to retain: // (a) The existing backlog on the subscription. More precisely, this is // defined as the messages in the subscription's backlog that are // unacknowledged when Snapshot returns without error. // (b) Any messages published to the subscription's topic following // Snapshot returning without error. func (s *Subscription) createSnapshot(ctx context.Context, name string) (*snapshotConfig, error) { if name != "" { name = vkit.SubscriberSnapshotPath(strings.Split(s.name, "/")[1], name) } return s.s.createSnapshot(ctx, name, s.name) } // SeekSnapshot seeks the subscription to a snapshot. // // The snapshot needs not be created from this subscription, // but the snapshot must be for the topic this subscription is subscribed to. 
func (s *Subscription) seekToSnapshot(ctx context.Context, snap *snapshot) error { return s.s.seekToSnapshot(ctx, s.name, snap.name) } golang-google-cloud-0.9.0/pubsub/streaming_pull_test.go000066400000000000000000000221171312234511600232610ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub // TODO(jba): test keepalive // TODO(jba): test that expired messages are not kept alive // TODO(jba): test that when all messages expire, Stop returns. 
import ( "io" "reflect" "strconv" "sync" "sync/atomic" "testing" "time" tspb "github.com/golang/protobuf/ptypes/timestamp" "golang.org/x/net/context" "google.golang.org/api/option" pb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( timestamp = &tspb.Timestamp{} testMessages = []*pb.ReceivedMessage{ {AckId: "0", Message: &pb.PubsubMessage{Data: []byte{1}, PublishTime: timestamp}}, {AckId: "1", Message: &pb.PubsubMessage{Data: []byte{2}, PublishTime: timestamp}}, {AckId: "2", Message: &pb.PubsubMessage{Data: []byte{3}, PublishTime: timestamp}}, } ) func TestStreamingPullBasic(t *testing.T) { client, server := newFake(t) server.addStreamingPullMessages(testMessages) testStreamingPullIteration(t, client, server, testMessages) } func TestStreamingPullMultipleFetches(t *testing.T) { client, server := newFake(t) server.addStreamingPullMessages(testMessages[:1]) server.addStreamingPullMessages(testMessages[1:]) testStreamingPullIteration(t, client, server, testMessages) } func testStreamingPullIteration(t *testing.T, client *Client, server *fakeServer, msgs []*pb.ReceivedMessage) { if !useStreamingPull { t.SkipNow() } sub := client.Subscription("s") gotMsgs, err := pullN(context.Background(), sub, len(msgs), func(_ context.Context, m *Message) { id, err := strconv.Atoi(m.ackID) if err != nil { panic(err) } // ack evens, nack odds if id%2 == 0 { m.Ack() } else { m.Nack() } }) if err != nil { t.Fatalf("Pull: %v", err) } gotMap := map[string]*Message{} for _, m := range gotMsgs { gotMap[m.ackID] = m } for i, msg := range msgs { want, err := toMessage(msg) if err != nil { t.Fatal(err) } want.calledDone = true got := gotMap[want.ackID] if got == nil { t.Errorf("%d: no message for ackID %q", i, want.ackID) continue } got.doneFunc = nil // Don't compare done; it's a function. 
if !reflect.DeepEqual(got, want) { t.Errorf("%d: got\n%#v\nwant\n%#v", i, got, want) } } server.wait() for i := 0; i < len(msgs); i++ { id := msgs[i].AckId if i%2 == 0 { if !server.Acked[id] { t.Errorf("msg %q should have been acked but wasn't", id) } } else { if dl, ok := server.Deadlines[id]; !ok || dl != 0 { t.Errorf("msg %q should have been nacked but wasn't", id) } } } } func TestStreamingPullError(t *testing.T) { // If an RPC to the service returns a non-retryable error, Pull should // return after all callbacks return, without waiting for messages to be // acked. if !useStreamingPull { t.SkipNow() } client, server := newFake(t) server.addStreamingPullMessages(testMessages[:1]) server.addStreamingPullError(grpc.Errorf(codes.Internal, "")) sub := client.Subscription("s") callbackDone := make(chan struct{}) ctx, _ := context.WithTimeout(context.Background(), time.Second) err := sub.Receive(ctx, func(ctx context.Context, m *Message) { defer close(callbackDone) select { case <-ctx.Done(): return } }) select { case <-callbackDone: default: t.Fatal("Receive returned but callback was not done") } if want := codes.Internal; grpc.Code(err) != want { t.Fatalf("got <%v>, want code %v", err, want) } } func TestStreamingPullCancel(t *testing.T) { // If Receive's context is canceled, it should return after all callbacks // return and all messages have been acked. 
if !useStreamingPull { t.SkipNow() } client, server := newFake(t) server.addStreamingPullMessages(testMessages) sub := client.Subscription("s") ctx, cancel := context.WithTimeout(context.Background(), time.Second) var n int32 err := sub.Receive(ctx, func(ctx2 context.Context, m *Message) { atomic.AddInt32(&n, 1) defer atomic.AddInt32(&n, -1) cancel() }) if got := atomic.LoadInt32(&n); got != 0 { t.Errorf("Receive returned with %d callbacks still running", got) } if err != nil { t.Fatalf("Receive got <%v>, want nil", err) } } func TestStreamingPullRetry(t *testing.T) { if !useStreamingPull { t.SkipNow() } // Check that we retry on io.EOF or Unavailable. client, server := newFake(t) server.addStreamingPullMessages(testMessages[:1]) server.addStreamingPullError(io.EOF) server.addStreamingPullError(io.EOF) server.addStreamingPullMessages(testMessages[1:2]) server.addStreamingPullError(grpc.Errorf(codes.Unavailable, "")) server.addStreamingPullError(grpc.Errorf(codes.Unavailable, "")) server.addStreamingPullMessages(testMessages[2:]) testStreamingPullIteration(t, client, server, testMessages) } func TestStreamingPullOneActive(t *testing.T) { // Only one call to Pull can be active at a time. 
if !useStreamingPull { t.SkipNow() } client, srv := newFake(t) srv.addStreamingPullMessages(testMessages[:1]) sub := client.Subscription("s") ctx, cancel := context.WithCancel(context.Background()) err := sub.Receive(ctx, func(ctx context.Context, m *Message) { m.Ack() err := sub.Receive(ctx, func(context.Context, *Message) {}) if err != errReceiveInProgress { t.Errorf("got <%v>, want <%v>", err, errReceiveInProgress) } cancel() }) if err != nil { t.Fatalf("got <%v>, want nil", err) } } func TestStreamingPullConcurrent(t *testing.T) { if !useStreamingPull { t.SkipNow() } newMsg := func(i int) *pb.ReceivedMessage { return &pb.ReceivedMessage{ AckId: strconv.Itoa(i), Message: &pb.PubsubMessage{Data: []byte{byte(i)}, PublishTime: timestamp}, } } // Multiple goroutines should be able to read from the same iterator. client, server := newFake(t) // Add a lot of messages, a few at a time, to make sure both threads get a chance. nMessages := 100 for i := 0; i < nMessages; i += 2 { server.addStreamingPullMessages([]*pb.ReceivedMessage{newMsg(i), newMsg(i + 1)}) } sub := client.Subscription("s") ctx, _ := context.WithTimeout(context.Background(), time.Second) gotMsgs, err := pullN(ctx, sub, nMessages, func(ctx context.Context, m *Message) { m.Ack() }) if err != nil { t.Fatalf("Receive: %v", err) } seen := map[string]bool{} for _, gm := range gotMsgs { if seen[gm.ackID] { t.Fatalf("duplicate ID %q", gm.ackID) } seen[gm.ackID] = true } if len(seen) != nMessages { t.Fatalf("got %d messages, want %d", len(seen), nMessages) } } func TestStreamingPullFlowControl(t *testing.T) { // Callback invocations should not occur if flow control limits are exceeded. 
if !useStreamingPull { t.SkipNow() } client, server := newFake(t) server.addStreamingPullMessages(testMessages) sub := client.Subscription("s") sub.ReceiveSettings.MaxOutstandingMessages = 2 ctx, cancel := context.WithCancel(context.Background()) activec := make(chan int) waitc := make(chan int) errc := make(chan error) go func() { errc <- sub.Receive(ctx, func(_ context.Context, m *Message) { activec <- 1 <-waitc m.Ack() }) }() // Here, two callbacks are active. Receive should be blocked in the flow // control acquire method on the third message. <-activec <-activec select { case <-activec: t.Fatal("third callback in progress") case <-time.After(100 * time.Millisecond): } cancel() // Receive still has not returned, because both callbacks are still blocked on waitc. select { case err := <-errc: t.Fatalf("Receive returned early with error %v", err) case <-time.After(100 * time.Millisecond): } // Let both callbacks proceed. waitc <- 1 waitc <- 1 // The third callback will never run, because acquire returned a non-nil // error, causing Receive to return. So now Receive should end. if err := <-errc; err != nil { t.Fatalf("got %v from Receive, want nil", err) } } func newFake(t *testing.T) (*Client, *fakeServer) { srv, err := newFakeServer() if err != nil { t.Fatal(err) } conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) if err != nil { t.Fatal(err) } client, err := NewClient(context.Background(), "projectID", option.WithGRPCConn(conn)) if err != nil { t.Fatal(err) } return client, srv } // pullN calls sub.Receive until at least n messages are received. 
func pullN(ctx context.Context, sub *Subscription, n int, f func(context.Context, *Message)) ([]*Message, error) { var ( mu sync.Mutex msgs []*Message ) cctx, cancel := context.WithCancel(ctx) err := sub.Receive(cctx, func(ctx context.Context, m *Message) { mu.Lock() msgs = append(msgs, m) nSeen := len(msgs) mu.Unlock() f(ctx, m) if nSeen >= n { cancel() } }) if err != nil { return nil, err } return msgs, nil } golang-google-cloud-0.9.0/pubsub/subscription.go000066400000000000000000000305741312234511600217270ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "errors" "fmt" "strings" "sync" "time" "cloud.google.com/go/iam" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // Subscription is a reference to a PubSub subscription. type Subscription struct { s service // The fully qualified identifier for the subscription, in the format "projects//subscriptions/" name string // Settings for pulling messages. Configure these before calling Receive. ReceiveSettings ReceiveSettings mu sync.Mutex receiveActive bool } // Subscription creates a reference to a subscription. 
func (c *Client) Subscription(id string) *Subscription { return newSubscription(c.s, fmt.Sprintf("projects/%s/subscriptions/%s", c.projectID, id)) } func newSubscription(s service, name string) *Subscription { return &Subscription{ s: s, name: name, } } // String returns the globally unique printable name of the subscription. func (s *Subscription) String() string { return s.name } // ID returns the unique identifier of the subscription within its project. func (s *Subscription) ID() string { slash := strings.LastIndex(s.name, "/") if slash == -1 { // name is not a fully-qualified name. panic("bad subscription name") } return s.name[slash+1:] } // Subscriptions returns an iterator which returns all of the subscriptions for the client's project. func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator { return &SubscriptionIterator{ s: c.s, next: c.s.listProjectSubscriptions(ctx, c.fullyQualifiedProjectName()), } } // SubscriptionIterator is an iterator that returns a series of subscriptions. type SubscriptionIterator struct { s service next nextStringFunc } // Next returns the next subscription. If there are no more subscriptions, iterator.Done will be returned. func (subs *SubscriptionIterator) Next() (*Subscription, error) { subName, err := subs.next() if err != nil { return nil, err } return newSubscription(subs.s, subName), nil } // PushConfig contains configuration for subscriptions that operate in push mode. type PushConfig struct { // A URL locating the endpoint to which messages should be pushed. Endpoint string // Endpoint configuration attributes. See https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions#pushconfig for more details. Attributes map[string]string } // Subscription config contains the configuration of a subscription. 
type SubscriptionConfig struct { Topic *Topic PushConfig PushConfig // The default maximum time after a subscriber receives a message before // the subscriber should acknowledge the message. Note: messages which are // obtained via Subscription.Receive need not be acknowledged within this // deadline, as the deadline will be automatically extended. AckDeadline time.Duration // Whether to retain acknowledged messages. If true, acknowledged messages // will not be expunged until they fall out of the RetentionDuration window. retainAckedMessages bool // How long to retain messages in backlog, from the time of publish. If RetainAckedMessages is true, // this duration affects the retention of acknowledged messages, // otherwise only unacknowledged messages are retained. // Defaults to 7 days. Cannot be longer than 7 days or shorter than 10 minutes. retentionDuration time.Duration } // ReceiveSettings configure the Receive method. // A zero ReceiveSettings will result in values equivalent to DefaultReceiveSettings. type ReceiveSettings struct { // MaxExtension is the maximum period for which the Subscription should // automatically extend the ack deadline for each message. // // The Subscription will automatically extend the ack deadline of all // fetched Messages for the duration specified. Automatic deadline // extension may be disabled by specifying a duration less than 1. MaxExtension time.Duration // MaxOutstandingMessages is the maximum number of unprocessed messages // (unacknowledged but not yet expired). If MaxOutstandingMessages is 0, it // will be treated as if it were DefaultReceiveSettings.MaxOutstandingMessages. // If the value is negative, then there will be no limit on the number of // unprocessed messages. MaxOutstandingMessages int // MaxOutstandingBytes is the maximum size of unprocessed messages // (unacknowledged but not yet expired). If MaxOutstandingBytes is 0, it will // be treated as if it were DefaultReceiveSettings.MaxOutstandingBytes. 
If // the value is negative, then there will be no limit on the number of bytes // for unprocessed messages. MaxOutstandingBytes int } // DefaultReceiveSettings holds the default values for ReceiveSettings. var DefaultReceiveSettings = ReceiveSettings{ MaxExtension: 10 * time.Minute, MaxOutstandingMessages: 1000, MaxOutstandingBytes: 1e9, // 1G } // Delete deletes the subscription. func (s *Subscription) Delete(ctx context.Context) error { return s.s.deleteSubscription(ctx, s.name) } // Exists reports whether the subscription exists on the server. func (s *Subscription) Exists(ctx context.Context) (bool, error) { return s.s.subscriptionExists(ctx, s.name) } // Config fetches the current configuration for the subscription. func (s *Subscription) Config(ctx context.Context) (SubscriptionConfig, error) { conf, topicName, err := s.s.getSubscriptionConfig(ctx, s.name) if err != nil { return SubscriptionConfig{}, err } conf.Topic = &Topic{ s: s.s, name: topicName, } return conf, nil } // ModifyPushConfig updates the endpoint URL and other attributes of a push subscription. func (s *Subscription) ModifyPushConfig(ctx context.Context, conf PushConfig) error { return s.s.modifyPushConfig(ctx, s.name, conf) } func (s *Subscription) IAM() *iam.Handle { return s.s.iamHandle(s.name) } // CreateSubscription creates a new subscription on a topic. // // id is the name of the subscription to create. It must start with a letter, // and contain only letters ([A-Za-z]), numbers ([0-9]), dashes (-), // underscores (_), periods (.), tildes (~), plus (+) or percent signs (%). It // must be between 3 and 255 characters in length, and must not start with // "goog". // // cfg.Topic is the topic from which the subscription should receive messages. It // need not belong to the same project as the subscription. This field is required. // // cfg.AckDeadline is the maximum time after a subscriber receives a message before // the subscriber should acknowledge the message. 
It must be between 10 and 600 // seconds (inclusive), and is rounded down to the nearest second. If the // provided ackDeadline is 0, then the default value of 10 seconds is used. // Note: messages which are obtained via Subscription.Receive need not be // acknowledged within this deadline, as the deadline will be automatically // extended. // // cfg.PushConfig may be set to configure this subscription for push delivery. // // If the subscription already exists an error will be returned. func (c *Client) CreateSubscription(ctx context.Context, id string, cfg SubscriptionConfig) (*Subscription, error) { if cfg.Topic == nil { return nil, errors.New("pubsub: require non-nil Topic") } if cfg.AckDeadline == 0 { cfg.AckDeadline = 10 * time.Second } if d := cfg.AckDeadline; d < 10*time.Second || d > 600*time.Second { return nil, fmt.Errorf("ack deadline must be between 10 and 600 seconds; got: %v", d) } sub := c.Subscription(id) err := c.s.createSubscription(ctx, sub.name, cfg) return sub, err } var errReceiveInProgress = errors.New("pubsub: Receive already in progress for this subscription") // Receive calls f with the outstanding messages from the subscription. // It blocks until ctx is done, or the service returns a non-retryable error. // // The standard way to terminate a Receive is to cancel its context: // // cctx, cancel := context.WithCancel(ctx) // err := sub.Receive(cctx, callback) // // Call cancel from callback, or another goroutine. // // If the service returns a non-retryable error, Receive returns that error after // all of the outstanding calls to f have returned. If ctx is done, Receive // returns either nil after all of the outstanding calls to f have returned and // all messages have been acknowledged or have expired. // // Receive calls f concurrently from multiple goroutines. 
It is encouraged to // process messages synchronously in f, even if that processing is relatively // time-consuming; Receive will spawn new goroutines for incoming messages, // limited by MaxOutstandingMessages and MaxOutstandingBytes in ReceiveSettings. // // The context passed to f will be canceled when ctx is Done or there is a // fatal service error. // // Receive will automatically extend the ack deadline of all fetched Messages for the // period specified by s.ReceiveSettings.MaxExtension. // // Each Subscription may have only one invocation of Receive active at a time. func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Message)) error { s.mu.Lock() if s.receiveActive { s.mu.Unlock() return errReceiveInProgress } s.receiveActive = true s.mu.Unlock() defer func() { s.mu.Lock(); s.receiveActive = false; s.mu.Unlock() }() config, err := s.Config(ctx) if err != nil { if grpc.Code(err) == codes.Canceled { return nil } return err } maxCount := s.ReceiveSettings.MaxOutstandingMessages if maxCount == 0 { maxCount = DefaultReceiveSettings.MaxOutstandingMessages } maxBytes := s.ReceiveSettings.MaxOutstandingBytes if maxBytes == 0 { maxBytes = DefaultReceiveSettings.MaxOutstandingBytes } maxExt := s.ReceiveSettings.MaxExtension if maxExt == 0 { maxExt = DefaultReceiveSettings.MaxExtension } else if maxExt < 0 { // If MaxExtension is negative, disable automatic extension. maxExt = 0 } // TODO(jba): add tests that verify that ReceiveSettings are correctly processed. po := &pullOptions{ maxExtension: maxExt, maxPrefetch: trunc32(int64(maxCount)), ackDeadline: config.AckDeadline, } fc := newFlowController(maxCount, maxBytes) // Wait for all goroutines started by Receive to return, so instead of an // obscure goroutine leak we have an obvious blocked call to Receive. 
var wg sync.WaitGroup defer wg.Wait() return s.receive(ctx, &wg, po, fc, f) } func (s *Subscription) receive(ctx context.Context, wg *sync.WaitGroup, po *pullOptions, fc *flowController, f func(context.Context, *Message)) error { // Cancel a sub-context when we return, to kick the context-aware callbacks // and the goroutine below. ctx2, cancel := context.WithCancel(ctx) // Call stop when Receive's context is done. // Stop will block until all outstanding messages have been acknowledged // or there was a fatal service error. // The iterator does not use the context passed to Receive. If it did, canceling // that context would immediately stop the iterator without waiting for unacked // messages. iter := newMessageIterator(context.Background(), s.s, s.name, po) wg.Add(1) go func() { <-ctx2.Done() iter.Stop() wg.Done() }() defer cancel() for { msg, err := iter.Next() if err == iterator.Done { return nil } if err != nil { return err } // TODO(jba): call acquire closer to when the message is allocated. if err := fc.acquire(ctx, len(msg.Data)); err != nil { // TODO(jba): test that this "orphaned" message is nacked immediately when ctx is done. msg.Nack() return nil } wg.Add(1) go func() { defer wg.Done() // TODO(jba): call release when the message is available for GC. // This considers the message to be released when // f is finished, but f may ack early or not at all. defer fc.release(len(msg.Data)) f(ctx2, msg) }() } } // TODO(jba): remove when we delete messageIterator. type pullOptions struct { maxExtension time.Duration maxPrefetch int32 // ackDeadline is the default ack deadline for the subscription. Not // configurable. ackDeadline time.Duration } golang-google-cloud-0.9.0/pubsub/subscription_test.go000066400000000000000000000100521312234511600227530ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "reflect" "testing" "golang.org/x/net/context" "google.golang.org/api/iterator" ) type subListService struct { service subs []string err error t *testing.T // for error logging. } func (s *subListService) newNextStringFunc() nextStringFunc { return func() (string, error) { if len(s.subs) == 0 { return "", iterator.Done } sn := s.subs[0] s.subs = s.subs[1:] return sn, s.err } } func (s *subListService) listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc { if projName != "projects/projid" { s.t.Fatalf("unexpected call: projName: %q", projName) return nil } return s.newNextStringFunc() } func (s *subListService) listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc { if topicName != "projects/projid/topics/topic" { s.t.Fatalf("unexpected call: topicName: %q", topicName) return nil } return s.newNextStringFunc() } // All returns the remaining subscriptions from this iterator. 
func slurpSubs(it *SubscriptionIterator) ([]*Subscription, error) { var subs []*Subscription for { switch sub, err := it.Next(); err { case nil: subs = append(subs, sub) case iterator.Done: return subs, nil default: return nil, err } } } func TestSubscriptionID(t *testing.T) { const id = "id" serv := &subListService{ subs: []string{"projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2"}, t: t, } c := &Client{projectID: "projid", s: serv} s := c.Subscription(id) if got, want := s.ID(), id; got != want { t.Errorf("Subscription.ID() = %q; want %q", got, want) } want := []string{"s1", "s2"} subs, err := slurpSubs(c.Subscriptions(context.Background())) if err != nil { t.Errorf("error listing subscriptions: %v", err) } for i, s := range subs { if got, want := s.ID(), want[i]; got != want { t.Errorf("Subscription.ID() = %q; want %q", got, want) } } } func TestListProjectSubscriptions(t *testing.T) { snames := []string{"projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2", "projects/projid/subscriptions/s3"} s := &subListService{subs: snames, t: t} c := &Client{projectID: "projid", s: s} subs, err := slurpSubs(c.Subscriptions(context.Background())) if err != nil { t.Errorf("error listing subscriptions: %v", err) } got := subNames(subs) want := []string{ "projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2", "projects/projid/subscriptions/s3"} if !reflect.DeepEqual(got, want) { t.Errorf("sub list: got: %v, want: %v", got, want) } if len(s.subs) != 0 { t.Errorf("outstanding subs: %v", s.subs) } } func TestListTopicSubscriptions(t *testing.T) { snames := []string{"projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2", "projects/projid/subscriptions/s3"} s := &subListService{subs: snames, t: t} c := &Client{projectID: "projid", s: s} subs, err := slurpSubs(c.Topic("topic").Subscriptions(context.Background())) if err != nil { t.Errorf("error listing subscriptions: %v", err) } got := subNames(subs) want := 
[]string{ "projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2", "projects/projid/subscriptions/s3"} if !reflect.DeepEqual(got, want) { t.Errorf("sub list: got: %v, want: %v", got, want) } if len(s.subs) != 0 { t.Errorf("outstanding subs: %v", s.subs) } } func subNames(subs []*Subscription) []string { var names []string for _, sub := range subs { names = append(names, sub.name) } return names } golang-google-cloud-0.9.0/pubsub/topic.go000066400000000000000000000251521312234511600203150ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "errors" "fmt" "runtime" "strings" "sync" "time" "cloud.google.com/go/iam" "github.com/golang/protobuf/proto" "golang.org/x/net/context" "google.golang.org/api/support/bundler" pb "google.golang.org/genproto/googleapis/pubsub/v1" ) const ( // The maximum number of messages that can be in a single publish request, as // determined by the PubSub service. MaxPublishRequestCount = 1000 // The maximum size of a single publish request in bytes, as determined by the PubSub service. MaxPublishRequestBytes = 1e7 maxInt = int(^uint(0) >> 1) ) // ErrOversizedMessage indicates that a message's size exceeds MaxPublishRequestBytes. var ErrOversizedMessage = bundler.ErrOversizedItem // Topic is a reference to a PubSub topic. // // The methods of Topic are safe for use by multiple goroutines. 
type Topic struct { s service // The fully qualified identifier for the topic, in the format "projects//topics/" name string // Settings for publishing messages. All changes must be made before the // first call to Publish. The default is DefaultPublishSettings. PublishSettings PublishSettings mu sync.RWMutex stopped bool bundler *bundler.Bundler wg sync.WaitGroup // Channel for message bundles to be published. Close to indicate that Stop was called. bundlec chan []*bundledMessage } // PublishSettings control the bundling of published messages. type PublishSettings struct { // Publish a non-empty batch after this delay has passed. DelayThreshold time.Duration // Publish a batch when it has this many messages. The maximum is // MaxPublishRequestCount. CountThreshold int // Publish a batch when its size in bytes reaches this value. ByteThreshold int // The number of goroutines that invoke the Publish RPC concurrently. // Defaults to a multiple of GOMAXPROCS. NumGoroutines int // The maximum time that the client will attempt to publish a bundle of messages. Timeout time.Duration } // DefaultPublishSettings holds the default values for topics' PublishSettings. var DefaultPublishSettings = PublishSettings{ DelayThreshold: 1 * time.Millisecond, CountThreshold: 100, ByteThreshold: 1e6, Timeout: 60 * time.Second, } // CreateTopic creates a new topic. // The specified topic ID must start with a letter, and contain only letters // ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.), // tildes (~), plus (+) or percent signs (%). It must be between 3 and 255 // characters in length, and must not start with "goog". // If the topic already exists an error will be returned. func (c *Client) CreateTopic(ctx context.Context, id string) (*Topic, error) { t := c.Topic(id) err := c.s.createTopic(ctx, t.name) return t, err } // Topic creates a reference to a topic. // // If a Topic's Publish method is called, it has background goroutines // associated with it. 
Clean them up by calling Topic.Stop. // // Avoid creating many Topic instances if you use them to publish. func (c *Client) Topic(id string) *Topic { return newTopic(c.s, fmt.Sprintf("projects/%s/topics/%s", c.projectID, id)) } func newTopic(s service, name string) *Topic { // bundlec is unbuffered. A buffer would occupy memory not // accounted for by the bundler, so BufferedByteLimit would be a lie: // the actual memory consumed would be higher. return &Topic{ s: s, name: name, PublishSettings: DefaultPublishSettings, bundlec: make(chan []*bundledMessage), } } // Topics returns an iterator which returns all of the topics for the client's project. func (c *Client) Topics(ctx context.Context) *TopicIterator { return &TopicIterator{ s: c.s, next: c.s.listProjectTopics(ctx, c.fullyQualifiedProjectName()), } } // TopicIterator is an iterator that returns a series of topics. type TopicIterator struct { s service next nextStringFunc } // Next returns the next topic. If there are no more topics, iterator.Done will be returned. func (tps *TopicIterator) Next() (*Topic, error) { topicName, err := tps.next() if err != nil { return nil, err } return newTopic(tps.s, topicName), nil } // ID returns the unique idenfier of the topic within its project. func (t *Topic) ID() string { slash := strings.LastIndex(t.name, "/") if slash == -1 { // name is not a fully-qualified name. panic("bad topic name") } return t.name[slash+1:] } // String returns the printable globally unique name for the topic. func (t *Topic) String() string { return t.name } // Delete deletes the topic. func (t *Topic) Delete(ctx context.Context) error { return t.s.deleteTopic(ctx, t.name) } // Exists reports whether the topic exists on the server. 
func (t *Topic) Exists(ctx context.Context) (bool, error) { if t.name == "_deleted-topic_" { return false, nil } return t.s.topicExists(ctx, t.name) } func (t *Topic) IAM() *iam.Handle { return t.s.iamHandle(t.name) } // Subscriptions returns an iterator which returns the subscriptions for this topic. func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator { // NOTE: zero or more Subscriptions that are ultimately returned by this // Subscriptions iterator may belong to a different project to t. return &SubscriptionIterator{ s: t.s, next: t.s.listTopicSubscriptions(ctx, t.name), } } var errTopicStopped = errors.New("pubsub: Stop has been called for this topic") // Publish publishes msg to the topic asynchronously. Messages are batched and // sent according to the topic's PublishSettings. Publish never blocks. // // Publish returns a non-nil PublishResult which will be ready when the // message has been sent (or has failed to be sent) to the server. // // Publish creates goroutines for batching and sending messages. These goroutines // need to be stopped by calling t.Stop(). Once stopped, future calls to Publish // will immediately return a PublishResult with an error. func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult { // TODO(jba): if this turns out to take significant time, try to approximate it. // Or, convert the messages to protos in Publish, instead of in the service. 
msg.size = proto.Size(&pb.PubsubMessage{ Data: msg.Data, Attributes: msg.Attributes, }) r := &PublishResult{ready: make(chan struct{})} t.initBundler() t.mu.RLock() defer t.mu.RUnlock() // TODO(aboulhosn) [from bcmills] consider changing the semantics of bundler to perform this logic so we don't have to do it here if t.stopped { r.set("", errTopicStopped) return r } // TODO(jba) [from bcmills] consider using a shared channel per bundle // (requires Bundler API changes; would reduce allocations) // The call to Add should never return an error because the bundler's // BufferedByteLimit is set to maxInt; we do not perform any flow // control in the client. err := t.bundler.Add(&bundledMessage{msg, r}, msg.size) if err != nil { r.set("", err) } return r } // Send all remaining published messages and stop goroutines created for handling // publishing. Returns once all outstanding messages have been sent or have // failed to be sent. func (t *Topic) Stop() { t.mu.Lock() noop := t.stopped || t.bundler == nil t.stopped = true t.mu.Unlock() if noop { return } t.bundler.Flush() // At this point, all pending bundles have been published and the bundler's // goroutines have exited, so it is OK for this goroutine to close bundlec. close(t.bundlec) t.wg.Wait() } // A PublishResult holds the result from a call to Publish. type PublishResult struct { ready chan struct{} serverID string err error } // Ready returns a channel that is closed when the result is ready. // When the Ready channel is closed, Get is guaranteed not to block. func (r *PublishResult) Ready() <-chan struct{} { return r.ready } // Get returns the server-generated message ID and/or error result of a Publish call. // Get blocks until the Publish call completes or the context is done. func (r *PublishResult) Get(ctx context.Context) (serverID string, err error) { // If the result is already ready, return it even if the context is done. 
select { case <-r.Ready(): return r.serverID, r.err default: } select { case <-ctx.Done(): return "", ctx.Err() case <-r.Ready(): return r.serverID, r.err } } func (r *PublishResult) set(sid string, err error) { r.serverID = sid r.err = err close(r.ready) } type bundledMessage struct { msg *Message res *PublishResult } func (t *Topic) initBundler() { t.mu.RLock() noop := t.stopped || t.bundler != nil t.mu.RUnlock() if noop { return } t.mu.Lock() defer t.mu.Unlock() // Must re-check, since we released the lock. if t.stopped || t.bundler != nil { return } // TODO(jba): use a context detached from the one passed to NewClient. ctx := context.TODO() // Unless overridden, run several goroutines per CPU to call the Publish RPC. n := t.PublishSettings.NumGoroutines if n <= 0 { n = 25 * runtime.GOMAXPROCS(0) } timeout := t.PublishSettings.Timeout t.wg.Add(n) for i := 0; i < n; i++ { go func() { defer t.wg.Done() for b := range t.bundlec { bctx := ctx cancel := func() {} if timeout != 0 { bctx, cancel = context.WithTimeout(ctx, timeout) } t.publishMessageBundle(bctx, b) cancel() } }() } t.bundler = bundler.NewBundler(&bundledMessage{}, func(items interface{}) { t.bundlec <- items.([]*bundledMessage) }) t.bundler.DelayThreshold = t.PublishSettings.DelayThreshold t.bundler.BundleCountThreshold = t.PublishSettings.CountThreshold if t.bundler.BundleCountThreshold > MaxPublishRequestCount { t.bundler.BundleCountThreshold = MaxPublishRequestCount } t.bundler.BundleByteThreshold = t.PublishSettings.ByteThreshold t.bundler.BufferedByteLimit = maxInt t.bundler.BundleByteLimit = MaxPublishRequestBytes } func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) { msgs := make([]*Message, len(bms)) for i, bm := range bms { msgs[i], bm.msg = bm.msg, nil // release bm.msg for GC } ids, err := t.s.publishMessages(ctx, t.name, msgs) for i, bm := range bms { if err != nil { bm.res.set("", err) } else { bm.res.set(ids[i], nil) } } } 
golang-google-cloud-0.9.0/pubsub/topic_test.go000066400000000000000000000113361312234511600213530ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "net" "reflect" "testing" "time" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) type topicListService struct { service topics []string err error t *testing.T // for error logging. 
} func (s *topicListService) newNextStringFunc() nextStringFunc { return func() (string, error) { if len(s.topics) == 0 { return "", iterator.Done } tn := s.topics[0] s.topics = s.topics[1:] return tn, s.err } } func (s *topicListService) listProjectTopics(ctx context.Context, projName string) nextStringFunc { if projName != "projects/projid" { s.t.Fatalf("unexpected call: projName: %q", projName) return nil } return s.newNextStringFunc() } func checkTopicListing(t *testing.T, want []string) { s := &topicListService{topics: want, t: t} c := &Client{projectID: "projid", s: s} topics, err := slurpTopics(c.Topics(context.Background())) if err != nil { t.Errorf("error listing topics: %v", err) } got := topicNames(topics) if !reflect.DeepEqual(got, want) { t.Errorf("topic list: got: %v, want: %v", got, want) } if len(s.topics) != 0 { t.Errorf("outstanding topics: %v", s.topics) } } // All returns the remaining topics from this iterator. func slurpTopics(it *TopicIterator) ([]*Topic, error) { var topics []*Topic for { switch topic, err := it.Next(); err { case nil: topics = append(topics, topic) case iterator.Done: return topics, nil default: return nil, err } } } func TestTopicID(t *testing.T) { const id = "id" serv := &topicListService{ topics: []string{"projects/projid/topics/t1", "projects/projid/topics/t2"}, t: t, } c := &Client{projectID: "projid", s: serv} s := c.Topic(id) if got, want := s.ID(), id; got != want { t.Errorf("Token.ID() = %q; want %q", got, want) } want := []string{"t1", "t2"} topics, err := slurpTopics(c.Topics(context.Background())) if err != nil { t.Errorf("error listing topics: %v", err) } for i, topic := range topics { if got, want := topic.ID(), want[i]; got != want { t.Errorf("Token.ID() = %q; want %q", got, want) } } } func TestListTopics(t *testing.T) { checkTopicListing(t, []string{ "projects/projid/topics/t1", "projects/projid/topics/t2", "projects/projid/topics/t3", "projects/projid/topics/t4"}) } func TestListCompletelyEmptyTopics(t 
*testing.T) { var want []string checkTopicListing(t, want) } func TestStopPublishOrder(t *testing.T) { // Check that Stop doesn't panic if called before Publish. // Also that Publish after Stop returns the right error. ctx := context.Background() c := &Client{projectID: "projid"} topic := c.Topic("t") topic.Stop() r := topic.Publish(ctx, &Message{}) _, err := r.Get(ctx) if err != errTopicStopped { t.Errorf("got %v, want errTopicStopped", err) } } func TestPublishTimeout(t *testing.T) { ctx := context.Background() serv := grpc.NewServer() pubsubpb.RegisterPublisherServer(serv, &alwaysFailPublish{}) lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { t.Fatal(err) } s, err := newPubSubService(context.Background(), []option.ClientOption{option.WithGRPCConn(conn)}) if err != nil { t.Fatal(err) } c := &Client{s: s} topic := c.Topic("t") topic.PublishSettings.Timeout = 3 * time.Second r := topic.Publish(ctx, &Message{}) select { case <-r.Ready(): _, err = r.Get(ctx) if err != context.DeadlineExceeded { t.Fatalf("got %v, want context.DeadlineExceeded", err) } case <-time.After(2 * topic.PublishSettings.Timeout): t.Fatal("timed out") } } type alwaysFailPublish struct { pubsubpb.PublisherServer } func (s *alwaysFailPublish) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) { return nil, grpc.Errorf(codes.Unavailable, "try again") } func topicNames(topics []*Topic) []string { var names []string for _, topic := range topics { names = append(names, topic.name) } return names } golang-google-cloud-0.9.0/pubsub/utils_test.go000066400000000000000000000031531312234511600213730ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pubsub import ( "time" "golang.org/x/net/context" ) type modDeadlineCall struct { subName string deadline time.Duration ackIDs []string } type acknowledgeCall struct { subName string ackIDs []string } type testService struct { service // The arguments of each call to modifyAckDealine are written to this channel. modDeadlineCalled chan modDeadlineCall // The arguments of each call to acknowledge are written to this channel. acknowledgeCalled chan acknowledgeCall } func (s *testService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error { s.modDeadlineCalled <- modDeadlineCall{ subName: subName, deadline: deadline, ackIDs: ackIDs, } return nil } func (s *testService) acknowledge(ctx context.Context, subName string, ackIDs []string) error { s.acknowledgeCalled <- acknowledgeCall{ subName: subName, ackIDs: ackIDs, } return nil } func (s *testService) splitAckIDs(ids []string) ([]string, []string) { return ids, nil } golang-google-cloud-0.9.0/run-tests.sh000077500000000000000000000040451312234511600176510ustar00rootroot00000000000000#!/bin/bash # Selectively run tests for this repo, based on what has changed # in a commit. Runs short tests for the whole repo, and full tests # for changed directories. set -e prefix=cloud.google.com/go dryrun=false if [[ $1 == "-n" ]]; then dryrun=true shift fi if [[ $1 == "" ]]; then echo >&2 "usage: $0 [-n] COMMIT" exit 1 fi # Files or directories that cause all tests to run if modified. 
# Files or directories that cause all tests to run if modified.
declare -A run_all
run_all=([.travis.yml]=1 [run-tests.sh]=1)

# run executes its arguments (tracing the command), or just echoes them in
# dry-run mode. "$@" preserves each argument exactly, even with spaces.
function run {
  if $dryrun; then
    echo "$@"
  else
    (set -x; "$@")
  fi
}

# Find all the packages that have changed in this commit.
declare -A changed_packages

for f in $(git diff-tree --no-commit-id --name-only -r $1); do
  if [[ ${run_all[$f]} == 1 ]]; then
    # This change requires a full test. Do it and exit.
    run go test -race -v $prefix/...
    exit
  fi
  # Map, e.g., "spanner/client.go" to "$prefix/spanner".
  d=$(dirname $f)
  if [[ $d == "." ]]; then
    pkg=$prefix
  else
    pkg=$prefix/$d
  fi
  changed_packages[$pkg]=1
done

echo "changed packages: ${!changed_packages[*]}"

# Reports whether its argument, a package name, depends (recursively)
# on a changed package.
function depends_on_changed_package {
  # According to go list, a package does not depend on itself, so
  # we test that separately.
  if [[ ${changed_packages[$1]} == 1 ]]; then
    return 0
  fi
  for dep in $(go list -f '{{range .Deps}}{{.}} {{end}}' $1); do
    if [[ ${changed_packages[$dep]} == 1 ]]; then
      return 0
    fi
  done
  return 1
}

# Collect the packages into two separate lists. (It is faster to go test a
# list of packages than to individually go test each one.)
shorts=
fulls=
for pkg in $(go list $prefix/...); do      # for each package in the repo
  if depends_on_changed_package $pkg; then # if it depends on a changed package
    fulls="$fulls $pkg"                    # run the full test
  else                                     # otherwise
    shorts="$shorts $pkg"                  # run the short test
  fi
done
# Guard against empty lists: "go test" with no package arguments would test
# the current directory instead of doing nothing.
if [[ -n "$shorts" ]]; then
  run go test -race -v -short $shorts
fi
if [[ -n "$fulls" ]]; then
  run go test -race -v $fulls
fi
package database import ( "math" "time" "cloud.google.com/go/internal/version" "cloud.google.com/go/longrunning" lroauto "cloud.google.com/go/longrunning/autogen" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" iampb "google.golang.org/genproto/googleapis/iam/v1" longrunningpb "google.golang.org/genproto/googleapis/longrunning" databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( databaseAdminInstancePathTemplate = gax.MustCompilePathTemplate("projects/{project}/instances/{instance}") databaseAdminDatabasePathTemplate = gax.MustCompilePathTemplate("projects/{project}/instances/{instance}/databases/{database}") ) // DatabaseAdminCallOptions contains the retry settings for each method of DatabaseAdminClient. type DatabaseAdminCallOptions struct { ListDatabases []gax.CallOption CreateDatabase []gax.CallOption GetDatabase []gax.CallOption UpdateDatabaseDdl []gax.CallOption DropDatabase []gax.CallOption GetDatabaseDdl []gax.CallOption SetIamPolicy []gax.CallOption GetIamPolicy []gax.CallOption TestIamPermissions []gax.CallOption } func defaultDatabaseAdminClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("spanner.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultDatabaseAdminCallOptions() *DatabaseAdminCallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 1000 * time.Millisecond, Max: 32000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &DatabaseAdminCallOptions{ ListDatabases: retry[[2]string{"default", "idempotent"}], CreateDatabase: retry[[2]string{"default", "non_idempotent"}], GetDatabase: retry[[2]string{"default", 
"idempotent"}], UpdateDatabaseDdl: retry[[2]string{"default", "idempotent"}], DropDatabase: retry[[2]string{"default", "idempotent"}], GetDatabaseDdl: retry[[2]string{"default", "idempotent"}], SetIamPolicy: retry[[2]string{"default", "non_idempotent"}], GetIamPolicy: retry[[2]string{"default", "idempotent"}], TestIamPermissions: retry[[2]string{"default", "non_idempotent"}], } } // DatabaseAdminClient is a client for interacting with Cloud Spanner Database Admin API. type DatabaseAdminClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. databaseAdminClient databasepb.DatabaseAdminClient // LROClient is used internally to handle longrunning operations. // It is exposed so that its CallOptions can be modified if required. // Users should not Close this client. LROClient *lroauto.OperationsClient // The call options for this service. CallOptions *DatabaseAdminCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewDatabaseAdminClient creates a new database admin client. // // Cloud Spanner Database Admin API // // The Cloud Spanner Database Admin API can be used to create, drop, and // list databases. It also enables updating the schema of pre-existing // databases. func NewDatabaseAdminClient(ctx context.Context, opts ...option.ClientOption) (*DatabaseAdminClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultDatabaseAdminClientOptions(), opts...)...) if err != nil { return nil, err } c := &DatabaseAdminClient{ conn: conn, CallOptions: defaultDatabaseAdminCallOptions(), databaseAdminClient: databasepb.NewDatabaseAdminClient(conn), } c.SetGoogleClientInfo() c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) if err != nil { // This error "should not happen", since we are just reusing old connection // and never actually need to dial. // If this does happen, we could leak conn. 
However, we cannot close conn: // If the user invoked the function with option.WithGRPCConn, // we would close a connection that's still in use. // TODO(pongad): investigate error conditions. return nil, err } return c, nil } // Connection returns the client's connection to the API service. func (c *DatabaseAdminClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *DatabaseAdminClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *DatabaseAdminClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // DatabaseAdminInstancePath returns the path for the instance resource. func DatabaseAdminInstancePath(project, instance string) string { path, err := databaseAdminInstancePathTemplate.Render(map[string]string{ "project": project, "instance": instance, }) if err != nil { panic(err) } return path } // DatabaseAdminDatabasePath returns the path for the database resource. func DatabaseAdminDatabasePath(project, instance, database string) string { path, err := databaseAdminDatabasePathTemplate.Render(map[string]string{ "project": project, "instance": instance, "database": database, }) if err != nil { panic(err) } return path } // ListDatabases lists Cloud Spanner databases. func (c *DatabaseAdminClient) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest, opts ...gax.CallOption) *DatabaseIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListDatabases[0:len(c.CallOptions.ListDatabases):len(c.CallOptions.ListDatabases)], opts...) 
it := &DatabaseIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.Database, string, error) { var resp *databasepb.ListDatabasesResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.databaseAdminClient.ListDatabases(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, "", err } return resp.Databases, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // CreateDatabase creates a new Cloud Spanner database and starts to prepare it for serving. // The returned [long-running operation][google.longrunning.Operation] will // have a name of the format `/operations/` and // can be used to track preparation of the database. The // [metadata][google.longrunning.Operation.metadata] field type is // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The // [response][google.longrunning.Operation.response] field type is // [Database][google.spanner.admin.database.v1.Database], if successful. func (c *DatabaseAdminClient) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest, opts ...gax.CallOption) (*CreateDatabaseOperation, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.CreateDatabase[0:len(c.CallOptions.CreateDatabase):len(c.CallOptions.CreateDatabase)], opts...) 
var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.databaseAdminClient.CreateDatabase(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return &CreateDatabaseOperation{ lro: longrunning.InternalNewOperation(c.LROClient, resp), }, nil } // GetDatabase gets the state of a Cloud Spanner database. func (c *DatabaseAdminClient) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest, opts ...gax.CallOption) (*databasepb.Database, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetDatabase[0:len(c.CallOptions.GetDatabase):len(c.CallOptions.GetDatabase)], opts...) var resp *databasepb.Database err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.databaseAdminClient.GetDatabase(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // UpdateDatabaseDdl updates the schema of a Cloud Spanner database by // creating/altering/dropping tables, columns, indexes, etc. The returned // [long-running operation][google.longrunning.Operation] will have a name of // the format `/operations/` and can be used to // track execution of the schema change(s). The // [metadata][google.longrunning.Operation.metadata] field type is // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. func (c *DatabaseAdminClient) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest, opts ...gax.CallOption) (*UpdateDatabaseDdlOperation, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.UpdateDatabaseDdl[0:len(c.CallOptions.UpdateDatabaseDdl):len(c.CallOptions.UpdateDatabaseDdl)], opts...) 
var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.databaseAdminClient.UpdateDatabaseDdl(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return &UpdateDatabaseDdlOperation{ lro: longrunning.InternalNewOperation(c.LROClient, resp), }, nil } // DropDatabase drops (aka deletes) a Cloud Spanner database. func (c *DatabaseAdminClient) DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DropDatabase[0:len(c.CallOptions.DropDatabase):len(c.CallOptions.DropDatabase)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.databaseAdminClient.DropDatabase(ctx, req, settings.GRPC...) return err }, opts...) return err } // GetDatabaseDdl returns the schema of a Cloud Spanner database as a list of formatted // DDL statements. This method does not show pending schema updates, those may // be queried using the [Operations][google.longrunning.Operations] API. func (c *DatabaseAdminClient) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest, opts ...gax.CallOption) (*databasepb.GetDatabaseDdlResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetDatabaseDdl[0:len(c.CallOptions.GetDatabaseDdl):len(c.CallOptions.GetDatabaseDdl)], opts...) var resp *databasepb.GetDatabaseDdlResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.databaseAdminClient.GetDatabaseDdl(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // SetIamPolicy sets the access control policy on a database resource. Replaces any // existing policy. 
// // Authorization requires `spanner.databases.setIamPolicy` permission on // [resource][google.iam.v1.SetIamPolicyRequest.resource]. func (c *DatabaseAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.SetIamPolicy[0:len(c.CallOptions.SetIamPolicy):len(c.CallOptions.SetIamPolicy)], opts...) var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.databaseAdminClient.SetIamPolicy(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // GetIamPolicy gets the access control policy for a database resource. Returns an empty // policy if a database exists but does not have a policy set. // // Authorization requires `spanner.databases.getIamPolicy` permission on // [resource][google.iam.v1.GetIamPolicyRequest.resource]. func (c *DatabaseAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetIamPolicy[0:len(c.CallOptions.GetIamPolicy):len(c.CallOptions.GetIamPolicy)], opts...) var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.databaseAdminClient.GetIamPolicy(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // TestIamPermissions returns permissions that the caller has on the specified database resource. // // Attempting this RPC on a non-existent Cloud Spanner database will result in // a NOT_FOUND error if the user has `spanner.databases.list` permission on // the containing Cloud Spanner instance. Otherwise returns an empty set of // permissions. 
func (c *DatabaseAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...) var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.databaseAdminClient.TestIamPermissions(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // DatabaseIterator manages a stream of *databasepb.Database. type DatabaseIterator struct { items []*databasepb.Database pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*databasepb.Database, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *DatabaseIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. 
func (it *DatabaseIterator) Next() (*databasepb.Database, error) { var item *databasepb.Database if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *DatabaseIterator) bufLen() int { return len(it.items) } func (it *DatabaseIterator) takeBuf() interface{} { b := it.items it.items = nil return b } // CreateDatabaseOperation manages a long-running operation from CreateDatabase. type CreateDatabaseOperation struct { lro *longrunning.Operation } // CreateDatabaseOperation returns a new CreateDatabaseOperation from a given name. // The name must be that of a previously created CreateDatabaseOperation, possibly from a different process. func (c *DatabaseAdminClient) CreateDatabaseOperation(name string) *CreateDatabaseOperation { return &CreateDatabaseOperation{ lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), } } // Wait blocks until the long-running operation is completed, returning the response and any errors encountered. // // See documentation of Poll for error-handling information. func (op *CreateDatabaseOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) { var resp databasepb.Database if err := op.lro.Wait(ctx, &resp, opts...); err != nil { return nil, err } return &resp, nil } // Poll fetches the latest state of the long-running operation. // // Poll also fetches the latest metadata, which can be retrieved by Metadata. // // If Poll fails, the error is returned and op is unmodified. If Poll succeeds and // the operation has completed with failure, the error is returned and op.Done will return true. // If Poll succeeds and the operation has completed successfully, // op.Done will return true, and the response of the operation is returned. // If Poll succeeds and the operation has not completed, the returned response and error are both nil. 
func (op *CreateDatabaseOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) { var resp databasepb.Database if err := op.lro.Poll(ctx, &resp, opts...); err != nil { return nil, err } if !op.Done() { return nil, nil } return &resp, nil } // Metadata returns metadata associated with the long-running operation. // Metadata itself does not contact the server, but Poll does. // To get the latest metadata, call this method after a successful call to Poll. // If the metadata is not available, the returned metadata and error are both nil. func (op *CreateDatabaseOperation) Metadata() (*databasepb.CreateDatabaseMetadata, error) { var meta databasepb.CreateDatabaseMetadata if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { return nil, nil } else if err != nil { return nil, err } return &meta, nil } // Done reports whether the long-running operation has completed. func (op *CreateDatabaseOperation) Done() bool { return op.lro.Done() } // Name returns the name of the long-running operation. // The name is assigned by the server and is unique within the service from which the operation is created. func (op *CreateDatabaseOperation) Name() string { return op.lro.Name() } // UpdateDatabaseDdlOperation manages a long-running operation from UpdateDatabaseDdl. type UpdateDatabaseDdlOperation struct { lro *longrunning.Operation } // UpdateDatabaseDdlOperation returns a new UpdateDatabaseDdlOperation from a given name. // The name must be that of a previously created UpdateDatabaseDdlOperation, possibly from a different process. func (c *DatabaseAdminClient) UpdateDatabaseDdlOperation(name string) *UpdateDatabaseDdlOperation { return &UpdateDatabaseDdlOperation{ lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), } } // Wait blocks until the long-running operation is completed, returning any error encountered. // // See documentation of Poll for error-handling information. 
func (op *UpdateDatabaseDdlOperation) Wait(ctx context.Context, opts ...gax.CallOption) error { return op.lro.Wait(ctx, nil, opts...) } // Poll fetches the latest state of the long-running operation. // // Poll also fetches the latest metadata, which can be retrieved by Metadata. // // If Poll fails, the error is returned and op is unmodified. If Poll succeeds and // the operation has completed with failure, the error is returned and op.Done will return true. // If Poll succeeds and the operation has completed successfully, op.Done will return true. func (op *UpdateDatabaseDdlOperation) Poll(ctx context.Context, opts ...gax.CallOption) error { return op.lro.Poll(ctx, nil, opts...) } // Metadata returns metadata associated with the long-running operation. // Metadata itself does not contact the server, but Poll does. // To get the latest metadata, call this method after a successful call to Poll. // If the metadata is not available, the returned metadata and error are both nil. func (op *UpdateDatabaseDdlOperation) Metadata() (*databasepb.UpdateDatabaseDdlMetadata, error) { var meta databasepb.UpdateDatabaseDdlMetadata if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { return nil, nil } else if err != nil { return nil, err } return &meta, nil } // Done reports whether the long-running operation has completed. func (op *UpdateDatabaseDdlOperation) Done() bool { return op.lro.Done() } // Name returns the name of the long-running operation. // The name is assigned by the server and is unique within the service from which the operation is created. func (op *UpdateDatabaseDdlOperation) Name() string { return op.lro.Name() } golang-google-cloud-0.9.0/spanner/admin/database/apiv1/database_admin_client_example_test.go000066400000000000000000000110041312234511600322340ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package database_test import ( "cloud.google.com/go/spanner/admin/database/apiv1" "golang.org/x/net/context" "google.golang.org/api/iterator" iampb "google.golang.org/genproto/googleapis/iam/v1" databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" ) func ExampleNewDatabaseAdminClient() { ctx := context.Background() c, err := database.NewDatabaseAdminClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleDatabaseAdminClient_ListDatabases() { ctx := context.Background() c, err := database.NewDatabaseAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &databasepb.ListDatabasesRequest{ // TODO: Fill request struct fields. } it := c.ListDatabases(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleDatabaseAdminClient_CreateDatabase() { ctx := context.Background() c, err := database.NewDatabaseAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &databasepb.CreateDatabaseRequest{ // TODO: Fill request struct fields. } op, err := c.CreateDatabase(ctx, req) if err != nil { // TODO: Handle error. } resp, err := op.Wait(ctx) if err != nil { // TODO: Handle error. } // TODO: Use resp. 
_ = resp } func ExampleDatabaseAdminClient_GetDatabase() { ctx := context.Background() c, err := database.NewDatabaseAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &databasepb.GetDatabaseRequest{ // TODO: Fill request struct fields. } resp, err := c.GetDatabase(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleDatabaseAdminClient_UpdateDatabaseDdl() { ctx := context.Background() c, err := database.NewDatabaseAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &databasepb.UpdateDatabaseDdlRequest{ // TODO: Fill request struct fields. } op, err := c.UpdateDatabaseDdl(ctx, req) if err != nil { // TODO: Handle error. } err = op.Wait(ctx) // TODO: Handle error. } func ExampleDatabaseAdminClient_DropDatabase() { ctx := context.Background() c, err := database.NewDatabaseAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &databasepb.DropDatabaseRequest{ // TODO: Fill request struct fields. } err = c.DropDatabase(ctx, req) if err != nil { // TODO: Handle error. } } func ExampleDatabaseAdminClient_GetDatabaseDdl() { ctx := context.Background() c, err := database.NewDatabaseAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &databasepb.GetDatabaseDdlRequest{ // TODO: Fill request struct fields. } resp, err := c.GetDatabaseDdl(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleDatabaseAdminClient_SetIamPolicy() { ctx := context.Background() c, err := database.NewDatabaseAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &iampb.SetIamPolicyRequest{ // TODO: Fill request struct fields. } resp, err := c.SetIamPolicy(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleDatabaseAdminClient_GetIamPolicy() { ctx := context.Background() c, err := database.NewDatabaseAdminClient(ctx) if err != nil { // TODO: Handle error. 
} req := &iampb.GetIamPolicyRequest{ // TODO: Fill request struct fields. } resp, err := c.GetIamPolicy(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleDatabaseAdminClient_TestIamPermissions() { ctx := context.Background() c, err := database.NewDatabaseAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &iampb.TestIamPermissionsRequest{ // TODO: Fill request struct fields. } resp, err := c.TestIamPermissions(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } golang-google-cloud-0.9.0/spanner/admin/database/apiv1/doc.go000066400000000000000000000025311312234511600237620ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package database is an experimental, auto-generated package for the // Cloud Spanner Database Admin API. // package database // import "cloud.google.com/go/spanner/admin/database/apiv1" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. 
func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/spanner.admin", } } golang-google-cloud-0.9.0/spanner/admin/database/apiv1/mock_test.go000066400000000000000000000552471312234511600252210ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package database import ( emptypb "github.com/golang/protobuf/ptypes/empty" iampb "google.golang.org/genproto/googleapis/iam/v1" longrunningpb "google.golang.org/genproto/googleapis/longrunning" databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockDatabaseAdminServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. databasepb.DatabaseAdminServer reqs []proto.Message // If set, all calls return this error. 
err error // responses to return if err == nil resps []proto.Message } func (s *mockDatabaseAdminServer) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest) (*databasepb.ListDatabasesResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*databasepb.ListDatabasesResponse), nil } func (s *mockDatabaseAdminServer) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest) (*longrunningpb.Operation, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*longrunningpb.Operation), nil } func (s *mockDatabaseAdminServer) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest) (*databasepb.Database, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*databasepb.Database), nil } func (s *mockDatabaseAdminServer) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest) (*longrunningpb.Operation, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*longrunningpb.Operation), nil } func (s *mockDatabaseAdminServer) 
DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockDatabaseAdminServer) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest) (*databasepb.GetDatabaseDdlResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*databasepb.GetDatabaseDdlResponse), nil } func (s *mockDatabaseAdminServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*iampb.Policy), nil } func (s *mockDatabaseAdminServer) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*iampb.Policy), nil } func (s *mockDatabaseAdminServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := 
md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*iampb.TestIamPermissionsResponse), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. var clientOpt option.ClientOption var ( mockDatabaseAdmin mockDatabaseAdminServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() databasepb.RegisterDatabaseAdminServer(serv, &mockDatabaseAdmin) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestDatabaseAdminListDatabases(t *testing.T) { var nextPageToken string = "" var databasesElement *databasepb.Database = &databasepb.Database{} var databases = []*databasepb.Database{databasesElement} var expectedResponse = &databasepb.ListDatabasesResponse{ NextPageToken: nextPageToken, Databases: databases, } mockDatabaseAdmin.err = nil mockDatabaseAdmin.reqs = nil mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") var request = &databasepb.ListDatabasesRequest{ Parent: formattedParent, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListDatabases(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Databases[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = 
want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestDatabaseAdminListDatabasesError(t *testing.T) { errCode := codes.PermissionDenied mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") var request = &databasepb.ListDatabasesRequest{ Parent: formattedParent, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListDatabases(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestDatabaseAdminCreateDatabase(t *testing.T) { var name string = "name3373707" var expectedResponse = &databasepb.Database{ Name: name, } mockDatabaseAdmin.err = nil mockDatabaseAdmin.reqs = nil any, err := ptypes.MarshalAny(expectedResponse) if err != nil { t.Fatal(err) } mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ Name: "longrunning-test", Done: true, Result: &longrunningpb.Operation_Response{Response: any}, }) var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") var createStatement string = "createStatement552974828" var request = &databasepb.CreateDatabaseRequest{ Parent: formattedParent, CreateStatement: createStatement, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } respLRO, err := c.CreateDatabase(context.Background(), request) if err != nil { t.Fatal(err) } resp, err := respLRO.Wait(context.Background()) if err != nil { t.Fatal(err) } if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func 
TestDatabaseAdminCreateDatabaseError(t *testing.T) { errCode := codes.PermissionDenied mockDatabaseAdmin.err = nil mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ Name: "longrunning-test", Done: true, Result: &longrunningpb.Operation_Error{ Error: &status.Status{ Code: int32(errCode), Message: "test error", }, }, }) var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") var createStatement string = "createStatement552974828" var request = &databasepb.CreateDatabaseRequest{ Parent: formattedParent, CreateStatement: createStatement, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } respLRO, err := c.CreateDatabase(context.Background(), request) if err != nil { t.Fatal(err) } resp, err := respLRO.Wait(context.Background()) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestDatabaseAdminGetDatabase(t *testing.T) { var name2 string = "name2-1052831874" var expectedResponse = &databasepb.Database{ Name: name2, } mockDatabaseAdmin.err = nil mockDatabaseAdmin.reqs = nil mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) var formattedName string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var request = &databasepb.GetDatabaseRequest{ Name: formattedName, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetDatabase(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestDatabaseAdminGetDatabaseError(t *testing.T) { errCode := 
codes.PermissionDenied mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") var formattedName string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var request = &databasepb.GetDatabaseRequest{ Name: formattedName, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetDatabase(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestDatabaseAdminUpdateDatabaseDdl(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockDatabaseAdmin.err = nil mockDatabaseAdmin.reqs = nil any, err := ptypes.MarshalAny(expectedResponse) if err != nil { t.Fatal(err) } mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ Name: "longrunning-test", Done: true, Result: &longrunningpb.Operation_Response{Response: any}, }) var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var statements []string = nil var request = &databasepb.UpdateDatabaseDdlRequest{ Database: formattedDatabase, Statements: statements, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } respLRO, err := c.UpdateDatabaseDdl(context.Background(), request) if err != nil { t.Fatal(err) } err = respLRO.Wait(context.Background()) if err != nil { t.Fatal(err) } if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestDatabaseAdminUpdateDatabaseDdlError(t *testing.T) { errCode := codes.PermissionDenied mockDatabaseAdmin.err = nil mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ Name: "longrunning-test", Done: true, Result: &longrunningpb.Operation_Error{ Error: &status.Status{ Code: 
int32(errCode), Message: "test error", }, }, }) var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var statements []string = nil var request = &databasepb.UpdateDatabaseDdlRequest{ Database: formattedDatabase, Statements: statements, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } respLRO, err := c.UpdateDatabaseDdl(context.Background(), request) if err != nil { t.Fatal(err) } err = respLRO.Wait(context.Background()) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestDatabaseAdminDropDatabase(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockDatabaseAdmin.err = nil mockDatabaseAdmin.reqs = nil mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var request = &databasepb.DropDatabaseRequest{ Database: formattedDatabase, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DropDatabase(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestDatabaseAdminDropDatabaseError(t *testing.T) { errCode := codes.PermissionDenied mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var request = &databasepb.DropDatabaseRequest{ Database: formattedDatabase, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DropDatabase(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc 
error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestDatabaseAdminGetDatabaseDdl(t *testing.T) { var expectedResponse *databasepb.GetDatabaseDdlResponse = &databasepb.GetDatabaseDdlResponse{} mockDatabaseAdmin.err = nil mockDatabaseAdmin.reqs = nil mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var request = &databasepb.GetDatabaseDdlRequest{ Database: formattedDatabase, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetDatabaseDdl(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestDatabaseAdminGetDatabaseDdlError(t *testing.T) { errCode := codes.PermissionDenied mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var request = &databasepb.GetDatabaseDdlRequest{ Database: formattedDatabase, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetDatabaseDdl(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestDatabaseAdminSetIamPolicy(t *testing.T) { var version int32 = 351608024 var etag []byte = []byte("21") var expectedResponse = &iampb.Policy{ Version: version, Etag: etag, } mockDatabaseAdmin.err = nil mockDatabaseAdmin.reqs = nil mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], 
expectedResponse) var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var policy *iampb.Policy = &iampb.Policy{} var request = &iampb.SetIamPolicyRequest{ Resource: formattedResource, Policy: policy, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.SetIamPolicy(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestDatabaseAdminSetIamPolicyError(t *testing.T) { errCode := codes.PermissionDenied mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var policy *iampb.Policy = &iampb.Policy{} var request = &iampb.SetIamPolicyRequest{ Resource: formattedResource, Policy: policy, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.SetIamPolicy(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestDatabaseAdminGetIamPolicy(t *testing.T) { var version int32 = 351608024 var etag []byte = []byte("21") var expectedResponse = &iampb.Policy{ Version: version, Etag: etag, } mockDatabaseAdmin.err = nil mockDatabaseAdmin.reqs = nil mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var request = &iampb.GetIamPolicyRequest{ Resource: formattedResource, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { 
t.Fatal(err) } resp, err := c.GetIamPolicy(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestDatabaseAdminGetIamPolicyError(t *testing.T) { errCode := codes.PermissionDenied mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var request = &iampb.GetIamPolicyRequest{ Resource: formattedResource, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetIamPolicy(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestDatabaseAdminTestIamPermissions(t *testing.T) { var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} mockDatabaseAdmin.err = nil mockDatabaseAdmin.reqs = nil mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var permissions []string = nil var request = &iampb.TestIamPermissionsRequest{ Resource: formattedResource, Permissions: permissions, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.TestIamPermissions(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } 
func TestDatabaseAdminTestIamPermissionsError(t *testing.T) { errCode := codes.PermissionDenied mockDatabaseAdmin.err = gstatus.Error(errCode, "test error") var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") var permissions []string = nil var request = &iampb.TestIamPermissionsRequest{ Resource: formattedResource, Permissions: permissions, } c, err := NewDatabaseAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.TestIamPermissions(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } golang-google-cloud-0.9.0/spanner/admin/instance/000077500000000000000000000000001312234511600217055ustar00rootroot00000000000000golang-google-cloud-0.9.0/spanner/admin/instance/apiv1/000077500000000000000000000000001312234511600227255ustar00rootroot00000000000000golang-google-cloud-0.9.0/spanner/admin/instance/apiv1/doc.go000066400000000000000000000025311312234511600240220ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package instance is an experimental, auto-generated package for the // Cloud Spanner Instance Admin API. 
// package instance // import "cloud.google.com/go/spanner/admin/instance/apiv1" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/spanner.admin", } } golang-google-cloud-0.9.0/spanner/admin/instance/apiv1/instance_admin_client.go000066400000000000000000000710521312234511600275730ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package instance import ( "math" "time" "cloud.google.com/go/internal/version" "cloud.google.com/go/longrunning" lroauto "cloud.google.com/go/longrunning/autogen" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" iampb "google.golang.org/genproto/googleapis/iam/v1" longrunningpb "google.golang.org/genproto/googleapis/longrunning" instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( instanceAdminProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") instanceAdminInstanceConfigPathTemplate = gax.MustCompilePathTemplate("projects/{project}/instanceConfigs/{instance_config}") instanceAdminInstancePathTemplate = gax.MustCompilePathTemplate("projects/{project}/instances/{instance}") ) // InstanceAdminCallOptions contains the retry settings for each method of InstanceAdminClient. type InstanceAdminCallOptions struct { ListInstanceConfigs []gax.CallOption GetInstanceConfig []gax.CallOption ListInstances []gax.CallOption GetInstance []gax.CallOption CreateInstance []gax.CallOption UpdateInstance []gax.CallOption DeleteInstance []gax.CallOption SetIamPolicy []gax.CallOption GetIamPolicy []gax.CallOption TestIamPermissions []gax.CallOption } func defaultInstanceAdminClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("spanner.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultInstanceAdminCallOptions() *InstanceAdminCallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 1000 * time.Millisecond, Max: 32000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &InstanceAdminCallOptions{ ListInstanceConfigs: retry[[2]string{"default", 
"idempotent"}], GetInstanceConfig: retry[[2]string{"default", "idempotent"}], ListInstances: retry[[2]string{"default", "idempotent"}], GetInstance: retry[[2]string{"default", "idempotent"}], CreateInstance: retry[[2]string{"default", "non_idempotent"}], UpdateInstance: retry[[2]string{"default", "non_idempotent"}], DeleteInstance: retry[[2]string{"default", "idempotent"}], SetIamPolicy: retry[[2]string{"default", "non_idempotent"}], GetIamPolicy: retry[[2]string{"default", "idempotent"}], TestIamPermissions: retry[[2]string{"default", "non_idempotent"}], } } // InstanceAdminClient is a client for interacting with Cloud Spanner Instance Admin API. type InstanceAdminClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. instanceAdminClient instancepb.InstanceAdminClient // LROClient is used internally to handle longrunning operations. // It is exposed so that its CallOptions can be modified if required. // Users should not Close this client. LROClient *lroauto.OperationsClient // The call options for this service. CallOptions *InstanceAdminCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewInstanceAdminClient creates a new instance admin client. // // Cloud Spanner Instance Admin API // // The Cloud Spanner Instance Admin API can be used to create, delete, // modify and list instances. Instances are dedicated Cloud Spanner serving // and storage resources to be used by Cloud Spanner databases. // // Each instance has a "configuration", which dictates where the // serving resources for the Cloud Spanner instance are located (e.g., // US-central, Europe). Configurations are created by Google based on // resource availability. // // Cloud Spanner billing is based on the instances that exist and their // sizes. After an instance exists, there are no additional // per-database or per-operation charges for use of the instance // (though there may be additional network bandwidth charges). 
// Instances offer isolation: problems with databases in one instance // will not affect other instances. However, within an instance // databases can affect each other. For example, if one database in an // instance receives a lot of requests and consumes most of the // instance resources, fewer resources are available for other // databases in that instance, and their performance may suffer. func NewInstanceAdminClient(ctx context.Context, opts ...option.ClientOption) (*InstanceAdminClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultInstanceAdminClientOptions(), opts...)...) if err != nil { return nil, err } c := &InstanceAdminClient{ conn: conn, CallOptions: defaultInstanceAdminCallOptions(), instanceAdminClient: instancepb.NewInstanceAdminClient(conn), } c.SetGoogleClientInfo() c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) if err != nil { // This error "should not happen", since we are just reusing old connection // and never actually need to dial. // If this does happen, we could leak conn. However, we cannot close conn: // If the user invoked the function with option.WithGRPCConn, // we would close a connection that's still in use. // TODO(pongad): investigate error conditions. return nil, err } return c, nil } // Connection returns the client's connection to the API service. func (c *InstanceAdminClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *InstanceAdminClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *InstanceAdminClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) 
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // InstanceAdminProjectPath returns the path for the project resource. func InstanceAdminProjectPath(project string) string { path, err := instanceAdminProjectPathTemplate.Render(map[string]string{ "project": project, }) if err != nil { panic(err) } return path } // InstanceAdminInstanceConfigPath returns the path for the instance config resource. func InstanceAdminInstanceConfigPath(project, instanceConfig string) string { path, err := instanceAdminInstanceConfigPathTemplate.Render(map[string]string{ "project": project, "instance_config": instanceConfig, }) if err != nil { panic(err) } return path } // InstanceAdminInstancePath returns the path for the instance resource. func InstanceAdminInstancePath(project, instance string) string { path, err := instanceAdminInstancePathTemplate.Render(map[string]string{ "project": project, "instance": instance, }) if err != nil { panic(err) } return path } // ListInstanceConfigs lists the supported instance configurations for a given project. func (c *InstanceAdminClient) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest, opts ...gax.CallOption) *InstanceConfigIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListInstanceConfigs[0:len(c.CallOptions.ListInstanceConfigs):len(c.CallOptions.ListInstanceConfigs)], opts...) it := &InstanceConfigIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.InstanceConfig, string, error) { var resp *instancepb.ListInstanceConfigsResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.instanceAdminClient.ListInstanceConfigs(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, "", err } return resp.InstanceConfigs, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GetInstanceConfig gets information about a particular instance configuration. func (c *InstanceAdminClient) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetInstanceConfig[0:len(c.CallOptions.GetInstanceConfig):len(c.CallOptions.GetInstanceConfig)], opts...) var resp *instancepb.InstanceConfig err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.instanceAdminClient.GetInstanceConfig(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // ListInstances lists all instances in the given project. func (c *InstanceAdminClient) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest, opts ...gax.CallOption) *InstanceIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListInstances[0:len(c.CallOptions.ListInstances):len(c.CallOptions.ListInstances)], opts...) it := &InstanceIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.Instance, string, error) { var resp *instancepb.ListInstancesResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.instanceAdminClient.ListInstances(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, "", err } return resp.Instances, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GetInstance gets information about a particular instance. func (c *InstanceAdminClient) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest, opts ...gax.CallOption) (*instancepb.Instance, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetInstance[0:len(c.CallOptions.GetInstance):len(c.CallOptions.GetInstance)], opts...) var resp *instancepb.Instance err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.instanceAdminClient.GetInstance(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // CreateInstance creates an instance and begins preparing it to begin serving. The // returned [long-running operation][google.longrunning.Operation] // can be used to track the progress of preparing the new // instance. The instance name is assigned by the caller. If the // named instance already exists, `CreateInstance` returns // `ALREADY_EXISTS`. // // Immediately upon completion of this request: // // * The instance is readable via the API, with all requested attributes // but no allocated resources. Its state is `CREATING`. // // Until completion of the returned operation: // // * Cancelling the operation renders the instance immediately unreadable // via the API. // * The instance can be deleted. // * All other attempts to modify the instance are rejected. // // Upon completion of the returned operation: // // * Billing for all successfully-allocated resources begins (some types // may have lower than the requested levels). 
// * Databases can be created in the instance. // * The instance's allocated resource levels are readable via the API. // * The instance's state becomes `READY`. // // The returned [long-running operation][google.longrunning.Operation] will // have a name of the format `/operations/` and // can be used to track creation of the instance. The // [metadata][google.longrunning.Operation.metadata] field type is // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. // The [response][google.longrunning.Operation.response] field type is // [Instance][google.spanner.admin.instance.v1.Instance], if successful. func (c *InstanceAdminClient) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest, opts ...gax.CallOption) (*CreateInstanceOperation, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.CreateInstance[0:len(c.CallOptions.CreateInstance):len(c.CallOptions.CreateInstance)], opts...) var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.instanceAdminClient.CreateInstance(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return &CreateInstanceOperation{ lro: longrunning.InternalNewOperation(c.LROClient, resp), }, nil } // UpdateInstance updates an instance, and begins allocating or releasing resources // as requested. The returned [long-running // operation][google.longrunning.Operation] can be used to track the // progress of updating the instance. If the named instance does not // exist, returns `NOT_FOUND`. // // Immediately upon completion of this request: // // * For resource types for which a decrease in the instance's allocation // has been requested, billing is based on the newly-requested level. 
// // Until completion of the returned operation: // // * Cancelling the operation sets its metadata's // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins // restoring resources to their pre-request values. The operation // is guaranteed to succeed at undoing all resource changes, // after which point it terminates with a `CANCELLED` status. // * All other attempts to modify the instance are rejected. // * Reading the instance via the API continues to give the pre-request // resource levels. // // Upon completion of the returned operation: // // * Billing begins for all successfully-allocated resources (some types // may have lower than the requested levels). // * All newly-reserved resources are available for serving the instance's // tables. // * The instance's new resource levels are readable via the API. // // The returned [long-running operation][google.longrunning.Operation] will // have a name of the format `/operations/` and // can be used to track the instance modification. The // [metadata][google.longrunning.Operation.metadata] field type is // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. // The [response][google.longrunning.Operation.response] field type is // [Instance][google.spanner.admin.instance.v1.Instance], if successful. // // Authorization requires `spanner.instances.update` permission on // resource [name][google.spanner.admin.instance.v1.Instance.name]. func (c *InstanceAdminClient) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest, opts ...gax.CallOption) (*UpdateInstanceOperation, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.UpdateInstance[0:len(c.CallOptions.UpdateInstance):len(c.CallOptions.UpdateInstance)], opts...) 
var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.instanceAdminClient.UpdateInstance(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return &UpdateInstanceOperation{ lro: longrunning.InternalNewOperation(c.LROClient, resp), }, nil } // DeleteInstance deletes an instance. // // Immediately upon completion of the request: // // * Billing ceases for all of the instance's reserved resources. // // Soon afterward: // // * The instance and *all of its databases* immediately and // irrevocably disappear from the API. All data in the databases // is permanently deleted. func (c *InstanceAdminClient) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.DeleteInstance[0:len(c.CallOptions.DeleteInstance):len(c.CallOptions.DeleteInstance)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.instanceAdminClient.DeleteInstance(ctx, req, settings.GRPC...) return err }, opts...) return err } // SetIamPolicy sets the access control policy on an instance resource. Replaces any // existing policy. // // Authorization requires `spanner.instances.setIamPolicy` on // [resource][google.iam.v1.SetIamPolicyRequest.resource]. func (c *InstanceAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.SetIamPolicy[0:len(c.CallOptions.SetIamPolicy):len(c.CallOptions.SetIamPolicy)], opts...) var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.instanceAdminClient.SetIamPolicy(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, err } return resp, nil } // GetIamPolicy gets the access control policy for an instance resource. Returns an empty // policy if an instance exists but does not have a policy set. // // Authorization requires `spanner.instances.getIamPolicy` on // [resource][google.iam.v1.GetIamPolicyRequest.resource]. func (c *InstanceAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetIamPolicy[0:len(c.CallOptions.GetIamPolicy):len(c.CallOptions.GetIamPolicy)], opts...) var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.instanceAdminClient.GetIamPolicy(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // TestIamPermissions returns permissions that the caller has on the specified instance resource. // // Attempting this RPC on a non-existent Cloud Spanner instance resource will // result in a NOT_FOUND error if the user has `spanner.instances.list` // permission on the containing Google Cloud Project. Otherwise returns an // empty set of permissions. func (c *InstanceAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...) var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.instanceAdminClient.TestIamPermissions(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // InstanceConfigIterator manages a stream of *instancepb.InstanceConfig. 
type InstanceConfigIterator struct { items []*instancepb.InstanceConfig pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*instancepb.InstanceConfig, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *InstanceConfigIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *InstanceConfigIterator) Next() (*instancepb.InstanceConfig, error) { var item *instancepb.InstanceConfig if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *InstanceConfigIterator) bufLen() int { return len(it.items) } func (it *InstanceConfigIterator) takeBuf() interface{} { b := it.items it.items = nil return b } // InstanceIterator manages a stream of *instancepb.Instance. type InstanceIterator struct { items []*instancepb.Instance pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*instancepb.Instance, nextPageToken string, err error) } // PageInfo supports pagination. 
See the google.golang.org/api/iterator package for details. func (it *InstanceIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *InstanceIterator) Next() (*instancepb.Instance, error) { var item *instancepb.Instance if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *InstanceIterator) bufLen() int { return len(it.items) } func (it *InstanceIterator) takeBuf() interface{} { b := it.items it.items = nil return b } // CreateInstanceOperation manages a long-running operation from CreateInstance. type CreateInstanceOperation struct { lro *longrunning.Operation } // CreateInstanceOperation returns a new CreateInstanceOperation from a given name. // The name must be that of a previously created CreateInstanceOperation, possibly from a different process. func (c *InstanceAdminClient) CreateInstanceOperation(name string) *CreateInstanceOperation { return &CreateInstanceOperation{ lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), } } // Wait blocks until the long-running operation is completed, returning the response and any errors encountered. // // See documentation of Poll for error-handling information. func (op *CreateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { var resp instancepb.Instance if err := op.lro.Wait(ctx, &resp, opts...); err != nil { return nil, err } return &resp, nil } // Poll fetches the latest state of the long-running operation. // // Poll also fetches the latest metadata, which can be retrieved by Metadata. // // If Poll fails, the error is returned and op is unmodified. If Poll succeeds and // the operation has completed with failure, the error is returned and op.Done will return true. 
// If Poll succeeds and the operation has completed successfully, // op.Done will return true, and the response of the operation is returned. // If Poll succeeds and the operation has not completed, the returned response and error are both nil. func (op *CreateInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { var resp instancepb.Instance if err := op.lro.Poll(ctx, &resp, opts...); err != nil { return nil, err } if !op.Done() { return nil, nil } return &resp, nil } // Metadata returns metadata associated with the long-running operation. // Metadata itself does not contact the server, but Poll does. // To get the latest metadata, call this method after a successful call to Poll. // If the metadata is not available, the returned metadata and error are both nil. func (op *CreateInstanceOperation) Metadata() (*instancepb.CreateInstanceMetadata, error) { var meta instancepb.CreateInstanceMetadata if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { return nil, nil } else if err != nil { return nil, err } return &meta, nil } // Done reports whether the long-running operation has completed. func (op *CreateInstanceOperation) Done() bool { return op.lro.Done() } // Name returns the name of the long-running operation. // The name is assigned by the server and is unique within the service from which the operation is created. func (op *CreateInstanceOperation) Name() string { return op.lro.Name() } // UpdateInstanceOperation manages a long-running operation from UpdateInstance. type UpdateInstanceOperation struct { lro *longrunning.Operation } // UpdateInstanceOperation returns a new UpdateInstanceOperation from a given name. // The name must be that of a previously created UpdateInstanceOperation, possibly from a different process. 
func (c *InstanceAdminClient) UpdateInstanceOperation(name string) *UpdateInstanceOperation { return &UpdateInstanceOperation{ lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), } } // Wait blocks until the long-running operation is completed, returning the response and any errors encountered. // // See documentation of Poll for error-handling information. func (op *UpdateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { var resp instancepb.Instance if err := op.lro.Wait(ctx, &resp, opts...); err != nil { return nil, err } return &resp, nil } // Poll fetches the latest state of the long-running operation. // // Poll also fetches the latest metadata, which can be retrieved by Metadata. // // If Poll fails, the error is returned and op is unmodified. If Poll succeeds and // the operation has completed with failure, the error is returned and op.Done will return true. // If Poll succeeds and the operation has completed successfully, // op.Done will return true, and the response of the operation is returned. // If Poll succeeds and the operation has not completed, the returned response and error are both nil. func (op *UpdateInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) { var resp instancepb.Instance if err := op.lro.Poll(ctx, &resp, opts...); err != nil { return nil, err } if !op.Done() { return nil, nil } return &resp, nil } // Metadata returns metadata associated with the long-running operation. // Metadata itself does not contact the server, but Poll does. // To get the latest metadata, call this method after a successful call to Poll. // If the metadata is not available, the returned metadata and error are both nil. 
func (op *UpdateInstanceOperation) Metadata() (*instancepb.UpdateInstanceMetadata, error) { var meta instancepb.UpdateInstanceMetadata if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { return nil, nil } else if err != nil { return nil, err } return &meta, nil } // Done reports whether the long-running operation has completed. func (op *UpdateInstanceOperation) Done() bool { return op.lro.Done() } // Name returns the name of the long-running operation. // The name is assigned by the server and is unique within the service from which the operation is created. func (op *UpdateInstanceOperation) Name() string { return op.lro.Name() } golang-google-cloud-0.9.0/spanner/admin/instance/apiv1/instance_admin_client_example_test.go000066400000000000000000000120261312234511600323410ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package instance_test import ( "cloud.google.com/go/spanner/admin/instance/apiv1" "golang.org/x/net/context" "google.golang.org/api/iterator" iampb "google.golang.org/genproto/googleapis/iam/v1" instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" ) func ExampleNewInstanceAdminClient() { ctx := context.Background() c, err := instance.NewInstanceAdminClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. 
_ = c } func ExampleInstanceAdminClient_ListInstanceConfigs() { ctx := context.Background() c, err := instance.NewInstanceAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &instancepb.ListInstanceConfigsRequest{ // TODO: Fill request struct fields. } it := c.ListInstanceConfigs(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleInstanceAdminClient_GetInstanceConfig() { ctx := context.Background() c, err := instance.NewInstanceAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &instancepb.GetInstanceConfigRequest{ // TODO: Fill request struct fields. } resp, err := c.GetInstanceConfig(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleInstanceAdminClient_ListInstances() { ctx := context.Background() c, err := instance.NewInstanceAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &instancepb.ListInstancesRequest{ // TODO: Fill request struct fields. } it := c.ListInstances(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } } func ExampleInstanceAdminClient_GetInstance() { ctx := context.Background() c, err := instance.NewInstanceAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &instancepb.GetInstanceRequest{ // TODO: Fill request struct fields. } resp, err := c.GetInstance(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleInstanceAdminClient_CreateInstance() { ctx := context.Background() c, err := instance.NewInstanceAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &instancepb.CreateInstanceRequest{ // TODO: Fill request struct fields. } op, err := c.CreateInstance(ctx, req) if err != nil { // TODO: Handle error. } resp, err := op.Wait(ctx) if err != nil { // TODO: Handle error. } // TODO: Use resp. 
_ = resp } func ExampleInstanceAdminClient_UpdateInstance() { ctx := context.Background() c, err := instance.NewInstanceAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &instancepb.UpdateInstanceRequest{ // TODO: Fill request struct fields. } op, err := c.UpdateInstance(ctx, req) if err != nil { // TODO: Handle error. } resp, err := op.Wait(ctx) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleInstanceAdminClient_DeleteInstance() { ctx := context.Background() c, err := instance.NewInstanceAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &instancepb.DeleteInstanceRequest{ // TODO: Fill request struct fields. } err = c.DeleteInstance(ctx, req) if err != nil { // TODO: Handle error. } } func ExampleInstanceAdminClient_SetIamPolicy() { ctx := context.Background() c, err := instance.NewInstanceAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &iampb.SetIamPolicyRequest{ // TODO: Fill request struct fields. } resp, err := c.SetIamPolicy(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleInstanceAdminClient_GetIamPolicy() { ctx := context.Background() c, err := instance.NewInstanceAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &iampb.GetIamPolicyRequest{ // TODO: Fill request struct fields. } resp, err := c.GetIamPolicy(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleInstanceAdminClient_TestIamPermissions() { ctx := context.Background() c, err := instance.NewInstanceAdminClient(ctx) if err != nil { // TODO: Handle error. } req := &iampb.TestIamPermissionsRequest{ // TODO: Fill request struct fields. } resp, err := c.TestIamPermissions(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } golang-google-cloud-0.9.0/spanner/admin/instance/apiv1/mock_test.go000066400000000000000000000637251312234511600252610ustar00rootroot00000000000000// Copyright 2017, Google Inc. 
All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package instance import ( emptypb "github.com/golang/protobuf/ptypes/empty" iampb "google.golang.org/genproto/googleapis/iam/v1" longrunningpb "google.golang.org/genproto/googleapis/longrunning" instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" field_maskpb "google.golang.org/genproto/protobuf/field_mask" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockInstanceAdminServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. instancepb.InstanceAdminServer reqs []proto.Message // If set, all calls return this error. 
err error // responses to return if err == nil resps []proto.Message } func (s *mockInstanceAdminServer) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest) (*instancepb.ListInstanceConfigsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*instancepb.ListInstanceConfigsResponse), nil } func (s *mockInstanceAdminServer) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest) (*instancepb.InstanceConfig, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*instancepb.InstanceConfig), nil } func (s *mockInstanceAdminServer) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest) (*instancepb.ListInstancesResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*instancepb.ListInstancesResponse), nil } func (s *mockInstanceAdminServer) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest) (*instancepb.Instance, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*instancepb.Instance), nil } 
func (s *mockInstanceAdminServer) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest) (*longrunningpb.Operation, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*longrunningpb.Operation), nil } func (s *mockInstanceAdminServer) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest) (*longrunningpb.Operation, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*longrunningpb.Operation), nil } func (s *mockInstanceAdminServer) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } func (s *mockInstanceAdminServer) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*iampb.Policy), nil } func (s *mockInstanceAdminServer) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { md, _ := metadata.FromIncomingContext(ctx) if xg 
:= md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*iampb.Policy), nil } func (s *mockInstanceAdminServer) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*iampb.TestIamPermissionsResponse), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. var clientOpt option.ClientOption var ( mockInstanceAdmin mockInstanceAdminServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() instancepb.RegisterInstanceAdminServer(serv, &mockInstanceAdmin) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestInstanceAdminListInstanceConfigs(t *testing.T) { var nextPageToken string = "" var instanceConfigsElement *instancepb.InstanceConfig = &instancepb.InstanceConfig{} var instanceConfigs = []*instancepb.InstanceConfig{instanceConfigsElement} var expectedResponse = &instancepb.ListInstanceConfigsResponse{ NextPageToken: nextPageToken, InstanceConfigs: instanceConfigs, } mockInstanceAdmin.err = nil mockInstanceAdmin.reqs = nil mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) var formattedParent string = InstanceAdminProjectPath("[PROJECT]") var request = &instancepb.ListInstanceConfigsRequest{ Parent: formattedParent, } c, 
err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListInstanceConfigs(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.InstanceConfigs[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestInstanceAdminListInstanceConfigsError(t *testing.T) { errCode := codes.PermissionDenied mockInstanceAdmin.err = gstatus.Error(errCode, "test error") var formattedParent string = InstanceAdminProjectPath("[PROJECT]") var request = &instancepb.ListInstanceConfigsRequest{ Parent: formattedParent, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListInstanceConfigs(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestInstanceAdminGetInstanceConfig(t *testing.T) { var name2 string = "name2-1052831874" var displayName string = "displayName1615086568" var expectedResponse = &instancepb.InstanceConfig{ Name: name2, DisplayName: displayName, } mockInstanceAdmin.err = nil mockInstanceAdmin.reqs = nil mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) var formattedName string = InstanceAdminInstanceConfigPath("[PROJECT]", "[INSTANCE_CONFIG]") var request = &instancepb.GetInstanceConfigRequest{ Name: formattedName, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetInstanceConfig(context.Background(), request) if err != 
nil { t.Fatal(err) } if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestInstanceAdminGetInstanceConfigError(t *testing.T) { errCode := codes.PermissionDenied mockInstanceAdmin.err = gstatus.Error(errCode, "test error") var formattedName string = InstanceAdminInstanceConfigPath("[PROJECT]", "[INSTANCE_CONFIG]") var request = &instancepb.GetInstanceConfigRequest{ Name: formattedName, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetInstanceConfig(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestInstanceAdminListInstances(t *testing.T) { var nextPageToken string = "" var instancesElement *instancepb.Instance = &instancepb.Instance{} var instances = []*instancepb.Instance{instancesElement} var expectedResponse = &instancepb.ListInstancesResponse{ NextPageToken: nextPageToken, Instances: instances, } mockInstanceAdmin.err = nil mockInstanceAdmin.reqs = nil mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) var formattedParent string = InstanceAdminProjectPath("[PROJECT]") var request = &instancepb.ListInstancesRequest{ Parent: formattedParent, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListInstances(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Instances[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { 
case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestInstanceAdminListInstancesError(t *testing.T) { errCode := codes.PermissionDenied mockInstanceAdmin.err = gstatus.Error(errCode, "test error") var formattedParent string = InstanceAdminProjectPath("[PROJECT]") var request = &instancepb.ListInstancesRequest{ Parent: formattedParent, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListInstances(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestInstanceAdminGetInstance(t *testing.T) { var name2 string = "name2-1052831874" var config string = "config-1354792126" var displayName string = "displayName1615086568" var nodeCount int32 = 1539922066 var expectedResponse = &instancepb.Instance{ Name: name2, Config: config, DisplayName: displayName, NodeCount: nodeCount, } mockInstanceAdmin.err = nil mockInstanceAdmin.reqs = nil mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") var request = &instancepb.GetInstanceRequest{ Name: formattedName, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetInstance(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestInstanceAdminGetInstanceError(t *testing.T) { errCode := codes.PermissionDenied mockInstanceAdmin.err = 
gstatus.Error(errCode, "test error") var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") var request = &instancepb.GetInstanceRequest{ Name: formattedName, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetInstance(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestInstanceAdminCreateInstance(t *testing.T) { var name string = "name3373707" var config string = "config-1354792126" var displayName string = "displayName1615086568" var nodeCount int32 = 1539922066 var expectedResponse = &instancepb.Instance{ Name: name, Config: config, DisplayName: displayName, NodeCount: nodeCount, } mockInstanceAdmin.err = nil mockInstanceAdmin.reqs = nil any, err := ptypes.MarshalAny(expectedResponse) if err != nil { t.Fatal(err) } mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ Name: "longrunning-test", Done: true, Result: &longrunningpb.Operation_Response{Response: any}, }) var formattedParent string = InstanceAdminProjectPath("[PROJECT]") var instanceId string = "instanceId-2101995259" var instance *instancepb.Instance = &instancepb.Instance{} var request = &instancepb.CreateInstanceRequest{ Parent: formattedParent, InstanceId: instanceId, Instance: instance, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } respLRO, err := c.CreateInstance(context.Background(), request) if err != nil { t.Fatal(err) } resp, err := respLRO.Wait(context.Background()) if err != nil { t.Fatal(err) } if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, 
want) } } func TestInstanceAdminCreateInstanceError(t *testing.T) { errCode := codes.PermissionDenied mockInstanceAdmin.err = nil mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ Name: "longrunning-test", Done: true, Result: &longrunningpb.Operation_Error{ Error: &status.Status{ Code: int32(errCode), Message: "test error", }, }, }) var formattedParent string = InstanceAdminProjectPath("[PROJECT]") var instanceId string = "instanceId-2101995259" var instance *instancepb.Instance = &instancepb.Instance{} var request = &instancepb.CreateInstanceRequest{ Parent: formattedParent, InstanceId: instanceId, Instance: instance, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } respLRO, err := c.CreateInstance(context.Background(), request) if err != nil { t.Fatal(err) } resp, err := respLRO.Wait(context.Background()) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestInstanceAdminUpdateInstance(t *testing.T) { var name string = "name3373707" var config string = "config-1354792126" var displayName string = "displayName1615086568" var nodeCount int32 = 1539922066 var expectedResponse = &instancepb.Instance{ Name: name, Config: config, DisplayName: displayName, NodeCount: nodeCount, } mockInstanceAdmin.err = nil mockInstanceAdmin.reqs = nil any, err := ptypes.MarshalAny(expectedResponse) if err != nil { t.Fatal(err) } mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ Name: "longrunning-test", Done: true, Result: &longrunningpb.Operation_Response{Response: any}, }) var instance *instancepb.Instance = &instancepb.Instance{} var fieldMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} var request = &instancepb.UpdateInstanceRequest{ Instance: instance, FieldMask: fieldMask, } c, err := 
NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } respLRO, err := c.UpdateInstance(context.Background(), request) if err != nil { t.Fatal(err) } resp, err := respLRO.Wait(context.Background()) if err != nil { t.Fatal(err) } if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestInstanceAdminUpdateInstanceError(t *testing.T) { errCode := codes.PermissionDenied mockInstanceAdmin.err = nil mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ Name: "longrunning-test", Done: true, Result: &longrunningpb.Operation_Error{ Error: &status.Status{ Code: int32(errCode), Message: "test error", }, }, }) var instance *instancepb.Instance = &instancepb.Instance{} var fieldMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} var request = &instancepb.UpdateInstanceRequest{ Instance: instance, FieldMask: fieldMask, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } respLRO, err := c.UpdateInstance(context.Background(), request) if err != nil { t.Fatal(err) } resp, err := respLRO.Wait(context.Background()) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestInstanceAdminDeleteInstance(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockInstanceAdmin.err = nil mockInstanceAdmin.reqs = nil mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") var request = &instancepb.DeleteInstanceRequest{ Name: formattedName, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if 
err != nil { t.Fatal(err) } err = c.DeleteInstance(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestInstanceAdminDeleteInstanceError(t *testing.T) { errCode := codes.PermissionDenied mockInstanceAdmin.err = gstatus.Error(errCode, "test error") var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") var request = &instancepb.DeleteInstanceRequest{ Name: formattedName, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.DeleteInstance(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestInstanceAdminSetIamPolicy(t *testing.T) { var version int32 = 351608024 var etag []byte = []byte("21") var expectedResponse = &iampb.Policy{ Version: version, Etag: etag, } mockInstanceAdmin.err = nil mockInstanceAdmin.reqs = nil mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") var policy *iampb.Policy = &iampb.Policy{} var request = &iampb.SetIamPolicyRequest{ Resource: formattedResource, Policy: policy, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.SetIamPolicy(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestInstanceAdminSetIamPolicyError(t *testing.T) { errCode := codes.PermissionDenied mockInstanceAdmin.err = gstatus.Error(errCode, 
"test error") var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") var policy *iampb.Policy = &iampb.Policy{} var request = &iampb.SetIamPolicyRequest{ Resource: formattedResource, Policy: policy, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.SetIamPolicy(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestInstanceAdminGetIamPolicy(t *testing.T) { var version int32 = 351608024 var etag []byte = []byte("21") var expectedResponse = &iampb.Policy{ Version: version, Etag: etag, } mockInstanceAdmin.err = nil mockInstanceAdmin.reqs = nil mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") var request = &iampb.GetIamPolicyRequest{ Resource: formattedResource, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetIamPolicy(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestInstanceAdminGetIamPolicyError(t *testing.T) { errCode := codes.PermissionDenied mockInstanceAdmin.err = gstatus.Error(errCode, "test error") var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") var request = &iampb.GetIamPolicyRequest{ Resource: formattedResource, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetIamPolicy(context.Background(), request) if st, ok := gstatus.FromError(err); 
!ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestInstanceAdminTestIamPermissions(t *testing.T) { var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} mockInstanceAdmin.err = nil mockInstanceAdmin.reqs = nil mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") var permissions []string = nil var request = &iampb.TestIamPermissionsRequest{ Resource: formattedResource, Permissions: permissions, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.TestIamPermissions(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestInstanceAdminTestIamPermissionsError(t *testing.T) { errCode := codes.PermissionDenied mockInstanceAdmin.err = gstatus.Error(errCode, "test error") var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") var permissions []string = nil var request = &iampb.TestIamPermissionsRequest{ Resource: formattedResource, Permissions: permissions, } c, err := NewInstanceAdminClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.TestIamPermissions(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } golang-google-cloud-0.9.0/spanner/backoff.go000066400000000000000000000027071312234511600207410ustar00rootroot00000000000000/* Copyright 2017 Google 
Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "math/rand" "time" ) const ( // minBackoff is the minimum backoff used by default. minBackoff = 1 * time.Second // maxBackoff is the maximum backoff used by default. maxBackoff = 32 * time.Second // jitter is the jitter factor. jitter = 0.4 // rate is the rate of exponential increase in the backoff. rate = 1.3 ) var defaultBackoff = exponentialBackoff{minBackoff, maxBackoff} type exponentialBackoff struct { min, max time.Duration } // delay calculates the delay that should happen at n-th // exponential backoff in a series. func (b exponentialBackoff) delay(retries int) time.Duration { min, max := float64(b.min), float64(b.max) delay := min for delay < max && retries > 0 { delay *= rate retries-- } if delay > max { delay = max } delay -= delay * jitter * rand.Float64() if delay < min { delay = min } return time.Duration(delay) } golang-google-cloud-0.9.0/spanner/backoff_test.go000066400000000000000000000030751312234511600217770ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "math" "time" "testing" ) // Test if exponential backoff helper can produce correct series of // retry delays. func TestBackoff(t *testing.T) { b := exponentialBackoff{minBackoff, maxBackoff} tests := []struct { retries int min time.Duration max time.Duration }{ { retries: 0, min: minBackoff, max: minBackoff, }, { retries: 1, min: minBackoff, max: time.Duration(rate * float64(minBackoff)), }, { retries: 3, min: time.Duration(math.Pow(rate, 3) * (1 - jitter) * float64(minBackoff)), max: time.Duration(math.Pow(rate, 3) * float64(minBackoff)), }, { retries: 1000, min: time.Duration((1 - jitter) * float64(maxBackoff)), max: maxBackoff, }, } for _, test := range tests { got := b.delay(test.retries) if float64(got) < float64(test.min) || float64(got) > float64(test.max) { t.Errorf("delay(%v) = %v, want in range [%v, %v]", test.retries, got, test.min, test.max) } } } golang-google-cloud-0.9.0/spanner/client.go000066400000000000000000000247111312234511600206230ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package spanner import ( "fmt" "regexp" "sync/atomic" "time" "cloud.google.com/go/internal/version" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" sppb "google.golang.org/genproto/googleapis/spanner/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" ) const ( prodAddr = "spanner.googleapis.com:443" // resourcePrefixHeader is the name of the metadata header used to indicate // the resource being operated on. resourcePrefixHeader = "google-cloud-resource-prefix" // apiClientHeader is the name of the metadata header used to indicate client // information. apiClientHeader = "x-goog-api-client" // numChannels is the default value for NumChannels of client numChannels = 4 ) const ( // Scope is the scope for Cloud Spanner Data API. Scope = "https://www.googleapis.com/auth/spanner.data" // AdminScope is the scope for Cloud Spanner Admin APIs. AdminScope = "https://www.googleapis.com/auth/spanner.admin" ) var ( validDBPattern = regexp.MustCompile("^projects/[^/]+/instances/[^/]+/databases/[^/]+$") clientUserAgent = fmt.Sprintf("gl-go/%s gccl/%s grpc/%s", version.Go(), version.Repo, grpc.Version) ) func validDatabaseName(db string) error { if matched := validDBPattern.MatchString(db); !matched { return fmt.Errorf("database name %q should conform to pattern %q", db, validDBPattern.String()) } return nil } // Client is a client for reading and writing data to a Cloud Spanner database. A // client is safe to use concurrently, except for its Close method. type Client struct { // rr must be accessed through atomic operations. rr uint32 conns []*grpc.ClientConn clients []sppb.SpannerClient database string // Metadata to be sent with each request. md metadata.MD idleSessions *sessionPool } // ClientConfig has configurations for the client. type ClientConfig struct { // NumChannels is the number of GRPC channels. // If zero, numChannels is used. 
NumChannels int co []option.ClientOption // SessionPoolConfig is the configuration for session pool. SessionPoolConfig } // errDial returns error for dialing to Cloud Spanner. func errDial(ci int, err error) error { e := toSpannerError(err).(*Error) e.decorate(fmt.Sprintf("dialing fails for channel[%v]", ci)) return e } func contextWithOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context { existing, ok := metadata.FromOutgoingContext(ctx) if ok { md = metadata.Join(existing, md) } return metadata.NewOutgoingContext(ctx, md) } // NewClient creates a client to a database. A valid database name has the // form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID. It uses a default // configuration. func NewClient(ctx context.Context, database string, opts ...option.ClientOption) (*Client, error) { return NewClientWithConfig(ctx, database, ClientConfig{}, opts...) } // NewClientWithConfig creates a client to a database. A valid database name has the // form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID. func NewClientWithConfig(ctx context.Context, database string, config ClientConfig, opts ...option.ClientOption) (*Client, error) { // Validate database path. if err := validDatabaseName(database); err != nil { return nil, err } c := &Client{ database: database, md: metadata.Pairs( resourcePrefixHeader, database, apiClientHeader, clientUserAgent), } allOpts := []option.ClientOption{option.WithEndpoint(prodAddr), option.WithScopes(Scope), option.WithUserAgent(clientUserAgent), option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20)))} allOpts = append(allOpts, opts...) // Prepare gRPC channels. 
if config.NumChannels == 0 { config.NumChannels = numChannels } // Default MaxOpened sessions if config.MaxOpened == 0 { config.MaxOpened = uint64(config.NumChannels * 100) } if config.MaxBurst == 0 { config.MaxBurst = 10 } for i := 0; i < config.NumChannels; i++ { conn, err := transport.DialGRPC(ctx, allOpts...) if err != nil { return nil, errDial(i, err) } c.conns = append(c.conns, conn) c.clients = append(c.clients, sppb.NewSpannerClient(conn)) } // Prepare session pool. config.SessionPoolConfig.getRPCClient = func() (sppb.SpannerClient, error) { // TODO: support more loadbalancing options. return c.rrNext(), nil } sp, err := newSessionPool(database, config.SessionPoolConfig, c.md) if err != nil { c.Close() return nil, err } c.idleSessions = sp return c, nil } // rrNext returns the next available Cloud Spanner RPC client in a round-robin manner. func (c *Client) rrNext() sppb.SpannerClient { return c.clients[atomic.AddUint32(&c.rr, 1)%uint32(len(c.clients))] } // Close closes the client. func (c *Client) Close() { if c.idleSessions != nil { c.idleSessions.close() } for _, conn := range c.conns { conn.Close() } } // Single provides a read-only snapshot transaction optimized for the case // where only a single read or query is needed. This is more efficient than // using ReadOnlyTransaction() for a single read or query. // // Single will use a strong TimestampBound by default. Use // ReadOnlyTransaction.WithTimestampBound to specify a different // TimestampBound. A non-strong bound can be used to reduce latency, or // "time-travel" to prior versions of the database, see the documentation of // TimestampBound for details. func (c *Client) Single() *ReadOnlyTransaction { t := &ReadOnlyTransaction{singleUse: true, sp: c.idleSessions} t.txReadOnly.txReadEnv = t return t } // ReadOnlyTransaction returns a ReadOnlyTransaction that can be used for // multiple reads from the database. 
You must call Close() when the // ReadOnlyTransaction is no longer needed to release resources on the server. // // ReadOnlyTransaction will use a strong TimestampBound by default. Use // ReadOnlyTransaction.WithTimestampBound to specify a different // TimestampBound. A non-strong bound can be used to reduce latency, or // "time-travel" to prior versions of the database, see the documentation of // TimestampBound for details. func (c *Client) ReadOnlyTransaction() *ReadOnlyTransaction { t := &ReadOnlyTransaction{ singleUse: false, sp: c.idleSessions, txReadyOrClosed: make(chan struct{}), } t.txReadOnly.txReadEnv = t return t } type transactionInProgressKey struct{} func checkNestedTxn(ctx context.Context) error { if ctx.Value(transactionInProgressKey{}) != nil { return spannerErrorf(codes.FailedPrecondition, "Cloud Spanner does not support nested transactions") } return nil } // ReadWriteTransaction executes a read-write transaction, with retries as // necessary. // // The function f will be called one or more times. It must not maintain // any state between calls. // // If the transaction cannot be committed or if f returns an IsAborted error, // ReadWriteTransaction will call f again. It will continue to call f until the // transaction can be committed or the Context times out or is cancelled. If f // returns an error other than IsAborted, ReadWriteTransaction will abort the // transaction and return the error. // // To limit the number of retries, set a deadline on the Context rather than // using a fixed limit on the number of attempts. ReadWriteTransaction will // retry as needed until that deadline is met. 
func (c *Client) ReadWriteTransaction(ctx context.Context, f func(context.Context, *ReadWriteTransaction) error) (time.Time, error) { if err := checkNestedTxn(ctx); err != nil { return time.Time{}, err } var ( ts time.Time sh *sessionHandle ) err := runRetryable(ctx, func(ctx context.Context) error { var ( err error t *ReadWriteTransaction ) if sh == nil || sh.getID() == "" || sh.getClient() == nil { // Session handle hasn't been allocated or has been destroyed. sh, err = c.idleSessions.takeWriteSession(ctx) if err != nil { // If session retrieval fails, just fail the transaction. return err } t = &ReadWriteTransaction{ sh: sh, tx: sh.getTransactionID(), } } else { t = &ReadWriteTransaction{ sh: sh, } } t.txReadOnly.txReadEnv = t if err = t.begin(ctx); err != nil { // Mask error from begin operation as retryable error. return errRetry(err) } ts, err = t.runInTransaction(ctx, f) if err != nil { return err } return nil }) if sh != nil { sh.recycle() } return ts, err } // applyOption controls the behavior of Client.Apply. type applyOption struct { // If atLeastOnce == true, Client.Apply will execute the mutations on Cloud Spanner at least once. atLeastOnce bool } // An ApplyOption is an optional argument to Apply. type ApplyOption func(*applyOption) // ApplyAtLeastOnce returns an ApplyOption that removes replay protection. // // With this option, Apply may attempt to apply mutations more than once; if // the mutations are not idempotent, this may lead to a failure being reported // when the mutation was applied more than once. For example, an insert may // fail with ALREADY_EXISTS even though the row did not exist before Apply was // called. For this reason, most users of the library will prefer not to use // this option. However, ApplyAtLeastOnce requires only a single RPC, whereas // Apply's default replay protection may require an additional RPC. So this // option may be appropriate for latency sensitive and/or high throughput blind // writing. 
func ApplyAtLeastOnce() ApplyOption { return func(ao *applyOption) { ao.atLeastOnce = true } } // Apply applies a list of mutations atomically to the database. func (c *Client) Apply(ctx context.Context, ms []*Mutation, opts ...ApplyOption) (time.Time, error) { ao := &applyOption{} for _, opt := range opts { opt(ao) } if !ao.atLeastOnce { return c.ReadWriteTransaction(ctx, func(ctx context.Context, t *ReadWriteTransaction) error { t.BufferWrite(ms) return nil }) } t := &writeOnlyTransaction{c.idleSessions} return t.applyAtLeastOnce(ctx, ms...) } golang-google-cloud-0.9.0/spanner/client_test.go000066400000000000000000000030421312234511600216540ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "strings" "testing" ) // Test validDatabaseName() func TestValidDatabaseName(t *testing.T) { validDbUri := "projects/spanner-cloud-test/instances/foo/databases/foodb" invalidDbUris := []string{ // Completely wrong DB URI. "foobarDB", // Project ID contains "/". "projects/spanner-cloud/test/instances/foo/databases/foodb", // No instance ID. 
"projects/spanner-cloud-test/instances//databases/foodb", } if err := validDatabaseName(validDbUri); err != nil { t.Errorf("validateDatabaseName(%q) = %v, want nil", validDbUri, err) } for _, d := range invalidDbUris { if err, wantErr := validDatabaseName(d), "should conform to pattern"; !strings.Contains(err.Error(), wantErr) { t.Errorf("validateDatabaseName(%q) = %q, want error pattern %q", validDbUri, err, wantErr) } } } func TestReadOnlyTransactionClose(t *testing.T) { // Closing a ReadOnlyTransaction shouldn't panic. c := &Client{} tx := c.ReadOnlyTransaction() tx.Close() } golang-google-cloud-0.9.0/spanner/doc.go000066400000000000000000000230141312234511600201050ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Package spanner provides a client for reading and writing to Cloud Spanner databases. See the packages under admin for clients that operate on databases and instances. Note: This package is in alpha. Backwards-incompatible changes may occur without notice. See https://cloud.google.com/spanner/docs/getting-started/go/ for an introduction to Cloud Spanner and additional help on using this API. Creating a Client To start working with this package, create a client that refers to the database of interest: ctx := context.Background() client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") defer client.Close() if err != nil { // TODO: Handle error. 
} Remember to close the client after use to free up the sessions in the session pool. Simple Reads and Writes Two Client methods, Apply and Single, work well for simple reads and writes. As a quick introduction, here we write a new row to the database and read it back: _, err := client.Apply(ctx, []*spanner.Mutation{ spanner.Insert("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"})}) if err != nil { // TODO: Handle error. } row, err := client.Single().ReadRow(ctx, "Users", spanner.Key{"alice"}, []string{"email"}) if err != nil { // TODO: Handle error. } All the methods used above are discussed in more detail below. Keys Every Cloud Spanner row has a unique key, composed of one or more columns. Construct keys with a literal of type Key: key1 := spanner.Key{"alice"} KeyRanges The keys of a Cloud Spanner table are ordered. You can specify ranges of keys using the KeyRange type: kr1 := spanner.KeyRange{Start: key1, End: key2} By default, a KeyRange includes its start key but not its end key. Use the Kind field to specify other boundary conditions: // include both keys kr2 := spanner.KeyRange{Start: key1, End: key2, Kind: spanner.ClosedClosed} KeySets A KeySet represents a set of keys. A single Key or KeyRange can act as a KeySet. Use the KeySets function to build the union of several KeySets: ks1 := spanner.KeySets(key1, key2, kr1, kr2) AllKeys returns a KeySet that refers to all the keys in a table: ks2 := spanner.AllKeys() Transactions All Cloud Spanner reads and writes occur inside transactions. There are two types of transactions, read-only and read-write. Read-only transactions cannot change the database, do not acquire locks, and may access either the current database state or states in the past. Read-write transactions can read the database before writing to it, and always apply to the most recent database state. Single Reads The simplest and fastest transaction is a ReadOnlyTransaction that supports a single read operation. 
Use Client.Single to create such a transaction. You can chain the call to Single with a call to a Read method. When you only want one row whose key you know, use ReadRow. Provide the table name, key, and the columns you want to read: row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) Read multiple rows with the Read method. It takes a table name, KeySet, and list of columns: iter := client.Single().Read(ctx, "Accounts", keyset1, columns) Read returns a RowIterator. You can call the Do method on the iterator and pass a callback: err := iter.Do(func(row *Row) error { // TODO: use row return nil }) RowIterator also follows the standard pattern for the Google Cloud Client Libraries: defer iter.Stop() for { row, err := iter.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: use row } Always call Stop when you finish using an iterator this way, whether or not you iterate to the end. (Failing to call Stop could lead you to exhaust the database's session quota.) To read rows with an index, use ReadUsingIndex. Statements The most general form of reading uses SQL statements. Construct a Statement with NewStatement, setting any parameters using the Statement's Params map: stmt := spanner.NewStatement("SELECT First, Last FROM SINGERS WHERE Last >= @start") stmt.Params["start"] = "Dylan" You can also construct a Statement directly with a struct literal, providing your own map of parameters. Use the Query method to run the statement and obtain an iterator: iter := client.Single().Query(ctx, stmt) Rows Once you have a Row, via an iterator or a call to ReadRow, you can extract column values in several ways. Pass in a pointer to a Go variable of the appropriate type when you extract a value. 
You can extract by column position or name: err := row.Column(0, &name) err = row.ColumnByName("balance", &balance) You can extract all the columns at once: err = row.Columns(&name, &balance) Or you can define a Go struct that corresponds to your columns, and extract into that: var s struct { Name string; Balance int64 } err = row.ToStruct(&s) For Cloud Spanner columns that may contain NULL, use one of the NullXXX types, like NullString: var ns spanner.NullString if err =: row.Column(0, &ns); err != nil { // TODO: Handle error. } if ns.Valid { fmt.Println(ns.StringVal) } else { fmt.Println("column is NULL") } Multiple Reads To perform more than one read in a transaction, use ReadOnlyTransaction: txn := client.ReadOnlyTransaction() defer txn.Close() iter := txn.Query(ctx, stmt1) // ... iter = txn.Query(ctx, stmt2) // ... You must call Close when you are done with the transaction. Timestamps and Timestamp Bounds Cloud Spanner read-only transactions conceptually perform all their reads at a single moment in time, called the transaction's read timestamp. Once a read has started, you can call ReadOnlyTransaction's Timestamp method to obtain the read timestamp. By default, a transaction will pick the most recent time (a time where all previously committed transactions are visible) for its reads. This provides the freshest data, but may involve some delay. You can often get a quicker response if you are willing to tolerate "stale" data. You can control the read timestamp selected by a transaction by calling the WithTimestampBound method on the transaction before using it. For example, to perform a query on data that is at most one minute stale, use client.Single(). WithTimestampBound(spanner.MaxStaleness(1*time.Minute)). Query(ctx, stmt) See the documentation of TimestampBound for more details. Mutations To write values to a Cloud Spanner database, construct a Mutation. The spanner package has functions for inserting, updating and deleting rows. 
Except for the Delete methods, which take a Key or KeyRange, each mutation-building function comes in three varieties. One takes lists of columns and values along with the table name: m1 := spanner.Insert("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"}) One takes a map from column names to values: m2 := spanner.InsertMap("Users", map[string]interface{}{ "name": "alice", "email": "a@example.com", }) And the third accepts a struct value, and determines the columns from the struct field names: type User struct { Name, Email string } u := User{Name: "alice", Email: "a@example.com"} m3, err := spanner.InsertStruct("Users", u) Writes To apply a list of mutations to the database, use Apply: _, err := client.Apply(ctx, []*spanner.Mutation{m1, m2, m3}) If you need to read before writing in a single transaction, use a ReadWriteTransaction. ReadWriteTransactions may abort and need to be retried. You pass in a function to ReadWriteTransaction, and the client will handle the retries automatically. Use the transaction's BufferWrite method to buffer mutations, which will all be executed at the end of the transaction: _, err := client.ReadWriteTransaction(ctx, func(txn *spanner.ReadWriteTransaction) error { var balance int64 row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) if err != nil { // This function will be called again if this is an IsAborted error. return err } if err := row.Column(0, &balance); err != nil { return err } if balance <= 10 { return errors.New("insufficient funds in account") } balance -= 10 m := spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance}) txn.BufferWrite([]*spanner.Mutation{m}) // The buffered mutation will be committed. If the commit // fails with an IsAborted error, this function will be called // again. return nil }) Authentication See examples of authorization and authentication at https://godoc.org/cloud.google.com/go#pkg-examples. 
*/ package spanner // import "cloud.google.com/go/spanner" golang-google-cloud-0.9.0/spanner/errors.go000066400000000000000000000055421312234511600206620ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "fmt" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" ) // Error is the structured error returned by Cloud Spanner client. type Error struct { // Code is the canonical error code for describing the nature of a // particular error. Code codes.Code // Desc explains more details of the error. Desc string // trailers are the trailers returned in the response, if any. trailers metadata.MD } // Error implements error.Error. func (e *Error) Error() string { if e == nil { return fmt.Sprintf("spanner: OK") } return fmt.Sprintf("spanner: code = %q, desc = %q", e.Code, e.Desc) } // decorate decorates an existing spanner.Error with more information. func (e *Error) decorate(info string) { e.Desc = fmt.Sprintf("%v, %v", info, e.Desc) } // spannerErrorf generates a *spanner.Error with the given error code and // description. func spannerErrorf(ec codes.Code, format string, args ...interface{}) error { return &Error{ Code: ec, Desc: fmt.Sprintf(format, args...), } } // toSpannerError converts general Go error to *spanner.Error. 
func toSpannerError(err error) error { return toSpannerErrorWithMetadata(err, nil) } // toSpannerErrorWithMetadata converts general Go error and grpc trailers to *spanner.Error. // Note: modifies original error if trailers aren't nil func toSpannerErrorWithMetadata(err error, trailers metadata.MD) error { if err == nil { return nil } if se, ok := err.(*Error); ok { if trailers != nil { se.trailers = metadata.Join(se.trailers, trailers) } return se } if grpc.Code(err) == codes.Unknown { return &Error{codes.Unknown, err.Error(), trailers} } return &Error{grpc.Code(err), grpc.ErrorDesc(err), trailers} } // ErrCode extracts the canonical error code from a Go error. func ErrCode(err error) codes.Code { se, ok := toSpannerError(err).(*Error) if !ok { return codes.Unknown } return se.Code } // ErrDesc extracts the Cloud Spanner error description from a Go error. func ErrDesc(err error) string { se, ok := toSpannerError(err).(*Error) if !ok { return err.Error() } return se.Desc } // errTrailers extracts the grpc trailers if present from a Go error. func errTrailers(err error) metadata.MD { se, ok := err.(*Error) if !ok { return nil } return se.trailers } golang-google-cloud-0.9.0/spanner/examples_test.go000066400000000000000000000336061312234511600222250ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package spanner_test import ( "errors" "fmt" "time" "cloud.google.com/go/spanner" "golang.org/x/net/context" "google.golang.org/api/iterator" sppb "google.golang.org/genproto/googleapis/spanner/v1" ) func ExampleNewClient() { ctx := context.Background() const myDB = "projects/my-project/instances/my-instance/database/my-db" client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } _ = client // TODO: Use client. } const myDB = "projects/my-project/instances/my-instance/database/my-db" func ExampleNewClientWithConfig() { ctx := context.Background() const myDB = "projects/my-project/instances/my-instance/database/my-db" client, err := spanner.NewClientWithConfig(ctx, myDB, spanner.ClientConfig{ NumChannels: 10, }) if err != nil { // TODO: Handle error. } _ = client // TODO: Use client. client.Close() // Close client when done. } func ExampleClient_Single() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) _ = iter // TODO: iterate using Next or Do. } func ExampleClient_ReadOnlyTransaction() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } t := client.ReadOnlyTransaction() defer t.Close() // TODO: Read with t using Read, ReadRow, ReadUsingIndex, or Query. } func ExampleClient_ReadWriteTransaction() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } _, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { var balance int64 row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) if err != nil { // This function will be called again if this is an // IsAborted error. 
return err } if err := row.Column(0, &balance); err != nil { return err } if balance <= 10 { return errors.New("insufficient funds in account") } balance -= 10 m := spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance}) return txn.BufferWrite([]*spanner.Mutation{m}) // The buffered mutation will be committed. If the commit // fails with an IsAborted error, this function will be called // again. }) if err != nil { // TODO: Handle error. } } func ExampleUpdate() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } _, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) if err != nil { return err } var balance int64 if err := row.Column(0, &balance); err != nil { return err } return txn.BufferWrite([]*spanner.Mutation{ spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance + 10}), }) }) if err != nil { // TODO: Handle error. } } // This example is the same as the one for Update, except for the use of UpdateMap. func ExampleUpdateMap() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } _, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) if err != nil { return err } var balance int64 if err := row.Column(0, &balance); err != nil { return err } return txn.BufferWrite([]*spanner.Mutation{ spanner.UpdateMap("Accounts", map[string]interface{}{ "user": "alice", "balance": balance + 10, }), }) }) if err != nil { // TODO: Handle error. } } // This example is the same as the one for Update, except for the use of UpdateStruct. 
func ExampleUpdateStruct() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } type account struct { User string `spanner:"user"` Balance int64 `spanner:"balance"` } _, err = client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) if err != nil { return err } var balance int64 if err := row.Column(0, &balance); err != nil { return err } m, err := spanner.UpdateStruct("Accounts", account{ User: "alice", Balance: balance + 10, }) if err != nil { return err } return txn.BufferWrite([]*spanner.Mutation{m}) }) if err != nil { // TODO: Handle error. } } func ExampleClient_Apply() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } m := spanner.Update("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"}) _, err = client.Apply(ctx, []*spanner.Mutation{m}) if err != nil { // TODO: Handle error. } } func ExampleInsert() { m := spanner.Insert("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"}) _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. } func ExampleInsertMap() { m := spanner.InsertMap("Users", map[string]interface{}{ "name": "alice", "email": "a@example.com", }) _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. } func ExampleInsertStruct() { type User struct { Name, Email string } u := User{Name: "alice", Email: "a@example.com"} m, err := spanner.InsertStruct("Users", u) if err != nil { // TODO: Handle error. } _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. } func ExampleDelete() { m := spanner.Delete("Users", spanner.Key{"alice"}) _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. 
} func ExampleDelete_KeyRange() { m := spanner.Delete("Users", spanner.KeyRange{ Start: spanner.Key{"alice"}, End: spanner.Key{"bob"}, Kind: spanner.ClosedClosed, }) _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. } func ExampleRowIterator_Next() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) defer iter.Stop() for { row, err := iter.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } var firstName string if err := row.Column(0, &firstName); err != nil { // TODO: Handle error. } fmt.Println(firstName) } } func ExampleRowIterator_Do() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) err = iter.Do(func(r *spanner.Row) error { var firstName string if err := r.Column(0, &firstName); err != nil { return err } fmt.Println(firstName) return nil }) if err != nil { // TODO: Handle error. } } func ExampleRow_Size() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) if err != nil { // TODO: Handle error. } fmt.Println(row.Size()) // size is 2 } func ExampleRow_ColumnName() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) if err != nil { // TODO: Handle error. } fmt.Println(row.ColumnName(1)) // prints "balance" } func ExampleRow_ColumnIndex() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. 
} row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) if err != nil { // TODO: Handle error. } index, err := row.ColumnIndex("balance") if err != nil { // TODO: Handle error. } fmt.Println(index) } func ExampleRow_ColumnNames() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) if err != nil { // TODO: Handle error. } fmt.Println(row.ColumnNames()) } func ExampleRow_ColumnByName() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) if err != nil { // TODO: Handle error. } var balance int64 if err := row.ColumnByName("balance", &balance); err != nil { // TODO: Handle error. } fmt.Println(balance) } func ExampleRow_Columns() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) if err != nil { // TODO: Handle error. } var name string var balance int64 if err := row.Columns(&name, &balance); err != nil { // TODO: Handle error. } fmt.Println(name, balance) } func ExampleRow_ToStruct() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) if err != nil { // TODO: Handle error. } type Account struct { Name string Balance int64 } var acct Account if err := row.ToStruct(&acct); err != nil { // TODO: Handle error. 
} fmt.Println(acct) } func ExampleReadOnlyTransaction_Read() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } iter := client.Single().Read(ctx, "Users", spanner.KeySets(spanner.Key{"alice"}, spanner.Key{"bob"}), []string{"name", "email"}) _ = iter // TODO: iterate using Next or Do. } func ExampleReadOnlyTransaction_ReadUsingIndex() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } iter := client.Single().ReadUsingIndex(ctx, "Users", "UsersByEmail", spanner.KeySets(spanner.Key{"a@example.com"}, spanner.Key{"b@example.com"}), []string{"name", "email"}) _ = iter // TODO: iterate using Next or Do. } func ExampleReadOnlyTransaction_ReadRow() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } row, err := client.Single().ReadRow(ctx, "Users", spanner.Key{"alice"}, []string{"name", "email"}) if err != nil { // TODO: Handle error. } _ = row // TODO: use row } func ExampleReadOnlyTransaction_Query() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) _ = iter // TODO: iterate using Next or Do. } func ExampleNewStatement() { stmt := spanner.NewStatement("SELECT FirstName, LastName FROM SINGERS WHERE LastName >= @start") stmt.Params["start"] = "Dylan" // TODO: Use stmt in Query. } func ExampleNewStatement_structLiteral() { stmt := spanner.Statement{ SQL: "SELECT FirstName, LastName FROM SINGERS WHERE LastName >= @start", Params: map[string]interface{}{"start": "Dylan"}, } _ = stmt // TODO: Use stmt in Query. } func ExampleReadOnlyTransaction_Timestamp() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. 
} txn := client.Single() row, err := txn.ReadRow(ctx, "Users", spanner.Key{"alice"}, []string{"name", "email"}) if err != nil { // TODO: Handle error. } readTimestamp, err := txn.Timestamp() if err != nil { // TODO: Handle error. } fmt.Println("read happened at", readTimestamp) _ = row // TODO: use row } func ExampleReadOnlyTransaction_WithTimestampBound() { ctx := context.Background() client, err := spanner.NewClient(ctx, myDB) if err != nil { // TODO: Handle error. } txn := client.Single().WithTimestampBound(spanner.MaxStaleness(30 * time.Second)) row, err := txn.ReadRow(ctx, "Users", spanner.Key{"alice"}, []string{"name", "email"}) if err != nil { // TODO: Handle error. } _ = row // TODO: use row readTimestamp, err := txn.Timestamp() if err != nil { // TODO: Handle error. } fmt.Println("read happened at", readTimestamp) } func ExampleNewGenericColumnValue_Decode() { // In real applications, rows can be retrieved by methods like client.Single().ReadRow(). row, err := spanner.NewRow([]string{"intCol", "strCol"}, []interface{}{42, "my-text"}) if err != nil { // TODO: Handle error. } for i := 0; i < row.Size(); i++ { var col spanner.GenericColumnValue if err := row.Column(i, &col); err != nil { // TODO: Handle error. } switch col.Type.Code { case sppb.TypeCode_INT64: var v int64 if err := col.Decode(&v); err != nil { // TODO: Handle error. } fmt.Println("int", v) case sppb.TypeCode_STRING: var v string if err := col.Decode(&v); err != nil { // TODO: Handle error. } fmt.Println("string", v) } } // Output: // int 42 // string my-text } golang-google-cloud-0.9.0/spanner/internal/000077500000000000000000000000001312234511600206255ustar00rootroot00000000000000golang-google-cloud-0.9.0/spanner/internal/testutil/000077500000000000000000000000001312234511600225025ustar00rootroot00000000000000golang-google-cloud-0.9.0/spanner/internal/testutil/mockclient.go000066400000000000000000000250021312234511600251600ustar00rootroot00000000000000/* Copyright 2017 Google Inc. 
All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package testutil import ( "errors" "fmt" "reflect" "sync" "testing" "time" "golang.org/x/net/context" "github.com/golang/protobuf/ptypes/empty" proto3 "github.com/golang/protobuf/ptypes/struct" pbt "github.com/golang/protobuf/ptypes/timestamp" sppb "google.golang.org/genproto/googleapis/spanner/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // Action is a mocked RPC activity that MockCloudSpannerClient will take. type Action struct { method string err error } // NewAction creates Action objects. func NewAction(m string, e error) Action { return Action{m, e} } // MockCloudSpannerClient is a mock implementation of sppb.SpannerClient. type MockCloudSpannerClient struct { mu sync.Mutex t *testing.T // Live sessions on the client. sessions map[string]bool // Expected set of actions that will be executed by the client. actions []Action // Session ping history pings []string // Injected error, will be returned by all APIs injErr map[string]error // nice client will not fail on any request nice bool } // NewMockCloudSpannerClient creates new MockCloudSpannerClient instance. func NewMockCloudSpannerClient(t *testing.T, acts ...Action) *MockCloudSpannerClient { mc := &MockCloudSpannerClient{t: t, sessions: map[string]bool{}, injErr: map[string]error{}} mc.SetActions(acts...) return mc } // MakeNice makes this a nice mock which will not fail on any request. 
func (m *MockCloudSpannerClient) MakeNice() { m.mu.Lock() defer m.mu.Unlock() m.nice = true } // MakeStrict makes this a strict mock which will fail on any unexpected request. func (m *MockCloudSpannerClient) MakeStrict() { m.mu.Lock() defer m.mu.Unlock() m.nice = false } // InjectError injects a global error that will be returned by all APIs regardless of // the actions array. func (m *MockCloudSpannerClient) InjectError(method string, err error) { m.mu.Lock() defer m.mu.Unlock() m.injErr[method] = err } // SetActions sets the new set of expected actions to MockCloudSpannerClient. func (m *MockCloudSpannerClient) SetActions(acts ...Action) { m.mu.Lock() defer m.mu.Unlock() m.actions = []Action{} for _, act := range acts { m.actions = append(m.actions, act) } } // DumpPings dumps the ping history. func (m *MockCloudSpannerClient) DumpPings() []string { m.mu.Lock() defer m.mu.Unlock() return append([]string(nil), m.pings...) } // DumpSessions dumps the internal session table. func (m *MockCloudSpannerClient) DumpSessions() map[string]bool { m.mu.Lock() defer m.mu.Unlock() st := map[string]bool{} for s, v := range m.sessions { st[s] = v } return st } // CreateSession is a placeholder for SpannerClient.CreateSession. func (m *MockCloudSpannerClient) CreateSession(c context.Context, r *sppb.CreateSessionRequest, opts ...grpc.CallOption) (*sppb.Session, error) { m.mu.Lock() defer m.mu.Unlock() if err := m.injErr["CreateSession"]; err != nil { return nil, err } s := &sppb.Session{} if r.Database != "mockdb" { // Reject other databases return s, grpc.Errorf(codes.NotFound, fmt.Sprintf("database not found: %v", r.Database)) } // Generate & record session name. s.Name = fmt.Sprintf("mockdb-%v", time.Now().UnixNano()) m.sessions[s.Name] = true return s, nil } // GetSession is a placeholder for SpannerClient.GetSession. 
func (m *MockCloudSpannerClient) GetSession(c context.Context, r *sppb.GetSessionRequest, opts ...grpc.CallOption) (*sppb.Session, error) { m.mu.Lock() defer m.mu.Unlock() if err := m.injErr["GetSession"]; err != nil { return nil, err } m.pings = append(m.pings, r.Name) if _, ok := m.sessions[r.Name]; !ok { return nil, grpc.Errorf(codes.NotFound, fmt.Sprintf("Session not found: %v", r.Name)) } return &sppb.Session{Name: r.Name}, nil } // DeleteSession is a placeholder for SpannerClient.DeleteSession. func (m *MockCloudSpannerClient) DeleteSession(c context.Context, r *sppb.DeleteSessionRequest, opts ...grpc.CallOption) (*empty.Empty, error) { m.mu.Lock() defer m.mu.Unlock() if err := m.injErr["DeleteSession"]; err != nil { return nil, err } if _, ok := m.sessions[r.Name]; !ok { // Session not found. return &empty.Empty{}, grpc.Errorf(codes.NotFound, fmt.Sprintf("Session not found: %v", r.Name)) } // Delete session from in-memory table. delete(m.sessions, r.Name) return &empty.Empty{}, nil } // ExecuteSql is a placeholder for SpannerClient.ExecuteSql. func (m *MockCloudSpannerClient) ExecuteSql(c context.Context, r *sppb.ExecuteSqlRequest, opts ...grpc.CallOption) (*sppb.ResultSet, error) { return nil, errors.New("Unimplemented") } // ExecuteStreamingSql is a mock implementation of SpannerClient.ExecuteStreamingSql. 
// ExecuteStreamingSql consumes the next expected action and verifies that the
// incoming request exactly matches the canonical mock query (a single-use,
// strong-read transaction on session "mocksession" running "mockquery" with
// one string parameter). It never returns a working stream: it either returns
// the action's injected error or a sentinel "never succeeds" error.
func (m *MockCloudSpannerClient) ExecuteStreamingSql(c context.Context, r *sppb.ExecuteSqlRequest, opts ...grpc.CallOption) (sppb.Spanner_ExecuteStreamingSqlClient, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if err := m.injErr["ExecuteStreamingSql"]; err != nil {
		return nil, err
	}
	if len(m.actions) == 0 {
		m.t.Fatalf("unexpected ExecuteStreamingSql executed")
	}
	// Pop the next expected action; order of expected calls matters.
	act := m.actions[0]
	m.actions = m.actions[1:]
	if act.method != "ExecuteStreamingSql" {
		m.t.Fatalf("unexpected ExecuteStreamingSql call, want action: %v", act)
	}
	// The exact request the client library is expected to build for the
	// canonical mock query; compared field-for-field via reflect.DeepEqual.
	wantReq := &sppb.ExecuteSqlRequest{
		Session: "mocksession",
		Transaction: &sppb.TransactionSelector{
			Selector: &sppb.TransactionSelector_SingleUse{
				SingleUse: &sppb.TransactionOptions{
					Mode: &sppb.TransactionOptions_ReadOnly_{
						ReadOnly: &sppb.TransactionOptions_ReadOnly{
							TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{
								Strong: true,
							},
							ReturnReadTimestamp: false,
						},
					},
				},
			},
		},
		Sql: "mockquery",
		Params: &proto3.Struct{
			Fields: map[string]*proto3.Value{"var1": &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: "abc"}}},
		},
		ParamTypes: map[string]*sppb.Type{"var1": &sppb.Type{Code: sppb.TypeCode_STRING}},
	}
	if !reflect.DeepEqual(r, wantReq) {
		return nil, fmt.Errorf("got query request: %v, want: %v", r, wantReq)
	}
	if act.err != nil {
		return nil, act.err
	}
	return nil, errors.New("query never succeeds on mock client")
}

// Read is a placeholder for SpannerClient.Read.
func (m *MockCloudSpannerClient) Read(c context.Context, r *sppb.ReadRequest, opts ...grpc.CallOption) (*sppb.ResultSet, error) {
	m.t.Fatalf("Read is unimplemented")
	return nil, errors.New("Unimplemented")
}

// StreamingRead is a placeholder for SpannerClient.StreamingRead.
func (m *MockCloudSpannerClient) StreamingRead(c context.Context, r *sppb.ReadRequest, opts ...grpc.CallOption) (sppb.Spanner_StreamingReadClient, error) { m.mu.Lock() defer m.mu.Unlock() if err := m.injErr["StreamingRead"]; err != nil { return nil, err } if len(m.actions) == 0 { m.t.Fatalf("unexpected StreamingRead executed") } act := m.actions[0] m.actions = m.actions[1:] if act.method != "StreamingRead" && act.method != "StreamingIndexRead" { m.t.Fatalf("unexpected read call, want action: %v", act) } wantReq := &sppb.ReadRequest{ Session: "mocksession", Transaction: &sppb.TransactionSelector{ Selector: &sppb.TransactionSelector_SingleUse{ SingleUse: &sppb.TransactionOptions{ Mode: &sppb.TransactionOptions_ReadOnly_{ ReadOnly: &sppb.TransactionOptions_ReadOnly{ TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{ Strong: true, }, ReturnReadTimestamp: false, }, }, }, }, }, Table: "t_mock", Columns: []string{"col1", "col2"}, KeySet: &sppb.KeySet{ []*proto3.ListValue{ &proto3.ListValue{ Values: []*proto3.Value{ &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: "foo"}}, }, }, }, []*sppb.KeyRange{}, false, }, } if act.method == "StreamingIndexRead" { wantReq.Index = "idx1" } if !reflect.DeepEqual(r, wantReq) { return nil, fmt.Errorf("got query request: %v, want: %v", r, wantReq) } if act.err != nil { return nil, act.err } return nil, errors.New("read never succeeds on mock client") } // BeginTransaction is a placeholder for SpannerClient.BeginTransaction. 
func (m *MockCloudSpannerClient) BeginTransaction(c context.Context, r *sppb.BeginTransactionRequest, opts ...grpc.CallOption) (*sppb.Transaction, error) { m.mu.Lock() defer m.mu.Unlock() if !m.nice { if err := m.injErr["BeginTransaction"]; err != nil { return nil, err } if len(m.actions) == 0 { m.t.Fatalf("unexpected Begin executed") } act := m.actions[0] m.actions = m.actions[1:] if act.method != "Begin" { m.t.Fatalf("unexpected Begin call, want action: %v", act) } if act.err != nil { return nil, act.err } } resp := &sppb.Transaction{Id: []byte("transaction-1")} if _, ok := r.Options.Mode.(*sppb.TransactionOptions_ReadOnly_); ok { resp.ReadTimestamp = &pbt.Timestamp{Seconds: 3, Nanos: 4} } return resp, nil } // Commit is a placeholder for SpannerClient.Commit. func (m *MockCloudSpannerClient) Commit(c context.Context, r *sppb.CommitRequest, opts ...grpc.CallOption) (*sppb.CommitResponse, error) { m.mu.Lock() defer m.mu.Unlock() if !m.nice { if err := m.injErr["Commit"]; err != nil { return nil, err } if len(m.actions) == 0 { m.t.Fatalf("unexpected Commit executed") } act := m.actions[0] m.actions = m.actions[1:] if act.method != "Commit" { m.t.Fatalf("unexpected Commit call, want action: %v", act) } if act.err != nil { return nil, act.err } } return &sppb.CommitResponse{CommitTimestamp: &pbt.Timestamp{Seconds: 1, Nanos: 2}}, nil } // Rollback is a placeholder for SpannerClient.Rollback. 
func (m *MockCloudSpannerClient) Rollback(c context.Context, r *sppb.RollbackRequest, opts ...grpc.CallOption) (*empty.Empty, error) { m.mu.Lock() defer m.mu.Unlock() if !m.nice { if err := m.injErr["Rollback"]; err != nil { return nil, err } if len(m.actions) == 0 { m.t.Fatalf("unexpected Rollback executed") } act := m.actions[0] m.actions = m.actions[1:] if act.method != "Rollback" { m.t.Fatalf("unexpected Rollback call, want action: %v", act) } if act.err != nil { return nil, act.err } } return nil, nil } golang-google-cloud-0.9.0/spanner/internal/testutil/mockserver.go000066400000000000000000000163111312234511600252130ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package testutil import ( "encoding/binary" "errors" "fmt" "io" "net" "testing" "time" "golang.org/x/net/context" "github.com/golang/protobuf/ptypes/empty" proto3 "github.com/golang/protobuf/ptypes/struct" pbt "github.com/golang/protobuf/ptypes/timestamp" sppb "google.golang.org/genproto/googleapis/spanner/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( // KvMeta is the Metadata for mocked KV table. 
KvMeta = sppb.ResultSetMetadata{ RowType: &sppb.StructType{ Fields: []*sppb.StructType_Field{ { Name: "Key", Type: &sppb.Type{Code: sppb.TypeCode_STRING}, }, { Name: "Value", Type: &sppb.Type{Code: sppb.TypeCode_STRING}, }, }, }, } ) // MockCtlMsg encapsulates PartialResultSet/error that might be sent to // client type MockCtlMsg struct { // If ResumeToken == true, mock server will generate a row with // resume token. ResumeToken bool // If Err != nil, mock server will return error in RPC response. Err error } // MockCloudSpanner is a mock implementation of SpannerServer interface. // TODO: make MockCloudSpanner a full-fleged Cloud Spanner implementation. type MockCloudSpanner struct { s *grpc.Server t *testing.T addr string msgs chan MockCtlMsg readTs time.Time next int } // Addr returns the listening address of mock server. func (m *MockCloudSpanner) Addr() string { return m.addr } // AddMsg generates a new mocked row which can be received by client. func (m *MockCloudSpanner) AddMsg(err error, resumeToken bool) { msg := MockCtlMsg{ ResumeToken: resumeToken, Err: err, } if err == io.EOF { close(m.msgs) } else { m.msgs <- msg } } // Done signals an end to a mocked stream. func (m *MockCloudSpanner) Done() { close(m.msgs) } // CreateSession is a placeholder for SpannerServer.CreateSession. func (m *MockCloudSpanner) CreateSession(c context.Context, r *sppb.CreateSessionRequest) (*sppb.Session, error) { m.t.Fatalf("CreateSession is unimplemented") return nil, errors.New("Unimplemented") } // GetSession is a placeholder for SpannerServer.GetSession. func (m *MockCloudSpanner) GetSession(c context.Context, r *sppb.GetSessionRequest) (*sppb.Session, error) { m.t.Fatalf("GetSession is unimplemented") return nil, errors.New("Unimplemented") } // DeleteSession is a placeholder for SpannerServer.DeleteSession. 
func (m *MockCloudSpanner) DeleteSession(c context.Context, r *sppb.DeleteSessionRequest) (*empty.Empty, error) { m.t.Fatalf("DeleteSession is unimplemented") return nil, errors.New("Unimplemented") } // ExecuteSql is a placeholder for SpannerServer.ExecuteSql. func (m *MockCloudSpanner) ExecuteSql(c context.Context, r *sppb.ExecuteSqlRequest) (*sppb.ResultSet, error) { m.t.Fatalf("ExecuteSql is unimplemented") return nil, errors.New("Unimplemented") } // EncodeResumeToken return mock resume token encoding for an uint64 integer. func EncodeResumeToken(t uint64) []byte { rt := make([]byte, 16) binary.PutUvarint(rt, t) return rt } // DecodeResumeToken decodes a mock resume token into an uint64 integer. func DecodeResumeToken(t []byte) (uint64, error) { s, n := binary.Uvarint(t) if n <= 0 { return 0, fmt.Errorf("invalid resume token: %v", t) } return s, nil } // ExecuteStreamingSql is a mock implementation of SpannerServer.ExecuteStreamingSql. func (m *MockCloudSpanner) ExecuteStreamingSql(r *sppb.ExecuteSqlRequest, s sppb.Spanner_ExecuteStreamingSqlServer) error { switch r.Sql { case "SELECT * from t_unavailable": return grpc.Errorf(codes.Unavailable, "mock table unavailable") case "SELECT t.key key, t.value value FROM t_mock t": if r.ResumeToken != nil { s, err := DecodeResumeToken(r.ResumeToken) if err != nil { return err } m.next = int(s) + 1 } for { msg, more := <-m.msgs if !more { break } if msg.Err == nil { var rt []byte if msg.ResumeToken { rt = EncodeResumeToken(uint64(m.next)) } meta := KvMeta meta.Transaction = &sppb.Transaction{ ReadTimestamp: &pbt.Timestamp{ Seconds: m.readTs.Unix(), Nanos: int32(m.readTs.Nanosecond()), }, } err := s.Send(&sppb.PartialResultSet{ Metadata: &meta, Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: fmt.Sprintf("foo-%02d", m.next)}}, {Kind: &proto3.Value_StringValue{StringValue: fmt.Sprintf("bar-%02d", m.next)}}, }, ResumeToken: rt, }) m.next = m.next + 1 if err != nil { return err } continue } return 
msg.Err } return nil default: return fmt.Errorf("unsupported SQL: %v", r.Sql) } } // Read is a placeholder for SpannerServer.Read. func (m *MockCloudSpanner) Read(c context.Context, r *sppb.ReadRequest) (*sppb.ResultSet, error) { m.t.Fatalf("Read is unimplemented") return nil, errors.New("Unimplemented") } // StreamingRead is a placeholder for SpannerServer.StreamingRead. func (m *MockCloudSpanner) StreamingRead(r *sppb.ReadRequest, s sppb.Spanner_StreamingReadServer) error { m.t.Fatalf("StreamingRead is unimplemented") return errors.New("Unimplemented") } // BeginTransaction is a placeholder for SpannerServer.BeginTransaction. func (m *MockCloudSpanner) BeginTransaction(c context.Context, r *sppb.BeginTransactionRequest) (*sppb.Transaction, error) { m.t.Fatalf("BeginTransaction is unimplemented") return nil, errors.New("Unimplemented") } // Commit is a placeholder for SpannerServer.Commit. func (m *MockCloudSpanner) Commit(c context.Context, r *sppb.CommitRequest) (*sppb.CommitResponse, error) { m.t.Fatalf("Commit is unimplemented") return nil, errors.New("Unimplemented") } // Rollback is a placeholder for SpannerServer.Rollback. func (m *MockCloudSpanner) Rollback(c context.Context, r *sppb.RollbackRequest) (*empty.Empty, error) { m.t.Fatalf("Rollback is unimplemented") return nil, errors.New("Unimplemented") } // Serve runs a MockCloudSpanner listening on a random localhost address. func (m *MockCloudSpanner) Serve() { m.s = grpc.NewServer() if m.addr == "" { m.addr = "localhost:0" } lis, err := net.Listen("tcp", m.addr) if err != nil { m.t.Fatalf("Failed to listen: %v", err) } go m.s.Serve(lis) _, port, err := net.SplitHostPort(lis.Addr().String()) if err != nil { m.t.Fatalf("Failed to parse listener address: %v", err) } sppb.RegisterSpannerServer(m.s, m) m.addr = "localhost:" + port } // Stop terminates MockCloudSpanner and closes the serving port. func (m *MockCloudSpanner) Stop() { m.s.Stop() } // NewMockCloudSpanner creates a new MockCloudSpanner instance. 
func NewMockCloudSpanner(t *testing.T, ts time.Time) *MockCloudSpanner { mcs := &MockCloudSpanner{ t: t, msgs: make(chan MockCtlMsg, 1000), readTs: ts, } return mcs } golang-google-cloud-0.9.0/spanner/key.go000066400000000000000000000300461312234511600201330ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "bytes" "fmt" "time" "google.golang.org/grpc/codes" "cloud.google.com/go/civil" proto3 "github.com/golang/protobuf/ptypes/struct" sppb "google.golang.org/genproto/googleapis/spanner/v1" ) // A Key can be either a Cloud Spanner row's primary key or a secondary index key. // It is essentially an interface{} array, which represents a set of Cloud Spanner // columns. A Key type has the following usages: // // - Used as primary key which uniquely identifies a Cloud Spanner row. // - Used as secondary index key which maps to a set of Cloud Spanner rows // indexed under it. // - Used as endpoints of primary key/secondary index ranges, // see also the KeyRange type. // // Rows that are identified by the Key type are outputs of read operation or targets of // delete operation in a mutation. Note that for Insert/Update/InsertOrUpdate/Update // mutation types, although they don't require a primary key explicitly, the column list // provided must contain enough columns that can comprise a primary key. // // Keys are easy to construct. 
For example, suppose you have a table with a // primary key of username and product ID. To make a key for this table: // // key := spanner.Key{"john", 16} // // See the description of Row and Mutation types for how Go types are // mapped to Cloud Spanner types. For convenience, Key type supports a wide range // of Go types: // - int, int8, int16, int32, int64, and NullInt64 are mapped to Cloud Spanner's INT64 type. // - uint8, uint16 and uint32 are also mapped to Cloud Spanner's INT64 type. // - float32, float64, NullFloat64 are mapped to Cloud Spanner's FLOAT64 type. // - bool and NullBool are mapped to Cloud Spanner's BOOL type. // - []byte is mapped to Cloud Spanner's BYTES type. // - string and NullString are mapped to Cloud Spanner's STRING type. // - time.Time and NullTime are mapped to Cloud Spanner's TIMESTAMP type. // - civil.Date and NullDate are mapped to Cloud Spanner's DATE type. type Key []interface{} // errInvdKeyPartType returns error for unsupported key part type. func errInvdKeyPartType(part interface{}) error { return spannerErrorf(codes.InvalidArgument, "key part has unsupported type %T", part) } // keyPartValue converts a part of the Key (which is a valid Cloud Spanner type) // into a proto3.Value. Used for encoding Key type into protobuf. 
func keyPartValue(part interface{}) (pb *proto3.Value, err error) { switch v := part.(type) { case int: pb, _, err = encodeValue(int64(v)) case int8: pb, _, err = encodeValue(int64(v)) case int16: pb, _, err = encodeValue(int64(v)) case int32: pb, _, err = encodeValue(int64(v)) case uint8: pb, _, err = encodeValue(int64(v)) case uint16: pb, _, err = encodeValue(int64(v)) case uint32: pb, _, err = encodeValue(int64(v)) case float32: pb, _, err = encodeValue(float64(v)) case int64, float64, NullInt64, NullFloat64, bool, NullBool, []byte, string, NullString, time.Time, civil.Date, NullTime, NullDate: pb, _, err = encodeValue(v) default: return nil, errInvdKeyPartType(v) } return pb, err } // proto converts a spanner.Key into a proto3.ListValue. func (key Key) proto() (*proto3.ListValue, error) { lv := &proto3.ListValue{} lv.Values = make([]*proto3.Value, 0, len(key)) for _, part := range key { v, err := keyPartValue(part) if err != nil { return nil, err } lv.Values = append(lv.Values, v) } return lv, nil } // keySetProto lets a single Key act as a KeySet. func (key Key) keySetProto() (*sppb.KeySet, error) { kp, err := key.proto() if err != nil { return nil, err } return &sppb.KeySet{Keys: []*proto3.ListValue{kp}}, nil } // String implements fmt.Stringer for Key. For string, []byte and NullString, it // prints the uninterpreted bytes of their contents, leaving caller with the // opportunity to escape the output. func (key Key) String() string { b := &bytes.Buffer{} fmt.Fprint(b, "(") for i, part := range []interface{}(key) { if i != 0 { fmt.Fprint(b, ",") } switch v := part.(type) { case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool: // Use %v to print numeric types and bool. fmt.Fprintf(b, "%v", v) case string: fmt.Fprintf(b, "%q", v) case []byte: if v != nil { fmt.Fprintf(b, "%q", v) } else { fmt.Fprint(b, "") } case NullInt64, NullFloat64, NullBool, NullString, NullTime, NullDate: // The above types implement fmt.Stringer. 
fmt.Fprintf(b, "%s", v) case civil.Date: fmt.Fprintf(b, "%q", v) case time.Time: fmt.Fprintf(b, "%q", v.Format(time.RFC3339Nano)) default: fmt.Fprintf(b, "%v", v) } } fmt.Fprint(b, ")") return b.String() } // AsPrefix returns a KeyRange for all keys where k is the prefix. func (k Key) AsPrefix() KeyRange { return KeyRange{ Start: k, End: k, Kind: ClosedClosed, } } // KeyRangeKind describes the kind of interval represented by a KeyRange: // whether it is open or closed on the left and right. type KeyRangeKind int const ( // ClosedOpen is closed on the left and open on the right: the Start // key is included, the End key is excluded. ClosedOpen KeyRangeKind = iota // ClosedClosed is closed on the left and the right: both keys are included. ClosedClosed // OpenClosed is open on the left and closed on the right: the Start // key is excluded, the End key is included. OpenClosed // OpenOpen is open on the left and the right: neither key is included. OpenOpen ) // A KeyRange represents a range of rows in a table or index. // // A range has a Start key and an End key. IncludeStart and IncludeEnd // indicate whether the Start and End keys are included in the range. // // For example, consider the following table definition: // // CREATE TABLE UserEvents ( // UserName STRING(MAX), // EventDate STRING(10), // ) PRIMARY KEY(UserName, EventDate); // // The following keys name rows in this table: // // spanner.Key{"Bob", "2014-09-23"} // spanner.Key{"Alfred", "2015-06-12"} // // Since the UserEvents table's PRIMARY KEY clause names two columns, each // UserEvents key has two elements; the first is the UserName, and the second // is the EventDate. // // Key ranges with multiple components are interpreted lexicographically by // component using the table or index key's declared sort order. 
For example, // the following range returns all events for user "Bob" that occurred in the // year 2015: // // spanner.KeyRange{ // Start: spanner.Key{"Bob", "2015-01-01"}, // End: spanner.Key{"Bob", "2015-12-31"}, // Kind: ClosedClosed, // } // // Start and end keys can omit trailing key components. This affects the // inclusion and exclusion of rows that exactly match the provided key // components: if IncludeStart is true, then rows that exactly match the // provided components of the Start key are included; if IncludeStart is false // then rows that exactly match are not included. IncludeEnd and End key // behave in the same fashion. // // For example, the following range includes all events for "Bob" that occurred // during and after the year 2000: // // spanner.KeyRange{ // Start: spanner.Key{"Bob", "2000-01-01"}, // End: spanner.Key{"Bob"}, // Kind: ClosedClosed, // } // // The next example retrieves all events for "Bob": // // spanner.Key{"Bob"}.AsPrefix() // // To retrieve events before the year 2000: // // spanner.KeyRange{ // Start: spanner.Key{"Bob"}, // End: spanner.Key{"Bob", "2000-01-01"}, // Kind: ClosedOpen, // } // // Although we specified a Kind for this KeyRange, we didn't need to, because // the default is ClosedOpen. In later examples we'll omit Kind if it is // ClosedOpen. // // The following range includes all rows in a table or under a // index: // // spanner.AllKeys() // // This range returns all users whose UserName begins with any // character from A to C: // // spanner.KeyRange{ // Start: spanner.Key{"A"}, // End: spanner.Key{"D"}, // } // // This range returns all users whose UserName begins with B: // // spanner.KeyRange{ // Start: spanner.Key{"B"}, // End: spanner.Key{"C"}, // } // // Key ranges honor column sort order. For example, suppose a table is defined // as follows: // // CREATE TABLE DescendingSortedTable { // Key INT64, // ... 
// ) PRIMARY KEY(Key DESC); // // The following range retrieves all rows with key values between 1 and 100 // inclusive: // // spanner.KeyRange{ // Start: spanner.Key{100}, // End: spanner.Key{1}, // Kind: ClosedClosed, // } // // Note that 100 is passed as the start, and 1 is passed as the end, because // Key is a descending column in the schema. type KeyRange struct { // Start specifies the left boundary of the key range; End specifies // the right boundary of the key range. Start, End Key // Kind describes whether the boundaries of the key range include // their keys. Kind KeyRangeKind } // String implements fmt.Stringer for KeyRange type. func (r KeyRange) String() string { var left, right string switch r.Kind { case ClosedClosed: left, right = "[", "]" case ClosedOpen: left, right = "[", ")" case OpenClosed: left, right = "(", "]" case OpenOpen: left, right = "(", ")" default: left, right = "?", "?" } return fmt.Sprintf("%s%s,%s%s", left, r.Start, r.End, right) } // proto converts KeyRange into sppb.KeyRange. func (r KeyRange) proto() (*sppb.KeyRange, error) { var err error var start, end *proto3.ListValue pb := &sppb.KeyRange{} if start, err = r.Start.proto(); err != nil { return nil, err } if end, err = r.End.proto(); err != nil { return nil, err } if r.Kind == ClosedClosed || r.Kind == ClosedOpen { pb.StartKeyType = &sppb.KeyRange_StartClosed{StartClosed: start} } else { pb.StartKeyType = &sppb.KeyRange_StartOpen{StartOpen: start} } if r.Kind == ClosedClosed || r.Kind == OpenClosed { pb.EndKeyType = &sppb.KeyRange_EndClosed{EndClosed: end} } else { pb.EndKeyType = &sppb.KeyRange_EndOpen{EndOpen: end} } return pb, nil } // keySetProto lets a KeyRange act as a KeySet. func (r KeyRange) keySetProto() (*sppb.KeySet, error) { rp, err := r.proto() if err != nil { return nil, err } return &sppb.KeySet{Ranges: []*sppb.KeyRange{rp}}, nil } // A KeySet defines a collection of Cloud Spanner keys and/or key ranges. 
All the // keys are expected to be in the same table or index. The keys need not be sorted in // any particular way. // // An individual Key can act as a KeySet, as can a KeyRange. Use the KeySets function // to create a KeySet consisting of multiple Keys and KeyRanges. To obtain an empty // KeySet, call KeySets with no arguments. // // If the same key is specified multiple times in the set (for example if two // ranges, two keys, or a key and a range overlap), the Cloud Spanner backend behaves // as if the key were only specified once. type KeySet interface { keySetProto() (*sppb.KeySet, error) } // AllKeys returns a KeySet that represents all Keys of a table or a index. func AllKeys() KeySet { return all{} } type all struct{} func (all) keySetProto() (*sppb.KeySet, error) { return &sppb.KeySet{All: true}, nil } // KeySets returns the union of the KeySets. If any of the KeySets is AllKeys, then // the resulting KeySet will be equivalent to AllKeys. func KeySets(keySets ...KeySet) KeySet { u := make(union, len(keySets)) copy(u, keySets) return u } type union []KeySet func (u union) keySetProto() (*sppb.KeySet, error) { upb := &sppb.KeySet{} for _, ks := range u { pb, err := ks.keySetProto() if err != nil { return nil, err } if pb.All { return pb, nil } upb.Keys = append(upb.Keys, pb.Keys...) upb.Ranges = append(upb.Ranges, pb.Ranges...) } return upb, nil } golang-google-cloud-0.9.0/spanner/key_test.go000066400000000000000000000225271312234511600211770ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "reflect" "testing" "time" "cloud.google.com/go/civil" proto3 "github.com/golang/protobuf/ptypes/struct" sppb "google.golang.org/genproto/googleapis/spanner/v1" ) // Test Key.String() and Key.proto(). func TestKey(t *testing.T) { tm, _ := time.Parse(time.RFC3339Nano, "2016-11-15T15:04:05.999999999Z") dt, _ := civil.ParseDate("2016-11-15") for _, test := range []struct { k Key wantProto *proto3.ListValue wantStr string }{ { k: Key{int(1)}, wantProto: listValueProto(stringProto("1")), wantStr: "(1)", }, { k: Key{int8(1)}, wantProto: listValueProto(stringProto("1")), wantStr: "(1)", }, { k: Key{int16(1)}, wantProto: listValueProto(stringProto("1")), wantStr: "(1)", }, { k: Key{int32(1)}, wantProto: listValueProto(stringProto("1")), wantStr: "(1)", }, { k: Key{int64(1)}, wantProto: listValueProto(stringProto("1")), wantStr: "(1)", }, { k: Key{uint8(1)}, wantProto: listValueProto(stringProto("1")), wantStr: "(1)", }, { k: Key{uint16(1)}, wantProto: listValueProto(stringProto("1")), wantStr: "(1)", }, { k: Key{uint32(1)}, wantProto: listValueProto(stringProto("1")), wantStr: "(1)", }, { k: Key{true}, wantProto: listValueProto(boolProto(true)), wantStr: "(true)", }, { k: Key{float32(1.5)}, wantProto: listValueProto(floatProto(1.5)), wantStr: "(1.5)", }, { k: Key{float64(1.5)}, wantProto: listValueProto(floatProto(1.5)), wantStr: "(1.5)", }, { k: Key{"value"}, wantProto: listValueProto(stringProto("value")), wantStr: `("value")`, }, { k: Key{[]byte(nil)}, wantProto: listValueProto(nullProto()), wantStr: "()", }, { k: Key{[]byte{}}, wantProto: listValueProto(stringProto("")), wantStr: `("")`, }, { k: Key{tm}, wantProto: listValueProto(stringProto("2016-11-15T15:04:05.999999999Z")), wantStr: `("2016-11-15T15:04:05.999999999Z")`, }, {k: Key{dt}, wantProto: listValueProto(stringProto("2016-11-15")), wantStr: `("2016-11-15")`, }, { k: 
Key{[]byte("value")}, wantProto: listValueProto(bytesProto([]byte("value"))), wantStr: `("value")`, }, { k: Key{NullInt64{1, true}}, wantProto: listValueProto(stringProto("1")), wantStr: "(1)", }, { k: Key{NullInt64{2, false}}, wantProto: listValueProto(nullProto()), wantStr: "()", }, { k: Key{NullFloat64{1.5, true}}, wantProto: listValueProto(floatProto(1.5)), wantStr: "(1.5)", }, { k: Key{NullFloat64{2.0, false}}, wantProto: listValueProto(nullProto()), wantStr: "()", }, { k: Key{NullBool{true, true}}, wantProto: listValueProto(boolProto(true)), wantStr: "(true)", }, { k: Key{NullBool{true, false}}, wantProto: listValueProto(nullProto()), wantStr: "()", }, { k: Key{NullString{"value", true}}, wantProto: listValueProto(stringProto("value")), wantStr: `("value")`, }, { k: Key{NullString{"value", false}}, wantProto: listValueProto(nullProto()), wantStr: "()", }, { k: Key{NullTime{tm, true}}, wantProto: listValueProto(timeProto(tm)), wantStr: `("2016-11-15T15:04:05.999999999Z")`, }, { k: Key{NullTime{time.Now(), false}}, wantProto: listValueProto(nullProto()), wantStr: "()", }, { k: Key{NullDate{dt, true}}, wantProto: listValueProto(dateProto(dt)), wantStr: `("2016-11-15")`, }, { k: Key{NullDate{civil.Date{}, false}}, wantProto: listValueProto(nullProto()), wantStr: "()", }, { k: Key{int(1), NullString{"value", false}, "value", 1.5, true}, wantProto: listValueProto(stringProto("1"), nullProto(), stringProto("value"), floatProto(1.5), boolProto(true)), wantStr: `(1,,"value",1.5,true)`, }, } { if got := test.k.String(); got != test.wantStr { t.Errorf("%v.String() = %v, want %v", test.k, got, test.wantStr) } gotProto, err := test.k.proto() if err != nil { t.Errorf("%v.proto() returns error %v; want nil error", test.k, err) } if !reflect.DeepEqual(gotProto, test.wantProto) { t.Errorf("%v.proto() = \n%v\nwant:\n%v", test.k, gotProto, test.wantProto) } } } // Test KeyRange.String() and KeyRange.proto(). 
func TestKeyRange(t *testing.T) { for _, test := range []struct { kr KeyRange wantProto *sppb.KeyRange wantStr string }{ { kr: KeyRange{Key{"A"}, Key{"D"}, OpenOpen}, wantProto: &sppb.KeyRange{ &sppb.KeyRange_StartOpen{listValueProto(stringProto("A"))}, &sppb.KeyRange_EndOpen{listValueProto(stringProto("D"))}, }, wantStr: `(("A"),("D"))`, }, { kr: KeyRange{Key{1}, Key{10}, OpenClosed}, wantProto: &sppb.KeyRange{ &sppb.KeyRange_StartOpen{listValueProto(stringProto("1"))}, &sppb.KeyRange_EndClosed{listValueProto(stringProto("10"))}, }, wantStr: "((1),(10)]", }, { kr: KeyRange{Key{1.5, 2.1, 0.2}, Key{1.9, 0.7}, ClosedOpen}, wantProto: &sppb.KeyRange{ &sppb.KeyRange_StartClosed{listValueProto(floatProto(1.5), floatProto(2.1), floatProto(0.2))}, &sppb.KeyRange_EndOpen{listValueProto(floatProto(1.9), floatProto(0.7))}, }, wantStr: "[(1.5,2.1,0.2),(1.9,0.7))", }, { kr: KeyRange{Key{NullInt64{1, true}}, Key{10}, ClosedClosed}, wantProto: &sppb.KeyRange{ &sppb.KeyRange_StartClosed{listValueProto(stringProto("1"))}, &sppb.KeyRange_EndClosed{listValueProto(stringProto("10"))}, }, wantStr: "[(1),(10)]", }, } { if got := test.kr.String(); got != test.wantStr { t.Errorf("%v.String() = %v, want %v", test.kr, got, test.wantStr) } gotProto, err := test.kr.proto() if err != nil { t.Errorf("%v.proto() returns error %v; want nil error", test.kr, err) } if !reflect.DeepEqual(gotProto, test.wantProto) { t.Errorf("%v.proto() = \n%v\nwant:\n%v", test.kr, gotProto.String(), test.wantProto.String()) } } } func TestPrefixRange(t *testing.T) { got := Key{1}.AsPrefix() want := KeyRange{Start: Key{1}, End: Key{1}, Kind: ClosedClosed} if !reflect.DeepEqual(got, want) { t.Errorf("got %v, want %v", got, want) } } func TestKeySets(t *testing.T) { int1 := intProto(1) int2 := intProto(2) int3 := intProto(3) int4 := intProto(4) for i, test := range []struct { ks KeySet wantProto *sppb.KeySet }{ { KeySets(), &sppb.KeySet{}, }, { Key{4}, &sppb.KeySet{ Keys: []*proto3.ListValue{listValueProto(int4)}, }, 
}, { AllKeys(), &sppb.KeySet{All: true}, }, { KeySets(Key{1, 2}, Key{3, 4}), &sppb.KeySet{ Keys: []*proto3.ListValue{ listValueProto(int1, int2), listValueProto(int3, int4), }, }, }, { KeyRange{Key{1}, Key{2}, ClosedOpen}, &sppb.KeySet{Ranges: []*sppb.KeyRange{ &sppb.KeyRange{ &sppb.KeyRange_StartClosed{listValueProto(int1)}, &sppb.KeyRange_EndOpen{listValueProto(int2)}, }, }}, }, { Key{2}.AsPrefix(), &sppb.KeySet{Ranges: []*sppb.KeyRange{ &sppb.KeyRange{ &sppb.KeyRange_StartClosed{listValueProto(int2)}, &sppb.KeyRange_EndClosed{listValueProto(int2)}, }, }}, }, { KeySets( KeyRange{Key{1}, Key{2}, ClosedClosed}, KeyRange{Key{3}, Key{4}, OpenClosed}, ), &sppb.KeySet{ Ranges: []*sppb.KeyRange{ &sppb.KeyRange{ &sppb.KeyRange_StartClosed{listValueProto(int1)}, &sppb.KeyRange_EndClosed{listValueProto(int2)}, }, &sppb.KeyRange{ &sppb.KeyRange_StartOpen{listValueProto(int3)}, &sppb.KeyRange_EndClosed{listValueProto(int4)}, }, }, }, }, { KeySets( Key{1}, KeyRange{Key{2}, Key{3}, ClosedClosed}, KeyRange{Key{4}, Key{5}, OpenClosed}, KeySets(), Key{6}), &sppb.KeySet{ Keys: []*proto3.ListValue{ listValueProto(int1), listValueProto(intProto(6)), }, Ranges: []*sppb.KeyRange{ &sppb.KeyRange{ &sppb.KeyRange_StartClosed{listValueProto(int2)}, &sppb.KeyRange_EndClosed{listValueProto(int3)}, }, &sppb.KeyRange{ &sppb.KeyRange_StartOpen{listValueProto(int4)}, &sppb.KeyRange_EndClosed{listValueProto(intProto(5))}, }, }, }, }, { KeySets( Key{1}, KeyRange{Key{2}, Key{3}, ClosedClosed}, AllKeys(), KeyRange{Key{4}, Key{5}, OpenClosed}, Key{6}), &sppb.KeySet{All: true}, }, } { gotProto, err := test.ks.keySetProto() if err != nil { t.Errorf("#%d: %v.proto() returns error %v; want nil error", i, test.ks, err) } if !reflect.DeepEqual(gotProto, test.wantProto) { t.Errorf("#%d: %v.proto() = \n%v\nwant:\n%v", i, test.ks, gotProto.String(), test.wantProto.String()) } } } golang-google-cloud-0.9.0/spanner/mutation.go000066400000000000000000000333761312234511600212140ustar00rootroot00000000000000/* 
Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "reflect" proto3 "github.com/golang/protobuf/ptypes/struct" sppb "google.golang.org/genproto/googleapis/spanner/v1" "google.golang.org/grpc/codes" ) // op is the mutation operation. type op int const ( // opDelete removes a row from a table. Succeeds whether or not the // key was present. opDelete op = iota // opInsert inserts a row into a table. If the row already exists, the // write or transaction fails. opInsert // opInsertOrUpdate inserts a row into a table. If the row already // exists, it updates it instead. Any column values not explicitly // written are preserved. opInsertOrUpdate // opReplace inserts a row into a table, deleting any existing row. // Unlike InsertOrUpdate, this means any values not explicitly written // become NULL. opReplace // opUpdate updates a row in a table. If the row does not already // exist, the write or transaction fails. opUpdate ) // A Mutation describes a modification to one or more Cloud Spanner rows. The // mutation represents an insert, update, delete, etc on a table. // // Many mutations can be applied in a single atomic commit. For purposes of // constraint checking (such as foreign key constraints), the operations can be // viewed as applying in same order as the mutations are supplied in (so that // e.g., a row and its logical "child" can be inserted in the same commit). // // - The Apply function applies series of mutations. 
// - A ReadWriteTransaction applies a series of mutations as part of an // atomic read-modify-write operation. // Example: // // m := spanner.Insert("User", // []string{"user_id", "profile"}, // []interface{}{UserID, profile}) // _, err := client.Apply(ctx, []*spanner.Mutation{m}) // // In this example, we insert a new row into the User table. The primary key // for the new row is UserID (presuming that "user_id" has been declared as the // primary key of the "User" table). // // Updating a row // // Changing the values of columns in an existing row is very similar to // inserting a new row: // // m := spanner.Update("User", // []string{"user_id", "profile"}, // []interface{}{UserID, profile}) // _, err := client.Apply(ctx, []*spanner.Mutation{m}) // // Deleting a row // // To delete a row, use spanner.Delete: // // m := spanner.Delete("User", spanner.Key{UserId}) // _, err := client.Apply(ctx, []*spanner.Mutation{m}) // // spanner.Delete accepts a KeySet, so you can also pass in a KeyRange, or use the // spanner.KeySets function to build any combination of Keys and KeyRanges. // // Note that deleting a row in a table may also delete rows from other tables // if cascading deletes are specified in those tables' schemas. Delete does // nothing if the named row does not exist (does not yield an error). 
// // Deleting a field // // To delete/clear a field within a row, use spanner.Update with the value nil: // // m := spanner.Update("User", // []string{"user_id", "profile"}, // []interface{}{UserID, nil}) // _, err := client.Apply(ctx, []*spanner.Mutation{m}) // // The valid Go types and their corresponding Cloud Spanner types that can be // used in the Insert/Update/InsertOrUpdate functions are: // // string, NullString - STRING // []string, []NullString - STRING ARRAY // []byte - BYTES // [][]byte - BYTES ARRAY // int, int64, NullInt64 - INT64 // []int, []int64, []NullInt64 - INT64 ARRAY // bool, NullBool - BOOL // []bool, []NullBool - BOOL ARRAY // float64, NullFloat64 - FLOAT64 // []float64, []NullFloat64 - FLOAT64 ARRAY // time.Time, NullTime - TIMESTAMP // []time.Time, []NullTime - TIMESTAMP ARRAY // Date, NullDate - DATE // []Date, []NullDate - DATE ARRAY // // To compare two Mutations for testing purposes, use reflect.DeepEqual. type Mutation struct { // op is the operation type of the mutation. // See documentation for spanner.op for more details. op op // Table is the name of the taget table to be modified. table string // keySet is a set of primary keys that names the rows // in a delete operation. keySet KeySet // columns names the set of columns that are going to be // modified by Insert, InsertOrUpdate, Replace or Update // operations. columns []string // values specifies the new values for the target columns // named by Columns. values []interface{} } // mapToMutationParams converts Go map into mutation parameters. func mapToMutationParams(in map[string]interface{}) ([]string, []interface{}) { cols := []string{} vals := []interface{}{} for k, v := range in { cols = append(cols, k) vals = append(vals, v) } return cols, vals } // errNotStruct returns error for not getting a go struct type. 
func errNotStruct(in interface{}) error { return spannerErrorf(codes.InvalidArgument, "%T is not a go struct type", in) } // structToMutationParams converts Go struct into mutation parameters. // If the input is not a valid Go struct type, structToMutationParams // returns error. func structToMutationParams(in interface{}) ([]string, []interface{}, error) { if in == nil { return nil, nil, errNotStruct(in) } v := reflect.ValueOf(in) t := v.Type() if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { // t is a pointer to a struct. if v.IsNil() { // Return empty results. return nil, nil, nil } // Get the struct value that in points to. v = v.Elem() t = t.Elem() } if t.Kind() != reflect.Struct { return nil, nil, errNotStruct(in) } fields, err := fieldCache.Fields(t) if err != nil { return nil, nil, toSpannerError(err) } var cols []string var vals []interface{} for _, f := range fields { cols = append(cols, f.Name) vals = append(vals, v.FieldByIndex(f.Index).Interface()) } return cols, vals, nil } // Insert returns a Mutation to insert a row into a table. If the row already // exists, the write or transaction fails. func Insert(table string, cols []string, vals []interface{}) *Mutation { return &Mutation{ op: opInsert, table: table, columns: cols, values: vals, } } // InsertMap returns a Mutation to insert a row into a table, specified by // a map of column name to value. If the row already exists, the write or // transaction fails. func InsertMap(table string, in map[string]interface{}) *Mutation { cols, vals := mapToMutationParams(in) return Insert(table, cols, vals) } // InsertStruct returns a Mutation to insert a row into a table, specified by // a Go struct. If the row already exists, the write or transaction fails. // // The in argument must be a struct or a pointer to a struct. Its exported // fields specify the column names and values. 
Use a field tag like "spanner:name" // to provide an alternative column name, or use "spanner:-" to ignore the field. func InsertStruct(table string, in interface{}) (*Mutation, error) { cols, vals, err := structToMutationParams(in) if err != nil { return nil, err } return Insert(table, cols, vals), nil } // Update returns a Mutation to update a row in a table. If the row does not // already exist, the write or transaction fails. func Update(table string, cols []string, vals []interface{}) *Mutation { return &Mutation{ op: opUpdate, table: table, columns: cols, values: vals, } } // UpdateMap returns a Mutation to update a row in a table, specified by // a map of column to value. If the row does not already exist, the write or // transaction fails. func UpdateMap(table string, in map[string]interface{}) *Mutation { cols, vals := mapToMutationParams(in) return Update(table, cols, vals) } // UpdateStruct returns a Mutation to update a row in a table, specified by a Go // struct. If the row does not already exist, the write or transaction fails. func UpdateStruct(table string, in interface{}) (*Mutation, error) { cols, vals, err := structToMutationParams(in) if err != nil { return nil, err } return Update(table, cols, vals), nil } // InsertOrUpdate returns a Mutation to insert a row into a table. If the row // already exists, it updates it instead. Any column values not explicitly // written are preserved. // // For a similar example, See Update. func InsertOrUpdate(table string, cols []string, vals []interface{}) *Mutation { return &Mutation{ op: opInsertOrUpdate, table: table, columns: cols, values: vals, } } // InsertOrUpdateMap returns a Mutation to insert a row into a table, // specified by a map of column to value. If the row already exists, it // updates it instead. Any column values not explicitly written are preserved. // // For a similar example, See UpdateMap. 
func InsertOrUpdateMap(table string, in map[string]interface{}) *Mutation { cols, vals := mapToMutationParams(in) return InsertOrUpdate(table, cols, vals) } // InsertOrUpdateStruct returns a Mutation to insert a row into a table, // specified by a Go struct. If the row already exists, it updates it instead. // Any column values not explicitly written are preserved. // // The in argument must be a struct or a pointer to a struct. Its exported // fields specify the column names and values. Use a field tag like "spanner:name" // to provide an alternative column name, or use "spanner:-" to ignore the field. // // For a similar example, See UpdateStruct. func InsertOrUpdateStruct(table string, in interface{}) (*Mutation, error) { cols, vals, err := structToMutationParams(in) if err != nil { return nil, err } return InsertOrUpdate(table, cols, vals), nil } // Replace returns a Mutation to insert a row into a table, deleting any // existing row. Unlike InsertOrUpdate, this means any values not explicitly // written become NULL. // // For a similar example, See Update. func Replace(table string, cols []string, vals []interface{}) *Mutation { return &Mutation{ op: opReplace, table: table, columns: cols, values: vals, } } // ReplaceMap returns a Mutation to insert a row into a table, deleting any // existing row. Unlike InsertOrUpdateMap, this means any values not explicitly // written become NULL. The row is specified by a map of column to value. // // For a similar example, See UpdateMap. func ReplaceMap(table string, in map[string]interface{}) *Mutation { cols, vals := mapToMutationParams(in) return Replace(table, cols, vals) } // ReplaceStruct returns a Mutation to insert a row into a table, deleting any // existing row. Unlike InsertOrUpdateMap, this means any values not explicitly // written become NULL. The row is specified by a Go struct. // // The in argument must be a struct or a pointer to a struct. Its exported // fields specify the column names and values. 
Use a field tag like "spanner:name" // to provide an alternative column name, or use "spanner:-" to ignore the field. // // For a similar example, See UpdateStruct. func ReplaceStruct(table string, in interface{}) (*Mutation, error) { cols, vals, err := structToMutationParams(in) if err != nil { return nil, err } return Replace(table, cols, vals), nil } // Delete removes the rows described by the KeySet from the table. It succeeds // whether or not the keys were present. func Delete(table string, ks KeySet) *Mutation { return &Mutation{ op: opDelete, table: table, keySet: ks, } } // prepareWrite generates sppb.Mutation_Write from table name, column names // and new column values. func prepareWrite(table string, columns []string, vals []interface{}) (*sppb.Mutation_Write, error) { v, err := encodeValueArray(vals) if err != nil { return nil, err } return &sppb.Mutation_Write{ Table: table, Columns: columns, Values: []*proto3.ListValue{v}, }, nil } // errInvdMutationOp returns error for unrecognized mutation operation. func errInvdMutationOp(m Mutation) error { return spannerErrorf(codes.InvalidArgument, "Unknown op type: %d", m.op) } // proto converts spanner.Mutation to sppb.Mutation, in preparation to send // RPCs. 
func (m Mutation) proto() (*sppb.Mutation, error) { var pb *sppb.Mutation switch m.op { case opDelete: var kp *sppb.KeySet if m.keySet != nil { var err error kp, err = m.keySet.keySetProto() if err != nil { return nil, err } } pb = &sppb.Mutation{ Operation: &sppb.Mutation_Delete_{ Delete: &sppb.Mutation_Delete{ Table: m.table, KeySet: kp, }, }, } case opInsert: w, err := prepareWrite(m.table, m.columns, m.values) if err != nil { return nil, err } pb = &sppb.Mutation{Operation: &sppb.Mutation_Insert{Insert: w}} case opInsertOrUpdate: w, err := prepareWrite(m.table, m.columns, m.values) if err != nil { return nil, err } pb = &sppb.Mutation{Operation: &sppb.Mutation_InsertOrUpdate{InsertOrUpdate: w}} case opReplace: w, err := prepareWrite(m.table, m.columns, m.values) if err != nil { return nil, err } pb = &sppb.Mutation{Operation: &sppb.Mutation_Replace{Replace: w}} case opUpdate: w, err := prepareWrite(m.table, m.columns, m.values) if err != nil { return nil, err } pb = &sppb.Mutation{Operation: &sppb.Mutation_Update{Update: w}} default: return nil, errInvdMutationOp(m) } return pb, nil } // mutationsProto turns a spanner.Mutation array into a sppb.Mutation array, // it is convenient for sending batch mutations to Cloud Spanner. func mutationsProto(ms []*Mutation) ([]*sppb.Mutation, error) { l := make([]*sppb.Mutation, 0, len(ms)) for _, m := range ms { pb, err := m.proto() if err != nil { return nil, err } l = append(l, pb) } return l, nil } golang-google-cloud-0.9.0/spanner/mutation_test.go000066400000000000000000000364021312234511600222440ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "reflect" "sort" "strings" "testing" proto3 "github.com/golang/protobuf/ptypes/struct" sppb "google.golang.org/genproto/googleapis/spanner/v1" ) // keysetProto returns protobuf encoding of valid spanner.KeySet. func keysetProto(t *testing.T, ks KeySet) *sppb.KeySet { k, err := ks.keySetProto() if err != nil { t.Fatalf("cannot convert keyset %v to protobuf: %v", ks, err) } return k } // Test encoding from spanner.Mutation to protobuf. func TestMutationToProto(t *testing.T) { for i, test := range []struct { m *Mutation want *sppb.Mutation }{ // Delete Mutation { &Mutation{opDelete, "t_foo", Key{"foo"}, nil, nil}, &sppb.Mutation{ Operation: &sppb.Mutation_Delete_{ Delete: &sppb.Mutation_Delete{ Table: "t_foo", KeySet: keysetProto(t, Key{"foo"}), }, }, }, }, // Insert Mutation { &Mutation{opInsert, "t_foo", KeySets(), []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, &sppb.Mutation{ Operation: &sppb.Mutation_Insert{ Insert: &sppb.Mutation_Write{ Table: "t_foo", Columns: []string{"col1", "col2"}, Values: []*proto3.ListValue{ &proto3.ListValue{ Values: []*proto3.Value{intProto(1), intProto(2)}, }, }, }, }, }, }, // InsertOrUpdate Mutation { &Mutation{opInsertOrUpdate, "t_foo", KeySets(), []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, &sppb.Mutation{ Operation: &sppb.Mutation_InsertOrUpdate{ InsertOrUpdate: &sppb.Mutation_Write{ Table: "t_foo", Columns: []string{"col1", "col2"}, Values: []*proto3.ListValue{ &proto3.ListValue{ Values: []*proto3.Value{floatProto(1.0), floatProto(2.0)}, }, }, }, }, }, }, // Replace Mutation { &Mutation{opReplace, 
"t_foo", KeySets(), []string{"col1", "col2"}, []interface{}{"one", 2.0}}, &sppb.Mutation{ Operation: &sppb.Mutation_Replace{ Replace: &sppb.Mutation_Write{ Table: "t_foo", Columns: []string{"col1", "col2"}, Values: []*proto3.ListValue{ &proto3.ListValue{ Values: []*proto3.Value{stringProto("one"), floatProto(2.0)}, }, }, }, }, }, }, // Update Mutation { &Mutation{opUpdate, "t_foo", KeySets(), []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, &sppb.Mutation{ Operation: &sppb.Mutation_Update{ Update: &sppb.Mutation_Write{ Table: "t_foo", Columns: []string{"col1", "col2"}, Values: []*proto3.ListValue{ &proto3.ListValue{ Values: []*proto3.Value{stringProto("one"), nullProto()}, }, }, }, }, }, }, } { if got, err := test.m.proto(); err != nil || !reflect.DeepEqual(got, test.want) { t.Errorf("%d: (%#v).proto() = (%v, %v), want (%v, nil)", i, test.m, got, err, test.want) } } } // mutationColumnSorter implements sort.Interface for sorting column-value pairs in a Mutation by column names. type mutationColumnSorter struct { Mutation } // newMutationColumnSorter creates new instance of mutationColumnSorter by duplicating the input Mutation so that // sorting won't change the input Mutation. func newMutationColumnSorter(m *Mutation) *mutationColumnSorter { return &mutationColumnSorter{ Mutation{ m.op, m.table, m.keySet, append([]string(nil), m.columns...), append([]interface{}(nil), m.values...), }, } } // Len implements sort.Interface.Len. func (ms *mutationColumnSorter) Len() int { return len(ms.columns) } // Swap implements sort.Interface.Swap. func (ms *mutationColumnSorter) Swap(i, j int) { ms.columns[i], ms.columns[j] = ms.columns[j], ms.columns[i] ms.values[i], ms.values[j] = ms.values[j], ms.values[i] } // Less implements sort.Interface.Less. func (ms *mutationColumnSorter) Less(i, j int) bool { return strings.Compare(ms.columns[i], ms.columns[j]) < 0 } // mutationEqual returns true if two mutations in question are equal // to each other. 
func mutationEqual(t *testing.T, m1, m2 Mutation) bool { // Two mutations are considered to be equal even if their column values have different // orders. ms1 := newMutationColumnSorter(&m1) ms2 := newMutationColumnSorter(&m2) sort.Sort(ms1) sort.Sort(ms2) return reflect.DeepEqual(ms1, ms2) } // Test helper functions which help to generate spanner.Mutation. func TestMutationHelpers(t *testing.T) { for _, test := range []struct { m string got *Mutation want *Mutation }{ { "Insert", Insert("t_foo", []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}), &Mutation{opInsert, "t_foo", nil, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, }, { "InsertMap", InsertMap("t_foo", map[string]interface{}{"col1": int64(1), "col2": int64(2)}), &Mutation{opInsert, "t_foo", nil, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, }, { "InsertStruct", func() *Mutation { m, err := InsertStruct( "t_foo", struct { notCol bool Col1 int64 `spanner:"col1"` Col2 int64 `spanner:"col2"` }{false, int64(1), int64(2)}, ) if err != nil { t.Errorf("cannot convert struct into mutation: %v", err) } return m }(), &Mutation{opInsert, "t_foo", nil, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, }, { "Update", Update("t_foo", []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}), &Mutation{opUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, }, { "UpdateMap", UpdateMap("t_foo", map[string]interface{}{"col1": "one", "col2": []byte(nil)}), &Mutation{opUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, }, { "UpdateStruct", func() *Mutation { m, err := UpdateStruct( "t_foo", struct { Col1 string `spanner:"col1"` notCol int Col2 []byte `spanner:"col2"` }{"one", 1, nil}, ) if err != nil { t.Errorf("cannot convert struct into mutation: %v", err) } return m }(), &Mutation{opUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, }, { "InsertOrUpdate", 
InsertOrUpdate("t_foo", []string{"col1", "col2"}, []interface{}{1.0, 2.0}), &Mutation{opInsertOrUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, }, { "InsertOrUpdateMap", InsertOrUpdateMap("t_foo", map[string]interface{}{"col1": 1.0, "col2": 2.0}), &Mutation{opInsertOrUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, }, { "InsertOrUpdateStruct", func() *Mutation { m, err := InsertOrUpdateStruct( "t_foo", struct { Col1 float64 `spanner:"col1"` Col2 float64 `spanner:"col2"` notCol float64 }{1.0, 2.0, 3.0}, ) if err != nil { t.Errorf("cannot convert struct into mutation: %v", err) } return m }(), &Mutation{opInsertOrUpdate, "t_foo", nil, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, }, { "Replace", Replace("t_foo", []string{"col1", "col2"}, []interface{}{"one", 2.0}), &Mutation{opReplace, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, }, { "ReplaceMap", ReplaceMap("t_foo", map[string]interface{}{"col1": "one", "col2": 2.0}), &Mutation{opReplace, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, }, { "ReplaceStruct", func() *Mutation { m, err := ReplaceStruct( "t_foo", struct { Col1 string `spanner:"col1"` Col2 float64 `spanner:"col2"` notCol string }{"one", 2.0, "foo"}, ) if err != nil { t.Errorf("cannot convert struct into mutation: %v", err) } return m }(), &Mutation{opReplace, "t_foo", nil, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, }, { "Delete", Delete("t_foo", Key{"foo"}), &Mutation{opDelete, "t_foo", Key{"foo"}, nil, nil}, }, { "DeleteRange", Delete("t_foo", KeyRange{Key{"bar"}, Key{"foo"}, ClosedClosed}), &Mutation{opDelete, "t_foo", KeyRange{Key{"bar"}, Key{"foo"}, ClosedClosed}, nil, nil}, }, } { if !mutationEqual(t, *test.got, *test.want) { t.Errorf("%v: got Mutation %v, want %v", test.m, test.got, test.want) } } } // Test encoding non-struct types by using *Struct helpers. 
func TestBadStructs(t *testing.T) { val := "i_am_not_a_struct" wantErr := errNotStruct(val) if _, gotErr := InsertStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { t.Errorf("InsertStruct(%q) returns error %v, want %v", val, gotErr, wantErr) } if _, gotErr := InsertOrUpdateStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { t.Errorf("InsertOrUpdateStruct(%q) returns error %v, want %v", val, gotErr, wantErr) } if _, gotErr := UpdateStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { t.Errorf("UpdateStruct(%q) returns error %v, want %v", val, gotErr, wantErr) } if _, gotErr := ReplaceStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { t.Errorf("ReplaceStruct(%q) returns error %v, want %v", val, gotErr, wantErr) } } // Test encoding Mutation into proto. func TestEncodeMutation(t *testing.T) { for _, test := range []struct { name string mutation Mutation wantProto *sppb.Mutation wantErr error }{ { "OpDelete", Mutation{opDelete, "t_test", Key{1}, nil, nil}, &sppb.Mutation{ Operation: &sppb.Mutation_Delete_{ Delete: &sppb.Mutation_Delete{ Table: "t_test", KeySet: &sppb.KeySet{ Keys: []*proto3.ListValue{listValueProto(intProto(1))}, }, }, }, }, nil, }, { "OpDelete - Key error", Mutation{opDelete, "t_test", Key{struct{}{}}, nil, nil}, &sppb.Mutation{ Operation: &sppb.Mutation_Delete_{ Delete: &sppb.Mutation_Delete{ Table: "t_test", KeySet: &sppb.KeySet{}, }, }, }, errInvdKeyPartType(struct{}{}), }, { "OpInsert", Mutation{opInsert, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, &sppb.Mutation{ Operation: &sppb.Mutation_Insert{ Insert: &sppb.Mutation_Write{ Table: "t_test", Columns: []string{"key", "val"}, Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, }, }, }, nil, }, { "OpInsert - Value Type Error", Mutation{opInsert, "t_test", nil, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, &sppb.Mutation{ Operation: &sppb.Mutation_Insert{ Insert: &sppb.Mutation_Write{}, }, }, 
errEncoderUnsupportedType(struct{}{}), }, { "OpInsertOrUpdate", Mutation{opInsertOrUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, &sppb.Mutation{ Operation: &sppb.Mutation_InsertOrUpdate{ InsertOrUpdate: &sppb.Mutation_Write{ Table: "t_test", Columns: []string{"key", "val"}, Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, }, }, }, nil, }, { "OpInsertOrUpdate - Value Type Error", Mutation{opInsertOrUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, &sppb.Mutation{ Operation: &sppb.Mutation_InsertOrUpdate{ InsertOrUpdate: &sppb.Mutation_Write{}, }, }, errEncoderUnsupportedType(struct{}{}), }, { "OpReplace", Mutation{opReplace, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, &sppb.Mutation{ Operation: &sppb.Mutation_Replace{ Replace: &sppb.Mutation_Write{ Table: "t_test", Columns: []string{"key", "val"}, Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, }, }, }, nil, }, { "OpReplace - Value Type Error", Mutation{opReplace, "t_test", nil, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, &sppb.Mutation{ Operation: &sppb.Mutation_Replace{ Replace: &sppb.Mutation_Write{}, }, }, errEncoderUnsupportedType(struct{}{}), }, { "OpUpdate", Mutation{opUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, &sppb.Mutation{ Operation: &sppb.Mutation_Update{ Update: &sppb.Mutation_Write{ Table: "t_test", Columns: []string{"key", "val"}, Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, }, }, }, nil, }, { "OpUpdate - Value Type Error", Mutation{opUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, &sppb.Mutation{ Operation: &sppb.Mutation_Update{ Update: &sppb.Mutation_Write{}, }, }, errEncoderUnsupportedType(struct{}{}), }, { "OpKnown - Unknown Mutation Operation Code", Mutation{op(100), "t_test", nil, nil, nil}, &sppb.Mutation{}, errInvdMutationOp(Mutation{op(100), "t_test", nil, 
nil, nil}), }, } { gotProto, gotErr := test.mutation.proto() if gotErr != nil { if !reflect.DeepEqual(gotErr, test.wantErr) { t.Errorf("%s: %v.proto() returns error %v, want %v", test.name, test.mutation, gotErr, test.wantErr) } continue } if !reflect.DeepEqual(gotProto, test.wantProto) { t.Errorf("%s: %v.proto() = (%v, nil), want (%v, nil)", test.name, test.mutation, gotProto, test.wantProto) } } } // Test Encoding an array of mutations. func TestEncodeMutationArray(t *testing.T) { for _, test := range []struct { name string ms []*Mutation want []*sppb.Mutation wantErr error }{ { "Multiple Mutations", []*Mutation{ &Mutation{opDelete, "t_test", Key{"bar"}, nil, nil}, &Mutation{opInsertOrUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", 1}}, }, []*sppb.Mutation{ &sppb.Mutation{ Operation: &sppb.Mutation_Delete_{ Delete: &sppb.Mutation_Delete{ Table: "t_test", KeySet: &sppb.KeySet{ Keys: []*proto3.ListValue{listValueProto(stringProto("bar"))}, }, }, }, }, &sppb.Mutation{ Operation: &sppb.Mutation_InsertOrUpdate{ InsertOrUpdate: &sppb.Mutation_Write{ Table: "t_test", Columns: []string{"key", "val"}, Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, }, }, }, }, nil, }, { "Multiple Mutations - Bad Mutation", []*Mutation{ &Mutation{opDelete, "t_test", Key{"bar"}, nil, nil}, &Mutation{opInsertOrUpdate, "t_test", nil, []string{"key", "val"}, []interface{}{"foo", struct{}{}}}, }, []*sppb.Mutation{}, errEncoderUnsupportedType(struct{}{}), }, } { gotProto, gotErr := mutationsProto(test.ms) if gotErr != nil { if !reflect.DeepEqual(gotErr, test.wantErr) { t.Errorf("%v: mutationsProto(%v) returns error %v, want %v", test.name, test.ms, gotErr, test.wantErr) } continue } if !reflect.DeepEqual(gotProto, test.want) { t.Errorf("%v: mutationsProto(%v) = (%v, nil), want (%v, nil)", test.name, test.ms, gotProto, test.want) } } } 
golang-google-cloud-0.9.0/spanner/protoutils.go000066400000000000000000000057421312234511600215740ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "encoding/base64" "strconv" "time" "cloud.google.com/go/civil" proto3 "github.com/golang/protobuf/ptypes/struct" sppb "google.golang.org/genproto/googleapis/spanner/v1" ) // Helpers to generate protobuf values and Cloud Spanner types. func stringProto(s string) *proto3.Value { return &proto3.Value{Kind: stringKind(s)} } func stringKind(s string) *proto3.Value_StringValue { return &proto3.Value_StringValue{StringValue: s} } func stringType() *sppb.Type { return &sppb.Type{Code: sppb.TypeCode_STRING} } func boolProto(b bool) *proto3.Value { return &proto3.Value{Kind: &proto3.Value_BoolValue{BoolValue: b}} } func boolType() *sppb.Type { return &sppb.Type{Code: sppb.TypeCode_BOOL} } func intProto(n int64) *proto3.Value { return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: strconv.FormatInt(n, 10)}} } func intType() *sppb.Type { return &sppb.Type{Code: sppb.TypeCode_INT64} } func floatProto(n float64) *proto3.Value { return &proto3.Value{Kind: &proto3.Value_NumberValue{NumberValue: n}} } func floatType() *sppb.Type { return &sppb.Type{Code: sppb.TypeCode_FLOAT64} } func bytesProto(b []byte) *proto3.Value { return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: base64.StdEncoding.EncodeToString(b)}} } func bytesType() *sppb.Type { 
return &sppb.Type{Code: sppb.TypeCode_BYTES} } func timeProto(t time.Time) *proto3.Value { return stringProto(t.UTC().Format(time.RFC3339Nano)) } func timeType() *sppb.Type { return &sppb.Type{Code: sppb.TypeCode_TIMESTAMP} } func dateProto(d civil.Date) *proto3.Value { return stringProto(d.String()) } func dateType() *sppb.Type { return &sppb.Type{Code: sppb.TypeCode_DATE} } func listProto(p ...*proto3.Value) *proto3.Value { return &proto3.Value{Kind: &proto3.Value_ListValue{ListValue: &proto3.ListValue{Values: p}}} } func listValueProto(p ...*proto3.Value) *proto3.ListValue { return &proto3.ListValue{Values: p} } func listType(t *sppb.Type) *sppb.Type { return &sppb.Type{Code: sppb.TypeCode_ARRAY, ArrayElementType: t} } func mkField(n string, t *sppb.Type) *sppb.StructType_Field { return &sppb.StructType_Field{n, t} } func structType(fields ...*sppb.StructType_Field) *sppb.Type { return &sppb.Type{Code: sppb.TypeCode_STRUCT, StructType: &sppb.StructType{Fields: fields}} } func nullProto() *proto3.Value { return &proto3.Value{Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE}} } golang-google-cloud-0.9.0/spanner/read.go000066400000000000000000000550221312234511600202570ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package spanner import ( "bytes" "io" "sync/atomic" "time" log "github.com/golang/glog" proto "github.com/golang/protobuf/proto" proto3 "github.com/golang/protobuf/ptypes/struct" "golang.org/x/net/context" "google.golang.org/api/iterator" sppb "google.golang.org/genproto/googleapis/spanner/v1" "google.golang.org/grpc/codes" ) // streamingReceiver is the interface for receiving data from a client side // stream. type streamingReceiver interface { Recv() (*sppb.PartialResultSet, error) } // errEarlyReadEnd returns error for read finishes when gRPC stream is still active. func errEarlyReadEnd() error { return spannerErrorf(codes.FailedPrecondition, "read completed with active stream") } // stream is the internal fault tolerant method for streaming data from // Cloud Spanner. func stream(ctx context.Context, rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error), setTimestamp func(time.Time), release func(error)) *RowIterator { ctx, cancel := context.WithCancel(ctx) return &RowIterator{ streamd: newResumableStreamDecoder(ctx, rpc), rowd: &partialResultSetDecoder{}, setTimestamp: setTimestamp, release: release, cancel: cancel, } } // RowIterator is an iterator over Rows. type RowIterator struct { streamd *resumableStreamDecoder rowd *partialResultSetDecoder setTimestamp func(time.Time) release func(error) cancel func() err error rows []*Row } // Next returns the next result. Its second return value is iterator.Done if // there are no more results. Once Next returns Done, all subsequent calls // will return Done. 
func (r *RowIterator) Next() (*Row, error) { if r.err != nil { return nil, r.err } for len(r.rows) == 0 && r.streamd.next() { r.rows, r.err = r.rowd.add(r.streamd.get()) if r.err != nil { return nil, r.err } if !r.rowd.ts.IsZero() && r.setTimestamp != nil { r.setTimestamp(r.rowd.ts) r.setTimestamp = nil } } if len(r.rows) > 0 { row := r.rows[0] r.rows = r.rows[1:] return row, nil } if err := r.streamd.lastErr(); err != nil { r.err = toSpannerError(err) } else if !r.rowd.done() { r.err = errEarlyReadEnd() } else { r.err = iterator.Done } return nil, r.err } // Do calls the provided function once in sequence for each row in the iteration. If the // function returns a non-nil error, Do immediately returns that error. // // If there are no rows in the iterator, Do will return nil without calling the // provided function. // // Do always calls Stop on the iterator. func (r *RowIterator) Do(f func(r *Row) error) error { defer r.Stop() for { row, err := r.Next() switch err { case iterator.Done: return nil case nil: if err = f(row); err != nil { return err } default: return err } } } // Stop terminates the iteration. It should be called after every iteration. func (r *RowIterator) Stop() { if r.cancel != nil { r.cancel() } if r.release != nil { r.release(r.err) if r.err == nil { r.err = spannerErrorf(codes.FailedPrecondition, "Next called after Stop") } r.release = nil } } // partialResultQueue implements a simple FIFO queue. The zero value is a // valid queue. type partialResultQueue struct { q []*sppb.PartialResultSet first int last int n int // number of elements in queue } // empty returns if the partialResultQueue is empty. func (q *partialResultQueue) empty() bool { return q.n == 0 } // errEmptyQueue returns error for dequeuing an empty queue. func errEmptyQueue() error { return spannerErrorf(codes.OutOfRange, "empty partialResultQueue") } // peekLast returns the last item in partialResultQueue; if the queue // is empty, it returns error. 
func (q *partialResultQueue) peekLast() (*sppb.PartialResultSet, error) { if q.empty() { return nil, errEmptyQueue() } return q.q[(q.last+cap(q.q)-1)%cap(q.q)], nil } // push adds an item to the tail of partialResultQueue. func (q *partialResultQueue) push(r *sppb.PartialResultSet) { if q.q == nil { q.q = make([]*sppb.PartialResultSet, 8 /* arbitrary */) } if q.n == cap(q.q) { buf := make([]*sppb.PartialResultSet, cap(q.q)*2) for i := 0; i < q.n; i++ { buf[i] = q.q[(q.first+i)%cap(q.q)] } q.q = buf q.first = 0 q.last = q.n } q.q[q.last] = r q.last = (q.last + 1) % cap(q.q) q.n++ } // pop removes an item from the head of partialResultQueue and returns // it. func (q *partialResultQueue) pop() *sppb.PartialResultSet { if q.n == 0 { return nil } r := q.q[q.first] q.q[q.first] = nil q.first = (q.first + 1) % cap(q.q) q.n-- return r } // clear empties partialResultQueue. func (q *partialResultQueue) clear() { *q = partialResultQueue{} } // dump retrieves all items from partialResultQueue and return them in a slice. // It is used only in tests. func (q *partialResultQueue) dump() []*sppb.PartialResultSet { var dq []*sppb.PartialResultSet for i := q.first; len(dq) < q.n; i = (i + 1) % cap(q.q) { dq = append(dq, q.q[i]) } return dq } // resumableStreamDecoderState encodes resumableStreamDecoder's status. // See also the comments for resumableStreamDecoder.Next. type resumableStreamDecoderState int const ( unConnected resumableStreamDecoderState = iota // 0 queueingRetryable // 1 queueingUnretryable // 2 aborted // 3 finished // 4 ) // resumableStreamDecoder provides a resumable interface for receiving // sppb.PartialResultSet(s) from a given query wrapped by // resumableStreamDecoder.rpc(). type resumableStreamDecoder struct { // state is the current status of resumableStreamDecoder, see also // the comments for resumableStreamDecoder.Next. state resumableStreamDecoderState // stateWitness when non-nil is called to observe state change, // used for testing. 
stateWitness func(resumableStreamDecoderState) // ctx is the caller's context, used for cancel/timeout Next(). ctx context.Context // rpc is a factory of streamingReceiver, which might resume // a pervious stream from the point encoded in restartToken. // rpc is always a wrapper of a Cloud Spanner query which is // resumable. rpc func(ctx context.Context, restartToken []byte) (streamingReceiver, error) // stream is the current RPC streaming receiver. stream streamingReceiver // q buffers received yet undecoded partial results. q partialResultQueue // bytesBetweenResumeTokens is the proxy of the byte size of PartialResultSets being queued // between two resume tokens. Once bytesBetweenResumeTokens is greater than // maxBytesBetweenResumeTokens, resumableStreamDecoder goes into queueingUnretryable state. bytesBetweenResumeTokens int32 // maxBytesBetweenResumeTokens is the max number of bytes that can be buffered // between two resume tokens. It is always copied from the global maxBytesBetweenResumeTokens // atomically. maxBytesBetweenResumeTokens int32 // np is the next sppb.PartialResultSet ready to be returned // to caller of resumableStreamDecoder.Get(). np *sppb.PartialResultSet // resumeToken stores the resume token that resumableStreamDecoder has // last revealed to caller. resumeToken []byte // retryCount is the number of retries that have been carried out so far retryCount int // err is the last error resumableStreamDecoder has encountered so far. err error // backoff to compute delays between retries. backoff exponentialBackoff } // newResumableStreamDecoder creates a new resumeableStreamDecoder instance. // Parameter rpc should be a function that creates a new stream // beginning at the restartToken if non-nil. 
func newResumableStreamDecoder(ctx context.Context, rpc func(ct context.Context, restartToken []byte) (streamingReceiver, error)) *resumableStreamDecoder { return &resumableStreamDecoder{ ctx: ctx, rpc: rpc, maxBytesBetweenResumeTokens: atomic.LoadInt32(&maxBytesBetweenResumeTokens), backoff: defaultBackoff, } } // changeState fulfills state transition for resumableStateDecoder. func (d *resumableStreamDecoder) changeState(target resumableStreamDecoderState) { if d.state == queueingRetryable && d.state != target { // Reset bytesBetweenResumeTokens because it is only meaningful/changed under // queueingRetryable state. d.bytesBetweenResumeTokens = 0 } d.state = target if d.stateWitness != nil { d.stateWitness(target) } } // isNewResumeToken returns if the observed resume token is different from // the one returned from server last time. func (d *resumableStreamDecoder) isNewResumeToken(rt []byte) bool { if rt == nil { return false } if bytes.Compare(rt, d.resumeToken) == 0 { return false } return true } // Next advances to the next available partial result set. If error or no // more, returns false, call Err to determine if an error was encountered. // The following diagram illustrates the state machine of resumableStreamDecoder // that Next() implements. Note that state transition can be only triggered by // RPC activities. 
/* rpc() fails retryable +---------+ | | rpc() fails unretryable/ctx timeouts or cancelled | | +------------------------------------------------+ | | | | | v | v | +---+---+---+ +--------+ +------+--+ +-----+unConnected| |finished| | aborted |<----+ | | ++-----+-+ +------+--+ | +---+----+--+ ^ ^ ^ | | ^ | | | | | | | | recv() fails | | | | | | | | |recv() fails retryable | | | | | |with valid ctx | | | | | | | | | | rpc() succeeds | +-----------------------+ | | | | | | recv EOF recv EOF | | | | | | | | v | | Queue size exceeds | | | +---+----+---+----+threshold +-------+-----------+ | | +---------->+ +--------------->+ +-+ | | |queueingRetryable| |queueingUnretryable| | | | +<---------------+ | | | +---+----------+--+ pop() returns +--+----+-----------+ | | | | resume token | ^ | | | | | | | | | | | | | +---------------+ | | | | recv() succeeds | +----+ | | recv() succeeds | | | | | | | | | | | +--------------------------------------------------+ recv() fails unretryable */ var ( // maxBytesBetweenResumeTokens is the maximum amount of bytes that resumableStreamDecoder // in queueingRetryable state can use to queue PartialResultSets before getting // into queueingUnretryable state. maxBytesBetweenResumeTokens = int32(128 * 1024 * 1024) ) func (d *resumableStreamDecoder) next() bool { for { select { case <-d.ctx.Done(): // Do context check here so that even gRPC failed to do // so, resumableStreamDecoder can still break the loop // as expected. d.err = errContextCanceled(d.ctx, d.err) d.changeState(aborted) default: } switch d.state { case unConnected: // If no gRPC stream is available, try to initiate one. if d.stream, d.err = d.rpc(d.ctx, d.resumeToken); d.err != nil { if isRetryable(d.err) { d.doBackOff() // Be explicit about state transition, although the // state doesn't actually change. State transition // will be triggered only by RPC activity, regardless of // whether there is an actual state change or not. 
d.changeState(unConnected) continue } d.changeState(aborted) continue } d.resetBackOff() d.changeState(queueingRetryable) continue case queueingRetryable: fallthrough case queueingUnretryable: // Receiving queue is not empty. last, err := d.q.peekLast() if err != nil { // Only the case that receiving queue is empty could cause peekLast to // return error and in such case, we should try to receive from stream. d.tryRecv() continue } if d.isNewResumeToken(last.ResumeToken) { // Got new resume token, return buffered sppb.PartialResultSets to caller. d.np = d.q.pop() if d.q.empty() { d.bytesBetweenResumeTokens = 0 // The new resume token was just popped out from queue, record it. d.resumeToken = d.np.ResumeToken d.changeState(queueingRetryable) } return true } if d.bytesBetweenResumeTokens >= d.maxBytesBetweenResumeTokens && d.state == queueingRetryable { d.changeState(queueingUnretryable) continue } if d.state == queueingUnretryable { // When there is no resume token observed, // only yield sppb.PartialResultSets to caller under // queueingUnretryable state. d.np = d.q.pop() return true } // Needs to receive more from gRPC stream till a new resume token // is observed. d.tryRecv() continue case aborted: // Discard all pending items because none of them // should be yield to caller. d.q.clear() return false case finished: // If query has finished, check if there are still buffered messages. if d.q.empty() { // No buffered PartialResultSet. return false } // Although query has finished, there are still buffered PartialResultSets. d.np = d.q.pop() return true default: log.Errorf("Unexpected resumableStreamDecoder.state: %v", d.state) return false } } } // tryRecv attempts to receive a PartialResultSet from gRPC stream. 
func (d *resumableStreamDecoder) tryRecv() { var res *sppb.PartialResultSet if res, d.err = d.stream.Recv(); d.err != nil { if d.err == io.EOF { d.err = nil d.changeState(finished) return } if isRetryable(d.err) && d.state == queueingRetryable { d.err = nil // Discard all queue items (none have resume tokens). d.q.clear() d.stream = nil d.changeState(unConnected) d.doBackOff() return } d.changeState(aborted) return } d.q.push(res) if d.state == queueingRetryable && !d.isNewResumeToken(res.ResumeToken) { // adjusting d.bytesBetweenResumeTokens d.bytesBetweenResumeTokens += int32(proto.Size(res)) } d.resetBackOff() d.changeState(d.state) } // resetBackOff clears the internal retry counter of // resumableStreamDecoder so that the next exponential // backoff will start at a fresh state. func (d *resumableStreamDecoder) resetBackOff() { d.retryCount = 0 } // doBackoff does an exponential backoff sleep. func (d *resumableStreamDecoder) doBackOff() { ticker := time.NewTicker(d.backoff.delay(d.retryCount)) defer ticker.Stop() d.retryCount++ select { case <-d.ctx.Done(): case <-ticker.C: } } // get returns the most recent PartialResultSet generated by a call to next. func (d *resumableStreamDecoder) get() *sppb.PartialResultSet { return d.np } // lastErr returns the last non-EOF error encountered. func (d *resumableStreamDecoder) lastErr() error { return d.err } // partialResultSetDecoder assembles PartialResultSet(s) into Cloud Spanner // Rows. type partialResultSetDecoder struct { row Row tx *sppb.Transaction chunked bool // if true, next value should be merged with last values entry. ts time.Time // read timestamp } // yield checks we have a complete row, and if so returns it. A row is not // complete if it doesn't have enough columns, or if this is a chunked response // and there are no further values to process. 
func (p *partialResultSetDecoder) yield(chunked, last bool) *Row { if len(p.row.vals) == len(p.row.fields) && (!chunked || !last) { // When partialResultSetDecoder gets enough number of // Column values, There are two cases that a new Row // should be yield: // 1. The incoming PartialResultSet is not chunked; // 2. The incoming PartialResultSet is chunked, but the // proto3.Value being merged is not the last one in // the PartialResultSet. // // Use a fresh Row to simplify clients that want to use yielded results // after the next row is retrieved. Note that fields is never changed // so it doesn't need to be copied. fresh := Row{ fields: p.row.fields, vals: make([]*proto3.Value, len(p.row.vals)), } copy(fresh.vals, p.row.vals) p.row.vals = p.row.vals[:0] // empty and reuse slice return &fresh } return nil } // yieldTx returns transaction information via caller supplied callback. func errChunkedEmptyRow() error { return spannerErrorf(codes.FailedPrecondition, "got invalid chunked PartialResultSet with empty Row") } // add tries to merge a new PartialResultSet into buffered Row. It returns // any rows that have been completed as a result. func (p *partialResultSetDecoder) add(r *sppb.PartialResultSet) ([]*Row, error) { var rows []*Row if r.Metadata != nil { // Metadata should only be returned in the first result. if p.row.fields == nil { p.row.fields = r.Metadata.RowType.Fields } if p.tx == nil && r.Metadata.Transaction != nil { p.tx = r.Metadata.Transaction if p.tx.ReadTimestamp != nil { p.ts = time.Unix(p.tx.ReadTimestamp.Seconds, int64(p.tx.ReadTimestamp.Nanos)) } } } if len(r.Values) == 0 { return nil, nil } if p.chunked { p.chunked = false // Try to merge first value in r.Values into // uncompleted row. last := len(p.row.vals) - 1 if last < 0 { // sanity check return nil, errChunkedEmptyRow() } var err error // If p is chunked, then we should always try to merge p.last with r.first. 
if p.row.vals[last], err = p.merge(p.row.vals[last], r.Values[0]); err != nil { return nil, err } r.Values = r.Values[1:] // Merge is done, try to yield a complete Row. if row := p.yield(r.ChunkedValue, len(r.Values) == 0); row != nil { rows = append(rows, row) } } for i, v := range r.Values { // The rest values in r can be appened into p directly. p.row.vals = append(p.row.vals, v) // Again, check to see if a complete Row can be yielded because of // the newly added value. if row := p.yield(r.ChunkedValue, i == len(r.Values)-1); row != nil { rows = append(rows, row) } } if r.ChunkedValue { // After dealing with all values in r, if r is chunked then p must // be also chunked. p.chunked = true } return rows, nil } // isMergeable returns if a protobuf Value can be potentially merged with // other protobuf Values. func (p *partialResultSetDecoder) isMergeable(a *proto3.Value) bool { switch a.Kind.(type) { case *proto3.Value_StringValue: return true case *proto3.Value_ListValue: return true default: return false } } // errIncompatibleMergeTypes returns error for incompatible protobuf types // that cannot be merged by partialResultSetDecoder. func errIncompatibleMergeTypes(a, b *proto3.Value) error { return spannerErrorf(codes.FailedPrecondition, "incompatible type in chunked PartialResultSet. expected (%T), got (%T)", a.Kind, b.Kind) } // errUnsupportedMergeType returns error for protobuf type that cannot be // merged to other protobufs. func errUnsupportedMergeType(a *proto3.Value) error { return spannerErrorf(codes.FailedPrecondition, "unsupported type merge (%T)", a.Kind) } // merge tries to combine two protobuf Values if possible. 
func (p *partialResultSetDecoder) merge(a, b *proto3.Value) (*proto3.Value, error) { var err error typeErr := errIncompatibleMergeTypes(a, b) switch t := a.Kind.(type) { case *proto3.Value_StringValue: s, ok := b.Kind.(*proto3.Value_StringValue) if !ok { return nil, typeErr } return &proto3.Value{ Kind: &proto3.Value_StringValue{StringValue: t.StringValue + s.StringValue}, }, nil case *proto3.Value_ListValue: l, ok := b.Kind.(*proto3.Value_ListValue) if !ok { return nil, typeErr } if l.ListValue == nil || len(l.ListValue.Values) <= 0 { // b is an empty list, just return a. return a, nil } if t.ListValue == nil || len(t.ListValue.Values) <= 0 { // a is an empty list, just return b. return b, nil } if la := len(t.ListValue.Values) - 1; p.isMergeable(t.ListValue.Values[la]) { // When the last item in a is of type String, // List or Struct(encoded into List by Cloud Spanner), // try to Merge last item in a and first item in b. t.ListValue.Values[la], err = p.merge(t.ListValue.Values[la], l.ListValue.Values[0]) if err != nil { return nil, err } l.ListValue.Values = l.ListValue.Values[1:] } return &proto3.Value{ Kind: &proto3.Value_ListValue{ ListValue: &proto3.ListValue{ Values: append(t.ListValue.Values, l.ListValue.Values...), }, }, }, nil default: return nil, errUnsupportedMergeType(a) } } // Done returns if partialResultSetDecoder has already done with all buffered // values. func (p *partialResultSetDecoder) done() bool { // There is no explicit end of stream marker, but ending part way // through a row is obviously bad, or ending with the last column still // awaiting completion. return len(p.row.vals) == 0 && !p.chunked } golang-google-cloud-0.9.0/spanner/read_test.go000066400000000000000000001420321312234511600213140ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "errors" "fmt" "io" "reflect" "sync/atomic" "testing" "time" "golang.org/x/net/context" proto "github.com/golang/protobuf/proto" proto3 "github.com/golang/protobuf/ptypes/struct" "cloud.google.com/go/spanner/internal/testutil" "google.golang.org/api/iterator" sppb "google.golang.org/genproto/googleapis/spanner/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var ( // Mocked transaction timestamp. trxTs = time.Unix(1, 2) // Metadata for mocked KV table, its rows are returned by SingleUse transactions. kvMeta = func() *sppb.ResultSetMetadata { meta := testutil.KvMeta meta.Transaction = &sppb.Transaction{ ReadTimestamp: timestampProto(trxTs), } return &meta }() // Metadata for mocked ListKV table, which uses List for its key and value. // Its rows are returned by snapshot readonly transactions, as indicated in the transaction metadata. 
kvListMeta = &sppb.ResultSetMetadata{ RowType: &sppb.StructType{ Fields: []*sppb.StructType_Field{ { Name: "Key", Type: &sppb.Type{ Code: sppb.TypeCode_ARRAY, ArrayElementType: &sppb.Type{ Code: sppb.TypeCode_STRING, }, }, }, { Name: "Value", Type: &sppb.Type{ Code: sppb.TypeCode_ARRAY, ArrayElementType: &sppb.Type{ Code: sppb.TypeCode_STRING, }, }, }, }, }, Transaction: &sppb.Transaction{ Id: transactionID{5, 6, 7, 8, 9}, ReadTimestamp: timestampProto(trxTs), }, } // Metadata for mocked schema of a query result set, which has two struct // columns named "Col1" and "Col2", the struct's schema is like the // following: // // STRUCT { // INT // LIST // } // // Its rows are returned in readwrite transaction, as indicated in the transaction metadata. kvObjectMeta = &sppb.ResultSetMetadata{ RowType: &sppb.StructType{ Fields: []*sppb.StructType_Field{ { Name: "Col1", Type: &sppb.Type{ Code: sppb.TypeCode_STRUCT, StructType: &sppb.StructType{ Fields: []*sppb.StructType_Field{ { Name: "foo-f1", Type: &sppb.Type{ Code: sppb.TypeCode_INT64, }, }, { Name: "foo-f2", Type: &sppb.Type{ Code: sppb.TypeCode_ARRAY, ArrayElementType: &sppb.Type{ Code: sppb.TypeCode_STRING, }, }, }, }, }, }, }, { Name: "Col2", Type: &sppb.Type{ Code: sppb.TypeCode_STRUCT, StructType: &sppb.StructType{ Fields: []*sppb.StructType_Field{ { Name: "bar-f1", Type: &sppb.Type{ Code: sppb.TypeCode_INT64, }, }, { Name: "bar-f2", Type: &sppb.Type{ Code: sppb.TypeCode_ARRAY, ArrayElementType: &sppb.Type{ Code: sppb.TypeCode_STRING, }, }, }, }, }, }, }, }, }, Transaction: &sppb.Transaction{ Id: transactionID{1, 2, 3, 4, 5}, }, } ) // String implements fmt.stringer. 
func (r *Row) String() string { return fmt.Sprintf("{fields: %s, val: %s}", r.fields, r.vals) } func describeRows(l []*Row) string { // generate a nice test failure description var s = "[" for i, r := range l { if i != 0 { s += ",\n " } s += fmt.Sprint(r) } s += "]" return s } // Helper for generating proto3 Value_ListValue instances, making // test code shorter and readable. func genProtoListValue(v ...string) *proto3.Value_ListValue { r := &proto3.Value_ListValue{ ListValue: &proto3.ListValue{ Values: []*proto3.Value{}, }, } for _, e := range v { r.ListValue.Values = append( r.ListValue.Values, &proto3.Value{ Kind: &proto3.Value_StringValue{StringValue: e}, }, ) } return r } // Test Row generation logics of partialResultSetDecoder. func TestPartialResultSetDecoder(t *testing.T) { restore := setMaxBytesBetweenResumeTokens() defer restore() var tests = []struct { input []*sppb.PartialResultSet wantF []*Row wantTxID transactionID wantTs time.Time wantD bool }{ { // Empty input. wantD: true, }, // String merging examples. { // Single KV result. input: []*sppb.PartialResultSet{ { Metadata: kvMeta, Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, }, }, }, wantF: []*Row{ { fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, }, }, }, wantTs: trxTs, wantD: true, }, { // Incomplete partial result. input: []*sppb.PartialResultSet{ { Metadata: kvMeta, Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, }, }, }, wantTs: trxTs, wantD: false, }, { // Complete splitted result. 
input: []*sppb.PartialResultSet{ { Metadata: kvMeta, Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, }, }, { Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, }, }, }, wantF: []*Row{ { fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, }, }, }, wantTs: trxTs, wantD: true, }, { // Multi-row example with splitted row in the middle. input: []*sppb.PartialResultSet{ { Metadata: kvMeta, Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, {Kind: &proto3.Value_StringValue{StringValue: "A"}}, }, }, { Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "1"}}, {Kind: &proto3.Value_StringValue{StringValue: "B"}}, {Kind: &proto3.Value_StringValue{StringValue: "2"}}, }, }, }, wantF: []*Row{ { fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, }, }, { fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "A"}}, {Kind: &proto3.Value_StringValue{StringValue: "1"}}, }, }, { fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "B"}}, {Kind: &proto3.Value_StringValue{StringValue: "2"}}, }, }, }, wantTs: trxTs, wantD: true, }, { // Merging example in result_set.proto. 
input: []*sppb.PartialResultSet{ { Metadata: kvMeta, Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, {Kind: &proto3.Value_StringValue{StringValue: "W"}}, }, ChunkedValue: true, }, { Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "orl"}}, }, ChunkedValue: true, }, { Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "d"}}, }, }, }, wantF: []*Row{ { fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, {Kind: &proto3.Value_StringValue{StringValue: "World"}}, }, }, }, wantTs: trxTs, wantD: true, }, { // More complex example showing completing a merge and // starting a new merge in the same partialResultSet. input: []*sppb.PartialResultSet{ { Metadata: kvMeta, Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, {Kind: &proto3.Value_StringValue{StringValue: "W"}}, // start split in value }, ChunkedValue: true, }, { Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "orld"}}, // complete value {Kind: &proto3.Value_StringValue{StringValue: "i"}}, // start split in key }, ChunkedValue: true, }, { Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "s"}}, // complete key {Kind: &proto3.Value_StringValue{StringValue: "not"}}, {Kind: &proto3.Value_StringValue{StringValue: "a"}}, {Kind: &proto3.Value_StringValue{StringValue: "qu"}}, // split in value }, ChunkedValue: true, }, { Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "estion"}}, // complete value }, }, }, wantF: []*Row{ { fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, {Kind: &proto3.Value_StringValue{StringValue: "World"}}, }, }, { fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "is"}}, {Kind: &proto3.Value_StringValue{StringValue: "not"}}, }, }, { fields: kvMeta.RowType.Fields, 
vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: "a"}}, {Kind: &proto3.Value_StringValue{StringValue: "question"}}, }, }, }, wantTs: trxTs, wantD: true, }, // List merging examples. { // Non-splitting Lists. input: []*sppb.PartialResultSet{ { Metadata: kvListMeta, Values: []*proto3.Value{ { Kind: genProtoListValue("foo-1", "foo-2"), }, }, }, { Values: []*proto3.Value{ { Kind: genProtoListValue("bar-1", "bar-2"), }, }, }, }, wantF: []*Row{ { fields: kvListMeta.RowType.Fields, vals: []*proto3.Value{ { Kind: genProtoListValue("foo-1", "foo-2"), }, { Kind: genProtoListValue("bar-1", "bar-2"), }, }, }, }, wantTxID: transactionID{5, 6, 7, 8, 9}, wantTs: trxTs, wantD: true, }, { // Simple List merge case: splitted string element. input: []*sppb.PartialResultSet{ { Metadata: kvListMeta, Values: []*proto3.Value{ { Kind: genProtoListValue("foo-1", "foo-"), }, }, ChunkedValue: true, }, { Values: []*proto3.Value{ { Kind: genProtoListValue("2"), }, }, }, { Values: []*proto3.Value{ { Kind: genProtoListValue("bar-1", "bar-2"), }, }, }, }, wantF: []*Row{ { fields: kvListMeta.RowType.Fields, vals: []*proto3.Value{ { Kind: genProtoListValue("foo-1", "foo-2"), }, { Kind: genProtoListValue("bar-1", "bar-2"), }, }, }, }, wantTxID: transactionID{5, 6, 7, 8, 9}, wantTs: trxTs, wantD: true, }, { // Struct merging is also implemented by List merging. Note that // Cloud Spanner uses proto.ListValue to encode Structs as well. 
input: []*sppb.PartialResultSet{ { Metadata: kvObjectMeta, Values: []*proto3.Value{ { Kind: &proto3.Value_ListValue{ ListValue: &proto3.ListValue{ Values: []*proto3.Value{ {Kind: &proto3.Value_NumberValue{NumberValue: 23}}, {Kind: genProtoListValue("foo-1", "fo")}, }, }, }, }, }, ChunkedValue: true, }, { Values: []*proto3.Value{ { Kind: &proto3.Value_ListValue{ ListValue: &proto3.ListValue{ Values: []*proto3.Value{ {Kind: genProtoListValue("o-2", "f")}, }, }, }, }, }, ChunkedValue: true, }, { Values: []*proto3.Value{ { Kind: &proto3.Value_ListValue{ ListValue: &proto3.ListValue{ Values: []*proto3.Value{ {Kind: genProtoListValue("oo-3")}, }, }, }, }, { Kind: &proto3.Value_ListValue{ ListValue: &proto3.ListValue{ Values: []*proto3.Value{ {Kind: &proto3.Value_NumberValue{NumberValue: 45}}, {Kind: genProtoListValue("bar-1")}, }, }, }, }, }, }, }, wantF: []*Row{ { fields: kvObjectMeta.RowType.Fields, vals: []*proto3.Value{ { Kind: &proto3.Value_ListValue{ ListValue: &proto3.ListValue{ Values: []*proto3.Value{ {Kind: &proto3.Value_NumberValue{NumberValue: 23}}, {Kind: genProtoListValue("foo-1", "foo-2", "foo-3")}, }, }, }, }, { Kind: &proto3.Value_ListValue{ ListValue: &proto3.ListValue{ Values: []*proto3.Value{ {Kind: &proto3.Value_NumberValue{NumberValue: 45}}, {Kind: genProtoListValue("bar-1")}, }, }, }, }, }, }, }, wantTxID: transactionID{1, 2, 3, 4, 5}, wantD: true, }, } nextTest: for i, test := range tests { var rows []*Row p := &partialResultSetDecoder{} for j, v := range test.input { rs, err := p.add(v) if err != nil { t.Errorf("test %d.%d: partialResultSetDecoder.add(%v) = %v; want nil", i, j, v, err) continue nextTest } rows = append(rows, rs...) 
} if !reflect.DeepEqual(p.ts, test.wantTs) { t.Errorf("got transaction(%v), want %v", p.ts, test.wantTs) } if !reflect.DeepEqual(rows, test.wantF) { t.Errorf("test %d: rows=\n%v\n; want\n%v\n; p.row:\n%v\n", i, describeRows(rows), describeRows(test.wantF), p.row) } if got := p.done(); got != test.wantD { t.Errorf("test %d: partialResultSetDecoder.done() = %v", i, got) } } } const ( maxBuffers = 16 // max number of PartialResultSets that will be buffered in tests. ) // setMaxBytesBetweenResumeTokens sets the global maxBytesBetweenResumeTokens to a smaller // value more suitable for tests. It returns a function which should be called to restore // the maxBytesBetweenResumeTokens to its old value func setMaxBytesBetweenResumeTokens() func() { o := atomic.LoadInt32(&maxBytesBetweenResumeTokens) atomic.StoreInt32(&maxBytesBetweenResumeTokens, int32(maxBuffers*proto.Size(&sppb.PartialResultSet{ Metadata: kvMeta, Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, }, }))) return func() { atomic.StoreInt32(&maxBytesBetweenResumeTokens, o) } } // keyStr generates key string for kvMeta schema. func keyStr(i int) string { return fmt.Sprintf("foo-%02d", i) } // valStr generates value string for kvMeta schema. func valStr(i int) string { return fmt.Sprintf("bar-%02d", i) } // Test state transitions of resumableStreamDecoder where state machine // ends up to a non-blocking state(resumableStreamDecoder.Next returns // on non-blocking state). 
func TestRsdNonblockingStates(t *testing.T) {
	restore := setMaxBytesBetweenResumeTokens()
	defer restore()
	tests := []struct {
		name string
		msgs []testutil.MockCtlMsg // control messages the mock server will stream
		rpc  func(ct context.Context, resumeToken []byte) (streamingReceiver, error)
		sql  string
		// Expected values
		want         []*sppb.PartialResultSet      // PartialResultSets that should be returned to caller
		queue        []*sppb.PartialResultSet      // PartialResultSets that should be buffered
		resumeToken  []byte                        // Resume token that is maintained by resumableStreamDecoder
		stateHistory []resumableStreamDecoderState // State transition history of resumableStreamDecoder
		wantErr      error
	}{
		{
			// unConnected->queueingRetryable->finished
			name: "unConnected->queueingRetryable->finished",
			msgs: []testutil.MockCtlMsg{
				{},
				{},
				{Err: io.EOF, ResumeToken: false},
			},
			sql: "SELECT t.key key, t.value value FROM t_mock t",
			want: []*sppb.PartialResultSet{
				{
					Metadata: kvMeta,
					Values: []*proto3.Value{
						{Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}},
						{Kind: &proto3.Value_StringValue{StringValue: valStr(0)}},
					},
				},
			},
			queue: []*sppb.PartialResultSet{
				{
					Metadata: kvMeta,
					Values: []*proto3.Value{
						{Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}},
						{Kind: &proto3.Value_StringValue{StringValue: valStr(1)}},
					},
				},
			},
			stateHistory: []resumableStreamDecoderState{
				queueingRetryable, // do RPC
				queueingRetryable, // got foo-00
				queueingRetryable, // got foo-01
				finished,          // got EOF
			},
		},
		{
			// unConnected->queueingRetryable->aborted
			name: "unConnected->queueingRetryable->aborted",
			msgs: []testutil.MockCtlMsg{
				{},
				{Err: nil, ResumeToken: true},
				{},
				{Err: errors.New("I quit"), ResumeToken: false},
			},
			sql: "SELECT t.key key, t.value value FROM t_mock t",
			want: []*sppb.PartialResultSet{
				{
					Metadata: kvMeta,
					Values: []*proto3.Value{
						{Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}},
						{Kind: &proto3.Value_StringValue{StringValue: valStr(0)}},
					},
				},
				{
					Metadata: kvMeta,
					Values: []*proto3.Value{
						{Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}},
						{Kind: &proto3.Value_StringValue{StringValue: valStr(1)}},
					},
					ResumeToken: testutil.EncodeResumeToken(1),
				},
			},
			stateHistory: []resumableStreamDecoderState{
				queueingRetryable, // do RPC
				queueingRetryable, // got foo-00
				queueingRetryable, // got foo-01
				queueingRetryable, // foo-01, resume token
				queueingRetryable, // got foo-02
				aborted,           // got error
			},
			wantErr: grpc.Errorf(codes.Unknown, "I quit"),
		},
		{
			// unConnected->queueingRetryable->queueingUnretryable->queueingUnretryable
			name: "unConnected->queueingRetryable->queueingUnretryable->queueingUnretryable",
			msgs: func() (m []testutil.MockCtlMsg) {
				for i := 0; i < maxBuffers+1; i++ {
					m = append(m, testutil.MockCtlMsg{})
				}
				return m
			}(),
			sql: "SELECT t.key key, t.value value FROM t_mock t",
			want: func() (s []*sppb.PartialResultSet) {
				for i := 0; i < maxBuffers+1; i++ {
					s = append(s, &sppb.PartialResultSet{
						Metadata: kvMeta,
						Values: []*proto3.Value{
							{Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}},
							{Kind: &proto3.Value_StringValue{StringValue: valStr(i)}},
						},
					})
				}
				return s
			}(),
			stateHistory: func() (s []resumableStreamDecoderState) {
				s = append(s, queueingRetryable) // do RPC
				for i := 0; i < maxBuffers; i++ {
					s = append(s, queueingRetryable) // the internal queue of resumableStreamDecoder fills up
				}
				// the first item fills up the queue and triggers state transition;
				// the second item is received under queueingUnretryable state.
				s = append(s, queueingUnretryable)
				s = append(s, queueingUnretryable)
				return s
			}(),
		},
		{
			// unConnected->queueingRetryable->queueingUnretryable->aborted
			name: "unConnected->queueingRetryable->queueingUnretryable->aborted",
			msgs: func() (m []testutil.MockCtlMsg) {
				for i := 0; i < maxBuffers; i++ {
					m = append(m, testutil.MockCtlMsg{})
				}
				m = append(m, testutil.MockCtlMsg{Err: errors.New("Just Abort It"), ResumeToken: false})
				return m
			}(),
			sql: "SELECT t.key key, t.value value FROM t_mock t",
			want: func() (s []*sppb.PartialResultSet) {
				for i := 0; i < maxBuffers; i++ {
					s = append(s, &sppb.PartialResultSet{
						Metadata: kvMeta,
						Values: []*proto3.Value{
							{Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}},
							{Kind: &proto3.Value_StringValue{StringValue: valStr(i)}},
						},
					})
				}
				return s
			}(),
			stateHistory: func() (s []resumableStreamDecoderState) {
				s = append(s, queueingRetryable) // do RPC
				for i := 0; i < maxBuffers; i++ {
					s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder fills up
				}
				s = append(s, queueingUnretryable) // the last row triggers state change
				s = append(s, aborted)             // Error happens
				return s
			}(),
			wantErr: grpc.Errorf(codes.Unknown, "Just Abort It"),
		},
	}
nextTest:
	for _, test := range tests {
		// One mock Cloud Spanner server per test case.
		ms := testutil.NewMockCloudSpanner(t, trxTs)
		ms.Serve()
		opts := []grpc.DialOption{
			grpc.WithInsecure(),
		}
		cc, err := grpc.Dial(ms.Addr(), opts...)
		if err != nil {
			t.Fatalf("%v: Dial(%q) = %v", test.name, ms.Addr(), err)
		}
		mc := sppb.NewSpannerClient(cc)
		if test.rpc == nil {
			// Default RPC: streaming SQL against the mock server.
			test.rpc = func(ct context.Context, resumeToken []byte) (streamingReceiver, error) {
				return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{
					Sql:         test.sql,
					ResumeToken: resumeToken,
				})
			}
		}
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		r := newResumableStreamDecoder(
			ctx,
			test.rpc,
		)
		st := []resumableStreamDecoderState{}
		var lastErr error
		// Once the expected number of state transitions are observed,
		// send a signal by setting stateDone = true.
		// NOTE(review): stateDone/st are written by the stateWitness callback and
		// read in the loop below — assumes both run on this goroutine; verify
		// against resumableStreamDecoder's implementation.
		stateDone := false
		// Set stateWitness to listen to state changes.
		hl := len(test.stateHistory) // To avoid data race on test.
		r.stateWitness = func(rs resumableStreamDecoderState) {
			if !stateDone {
				// Record state transitions.
				st = append(st, rs)
				if len(st) == hl {
					lastErr = r.lastErr()
					stateDone = true
				}
			}
		}
		// Let mock server stream given messages to resumableStreamDecoder.
		for _, m := range test.msgs {
			ms.AddMsg(m.Err, m.ResumeToken)
		}
		var rs []*sppb.PartialResultSet
		for {
			select {
			case <-ctx.Done():
				t.Errorf("context cancelled or timeout during test")
				continue nextTest
			default:
			}
			if stateDone {
				// Check if resumableStreamDecoder carried out expected
				// state transitions.
				if !reflect.DeepEqual(st, test.stateHistory) {
					t.Errorf("%v: observed state transitions: \n%v\n, want \n%v\n", test.name, st, test.stateHistory)
				}
				// Check if resumableStreamDecoder returns expected array of
				// PartialResultSets.
				if !reflect.DeepEqual(rs, test.want) {
					t.Errorf("%v: received PartialResultSets: \n%v\n, want \n%v\n", test.name, rs, test.want)
				}
				// Verify that resumableStreamDecoder's internal buffering is also correct.
				var q []*sppb.PartialResultSet
				for {
					item := r.q.pop()
					if item == nil {
						break
					}
					q = append(q, item)
				}
				if !reflect.DeepEqual(q, test.queue) {
					t.Errorf("%v: PartialResultSets still queued: \n%v\n, want \n%v\n", test.name, q, test.queue)
				}
				// Verify resume token.
				if test.resumeToken != nil && !reflect.DeepEqual(r.resumeToken, test.resumeToken) {
					t.Errorf("%v: Resume token is %v, want %v\n", test.name, r.resumeToken, test.resumeToken)
				}
				// Verify error message.
				if !reflect.DeepEqual(lastErr, test.wantErr) {
					t.Errorf("%v: got error %v, want %v", test.name, lastErr, test.wantErr)
				}
				// Proceed to next test
				continue nextTest
			}
			// Receive next decoded item.
			if r.next() {
				rs = append(rs, r.get())
			}
		}
	}
}

// Test state transitions of resumableStreamDecoder where state machine
// ends up to a blocking state(resumableStreamDecoder.Next blocks
// on blocking state).
func TestRsdBlockingStates(t *testing.T) {
	restore := setMaxBytesBetweenResumeTokens()
	defer restore()
	tests := []struct {
		name string
		msgs []testutil.MockCtlMsg // control messages the mock server will stream
		rpc  func(ct context.Context, resumeToken []byte) (streamingReceiver, error)
		sql  string
		// Expected values
		want         []*sppb.PartialResultSet      // PartialResultSets that should be returned to caller
		queue        []*sppb.PartialResultSet      // PartialResultSets that should be buffered
		resumeToken  []byte                        // Resume token that is maintained by resumableStreamDecoder
		stateHistory []resumableStreamDecoderState // State transition history of resumableStreamDecoder
		wantErr      error
	}{
		{
			// unConnected -> unConnected
			name: "unConnected -> unConnected",
			rpc: func(ct context.Context, resumeToken []byte) (streamingReceiver, error) {
				return nil, grpc.Errorf(codes.Unavailable, "trust me: server is unavailable")
			},
			sql:          "SELECT * from t_whatever",
			stateHistory: []resumableStreamDecoderState{unConnected, unConnected, unConnected},
			wantErr:      grpc.Errorf(codes.Unavailable, "trust me: server is unavailable"),
		},
		{
			// unConnected -> queueingRetryable
			name:         "unConnected -> queueingRetryable",
			sql:          "SELECT t.key key, t.value value FROM t_mock t",
			stateHistory: []resumableStreamDecoderState{queueingRetryable},
		},
		{
			// unConnected->queueingRetryable->queueingRetryable
			name: "unConnected->queueingRetryable->queueingRetryable",
			msgs: []testutil.MockCtlMsg{
				{},
				{Err: nil, ResumeToken: true},
				{Err: nil, ResumeToken: true},
				{},
			},
			sql: "SELECT t.key key, t.value value FROM t_mock t",
			want: []*sppb.PartialResultSet{
				{
					Metadata: kvMeta,
					Values: []*proto3.Value{
						{Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}},
						{Kind: &proto3.Value_StringValue{StringValue: valStr(0)}},
					},
				},
				{
					Metadata: kvMeta,
					Values: []*proto3.Value{
						{Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}},
						{Kind: &proto3.Value_StringValue{StringValue: valStr(1)}},
					},
					ResumeToken: testutil.EncodeResumeToken(1),
				},
				{
					Metadata: kvMeta,
					Values: []*proto3.Value{
						{Kind: &proto3.Value_StringValue{StringValue: keyStr(2)}},
						{Kind: &proto3.Value_StringValue{StringValue: valStr(2)}},
					},
					ResumeToken: testutil.EncodeResumeToken(2),
				},
			},
			queue: []*sppb.PartialResultSet{
				{
					Metadata: kvMeta,
					Values: []*proto3.Value{
						{Kind: &proto3.Value_StringValue{StringValue: keyStr(3)}},
						{Kind: &proto3.Value_StringValue{StringValue: valStr(3)}},
					},
				},
			},
			resumeToken: testutil.EncodeResumeToken(2),
			stateHistory: []resumableStreamDecoderState{
				queueingRetryable, // do RPC
				queueingRetryable, // got foo-00
				queueingRetryable, // got foo-01
				queueingRetryable, // foo-01, resume token
				queueingRetryable, // got foo-02
				queueingRetryable, // foo-02, resume token
				queueingRetryable, // got foo-03
			},
		},
		{
			// unConnected->queueingRetryable->queueingUnretryable->queueingRetryable->queueingRetryable
			name: "unConnected->queueingRetryable->queueingUnretryable->queueingRetryable->queueingRetryable",
			msgs: func() (m []testutil.MockCtlMsg) {
				for i := 0; i < maxBuffers+1; i++ {
					m = append(m, testutil.MockCtlMsg{})
				}
				m = append(m, testutil.MockCtlMsg{Err: nil, ResumeToken: true})
				m = append(m, testutil.MockCtlMsg{})
				return m
			}(),
			sql: "SELECT t.key key, t.value value FROM t_mock t",
			want: func() (s []*sppb.PartialResultSet) {
				for i := 0; i < maxBuffers+2; i++ {
					s = append(s, &sppb.PartialResultSet{
						Metadata: kvMeta,
						Values: []*proto3.Value{
							{Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}},
							{Kind: &proto3.Value_StringValue{StringValue: valStr(i)}},
						},
					})
				}
				s[maxBuffers+1].ResumeToken = testutil.EncodeResumeToken(maxBuffers + 1)
				return s
			}(),
			resumeToken: testutil.EncodeResumeToken(maxBuffers + 1),
			queue: []*sppb.PartialResultSet{
				{
					Metadata: kvMeta,
					Values: []*proto3.Value{
						{Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 2)}},
						{Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 2)}},
					},
				},
			},
			stateHistory: func() (s []resumableStreamDecoderState) {
				s = append(s, queueingRetryable) // do RPC
				for i := 0; i < maxBuffers; i++ {
					s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder filles up
				}
				for i := maxBuffers - 1; i < maxBuffers+1; i++ {
					// the first item fills up the queue and triggers state transition;
					// the second item is received under queueingUnretryable state.
					s = append(s, queueingUnretryable)
				}
				s = append(s, queueingUnretryable) // got (maxBuffers+1)th row under Unretryable state
				s = append(s, queueingRetryable)   // (maxBuffers+1)th row has resume token
				s = append(s, queueingRetryable)   // (maxBuffers+2)th row has no resume token
				return s
			}(),
		},
		{
			// unConnected->queueingRetryable->queueingUnretryable->finished
			name: "unConnected->queueingRetryable->queueingUnretryable->finished",
			msgs: func() (m []testutil.MockCtlMsg) {
				for i := 0; i < maxBuffers; i++ {
					m = append(m, testutil.MockCtlMsg{})
				}
				m = append(m, testutil.MockCtlMsg{Err: io.EOF, ResumeToken: false})
				return m
			}(),
			sql: "SELECT t.key key, t.value value FROM t_mock t",
			want: func() (s []*sppb.PartialResultSet) {
				for i := 0; i < maxBuffers; i++ {
					s = append(s, &sppb.PartialResultSet{
						Metadata: kvMeta,
						Values: []*proto3.Value{
							{Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}},
							{Kind: &proto3.Value_StringValue{StringValue: valStr(i)}},
						},
					})
				}
				return s
			}(),
			stateHistory: func() (s []resumableStreamDecoderState) {
				s = append(s, queueingRetryable) // do RPC
				for i := 0; i < maxBuffers; i++ {
					s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder fills up
				}
				s = append(s, queueingUnretryable) // last row triggers state change
				s = append(s, finished)            // query finishes
				return s
			}(),
		},
	}
	for _, test := range tests {
		// One mock Cloud Spanner server per test case.
		ms := testutil.NewMockCloudSpanner(t, trxTs)
		ms.Serve()
		opts := []grpc.DialOption{
			grpc.WithInsecure(),
		}
		cc, err := grpc.Dial(ms.Addr(), opts...)
		if err != nil {
			t.Fatalf("%v: Dial(%q) = %v", test.name, ms.Addr(), err)
		}
		mc := sppb.NewSpannerClient(cc)
		if test.rpc == nil {
			// Avoid using test.sql directly in closure because for loop changes test.
			sql := test.sql
			test.rpc = func(ct context.Context, resumeToken []byte) (streamingReceiver, error) {
				return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{
					Sql:         sql,
					ResumeToken: resumeToken,
				})
			}
		}
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		r := newResumableStreamDecoder(
			ctx,
			test.rpc,
		)
		// Override backoff to make the test run faster.
		r.backoff = exponentialBackoff{1 * time.Nanosecond, 1 * time.Nanosecond}
		// st is the set of observed state transitions.
		st := []resumableStreamDecoderState{}
		// q is the content of the decoder's partial result queue when expected number of state transitions are done.
		q := []*sppb.PartialResultSet{}
		var lastErr error
		// Once the expected number of state transitions are observed,
		// send a signal to channel stateDone.
		stateDone := make(chan int)
		// Set stateWitness to listen to state changes.
		hl := len(test.stateHistory) // To avoid data race on test.
		r.stateWitness = func(rs resumableStreamDecoderState) {
			select {
			case <-stateDone:
				// Noop after expected number of state transitions
			default:
				// Record state transitions.
				st = append(st, rs)
				if len(st) == hl {
					lastErr = r.lastErr()
					q = r.q.dump()
					close(stateDone)
				}
			}
		}
		// Let mock server stream given messages to resumableStreamDecoder.
		for _, m := range test.msgs {
			ms.AddMsg(m.Err, m.ResumeToken)
		}
		var rs []*sppb.PartialResultSet
		// Drain decoded items on a separate goroutine; r.next() blocks in
		// blocking states, which is exactly what this test exercises.
		go func() {
			for {
				if !r.next() {
					// Note that r.Next also exits on context cancel/timeout.
					return
				}
				rs = append(rs, r.get())
			}
		}()
		// Verify that resumableStreamDecoder reaches expected state.
		select {
		case <-stateDone: // Note that at this point, receiver is still blocking on r.next().
			// Check if resumableStreamDecoder carried out expected
			// state transitions.
			if !reflect.DeepEqual(st, test.stateHistory) {
				t.Errorf("%v: observed state transitions: \n%v\n, want \n%v\n", test.name, st, test.stateHistory)
			}
			// Check if resumableStreamDecoder returns expected array of
			// PartialResultSets.
			if !reflect.DeepEqual(rs, test.want) {
				t.Errorf("%v: received PartialResultSets: \n%v\n, want \n%v\n", test.name, rs, test.want)
			}
			// Verify that resumableStreamDecoder's internal buffering is also correct.
			if !reflect.DeepEqual(q, test.queue) {
				t.Errorf("%v: PartialResultSets still queued: \n%v\n, want \n%v\n", test.name, q, test.queue)
			}
			// Verify resume token.
			if test.resumeToken != nil && !reflect.DeepEqual(r.resumeToken, test.resumeToken) {
				t.Errorf("%v: Resume token is %v, want %v\n", test.name, r.resumeToken, test.resumeToken)
			}
			// Verify error message.
			if !reflect.DeepEqual(lastErr, test.wantErr) {
				t.Errorf("%v: got error %v, want %v", test.name, lastErr, test.wantErr)
			}
		case <-time.After(1 * time.Second):
			t.Errorf("%v: Timeout in waiting for state change", test.name)
		}
		ms.Stop()
		cc.Close()
	}
}

// sReceiver signals every receiving attempt through a channel,
// used by TestResumeToken to determine if the receiving of a certain
// PartialResultSet will be attempted next.
type sReceiver struct {
	c           chan int                               // receives one token per Recv() attempt
	rpcReceiver sppb.Spanner_ExecuteStreamingSqlClient // underlying gRPC stream being wrapped
}

// Recv() implements streamingReceiver.Recv for sReceiver: it signals the
// attempt on sr.c, then delegates to the wrapped gRPC stream.
func (sr *sReceiver) Recv() (*sppb.PartialResultSet, error) {
	sr.c <- 1
	return sr.rpcReceiver.Recv()
}

// waitn waits for nth receiving attempt from now on, until
// the signal for nth Recv() attempts is received or timeout.
// Note that because the way stream() works, the signal for the
// nth Recv() means that the previous n - 1 PartialResultSets
// has already been returned to caller or queued, if no error happened.
func (sr *sReceiver) waitn(n int) error {
	for i := 0; i < n; i++ {
		select {
		case <-sr.c:
		case <-time.After(10 * time.Second):
			return fmt.Errorf("timeout in waiting for %v-th Recv()", i+1)
		}
	}
	return nil
}

// Test the handling of resumableStreamDecoder.bytesBetweenResumeTokens.
func TestQueueBytes(t *testing.T) { restore := setMaxBytesBetweenResumeTokens() defer restore() ms := testutil.NewMockCloudSpanner(t, trxTs) ms.Serve() defer ms.Stop() opts := []grpc.DialOption{ grpc.WithInsecure(), } cc, err := grpc.Dial(ms.Addr(), opts...) if err != nil { t.Fatalf("Dial(%q) = %v", ms.Addr(), err) } defer cc.Close() mc := sppb.NewSpannerClient(cc) sr := &sReceiver{ c: make(chan int, 1000), // will never block in this test } wantQueueBytes := 0 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() r := newResumableStreamDecoder( ctx, func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { r, err := mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ Sql: "SELECT t.key key, t.value value FROM t_mock t", ResumeToken: resumeToken, }) sr.rpcReceiver = r return sr, err }, ) go func() { for r.next() { } }() // Let server send maxBuffers / 2 rows. for i := 0; i < maxBuffers/2; i++ { wantQueueBytes += proto.Size(&sppb.PartialResultSet{ Metadata: kvMeta, Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, }, }) ms.AddMsg(nil, false) } if err := sr.waitn(maxBuffers/2 + 1); err != nil { t.Fatalf("failed to wait for the first %v recv() calls: %v", maxBuffers, err) } if int32(wantQueueBytes) != r.bytesBetweenResumeTokens { t.Errorf("r.bytesBetweenResumeTokens = %v, want %v", r.bytesBetweenResumeTokens, wantQueueBytes) } // Now send a resume token to drain the queue. ms.AddMsg(nil, true) // Wait for all rows to be processes. if err := sr.waitn(1); err != nil { t.Fatalf("failed to wait for rows to be processed: %v", err) } if r.bytesBetweenResumeTokens != 0 { t.Errorf("r.bytesBetweenResumeTokens = %v, want 0", r.bytesBetweenResumeTokens) } // Let server send maxBuffers - 1 rows. 
wantQueueBytes = 0 for i := 0; i < maxBuffers-1; i++ { wantQueueBytes += proto.Size(&sppb.PartialResultSet{ Metadata: kvMeta, Values: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, }, }) ms.AddMsg(nil, false) } if err := sr.waitn(maxBuffers - 1); err != nil { t.Fatalf("failed to wait for %v rows to be processed: %v", maxBuffers-1, err) } if int32(wantQueueBytes) != r.bytesBetweenResumeTokens { t.Errorf("r.bytesBetweenResumeTokens = %v, want 0", r.bytesBetweenResumeTokens) } // Trigger a state transition: queueingRetryable -> queueingUnretryable. ms.AddMsg(nil, false) if err := sr.waitn(1); err != nil { t.Fatalf("failed to wait for state transition: %v", err) } if r.bytesBetweenResumeTokens != 0 { t.Errorf("r.bytesBetweenResumeTokens = %v, want 0", r.bytesBetweenResumeTokens) } } // Verify that client can deal with resume token correctly func TestResumeToken(t *testing.T) { restore := setMaxBytesBetweenResumeTokens() defer restore() ms := testutil.NewMockCloudSpanner(t, trxTs) ms.Serve() opts := []grpc.DialOption{ grpc.WithInsecure(), } cc, err := grpc.Dial(ms.Addr(), opts...) if err != nil { t.Fatalf("Dial(%q) = %v", ms.Addr(), err) } defer func() { ms.Stop() cc.Close() }() mc := sppb.NewSpannerClient(cc) sr := &sReceiver{ c: make(chan int, 1000), // will never block in this test } rows := []*Row{} done := make(chan int) streaming := func() { // Establish a stream to mock cloud spanner server. 
iter := stream(context.Background(), func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { r, err := mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ Sql: "SELECT t.key key, t.value value FROM t_mock t", ResumeToken: resumeToken, }) sr.rpcReceiver = r return sr, err }, nil, func(error) {}) defer iter.Stop() for { var row *Row row, err = iter.Next() if err == iterator.Done { err = nil break } if err != nil { break } rows = append(rows, row) } done <- 1 } go streaming() // Server streaming row 0 - 2, only row 1 has resume token. // Client will receive row 0 - 2, so it will try receiving for // 4 times (the last recv will block), and only row 0 - 1 will // be yielded. for i := 0; i < 3; i++ { if i == 1 { ms.AddMsg(nil, true) } else { ms.AddMsg(nil, false) } } // Wait for 4 receive attempts, as explained above. if err = sr.waitn(4); err != nil { t.Fatalf("failed to wait for row 0 - 2: %v", err) } want := []*Row{ { fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, }, }, { fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, }, }, } if !reflect.DeepEqual(rows, want) { t.Errorf("received rows: \n%v\n; but want\n%v\n", rows, want) } // Inject resumable failure. ms.AddMsg( grpc.Errorf(codes.Unavailable, "mock server unavailable"), false, ) // Test if client detects the resumable failure and retries. if err = sr.waitn(1); err != nil { t.Fatalf("failed to wait for client to retry: %v", err) } // Client has resumed the query, now server resend row 2. ms.AddMsg(nil, true) if err = sr.waitn(1); err != nil { t.Fatalf("failed to wait for resending row 2: %v", err) } // Now client should have received row 0 - 2. 
want = append(want, &Row{ fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: keyStr(2)}}, {Kind: &proto3.Value_StringValue{StringValue: valStr(2)}}, }, }) if !reflect.DeepEqual(rows, want) { t.Errorf("received rows: \n%v\n, want\n%v\n", rows, want) } // Sending 3rd - (maxBuffers+1)th rows without resume tokens, client should buffer them. for i := 3; i < maxBuffers+2; i++ { ms.AddMsg(nil, false) } if err = sr.waitn(maxBuffers - 1); err != nil { t.Fatalf("failed to wait for row 3-%v: %v", maxBuffers+1, err) } // Received rows should be unchanged. if !reflect.DeepEqual(rows, want) { t.Errorf("receive rows: \n%v\n, want\n%v\n", rows, want) } // Send (maxBuffers+2)th row to trigger state change of resumableStreamDecoder: // queueingRetryable -> queueingUnretryable ms.AddMsg(nil, false) if err = sr.waitn(1); err != nil { t.Fatalf("failed to wait for row %v: %v", maxBuffers+2, err) } // Client should yield row 3rd - (maxBuffers+2)th to application. Therefore, application should // see row 0 - (maxBuffers+2)th so far. for i := 3; i < maxBuffers+3; i++ { want = append(want, &Row{ fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, }, }) } if !reflect.DeepEqual(rows, want) { t.Errorf("received rows: \n%v\n; want\n%v\n", rows, want) } // Inject resumable error, but since resumableStreamDecoder is already at queueingUnretryable // state, query will just fail. ms.AddMsg( grpc.Errorf(codes.Unavailable, "mock server wants some sleep"), false, ) select { case <-done: case <-time.After(10 * time.Second): t.Fatalf("timeout in waiting for failed query to return.") } if wantErr := toSpannerError(grpc.Errorf(codes.Unavailable, "mock server wants some sleep")); !reflect.DeepEqual(err, wantErr) { t.Fatalf("stream() returns error: %v, but want error: %v", err, wantErr) } // Reconnect to mock Cloud Spanner. 
rows = []*Row{} go streaming() // Let server send two rows without resume token. for i := maxBuffers + 3; i < maxBuffers+5; i++ { ms.AddMsg(nil, false) } if err = sr.waitn(3); err != nil { t.Fatalf("failed to wait for row %v - %v: %v", maxBuffers+3, maxBuffers+5, err) } if len(rows) > 0 { t.Errorf("client received some rows unexpectedly: %v, want nothing", rows) } // Let server end the query. ms.AddMsg(io.EOF, false) select { case <-done: case <-time.After(10 * time.Second): t.Fatalf("timeout in waiting for failed query to return") } if err != nil { t.Fatalf("stream() returns unexpected error: %v, but want no error", err) } // Verify if a normal server side EOF flushes all queued rows. want = []*Row{ { fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 3)}}, {Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 3)}}, }, }, { fields: kvMeta.RowType.Fields, vals: []*proto3.Value{ {Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 4)}}, {Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 4)}}, }, }, } if !reflect.DeepEqual(rows, want) { t.Errorf("received rows: \n%v\n; but want\n%v\n", rows, want) } } // Verify that streaming query get retried upon real gRPC server transport failures. func TestGrpcReconnect(t *testing.T) { restore := setMaxBytesBetweenResumeTokens() defer restore() ms := testutil.NewMockCloudSpanner(t, trxTs) ms.Serve() defer ms.Stop() cc, err := grpc.Dial(ms.Addr(), grpc.WithInsecure()) if err != nil { t.Fatalf("Dial(%q) = %v", ms.Addr(), err) } defer cc.Close() mc := sppb.NewSpannerClient(cc) retry := make(chan int) row := make(chan int) go func() { r := 0 // Establish a stream to mock cloud spanner server. iter := stream(context.Background(), func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { if r > 0 { // This RPC attempt is a retry, signal it. 
retry <- r } r++ return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ Sql: "SELECT t.key key, t.value value FROM t_mock t", ResumeToken: resumeToken, }) }, nil, func(error) {}) defer iter.Stop() for { _, err = iter.Next() if err == iterator.Done { err = nil break } if err != nil { break } row <- 0 } }() // Add a message and wait for the receipt. ms.AddMsg(nil, true) select { case <-row: case <-time.After(10 * time.Second): t.Fatalf("expect stream to be established within 10 seconds, but it didn't") } // Error injection: force server to close all connections. ms.Stop() // Test to see if client respond to the real RPC failure correctly by // retrying RPC. select { case r, ok := <-retry: if ok && r == 1 { break } t.Errorf("retry count = %v, want 1", r) case <-time.After(10 * time.Second): t.Errorf("client library failed to respond after 10 seconds, aborting") return } } // Test cancel/timeout for client operations. func TestCancelTimeout(t *testing.T) { restore := setMaxBytesBetweenResumeTokens() defer restore() ms := testutil.NewMockCloudSpanner(t, trxTs) ms.Serve() defer ms.Stop() opts := []grpc.DialOption{ grpc.WithInsecure(), } cc, err := grpc.Dial(ms.Addr(), opts...) defer cc.Close() if err != nil { t.Fatalf("Dial(%q) = %v", ms.Addr(), err) } mc := sppb.NewSpannerClient(cc) done := make(chan int) go func() { for { ms.AddMsg(nil, true) } }() // Test cancelling query. ctx, cancel := context.WithCancel(context.Background()) go func() { // Establish a stream to mock cloud spanner server. 
iter := stream(ctx, func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ Sql: "SELECT t.key key, t.value value FROM t_mock t", ResumeToken: resumeToken, }) }, nil, func(error) {}) defer iter.Stop() for { _, err = iter.Next() if err == iterator.Done { break } if err != nil { done <- 0 break } } }() cancel() select { case <-done: if ErrCode(err) != codes.Canceled { t.Errorf("streaming query is canceled and returns error %v, want error code %v", err, codes.Canceled) } case <-time.After(1 * time.Second): t.Errorf("query doesn't exit timely after being cancelled") } // Test query timeout. ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) go func() { // Establish a stream to mock cloud spanner server. iter := stream(ctx, func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ Sql: "SELECT t.key key, t.value value FROM t_mock t", ResumeToken: resumeToken, }) }, nil, func(error) {}) defer iter.Stop() for { _, err = iter.Next() if err == iterator.Done { err = nil break } if err != nil { break } } done <- 0 }() select { case <-done: if wantErr := codes.DeadlineExceeded; ErrCode(err) != wantErr { t.Errorf("streaming query timeout returns error %v, want error code %v", err, wantErr) } case <-time.After(2 * time.Second): t.Errorf("query doesn't timeout as expected") } } func TestRowIteratorDo(t *testing.T) { restore := setMaxBytesBetweenResumeTokens() defer restore() ms := testutil.NewMockCloudSpanner(t, trxTs) ms.Serve() defer ms.Stop() cc, err := grpc.Dial(ms.Addr(), grpc.WithInsecure()) if err != nil { t.Fatalf("Dial(%q) = %v", ms.Addr(), err) } defer cc.Close() mc := sppb.NewSpannerClient(cc) for i := 0; i < 3; i++ { ms.AddMsg(nil, false) } ms.AddMsg(io.EOF, true) nRows := 0 iter := stream(context.Background(), func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { return 
mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ Sql: "SELECT t.key key, t.value value FROM t_mock t", ResumeToken: resumeToken, }) }, nil, func(error) {}) err = iter.Do(func(r *Row) error { nRows++; return nil }) if err != nil { t.Errorf("Using Do: %v", err) } if nRows != 3 { t.Errorf("got %d rows, want 3", nRows) } } func TestIteratorStopEarly(t *testing.T) { ctx := context.Background() restore := setMaxBytesBetweenResumeTokens() defer restore() ms := testutil.NewMockCloudSpanner(t, trxTs) ms.Serve() defer ms.Stop() cc, err := grpc.Dial(ms.Addr(), grpc.WithInsecure()) if err != nil { t.Fatalf("Dial(%q) = %v", ms.Addr(), err) } defer cc.Close() mc := sppb.NewSpannerClient(cc) ms.AddMsg(nil, false) ms.AddMsg(nil, false) ms.AddMsg(io.EOF, true) iter := stream(ctx, func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ Sql: "SELECT t.key key, t.value value FROM t_mock t", ResumeToken: resumeToken, }) }, nil, func(error) {}) _, err = iter.Next() if err != nil { t.Fatalf("before Stop: %v", err) } iter.Stop() // Stop sets r.err to the FailedPrecondition error "Next called after Stop". // Override that here so this test can observe the Canceled error from the stream. iter.err = nil iter.Next() if ErrCode(iter.streamd.lastErr()) != codes.Canceled { t.Errorf("after Stop: got %v, wanted Canceled", err) } } func TestIteratorWithError(t *testing.T) { injected := errors.New("Failed iterator") iter := RowIterator{err: injected} defer iter.Stop() if _, err := iter.Next(); err != injected { t.Fatalf("Expected error: %v, got %v", injected, err) } } golang-google-cloud-0.9.0/spanner/retry.go000066400000000000000000000127011312234511600205060ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "fmt" "strings" "time" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" edpb "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" ) const ( retryInfoKey = "google.rpc.retryinfo-bin" ) // errRetry returns an unavailable error under error namespace EsOther. It is a // generic retryable error that is used to mask and recover unretryable errors // in a retry loop. func errRetry(err error) error { if se, ok := err.(*Error); ok { return &Error{codes.Unavailable, fmt.Sprintf("generic Cloud Spanner retryable error: { %v }", se.Error()), se.trailers} } return spannerErrorf(codes.Unavailable, "generic Cloud Spanner retryable error: { %v }", err.Error()) } // isErrorClosing reports whether the error is generated by gRPC layer talking to a closed server. func isErrorClosing(err error) bool { if err == nil { return false } if ErrCode(err) == codes.Internal && strings.Contains(ErrDesc(err), "transport is closing") { // Handle the case when connection is closed unexpectedly. // TODO: once gRPC is able to categorize // this as retryable error, we should stop parsing the // error message here. return true } return false } // isErrorRST reports whether the error is generated by gRPC client receiving a RST frame from server. 
func isErrorRST(err error) bool { if err == nil { return false } if ErrCode(err) == codes.Internal && strings.Contains(ErrDesc(err), "stream terminated by RST_STREAM") { // TODO: once gRPC is able to categorize this error as "go away" or "retryable", // we should stop parsing the error message. return true } return false } // isErrorUnexpectedEOF returns true if error is generated by gRPC layer // receiving io.EOF unexpectedly. func isErrorUnexpectedEOF(err error) bool { if err == nil { return false } if ErrCode(err) == codes.Unknown && strings.Contains(ErrDesc(err), "unexpected EOF") { // Unexpected EOF is an transport layer issue that // could be recovered by retries. The most likely // scenario is a flaky RecvMsg() call due to network // issues. // TODO: once gRPC is able to categorize // this as retryable error, we should stop parsing the // error message here. return true } return false } // isErrorUnavailable returns true if the error is about server being unavailable. func isErrorUnavailable(err error) bool { if err == nil { return false } if ErrCode(err) == codes.Unavailable { return true } return false } // isRetryable returns true if the Cloud Spanner error being checked is a retryable error. func isRetryable(err error) bool { if isErrorClosing(err) { return true } if isErrorUnexpectedEOF(err) { return true } if isErrorRST(err) { return true } if isErrorUnavailable(err) { return true } return false } // errContextCanceled returns *spanner.Error for canceled context. func errContextCanceled(ctx context.Context, lastErr error) error { if ctx.Err() == context.DeadlineExceeded { return spannerErrorf(codes.DeadlineExceeded, "%v, lastErr is <%v>", ctx.Err(), lastErr) } return spannerErrorf(codes.Canceled, "%v, lastErr is <%v>", ctx.Err(), lastErr) } // extractRetryDelay extracts retry backoff if present. 
func extractRetryDelay(err error) (time.Duration, bool) { trailers := errTrailers(err) if trailers == nil { return 0, false } elem, ok := trailers[retryInfoKey] if !ok || len(elem) <= 0 { return 0, false } _, b, err := metadata.DecodeKeyValue(retryInfoKey, elem[0]) if err != nil { return 0, false } var retryInfo edpb.RetryInfo if proto.Unmarshal([]byte(b), &retryInfo) != nil { return 0, false } delay, err := ptypes.Duration(retryInfo.RetryDelay) if err != nil { return 0, false } return delay, true } // runRetryable keeps attempting to run f until one of the following happens: // 1) f returns nil error or an unretryable error; // 2) context is cancelled or timeout. // TODO: consider using https://github.com/googleapis/gax-go once it // becomes available internally. func runRetryable(ctx context.Context, f func(context.Context) error) error { var funcErr error retryCount := 0 for { select { case <-ctx.Done(): // Do context check here so that even f() failed to do // so (for example, gRPC implementation bug), the loop // can still have a chance to exit as expected. return errContextCanceled(ctx, funcErr) default: } funcErr = f(ctx) if funcErr == nil { return nil } if isRetryable(funcErr) { // Error is retryable, do exponential backoff and continue. b, ok := extractRetryDelay(funcErr) if !ok { b = defaultBackoff.delay(retryCount) } select { case <-ctx.Done(): return errContextCanceled(ctx, funcErr) case <-time.After(b): } retryCount++ continue } // Error isn't retryable / no error, return immediately. return toSpannerError(funcErr) } } golang-google-cloud-0.9.0/spanner/retry_test.go000066400000000000000000000072751312234511600215570ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "errors" "fmt" "reflect" "testing" "time" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" edpb "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" ) // Test if runRetryable loop deals with various errors correctly. func TestRetry(t *testing.T) { if testing.Short() { t.SkipNow() } responses := []error{ grpc.Errorf(codes.Internal, "transport is closing"), grpc.Errorf(codes.Unknown, "unexpected EOF"), grpc.Errorf(codes.Internal, "stream terminated by RST_STREAM with error code: 2"), grpc.Errorf(codes.Unavailable, "service is currently unavailable"), errRetry(fmt.Errorf("just retry it")), } err := runRetryable(context.Background(), func(ct context.Context) error { var r error if len(responses) > 0 { r = responses[0] responses = responses[1:] } return r }) if err != nil { t.Errorf("runRetryable should be able to survive all retryable errors, but it returns %v", err) } // Unretryable errors injErr := errors.New("this is unretryable") err = runRetryable(context.Background(), func(ct context.Context) error { return injErr }) if wantErr := toSpannerError(injErr); !reflect.DeepEqual(err, wantErr) { t.Errorf("runRetryable returns error %v, want %v", err, wantErr) } // Timeout ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() retryErr := errRetry(fmt.Errorf("still retrying")) err = runRetryable(ctx, func(ct context.Context) error { // Expect to trigger timeout in retryable runner after 10 
executions. <-time.After(100 * time.Millisecond) // Let retryable runner to retry so that timeout will eventually happen. return retryErr }) // Check error code and error message if wantErrCode, wantErr := codes.DeadlineExceeded, errContextCanceled(ctx, retryErr); ErrCode(err) != wantErrCode || !reflect.DeepEqual(err, wantErr) { t.Errorf("=\n<%v, %v>, want:\n<%v, %v>", ErrCode(err), err, wantErrCode, wantErr) } // Cancellation ctx, cancel = context.WithCancel(context.Background()) retries := 3 retryErr = errRetry(fmt.Errorf("retry before cancel")) err = runRetryable(ctx, func(ct context.Context) error { retries-- if retries == 0 { cancel() } return retryErr }) // Check error code, error message, retry count if wantErrCode, wantErr := codes.Canceled, errContextCanceled(ctx, retryErr); ErrCode(err) != wantErrCode || !reflect.DeepEqual(err, wantErr) || retries != 0 { t.Errorf("=\n<%v, %v, %v>, want:\n<%v, %v, %v>", ErrCode(err), err, retries, wantErrCode, wantErr, 0) } } func TestRetryInfo(t *testing.T) { b, _ := proto.Marshal(&edpb.RetryInfo{ RetryDelay: ptypes.DurationProto(time.Second), }) trailers := map[string]string{ retryInfoKey: string(b), } gotDelay, ok := extractRetryDelay(errRetry(toSpannerErrorWithMetadata(grpc.Errorf(codes.Aborted, ""), metadata.New(trailers)))) if !ok || !reflect.DeepEqual(time.Second, gotDelay) { t.Errorf(" = <%t, %v>, want ", ok, gotDelay, time.Second) } } golang-google-cloud-0.9.0/spanner/row.go000066400000000000000000000242441312234511600201550ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "fmt" "reflect" proto3 "github.com/golang/protobuf/ptypes/struct" sppb "google.golang.org/genproto/googleapis/spanner/v1" "google.golang.org/grpc/codes" ) // A Row is a view of a row of data produced by a Cloud Spanner read. // // A row consists of a number of columns; the number depends on the columns // used to construct the read. // // The column values can be accessed by index, where the indices are with // respect to the columns. For instance, if the read specified // []string{"photo_id", "caption", "metadata"}, then each row will // contain three columns: the 0th column corresponds to "photo_id", the // 1st column corresponds to "caption", etc. // // Column values are decoded by using one of the Column, ColumnByName, or // Columns methods. The valid values passed to these methods depend on the // column type. For example: // // var photoID int64 // err := row.Column(0, &photoID) // Decode column 0 as an integer. // // var caption string // err := row.Column(1, &caption) // Decode column 1 as a string. // // // The above two operations at once. // err := row.Columns(&photoID, &caption) // // Supported types and their corresponding Cloud Spanner column type(s) are: // // *string(not NULL), *NullString - STRING // *[]NullString - STRING ARRAY // *[]byte - BYTES // *[][]byte - BYTES ARRAY // *int64(not NULL), *NullInt64 - INT64 // *[]NullInt64 - INT64 ARRAY // *bool(not NULL), *NullBool - BOOL // *[]NullBool - BOOL ARRAY // *float64(not NULL), *NullFloat64 - FLOAT64 // *[]NullFloat64 - FLOAT64 ARRAY // *time.Time(not NULL), *NullTime - TIMESTAMP // *[]NullTime - TIMESTAMP ARRAY // *Date(not NULL), *NullDate - DATE // *[]NullDate - DATE ARRAY // *[]*some_go_struct, *[]NullRow - STRUCT ARRAY // *GenericColumnValue - any Cloud Spanner type // // For TIMESTAMP columns, returned time.Time object will be in UTC. 
// // To fetch an array of BYTES, pass a *[][]byte. To fetch an array of // (sub)rows, pass a *[]spanner.NullRow or a *[]*some_go_struct where // some_go_struct holds all information of the subrow, see spannr.Row.ToStruct // for the mapping between Cloud Spanner row and Go struct. To fetch an array of // other types, pass a *[]spanner.Null* type of the appropriate type. Use // *GenericColumnValue when you don't know in advance what column type to // expect. // // Row decodes the row contents lazily; as a result, each call to a getter has // a chance of returning an error. // // A column value may be NULL if the corresponding value is not present in // Cloud Spanner. The spanner.Null* types (spanner.NullInt64 et al.) allow fetching // values that may be null. A NULL BYTES can be fetched into a *[]byte as nil. // It is an error to fetch a NULL value into any other type. type Row struct { fields []*sppb.StructType_Field vals []*proto3.Value // keep decoded for now } // errNamesValuesMismatch returns error for when columnNames count is not equal // to columnValues count. func errNamesValuesMismatch(columnNames []string, columnValues []interface{}) error { return spannerErrorf(codes.FailedPrecondition, "different number of names(%v) and values(%v)", len(columnNames), len(columnValues)) } // NewRow returns a Row containing the supplied data. This can be useful for // mocking Cloud Spanner Read and Query responses for unit testing. 
func NewRow(columnNames []string, columnValues []interface{}) (*Row, error) { if len(columnValues) != len(columnNames) { return nil, errNamesValuesMismatch(columnNames, columnValues) } r := Row{ fields: make([]*sppb.StructType_Field, len(columnValues)), vals: make([]*proto3.Value, len(columnValues)), } for i := range columnValues { val, typ, err := encodeValue(columnValues[i]) if err != nil { return nil, err } r.fields[i] = &sppb.StructType_Field{ Name: columnNames[i], Type: typ, } r.vals[i] = val } return &r, nil } // Size is the number of columns in the row. func (r *Row) Size() int { return len(r.fields) } // ColumnName returns the name of column i, or empty string for invalid column. func (r *Row) ColumnName(i int) string { if i < 0 || i >= len(r.fields) { return "" } return r.fields[i].Name } // ColumnIndex returns the index of the column with the given name. The // comparison is case-sensitive. func (r *Row) ColumnIndex(name string) (int, error) { found := false var index int if len(r.vals) != len(r.fields) { return 0, errFieldsMismatchVals(r) } for i, f := range r.fields { if f == nil { return 0, errNilColType(i) } if name == f.Name { if found { return 0, errDupColName(name) } found = true index = i } } if !found { return 0, errColNotFound(name) } return index, nil } // ColumnNames returns all column names of the row. func (r *Row) ColumnNames() []string { var n []string for _, c := range r.fields { n = append(n, c.Name) } return n } // errColIdxOutOfRange returns error for requested column index is out of the // range of the target Row's columns. func errColIdxOutOfRange(i int, r *Row) error { return spannerErrorf(codes.OutOfRange, "column index %d out of range [0,%d)", i, len(r.vals)) } // errDecodeColumn returns error for not being able to decode a indexed column. 
func errDecodeColumn(i int, err error) error { if err == nil { return nil } se, ok := toSpannerError(err).(*Error) if !ok { return spannerErrorf(codes.InvalidArgument, "failed to decode column %v, error = <%v>", i, err) } se.decorate(fmt.Sprintf("failed to decode column %v", i)) return se } // errFieldsMismatchVals returns error for field count isn't equal to value count in a Row. func errFieldsMismatchVals(r *Row) error { return spannerErrorf(codes.FailedPrecondition, "row has different number of fields(%v) and values(%v)", len(r.fields), len(r.vals)) } // errNilColType returns error for column type for column i being nil in the row. func errNilColType(i int) error { return spannerErrorf(codes.FailedPrecondition, "column(%v)'s type is nil", i) } // Column fetches the value from the ith column, decoding it into ptr. // See the Row documentation for the list of acceptable argument types. // see Client.ReadWriteTransaction for an example. func (r *Row) Column(i int, ptr interface{}) error { if len(r.vals) != len(r.fields) { return errFieldsMismatchVals(r) } if i < 0 || i >= len(r.fields) { return errColIdxOutOfRange(i, r) } if r.fields[i] == nil { return errNilColType(i) } if err := decodeValue(r.vals[i], r.fields[i].Type, ptr); err != nil { return errDecodeColumn(i, err) } return nil } // errDupColName returns error for duplicated column name in the same row. func errDupColName(n string) error { return spannerErrorf(codes.FailedPrecondition, "ambiguous column name %q", n) } // errColNotFound returns error for not being able to find a named column. func errColNotFound(n string) error { return spannerErrorf(codes.NotFound, "column %q not found", n) } // ColumnByName fetches the value from the named column, decoding it into ptr. // See the Row documentation for the list of acceptable argument types. 
func (r *Row) ColumnByName(name string, ptr interface{}) error { index, err := r.ColumnIndex(name) if err != nil { return err } return r.Column(index, ptr) } // errNumOfColValue returns error for providing wrong number of values to Columns. func errNumOfColValue(n int, r *Row) error { return spannerErrorf(codes.InvalidArgument, "Columns(): number of arguments (%d) does not match row size (%d)", n, len(r.vals)) } // Columns fetches all the columns in the row at once. // // The value of the kth column will be decoded into the kth argument to // Columns. See above for the list of acceptable argument types. The number of // arguments must be equal to the number of columns. Pass nil to specify that a // column should be ignored. func (r *Row) Columns(ptrs ...interface{}) error { if len(ptrs) != len(r.vals) { return errNumOfColValue(len(ptrs), r) } if len(r.vals) != len(r.fields) { return errFieldsMismatchVals(r) } for i, p := range ptrs { if p == nil { continue } if err := r.Column(i, p); err != nil { return err } } return nil } // errToStructArgType returns error for p not having the correct data type(pointer to Go struct) to // be the argument of Row.ToStruct. func errToStructArgType(p interface{}) error { return spannerErrorf(codes.InvalidArgument, "ToStruct(): type %T is not a valid pointer to Go struct", p) } // ToStruct fetches the columns in a row into the fields of a struct. // The rules for mapping a row's columns into a struct's exported fields // are as the following: // 1. If a field has a `spanner: "column_name"` tag, then decode column // 'column_name' into the field. A special case is the `spanner: "-"` // tag, which instructs ToStruct to ignore the field during decoding. // 2. Otherwise, if the name of a field matches the name of a column (ignoring case), // decode the column into the field. // // The fields of the destination struct can be of any type that is acceptable // to (*spanner.Row).Column. 
// // Slice and pointer fields will be set to nil if the source column // is NULL, and a non-nil value if the column is not NULL. To decode NULL // values of other types, use one of the spanner.Null* as the type of the // destination field. func (r *Row) ToStruct(p interface{}) error { // Check if p is a pointer to a struct if t := reflect.TypeOf(p); t == nil || t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return errToStructArgType(p) } if len(r.vals) != len(r.fields) { return errFieldsMismatchVals(r) } // Call decodeStruct directly to decode the row as a typed proto.ListValue. return decodeStruct( &sppb.StructType{Fields: r.fields}, &proto3.ListValue{Values: r.vals}, p, ) } golang-google-cloud-0.9.0/spanner/row_test.go000066400000000000000000001272661312234511600212240ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "encoding/base64" "reflect" "strconv" "strings" "testing" "time" "cloud.google.com/go/civil" proto "github.com/golang/protobuf/proto" proto3 "github.com/golang/protobuf/ptypes/struct" sppb "google.golang.org/genproto/googleapis/spanner/v1" ) var ( tm = time.Date(2016, 11, 15, 0, 0, 0, 0, time.UTC) dt, _ = civil.ParseDate("2016-11-15") // row contains a column for each unique Cloud Spanner type. 
row = Row{ []*sppb.StructType_Field{ // STRING / STRING ARRAY {"STRING", stringType()}, {"NULL_STRING", stringType()}, {"STRING_ARRAY", listType(stringType())}, {"NULL_STRING_ARRAY", listType(stringType())}, // BYTES / BYTES ARRAY {"BYTES", bytesType()}, {"NULL_BYTES", bytesType()}, {"BYTES_ARRAY", listType(bytesType())}, {"NULL_BYTES_ARRAY", listType(bytesType())}, // INT64 / INT64 ARRAY {"INT64", intType()}, {"NULL_INT64", intType()}, {"INT64_ARRAY", listType(intType())}, {"NULL_INT64_ARRAY", listType(intType())}, // BOOL / BOOL ARRAY {"BOOL", boolType()}, {"NULL_BOOL", boolType()}, {"BOOL_ARRAY", listType(boolType())}, {"NULL_BOOL_ARRAY", listType(boolType())}, // FLOAT64 / FLOAT64 ARRAY {"FLOAT64", floatType()}, {"NULL_FLOAT64", floatType()}, {"FLOAT64_ARRAY", listType(floatType())}, {"NULL_FLOAT64_ARRAY", listType(floatType())}, // TIMESTAMP / TIMESTAMP ARRAY {"TIMESTAMP", timeType()}, {"NULL_TIMESTAMP", timeType()}, {"TIMESTAMP_ARRAY", listType(timeType())}, {"NULL_TIMESTAMP_ARRAY", listType(timeType())}, // DATE / DATE ARRAY {"DATE", dateType()}, {"NULL_DATE", dateType()}, {"DATE_ARRAY", listType(dateType())}, {"NULL_DATE_ARRAY", listType(dateType())}, // STRUCT ARRAY { "STRUCT_ARRAY", listType( structType( mkField("Col1", intType()), mkField("Col2", floatType()), mkField("Col3", stringType()), ), ), }, { "NULL_STRUCT_ARRAY", listType( structType( mkField("Col1", intType()), mkField("Col2", floatType()), mkField("Col3", stringType()), ), ), }, }, []*proto3.Value{ // STRING / STRING ARRAY stringProto("value"), nullProto(), listProto(stringProto("value1"), nullProto(), stringProto("value3")), nullProto(), // BYTES / BYTES ARRAY bytesProto([]byte("value")), nullProto(), listProto(bytesProto([]byte("value1")), nullProto(), bytesProto([]byte("value3"))), nullProto(), // INT64 / INT64 ARRAY intProto(17), nullProto(), listProto(intProto(1), intProto(2), nullProto()), nullProto(), // BOOL / BOOL ARRAY boolProto(true), nullProto(), listProto(nullProto(), 
boolProto(true), boolProto(false)), nullProto(), // FLOAT64 / FLOAT64 ARRAY floatProto(1.7), nullProto(), listProto(nullProto(), nullProto(), floatProto(1.7)), nullProto(), // TIMESTAMP / TIMESTAMP ARRAY timeProto(tm), nullProto(), listProto(nullProto(), timeProto(tm)), nullProto(), // DATE / DATE ARRAY dateProto(dt), nullProto(), listProto(nullProto(), dateProto(dt)), nullProto(), // STRUCT ARRAY listProto( nullProto(), listProto(intProto(3), floatProto(33.3), stringProto("three")), nullProto(), ), nullProto(), }, } ) // Test helpers for getting column values. func TestColumnValues(t *testing.T) { vals := []interface{}{} wantVals := []interface{}{} // Test getting column values. for i, wants := range [][]interface{}{ // STRING / STRING ARRAY {"value", NullString{"value", true}}, {NullString{}}, {[]NullString{{"value1", true}, {}, {"value3", true}}}, {[]NullString(nil)}, // BYTES / BYTES ARRAY {[]byte("value")}, {[]byte(nil)}, {[][]byte{[]byte("value1"), nil, []byte("value3")}}, {[][]byte(nil)}, // INT64 / INT64 ARRAY {int64(17), NullInt64{17, true}}, {NullInt64{}}, {[]NullInt64{{1, true}, {2, true}, {}}}, {[]NullInt64(nil)}, // BOOL / BOOL ARRAY {true, NullBool{true, true}}, {NullBool{}}, {[]NullBool{{}, {true, true}, {false, true}}}, {[]NullBool(nil)}, // FLOAT64 / FLOAT64 ARRAY {1.7, NullFloat64{1.7, true}}, {NullFloat64{}}, {[]NullFloat64{{}, {}, {1.7, true}}}, {[]NullFloat64(nil)}, // TIMESTAMP / TIMESTAMP ARRAY {tm, NullTime{tm, true}}, {NullTime{}}, {[]NullTime{{}, {tm, true}}}, {[]NullTime(nil)}, // DATE / DATE ARRAY {dt, NullDate{dt, true}}, {NullDate{}}, {[]NullDate{{}, {dt, true}}}, {[]NullDate(nil)}, // STRUCT ARRAY { []*struct { Col1 NullInt64 Col2 NullFloat64 Col3 string }{ nil, &struct { Col1 NullInt64 Col2 NullFloat64 Col3 string }{ NullInt64{3, true}, NullFloat64{33.3, true}, "three", }, nil, }, []NullRow{ {}, { Row: Row{ fields: []*sppb.StructType_Field{ mkField("Col1", intType()), mkField("Col2", floatType()), mkField("Col3", stringType()), }, 
vals: []*proto3.Value{ intProto(3), floatProto(33.3), stringProto("three"), }, }, Valid: true, }, {}, }, }, { []*struct { Col1 NullInt64 Col2 NullFloat64 Col3 string }(nil), []NullRow(nil), }, } { for j, want := range wants { // Prepare Value vector to test Row.Columns. if j == 0 { vals = append(vals, reflect.New(reflect.TypeOf(want)).Interface()) wantVals = append(wantVals, want) } // Column gotp := reflect.New(reflect.TypeOf(want)) err := row.Column(i, gotp.Interface()) if err != nil { t.Errorf("\t row.Column(%v, %T) returns error: %v, want nil", i, gotp.Interface(), err) } if got := reflect.Indirect(gotp).Interface(); !reflect.DeepEqual(got, want) { t.Errorf("\t row.Column(%v, %T) retrives %v, want %v", i, gotp.Interface(), got, want) } // ColumnByName gotp = reflect.New(reflect.TypeOf(want)) err = row.ColumnByName(row.fields[i].Name, gotp.Interface()) if err != nil { t.Errorf("\t row.ColumnByName(%v, %T) returns error: %v, want nil", row.fields[i].Name, gotp.Interface(), err) } if got := reflect.Indirect(gotp).Interface(); !reflect.DeepEqual(got, want) { t.Errorf("\t row.ColumnByName(%v, %T) retrives %v, want %v", row.fields[i].Name, gotp.Interface(), got, want) } } } // Test Row.Columns. if err := row.Columns(vals...); err != nil { t.Errorf("row.Columns() returns error: %v, want nil", err) } for i, want := range wantVals { if got := reflect.Indirect(reflect.ValueOf(vals[i])).Interface(); !reflect.DeepEqual(got, want) { t.Errorf("\t got %v(%T) for column[%v], want %v(%T)", got, got, row.fields[i].Name, want, want) } } } // Test decoding into nil destination. 
func TestNilDst(t *testing.T) { for i, test := range []struct { r *Row dst interface{} wantErr error structDst interface{} wantToStructErr error }{ { &Row{ []*sppb.StructType_Field{ {"Col0", stringType()}, }, []*proto3.Value{stringProto("value")}, }, nil, errDecodeColumn(0, errNilDst(nil)), nil, errToStructArgType(nil), }, { &Row{ []*sppb.StructType_Field{ {"Col0", stringType()}, }, []*proto3.Value{stringProto("value")}, }, (*string)(nil), errDecodeColumn(0, errNilDst((*string)(nil))), (*struct{ STRING string })(nil), errNilDst((*struct{ STRING string })(nil)), }, { &Row{ []*sppb.StructType_Field{ { "Col0", listType( structType( mkField("Col1", intType()), mkField("Col2", floatType()), ), ), }, }, []*proto3.Value{listProto( listProto(intProto(3), floatProto(33.3)), )}, }, (*[]*struct { Col1 int Col2 float64 })(nil), errDecodeColumn(0, errNilDst((*[]*struct { Col1 int Col2 float64 })(nil))), (*struct { StructArray []*struct { Col1 int Col2 float64 } `spanner:"STRUCT_ARRAY"` })(nil), errNilDst((*struct { StructArray []*struct { Col1 int Col2 float64 } `spanner:"STRUCT_ARRAY"` })(nil)), }, } { if gotErr := test.r.Column(0, test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { t.Errorf("%v: test.r.Column() returns error %v, want %v", i, gotErr, test.wantErr) } if gotErr := test.r.ColumnByName("Col0", test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { t.Errorf("%v: test.r.ColumnByName() returns error %v, want %v", i, gotErr, test.wantErr) } // Row.Columns(T) should return nil on T == nil, otherwise, it should return test.wantErr. 
wantColumnsErr := test.wantErr if test.dst == nil { wantColumnsErr = nil } if gotErr := test.r.Columns(test.dst); !reflect.DeepEqual(gotErr, wantColumnsErr) { t.Errorf("%v: test.r.Columns() returns error %v, want %v", i, gotErr, wantColumnsErr) } if gotErr := test.r.ToStruct(test.structDst); !reflect.DeepEqual(gotErr, test.wantToStructErr) { t.Errorf("%v: test.r.ToStruct() returns error %v, want %v", i, gotErr, test.wantToStructErr) } } } // Test decoding NULL columns using Go types that don't support NULL. func TestNullTypeErr(t *testing.T) { var tm time.Time ntoi := func(n string) int { for i, f := range row.fields { if f.Name == n { return i } } t.Errorf("cannot find column name %q in row", n) return 0 } for _, test := range []struct { colName string dst interface{} }{ { "NULL_STRING", proto.String(""), }, { "NULL_INT64", proto.Int64(0), }, { "NULL_BOOL", proto.Bool(false), }, { "NULL_FLOAT64", proto.Float64(0.0), }, { "NULL_TIMESTAMP", &tm, }, { "NULL_DATE", &dt, }, } { wantErr := errDecodeColumn(ntoi(test.colName), errDstNotForNull(test.dst)) if gotErr := row.ColumnByName(test.colName, test.dst); !reflect.DeepEqual(gotErr, wantErr) { t.Errorf("row.ColumnByName(%v) returns error %v, want %v", test.colName, gotErr, wantErr) } } } // Test using wrong destination type in column decoders. func TestColumnTypeErr(t *testing.T) { // badDst cannot hold any of the column values. badDst := &struct{}{} for i, f := range row.fields { // For each of the columns, try to decode it into badDst. 
tc := f.Type.Code isArray := strings.Contains(f.Name, "ARRAY") if isArray { tc = f.Type.ArrayElementType.Code } wantErr := errDecodeColumn(i, errTypeMismatch(tc, isArray, badDst)) if gotErr := row.Column(i, badDst); !reflect.DeepEqual(gotErr, wantErr) { t.Errorf("Column(%v): decoding into destination with wrong type %T returns error %v, want %v", i, badDst, gotErr, wantErr) } if gotErr := row.ColumnByName(f.Name, badDst); !reflect.DeepEqual(gotErr, wantErr) { t.Errorf("ColumnByName(%v): decoding into destination with wrong type %T returns error %v, want %v", f.Name, badDst, gotErr, wantErr) } } wantErr := errDecodeColumn(1, errTypeMismatch(sppb.TypeCode_STRING, false, badDst)) // badDst is used to receive column 1. vals := []interface{}{nil, badDst} // Row.Column() is expected to fail at column 1. // Skip decoding the rest columns by providing nils as the destinations. for i := 2; i < len(row.fields); i++ { vals = append(vals, nil) } if gotErr := row.Columns(vals...); !reflect.DeepEqual(gotErr, wantErr) { t.Errorf("Columns(): decoding column 1 with wrong type %T returns error %v, want %v", badDst, gotErr, wantErr) } } // Test the handling of invalid column decoding requests which cannot be mapped to correct column(s). 
// TestInvalidColumnRequest verifies that Column, ColumnByName, Columns and
// ToStruct reject requests that cannot be mapped to a valid column: an
// out-of-range index, an unknown column name, a wrong argument count,
// duplicated column names, and an unnamed field. Each case's returned error
// is compared for exact equality against the expected error constructor.
func TestInvalidColumnRequest(t *testing.T) {
	for _, test := range []struct {
		desc    string       // human-readable description of the scenario
		f       func() error // invocation expected to fail
		wantErr error        // exact error value the invocation must return
	}{
		{
			"Request column index is out of range",
			func() error {
				return row.Column(10000, &struct{}{})
			},
			errColIdxOutOfRange(10000, &row),
		},
		{
			"Cannot find the named column",
			func() error {
				return row.ColumnByName("string", &struct{}{})
			},
			errColNotFound("string"),
		},
		{
			"Not enough arguments to call row.Columns()",
			func() error {
				return row.Columns(nil, nil)
			},
			errNumOfColValue(2, &row),
		},
		{
			"Call ColumnByName on row with duplicated column names",
			func() error {
				var s string
				// Two fields share the name "Val", making the lookup ambiguous.
				r := &Row{
					[]*sppb.StructType_Field{
						{"Val", stringType()},
						{"Val", stringType()},
					},
					[]*proto3.Value{stringProto("value1"), stringProto("value2")},
				}
				return r.ColumnByName("Val", &s)
			},
			errDupColName("Val"),
		},
		{
			"Call ToStruct on row with duplicated column names",
			func() error {
				s := &struct {
					Val string
				}{}
				r := &Row{
					[]*sppb.StructType_Field{
						{"Val", stringType()},
						{"Val", stringType()},
					},
					[]*proto3.Value{stringProto("value1"), stringProto("value2")},
				}
				return r.ToStruct(s)
			},
			errDupSpannerField("Val", &sppb.StructType{
				Fields: []*sppb.StructType_Field{
					{"Val", stringType()},
					{"Val", stringType()},
				},
			}),
		},
		{
			"Call ToStruct on a row with unnamed field",
			func() error {
				s := &struct {
					Val string
				}{}
				// A field with an empty name cannot be mapped to a struct field.
				r := &Row{
					[]*sppb.StructType_Field{
						{"", stringType()},
					},
					[]*proto3.Value{stringProto("value1")},
				}
				return r.ToStruct(s)
			},
			errUnnamedField(&sppb.StructType{Fields: []*sppb.StructType_Field{{"", stringType()}}}, 0),
		},
	} {
		if gotErr := test.f(); !reflect.DeepEqual(gotErr, test.wantErr) {
			t.Errorf("%v: test.f() returns error %v, want %v", test.desc, gotErr, test.wantErr)
		}
	}
}

// Test decoding the row with row.ToStruct into an invalid destination.
func TestToStructInvalidDst(t *testing.T) { for _, test := range []struct { desc string dst interface{} wantErr error }{ { "Decode row as STRUCT into int32", proto.Int(1), errToStructArgType(proto.Int(1)), }, { "Decode row as STRUCT to nil Go struct", (*struct{})(nil), errNilDst((*struct{})(nil)), }, { "Decode row as STRUCT to Go struct with duplicated fields for the PK column", &struct { PK1 string `spanner:"STRING"` PK2 string `spanner:"STRING"` }{}, errNoOrDupGoField(&struct { PK1 string `spanner:"STRING"` PK2 string `spanner:"STRING"` }{}, "STRING"), }, { "Decode row as STRUCT to Go struct with no field for the PK column", &struct { PK1 string `spanner:"_STRING"` }{}, errNoOrDupGoField(&struct { PK1 string `spanner:"_STRING"` }{}, "STRING"), }, { "Decode row as STRUCT to Go struct with wrong type for the PK column", &struct { PK1 int64 `spanner:"STRING"` }{}, errDecodeStructField(&sppb.StructType{Fields: row.fields}, "STRING", errTypeMismatch(sppb.TypeCode_STRING, false, proto.Int64(0))), }, } { if gotErr := row.ToStruct(test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { t.Errorf("%v: decoding:\ngot %v\nwant %v", test.desc, gotErr, test.wantErr) } } } // Test decoding a broken row. func TestBrokenRow(t *testing.T) { for i, test := range []struct { row *Row dst interface{} wantErr error }{ { // A row with no field. &Row{ []*sppb.StructType_Field{}, []*proto3.Value{stringProto("value")}, }, &NullString{"value", true}, errFieldsMismatchVals(&Row{ []*sppb.StructType_Field{}, []*proto3.Value{stringProto("value")}, }), }, { // A row with nil field. &Row{ []*sppb.StructType_Field{nil}, []*proto3.Value{stringProto("value")}, }, &NullString{"value", true}, errNilColType(0), }, { // Field is not nil, but its type is nil. 
&Row{ []*sppb.StructType_Field{ { "Col0", nil, }, }, []*proto3.Value{listProto(stringProto("value1"), stringProto("value2"))}, }, &[]NullString{}, errDecodeColumn(0, errNilSpannerType()), }, { // Field is not nil, field type is not nil, but it is an array and its array element type is nil. &Row{ []*sppb.StructType_Field{ { "Col0", &sppb.Type{ Code: sppb.TypeCode_ARRAY, }, }, }, []*proto3.Value{listProto(stringProto("value1"), stringProto("value2"))}, }, &[]NullString{}, errDecodeColumn(0, errNilArrElemType(&sppb.Type{Code: sppb.TypeCode_ARRAY})), }, { // Field specifies valid type, value is nil. &Row{ []*sppb.StructType_Field{ { "Col0", intType(), }, }, []*proto3.Value{nil}, }, &NullInt64{1, true}, errDecodeColumn(0, errNilSrc()), }, { // Field specifies INT64 type, value is having a nil Kind. &Row{ []*sppb.StructType_Field{ { "Col0", intType(), }, }, []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, }, &NullInt64{1, true}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), }, { // Field specifies INT64 type, but value is for Number type. &Row{ []*sppb.StructType_Field{ { "Col0", intType(), }, }, []*proto3.Value{floatProto(1.0)}, }, &NullInt64{1, true}, errDecodeColumn(0, errSrcVal(floatProto(1.0), "String")), }, { // Field specifies INT64 type, but value is wrongly encoded. &Row{ []*sppb.StructType_Field{ { "Col0", intType(), }, }, []*proto3.Value{stringProto("&1")}, }, proto.Int64(0), errDecodeColumn(0, errBadEncoding(stringProto("&1"), func() error { _, err := strconv.ParseInt("&1", 10, 64) return err }())), }, { // Field specifies INT64 type, but value is wrongly encoded. &Row{ []*sppb.StructType_Field{ { "Col0", intType(), }, }, []*proto3.Value{stringProto("&1")}, }, &NullInt64{}, errDecodeColumn(0, errBadEncoding(stringProto("&1"), func() error { _, err := strconv.ParseInt("&1", 10, 64) return err }())), }, { // Field specifies STRING type, but value is having a nil Kind. 
&Row{ []*sppb.StructType_Field{ { "Col0", stringType(), }, }, []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, }, &NullString{"value", true}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), }, { // Field specifies STRING type, but value is for ARRAY type. &Row{ []*sppb.StructType_Field{ { "Col0", stringType(), }, }, []*proto3.Value{listProto(stringProto("value"))}, }, &NullString{"value", true}, errDecodeColumn(0, errSrcVal(listProto(stringProto("value")), "String")), }, { // Field specifies FLOAT64 type, value is having a nil Kind. &Row{ []*sppb.StructType_Field{ { "Col0", floatType(), }, }, []*proto3.Value{{Kind: (*proto3.Value_NumberValue)(nil)}}, }, &NullFloat64{1.0, true}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_NumberValue)(nil)}, "Number")), }, { // Field specifies FLOAT64 type, but value is for BOOL type. &Row{ []*sppb.StructType_Field{ { "Col0", floatType(), }, }, []*proto3.Value{boolProto(true)}, }, &NullFloat64{1.0, true}, errDecodeColumn(0, errSrcVal(boolProto(true), "Number")), }, { // Field specifies FLOAT64 type, but value is wrongly encoded. &Row{ []*sppb.StructType_Field{ { "Col0", floatType(), }, }, []*proto3.Value{stringProto("nan")}, }, &NullFloat64{}, errDecodeColumn(0, errUnexpectedNumStr("nan")), }, { // Field specifies FLOAT64 type, but value is wrongly encoded. &Row{ []*sppb.StructType_Field{ { "Col0", floatType(), }, }, []*proto3.Value{stringProto("nan")}, }, proto.Float64(0), errDecodeColumn(0, errUnexpectedNumStr("nan")), }, { // Field specifies BYTES type, value is having a nil Kind. &Row{ []*sppb.StructType_Field{ { "Col0", bytesType(), }, }, []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, }, &[]byte{}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), }, { // Field specifies BYTES type, but value is for BOOL type. 
&Row{ []*sppb.StructType_Field{ { "Col0", bytesType(), }, }, []*proto3.Value{boolProto(false)}, }, &[]byte{}, errDecodeColumn(0, errSrcVal(boolProto(false), "String")), }, { // Field specifies BYTES type, but value is wrongly encoded. &Row{ []*sppb.StructType_Field{ { "Col0", bytesType(), }, }, []*proto3.Value{stringProto("&&")}, }, &[]byte{}, errDecodeColumn(0, errBadEncoding(stringProto("&&"), func() error { _, err := base64.StdEncoding.DecodeString("&&") return err }())), }, { // Field specifies BOOL type, value is having a nil Kind. &Row{ []*sppb.StructType_Field{ { "Col0", boolType(), }, }, []*proto3.Value{{Kind: (*proto3.Value_BoolValue)(nil)}}, }, &NullBool{false, true}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_BoolValue)(nil)}, "Bool")), }, { // Field specifies BOOL type, but value is for STRING type. &Row{ []*sppb.StructType_Field{ { "Col0", boolType(), }, }, []*proto3.Value{stringProto("false")}, }, &NullBool{false, true}, errDecodeColumn(0, errSrcVal(stringProto("false"), "Bool")), }, { // Field specifies TIMESTAMP type, value is having a nil Kind. &Row{ []*sppb.StructType_Field{ { "Col0", timeType(), }, }, []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, }, &NullTime{time.Now(), true}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), }, { // Field specifies TIMESTAMP type, but value is for BOOL type. &Row{ []*sppb.StructType_Field{ { "Col0", timeType(), }, }, []*proto3.Value{boolProto(false)}, }, &NullTime{time.Now(), true}, errDecodeColumn(0, errSrcVal(boolProto(false), "String")), }, { // Field specifies TIMESTAMP type, but value is invalid timestamp. &Row{ []*sppb.StructType_Field{ { "Col0", timeType(), }, }, []*proto3.Value{stringProto("junk")}, }, &NullTime{time.Now(), true}, errDecodeColumn(0, errBadEncoding(stringProto("junk"), func() error { _, err := time.Parse(time.RFC3339Nano, "junk") return err }())), }, { // Field specifies DATE type, value is having a nil Kind. 
&Row{ []*sppb.StructType_Field{ { "Col0", dateType(), }, }, []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, }, &NullDate{civil.Date{}, true}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), }, { // Field specifies DATE type, but value is for BOOL type. &Row{ []*sppb.StructType_Field{ { "Col0", dateType(), }, }, []*proto3.Value{boolProto(false)}, }, &NullDate{civil.Date{}, true}, errDecodeColumn(0, errSrcVal(boolProto(false), "String")), }, { // Field specifies DATE type, but value is invalid timestamp. &Row{ []*sppb.StructType_Field{ { "Col0", dateType(), }, }, []*proto3.Value{stringProto("junk")}, }, &NullDate{civil.Date{}, true}, errDecodeColumn(0, errBadEncoding(stringProto("junk"), func() error { _, err := civil.ParseDate("junk") return err }())), }, { // Field specifies ARRAY type, value is having a nil Kind. &Row{ []*sppb.StructType_Field{ { "Col0", listType(intType()), }, }, []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, }, &[]NullInt64{}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), }, { // Field specifies ARRAY type, value is having a nil ListValue. &Row{ []*sppb.StructType_Field{ { "Col0", listType(intType()), }, }, []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, }, &[]NullInt64{}, errDecodeColumn(0, errNilListValue("INT64")), }, { // Field specifies ARRAY type, but value is for BYTES type. &Row{ []*sppb.StructType_Field{ { "Col0", listType(intType()), }, }, []*proto3.Value{bytesProto([]byte("value"))}, }, &[]NullInt64{}, errDecodeColumn(0, errSrcVal(bytesProto([]byte("value")), "List")), }, { // Field specifies ARRAY type, but value is for ARRAY type. 
&Row{ []*sppb.StructType_Field{ { "Col0", listType(intType()), }, }, []*proto3.Value{listProto(boolProto(true))}, }, &[]NullInt64{}, errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), "INT64", errSrcVal(boolProto(true), "String"))), }, { // Field specifies ARRAY type, value is having a nil Kind. &Row{ []*sppb.StructType_Field{ { "Col0", listType(stringType()), }, }, []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, }, &[]NullString{}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), }, { // Field specifies ARRAY type, value is having a nil ListValue. &Row{ []*sppb.StructType_Field{ { "Col0", listType(stringType()), }, }, []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, }, &[]NullString{}, errDecodeColumn(0, errNilListValue("STRING")), }, { // Field specifies ARRAY type, but value is for BOOL type. &Row{ []*sppb.StructType_Field{ { "Col0", listType(stringType()), }, }, []*proto3.Value{boolProto(true)}, }, &[]NullString{}, errDecodeColumn(0, errSrcVal(boolProto(true), "List")), }, { // Field specifies ARRAY type, but value is for ARRAY type. &Row{ []*sppb.StructType_Field{ { "Col0", listType(stringType()), }, }, []*proto3.Value{listProto(boolProto(true))}, }, &[]NullString{}, errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), "STRING", errSrcVal(boolProto(true), "String"))), }, { // Field specifies ARRAY type, value is having a nil Kind. &Row{ []*sppb.StructType_Field{ { "Col0", listType(floatType()), }, }, []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, }, &[]NullFloat64{}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), }, { // Field specifies ARRAY type, value is having a nil ListValue. &Row{ []*sppb.StructType_Field{ { "Col0", listType(floatType()), }, }, []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, }, &[]NullFloat64{}, errDecodeColumn(0, errNilListValue("FLOAT64")), }, { // Field specifies ARRAY type, but value is for STRING type. 
&Row{ []*sppb.StructType_Field{ { "Col0", listType(floatType()), }, }, []*proto3.Value{stringProto("value")}, }, &[]NullFloat64{}, errDecodeColumn(0, errSrcVal(stringProto("value"), "List")), }, { // Field specifies ARRAY type, but value is for ARRAY type. &Row{ []*sppb.StructType_Field{ { "Col0", listType(floatType()), }, }, []*proto3.Value{listProto(boolProto(true))}, }, &[]NullFloat64{}, errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), "FLOAT64", errSrcVal(boolProto(true), "Number"))), }, { // Field specifies ARRAY type, value is having a nil Kind. &Row{ []*sppb.StructType_Field{ { "Col0", listType(bytesType()), }, }, []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, }, &[][]byte{}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), }, { // Field specifies ARRAY type, value is having a nil ListValue. &Row{ []*sppb.StructType_Field{ { "Col0", listType(bytesType()), }, }, []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, }, &[][]byte{}, errDecodeColumn(0, errNilListValue("BYTES")), }, { // Field specifies ARRAY type, but value is for FLOAT64 type. &Row{ []*sppb.StructType_Field{ { "Col0", listType(bytesType()), }, }, []*proto3.Value{floatProto(1.0)}, }, &[][]byte{}, errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), }, { // Field specifies ARRAY type, but value is for ARRAY type. &Row{ []*sppb.StructType_Field{ { "Col0", listType(bytesType()), }, }, []*proto3.Value{listProto(floatProto(1.0))}, }, &[][]byte{}, errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), "BYTES", errSrcVal(floatProto(1.0), "String"))), }, { // Field specifies ARRAY type, value is having a nil Kind. &Row{ []*sppb.StructType_Field{ { "Col0", listType(boolType()), }, }, []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, }, &[]NullBool{}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), }, { // Field specifies ARRAY type, value is having a nil ListValue. 
&Row{ []*sppb.StructType_Field{ { "Col0", listType(boolType()), }, }, []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, }, &[]NullBool{}, errDecodeColumn(0, errNilListValue("BOOL")), }, { // Field specifies ARRAY type, but value is for FLOAT64 type. &Row{ []*sppb.StructType_Field{ { "Col0", listType(boolType()), }, }, []*proto3.Value{floatProto(1.0)}, }, &[]NullBool{}, errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), }, { // Field specifies ARRAY type, but value is for ARRAY type. &Row{ []*sppb.StructType_Field{ { "Col0", listType(boolType()), }, }, []*proto3.Value{listProto(floatProto(1.0))}, }, &[]NullBool{}, errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), "BOOL", errSrcVal(floatProto(1.0), "Bool"))), }, { // Field specifies ARRAY type, value is having a nil Kind. &Row{ []*sppb.StructType_Field{ { "Col0", listType(timeType()), }, }, []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, }, &[]NullTime{}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), }, { // Field specifies ARRAY type, value is having a nil ListValue. &Row{ []*sppb.StructType_Field{ { "Col0", listType(timeType()), }, }, []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, }, &[]NullTime{}, errDecodeColumn(0, errNilListValue("TIMESTAMP")), }, { // Field specifies ARRAY type, but value is for FLOAT64 type. &Row{ []*sppb.StructType_Field{ { "Col0", listType(timeType()), }, }, []*proto3.Value{floatProto(1.0)}, }, &[]NullTime{}, errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), }, { // Field specifies ARRAY type, but value is for ARRAY type. &Row{ []*sppb.StructType_Field{ { "Col0", listType(timeType()), }, }, []*proto3.Value{listProto(floatProto(1.0))}, }, &[]NullTime{}, errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), "TIMESTAMP", errSrcVal(floatProto(1.0), "String"))), }, { // Field specifies ARRAY type, value is having a nil Kind. 
&Row{ []*sppb.StructType_Field{ { "Col0", listType(dateType()), }, }, []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, }, &[]NullDate{}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), }, { // Field specifies ARRAY type, value is having a nil ListValue. &Row{ []*sppb.StructType_Field{ { "Col0", listType(dateType()), }, }, []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, }, &[]NullDate{}, errDecodeColumn(0, errNilListValue("DATE")), }, { // Field specifies ARRAY type, but value is for FLOAT64 type. &Row{ []*sppb.StructType_Field{ { "Col0", listType(dateType()), }, }, []*proto3.Value{floatProto(1.0)}, }, &[]NullDate{}, errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), }, { // Field specifies ARRAY type, but value is for ARRAY type. &Row{ []*sppb.StructType_Field{ { "Col0", listType(dateType()), }, }, []*proto3.Value{listProto(floatProto(1.0))}, }, &[]NullDate{}, errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), "DATE", errSrcVal(floatProto(1.0), "String"))), }, { // Field specifies ARRAY type, value is having a nil Kind. &Row{ []*sppb.StructType_Field{ { "Col0", listType( structType( mkField("Col1", intType()), mkField("Col2", floatType()), mkField("Col3", stringType()), ), ), }, }, []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, }, &[]*struct { Col1 int64 Col2 float64 Col3 string }{}, errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), }, { // Field specifies ARRAY type, value is having a nil ListValue. &Row{ []*sppb.StructType_Field{ { "Col0", listType( structType( mkField("Col1", intType()), mkField("Col2", floatType()), mkField("Col3", stringType()), ), ), }, }, []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, }, &[]*struct { Col1 int64 Col2 float64 Col3 string }{}, errDecodeColumn(0, errNilListValue("STRUCT")), }, { // Field specifies ARRAY type, value is having a nil ListValue. 
&Row{ []*sppb.StructType_Field{ { "Col0", listType( structType( mkField("Col1", intType()), mkField("Col2", floatType()), mkField("Col3", stringType()), ), ), }, }, []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, }, &[]NullRow{}, errDecodeColumn(0, errNilListValue("STRUCT")), }, { // Field specifies ARRAY type, value is for BYTES type. &Row{ []*sppb.StructType_Field{ { "Col0", listType( structType( mkField("Col1", intType()), mkField("Col2", floatType()), mkField("Col3", stringType()), ), ), }, }, []*proto3.Value{bytesProto([]byte("value"))}, }, &[]*struct { Col1 int64 Col2 float64 Col3 string }{}, errDecodeColumn(0, errSrcVal(bytesProto([]byte("value")), "List")), }, { // Field specifies ARRAY type, value is for BYTES type. &Row{ []*sppb.StructType_Field{ { "Col0", listType( structType( mkField("Col1", intType()), mkField("Col2", floatType()), mkField("Col3", stringType()), ), ), }, }, []*proto3.Value{listProto(bytesProto([]byte("value")))}, }, &[]NullRow{}, errDecodeColumn(0, errNotStructElement(0, bytesProto([]byte("value")))), }, { // Field specifies ARRAY type, value is for ARRAY type. &Row{ []*sppb.StructType_Field{ { "Col0", listType( structType( mkField("Col1", intType()), mkField("Col2", floatType()), mkField("Col3", stringType()), ), ), }, }, []*proto3.Value{listProto(bytesProto([]byte("value")))}, }, &[]*struct { Col1 int64 Col2 float64 Col3 string }{}, errDecodeColumn(0, errDecodeArrayElement(0, bytesProto([]byte("value")), "STRUCT", errSrcVal(bytesProto([]byte("value")), "List"))), }, { // Field specifies ARRAY, but is having nil StructType. 
&Row{ []*sppb.StructType_Field{ { "Col0", listType( &sppb.Type{Code: sppb.TypeCode_STRUCT}, ), }, }, []*proto3.Value{listProto(listProto(intProto(1), floatProto(2.0), stringProto("3")))}, }, &[]*struct { Col1 int64 Col2 float64 Col3 string }{}, errDecodeColumn(0, errDecodeArrayElement(0, listProto(intProto(1), floatProto(2.0), stringProto("3")), "STRUCT", errNilSpannerStructType())), }, { // Field specifies ARRAY, but the second struct value is for BOOL type instead of FLOAT64. &Row{ []*sppb.StructType_Field{ { "Col0", listType( structType( mkField("Col1", intType()), mkField("Col2", floatType()), mkField("Col3", stringType()), ), ), }, }, []*proto3.Value{listProto(listProto(intProto(1), boolProto(true), stringProto("3")))}, }, &[]*struct { Col1 int64 Col2 float64 Col3 string }{}, errDecodeColumn( 0, errDecodeArrayElement( 0, listProto(intProto(1), boolProto(true), stringProto("3")), "STRUCT", errDecodeStructField( &sppb.StructType{ Fields: []*sppb.StructType_Field{ mkField("Col1", intType()), mkField("Col2", floatType()), mkField("Col3", stringType()), }, }, "Col2", errSrcVal(boolProto(true), "Number"), ), ), ), }, } { if gotErr := test.row.Column(0, test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { t.Errorf("%v: test.row.Column(0) got error %v, want %v", i, gotErr, test.wantErr) } if gotErr := test.row.ColumnByName("Col0", test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { t.Errorf("%v: test.row.ColumnByName(%q) got error %v, want %v", i, "Col0", gotErr, test.wantErr) } if gotErr := test.row.Columns(test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { t.Errorf("%v: test.row.Columns(%T) got error %v, want %v", i, test.dst, gotErr, test.wantErr) } } } // Test Row.ToStruct(). 
func TestToStruct(t *testing.T) { s := []struct { // STRING / STRING ARRAY PrimaryKey string `spanner:"STRING"` NullString NullString `spanner:"NULL_STRING"` StringArray []NullString `spanner:"STRING_ARRAY"` NullStringArray []NullString `spanner:"NULL_STRING_ARRAY"` // BYTES / BYTES ARRAY Bytes []byte `spanner:"BYTES"` NullBytes []byte `spanner:"NULL_BYTES"` BytesArray [][]byte `spanner:"BYTES_ARRAY"` NullBytesArray [][]byte `spanner:"NULL_BYTES_ARRAY"` // INT64 / INT64 ARRAY Int64 int64 `spanner:"INT64"` NullInt64 NullInt64 `spanner:"NULL_INT64"` Int64Array []NullInt64 `spanner:"INT64_ARRAY"` NullInt64Array []NullInt64 `spanner:"NULL_INT64_ARRAY"` // BOOL / BOOL ARRAY Bool bool `spanner:"BOOL"` NullBool NullBool `spanner:"NULL_BOOL"` BoolArray []NullBool `spanner:"BOOL_ARRAY"` NullBoolArray []NullBool `spanner:"NULL_BOOL_ARRAY"` // FLOAT64 / FLOAT64 ARRAY Float64 float64 `spanner:"FLOAT64"` NullFloat64 NullFloat64 `spanner:"NULL_FLOAT64"` Float64Array []NullFloat64 `spanner:"FLOAT64_ARRAY"` NullFloat64Array []NullFloat64 `spanner:"NULL_FLOAT64_ARRAY"` // TIMESTAMP / TIMESTAMP ARRAY Timestamp time.Time `spanner:"TIMESTAMP"` NullTimestamp NullTime `spanner:"NULL_TIMESTAMP"` TimestampArray []NullTime `spanner:"TIMESTAMP_ARRAY"` NullTimestampArray []NullTime `spanner:"NULL_TIMESTAMP_ARRAY"` // DATE / DATE ARRAY Date civil.Date `spanner:"DATE"` NullDate NullDate `spanner:"NULL_DATE"` DateArray []NullDate `spanner:"DATE_ARRAY"` NullDateArray []NullDate `spanner:"NULL_DATE_ARRAY"` // STRUCT ARRAY StructArray []*struct { Col1 int64 Col2 float64 Col3 string } `spanner:"STRUCT_ARRAY"` NullStructArray []*struct { Col1 int64 Col2 float64 Col3 string } `spanner:"NULL_STRUCT_ARRAY"` }{ {}, // got { // STRING / STRING ARRAY "value", NullString{}, []NullString{{"value1", true}, {}, {"value3", true}}, []NullString(nil), // BYTES / BYTES ARRAY []byte("value"), []byte(nil), [][]byte{[]byte("value1"), nil, []byte("value3")}, [][]byte(nil), // INT64 / INT64 ARRAY int64(17), 
NullInt64{}, []NullInt64{{int64(1), true}, {int64(2), true}, {}}, []NullInt64(nil), // BOOL / BOOL ARRAY true, NullBool{}, []NullBool{{}, {true, true}, {false, true}}, []NullBool(nil), // FLOAT64 / FLOAT64 ARRAY 1.7, NullFloat64{}, []NullFloat64{{}, {}, {1.7, true}}, []NullFloat64(nil), // TIMESTAMP / TIMESTAMP ARRAY tm, NullTime{}, []NullTime{{}, {tm, true}}, []NullTime(nil), // DATE / DATE ARRAY dt, NullDate{}, []NullDate{{}, {dt, true}}, []NullDate(nil), // STRUCT ARRAY []*struct { Col1 int64 Col2 float64 Col3 string }{ nil, &struct { Col1 int64 Col2 float64 Col3 string }{3, 33.3, "three"}, nil, }, []*struct { Col1 int64 Col2 float64 Col3 string }(nil), }, // want } err := row.ToStruct(&s[0]) if err != nil { t.Errorf("row.ToStruct() returns error: %v, want nil", err) } if !reflect.DeepEqual(s[0], s[1]) { t.Errorf("row.ToStruct() fetches struct %v, want %v", s[0], s[1]) } } // Test helpers for getting column names. func TestColumnNameAndIndex(t *testing.T) { // Test Row.Size(). if rs := row.Size(); rs != len(row.fields) { t.Errorf("row.Size() returns %v, want %v", rs, len(row.fields)) } // Test Row.Size() on empty Row. if rs := (&Row{}).Size(); rs != 0 { t.Errorf("empty_row.Size() returns %v, want %v", rs, 0) } // Test Row.ColumnName() for i, col := range row.fields { if cn := row.ColumnName(i); cn != col.Name { t.Errorf("row.ColumnName(%v) returns %q, want %q", i, cn, col.Name) } goti, err := row.ColumnIndex(col.Name) if err != nil { t.Errorf("ColumnIndex(%q) error %v", col.Name, err) continue } if goti != i { t.Errorf("ColumnIndex(%q) = %d, want %d", col.Name, goti, i) } } // Test Row.ColumnName on empty Row. if cn := (&Row{}).ColumnName(0); cn != "" { t.Errorf("empty_row.ColumnName(%v) returns %q, want %q", 0, cn, "") } // Test Row.ColumnIndex on empty Row. 
if _, err := (&Row{}).ColumnIndex(""); err == nil { t.Error("empty_row.ColumnIndex returns nil, want error") } } func TestNewRow(t *testing.T) { for _, test := range []struct { names []string values []interface{} want *Row wantErr error }{ { want: &Row{fields: []*sppb.StructType_Field{}, vals: []*proto3.Value{}}, }, { names: []string{}, values: []interface{}{}, want: &Row{fields: []*sppb.StructType_Field{}, vals: []*proto3.Value{}}, }, { names: []string{"a", "b"}, values: []interface{}{}, want: nil, wantErr: errNamesValuesMismatch([]string{"a", "b"}, []interface{}{}), }, { names: []string{"a", "b", "c"}, values: []interface{}{5, "abc", GenericColumnValue{listType(intType()), listProto(intProto(91), nullProto(), intProto(87))}}, want: &Row{ []*sppb.StructType_Field{ {"a", intType()}, {"b", stringType()}, {"c", listType(intType())}, }, []*proto3.Value{ intProto(5), stringProto("abc"), listProto(intProto(91), nullProto(), intProto(87)), }, }, }, } { got, err := NewRow(test.names, test.values) if !reflect.DeepEqual(err, test.wantErr) { t.Errorf("NewRow(%v,%v).err = %s, want %s", test.names, test.values, err, test.wantErr) continue } if !reflect.DeepEqual(got, test.want) { t.Errorf("NewRow(%v,%v) = %s, want %s", test.names, test.values, got, test.want) continue } } } golang-google-cloud-0.9.0/spanner/session.go000066400000000000000000000736421312234511600210370ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package spanner import ( "container/heap" "container/list" "fmt" "math/rand" "strings" "sync" "time" log "github.com/golang/glog" "golang.org/x/net/context" sppb "google.golang.org/genproto/googleapis/spanner/v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" ) // sessionHandle is an interface for transactions to access Cloud Spanner sessions safely. It is generated by sessionPool.take(). type sessionHandle struct { // mu guarantees that inner session object is returned / destroyed only once. mu sync.Mutex // session is a pointer to a session object. Transactions never need to access it directly. session *session } // recycle gives the inner session object back to its home session pool. It is safe to call recycle multiple times but only the first one would take effect. func (sh *sessionHandle) recycle() { sh.mu.Lock() defer sh.mu.Unlock() if sh.session == nil { // sessionHandle has already been recycled. return } sh.session.recycle() sh.session = nil } // getID gets the Cloud Spanner session ID from the internal session object. getID returns empty string if the sessionHandle is nil or the inner session // object has been released by recycle / destroy. func (sh *sessionHandle) getID() string { sh.mu.Lock() defer sh.mu.Unlock() if sh.session == nil { // sessionHandle has already been recycled/destroyed. return "" } return sh.session.getID() } // getClient gets the Cloud Spanner RPC client associated with the session ID in sessionHandle. func (sh *sessionHandle) getClient() sppb.SpannerClient { sh.mu.Lock() defer sh.mu.Unlock() if sh.session == nil { return nil } return sh.session.client } // getMetadata returns the metadata associated with the session in sessionHandle. func (sh *sessionHandle) getMetadata() metadata.MD { sh.mu.Lock() defer sh.mu.Unlock() if sh.session == nil { return nil } return sh.session.md } // getTransactionID returns the transaction id in the session if available. 
func (sh *sessionHandle) getTransactionID() transactionID { sh.mu.Lock() defer sh.mu.Unlock() if sh.session == nil { return nil } return sh.session.tx } // destroy destroys the inner session object. It is safe to call destroy multiple times and only the first call would attempt to // destroy the inner session object. func (sh *sessionHandle) destroy() { sh.mu.Lock() s := sh.session sh.session = nil sh.mu.Unlock() if s == nil { // sessionHandle has already been destroyed. return } s.destroy(false) } // session wraps a Cloud Spanner session ID through which transactions are created and executed. type session struct { // client is the RPC channel to Cloud Spanner. It is set only once during session's creation. client sppb.SpannerClient // id is the unique id of the session in Cloud Spanner. It is set only once during session's creation. id string // pool is the session's home session pool where it was created. It is set only once during session's creation. pool *sessionPool // createTime is the timestamp of the session's creation. It is set only once during session's creation. createTime time.Time // mu protects the following fields from concurrent access: both healthcheck workers and transactions can modify them. mu sync.Mutex // valid marks the validity of a session. valid bool // hcIndex is the index of the session inside the global healthcheck queue. If hcIndex < 0, session has been unregistered from the queue. hcIndex int // idleList is the linkedlist node which links the session to its home session pool's idle list. If idleList == nil, the // session is not in idle list. idleList *list.Element // nextCheck is the timestamp of next scheduled healthcheck of the session. It is maintained by the global health checker. nextCheck time.Time // checkingHelath is true if currently this session is being processed by health checker. Must be modified under health checker lock. checkingHealth bool // md is the Metadata to be sent with each request. 
md metadata.MD // tx contains the transaction id if the session has been prepared for write. tx transactionID } // isValid returns true if the session is still valid for use. func (s *session) isValid() bool { s.mu.Lock() defer s.mu.Unlock() return s.valid } // isWritePrepared returns true if the session is prepared for write. func (s *session) isWritePrepared() bool { s.mu.Lock() defer s.mu.Unlock() return s.tx != nil } // String implements fmt.Stringer for session. func (s *session) String() string { s.mu.Lock() defer s.mu.Unlock() return fmt.Sprintf("", s.id, s.hcIndex, s.idleList, s.valid, s.createTime, s.nextCheck) } // ping verifies if the session is still alive in Cloud Spanner. func (s *session) ping() error { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() return runRetryable(ctx, func(ctx context.Context) error { _, err := s.client.GetSession(contextWithOutgoingMetadata(ctx, s.pool.md), &sppb.GetSessionRequest{Name: s.getID()}) // s.getID is safe even when s is invalid. return err }) } // refreshIdle refreshes the session's session ID if it is in its home session pool's idle list // and returns true if successful. func (s *session) refreshIdle() bool { s.mu.Lock() validAndIdle := s.valid && s.idleList != nil s.mu.Unlock() if !validAndIdle { // Optimization: return early if s is not valid or if s is not in idle list. return false } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() var sid string err := runRetryable(ctx, func(ctx context.Context) error { session, e := s.client.CreateSession(contextWithOutgoingMetadata(ctx, s.pool.md), &sppb.CreateSessionRequest{Database: s.pool.db}) if e != nil { return e } sid = session.Name return nil }) if err != nil { return false } s.pool.mu.Lock() s.mu.Lock() var recycle bool if s.valid && s.idleList != nil { // session is in idle list, refresh its session id. 
sid, s.id = s.id, sid if s.tx != nil { s.tx = nil s.pool.idleWriteList.Remove(s.idleList) // We need to put this session back into the pool. recycle = true } } s.mu.Unlock() s.pool.mu.Unlock() if recycle { s.pool.recycle(s) } // If we fail to explicitly destroy the session, it will be eventually garbage collected by // Cloud Spanner. if err = runRetryable(ctx, func(ctx context.Context) error { _, e := s.client.DeleteSession(contextWithOutgoingMetadata(ctx, s.pool.md), &sppb.DeleteSessionRequest{Name: sid}) return e }); err != nil && log.V(2) { log.Warningf("Failed to delete session %v. Error: %v", sid, err) } return true } // setHcIndex atomically sets the session's index in the healthcheck queue and returns the old index. func (s *session) setHcIndex(i int) int { s.mu.Lock() defer s.mu.Unlock() oi := s.hcIndex s.hcIndex = i return oi } // setIdleList atomically sets the session's idle list link and returns the old link. func (s *session) setIdleList(le *list.Element) *list.Element { s.mu.Lock() defer s.mu.Unlock() old := s.idleList s.idleList = le return old } // invalidate marks a session as invalid and returns the old validity. func (s *session) invalidate() bool { s.mu.Lock() defer s.mu.Unlock() ov := s.valid s.valid = false return ov } // setNextCheck sets the timestamp for next healthcheck on the session. func (s *session) setNextCheck(t time.Time) { s.mu.Lock() defer s.mu.Unlock() s.nextCheck = t } // setTransactionID sets the transaction id in the session func (s *session) setTransactionID(tx transactionID) { s.mu.Lock() defer s.mu.Unlock() s.tx = tx } // getID returns the session ID which uniquely identifies the session in Cloud Spanner. func (s *session) getID() string { s.mu.Lock() defer s.mu.Unlock() return s.id } // getHcIndex returns the session's index into the global healthcheck priority queue. 
func (s *session) getHcIndex() int { s.mu.Lock() defer s.mu.Unlock() return s.hcIndex } // getIdleList returns the session's link in its home session pool's idle list. func (s *session) getIdleList() *list.Element { s.mu.Lock() defer s.mu.Unlock() return s.idleList } // getNextCheck returns the timestamp for next healthcheck on the session. func (s *session) getNextCheck() time.Time { s.mu.Lock() defer s.mu.Unlock() return s.nextCheck } // recycle turns the session back to its home session pool. func (s *session) recycle() { s.setTransactionID(nil) if !s.pool.recycle(s) { // s is rejected by its home session pool because it expired and the session pool is currently having enough number of open sessions. s.destroy(false) } } // destroy removes the session from its home session pool, healthcheck queue and Cloud Spanner service. func (s *session) destroy(isExpire bool) bool { // Remove s from session pool. if !s.pool.remove(s, isExpire) { return false } // Unregister s from healthcheck queue. s.pool.hc.unregister(s) // Remove s from Cloud Spanner service. ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() // Ignore the error returned by runRetryable because even if we fail to explicitly destroy the session, // it will be eventually garbage collected by Cloud Spanner. err := runRetryable(ctx, func(ctx context.Context) error { _, e := s.client.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: s.getID()}) return e }) if err != nil && log.V(2) { log.Warningf("Failed to delete session %v. Error: %v", s.getID(), err) } return true } // prepareForWrite prepares the session for write if it is not already in that state. func (s *session) prepareForWrite(ctx context.Context) error { if s.isWritePrepared() { return nil } tx, err := beginTransaction(ctx, s.getID(), s.client) if err != nil { return err } s.setTransactionID(tx) return nil } // SessionPoolConfig stores configurations of a session pool. 
type SessionPoolConfig struct { // getRPCClient is the caller supplied method for getting a gRPC client to Cloud Spanner, this makes session pool able to use client pooling. getRPCClient func() (sppb.SpannerClient, error) // MaxOpened is the maximum number of opened sessions that is allowed by the // session pool. Default to NumChannels * 100. MaxOpened uint64 // MinOpened is the minimum number of opened sessions that the session pool // tries to maintain. Session pool won't continue to expire sessions if number // of opened connections drops below MinOpened. However, if session is found // to be broken, it will still be evicted from session pool, therefore it is // posssible that the number of opened sessions drops below MinOpened. MinOpened uint64 // MaxSessionAge is the maximum duration that a session can be reused, zero // means session pool will never expire sessions. MaxSessionAge time.Duration // MaxBurst is the maximum number of concurrent session creation requests. Defaults to 10. MaxBurst uint64 // WriteSessions is the fraction of sessions we try to keep prepared for write. WriteSessions float64 // HealthCheckWorkers is number of workers used by health checker for this pool. HealthCheckWorkers int // HealthCheckInterval is how often the health checker pings a session. HealthCheckInterval time.Duration } // errNoRPCGetter returns error for SessionPoolConfig missing getRPCClient method. func errNoRPCGetter() error { return spannerErrorf(codes.InvalidArgument, "require SessionPoolConfig.getRPCClient != nil, got nil") } // errMinOpenedGTMapOpened returns error for SessionPoolConfig.MaxOpened < SessionPoolConfig.MinOpened when SessionPoolConfig.MaxOpened is set. func errMinOpenedGTMaxOpened(spc *SessionPoolConfig) error { return spannerErrorf(codes.InvalidArgument, "require SessionPoolConfig.MaxOpened >= SessionPoolConfig.MinOpened, got %v and %v", spc.MaxOpened, spc.MinOpened) } // validate verifies that the SessionPoolConfig is good for use. 
func (spc *SessionPoolConfig) validate() error { if spc.getRPCClient == nil { return errNoRPCGetter() } if spc.MinOpened > spc.MaxOpened && spc.MaxOpened > 0 { return errMinOpenedGTMaxOpened(spc) } return nil } // sessionPool creates and caches Cloud Spanner sessions. type sessionPool struct { // mu protects sessionPool from concurrent access. mu sync.Mutex // valid marks the validity of the session pool. valid bool // db is the database name that all sessions in the pool are associated with. db string // idleList caches idle session IDs. Session IDs in this list can be allocated for use. idleList list.List // idleWriteList caches idle sessions which have been prepared for write. idleWriteList list.List // mayGetSession is for broadcasting that session retrival/creation may proceed. mayGetSession chan struct{} // numOpened is the total number of open sessions from the session pool. numOpened uint64 // createReqs is the number of ongoing session creation requests. createReqs uint64 // prepareReqs is the number of ongoing session preparation request. prepareReqs uint64 // configuration of the session pool. SessionPoolConfig // Metadata to be sent with each request md metadata.MD // hc is the health checker hc *healthChecker } // newSessionPool creates a new session pool. func newSessionPool(db string, config SessionPoolConfig, md metadata.MD) (*sessionPool, error) { if err := config.validate(); err != nil { return nil, err } pool := &sessionPool{ db: db, valid: true, mayGetSession: make(chan struct{}), SessionPoolConfig: config, md: md, } if config.HealthCheckWorkers == 0 { // With 10 workers and assuming average latency of 5 ms for BeginTransaction, we will be able to // prepare 2000 tx/sec in advance. If the rate of takeWriteSession is more than that, it will // degrade to doing BeginTransaction inline. // TODO: consider resizing the worker pool dynamically according to the load. 
config.HealthCheckWorkers = 10 } if config.HealthCheckInterval == 0 { config.HealthCheckInterval = 5 * time.Minute } // On GCE VM, within the same region an healthcheck ping takes on average 10ms to finish, given a 5 minutes interval and // 10 healthcheck workers, a healthChecker can effectively mantain 100 checks_per_worker/sec * 10 workers * 300 seconds = 300K sessions. pool.hc = newHealthChecker(config.HealthCheckInterval, config.HealthCheckWorkers, pool) return pool, nil } // isValid checks if the session pool is still valid. func (p *sessionPool) isValid() bool { if p == nil { return false } p.mu.Lock() defer p.mu.Unlock() return p.valid } // close marks the session pool as closed. func (p *sessionPool) close() { if p == nil { return } p.mu.Lock() if !p.valid { p.mu.Unlock() return } p.valid = false p.mu.Unlock() p.hc.close() // destroy all the sessions p.hc.mu.Lock() allSessions := make([]*session, len(p.hc.queue.sessions)) copy(allSessions, p.hc.queue.sessions) p.hc.mu.Unlock() for _, s := range allSessions { s.destroy(false) } } // errInvalidSessionPool returns error for using an invalid session pool. func errInvalidSessionPool() error { return spannerErrorf(codes.InvalidArgument, "invalid session pool") } // errGetSessionTimeout returns error for context timeout during sessionPool.take(). func errGetSessionTimeout() error { return spannerErrorf(codes.Canceled, "timeout / context canceled during getting session") } // shouldPrepareWrite returns true if we should prepare more sessions for write. func (p *sessionPool) shouldPrepareWrite() bool { return float64(p.numOpened)*p.WriteSessions > float64(p.idleWriteList.Len()+int(p.prepareReqs)) } func (p *sessionPool) createSession(ctx context.Context) (*session, error) { doneCreate := func(done bool) { p.mu.Lock() if !done { // Session creation failed, give budget back. p.numOpened-- } p.createReqs-- // Notify other waiters blocking on session creation. 
close(p.mayGetSession) p.mayGetSession = make(chan struct{}) p.mu.Unlock() } sc, err := p.getRPCClient() if err != nil { doneCreate(false) return nil, err } var s *session err = runRetryable(ctx, func(ctx context.Context) error { sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{Database: p.db}) if e != nil { return e } // If no error, construct the new session. s = &session{valid: true, client: sc, id: sid.Name, pool: p, createTime: time.Now(), md: p.md} p.hc.register(s) return nil }) if err != nil { doneCreate(false) // Should return error directly because of the previous retries on CreateSession RPC. return nil, err } doneCreate(true) return s, nil } func (p *sessionPool) isHealthy(s *session) bool { if s.getNextCheck().Add(2 * p.hc.getInterval()).Before(time.Now()) { // TODO: figure out if we need to schedule a new healthcheck worker here. if err := s.ping(); shouldDropSession(err) { // The session is already bad, continue to fetch/create a new one. s.destroy(false) return false } p.hc.scheduledHC(s) } return true } // take returns a cached session if there are available ones; if there isn't any, it tries to allocate a new one. // Session returned by take should be used for read operations. func (p *sessionPool) take(ctx context.Context) (*sessionHandle, error) { ctx = contextWithOutgoingMetadata(ctx, p.md) for { var ( s *session err error ) p.mu.Lock() if !p.valid { p.mu.Unlock() return nil, errInvalidSessionPool() } if p.idleList.Len() > 0 { // Idle sessions are available, get one from the top of the idle list. s = p.idleList.Remove(p.idleList.Front()).(*session) } else if p.idleWriteList.Len() > 0 { s = p.idleWriteList.Remove(p.idleWriteList.Front()).(*session) } if s != nil { s.setIdleList(nil) p.mu.Unlock() // From here, session is no longer in idle list, so healthcheck workers won't destroy it. // If healthcheck workers failed to schedule healthcheck for the session timely, do the check here. 
// Because session check is still much cheaper than session creation, they should be reused as much as possible. if !p.isHealthy(s) { continue } return &sessionHandle{session: s}, nil } // Idle list is empty, block if session pool has reached max session creation concurrency or max number of open sessions. if (p.MaxOpened > 0 && p.numOpened >= p.MaxOpened) || (p.MaxBurst > 0 && p.createReqs >= p.MaxBurst) { mayGetSession := p.mayGetSession p.mu.Unlock() select { case <-ctx.Done(): return nil, errGetSessionTimeout() case <-mayGetSession: } continue } // Take budget before the actual session creation. p.numOpened++ p.createReqs++ p.mu.Unlock() if s, err = p.createSession(ctx); err != nil { return nil, toSpannerError(err) } return &sessionHandle{session: s}, nil } } // takeWriteSession returns a write prepared cached session if there are available ones; if there isn't any, it tries to allocate a new one. // Session returned should be used for read write transactions. func (p *sessionPool) takeWriteSession(ctx context.Context) (*sessionHandle, error) { ctx = contextWithOutgoingMetadata(ctx, p.md) for { var ( s *session err error ) p.mu.Lock() if !p.valid { p.mu.Unlock() return nil, errInvalidSessionPool() } if p.idleWriteList.Len() > 0 { // Idle sessions are available, get one from the top of the idle list. s = p.idleWriteList.Remove(p.idleWriteList.Front()).(*session) } else if p.idleList.Len() > 0 { s = p.idleList.Remove(p.idleList.Front()).(*session) } if s != nil { s.setIdleList(nil) p.mu.Unlock() // From here, session is no longer in idle list, so healthcheck workers won't destroy it. // If healthcheck workers failed to schedule healthcheck for the session timely, do the check here. // Because session check is still much cheaper than session creation, they should be reused as much as possible. 
if !p.isHealthy(s) { continue } if !s.isWritePrepared() { if err = s.prepareForWrite(ctx); err != nil { return nil, toSpannerError(err) } } return &sessionHandle{session: s}, nil } // Idle list is empty, block if session pool has reached max session creation concurrency or max number of open sessions. if (p.MaxOpened > 0 && p.numOpened >= p.MaxOpened) || (p.MaxBurst > 0 && p.createReqs >= p.MaxBurst) { mayGetSession := p.mayGetSession p.mu.Unlock() select { case <-ctx.Done(): return nil, errGetSessionTimeout() case <-mayGetSession: } continue } // Take budget before the actual session creation. p.numOpened++ p.createReqs++ p.mu.Unlock() if s, err = p.createSession(ctx); err != nil { return nil, toSpannerError(err) } if err = s.prepareForWrite(ctx); err != nil { return nil, toSpannerError(err) } return &sessionHandle{session: s}, nil } } // recycle puts session s back to the session pool's idle list, it returns true if the session pool successfully recycles session s. func (p *sessionPool) recycle(s *session) bool { p.mu.Lock() defer p.mu.Unlock() if !s.isValid() || !p.valid { // Reject the session if session is invalid or pool itself is invalid. return false } if p.MaxSessionAge != 0 && s.createTime.Add(p.MaxSessionAge).Before(time.Now()) && p.numOpened > p.MinOpened { // session expires and number of opened sessions exceeds MinOpened, let the session destroy itself. return false } // Hot sessions will be converging at the front of the list, cold sessions will be evicted by healthcheck workers. if s.isWritePrepared() { s.setIdleList(p.idleWriteList.PushFront(s)) } else { s.setIdleList(p.idleList.PushFront(s)) } // Broadcast that a session has been returned to idle list. close(p.mayGetSession) p.mayGetSession = make(chan struct{}) return true } // remove atomically removes session s from the session pool and invalidates s. // If isExpire == true, the removal is triggered by session expiration and in such cases, only idle sessions can be removed. 
func (p *sessionPool) remove(s *session, isExpire bool) bool { p.mu.Lock() defer p.mu.Unlock() if isExpire && (p.numOpened <= p.MinOpened || s.getIdleList() == nil) { // Don't expire session if the session is not in idle list (in use), or if number of open sessions is going below p.MinOpened. return false } ol := s.setIdleList(nil) // If the session is in the idlelist, remove it. if ol != nil { // Remove from whichever list it is in. p.idleList.Remove(ol) p.idleWriteList.Remove(ol) } if s.invalidate() { // Decrease the number of opened sessions. p.numOpened-- // Broadcast that a session has been destroyed. close(p.mayGetSession) p.mayGetSession = make(chan struct{}) return true } return false } // hcHeap implements heap.Interface. It is used to create the priority queue for session healthchecks. type hcHeap struct { sessions []*session } // Len impelemnts heap.Interface.Len. func (h hcHeap) Len() int { return len(h.sessions) } // Less implements heap.Interface.Less. func (h hcHeap) Less(i, j int) bool { return h.sessions[i].getNextCheck().Before(h.sessions[j].getNextCheck()) } // Swap implements heap.Interface.Swap. func (h hcHeap) Swap(i, j int) { h.sessions[i], h.sessions[j] = h.sessions[j], h.sessions[i] h.sessions[i].setHcIndex(i) h.sessions[j].setHcIndex(j) } // Push implements heap.Interface.Push. func (h *hcHeap) Push(s interface{}) { ns := s.(*session) ns.setHcIndex(len(h.sessions)) h.sessions = append(h.sessions, ns) } // Pop implements heap.Interface.Pop. func (h *hcHeap) Pop() interface{} { old := h.sessions n := len(old) s := old[n-1] h.sessions = old[:n-1] s.setHcIndex(-1) return s } // healthChecker performs periodical healthchecks on registered sessions. type healthChecker struct { // mu protects concurrent access to hcQueue. mu sync.Mutex // queue is the priority queue for session healthchecks. Sessions with lower nextCheck rank higher in the queue. queue hcHeap // interval is the average interval between two healthchecks on a session. 
interval time.Duration // workers is the number of concurrent healthcheck workers. workers int // waitWorkers waits for all healthcheck workers to exit waitWorkers sync.WaitGroup // pool is the underlying session pool. pool *sessionPool // closed marks if a healthChecker has been closed. closed bool } // newHealthChecker initializes new instance of healthChecker. func newHealthChecker(interval time.Duration, workers int, pool *sessionPool) *healthChecker { if workers <= 0 { workers = 1 } hc := &healthChecker{ interval: interval, workers: workers, pool: pool, } for i := 0; i < hc.workers; i++ { hc.waitWorkers.Add(1) go hc.worker(i) } return hc } // close closes the healthChecker and waits for all healthcheck workers to exit. func (hc *healthChecker) close() { hc.mu.Lock() hc.closed = true hc.mu.Unlock() hc.waitWorkers.Wait() } // isClosing checks if a healthChecker is already closing. func (hc *healthChecker) isClosing() bool { hc.mu.Lock() defer hc.mu.Unlock() return hc.closed } // getInterval gets the healthcheck interval. func (hc *healthChecker) getInterval() time.Duration { hc.mu.Lock() defer hc.mu.Unlock() return hc.interval } // scheduledHCLocked schedules next healthcheck on session s with the assumption that hc.mu is being held. func (hc *healthChecker) scheduledHCLocked(s *session) { // The next healthcheck will be scheduled after [interval*0.5, interval*1.5) nanoseconds. nsFromNow := rand.Int63n(int64(hc.interval)) + int64(hc.interval)/2 s.setNextCheck(time.Now().Add(time.Duration(nsFromNow))) if hi := s.getHcIndex(); hi != -1 { // Session is still being tracked by healthcheck workers. heap.Fix(&hc.queue, hi) } } // scheduledHC schedules next healthcheck on session s. It is safe to be called concurrently. func (hc *healthChecker) scheduledHC(s *session) { hc.mu.Lock() defer hc.mu.Unlock() hc.scheduledHCLocked(s) } // register registers a session with healthChecker for periodical healthcheck. 
func (hc *healthChecker) register(s *session) { hc.mu.Lock() defer hc.mu.Unlock() hc.scheduledHCLocked(s) heap.Push(&hc.queue, s) } // unregister unregisters a session from healthcheck queue. func (hc *healthChecker) unregister(s *session) { hc.mu.Lock() defer hc.mu.Unlock() oi := s.setHcIndex(-1) if oi >= 0 { heap.Remove(&hc.queue, oi) } } // markDone marks that health check for session has been performed. func (hc *healthChecker) markDone(s *session) { hc.mu.Lock() defer hc.mu.Unlock() s.checkingHealth = false } // healthCheck checks the health of the session and pings it if needed. func (hc *healthChecker) healthCheck(s *session) { defer hc.markDone(s) if !s.pool.isValid() { // Session pool is closed, perform a garbage collection. s.destroy(false) return } if s.pool.MaxSessionAge != 0 && s.createTime.Add(s.pool.MaxSessionAge).Before(time.Now()) { // Session reaches its maximum age, retire it. Failing that try to refresh it. if s.destroy(true) || !s.refreshIdle() { return } } if err := s.ping(); shouldDropSession(err) { // Ping failed, destroy the session. s.destroy(false) } } // worker performs the healthcheck on sessions in healthChecker's priority queue. func (hc *healthChecker) worker(i int) { if log.V(2) { log.Infof("Starting health check worker %v", i) } // Returns a session which we should ping to keep it alive. getNextForPing := func() *session { hc.pool.mu.Lock() defer hc.pool.mu.Unlock() hc.mu.Lock() defer hc.mu.Unlock() if hc.queue.Len() <= 0 { // Queue is empty. return nil } s := hc.queue.sessions[0] if s.getNextCheck().After(time.Now()) && hc.pool.valid { // All sessions have been checked recently. return nil } hc.scheduledHCLocked(s) if !s.checkingHealth { s.checkingHealth = true return s } return nil } // Returns a session which we should prepare for write. 
getNextForTx := func() *session { hc.pool.mu.Lock() defer hc.pool.mu.Unlock() if hc.pool.shouldPrepareWrite() { if hc.pool.idleList.Len() > 0 && hc.pool.valid { hc.mu.Lock() defer hc.mu.Unlock() if hc.pool.idleList.Front().Value.(*session).checkingHealth { return nil } session := hc.pool.idleList.Remove(hc.pool.idleList.Front()).(*session) session.checkingHealth = true hc.pool.prepareReqs++ return session } } return nil } for { if hc.isClosing() { if log.V(2) { log.Infof("Closing health check worker %v", i) } // Exit when the pool has been closed and all sessions have been destroyed // or when health checker has been closed. hc.waitWorkers.Done() return } ws := getNextForTx() if ws != nil { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() ws.prepareForWrite(contextWithOutgoingMetadata(ctx, hc.pool.md)) hc.pool.recycle(ws) hc.pool.mu.Lock() hc.pool.prepareReqs-- hc.pool.mu.Unlock() hc.markDone(ws) } rs := getNextForPing() if rs == nil { if ws == nil { // No work to be done so sleep to avoid burning cpu pause := int64(100 * time.Millisecond) if pause > int64(hc.interval) { pause = int64(hc.interval) } <-time.After(time.Duration(rand.Int63n(pause) + pause/2)) } continue } hc.healthCheck(rs) } } // shouldDropSession returns true if a particular error leads to the removal of a session func shouldDropSession(err error) bool { if err == nil { return false } // If a Cloud Spanner can no longer locate the session (for example, if session is garbage collected), then caller // should not try to return the session back into the session pool. // TODO: once gRPC can return auxilary error information, stop parsing the error message. if ErrCode(err) == codes.NotFound && strings.Contains(ErrDesc(err), "Session not found:") { return true } return false } golang-google-cloud-0.9.0/spanner/session_test.go000066400000000000000000000652411312234511600220720ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package spanner

import (
	"container/heap"
	"math/rand"
	"reflect"
	"sync"
	"testing"
	"time"

	"golang.org/x/net/context"

	"cloud.google.com/go/spanner/internal/testutil"

	sppb "google.golang.org/genproto/googleapis/spanner/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// setup prepares test environment for regular session pool tests.
// It returns the pool under test, the mock Cloud Spanner client backing it,
// and a cancel func that closes the pool.
func setup(t *testing.T, spc SessionPoolConfig) (sp *sessionPool, sc *testutil.MockCloudSpannerClient, cancel func()) {
	sc = testutil.NewMockCloudSpannerClient(t)
	spc.getRPCClient = func() (sppb.SpannerClient, error) {
		return sc, nil
	}
	// Use a short healthcheck interval so healthcheck behavior is observable
	// within the test timeouts.
	spc.HealthCheckInterval = 50 * time.Millisecond
	sp, err := newSessionPool("mockdb", spc, nil)
	if err != nil {
		t.Fatalf("cannot create session pool: %v", err)
	}
	cancel = func() {
		sp.close()
	}
	return
}

// TestSessionCreation tests session creation during sessionPool.Take().
func TestSessionCreation(t *testing.T) {
	sp, sc, cancel := setup(t, SessionPoolConfig{})
	defer cancel()
	// Take three sessions from session pool, this should trigger session pool to create three new sessions.
	shs := make([]*sessionHandle, 3)
	// gotDs holds the unique sessions taken from session pool.
	gotDs := map[string]bool{}
	for i := 0; i < len(shs); i++ {
		var err error
		shs[i], err = sp.take(context.Background())
		if err != nil {
			t.Errorf("failed to get session(%v): %v", i, err)
		}
		gotDs[shs[i].getID()] = true
	}
	if len(gotDs) != len(shs) {
		t.Errorf("session pool created %v sessions, want %v", len(gotDs), len(shs))
	}
	if wantDs := sc.DumpSessions(); !reflect.DeepEqual(gotDs, wantDs) {
		t.Errorf("session pool creates sessions %v, want %v", gotDs, wantDs)
	}
	// Verify that created sessions are recorded correctly in session pool.
	sp.mu.Lock()
	if int(sp.numOpened) != len(shs) {
		t.Errorf("session pool reports %v open sessions, want %v", sp.numOpened, len(shs))
	}
	if sp.createReqs != 0 {
		t.Errorf("session pool reports %v session create requests, want 0", int(sp.createReqs))
	}
	sp.mu.Unlock()
	// Verify that created sessions are tracked correctly by healthcheck queue.
	hc := sp.hc
	hc.mu.Lock()
	if hc.queue.Len() != len(shs) {
		t.Errorf("healthcheck queue length = %v, want %v", hc.queue.Len(), len(shs))
	}
	for _, s := range hc.queue.sessions {
		if !gotDs[s.getID()] {
			t.Errorf("session %v is in healthcheck queue, but it is not created by session pool", s.getID())
		}
	}
	hc.mu.Unlock()
}

// TestTakeFromIdleList tests taking sessions from session pool's idle list.
func TestTakeFromIdleList(t *testing.T) {
	sp, sc, cancel := setup(t, SessionPoolConfig{})
	defer cancel()
	// Take ten sessions from session pool and recycle them.
	shs := make([]*sessionHandle, 10)
	for i := 0; i < len(shs); i++ {
		var err error
		shs[i], err = sp.take(context.Background())
		if err != nil {
			t.Errorf("failed to get session(%v): %v", i, err)
		}
	}
	for i := 0; i < len(shs); i++ {
		shs[i].recycle()
	}
	// Further session requests from session pool won't cause mockclient to create more sessions.
	wantSessions := sc.DumpSessions()
	// Take ten sessions from session pool again, this time all sessions should come from idle list.
	gotSessions := map[string]bool{}
	for i := 0; i < len(shs); i++ {
		sh, err := sp.take(context.Background())
		if err != nil {
			t.Errorf("cannot take session from session pool: %v", err)
		}
		gotSessions[sh.getID()] = true
	}
	if len(gotSessions) != 10 {
		t.Errorf("got %v unique sessions, want 10", len(gotSessions))
	}
	if !reflect.DeepEqual(gotSessions, wantSessions) {
		t.Errorf("got sessions: %v, want %v", gotSessions, wantSessions)
	}
}

// TestTakeWriteSessionFromIdleList tests taking write sessions from session pool's idle list.
func TestTakeWriteSessionFromIdleList(t *testing.T) {
	sp, sc, cancel := setup(t, SessionPoolConfig{})
	defer cancel()
	act := testutil.NewAction("Begin", nil)
	acts := make([]testutil.Action, 20)
	for i := 0; i < len(acts); i++ {
		acts[i] = act
	}
	sc.SetActions(acts...)
	// Take ten sessions from session pool and recycle them.
	shs := make([]*sessionHandle, 10)
	for i := 0; i < len(shs); i++ {
		var err error
		shs[i], err = sp.takeWriteSession(context.Background())
		if err != nil {
			t.Errorf("failed to get session(%v): %v", i, err)
		}
	}
	for i := 0; i < len(shs); i++ {
		shs[i].recycle()
	}
	// Further session requests from session pool won't cause mockclient to create more sessions.
	wantSessions := sc.DumpSessions()
	// Take ten sessions from session pool again, this time all sessions should come from idle list.
	gotSessions := map[string]bool{}
	for i := 0; i < len(shs); i++ {
		sh, err := sp.takeWriteSession(context.Background())
		if err != nil {
			t.Errorf("cannot take session from session pool: %v", err)
		}
		gotSessions[sh.getID()] = true
	}
	if len(gotSessions) != 10 {
		t.Errorf("got %v unique sessions, want 10", len(gotSessions))
	}
	if !reflect.DeepEqual(gotSessions, wantSessions) {
		t.Errorf("got sessions: %v, want %v", gotSessions, wantSessions)
	}
}

// TestTakeFromIdleListChecked tests taking sessions from session pool's idle list, but with an extra ping check.
func TestTakeFromIdleListChecked(t *testing.T) { if testing.Short() { t.SkipNow() } sp, sc, cancel := setup(t, SessionPoolConfig{}) defer cancel() // Stop healthcheck workers to simulate slow pings. sp.hc.close() // Create a session and recycle it. sh, err := sp.take(context.Background()) if err != nil { t.Errorf("failed to get session: %v", err) } wantSid := sh.getID() sh.recycle() <-time.After(time.Second) // Two back-to-back session requests, both of them should return the same session created before and // none of them should trigger a session ping. for i := 0; i < 2; i++ { // Take the session from the idle list and recycle it. sh, err = sp.take(context.Background()) if err != nil { t.Errorf("%v - failed to get session: %v", i, err) } if gotSid := sh.getID(); gotSid != wantSid { t.Errorf("%v - got session id: %v, want %v", i, gotSid, wantSid) } // The two back-to-back session requests shouldn't trigger any session pings because sessionPool.Take // reschedules the next healthcheck. if got, want := sc.DumpPings(), ([]string{wantSid}); !reflect.DeepEqual(got, want) { t.Errorf("%v - got ping session requests: %v, want %v", i, got, want) } sh.recycle() } // Inject session error to mockclient, and take the session from the session pool, the old session should be destroyed and // the session pool will create a new session. sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:")) // Delay to trigger sessionPool.Take to ping the session. <-time.After(time.Second) sh, err = sp.take(context.Background()) if err != nil { t.Errorf("failed to get session: %v", err) } ds := sc.DumpSessions() if len(ds) != 1 { t.Errorf("dumped sessions from mockclient: %v, want %v", ds, sh.getID()) } if sh.getID() == wantSid { t.Errorf("sessionPool.Take still returns the same session %v, want it to create a new one", wantSid) } } // TestTakeFromIdleWriteListChecked tests taking sessions from session pool's idle list, but with a extra ping check. 
func TestTakeFromIdleWriteListChecked(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	sp, sc, cancel := setup(t, SessionPoolConfig{})
	defer cancel()
	sc.MakeNice()
	// Stop healthcheck workers to simulate slow pings.
	sp.hc.close()
	// Create a session and recycle it.
	sh, err := sp.takeWriteSession(context.Background())
	if err != nil {
		t.Errorf("failed to get session: %v", err)
	}
	wantSid := sh.getID()
	sh.recycle()
	<-time.After(time.Second)
	// Two back-to-back session requests, both of them should return the same session created before and
	// none of them should trigger a session ping.
	for i := 0; i < 2; i++ {
		// Take the session from the idle list and recycle it.
		sh, err = sp.takeWriteSession(context.Background())
		if err != nil {
			t.Errorf("%v - failed to get session: %v", i, err)
		}
		if gotSid := sh.getID(); gotSid != wantSid {
			t.Errorf("%v - got session id: %v, want %v", i, gotSid, wantSid)
		}
		// The two back-to-back session requests shouldn't trigger any session pings because sessionPool.Take
		// reschedules the next healthcheck.
		if got, want := sc.DumpPings(), ([]string{wantSid}); !reflect.DeepEqual(got, want) {
			t.Errorf("%v - got ping session requests: %v, want %v", i, got, want)
		}
		sh.recycle()
	}
	// Inject session error to mockclient, and take the session from the session pool, the old session should be destroyed and
	// the session pool will create a new session.
	sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:"))
	// Delay to trigger sessionPool.Take to ping the session.
	<-time.After(time.Second)
	sh, err = sp.takeWriteSession(context.Background())
	if err != nil {
		t.Errorf("failed to get session: %v", err)
	}
	ds := sc.DumpSessions()
	if len(ds) != 1 {
		t.Errorf("dumped sessions from mockclient: %v, want %v", ds, sh.getID())
	}
	if sh.getID() == wantSid {
		t.Errorf("sessionPool.Take still returns the same session %v, want it to create a new one", wantSid)
	}
}

// TestMaxOpenedSessions tests max open sessions constraint.
func TestMaxOpenedSessions(t *testing.T) { if testing.Short() { t.SkipNow() } sp, _, cancel := setup(t, SessionPoolConfig{MaxOpened: 1}) defer cancel() sh1, err := sp.take(context.Background()) if err != nil { t.Errorf("cannot take session from session pool: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() // Session request will timeout due to the max open sessions constraint. sh2, gotErr := sp.take(ctx) if wantErr := errGetSessionTimeout(); !reflect.DeepEqual(gotErr, wantErr) { t.Errorf("the second session retrival returns error %v, want %v", gotErr, wantErr) } go func() { <-time.After(time.Second) // destroy the first session to allow the next session request to proceed. sh1.destroy() }() // Now session request can be processed because the first session will be destroyed. sh2, err = sp.take(context.Background()) if err != nil { t.Errorf("after the first session is destroyed, session retrival still returns error %v, want nil", err) } if !sh2.session.isValid() || sh2.getID() == "" { t.Errorf("got invalid session: %v", sh2.session) } } // TestMinOpenedSessions tests min open session constraint. func TestMinOpenedSessions(t *testing.T) { sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: 1}) defer cancel() // Take ten sessions from session pool and recycle them. var ss []*session var shs []*sessionHandle for i := 0; i < 10; i++ { sh, err := sp.take(context.Background()) if err != nil { t.Errorf("failed to get session(%v): %v", i, err) } ss = append(ss, sh.session) shs = append(shs, sh) sh.recycle() } for _, sh := range shs { sh.recycle() } // Simulate session expiration. for _, s := range ss { s.destroy(true) } sp.mu.Lock() defer sp.mu.Unlock() // There should be still one session left in idle list due to the min open sessions constraint. if sp.idleList.Len() != 1 { t.Errorf("got %v sessions in idle list, want 1", sp.idleList.Len()) } } // TestMaxBurst tests max burst constraint. 
func TestMaxBurst(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	sp, sc, cancel := setup(t, SessionPoolConfig{MaxBurst: 1})
	defer cancel()
	// Will cause session creation RPC to be retried forever.
	sc.InjectError("CreateSession", grpc.Errorf(codes.Unavailable, "try later"))
	// This session request will never finish until the injected error is cleared.
	go sp.take(context.Background())
	// Poll for the execution of the first session request.
	for {
		sp.mu.Lock()
		cr := sp.createReqs
		sp.mu.Unlock()
		if cr == 0 {
			<-time.After(time.Second)
			continue
		}
		// The first session request is being executed.
		break
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	sh, gotErr := sp.take(ctx)
	// Since MaxBurst == 1, the second session request should block.
	if wantErr := errGetSessionTimeout(); !reflect.DeepEqual(gotErr, wantErr) {
		t.Errorf("session retrival returns error %v, want %v", gotErr, wantErr)
	}
	// Let the first session request succeed.
	sc.InjectError("CreateSession", nil)
	// Now new session request can proceed because the first session request will eventually succeed.
	sh, err := sp.take(context.Background())
	if err != nil {
		t.Errorf("session retrival returns error %v, want nil", err)
	}
	if !sh.session.isValid() || sh.getID() == "" {
		t.Errorf("got invalid session: %v", sh.session)
	}
}

// TestSessionRecycle tests recycling sessions.
func TestSessionRecycle(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	sp, _, cancel := setup(t, SessionPoolConfig{MaxSessionAge: 100 * time.Millisecond, MinOpened: 1})
	// Healthcheck is explicitly turned off in this test because it might aggressively expire sessions in idle list.
	sp.hc.close()
	defer cancel()
	var ss []*session
	shs := make([]*sessionHandle, 2)
	for i := 0; i < len(shs); i++ {
		var err error
		shs[i], err = sp.take(context.Background())
		if err != nil {
			t.Errorf("cannot get the session %v: %v", i, err)
		}
		ss = append(ss, shs[i].session)
	}
	// recycle the first session immediately.
	shs[0].recycle()
	// Let the second session expire.
	<-time.After(time.Second)
	// recycle the second session.
	shs[1].recycle()
	// Now the first session should be still valid, but the second session should have been destroyed.
	if !ss[0].isValid() {
		t.Errorf("the first session (%v) is invalid, want it to be valid", ss[0])
	}
	if ss[1].isValid() {
		t.Errorf("the second session (%v) is valid, want it to be invalid", ss[1])
	}
}

// TestSessionDestroy tests destroying sessions.
func TestSessionDestroy(t *testing.T) {
	sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: 1})
	defer cancel()
	sh, err := sp.take(context.Background())
	if err != nil {
		t.Errorf("cannot get session from session pool: %v", err)
	}
	s := sh.session
	sh.recycle()
	if d := s.destroy(true); d || !s.isValid() {
		// Session should be remaining because of min open sessions constraint.
		t.Errorf("session %v was destroyed in expiration mode, want it to stay alive", s)
	}
	if d := s.destroy(false); !d || s.isValid() {
		// Session should be destroyed.
		t.Errorf("failed to destroy session %s", s)
	}
}

// TestHcHeap tests heap operation on top of hcHeap.
func TestHcHeap(t *testing.T) {
	in := []*session{
		&session{nextCheck: time.Unix(10, 0)},
		&session{nextCheck: time.Unix(0, 5)},
		&session{nextCheck: time.Unix(1, 8)},
		&session{nextCheck: time.Unix(11, 7)},
		&session{nextCheck: time.Unix(6, 3)},
	}
	want := []*session{
		&session{nextCheck: time.Unix(1, 8), hcIndex: 0},
		&session{nextCheck: time.Unix(6, 3), hcIndex: 1},
		&session{nextCheck: time.Unix(8, 2), hcIndex: 2},
		&session{nextCheck: time.Unix(10, 0), hcIndex: 3},
		&session{nextCheck: time.Unix(11, 7), hcIndex: 4},
	}
	hh := hcHeap{}
	for _, s := range in {
		heap.Push(&hh, s)
	}
	// Change top of the heap (the minimum, time.Unix(0, 5)) and do an adjustment.
	hh.sessions[0].nextCheck = time.Unix(8, 2)
	heap.Fix(&hh, 0)
	for idx := 0; hh.Len() > 0; idx++ {
		got := heap.Pop(&hh).(*session)
		want[idx].hcIndex = -1
		if !reflect.DeepEqual(got, want[idx]) {
			t.Errorf("%v: heap.Pop returns %v, want %v", idx, got, want[idx])
		}
	}
}

// TestHealthCheckScheduler tests if healthcheck workers can schedule and perform healthchecks properly.
func TestHealthCheckScheduler(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	sp, sc, cancel := setup(t, SessionPoolConfig{})
	defer cancel()
	// Create 50 sessions.
	ss := []string{}
	for i := 0; i < 50; i++ {
		sh, err := sp.take(context.Background())
		if err != nil {
			t.Errorf("cannot get session from session pool: %v", err)
		}
		ss = append(ss, sh.getID())
	}
	// Sleep for 1s, allowing healthcheck workers to perform some session pings.
	<-time.After(time.Second)
	dp := sc.DumpPings()
	gotPings := map[string]int64{}
	for _, p := range dp {
		gotPings[p]++
	}
	for _, s := range ss {
		// The average ping interval is 50ms.
		want := int64(time.Second) / int64(50*time.Millisecond)
		if got := gotPings[s]; got < want/2 || got > want+want/2 {
			t.Errorf("got %v healthchecks on session %v, want it between (%v, %v)", got, s, want/2, want+want/2)
		}
	}
}

// TestWriteSessionsPrepared tests that a fraction of sessions are prepared for write by health checker.
func TestWriteSessionsPrepared(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	sp, sc, cancel := setup(t, SessionPoolConfig{WriteSessions: 0.5})
	sc.MakeNice()
	defer cancel()
	shs := make([]*sessionHandle, 10)
	var err error
	for i := 0; i < 10; i++ {
		shs[i], err = sp.take(context.Background())
		if err != nil {
			t.Errorf("cannot get session from session pool: %v", err)
		}
	}
	// Now there are 10 sessions in the pool. Release them.
	for _, sh := range shs {
		sh.recycle()
	}
	// Sleep for 1s, allowing healthcheck workers to invoke begin transaction.
	<-time.After(time.Second)
	wshs := make([]*sessionHandle, 5)
	for i := 0; i < 5; i++ {
		wshs[i], err = sp.takeWriteSession(context.Background())
		if err != nil {
			t.Errorf("cannot get session from session pool: %v", err)
		}
		if wshs[i].getTransactionID() == nil {
			t.Errorf("got nil transaction id from session pool")
		}
	}
	for _, sh := range wshs {
		sh.recycle()
	}
	<-time.After(time.Second)
	// Now force creation of 10 more sessions.
	shs = make([]*sessionHandle, 20)
	for i := 0; i < 20; i++ {
		shs[i], err = sp.take(context.Background())
		if err != nil {
			t.Errorf("cannot get session from session pool: %v", err)
		}
	}
	// Now there are 20 sessions in the pool. Release them.
	for _, sh := range shs {
		sh.recycle()
	}
	<-time.After(time.Second)
	if sp.idleWriteList.Len() != 10 {
		t.Errorf("Expect 10 write prepared session, got: %d", sp.idleWriteList.Len())
	}
}

// TestTakeFromWriteQueue tests that sessionPool.take() returns write prepared sessions as well.
func TestTakeFromWriteQueue(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	sp, sc, cancel := setup(t, SessionPoolConfig{MaxOpened: 1, WriteSessions: 1.0})
	sc.MakeNice()
	defer cancel()
	sh, err := sp.take(context.Background())
	if err != nil {
		t.Errorf("cannot get session from session pool: %v", err)
	}
	sh.recycle()
	<-time.After(time.Second)
	// The session should now be in write queue but take should also return it.
	if sp.idleWriteList.Len() == 0 {
		t.Errorf("write queue unexpectedly empty")
	}
	if sp.idleList.Len() != 0 {
		t.Errorf("read queue not empty")
	}
	sh, err = sp.take(context.Background())
	if err != nil {
		t.Errorf("cannot get session from session pool: %v", err)
	}
	sh.recycle()
}

// TestSessionHealthCheck tests healthchecking cases.
func TestSessionHealthCheck(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	sp, sc, cancel := setup(t, SessionPoolConfig{MaxSessionAge: 2 * time.Second})
	defer cancel()
	// Test pinging sessions.
	sh, err := sp.take(context.Background())
	if err != nil {
		t.Errorf("cannot get session from session pool: %v", err)
	}
	<-time.After(time.Second)
	pings := sc.DumpPings()
	if len(pings) == 0 || pings[0] != sh.getID() {
		t.Errorf("healthchecker didn't send any ping to session %v", sh.getID())
	}
	// Test expiring sessions.
	s := sh.session
	sh.recycle()
	// Sleep enough long for session in idle list to expire.
	<-time.After(2 * time.Second)
	if s.isValid() {
		t.Errorf("session(%v) is still alive, want it to expire", s)
	}
	// Test broken session detection.
	sh, err = sp.take(context.Background())
	if err != nil {
		t.Errorf("cannot get session from session pool: %v", err)
	}
	sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:"))
	// Wait for healthcheck workers to find the broken session and tear it down.
	<-time.After(1 * time.Second)
	if sh.session.isValid() {
		t.Errorf("session(%v) is still alive, want it to be dropped by healthcheck workers", s)
	}
	sc.InjectError("GetSession", nil)
	// Test garbage collection.
	sh, err = sp.take(context.Background())
	if err != nil {
		t.Errorf("cannot get session from session pool: %v", err)
	}
	sp.close()
	if sh.session.isValid() {
		t.Errorf("session(%v) is still alive, want it to be garbage collected", s)
	}
	// Test session id refresh.
	// Recreate the session pool with min open sessions constraint.
	sp, err = newSessionPool("mockdb", SessionPoolConfig{
		MaxSessionAge: time.Second,
		MinOpened:     1,
		getRPCClient: func() (sppb.SpannerClient, error) {
			return sc, nil
		},
		HealthCheckInterval: 50 * time.Millisecond,
	}, nil)
	sh, err = sp.take(context.Background())
	if err != nil {
		t.Errorf("cannot get session from session pool: %v", err)
	}
	oid := sh.getID()
	s = sh.session
	sh.recycle()
	<-time.After(2 * time.Second)
	nid := s.getID()
	if nid == "" || nid == oid {
		t.Errorf("healthcheck workers failed to refresh session: oid=%v, nid=%v", oid, nid)
	}
	if gotDs, wantDs := sc.DumpSessions(), (map[string]bool{nid: true}); !reflect.DeepEqual(gotDs, wantDs) {
		t.Errorf("sessions in mockclient: %v, want %v", gotDs, wantDs)
	}
}

// TestStressSessionPool does stress test on session pool by the following concurrent operations:
//	1) Test worker gets a session from the pool.
//	2) Test worker turns a session back into the pool.
//	3) Test worker destroys a session got from the pool.
//	4) Healthcheck retires an old session from the pool's idlelist by refreshing its session id.
//	5) Healthcheck destroys a broken session (because a worker has already destroyed it).
//	6) Test worker closes the session pool.
//
// During the test, it is expected that all sessions that are taken from session pool remains valid and
// when all test workers and healthcheck workers exit, mockclient, session pool and healthchecker should be in consistent state.
func TestStressSessionPool(t *testing.T) {
	// Use concurrent workers to test different session pool built from different configurations.
	if testing.Short() {
		t.SkipNow()
	}
	for ti, cfg := range []SessionPoolConfig{
		SessionPoolConfig{},
		SessionPoolConfig{MaxSessionAge: 20 * time.Millisecond},
		SessionPoolConfig{MinOpened: 10, MaxOpened: 100},
		SessionPoolConfig{MaxBurst: 50},
		SessionPoolConfig{MaxSessionAge: 20 * time.Millisecond, MinOpened: 10, MaxOpened: 200, MaxBurst: 5},
		SessionPoolConfig{MaxSessionAge: 20 * time.Millisecond, MinOpened: 10, MaxOpened: 200, MaxBurst: 5, WriteSessions: 0.2},
	} {
		var wg sync.WaitGroup
		// Create a more aggressive session healthchecker to increase test concurrency.
		cfg.HealthCheckInterval = 50 * time.Millisecond
		cfg.HealthCheckWorkers = 50
		sc := testutil.NewMockCloudSpannerClient(t)
		sc.MakeNice()
		cfg.getRPCClient = func() (sppb.SpannerClient, error) {
			return sc, nil
		}
		sp, _ := newSessionPool("mockdb", cfg, nil)
		for i := 0; i < 100; i++ {
			wg.Add(1)
			// Schedule a test worker.
			go func(idx int, pool *sessionPool, client sppb.SpannerClient) {
				defer wg.Done()
				// Test worker iterates 1K times and tries different session / session pool operations.
				for j := 0; j < 1000; j++ {
					if idx%10 == 0 && j >= 900 {
						// Close the pool in selected set of workers during the middle of the test.
						pool.close()
					}
					// Take a write sessions ~ 20% of the times.
					takeWrite := rand.Intn(5) == 4
					var (
						sh     *sessionHandle
						gotErr error
					)
					if takeWrite {
						sh, gotErr = pool.takeWriteSession(context.Background())
					} else {
						sh, gotErr = pool.take(context.Background())
					}
					if gotErr != nil {
						if pool.isValid() {
							t.Errorf("%v.%v: pool.take returns error when pool is still valid: %v", ti, idx, gotErr)
						}
						if wantErr := errInvalidSessionPool(); !reflect.DeepEqual(gotErr, wantErr) {
							t.Errorf("%v.%v: got error when pool is closed: %v, want %v", ti, idx, gotErr, wantErr)
						}
						continue
					}
					// Verify if session is valid when session pool is valid. Note that if session pool is invalid after sh is taken,
					// then sh might be invalidated by healthcheck workers.
					if (sh.getID() == "" || sh.session == nil || !sh.session.isValid()) && pool.isValid() {
						t.Errorf("%v.%v.%v: pool.take returns invalid session %v", ti, idx, takeWrite, sh.session)
					}
					if takeWrite && sh.getTransactionID() == nil {
						t.Errorf("%v.%v: pool.takeWriteSession returns session %v without transaction", ti, idx, sh.session)
					}
					if int64(cfg.MaxSessionAge) > 0 && rand.Intn(100) < idx {
						// Random sleep before destroying/recycling the session, to give healthcheck worker a chance to step in.
						<-time.After(time.Duration(rand.Int63n(int64(cfg.MaxSessionAge))))
					}
					if rand.Intn(100) < idx {
						// destroy the session.
						sh.destroy()
						continue
					}
					// recycle the session.
					sh.recycle()
				}
			}(i, sp, sc)
		}
		wg.Wait()
		sp.hc.close()
		// Here the states of healthchecker, session pool and mockclient are stable.
		idleSessions := map[string]bool{}
		hcSessions := map[string]bool{}
		mockSessions := sc.DumpSessions()
		// Dump session pool's idle list.
		for sl := sp.idleList.Front(); sl != nil; sl = sl.Next() {
			s := sl.Value.(*session)
			if idleSessions[s.getID()] {
				t.Errorf("%v: found duplicated session in idle list: %v", ti, s.getID())
			}
			idleSessions[s.getID()] = true
		}
		for sl := sp.idleWriteList.Front(); sl != nil; sl = sl.Next() {
			s := sl.Value.(*session)
			if idleSessions[s.getID()] {
				t.Errorf("%v: found duplicated session in idle write list: %v", ti, s.getID())
			}
			idleSessions[s.getID()] = true
		}
		if int(sp.numOpened) != len(idleSessions) {
			t.Errorf("%v: number of opened sessions (%v) != number of idle sessions (%v)", ti, sp.numOpened, len(idleSessions))
		}
		if sp.createReqs != 0 {
			t.Errorf("%v: number of pending session creations = %v, want 0", ti, sp.createReqs)
		}
		// Dump healthcheck queue.
		for _, s := range sp.hc.queue.sessions {
			if hcSessions[s.getID()] {
				t.Errorf("%v: found duplicated session in healthcheck queue: %v", ti, s.getID())
			}
			hcSessions[s.getID()] = true
		}
		// Verify that idleSessions == hcSessions == mockSessions.
		if !reflect.DeepEqual(idleSessions, hcSessions) {
			t.Errorf("%v: sessions in idle list (%v) != sessions in healthcheck queue (%v)", ti, idleSessions, hcSessions)
		}
		if !reflect.DeepEqual(hcSessions, mockSessions) {
			t.Errorf("%v: sessions in healthcheck queue (%v) != sessions in mockclient (%v)", ti, hcSessions, mockSessions)
		}
		sp.close()
		mockSessions = sc.DumpSessions()
		if len(mockSessions) != 0 {
			t.Errorf("Found live sessions: %v", mockSessions)
		}
	}
}
golang-google-cloud-0.9.0/spanner/spanner_test.go000066400000000000000000001304721312234511600220530ustar00rootroot00000000000000/*
Copyright 2017 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package spanner

import (
	"fmt"
	"math"
	"reflect"
	"strings"
	"sync"
	"testing"
	"time"

	"cloud.google.com/go/civil"
	"cloud.google.com/go/internal/testutil"
	database "cloud.google.com/go/spanner/admin/database/apiv1"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
	"google.golang.org/grpc/codes"

	adminpb "google.golang.org/genproto/googleapis/spanner/admin/database/v1"
)

var (
	// testProjectID specifies the project used for testing.
	// It can be changed by setting environment variable GCLOUD_TESTS_GOLANG_PROJECT_ID.
	testProjectID = testutil.ProjID()
	// testInstanceID specifies the Cloud Spanner instance used for testing.
	testInstanceID = "go-integration-test"
	// client is a spanner.Client.
	client *Client
	// admin is a spanner.DatabaseAdminClient.
admin *database.DatabaseAdminClient // db is the path of the testing database. db string // dbName is the short name of the testing database. dbName string ) var ( singerDBStatements = []string{ `CREATE TABLE Singers ( SingerId INT64 NOT NULL, FirstName STRING(1024), LastName STRING(1024), SingerInfo BYTES(MAX) ) PRIMARY KEY (SingerId)`, `CREATE INDEX SingerByName ON Singers(FirstName, LastName)`, `CREATE TABLE Accounts ( AccountId INT64 NOT NULL, Nickname STRING(100), Balance INT64 NOT NULL, ) PRIMARY KEY (AccountId)`, `CREATE INDEX AccountByNickname ON Accounts(Nickname) STORING (Balance)`, `CREATE TABLE Types ( RowID INT64 NOT NULL, String STRING(MAX), StringArray ARRAY, Bytes BYTES(MAX), BytesArray ARRAY, Int64a INT64, Int64Array ARRAY, Bool BOOL, BoolArray ARRAY, Float64 FLOAT64, Float64Array ARRAY, Date DATE, DateArray ARRAY, Timestamp TIMESTAMP, TimestampArray ARRAY, ) PRIMARY KEY (RowID)`, } readDBStatements = []string{ `CREATE TABLE TestTable ( Key STRING(MAX) NOT NULL, StringValue STRING(MAX) ) PRIMARY KEY (Key)`, `CREATE INDEX TestTableByValue ON TestTable(StringValue)`, `CREATE INDEX TestTableByValueDesc ON TestTable(StringValue DESC)`, } ) type testTableRow struct{ Key, StringValue string } // prepare initializes Cloud Spanner testing DB and clients. func prepare(ctx context.Context, t *testing.T, statements []string) error { if testing.Short() { t.Skip("Integration tests skipped in short mode") } if testProjectID == "" { t.Skip("Integration tests skipped: GCLOUD_TESTS_GOLANG_PROJECT_ID is missing") } ts := testutil.TokenSource(ctx, AdminScope, Scope) if ts == nil { t.Skip("Integration test skipped: cannot get service account credential from environment variable %v", "GCLOUD_TESTS_GOLANG_KEY") } var err error // Create Admin client and Data client. // TODO: Remove the EndPoint option once this is the default. 
admin, err = database.NewDatabaseAdminClient(ctx, option.WithTokenSource(ts), option.WithEndpoint("spanner.googleapis.com:443")) if err != nil { t.Errorf("cannot create admin client: %v", err) return err } // Construct test DB name. dbName = fmt.Sprintf("gotest_%v", time.Now().UnixNano()) db = fmt.Sprintf("projects/%v/instances/%v/databases/%v", testProjectID, testInstanceID, dbName) // Create database and tables. op, err := admin.CreateDatabase(ctx, &adminpb.CreateDatabaseRequest{ Parent: fmt.Sprintf("projects/%v/instances/%v", testProjectID, testInstanceID), CreateStatement: "CREATE DATABASE " + dbName, ExtraStatements: statements, }) if err != nil { t.Errorf("cannot create testing DB %v: %v", db, err) return err } if _, err := op.Wait(ctx); err != nil { t.Errorf("cannot create testing DB %v: %v", db, err) return err } client, err = NewClientWithConfig(ctx, db, ClientConfig{ SessionPoolConfig: SessionPoolConfig{ WriteSessions: 0.2, }, }, option.WithTokenSource(ts)) if err != nil { t.Errorf("cannot create data client on DB %v: %v", db, err) return err } return nil } // tearDown tears down the testing environment created by prepare(). func tearDown(ctx context.Context, t *testing.T) { if admin != nil { if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{db}); err != nil { t.Logf("failed to drop testing database: %v, might need a manual removal", db) } admin.Close() } if client != nil { client.Close() } admin = nil client = nil db = "" } // Test SingleUse transaction. func TestSingleUse(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() // Set up testing environment. if err := prepare(ctx, t, singerDBStatements); err != nil { // If prepare() fails, tear down whatever that's already up. tearDown(ctx, t) t.Fatalf("cannot set up testing environment: %v", err) } // After all tests, tear down testing environment. 
	defer tearDown(ctx, t)

	writes := []struct {
		row []interface{}
		ts  time.Time
	}{
		{row: []interface{}{1, "Marc", "Foo"}},
		{row: []interface{}{2, "Tars", "Bar"}},
		{row: []interface{}{3, "Alpha", "Beta"}},
		{row: []interface{}{4, "Last", "End"}},
	}
	// Try to write four rows through the Apply API.
	for i, w := range writes {
		var err error
		m := InsertOrUpdate("Singers", []string{"SingerId", "FirstName", "LastName"}, w.row)
		// Record each commit timestamp; the timestamp-bound cases below depend on them.
		if writes[i].ts, err = client.Apply(ctx, []*Mutation{m}, ApplyAtLeastOnce()); err != nil {
			t.Fatal(err)
		}
	}
	// For testing timestamp bound staleness.
	<-time.After(time.Second)
	// Test reading rows with different timestamp bounds.
	for i, test := range []struct {
		want    [][]interface{}
		tb      TimestampBound
		checkTs func(time.Time) error
	}{
		{
			// strong
			[][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}},
			StrongRead(),
			func(ts time.Time) error {
				// writes[3] is the last write, all subsequent strong read should have a timestamp larger than that.
				if ts.Before(writes[3].ts) {
					return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts)
				}
				return nil
			},
		},
		{
			// min_read_timestamp
			[][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}},
			MinReadTimestamp(writes[3].ts),
			func(ts time.Time) error {
				if ts.Before(writes[3].ts) {
					return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts)
				}
				return nil
			},
		},
		{
			// max_staleness
			[][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}},
			MaxStaleness(time.Second),
			func(ts time.Time) error {
				if ts.Before(writes[3].ts) {
					return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts)
				}
				return nil
			},
		},
		{
			// read_timestamp
			[][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}},
			ReadTimestamp(writes[2].ts),
			func(ts time.Time) error {
				if ts != writes[2].ts {
					return fmt.Errorf("read got timestamp %v, expect %v", ts, writes[2].ts)
				}
				return nil
			},
		},
		{
			// exact_staleness
			nil,
			// Specify a staleness which should be already before this test because
			// context timeout is set to be 20s.
			ExactStaleness(11 * time.Second),
			func(ts time.Time) error {
				if ts.After(writes[0].ts) {
					return fmt.Errorf("read got timestamp %v, want it to be no earlier than %v", ts, writes[0].ts)
				}
				return nil
			},
		},
	} {
		// SingleUse.Query
		su := client.Single().WithTimestampBound(test.tb)
		got, err := readAll(su.Query(
			ctx,
			Statement{
				"SELECT SingerId, FirstName, LastName FROM Singers WHERE SingerId IN (@id1, @id3, @id4)",
				map[string]interface{}{"id1": int64(1), "id3": int64(3), "id4": int64(4)},
			}))
		if err != nil {
			t.Errorf("%d: SingleUse.Query returns error %v, want nil", i, err)
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%d: got unexpected result from SingleUse.Query: %v, want %v", i, got, test.want)
		}
		rts, err := su.Timestamp()
		if err != nil {
			t.Errorf("%d: SingleUse.Query doesn't return a timestamp, error: %v", i, err)
		}
		if err := test.checkTs(rts); err != nil {
			t.Errorf("%d: SingleUse.Query doesn't return expected timestamp: %v", i, err)
		}
		// SingleUse.Read
		su = client.Single().WithTimestampBound(test.tb)
		got, err = readAll(su.Read(ctx, "Singers", KeySets(Key{1}, Key{3}, Key{4}), []string{"SingerId", "FirstName", "LastName"}))
		if err != nil {
			t.Errorf("%d: SingleUse.Read returns error %v, want nil", i, err)
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%d: got unexpected result from SingleUse.Read: %v, want %v", i, got, test.want)
		}
		rts, err = su.Timestamp()
		if err != nil {
			t.Errorf("%d: SingleUse.Read doesn't return a timestamp, error: %v", i, err)
		}
		if err := test.checkTs(rts); err != nil {
			t.Errorf("%d: SingleUse.Read doesn't return expected timestamp: %v", i, err)
		}
		// SingleUse.ReadRow
		got = nil
		for _, k := range []Key{Key{1}, Key{3}, Key{4}} {
			// A single-use transaction serves one read only, so a fresh one
			// is created per row.
			su = client.Single().WithTimestampBound(test.tb)
			r, err := su.ReadRow(ctx, "Singers", k, []string{"SingerId", "FirstName", "LastName"})
			if err != nil {
				continue
			}
			v, err := rowToValues(r)
			if err != nil {
				continue
			}
			got = append(got, v)
			rts, err = su.Timestamp()
			if err != nil {
				t.Errorf("%d: SingleUse.ReadRow(%v) doesn't return a timestamp, error: %v", i, k, err)
			}
			if err := test.checkTs(rts); err != nil {
				t.Errorf("%d: SingleUse.ReadRow(%v) doesn't return expected timestamp: %v", i, k, err)
			}
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%d: got unexpected results from SingleUse.ReadRow: %v, want %v", i, got, test.want)
		}
		// SingleUse.ReadUsingIndex
		su = client.Single().WithTimestampBound(test.tb)
		got, err = readAll(su.ReadUsingIndex(ctx, "Singers", "SingerByName", KeySets(Key{"Marc", "Foo"}, Key{"Alpha", "Beta"}, Key{"Last", "End"}), []string{"SingerId", "FirstName", "LastName"}))
		if err != nil {
			t.Errorf("%d: SingleUse.ReadUsingIndex returns error %v, want nil", i, err)
		}
		// The results from ReadUsingIndex is sorted by the index rather than primary key.
		if len(got) != len(test.want) {
			t.Errorf("%d: got unexpected result from SingleUse.ReadUsingIndex: %v, want %v", i, got, test.want)
		}
		for j, g := range got {
			if j > 0 {
				// Verify index ordering on (FirstName, LastName).
				prev := got[j-1][1].(string) + got[j-1][2].(string)
				curr := got[j][1].(string) + got[j][2].(string)
				if strings.Compare(prev, curr) > 0 {
					t.Errorf("%d: SingleUse.ReadUsingIndex fails to order rows by index keys, %v should be after %v", i, got[j-1], got[j])
				}
			}
			found := false
			for _, w := range test.want {
				if reflect.DeepEqual(g, w) {
					found = true
				}
			}
			if !found {
				t.Errorf("%d: got unexpected result from SingleUse.ReadUsingIndex: %v, want %v", i, got, test.want)
				break
			}
		}
		rts, err = su.Timestamp()
		if err != nil {
			t.Errorf("%d: SingleUse.ReadUsingIndex doesn't return a timestamp, error: %v", i, err)
		}
		if err := test.checkTs(rts); err != nil {
			t.Errorf("%d: SingleUse.ReadUsingIndex doesn't return expected timestamp: %v", i, err)
		}
	}
}

// Test ReadOnlyTransaction. The testsuite is mostly like SingleUse, except it
// also tests for a single timestamp across multiple reads.
func TestReadOnlyTransaction(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	// Set up testing environment.
	if err := prepare(ctx, t, singerDBStatements); err != nil {
		// If prepare() fails, tear down whatever that's already up.
		tearDown(ctx, t)
		t.Fatalf("cannot set up testing environment: %v", err)
	}
	// After all tests, tear down testing environment.
	defer tearDown(ctx, t)

	writes := []struct {
		row []interface{}
		ts  time.Time
	}{
		{row: []interface{}{1, "Marc", "Foo"}},
		{row: []interface{}{2, "Tars", "Bar"}},
		{row: []interface{}{3, "Alpha", "Beta"}},
		{row: []interface{}{4, "Last", "End"}},
	}
	// Try to write four rows through the Apply API.
	for i, w := range writes {
		var err error
		m := InsertOrUpdate("Singers", []string{"SingerId", "FirstName", "LastName"}, w.row)
		if writes[i].ts, err = client.Apply(ctx, []*Mutation{m}, ApplyAtLeastOnce()); err != nil {
			t.Fatal(err)
		}
	}
	// For testing timestamp bound staleness.
	<-time.After(time.Second)
	// Test reading rows with different timestamp bounds.
	for i, test := range []struct {
		want    [][]interface{}
		tb      TimestampBound
		checkTs func(time.Time) error
	}{
		// Note: min_read_timestamp and max_staleness are not supported by ReadOnlyTransaction. See
		// API document for more details.
		{
			// strong
			[][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}},
			StrongRead(),
			func(ts time.Time) error {
				if ts.Before(writes[3].ts) {
					return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts)
				}
				return nil
			},
		},
		{
			// read_timestamp
			[][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}},
			ReadTimestamp(writes[2].ts),
			func(ts time.Time) error {
				if ts != writes[2].ts {
					return fmt.Errorf("read got timestamp %v, expect %v", ts, writes[2].ts)
				}
				return nil
			},
		},
		{
			// exact_staleness
			nil,
			// Specify a staleness which should be already before this test because
			// context timeout is set to be 20s.
			ExactStaleness(11 * time.Second),
			func(ts time.Time) error {
				if ts.After(writes[0].ts) {
					return fmt.Errorf("read got timestamp %v, want it to be no earlier than %v", ts, writes[0].ts)
				}
				return nil
			},
		},
	} {
		// ReadOnlyTransaction.Query
		ro := client.ReadOnlyTransaction().WithTimestampBound(test.tb)
		got, err := readAll(ro.Query(
			ctx,
			Statement{
				"SELECT SingerId, FirstName, LastName FROM Singers WHERE SingerId IN (@id1, @id3, @id4)",
				map[string]interface{}{"id1": int64(1), "id3": int64(3), "id4": int64(4)},
			}))
		if err != nil {
			t.Errorf("%d: ReadOnlyTransaction.Query returns error %v, want nil", i, err)
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%d: got unexpected result from ReadOnlyTransaction.Query: %v, want %v", i, got, test.want)
		}
		rts, err := ro.Timestamp()
		if err != nil {
			t.Errorf("%d: ReadOnlyTransaction.Query doesn't return a timestamp, error: %v", i, err)
		}
		if err := test.checkTs(rts); err != nil {
			t.Errorf("%d: ReadOnlyTransaction.Query doesn't return expected timestamp: %v", i, err)
		}
		// Remember the first read timestamp; every later read in the same
		// transaction must report the identical timestamp.
		roTs := rts
		// ReadOnlyTransaction.Read
		got, err = readAll(ro.Read(ctx, "Singers", KeySets(Key{1}, Key{3}, Key{4}), []string{"SingerId", "FirstName", "LastName"}))
		if err != nil {
			t.Errorf("%d: ReadOnlyTransaction.Read returns error %v, want nil", i, err)
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%d: got unexpected result from ReadOnlyTransaction.Read: %v, want %v", i, got, test.want)
		}
		rts, err = ro.Timestamp()
		if err != nil {
			t.Errorf("%d: ReadOnlyTransaction.Read doesn't return a timestamp, error: %v", i, err)
		}
		if err := test.checkTs(rts); err != nil {
			t.Errorf("%d: ReadOnlyTransaction.Read doesn't return expected timestamp: %v", i, err)
		}
		if roTs != rts {
			t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to return always the same read timestamp", i, roTs, rts)
		}
		// ReadOnlyTransaction.ReadRow
		got = nil
		for _, k := range []Key{Key{1}, Key{3}, Key{4}} {
			r, err := ro.ReadRow(ctx, "Singers", k, []string{"SingerId", "FirstName", "LastName"})
			if err != nil {
				continue
			}
			v, err := rowToValues(r)
			if err != nil {
				continue
			}
			got = append(got, v)
			rts, err = ro.Timestamp()
			if err != nil {
				t.Errorf("%d: ReadOnlyTransaction.ReadRow(%v) doesn't return a timestamp, error: %v", i, k, err)
			}
			if err := test.checkTs(rts); err != nil {
				t.Errorf("%d: ReadOnlyTransaction.ReadRow(%v) doesn't return expected timestamp: %v", i, k, err)
			}
			if roTs != rts {
				t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to return always the same read timestamp", i, roTs, rts)
			}
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%d: got unexpected results from ReadOnlyTransaction.ReadRow: %v, want %v", i, got, test.want)
		}
		// SingleUse.ReadUsingIndex
		got, err = readAll(ro.ReadUsingIndex(ctx, "Singers", "SingerByName", KeySets(Key{"Marc", "Foo"}, Key{"Alpha", "Beta"}, Key{"Last", "End"}), []string{"SingerId", "FirstName", "LastName"}))
		if err != nil {
			t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex returns error %v, want nil", i, err)
		}
		// The results from ReadUsingIndex is sorted by the index rather than primary key.
		if len(got) != len(test.want) {
			t.Errorf("%d: got unexpected result from ReadOnlyTransaction.ReadUsingIndex: %v, want %v", i, got, test.want)
		}
		for j, g := range got {
			if j > 0 {
				prev := got[j-1][1].(string) + got[j-1][2].(string)
				curr := got[j][1].(string) + got[j][2].(string)
				if strings.Compare(prev, curr) > 0 {
					t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex fails to order rows by index keys, %v should be after %v", i, got[j-1], got[j])
				}
			}
			found := false
			for _, w := range test.want {
				if reflect.DeepEqual(g, w) {
					found = true
				}
			}
			if !found {
				t.Errorf("%d: got unexpected result from ReadOnlyTransaction.ReadUsingIndex: %v, want %v", i, got, test.want)
				break
			}
		}
		rts, err = ro.Timestamp()
		if err != nil {
			t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex doesn't return a timestamp, error: %v", i, err)
		}
		if err := test.checkTs(rts); err != nil {
			t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex doesn't return expected timestamp: %v", i, err)
		}
		if roTs != rts {
			t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to return always the same read timestamp", i, roTs, rts)
		}
		ro.Close()
	}
}

// Test ReadWriteTransaction.
func TestReadWriteTransaction(t *testing.T) {
	// Give a longer deadline because of transaction backoffs.
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() if err := prepare(ctx, t, singerDBStatements); err != nil { tearDown(ctx, t) t.Fatalf("cannot set up testing environment: %v", err) } defer tearDown(ctx, t) // Set up two accounts accounts := []*Mutation{ Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)}), Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(2), "Bar", int64(1)}), } if _, err := client.Apply(ctx, accounts, ApplyAtLeastOnce()); err != nil { t.Fatal(err) } wg := sync.WaitGroup{} readBalance := func(iter *RowIterator) (int64, error) { defer iter.Stop() var bal int64 for { row, err := iter.Next() if err == iterator.Done { return bal, nil } if err != nil { return 0, err } if err := row.Column(0, &bal); err != nil { return 0, err } } } for i := 0; i < 20; i++ { wg.Add(1) go func(iter int) { defer wg.Done() _, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error { // Query Foo's balance and Bar's balance. bf, e := readBalance(tx.Query(ctx, Statement{"SELECT Balance FROM Accounts WHERE AccountId = @id", map[string]interface{}{"id": int64(1)}})) if e != nil { return e } bb, e := readBalance(tx.Read(ctx, "Accounts", KeySets(Key{int64(2)}), []string{"Balance"})) if e != nil { return e } if bf <= 0 { return nil } bf-- bb++ tx.BufferWrite([]*Mutation{ Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(1), bf}), Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(2), bb}), }) return nil }) if err != nil { t.Fatalf("%d: failed to execute transaction: %v", iter, err) } }(i) } // Because of context timeout, all goroutines will eventually return. 
	wg.Wait()
	// Verify the final balances: 20 successful unit transfers from Foo (50)
	// to Bar (1) leave 30 and 21.
	_, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error {
		var bf, bb int64
		r, e := tx.ReadRow(ctx, "Accounts", Key{int64(1)}, []string{"Balance"})
		if e != nil {
			return e
		}
		if ce := r.Column(0, &bf); ce != nil {
			return ce
		}
		bb, e = readBalance(tx.ReadUsingIndex(ctx, "Accounts", "AccountByNickname", KeySets(Key{"Bar"}), []string{"Balance"}))
		if e != nil {
			return e
		}
		if bf != 30 || bb != 21 {
			t.Errorf("Foo's balance is now %v and Bar's balance is now %v, want %v and %v", bf, bb, 30, 21)
		}
		return nil
	})
	if err != nil {
		t.Errorf("failed to check balances: %v", err)
	}
}

const (
	testTable      = "TestTable"
	testTableIndex = "TestTableByValue"
)

var testTableColumns = []string{"Key", "StringValue"}

func TestReads(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	// Set up testing environment.
	if err := prepare(ctx, t, readDBStatements); err != nil {
		// If prepare() fails, tear down whatever that's already up.
		tearDown(ctx, t)
		t.Fatalf("cannot set up testing environment: %v", err)
	}
	// After all tests, tear down testing environment.
	defer tearDown(ctx, t)

	// Includes k0..k14. Strings sort lexically, eg "k1" < "k10" < "k2".
	var ms []*Mutation
	for i := 0; i < 15; i++ {
		ms = append(ms, InsertOrUpdate(testTable, testTableColumns, []interface{}{fmt.Sprintf("k%d", i), fmt.Sprintf("v%d", i)}))
	}
	if _, err := client.Apply(ctx, ms, ApplyAtLeastOnce()); err != nil {
		t.Fatal(err)
	}
	// Empty read.
	rows, err := readAllTestTable(client.Single().Read(ctx, testTable,
		KeyRange{Start: Key{"k99"}, End: Key{"z"}}, testTableColumns))
	if err != nil {
		t.Fatal(err)
	}
	if got, want := len(rows), 0; got != want {
		t.Errorf("got %d, want %d", got, want)
	}
	// Index empty read.
	rows, err = readAllTestTable(client.Single().ReadUsingIndex(ctx, testTable, testTableIndex,
		KeyRange{Start: Key{"v99"}, End: Key{"z"}}, testTableColumns))
	if err != nil {
		t.Fatal(err)
	}
	if got, want := len(rows), 0; got != want {
		t.Errorf("got %d, want %d", got, want)
	}
	// Point read.
	row, err := client.Single().ReadRow(ctx, testTable, Key{"k1"}, testTableColumns)
	if err != nil {
		t.Fatal(err)
	}
	var got testTableRow
	if err := row.ToStruct(&got); err != nil {
		t.Fatal(err)
	}
	if want := (testTableRow{"k1", "v1"}); got != want {
		t.Errorf("got %v, want %v", got, want)
	}
	// Point read not found.
	_, err = client.Single().ReadRow(ctx, testTable, Key{"k999"}, testTableColumns)
	if ErrCode(err) != codes.NotFound {
		t.Fatalf("got %v, want NotFound", err)
	}
	// No index point read not found, because Go does not have ReadRowUsingIndex.

	rangeReads(ctx, t)
	indexRangeReads(ctx, t)
}

// rangeReads exercises KeyRange reads over the primary key of TestTable.
func rangeReads(ctx context.Context, t *testing.T) {
	checkRange := func(ks KeySet, wantNums ...int) {
		if msg, ok := compareRows(client.Single().Read(ctx, testTable, ks, testTableColumns), wantNums); !ok {
			t.Errorf("key set %+v: %s", ks, msg)
		}
	}
	checkRange(Key{"k1"}, 1)
	checkRange(KeyRange{Key{"k3"}, Key{"k5"}, ClosedOpen}, 3, 4)
	checkRange(KeyRange{Key{"k3"}, Key{"k5"}, ClosedClosed}, 3, 4, 5)
	checkRange(KeyRange{Key{"k3"}, Key{"k5"}, OpenClosed}, 4, 5)
	checkRange(KeyRange{Key{"k3"}, Key{"k5"}, OpenOpen}, 4)

	// Partial key specification.
	checkRange(KeyRange{Key{"k7"}, Key{}, ClosedClosed}, 7, 8, 9)
	checkRange(KeyRange{Key{"k7"}, Key{}, OpenClosed}, 8, 9)
	checkRange(KeyRange{Key{}, Key{"k11"}, ClosedOpen}, 0, 1, 10)
	checkRange(KeyRange{Key{}, Key{"k11"}, ClosedClosed}, 0, 1, 10, 11)

	// The following produce empty ranges.
	// TODO(jba): Consider a multi-part key to illustrate partial key behavior.
	// checkRange(KeyRange{Key{"k7"}, Key{}, ClosedOpen})
	// checkRange(KeyRange{Key{"k7"}, Key{}, OpenOpen})
	// checkRange(KeyRange{Key{}, Key{"k11"}, OpenOpen})
	// checkRange(KeyRange{Key{}, Key{"k11"}, OpenClosed})

	// Prefix is component-wise, not string prefix.
	checkRange(Key{"k1"}.AsPrefix(), 1)
	checkRange(KeyRange{Key{"k1"}, Key{"k2"}, ClosedOpen}, 1, 10, 11, 12, 13, 14)

	checkRange(AllKeys(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)
}

// indexRangeReads mirrors rangeReads over the TestTableByValue index.
func indexRangeReads(ctx context.Context, t *testing.T) {
	checkRange := func(ks KeySet, wantNums ...int) {
		if msg, ok := compareRows(client.Single().ReadUsingIndex(ctx, testTable, testTableIndex, ks, testTableColumns),
			wantNums); !ok {
			t.Errorf("key set %+v: %s", ks, msg)
		}
	}
	checkRange(Key{"v1"}, 1)
	checkRange(KeyRange{Key{"v3"}, Key{"v5"}, ClosedOpen}, 3, 4)
	checkRange(KeyRange{Key{"v3"}, Key{"v5"}, ClosedClosed}, 3, 4, 5)
	checkRange(KeyRange{Key{"v3"}, Key{"v5"}, OpenClosed}, 4, 5)
	checkRange(KeyRange{Key{"v3"}, Key{"v5"}, OpenOpen}, 4)

	// // Partial key specification.
	checkRange(KeyRange{Key{"v7"}, Key{}, ClosedClosed}, 7, 8, 9)
	checkRange(KeyRange{Key{"v7"}, Key{}, OpenClosed}, 8, 9)
	checkRange(KeyRange{Key{}, Key{"v11"}, ClosedOpen}, 0, 1, 10)
	checkRange(KeyRange{Key{}, Key{"v11"}, ClosedClosed}, 0, 1, 10, 11)

	// // The following produce empty ranges.
	// checkRange(KeyRange{Key{"v7"}, Key{}, ClosedOpen})
	// checkRange(KeyRange{Key{"v7"}, Key{}, OpenOpen})
	// checkRange(KeyRange{Key{}, Key{"v11"}, OpenOpen})
	// checkRange(KeyRange{Key{}, Key{"v11"}, OpenClosed})

	// // Prefix is component-wise, not string prefix.
	checkRange(Key{"v1"}.AsPrefix(), 1)
	checkRange(KeyRange{Key{"v1"}, Key{"v2"}, ClosedOpen}, 1, 10, 11, 12, 13, 14)

	checkRange(AllKeys(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)

	// Read from an index with DESC ordering.
	wantNums := []int{14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}
	if msg, ok := compareRows(client.Single().ReadUsingIndex(ctx, testTable, "TestTableByValueDesc", AllKeys(), testTableColumns),
		wantNums); !ok {
		t.Errorf("desc: %s", msg)
	}
}

// compareRows reads all rows from iter and checks they are exactly the
// k%d/v%d pairs named by wantNums (order-insensitive).
func compareRows(iter *RowIterator, wantNums []int) (string, bool) {
	rows, err := readAllTestTable(iter)
	if err != nil {
		return err.Error(), false
	}
	want := map[string]string{}
	for _, n := range wantNums {
		want[fmt.Sprintf("k%d", n)] = fmt.Sprintf("v%d", n)
	}
	got := map[string]string{}
	for _, r := range rows {
		got[r.Key] = r.StringValue
	}
	if !reflect.DeepEqual(got, want) {
		return fmt.Sprintf("got %v, want %v", got, want), false
	}
	return "", true
}

func TestEarlyTimestamp(t *testing.T) {
	// Test that we can get the timestamp from a read-only transaction as
	// soon as we have read at least one row.
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	// Set up testing environment.
	if err := prepare(ctx, t, readDBStatements); err != nil {
		// If prepare() fails, tear down whatever that's already up.
		tearDown(ctx, t)
		t.Fatalf("cannot set up testing environment: %v", err)
	}
	// After all tests, tear down testing environment.
	defer tearDown(ctx, t)

	var ms []*Mutation
	for i := 0; i < 3; i++ {
		ms = append(ms, InsertOrUpdate(testTable, testTableColumns, []interface{}{fmt.Sprintf("k%d", i), fmt.Sprintf("v%d", i)}))
	}
	if _, err := client.Apply(ctx, ms, ApplyAtLeastOnce()); err != nil {
		t.Fatal(err)
	}

	txn := client.Single()
	iter := txn.Read(ctx, testTable, AllKeys(), testTableColumns)
	defer iter.Stop()
	// In single-use transaction, we should get an error before reading anything.
	if _, err := txn.Timestamp(); err == nil {
		t.Error("wanted error, got nil")
	}
	// After reading one row, the timestamp should be available.
_, err := iter.Next() if err != nil { t.Fatal(err) } if _, err := txn.Timestamp(); err != nil { t.Errorf("got %v, want nil", err) } txn = client.ReadOnlyTransaction() defer txn.Close() iter = txn.Read(ctx, testTable, AllKeys(), testTableColumns) defer iter.Stop() // In an ordinary read-only transaction, the timestamp should be // available immediately. if _, err := txn.Timestamp(); err != nil { t.Errorf("got %v, want nil", err) } } func TestNestedTransaction(t *testing.T) { // You cannot use a transaction from inside a read-write transaction. ctx := context.Background() if err := prepare(ctx, t, singerDBStatements); err != nil { tearDown(ctx, t) t.Fatalf("cannot set up testing environment: %v", err) } defer tearDown(ctx, t) client.ReadWriteTransaction(ctx, func(ctx context.Context, tx *ReadWriteTransaction) error { _, err := client.ReadWriteTransaction(ctx, func(context.Context, *ReadWriteTransaction) error { return nil }) if ErrCode(err) != codes.FailedPrecondition { t.Fatalf("got %v, want FailedPrecondition", err) } _, err = client.Single().ReadRow(ctx, "Singers", Key{1}, []string{"SingerId"}) if ErrCode(err) != codes.FailedPrecondition { t.Fatalf("got %v, want FailedPrecondition", err) } rot := client.ReadOnlyTransaction() defer rot.Close() _, err = rot.ReadRow(ctx, "Singers", Key{1}, []string{"SingerId"}) if ErrCode(err) != codes.FailedPrecondition { t.Fatalf("got %v, want FailedPrecondition", err) } return nil }) } // Test client recovery on database recreation. func TestDbRemovalRecovery(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() if err := prepare(ctx, t, singerDBStatements); err != nil { tearDown(ctx, t) t.Fatalf("cannot set up testing environment: %v", err) } defer tearDown(ctx, t) // Drop the testing database. if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{db}); err != nil { t.Fatalf("failed to drop testing database %v: %v", db, err) } // Now, send the query. 
iter := client.Single().Query(ctx, Statement{SQL: "SELECT SingerId FROM Singers"}) defer iter.Stop() if _, err := iter.Next(); err == nil { t.Errorf("client sends query to removed database successfully, want it to fail") } // Recreate database and table. op, err := admin.CreateDatabase(ctx, &adminpb.CreateDatabaseRequest{ Parent: fmt.Sprintf("projects/%v/instances/%v", testProjectID, testInstanceID), CreateStatement: "CREATE DATABASE " + dbName, ExtraStatements: []string{ `CREATE TABLE Singers ( SingerId INT64 NOT NULL, FirstName STRING(1024), LastName STRING(1024), SingerInfo BYTES(MAX) ) PRIMARY KEY (SingerId)`, }, }) if _, err := op.Wait(ctx); err != nil { t.Errorf("cannot recreate testing DB %v: %v", db, err) } // Now, send the query again. iter = client.Single().Query(ctx, Statement{SQL: "SELECT SingerId FROM Singers"}) defer iter.Stop() _, err = iter.Next() if err != nil && err != iterator.Done { t.Fatalf("failed to send query to database %v: %v", db, err) } } // Test encoding/decoding non-struct Cloud Spanner types. 
func TestBasicTypes(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if err := prepare(ctx, t, singerDBStatements); err != nil {
		tearDown(ctx, t)
		t.Fatalf("cannot set up testing environment: %v", err)
	}
	defer tearDown(ctx, t)

	t1, _ := time.Parse(time.RFC3339Nano, "2016-11-15T15:04:05.999999999Z")
	// Boundaries
	t2, _ := time.Parse(time.RFC3339Nano, "0001-01-01T00:00:00.000000000Z")
	t3, _ := time.Parse(time.RFC3339Nano, "9999-12-31T23:59:59.999999999Z")
	d1, _ := civil.ParseDate("2016-11-15")
	// Boundaries
	d2, _ := civil.ParseDate("0001-01-01")
	d3, _ := civil.ParseDate("9999-12-31")
	// Each case writes val to col; a nil want means the value is expected
	// to round-trip unchanged.
	tests := []struct {
		col  string
		val  interface{}
		want interface{}
	}{
		{col: "String", val: ""},
		{col: "String", val: "", want: NullString{"", true}},
		{col: "String", val: "foo"},
		{col: "String", val: "foo", want: NullString{"foo", true}},
		{col: "String", val: NullString{"bar", true}, want: "bar"},
		{col: "String", val: NullString{"bar", false}, want: NullString{"", false}},
		{col: "StringArray", val: []string(nil), want: []NullString(nil)},
		{col: "StringArray", val: []string{}, want: []NullString{}},
		{col: "StringArray", val: []string{"foo", "bar"}, want: []NullString{{"foo", true}, {"bar", true}}},
		{col: "StringArray", val: []NullString(nil)},
		{col: "StringArray", val: []NullString{}},
		{col: "StringArray", val: []NullString{{"foo", true}, {}}},
		{col: "Bytes", val: []byte{}},
		{col: "Bytes", val: []byte{1, 2, 3}},
		{col: "Bytes", val: []byte(nil)},
		{col: "BytesArray", val: [][]byte(nil)},
		{col: "BytesArray", val: [][]byte{}},
		{col: "BytesArray", val: [][]byte{[]byte{1}, []byte{2, 3}}},
		{col: "Int64a", val: 0, want: int64(0)},
		{col: "Int64a", val: -1, want: int64(-1)},
		{col: "Int64a", val: 2, want: int64(2)},
		{col: "Int64a", val: int64(3)},
		{col: "Int64a", val: 4, want: NullInt64{4, true}},
		{col: "Int64a", val: NullInt64{5, true}, want: int64(5)},
		{col: "Int64a", val: NullInt64{6, true}, want: int64(6)},
		{col: "Int64a", val: NullInt64{7, false}, want: NullInt64{0, false}},
		{col: "Int64Array", val: []int(nil), want: []NullInt64(nil)},
		{col: "Int64Array", val: []int{}, want: []NullInt64{}},
		{col: "Int64Array", val: []int{1, 2}, want: []NullInt64{{1, true}, {2, true}}},
		{col: "Int64Array", val: []int64(nil), want: []NullInt64(nil)},
		{col: "Int64Array", val: []int64{}, want: []NullInt64{}},
		{col: "Int64Array", val: []int64{1, 2}, want: []NullInt64{{1, true}, {2, true}}},
		{col: "Int64Array", val: []NullInt64(nil)},
		{col: "Int64Array", val: []NullInt64{}},
		{col: "Int64Array", val: []NullInt64{{1, true}, {}}},
		{col: "Bool", val: false},
		{col: "Bool", val: true},
		{col: "Bool", val: false, want: NullBool{false, true}},
		{col: "Bool", val: true, want: NullBool{true, true}},
		{col: "Bool", val: NullBool{true, true}},
		{col: "Bool", val: NullBool{false, false}},
		{col: "BoolArray", val: []bool(nil), want: []NullBool(nil)},
		{col: "BoolArray", val: []bool{}, want: []NullBool{}},
		{col: "BoolArray", val: []bool{true, false}, want: []NullBool{{true, true}, {false, true}}},
		{col: "BoolArray", val: []NullBool(nil)},
		{col: "BoolArray", val: []NullBool{}},
		{col: "BoolArray", val: []NullBool{{false, true}, {true, true}, {}}},
		{col: "Float64", val: 0.0},
		{col: "Float64", val: 3.14},
		{col: "Float64", val: math.NaN()},
		{col: "Float64", val: math.Inf(1)},
		{col: "Float64", val: math.Inf(-1)},
		{col: "Float64", val: 2.78, want: NullFloat64{2.78, true}},
		{col: "Float64", val: NullFloat64{2.71, true}, want: 2.71},
		{col: "Float64", val: NullFloat64{1.41, true}, want: NullFloat64{1.41, true}},
		{col: "Float64", val: NullFloat64{0, false}},
		{col: "Float64Array", val: []float64(nil), want: []NullFloat64(nil)},
		{col: "Float64Array", val: []float64{}, want: []NullFloat64{}},
		{col: "Float64Array", val: []float64{2.72, 3.14, math.Inf(1)}, want: []NullFloat64{{2.72, true}, {3.14, true}, {math.Inf(1), true}}},
		{col: "Float64Array", val: []NullFloat64(nil)},
		{col: "Float64Array", val: []NullFloat64{}},
		{col: "Float64Array", val: []NullFloat64{{2.72, true}, {math.Inf(1), true}, {}}},
		{col: "Date", val: d1},
		{col: "Date", val: d1, want: NullDate{d1, true}},
		{col: "Date", val: NullDate{d1, true}},
		{col: "Date", val: NullDate{d1, true}, want: d1},
		{col: "Date", val: NullDate{civil.Date{}, false}},
		{col: "DateArray", val: []civil.Date(nil), want: []NullDate(nil)},
		{col: "DateArray", val: []civil.Date{}, want: []NullDate{}},
		{col: "DateArray", val: []civil.Date{d1, d2, d3}, want: []NullDate{{d1, true}, {d2, true}, {d3, true}}},
		{col: "Timestamp", val: t1},
		{col: "Timestamp", val: t1, want: NullTime{t1, true}},
		{col: "Timestamp", val: NullTime{t1, true}},
		{col: "Timestamp", val: NullTime{t1, true}, want: t1},
		{col: "Timestamp", val: NullTime{}},
		{col: "TimestampArray", val: []time.Time(nil), want: []NullTime(nil)},
		{col: "TimestampArray", val: []time.Time{}, want: []NullTime{}},
		{col: "TimestampArray", val: []time.Time{t1, t2, t3}, want: []NullTime{{t1, true}, {t2, true}, {t3, true}}},
	}
	// Write rows into table first. The case index doubles as the row key.
	var muts []*Mutation
	for i, test := range tests {
		muts = append(muts, InsertOrUpdate("Types", []string{"RowID", test.col}, []interface{}{i, test.val}))
	}
	if _, err := client.Apply(ctx, muts, ApplyAtLeastOnce()); err != nil {
		t.Fatal(err)
	}

	for i, test := range tests {
		row, err := client.Single().ReadRow(ctx, "Types", []interface{}{i}, []string{test.col})
		if err != nil {
			t.Fatalf("Unable to fetch row %v: %v", i, err)
		}
		// Create new instance of type of test.want.
		want := test.want
		if want == nil {
			want = test.val
		}
		gotp := reflect.New(reflect.TypeOf(want))
		if err := row.Column(0, gotp.Interface()); err != nil {
			t.Errorf("%d: col:%v val:%#v, %v", i, test.col, test.val, err)
			continue
		}
		got := reflect.Indirect(gotp).Interface()
		// One of the test cases is checking NaN handling. Given
		// NaN!=NaN, we can't use reflect to test for it.
		if isNaN(got) && isNaN(want) {
			continue
		}
		// Check non-NaN cases.
		if !reflect.DeepEqual(got, want) {
			t.Errorf("%d: col:%v val:%#v, got %#v, want %#v", i, test.col, test.val, got, want)
			continue
		}
	}
}

// Test decoding Cloud Spanner STRUCT type.
func TestStructTypes(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	if err := prepare(ctx, t, singerDBStatements); err != nil {
		tearDown(ctx, t)
		t.Fatalf("cannot set up testing environment: %v", err)
	}
	defer tearDown(ctx, t)

	tests := []struct {
		q    Statement
		want func(r *Row) error
	}{
		{
			q: Statement{SQL: `SELECT ARRAY(SELECT STRUCT(1, 2))`},
			want: func(r *Row) error {
				// Test STRUCT ARRAY decoding to []NullRow.
				var rows []NullRow
				if err := r.Column(0, &rows); err != nil {
					return err
				}
				if len(rows) != 1 {
					return fmt.Errorf("len(rows) = %d; want 1", len(rows))
				}
				if !rows[0].Valid {
					return fmt.Errorf("rows[0] is NULL")
				}
				var i, j int64
				if err := rows[0].Row.Columns(&i, &j); err != nil {
					return err
				}
				if i != 1 || j != 2 {
					return fmt.Errorf("got (%d,%d), want (1,2)", i, j)
				}
				return nil
			},
		},
		{
			q: Statement{SQL: `SELECT ARRAY(SELECT STRUCT(1 as foo, 2 as bar)) as col1`},
			want: func(r *Row) error {
				// Test Row.ToStruct.
				s := struct {
					Col1 []*struct {
						Foo int64 `spanner:"foo"`
						Bar int64 `spanner:"bar"`
					} `spanner:"col1"`
				}{}
				if err := r.ToStruct(&s); err != nil {
					return err
				}
				want := struct {
					Col1 []*struct {
						Foo int64 `spanner:"foo"`
						Bar int64 `spanner:"bar"`
					} `spanner:"col1"`
				}{
					Col1: []*struct {
						Foo int64 `spanner:"foo"`
						Bar int64 `spanner:"bar"`
					}{
						{
							Foo: 1,
							Bar: 2,
						},
					},
				}
				if !reflect.DeepEqual(want, s) {
					return fmt.Errorf("unexpected decoding result: %v, want %v", s, want)
				}
				return nil
			},
		},
	}
	for i, test := range tests {
		iter := client.Single().Query(ctx, test.q)
		defer iter.Stop()
		row, err := iter.Next()
		if err != nil {
			t.Errorf("%d: %v", i, err)
			continue
		}
		if err := test.want(row); err != nil {
			t.Errorf("%d: %v", i, err)
			continue
		}
	}
}

// Test queries of the form "SELECT expr".
func TestQueryExpressions(t *testing.T) { ctx := context.Background() if err := prepare(ctx, t, nil); err != nil { tearDown(ctx, t) t.Fatalf("cannot set up testing environment: %v", err) } defer tearDown(ctx, t) newRow := func(vals []interface{}) *Row { row, err := NewRow(make([]string, len(vals)), vals) if err != nil { t.Fatal(err) } return row } tests := []struct { expr string want interface{} }{ {"1", int64(1)}, {"[1, 2, 3]", []NullInt64{{1, true}, {2, true}, {3, true}}}, {"[1, NULL, 3]", []NullInt64{{1, true}, {0, false}, {3, true}}}, {"IEEE_DIVIDE(1, 0)", math.Inf(1)}, {"IEEE_DIVIDE(-1, 0)", math.Inf(-1)}, {"IEEE_DIVIDE(0, 0)", math.NaN()}, // TODO(jba): add IEEE_DIVIDE(0, 0) to the following array when we have a better equality predicate. {"[IEEE_DIVIDE(1, 0), IEEE_DIVIDE(-1, 0)]", []NullFloat64{{math.Inf(1), true}, {math.Inf(-1), true}}}, {"ARRAY(SELECT AS STRUCT * FROM (SELECT 'a', 1) WHERE 0 = 1)", []NullRow{}}, {"ARRAY(SELECT STRUCT(1, 2))", []NullRow{{Row: *newRow([]interface{}{1, 2}), Valid: true}}}, } for _, test := range tests { iter := client.Single().Query(ctx, Statement{SQL: "SELECT " + test.expr}) defer iter.Stop() row, err := iter.Next() if err != nil { t.Errorf("%q: %v", test.expr, err) continue } // Create new instance of type of test.want. gotp := reflect.New(reflect.TypeOf(test.want)) if err := row.Column(0, gotp.Interface()); err != nil { t.Errorf("%q: Column returned error %v", test.expr, err) continue } got := reflect.Indirect(gotp).Interface() // TODO(jba): remove isNaN special case when we have a better equality predicate. 
if isNaN(got) && isNaN(test.want) { continue } if !reflect.DeepEqual(got, test.want) { t.Errorf("%q\n got %#v\nwant %#v", test.expr, got, test.want) } } } func isNaN(x interface{}) bool { f, ok := x.(float64) if !ok { return false } return math.IsNaN(f) } func TestInvalidDatabase(t *testing.T) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } if testProjectID == "" { t.Skip("Integration tests skipped: GCLOUD_TESTS_GOLANG_PROJECT_ID is missing") } ctx := context.Background() ts := testutil.TokenSource(ctx, Scope) if ts == nil { t.Skip("Integration test skipped: cannot get service account credential from environment variable %v", "GCLOUD_TESTS_GOLANG_KEY") } db := fmt.Sprintf("projects/%v/instances/%v/databases/invalid", testProjectID, testInstanceID) c, err := NewClient(ctx, db, option.WithTokenSource(ts)) // Client creation should succeed even if the database is invalid. if err != nil { t.Fatal(err) } _, err = c.Single().ReadRow(ctx, "TestTable", Key{1}, []string{"col1"}) if msg, ok := matchError(err, codes.NotFound, ""); !ok { t.Fatal(msg) } } func TestReadErrors(t *testing.T) { ctx := context.Background() if err := prepare(ctx, t, readDBStatements); err != nil { tearDown(ctx, t) t.Fatalf("cannot set up testing environment: %v", err) } defer tearDown(ctx, t) // Read over invalid table fails _, err := client.Single().ReadRow(ctx, "badTable", Key{1}, []string{"StringValue"}) if msg, ok := matchError(err, codes.NotFound, "badTable"); !ok { t.Error(msg) } // Read over invalid column fails _, err = client.Single().ReadRow(ctx, "TestTable", Key{1}, []string{"badcol"}) if msg, ok := matchError(err, codes.NotFound, "badcol"); !ok { t.Error(msg) } // Invalid query fails iter := client.Single().Query(ctx, Statement{SQL: "SELECT Apples AND Oranges"}) defer iter.Stop() _, err = iter.Next() if msg, ok := matchError(err, codes.InvalidArgument, "unrecognized name"); !ok { t.Error(msg) } // Read should fail on cancellation. 
cctx, cancel := context.WithCancel(ctx) cancel() _, err = client.Single().ReadRow(cctx, "TestTable", Key{1}, []string{"StringValue"}) if msg, ok := matchError(err, codes.Canceled, ""); !ok { t.Error(msg) } // Read should fail if deadline exceeded. dctx, _ := context.WithTimeout(ctx, time.Nanosecond) <-dctx.Done() _, err = client.Single().ReadRow(dctx, "TestTable", Key{1}, []string{"StringValue"}) if msg, ok := matchError(err, codes.DeadlineExceeded, ""); !ok { t.Error(msg) } } func matchError(got error, wantCode codes.Code, wantMsgPart string) (string, bool) { if ErrCode(got) != wantCode || !strings.Contains(strings.ToLower(ErrDesc(got)), strings.ToLower(wantMsgPart)) { return fmt.Sprintf("got error <%v>\n"+`want `, got, wantCode, wantMsgPart), false } return "", true } func rowToValues(r *Row) ([]interface{}, error) { var x int64 var y, z string if err := r.Column(0, &x); err != nil { return nil, err } if err := r.Column(1, &y); err != nil { return nil, err } if err := r.Column(2, &z); err != nil { return nil, err } return []interface{}{x, y, z}, nil } func readAll(iter *RowIterator) ([][]interface{}, error) { defer iter.Stop() var vals [][]interface{} for { row, err := iter.Next() if err == iterator.Done { return vals, nil } if err != nil { return nil, err } v, err := rowToValues(row) if err != nil { return nil, err } vals = append(vals, v) } } func readAllTestTable(iter *RowIterator) ([]testTableRow, error) { defer iter.Stop() var vals []testTableRow for { row, err := iter.Next() if err == iterator.Done { return vals, nil } if err != nil { return nil, err } var ttr testTableRow if err := row.ToStruct(&ttr); err != nil { return nil, err } vals = append(vals, ttr) } } golang-google-cloud-0.9.0/spanner/statement.go000066400000000000000000000047751312234511600213610ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "fmt" proto3 "github.com/golang/protobuf/ptypes/struct" sppb "google.golang.org/genproto/googleapis/spanner/v1" "google.golang.org/grpc/codes" ) // A Statement is a SQL query with named parameters. // // A parameter placeholder consists of '@' followed by the parameter name. // Parameter names consist of any combination of letters, numbers, and // underscores. Names may be entirely numeric (e.g., "WHERE m.id = @5"). // Parameters may appear anywhere that a literal value is expected. The same // parameter name may be used more than once. It is an error to execute a // statement with unbound parameters. On the other hand, it is allowable to // bind parameter names that are not used. // // See the documentation of the Row type for how Go types are mapped to Cloud // Spanner types. type Statement struct { SQL string Params map[string]interface{} } // NewStatement returns a Statement with the given SQL and an empty Params map. func NewStatement(sql string) Statement { return Statement{SQL: sql, Params: map[string]interface{}{}} } // errBindParam returns error for not being able to bind parameter to query request. 
func errBindParam(k string, v interface{}, err error) error { if err == nil { return nil } se, ok := toSpannerError(err).(*Error) if !ok { return spannerErrorf(codes.InvalidArgument, "failed to bind query parameter(name: %q, value: %q), error = <%v>", k, v, err) } se.decorate(fmt.Sprintf("failed to bind query parameter(name: %q, value: %q)", k, v)) return se } // bindParams binds parameters in a Statement to a sppb.ExecuteSqlRequest. func (s *Statement) bindParams(r *sppb.ExecuteSqlRequest) error { r.Params = &proto3.Struct{ Fields: map[string]*proto3.Value{}, } r.ParamTypes = map[string]*sppb.Type{} for k, v := range s.Params { val, t, err := encodeValue(v) if err != nil { return errBindParam(k, v, err) } r.Params.Fields[k] = val r.ParamTypes[k] = t } return nil } golang-google-cloud-0.9.0/spanner/statement_test.go000066400000000000000000000036041312234511600224060ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "reflect" "testing" proto3 "github.com/golang/protobuf/ptypes/struct" sppb "google.golang.org/genproto/googleapis/spanner/v1" ) // Test Statement.bindParams. func TestBindParams(t *testing.T) { // Verify Statement.bindParams generates correct values and types. 
want := sppb.ExecuteSqlRequest{ Params: &proto3.Struct{ Fields: map[string]*proto3.Value{ "var1": stringProto("abc"), "var2": intProto(1), }, }, ParamTypes: map[string]*sppb.Type{ "var1": stringType(), "var2": intType(), }, } st := Statement{ SQL: "SELECT id from t_foo WHERE col1 = @var1 AND col2 = @var2", Params: map[string]interface{}{"var1": "abc", "var2": int64(1)}, } got := sppb.ExecuteSqlRequest{} if err := st.bindParams(&got); err != nil || !reflect.DeepEqual(got, want) { t.Errorf("bind result: \n(%v, %v)\nwant\n(%v, %v)\n", got, err, want, nil) } // Verify type error reporting. st.Params["var2"] = struct{}{} wantErr := errBindParam("var2", struct{}{}, errEncoderUnsupportedType(struct{}{})) if err := st.bindParams(&got); !reflect.DeepEqual(err, wantErr) { t.Errorf("got unexpected error: %v, want: %v", err, wantErr) } } func TestNewStatement(t *testing.T) { s := NewStatement("query") if got, want := s.SQL, "query"; got != want { t.Errorf("got %q, want %q", got, want) } } golang-google-cloud-0.9.0/spanner/timestampbound.go000066400000000000000000000215231312234511600223760ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "fmt" "time" pbd "github.com/golang/protobuf/ptypes/duration" pbt "github.com/golang/protobuf/ptypes/timestamp" sppb "google.golang.org/genproto/googleapis/spanner/v1" ) // timestampBoundType specifies the timestamp bound mode. 
type timestampBoundType int const ( strong timestampBoundType = iota // strong reads exactStaleness // read with exact staleness maxStaleness // read with max staleness minReadTimestamp // read with min freshness readTimestamp // read data at exact timestamp ) // TimestampBound defines how Cloud Spanner will choose a timestamp for a single // read/query or read-only transaction. // // The types of timestamp bound are: // // - Strong (the default). // - Bounded staleness. // - Exact staleness. // // If the Cloud Spanner database to be read is geographically distributed, stale // read-only transactions can execute more quickly than strong or read-write // transactions, because they are able to execute far from the leader replica. // // Each type of timestamp bound is discussed in detail below. A TimestampBound // can be specified when creating transactions, see the documentation of // spanner.Client for an example. // // Strong reads // // Strong reads are guaranteed to see the effects of all transactions that have // committed before the start of the read. Furthermore, all rows yielded by a // single read are consistent with each other - if any part of the read // observes a transaction, all parts of the read see the transaction. // // Strong reads are not repeatable: two consecutive strong read-only // transactions might return inconsistent results if there are concurrent // writes. If consistency across reads is required, the reads should be // executed within a transaction or at an exact read timestamp. // // Use StrongRead() to create a bound of this type. // // Exact staleness // // These timestamp bounds execute reads at a user-specified timestamp. 
Reads at // a timestamp are guaranteed to see a consistent prefix of the global // transaction history: they observe modifications done by all transactions // with a commit timestamp less than or equal to the read timestamp, and // observe none of the modifications done by transactions with a larger commit // timestamp. They will block until all conflicting transactions that may be // assigned commit timestamps less than or equal to the read timestamp have // finished. // // The timestamp can either be expressed as an absolute Cloud Spanner commit // timestamp or a staleness relative to the current time. // // These modes do not require a "negotiation phase" to pick a timestamp. As a // result, they execute slightly faster than the equivalent boundedly stale // concurrency modes. On the other hand, boundedly stale reads usually return // fresher results. // // Use ReadTimestamp() and ExactStaleness() to create a bound of this type. // // Bounded staleness // // Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to // a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within // the staleness bound that allows execution of the reads at the closest // available replica without blocking. // // All rows yielded are consistent with each other -- if any part of the read // observes a transaction, all parts of the read see the transaction. Boundedly // stale reads are not repeatable: two stale reads, even if they use the same // staleness bound, can execute at different timestamps and thus return // inconsistent results. // // Boundedly stale reads execute in two phases: the first phase negotiates a // timestamp among all replicas needed to serve the read. In the second phase, // reads are executed at the negotiated timestamp. // // As a result of the two phase execution, bounded staleness reads are usually // a little slower than comparable exact staleness reads. 
However, they are // typically able to return fresher results, and are more likely to execute at // the closest replica. // // Because the timestamp negotiation requires up-front knowledge of which rows // will be read, it can only be used with single-use reads and single-use // read-only transactions. // // Use MinReadTimestamp() and MaxStaleness() to create a bound of this type. // // Old read timestamps and garbage collection // // Cloud Spanner continuously garbage collects deleted and overwritten data in the // background to reclaim storage space. This process is known as "version // GC". By default, version GC reclaims versions after they are four hours // old. Because of this, Cloud Spanner cannot perform reads at read timestamps more // than four hours in the past. This restriction also applies to in-progress // reads and/or SQL queries whose timestamp become too old while // executing. Reads and SQL queries with too-old read timestamps fail with the // error ErrorCode.FAILED_PRECONDITION. type TimestampBound struct { mode timestampBoundType d time.Duration t time.Time } // StrongRead returns a TimestampBound that will perform reads and queries at a // timestamp where all previously committed transactions are visible. func StrongRead() TimestampBound { return TimestampBound{mode: strong} } // ExactStaleness returns a TimestampBound that will perform reads and queries // at an exact staleness. func ExactStaleness(d time.Duration) TimestampBound { return TimestampBound{ mode: exactStaleness, d: d, } } // MaxStaleness returns a TimestampBound that will perform reads and queries at // a time chosen to be at most "d" stale. func MaxStaleness(d time.Duration) TimestampBound { return TimestampBound{ mode: maxStaleness, d: d, } } // MinReadTimestamp returns a TimestampBound that bound that will perform reads // and queries at a time chosen to be at least "t". 
func MinReadTimestamp(t time.Time) TimestampBound {
	return TimestampBound{
		mode: minReadTimestamp,
		t:    t,
	}
}

// ReadTimestamp returns a TimestampBound that will perform reads and queries at
// the given time.
func ReadTimestamp(t time.Time) TimestampBound {
	return TimestampBound{
		mode: readTimestamp,
		t:    t,
	}
}

// String implements fmt.Stringer.
func (tb TimestampBound) String() string {
	switch tb.mode {
	case strong:
		// Constant strings need no fmt.Sprintf (staticcheck S1039).
		return "(strong)"
	case exactStaleness:
		return fmt.Sprintf("(exactStaleness: %s)", tb.d)
	case maxStaleness:
		return fmt.Sprintf("(maxStaleness: %s)", tb.d)
	case minReadTimestamp:
		return fmt.Sprintf("(minReadTimestamp: %s)", tb.t)
	case readTimestamp:
		return fmt.Sprintf("(readTimestamp: %s)", tb.t)
	default:
		return fmt.Sprintf("{mode=%v, d=%v, t=%v}", tb.mode, tb.d, tb.t)
	}
}

// durationProto takes a time.Duration and converts it into pbd.Duration for
// calling gRPC APIs.
func durationProto(d time.Duration) *pbd.Duration {
	n := d.Nanoseconds()
	return &pbd.Duration{
		Seconds: n / int64(time.Second),
		Nanos:   int32(n % int64(time.Second)),
	}
}

// timestampProto takes a time.Time and converts it into pbt.Timestamp for calling
// gRPC APIs.
func timestampProto(t time.Time) *pbt.Timestamp {
	return &pbt.Timestamp{
		Seconds: t.Unix(),
		Nanos:   int32(t.Nanosecond()),
	}
}

// buildTransactionOptionsReadOnly converts a spanner.TimestampBound into a sppb.TransactionOptions_ReadOnly
// transaction option, which is then used in transactional reads.
func buildTransactionOptionsReadOnly(tb TimestampBound, returnReadTimestamp bool) *sppb.TransactionOptions_ReadOnly { pb := &sppb.TransactionOptions_ReadOnly{ ReturnReadTimestamp: returnReadTimestamp, } switch tb.mode { case strong: pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_Strong{ Strong: true, } case exactStaleness: pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_ExactStaleness{ ExactStaleness: durationProto(tb.d), } case maxStaleness: pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_MaxStaleness{ MaxStaleness: durationProto(tb.d), } case minReadTimestamp: pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_MinReadTimestamp{ MinReadTimestamp: timestampProto(tb.t), } case readTimestamp: pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_ReadTimestamp{ ReadTimestamp: timestampProto(tb.t), } default: panic(fmt.Sprintf("buildTransactionOptionsReadOnly(%v,%v)", tb, returnReadTimestamp)) } return pb } golang-google-cloud-0.9.0/spanner/timestampbound_test.go000066400000000000000000000137601312234511600234410ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "reflect" "testing" "time" pbd "github.com/golang/protobuf/ptypes/duration" pbt "github.com/golang/protobuf/ptypes/timestamp" sppb "google.golang.org/genproto/googleapis/spanner/v1" ) // Test generating TimestampBound for strong reads. 
func TestStrong(t *testing.T) { got := StrongRead() want := TimestampBound{mode: strong} if !reflect.DeepEqual(got, want) { t.Errorf("Strong() = %v; want %v", got, want) } } // Test generating TimestampBound for reads with exact staleness. func TestExactStaleness(t *testing.T) { got := ExactStaleness(10 * time.Second) want := TimestampBound{mode: exactStaleness, d: 10 * time.Second} if !reflect.DeepEqual(got, want) { t.Errorf("ExactStaleness(10*time.Second) = %v; want %v", got, want) } } // Test generating TimestampBound for reads with max staleness. func TestMaxStaleness(t *testing.T) { got := MaxStaleness(10 * time.Second) want := TimestampBound{mode: maxStaleness, d: 10 * time.Second} if !reflect.DeepEqual(got, want) { t.Errorf("MaxStaleness(10*time.Second) = %v; want %v", got, want) } } // Test generating TimestampBound for reads with minimum freshness requirement. func TestMinReadTimestamp(t *testing.T) { ts := time.Now() got := MinReadTimestamp(ts) want := TimestampBound{mode: minReadTimestamp, t: ts} if !reflect.DeepEqual(got, want) { t.Errorf("MinReadTimestamp(%v) = %v; want %v", ts, got, want) } } // Test generating TimestampBound for reads requesting data at a exact timestamp. func TestReadTimestamp(t *testing.T) { ts := time.Now() got := ReadTimestamp(ts) want := TimestampBound{mode: readTimestamp, t: ts} if !reflect.DeepEqual(got, want) { t.Errorf("ReadTimestamp(%v) = %v; want %v", ts, got, want) } } // Test TimestampBound.String. 
func TestTimestampBoundString(t *testing.T) { ts := time.Unix(1136239445, 0).UTC() var tests = []struct { tb TimestampBound want string }{ { tb: TimestampBound{mode: strong}, want: "(strong)", }, { tb: TimestampBound{mode: exactStaleness, d: 10 * time.Second}, want: "(exactStaleness: 10s)", }, { tb: TimestampBound{mode: maxStaleness, d: 10 * time.Second}, want: "(maxStaleness: 10s)", }, { tb: TimestampBound{mode: minReadTimestamp, t: ts}, want: "(minReadTimestamp: 2006-01-02 22:04:05 +0000 UTC)", }, { tb: TimestampBound{mode: readTimestamp, t: ts}, want: "(readTimestamp: 2006-01-02 22:04:05 +0000 UTC)", }, } for _, test := range tests { got := test.tb.String() if got != test.want { t.Errorf("%#v.String():\ngot %q\nwant %q", test.tb, got, test.want) } } } // Test time.Duration to pdb.Duration conversion. func TestDurationProto(t *testing.T) { var tests = []struct { d time.Duration want pbd.Duration }{ {time.Duration(0), pbd.Duration{Seconds: 0, Nanos: 0}}, {time.Second, pbd.Duration{Seconds: 1, Nanos: 0}}, {time.Millisecond, pbd.Duration{Seconds: 0, Nanos: 1e6}}, {15 * time.Nanosecond, pbd.Duration{Seconds: 0, Nanos: 15}}, {42 * time.Hour, pbd.Duration{Seconds: 151200}}, {-(1*time.Hour + 4*time.Millisecond), pbd.Duration{Seconds: -3600, Nanos: -4e6}}, } for _, test := range tests { got := durationProto(test.d) if !reflect.DeepEqual(got, &test.want) { t.Errorf("durationProto(%v) = %v; want %v", test.d, got, test.want) } } } // Test time.Time to pbt.Timestamp conversion. func TestTimeProto(t *testing.T) { var tests = []struct { t time.Time want pbt.Timestamp }{ {time.Unix(0, 0), pbt.Timestamp{}}, {time.Unix(1136239445, 12345), pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}}, {time.Unix(-1000, 12345), pbt.Timestamp{Seconds: -1000, Nanos: 12345}}, } for _, test := range tests { got := timestampProto(test.t) if !reflect.DeepEqual(got, &test.want) { t.Errorf("timestampProto(%v) = %v; want %v", test.t, got, test.want) } } } // Test readonly transaction option builder. 
func TestBuildTransactionOptionsReadOnly(t *testing.T) { ts := time.Unix(1136239445, 12345) var tests = []struct { tb TimestampBound ts bool want sppb.TransactionOptions_ReadOnly }{ { StrongRead(), false, sppb.TransactionOptions_ReadOnly{ TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{ Strong: true}, ReturnReadTimestamp: false, }, }, { ExactStaleness(10 * time.Second), true, sppb.TransactionOptions_ReadOnly{ TimestampBound: &sppb.TransactionOptions_ReadOnly_ExactStaleness{ ExactStaleness: &pbd.Duration{Seconds: 10}}, ReturnReadTimestamp: true, }, }, { MaxStaleness(10 * time.Second), true, sppb.TransactionOptions_ReadOnly{ TimestampBound: &sppb.TransactionOptions_ReadOnly_MaxStaleness{ MaxStaleness: &pbd.Duration{Seconds: 10}}, ReturnReadTimestamp: true, }, }, { MinReadTimestamp(ts), true, sppb.TransactionOptions_ReadOnly{ TimestampBound: &sppb.TransactionOptions_ReadOnly_MinReadTimestamp{ MinReadTimestamp: &pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}}, ReturnReadTimestamp: true, }, }, { ReadTimestamp(ts), true, sppb.TransactionOptions_ReadOnly{ TimestampBound: &sppb.TransactionOptions_ReadOnly_ReadTimestamp{ ReadTimestamp: &pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}}, ReturnReadTimestamp: true, }, }, } for _, test := range tests { got := buildTransactionOptionsReadOnly(test.tb, test.ts) if !reflect.DeepEqual(got, &test.want) { t.Errorf("buildTransactionOptionsReadOnly(%v,%v) = %v; want %v", test.tb, test.ts, got, test.want) } } } golang-google-cloud-0.9.0/spanner/transaction.go000066400000000000000000000651501312234511600216740ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "sync" "time" "golang.org/x/net/context" "google.golang.org/api/iterator" sppb "google.golang.org/genproto/googleapis/spanner/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" ) // transactionID stores a transaction ID which uniquely identifies a transaction in Cloud Spanner. type transactionID []byte // txReadEnv manages a read-transaction environment consisting of a session handle and a transaction selector. type txReadEnv interface { // acquire returns a read-transaction environment that can be used to perform a transactional read. acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) // sets the transaction's read timestamp setTimestamp(time.Time) // release should be called at the end of every transactional read to deal with session recycling. release(error) } // txReadOnly contains methods for doing transactional reads. type txReadOnly struct { // read-transaction environment for performing transactional read operations. txReadEnv } // errSessionClosed returns error for using a recycled/destroyed session func errSessionClosed(sh *sessionHandle) error { return spannerErrorf(codes.FailedPrecondition, "session is already recycled / destroyed: session_id = %q, rpc_client = %v", sh.getID(), sh.getClient()) } // Read returns a RowIterator for reading multiple rows from the database. func (t *txReadOnly) Read(ctx context.Context, table string, keys KeySet, columns []string) *RowIterator { // ReadUsingIndex will use primary index if an empty index name is provided. 
return t.ReadUsingIndex(ctx, table, "", keys, columns) } // ReadUsingIndex returns a RowIterator for reading multiple rows from the database // using an index. // // Currently, this function can only read columns that are part of the index // key, part of the primary key, or stored in the index due to a STORING clause // in the index definition. func (t *txReadOnly) ReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string) *RowIterator { var ( sh *sessionHandle ts *sppb.TransactionSelector err error ) kset, err := keys.keySetProto() if err != nil { return &RowIterator{err: err} } if sh, ts, err = t.acquire(ctx); err != nil { return &RowIterator{err: err} } // Cloud Spanner will return "Session not found" on bad sessions. sid, client := sh.getID(), sh.getClient() if sid == "" || client == nil { // Might happen if transaction is closed in the middle of a API call. return &RowIterator{err: errSessionClosed(sh)} } return stream( contextWithOutgoingMetadata(ctx, sh.getMetadata()), func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) { return client.StreamingRead(ctx, &sppb.ReadRequest{ Session: sid, Transaction: ts, Table: table, Index: index, Columns: columns, KeySet: kset, ResumeToken: resumeToken, }) }, t.setTimestamp, t.release, ) } // errRowNotFound returns error for not being able to read the row identified by key. func errRowNotFound(table string, key Key) error { return spannerErrorf(codes.NotFound, "row not found(Table: %v, PrimaryKey: %v)", table, key) } // ReadRow reads a single row from the database. // // If no row is present with the given key, then ReadRow returns an error where // spanner.ErrCode(err) is codes.NotFound. 
func (t *txReadOnly) ReadRow(ctx context.Context, table string, key Key, columns []string) (*Row, error) { iter := t.Read(ctx, table, key, columns) defer iter.Stop() row, err := iter.Next() switch err { case iterator.Done: return nil, errRowNotFound(table, key) case nil: return row, nil default: return nil, err } } // Query executes a query against the database. It returns a RowIterator // for retrieving the resulting rows. func (t *txReadOnly) Query(ctx context.Context, statement Statement) *RowIterator { var ( sh *sessionHandle ts *sppb.TransactionSelector err error ) if sh, ts, err = t.acquire(ctx); err != nil { return &RowIterator{err: err} } // Cloud Spanner will return "Session not found" on bad sessions. sid, client := sh.getID(), sh.getClient() if sid == "" || client == nil { // Might happen if transaction is closed in the middle of a API call. return &RowIterator{err: errSessionClosed(sh)} } req := &sppb.ExecuteSqlRequest{ Session: sid, Transaction: ts, Sql: statement.SQL, } if err := statement.bindParams(req); err != nil { return &RowIterator{err: err} } return stream( contextWithOutgoingMetadata(ctx, sh.getMetadata()), func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) { req.ResumeToken = resumeToken return client.ExecuteStreamingSql(ctx, req) }, t.setTimestamp, t.release) } // txState is the status of a transaction. type txState int const ( // transaction is new, waiting to be initialized. txNew txState = iota // transaction is being initialized. txInit // transaction is active and can perform read/write. txActive // transaction is closed, cannot be used anymore. txClosed ) // errRtsUnavailable returns error for read transaction's read timestamp being unavailable. func errRtsUnavailable() error { return spannerErrorf(codes.Internal, "read timestamp is unavailable") } // errTxNotInitialized returns error for using an uninitialized transaction. 
func errTxNotInitialized() error { return spannerErrorf(codes.InvalidArgument, "cannot use a uninitialized transaction") } // errTxClosed returns error for using a closed transaction. func errTxClosed() error { return spannerErrorf(codes.InvalidArgument, "cannot use a closed transaction") } // errUnexpectedTxState returns error for transaction enters an unexpected state. func errUnexpectedTxState(ts txState) error { return spannerErrorf(codes.FailedPrecondition, "unexpected transaction state: %v", ts) } // ReadOnlyTransaction provides a snapshot transaction with guaranteed // consistency across reads, but does not allow writes. Read-only // transactions can be configured to read at timestamps in the past. // // Read-only transactions do not take locks. Instead, they work by choosing a // Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do // not acquire locks, they do not block concurrent read-write transactions. // // Unlike locking read-write transactions, read-only transactions never // abort. They can fail if the chosen read timestamp is garbage collected; // however, the default garbage collection policy is generous enough that most // applications do not need to worry about this in practice. See the // documentation of TimestampBound for more details. // // A ReadOnlyTransaction consumes resources on the server until Close() is // called. type ReadOnlyTransaction struct { // txReadOnly contains methods for performing transactional reads. txReadOnly // singleUse indicates that the transaction can be used for only one read. singleUse bool // sp is the session pool for allocating a session to execute the read-only transaction. It is set only once during initialization of the ReadOnlyTransaction. sp *sessionPool // mu protects concurrent access to the internal states of ReadOnlyTransaction. mu sync.Mutex // tx is the transaction ID in Cloud Spanner that uniquely identifies the ReadOnlyTransaction. 
tx transactionID // txReadyOrClosed is for broadcasting that transaction ID has been returned by Cloud Spanner or that transaction is closed. txReadyOrClosed chan struct{} // state is the current transaction status of the ReadOnly transaction. state txState // sh is the sessionHandle allocated from sp. sh *sessionHandle // rts is the read timestamp returned by transactional reads. rts time.Time // tb is the read staleness bound specification for transactional reads. tb TimestampBound } // errTxInitTimeout returns error for timeout in waiting for initialization of the transaction. func errTxInitTimeout() error { return spannerErrorf(codes.Canceled, "timeout/context canceled in waiting for transaction's initialization") } // getTimestampBound returns the read staleness bound specified for the ReadOnlyTransaction. func (t *ReadOnlyTransaction) getTimestampBound() TimestampBound { t.mu.Lock() defer t.mu.Unlock() return t.tb } // begin starts a snapshot read-only Transaction on Cloud Spanner. func (t *ReadOnlyTransaction) begin(ctx context.Context) error { var ( locked bool tx transactionID rts time.Time sh *sessionHandle err error ) defer func() { if !locked { t.mu.Lock() // Not necessary, just to make it clear that t.mu is being held when locked == true. locked = true } if t.state != txClosed { // Signal other initialization routines. close(t.txReadyOrClosed) t.txReadyOrClosed = make(chan struct{}) } t.mu.Unlock() if err != nil && sh != nil { // Got a valid session handle, but failed to initalize transaction on Cloud Spanner. if shouldDropSession(err) { sh.destroy() } // If sh.destroy was already executed, this becomes a noop. 
sh.recycle() } }() sh, err = t.sp.take(ctx) if err != nil { return err } err = runRetryable(contextWithOutgoingMetadata(ctx, sh.getMetadata()), func(ctx context.Context) error { res, e := sh.getClient().BeginTransaction(ctx, &sppb.BeginTransactionRequest{ Session: sh.getID(), Options: &sppb.TransactionOptions{ Mode: &sppb.TransactionOptions_ReadOnly_{ ReadOnly: buildTransactionOptionsReadOnly(t.getTimestampBound(), true), }, }, }) if e != nil { return e } tx = res.Id if res.ReadTimestamp != nil { rts = time.Unix(res.ReadTimestamp.Seconds, int64(res.ReadTimestamp.Nanos)) } return nil }) t.mu.Lock() locked = true // defer function will be executed with t.mu being held. if t.state == txClosed { // During the execution of t.begin(), t.Close() was invoked. return errSessionClosed(sh) } // If begin() fails, this allows other queries to take over the initialization. t.tx = nil if err == nil { t.tx = tx t.rts = rts t.sh = sh // State transite to txActive. t.state = txActive } return err } // acquire implements txReadEnv.acquire. func (t *ReadOnlyTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) { if err := checkNestedTxn(ctx); err != nil { return nil, nil, err } if t.singleUse { return t.acquireSingleUse(ctx) } return t.acquireMultiUse(ctx) } func (t *ReadOnlyTransaction) acquireSingleUse(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) { t.mu.Lock() defer t.mu.Unlock() switch t.state { case txClosed: // A closed single-use transaction can never be reused. return nil, nil, errTxClosed() case txNew: t.state = txClosed ts := &sppb.TransactionSelector{ Selector: &sppb.TransactionSelector_SingleUse{ SingleUse: &sppb.TransactionOptions{ Mode: &sppb.TransactionOptions_ReadOnly_{ ReadOnly: buildTransactionOptionsReadOnly(t.tb, true), }, }, }, } sh, err := t.sp.take(ctx) if err != nil { return nil, nil, err } // Install session handle into t, which can be used for readonly operations later. 
t.sh = sh return sh, ts, nil } us := t.state // SingleUse transaction should only be in either txNew state or txClosed state. return nil, nil, errUnexpectedTxState(us) } func (t *ReadOnlyTransaction) acquireMultiUse(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) { for { t.mu.Lock() switch t.state { case txClosed: t.mu.Unlock() return nil, nil, errTxClosed() case txNew: // State transit to txInit so that no further TimestampBound change is accepted. t.state = txInit t.mu.Unlock() continue case txInit: if t.tx != nil { // Wait for a transaction ID to become ready. txReadyOrClosed := t.txReadyOrClosed t.mu.Unlock() select { case <-txReadyOrClosed: // Need to check transaction state again. continue case <-ctx.Done(): // The waiting for initialization is timeout, return error directly. return nil, nil, errTxInitTimeout() } } // Take the ownership of initializing the transaction. t.tx = transactionID{} t.mu.Unlock() // Begin a read-only transaction. // TODO: consider adding a transaction option which allow queries to initiate transactions by themselves. Note that this option might not be // always good because the ID of the new transaction won't be ready till the query returns some data or completes. if err := t.begin(ctx); err != nil { return nil, nil, err } // If t.begin() succeeded, t.state should have been changed to txActive, so we can just continue here. continue case txActive: sh := t.sh ts := &sppb.TransactionSelector{ Selector: &sppb.TransactionSelector_Id{ Id: t.tx, }, } t.mu.Unlock() return sh, ts, nil } state := t.state t.mu.Unlock() return nil, nil, errUnexpectedTxState(state) } } func (t *ReadOnlyTransaction) setTimestamp(ts time.Time) { t.mu.Lock() defer t.mu.Unlock() if t.rts.IsZero() { t.rts = ts } } // release implements txReadEnv.release. func (t *ReadOnlyTransaction) release(err error) { t.mu.Lock() sh := t.sh t.mu.Unlock() if sh != nil { // sh could be nil if t.acquire() fails. 
if shouldDropSession(err) { sh.destroy() } if t.singleUse { // If session handle is already destroyed, this becomes a noop. sh.recycle() } } } // Close closes a ReadOnlyTransaction, the transaction cannot perform any reads after being closed. func (t *ReadOnlyTransaction) Close() { if t.singleUse { return } t.mu.Lock() if t.state != txClosed { t.state = txClosed close(t.txReadyOrClosed) } sh := t.sh t.mu.Unlock() if sh == nil { return } // If session handle is already destroyed, this becomes a noop. // If there are still active queries and if the recycled session is reused before they complete, Cloud Spanner will cancel them // on behalf of the new transaction on the session. if sh != nil { sh.recycle() } } // Timestamp returns the timestamp chosen to perform reads and // queries in this transaction. The value can only be read after some // read or query has either returned some data or completed without // returning any data. func (t *ReadOnlyTransaction) Timestamp() (time.Time, error) { t.mu.Lock() defer t.mu.Unlock() if t.rts.IsZero() { return t.rts, errRtsUnavailable() } return t.rts, nil } // WithTimestampBound specifies the TimestampBound to use for read or query. // This can only be used before the first read or query is invoked. Note: // bounded staleness is not available with general ReadOnlyTransactions; use a // single-use ReadOnlyTransaction instead. // // The returned value is the ReadOnlyTransaction so calls can be chained. func (t *ReadOnlyTransaction) WithTimestampBound(tb TimestampBound) *ReadOnlyTransaction { t.mu.Lock() defer t.mu.Unlock() if t.state == txNew { // Only allow to set TimestampBound before the first query. t.tb = tb } return t } // ReadWriteTransaction provides a locking read-write transaction. // // This type of transaction is the only way to write data into Cloud Spanner; // (*Client).Apply and (*Client).ApplyAtLeastOnce use transactions // internally. 
These transactions rely on pessimistic locking and, if // necessary, two-phase commit. Locking read-write transactions may abort, // requiring the application to retry. However, the interface exposed by // (*Client).ReadWriteTransaction eliminates the need for applications to write // retry loops explicitly. // // Locking transactions may be used to atomically read-modify-write data // anywhere in a database. This type of transaction is externally consistent. // // Clients should attempt to minimize the amount of time a transaction is // active. Faster transactions commit with higher probability and cause less // contention. Cloud Spanner attempts to keep read locks active as long as the // transaction continues to do reads. Long periods of inactivity at the client // may cause Cloud Spanner to release a transaction's locks and abort it. // // Reads performed within a transaction acquire locks on the data being // read. Writes can only be done at commit time, after all reads have been // completed. Conceptually, a read-write transaction consists of zero or more // reads or SQL queries followed by a commit. // // See (*Client).ReadWriteTransaction for an example. // // Semantics // // Cloud Spanner can commit the transaction if all read locks it acquired are still // valid at commit time, and it is able to acquire write locks for all // writes. Cloud Spanner can abort the transaction for any reason. If a commit // attempt returns ABORTED, Cloud Spanner guarantees that the transaction has not // modified any user data in Cloud Spanner. // // Unless the transaction commits, Cloud Spanner makes no guarantees about how long // the transaction's locks were held for. It is an error to use Cloud Spanner locks // for any sort of mutual exclusion other than between Cloud Spanner transactions // themselves. 
// // Aborted transactions // // Application code does not need to retry explicitly; RunInTransaction will // automatically retry a transaction if an attempt results in an abort. The // lock priority of a transaction increases after each prior aborted // transaction, meaning that the next attempt has a slightly better chance of // success than before. // // Under some circumstances (e.g., many transactions attempting to modify the // same row(s)), a transaction can abort many times in a short period before // successfully committing. Thus, it is not a good idea to cap the number of // retries a transaction can attempt; instead, it is better to limit the total // amount of wall time spent retrying. // // Idle transactions // // A transaction is considered idle if it has no outstanding reads or SQL // queries and has not started a read or SQL query within the last 10 // seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold // on to locks indefinitely. In that case, the commit will fail with error // ABORTED. // // If this behavior is undesirable, periodically executing a simple SQL query // in the transaction (e.g., SELECT 1) prevents the transaction from becoming // idle. type ReadWriteTransaction struct { // txReadOnly contains methods for performing transactional reads. txReadOnly // sh is the sessionHandle allocated from sp. It is set only once during the initialization of ReadWriteTransaction. sh *sessionHandle // tx is the transaction ID in Cloud Spanner that uniquely identifies the ReadWriteTransaction. // It is set only once in ReadWriteTransaction.begin() during the initialization of ReadWriteTransaction. tx transactionID // mu protects concurrent access to the internal states of ReadWriteTransaction. mu sync.Mutex // state is the current transaction status of the read-write transaction. state txState // wb is the set of buffered mutations waiting to be commited. 
wb []*Mutation } // BufferWrite adds a list of mutations to the set of updates that will be // applied when the transaction is committed. It does not actually apply the // write until the transaction is committed, so the operation does not // block. The effects of the write won't be visible to any reads (including // reads done in the same transaction) until the transaction commits. // // See the example for Client.ReadWriteTransaction. func (t *ReadWriteTransaction) BufferWrite(ms []*Mutation) error { t.mu.Lock() defer t.mu.Unlock() if t.state == txClosed { return errTxClosed() } if t.state != txActive { return errUnexpectedTxState(t.state) } t.wb = append(t.wb, ms...) return nil } // acquire implements txReadEnv.acquire. func (t *ReadWriteTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) { ts := &sppb.TransactionSelector{ Selector: &sppb.TransactionSelector_Id{ Id: t.tx, }, } t.mu.Lock() defer t.mu.Unlock() switch t.state { case txClosed: return nil, nil, errTxClosed() case txActive: return t.sh, ts, nil } return nil, nil, errUnexpectedTxState(t.state) } // release implements txReadEnv.release. func (t *ReadWriteTransaction) release(err error) { t.mu.Lock() sh := t.sh t.mu.Unlock() if sh != nil && shouldDropSession(err) { sh.destroy() } } func beginTransaction(ctx context.Context, sid string, client sppb.SpannerClient) (transactionID, error) { var tx transactionID err := runRetryable(ctx, func(ctx context.Context) error { res, e := client.BeginTransaction(ctx, &sppb.BeginTransactionRequest{ Session: sid, Options: &sppb.TransactionOptions{ Mode: &sppb.TransactionOptions_ReadWrite_{ ReadWrite: &sppb.TransactionOptions_ReadWrite{}, }, }, }) if e != nil { return e } tx = res.Id return nil }) if err != nil { return nil, err } return tx, nil } // begin starts a read-write transacton on Cloud Spanner, it is always called before any of the public APIs. 
func (t *ReadWriteTransaction) begin(ctx context.Context) error { if t.tx != nil { t.state = txActive return nil } tx, err := beginTransaction(contextWithOutgoingMetadata(ctx, t.sh.getMetadata()), t.sh.getID(), t.sh.getClient()) if err == nil { t.tx = tx t.state = txActive return nil } if shouldDropSession(err) { t.sh.destroy() } return err } // commit tries to commit a readwrite transaction to Cloud Spanner. It also returns the commit timestamp for the transactions. func (t *ReadWriteTransaction) commit(ctx context.Context) (time.Time, error) { var ts time.Time t.mu.Lock() t.state = txClosed // No futher operations after commit. mPb, err := mutationsProto(t.wb) t.mu.Unlock() if err != nil { return ts, err } // In case that sessionHandle was destroyed but transaction body fails to report it. sid, client := t.sh.getID(), t.sh.getClient() if sid == "" || client == nil { return ts, errSessionClosed(t.sh) } err = runRetryable(contextWithOutgoingMetadata(ctx, t.sh.getMetadata()), func(ctx context.Context) error { var trailer metadata.MD res, e := client.Commit(ctx, &sppb.CommitRequest{ Session: sid, Transaction: &sppb.CommitRequest_TransactionId{ TransactionId: t.tx, }, Mutations: mPb, }, grpc.Trailer(&trailer)) if e != nil { return toSpannerErrorWithMetadata(e, trailer) } if tstamp := res.GetCommitTimestamp(); tstamp != nil { ts = time.Unix(tstamp.Seconds, int64(tstamp.Nanos)) } return nil }) if shouldDropSession(err) { t.sh.destroy() } return ts, err } // rollback is called when a commit is aborted or the transaction body runs into error. func (t *ReadWriteTransaction) rollback(ctx context.Context) { t.mu.Lock() // Forbid further operations on rollbacked transaction. t.state = txClosed t.mu.Unlock() // In case that sessionHandle was destroyed but transaction body fails to report it. 
sid, client := t.sh.getID(), t.sh.getClient() if sid == "" || client == nil { return } err := runRetryable(contextWithOutgoingMetadata(ctx, t.sh.getMetadata()), func(ctx context.Context) error { _, e := client.Rollback(ctx, &sppb.RollbackRequest{ Session: sid, TransactionId: t.tx, }) return e }) if shouldDropSession(err) { t.sh.destroy() } return } // runInTransaction executes f under a read-write transaction context. func (t *ReadWriteTransaction) runInTransaction(ctx context.Context, f func(context.Context, *ReadWriteTransaction) error) (time.Time, error) { var ( ts time.Time err error ) if err = f(context.WithValue(ctx, transactionInProgressKey{}, 1), t); err == nil { // Try to commit if transaction body returns no error. ts, err = t.commit(ctx) } if err != nil { if isAbortErr(err) { // Retry the transaction using the same session on ABORT error. // Cloud Spanner will create the new transaction with the previous one's wound-wait priority. err = errRetry(err) return ts, err } // Not going to commit, according to API spec, should rollback the transaction. t.rollback(ctx) return ts, err } // err == nil, return commit timestamp. return ts, err } // writeOnlyTransaction provides the most efficient way of doing write-only transactions. It essentially does blind writes to Cloud Spanner. type writeOnlyTransaction struct { // sp is the session pool which writeOnlyTransaction uses to get Cloud Spanner sessions for blind writes. sp *sessionPool } // applyAtLeastOnce commits a list of mutations to Cloud Spanner for at least once, unless one of the following happends: // 1) Context is timeout. // 2) An unretryable error(e.g. database not found) occurs. // 3) There is a malformed Mutation object. func (t *writeOnlyTransaction) applyAtLeastOnce(ctx context.Context, ms ...*Mutation) (time.Time, error) { var ( ts time.Time sh *sessionHandle ) mPb, err := mutationsProto(ms) if err != nil { // Malformed mutation found, just return the error. 
return ts, err } err = runRetryable(ctx, func(ct context.Context) error { var e error var trailers metadata.MD if sh == nil || sh.getID() == "" || sh.getClient() == nil { // No usable session for doing the commit, take one from pool. sh, e = t.sp.take(ctx) if e != nil { // sessionPool.Take already retries for session creations/retrivals. return e } } res, e := sh.getClient().Commit(contextWithOutgoingMetadata(ctx, sh.getMetadata()), &sppb.CommitRequest{ Session: sh.getID(), Transaction: &sppb.CommitRequest_SingleUseTransaction{ SingleUseTransaction: &sppb.TransactionOptions{ Mode: &sppb.TransactionOptions_ReadWrite_{ ReadWrite: &sppb.TransactionOptions_ReadWrite{}, }, }, }, Mutations: mPb, }, grpc.Trailer(&trailers)) if e != nil { if isAbortErr(e) { // Mask ABORT error as retryable, because aborted transactions are allowed to be retried. return errRetry(toSpannerErrorWithMetadata(e, trailers)) } if shouldDropSession(e) { // Discard the bad session. sh.destroy() } return e } if tstamp := res.GetCommitTimestamp(); tstamp != nil { ts = time.Unix(tstamp.Seconds, int64(tstamp.Nanos)) } return nil }) if sh != nil { sh.recycle() } return ts, err } // isAbortedErr returns true if the error indicates that an gRPC call is aborted on the server side. func isAbortErr(err error) bool { if err == nil { return false } if ErrCode(err) == codes.Aborted { return true } return false } golang-google-cloud-0.9.0/spanner/value.go000066400000000000000000000757201312234511600204670ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ package spanner import ( "encoding/base64" "fmt" "math" "reflect" "strconv" "strings" "time" "cloud.google.com/go/civil" "cloud.google.com/go/internal/fields" proto "github.com/golang/protobuf/proto" proto3 "github.com/golang/protobuf/ptypes/struct" sppb "google.golang.org/genproto/googleapis/spanner/v1" "google.golang.org/grpc/codes" ) // NullInt64 represents a Cloud Spanner INT64 that may be NULL. type NullInt64 struct { Int64 int64 Valid bool // Valid is true if Int64 is not NULL. } // String implements Stringer.String for NullInt64 func (n NullInt64) String() string { if !n.Valid { return fmt.Sprintf("%v", "") } return fmt.Sprintf("%v", n.Int64) } // NullString represents a Cloud Spanner STRING that may be NULL. type NullString struct { StringVal string Valid bool // Valid is true if StringVal is not NULL. } // String implements Stringer.String for NullString func (n NullString) String() string { if !n.Valid { return fmt.Sprintf("%v", "") } return fmt.Sprintf("%q", n.StringVal) } // NullFloat64 represents a Cloud Spanner FLOAT64 that may be NULL. type NullFloat64 struct { Float64 float64 Valid bool // Valid is true if Float64 is not NULL. } // String implements Stringer.String for NullFloat64 func (n NullFloat64) String() string { if !n.Valid { return fmt.Sprintf("%v", "") } return fmt.Sprintf("%v", n.Float64) } // NullBool represents a Cloud Spanner BOOL that may be NULL. type NullBool struct { Bool bool Valid bool // Valid is true if Bool is not NULL. } // String implements Stringer.String for NullBool func (n NullBool) String() string { if !n.Valid { return fmt.Sprintf("%v", "") } return fmt.Sprintf("%v", n.Bool) } // NullTime represents a Cloud Spanner TIMESTAMP that may be null. type NullTime struct { Time time.Time Valid bool // Valid is true if Time is not NULL. 
} // String implements Stringer.String for NullTime func (n NullTime) String() string { if !n.Valid { return fmt.Sprintf("%s", "") } return fmt.Sprintf("%q", n.Time.Format(time.RFC3339Nano)) } // NullDate represents a Cloud Spanner DATE that may be null. type NullDate struct { Date civil.Date Valid bool // Valid is true if Date is not NULL. } // String implements Stringer.String for NullDate func (n NullDate) String() string { if !n.Valid { return fmt.Sprintf("%s", "") } return fmt.Sprintf("%q", n.Date) } // NullRow represents a Cloud Spanner STRUCT that may be NULL. // See also the document for Row. // Note that NullRow is not a valid Cloud Spanner column Type. type NullRow struct { Row Row Valid bool // Valid is true if Row is not NULL. } // GenericColumnValue represents the generic encoded value and type of the // column. See google.spanner.v1.ResultSet proto for details. This can be // useful for proxying query results when the result types are not known in // advance. type GenericColumnValue struct { Type *sppb.Type Value *proto3.Value } // Decode decodes a GenericColumnValue. The ptr argument should be a pointer // to a Go value that can accept v. func (v GenericColumnValue) Decode(ptr interface{}) error { return decodeValue(v.Value, v.Type, ptr) } // NewGenericColumnValue creates a GenericColumnValue from Go value that is // valid for Cloud Spanner. func newGenericColumnValue(v interface{}) (*GenericColumnValue, error) { value, typ, err := encodeValue(v) if err != nil { return nil, err } return &GenericColumnValue{Value: value, Type: typ}, nil } // errTypeMismatch returns error for destination not having a compatible type // with source Cloud Spanner type. 
func errTypeMismatch(srcType sppb.TypeCode, isArray bool, dst interface{}) error { usage := srcType.String() if isArray { usage = fmt.Sprintf("%v[%v]", sppb.TypeCode_ARRAY, srcType) } return spannerErrorf(codes.InvalidArgument, "type %T cannot be used for decoding %v", dst, usage) } // errNilSpannerType returns error for nil Cloud Spanner type in decoding. func errNilSpannerType() error { return spannerErrorf(codes.FailedPrecondition, "unexpected nil Cloud Spanner data type in decoding") } // errNilSrc returns error for decoding from nil proto value. func errNilSrc() error { return spannerErrorf(codes.FailedPrecondition, "unexpected nil Cloud Spanner value in decoding") } // errNilDst returns error for decoding into nil interface{}. func errNilDst(dst interface{}) error { return spannerErrorf(codes.InvalidArgument, "cannot decode into nil type %T", dst) } // errNilArrElemType returns error for input Cloud Spanner data type being a array but without a // non-nil array element type. func errNilArrElemType(t *sppb.Type) error { return spannerErrorf(codes.FailedPrecondition, "array type %v is with nil array element type", t) } // errDstNotForNull returns error for decoding a SQL NULL value into a destination which doesn't // support NULL values. func errDstNotForNull(dst interface{}) error { return spannerErrorf(codes.InvalidArgument, "destination %T cannot support NULL SQL values", dst) } // errBadEncoding returns error for decoding wrongly encoded types. 
func errBadEncoding(v *proto3.Value, err error) error { return spannerErrorf(codes.FailedPrecondition, "%v wasn't correctly encoded: <%v>", v, err) } func parseNullTime(v *proto3.Value, p *NullTime, code sppb.TypeCode, isNull bool) error { if p == nil { return errNilDst(p) } if code != sppb.TypeCode_TIMESTAMP { return errTypeMismatch(code, false, p) } if isNull { *p = NullTime{} return nil } x, err := getStringValue(v) if err != nil { return err } y, err := time.Parse(time.RFC3339Nano, x) if err != nil { return errBadEncoding(v, err) } p.Valid = true p.Time = y return nil } // decodeValue decodes a protobuf Value into a pointer to a Go value, as // specified by sppb.Type. func decodeValue(v *proto3.Value, t *sppb.Type, ptr interface{}) error { if v == nil { return errNilSrc() } if t == nil { return errNilSpannerType() } code := t.Code acode := sppb.TypeCode_TYPE_CODE_UNSPECIFIED if code == sppb.TypeCode_ARRAY { if t.ArrayElementType == nil { return errNilArrElemType(t) } acode = t.ArrayElementType.Code } typeErr := errTypeMismatch(code, false, ptr) if code == sppb.TypeCode_ARRAY { typeErr = errTypeMismatch(acode, true, ptr) } nullErr := errDstNotForNull(ptr) _, isNull := v.Kind.(*proto3.Value_NullValue) // Do the decoding based on the type of ptr. 
switch p := ptr.(type) { case nil: return errNilDst(nil) case *string: if p == nil { return errNilDst(p) } if code != sppb.TypeCode_STRING { return typeErr } if isNull { return nullErr } x, err := getStringValue(v) if err != nil { return err } *p = x case *NullString: if p == nil { return errNilDst(p) } if code != sppb.TypeCode_STRING { return typeErr } if isNull { *p = NullString{} break } x, err := getStringValue(v) if err != nil { return err } p.Valid = true p.StringVal = x case *[]NullString: if p == nil { return errNilDst(p) } if acode != sppb.TypeCode_STRING { return typeErr } if isNull { *p = nil break } x, err := getListValue(v) if err != nil { return err } y, err := decodeStringArray(x) if err != nil { return err } *p = y case *[]byte: if p == nil { return errNilDst(p) } if code != sppb.TypeCode_BYTES { return typeErr } if isNull { *p = nil break } x, err := getStringValue(v) if err != nil { return err } y, err := base64.StdEncoding.DecodeString(x) if err != nil { return errBadEncoding(v, err) } *p = y case *[][]byte: if p == nil { return errNilDst(p) } if acode != sppb.TypeCode_BYTES { return typeErr } if isNull { *p = nil break } x, err := getListValue(v) if err != nil { return err } y, err := decodeByteArray(x) if err != nil { return err } *p = y case *int64: if p == nil { return errNilDst(p) } if code != sppb.TypeCode_INT64 { return typeErr } if isNull { return nullErr } x, err := getStringValue(v) if err != nil { return err } y, err := strconv.ParseInt(x, 10, 64) if err != nil { return errBadEncoding(v, err) } *p = y case *NullInt64: if p == nil { return errNilDst(p) } if code != sppb.TypeCode_INT64 { return typeErr } if isNull { *p = NullInt64{} break } x, err := getStringValue(v) if err != nil { return err } y, err := strconv.ParseInt(x, 10, 64) if err != nil { return errBadEncoding(v, err) } p.Valid = true p.Int64 = y case *[]NullInt64: if p == nil { return errNilDst(p) } if acode != sppb.TypeCode_INT64 { return typeErr } if isNull { *p = nil break 
} x, err := getListValue(v) if err != nil { return err } y, err := decodeIntArray(x) if err != nil { return err } *p = y case *bool: if p == nil { return errNilDst(p) } if code != sppb.TypeCode_BOOL { return typeErr } if isNull { return nullErr } x, err := getBoolValue(v) if err != nil { return err } *p = x case *NullBool: if p == nil { return errNilDst(p) } if code != sppb.TypeCode_BOOL { return typeErr } if isNull { *p = NullBool{} break } x, err := getBoolValue(v) if err != nil { return err } p.Valid = true p.Bool = x case *[]NullBool: if p == nil { return errNilDst(p) } if acode != sppb.TypeCode_BOOL { return typeErr } if isNull { *p = nil break } x, err := getListValue(v) if err != nil { return err } y, err := decodeBoolArray(x) if err != nil { return err } *p = y case *float64: if p == nil { return errNilDst(p) } if code != sppb.TypeCode_FLOAT64 { return typeErr } if isNull { return nullErr } x, err := getFloat64Value(v) if err != nil { return err } *p = x case *NullFloat64: if p == nil { return errNilDst(p) } if code != sppb.TypeCode_FLOAT64 { return typeErr } if isNull { *p = NullFloat64{} break } x, err := getFloat64Value(v) if err != nil { return err } p.Valid = true p.Float64 = x case *[]NullFloat64: if p == nil { return errNilDst(p) } if acode != sppb.TypeCode_FLOAT64 { return typeErr } if isNull { *p = nil break } x, err := getListValue(v) if err != nil { return err } y, err := decodeFloat64Array(x) if err != nil { return err } *p = y case *time.Time: var nt NullTime if isNull { return nullErr } err := parseNullTime(v, &nt, code, isNull) if err != nil { return nil } *p = nt.Time case *NullTime: err := parseNullTime(v, p, code, isNull) if err != nil { return err } case *[]NullTime: if p == nil { return errNilDst(p) } if acode != sppb.TypeCode_TIMESTAMP { return typeErr } if isNull { *p = nil break } x, err := getListValue(v) if err != nil { return err } y, err := decodeTimeArray(x) if err != nil { return err } *p = y case *civil.Date: if p == nil { 
return errNilDst(p) } if code != sppb.TypeCode_DATE { return typeErr } if isNull { return nullErr } x, err := getStringValue(v) if err != nil { return err } y, err := civil.ParseDate(x) if err != nil { return errBadEncoding(v, err) } *p = y case *NullDate: if p == nil { return errNilDst(p) } if code != sppb.TypeCode_DATE { return typeErr } if isNull { *p = NullDate{} break } x, err := getStringValue(v) if err != nil { return err } y, err := civil.ParseDate(x) if err != nil { return errBadEncoding(v, err) } p.Valid = true p.Date = y case *[]NullDate: if p == nil { return errNilDst(p) } if acode != sppb.TypeCode_DATE { return typeErr } if isNull { *p = nil break } x, err := getListValue(v) if err != nil { return err } y, err := decodeDateArray(x) if err != nil { return err } *p = y case *[]NullRow: if p == nil { return errNilDst(p) } if acode != sppb.TypeCode_STRUCT { return typeErr } if isNull { *p = nil break } x, err := getListValue(v) if err != nil { return err } y, err := decodeRowArray(t.ArrayElementType.StructType, x) if err != nil { return err } *p = y case *GenericColumnValue: *p = GenericColumnValue{ // Deep clone to ensure subsequent changes to t or v // don't affect our decoded value. Type: proto.Clone(t).(*sppb.Type), Value: proto.Clone(v).(*proto3.Value), } default: // Check if the proto encoding is for an array of structs. if !(code == sppb.TypeCode_ARRAY && acode == sppb.TypeCode_STRUCT) { return typeErr } vp := reflect.ValueOf(p) if !vp.IsValid() { return errNilDst(p) } if !isPtrStructPtrSlice(vp.Type()) { // The container is not a pointer to a struct pointer slice. return typeErr } // Only use reflection for nil detection on slow path. // Also, IsNil panics on many types, so check it after the type check. if vp.IsNil() { return errNilDst(p) } if isNull { // The proto Value is encoding NULL, set the pointer to struct // slice to nil as well. 
vp.Elem().Set(reflect.Zero(vp.Elem().Type())) break } x, err := getListValue(v) if err != nil { return err } if err = decodeStructArray(t.ArrayElementType.StructType, x, p); err != nil { return err } } return nil } // errSrvVal returns an error for getting a wrong source protobuf value in decoding. func errSrcVal(v *proto3.Value, want string) error { return spannerErrorf(codes.FailedPrecondition, "cannot use %v(Kind: %T) as %s Value", v, v.GetKind(), want) } // getStringValue returns the string value encoded in proto3.Value v whose // kind is proto3.Value_StringValue. func getStringValue(v *proto3.Value) (string, error) { if x, ok := v.GetKind().(*proto3.Value_StringValue); ok && x != nil { return x.StringValue, nil } return "", errSrcVal(v, "String") } // getBoolValue returns the bool value encoded in proto3.Value v whose // kind is proto3.Value_BoolValue. func getBoolValue(v *proto3.Value) (bool, error) { if x, ok := v.GetKind().(*proto3.Value_BoolValue); ok && x != nil { return x.BoolValue, nil } return false, errSrcVal(v, "Bool") } // getListValue returns the proto3.ListValue contained in proto3.Value v whose // kind is proto3.Value_ListValue. func getListValue(v *proto3.Value) (*proto3.ListValue, error) { if x, ok := v.GetKind().(*proto3.Value_ListValue); ok && x != nil { return x.ListValue, nil } return nil, errSrcVal(v, "List") } // errUnexpectedNumStr returns error for decoder getting a unexpected string for // representing special float values. func errUnexpectedNumStr(s string) error { return spannerErrorf(codes.FailedPrecondition, "unexpected string value %q for number", s) } // getFloat64Value returns the float64 value encoded in proto3.Value v whose // kind is proto3.Value_NumberValue / proto3.Value_StringValue. // Cloud Spanner uses string to encode NaN, Infinity and -Infinity. 
func getFloat64Value(v *proto3.Value) (float64, error) { switch x := v.GetKind().(type) { case *proto3.Value_NumberValue: if x == nil { break } return x.NumberValue, nil case *proto3.Value_StringValue: if x == nil { break } switch x.StringValue { case "NaN": return math.NaN(), nil case "Infinity": return math.Inf(1), nil case "-Infinity": return math.Inf(-1), nil default: return 0, errUnexpectedNumStr(x.StringValue) } } return 0, errSrcVal(v, "Number") } // errNilListValue returns error for unexpected nil ListValue in decoding Cloud Spanner ARRAYs. func errNilListValue(sqlType string) error { return spannerErrorf(codes.FailedPrecondition, "unexpected nil ListValue in decoding %v array", sqlType) } // errDecodeArrayElement returns error for failure in decoding single array element. func errDecodeArrayElement(i int, v proto.Message, sqlType string, err error) error { se, ok := toSpannerError(err).(*Error) if !ok { return spannerErrorf(codes.Unknown, "cannot decode %v(array element %v) as %v, error = <%v>", v, i, sqlType, err) } se.decorate(fmt.Sprintf("cannot decode %v(array element %v) as %v", v, i, sqlType)) return se } // decodeStringArray decodes proto3.ListValue pb into a NullString slice. func decodeStringArray(pb *proto3.ListValue) ([]NullString, error) { if pb == nil { return nil, errNilListValue("STRING") } a := make([]NullString, len(pb.Values)) for i, v := range pb.Values { if err := decodeValue(v, stringType(), &a[i]); err != nil { return nil, errDecodeArrayElement(i, v, "STRING", err) } } return a, nil } // decodeIntArray decodes proto3.ListValue pb into a NullInt64 slice. func decodeIntArray(pb *proto3.ListValue) ([]NullInt64, error) { if pb == nil { return nil, errNilListValue("INT64") } a := make([]NullInt64, len(pb.Values)) for i, v := range pb.Values { if err := decodeValue(v, intType(), &a[i]); err != nil { return nil, errDecodeArrayElement(i, v, "INT64", err) } } return a, nil } // decodeBoolArray decodes proto3.ListValue pb into a NullBool slice. 
func decodeBoolArray(pb *proto3.ListValue) ([]NullBool, error) { if pb == nil { return nil, errNilListValue("BOOL") } a := make([]NullBool, len(pb.Values)) for i, v := range pb.Values { if err := decodeValue(v, boolType(), &a[i]); err != nil { return nil, errDecodeArrayElement(i, v, "BOOL", err) } } return a, nil } // decodeFloat64Array decodes proto3.ListValue pb into a NullFloat64 slice. func decodeFloat64Array(pb *proto3.ListValue) ([]NullFloat64, error) { if pb == nil { return nil, errNilListValue("FLOAT64") } a := make([]NullFloat64, len(pb.Values)) for i, v := range pb.Values { if err := decodeValue(v, floatType(), &a[i]); err != nil { return nil, errDecodeArrayElement(i, v, "FLOAT64", err) } } return a, nil } // decodeByteArray decodes proto3.ListValue pb into a slice of byte slice. func decodeByteArray(pb *proto3.ListValue) ([][]byte, error) { if pb == nil { return nil, errNilListValue("BYTES") } a := make([][]byte, len(pb.Values)) for i, v := range pb.Values { if err := decodeValue(v, bytesType(), &a[i]); err != nil { return nil, errDecodeArrayElement(i, v, "BYTES", err) } } return a, nil } // decodeTimeArray decodes proto3.ListValue pb into a NullTime slice. func decodeTimeArray(pb *proto3.ListValue) ([]NullTime, error) { if pb == nil { return nil, errNilListValue("TIMESTAMP") } a := make([]NullTime, len(pb.Values)) for i, v := range pb.Values { if err := decodeValue(v, timeType(), &a[i]); err != nil { return nil, errDecodeArrayElement(i, v, "TIMESTAMP", err) } } return a, nil } // decodeDateArray decodes proto3.ListValue pb into a NullDate slice. 
func decodeDateArray(pb *proto3.ListValue) ([]NullDate, error) { if pb == nil { return nil, errNilListValue("DATE") } a := make([]NullDate, len(pb.Values)) for i, v := range pb.Values { if err := decodeValue(v, dateType(), &a[i]); err != nil { return nil, errDecodeArrayElement(i, v, "DATE", err) } } return a, nil } func errNotStructElement(i int, v *proto3.Value) error { return errDecodeArrayElement(i, v, "STRUCT", spannerErrorf(codes.FailedPrecondition, "%v(type: %T) doesn't encode Cloud Spanner STRUCT", v, v)) } // decodeRowArray decodes proto3.ListValue pb into a NullRow slice according to // the structual information given in sppb.StructType ty. func decodeRowArray(ty *sppb.StructType, pb *proto3.ListValue) ([]NullRow, error) { if pb == nil { return nil, errNilListValue("STRUCT") } a := make([]NullRow, len(pb.Values)) for i := range pb.Values { switch v := pb.Values[i].GetKind().(type) { case *proto3.Value_ListValue: a[i] = NullRow{ Row: Row{ fields: ty.Fields, vals: v.ListValue.Values, }, Valid: true, } // Null elements not currently supported by the server, see // https://cloud.google.com/spanner/docs/query-syntax#using-structs-with-select case *proto3.Value_NullValue: // no-op, a[i] is NullRow{} already default: return nil, errNotStructElement(i, pb.Values[i]) } } return a, nil } // structFieldColumn returns the name of i-th field of struct type typ if the field // is untagged; otherwise, it returns the tagged name of the field. func structFieldColumn(typ reflect.Type, i int) (col string, ok bool) { desc := typ.Field(i) if desc.PkgPath != "" || desc.Anonymous { // Skip unexported or anonymous fields. return "", false } col = desc.Name if tag := desc.Tag.Get("spanner"); tag != "" { if tag == "-" { // Skip fields tagged "-" to match encoding/json and others. 
return "", false } col = tag if idx := strings.Index(tag, ","); idx != -1 { col = tag[:idx] } } return col, true } // errNilSpannerStructType returns error for unexpected nil Cloud Spanner STRUCT schema type in decoding. func errNilSpannerStructType() error { return spannerErrorf(codes.FailedPrecondition, "unexpected nil StructType in decoding Cloud Spanner STRUCT") } // errUnnamedField returns error for decoding a Cloud Spanner STRUCT with unnamed field into a Go struct. func errUnnamedField(ty *sppb.StructType, i int) error { return spannerErrorf(codes.InvalidArgument, "unnamed field %v in Cloud Spanner STRUCT %+v", i, ty) } // errNoOrDupGoField returns error for decoding a Cloud Spanner // STRUCT into a Go struct which is either missing a field, or has duplicate fields. func errNoOrDupGoField(s interface{}, f string) error { return spannerErrorf(codes.InvalidArgument, "Go struct %+v(type %T) has no or duplicate fields for Cloud Spanner STRUCT field %v", s, s, f) } // errDupColNames returns error for duplicated Cloud Spanner STRUCT field names found in decoding a Cloud Spanner STRUCT into a Go struct. func errDupSpannerField(f string, ty *sppb.StructType) error { return spannerErrorf(codes.InvalidArgument, "duplicated field name %q in Cloud Spanner STRUCT %+v", f, ty) } // errDecodeStructField returns error for failure in decoding a single field of a Cloud Spanner STRUCT. func errDecodeStructField(ty *sppb.StructType, f string, err error) error { se, ok := toSpannerError(err).(*Error) if !ok { return spannerErrorf(codes.Unknown, "cannot decode field %v of Cloud Spanner STRUCT %+v, error = <%v>", f, ty, err) } se.decorate(fmt.Sprintf("cannot decode field %v of Cloud Spanner STRUCT %+v", f, ty)) return se } // decodeStruct decodes proto3.ListValue pb into struct referenced by pointer ptr, according to // the structual information given in sppb.StructType ty. 
func decodeStruct(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error { if reflect.ValueOf(ptr).IsNil() { return errNilDst(ptr) } if ty == nil { return errNilSpannerStructType() } // t holds the structual information of ptr. t := reflect.TypeOf(ptr).Elem() // v is the actual value that ptr points to. v := reflect.ValueOf(ptr).Elem() fields, err := fieldCache.Fields(t) if err != nil { return toSpannerError(err) } seen := map[string]bool{} for i, f := range ty.Fields { if f.Name == "" { return errUnnamedField(ty, i) } sf := fields.Match(f.Name) if sf == nil { return errNoOrDupGoField(ptr, f.Name) } if seen[f.Name] { // We don't allow duplicated field name. return errDupSpannerField(f.Name, ty) } // Try to decode a single field. if err := decodeValue(pb.Values[i], f.Type, v.FieldByIndex(sf.Index).Addr().Interface()); err != nil { return errDecodeStructField(ty, f.Name, err) } // Mark field f.Name as processed. seen[f.Name] = true } return nil } // isPtrStructPtrSlice returns true if ptr is a pointer to a slice of struct pointers. func isPtrStructPtrSlice(t reflect.Type) bool { if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Slice { // t is not a pointer to a slice. return false } if t = t.Elem(); t.Elem().Kind() != reflect.Ptr || t.Elem().Elem().Kind() != reflect.Struct { // the slice that t points to is not a slice of struct pointers. return false } return true } // decodeStructArray decodes proto3.ListValue pb into struct slice referenced by pointer ptr, according to the // structual information given in a sppb.StructType. func decodeStructArray(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error { if pb == nil { return errNilListValue("STRUCT") } // Type of the struct pointers stored in the slice that ptr points to. ts := reflect.TypeOf(ptr).Elem().Elem() // The slice that ptr points to, might be nil at this point. v := reflect.ValueOf(ptr).Elem() // Allocate empty slice. 
v.Set(reflect.MakeSlice(v.Type(), 0, len(pb.Values))) // Decode every struct in pb.Values. for i, pv := range pb.Values { // Check if pv is a NULL value. if _, isNull := pv.Kind.(*proto3.Value_NullValue); isNull { // Append a nil pointer to the slice. v.Set(reflect.Append(v, reflect.New(ts).Elem())) continue } // Allocate empty struct. s := reflect.New(ts.Elem()) // Get proto3.ListValue l from proto3.Value pv. l, err := getListValue(pv) if err != nil { return errDecodeArrayElement(i, pv, "STRUCT", err) } // Decode proto3.ListValue l into struct referenced by s.Interface(). if err = decodeStruct(ty, l, s.Interface()); err != nil { return errDecodeArrayElement(i, pv, "STRUCT", err) } // Append the decoded struct back into the slice. v.Set(reflect.Append(v, s)) } return nil } // errEncoderUnsupportedType returns error for not being able to encode a value of // certain type. func errEncoderUnsupportedType(v interface{}) error { return spannerErrorf(codes.InvalidArgument, "client doesn't support type %T", v) } // encodeValue encodes a Go native type into a proto3.Value. 
func encodeValue(v interface{}) (*proto3.Value, *sppb.Type, error) { pb := &proto3.Value{ Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE}, } var pt *sppb.Type var err error switch v := v.(type) { case nil: case string: pb.Kind = stringKind(v) pt = stringType() case NullString: if v.Valid { return encodeValue(v.StringVal) } case []string: if v != nil { pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(stringType()) } case []NullString: if v != nil { pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(stringType()) } case []byte: if v != nil { pb.Kind = stringKind(base64.StdEncoding.EncodeToString(v)) pt = bytesType() } case [][]byte: if v != nil { pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(bytesType()) } case int: pb.Kind = stringKind(strconv.FormatInt(int64(v), 10)) pt = intType() case []int: if v != nil { pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(intType()) } case int64: pb.Kind = stringKind(strconv.FormatInt(v, 10)) pt = intType() case []int64: if v != nil { pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(intType()) } case NullInt64: if v.Valid { return encodeValue(v.Int64) } case []NullInt64: if v != nil { pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(intType()) } case bool: pb.Kind = &proto3.Value_BoolValue{BoolValue: v} pt = boolType() case []bool: if v != nil { pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(boolType()) } case NullBool: if v.Valid { return encodeValue(v.Bool) } case []NullBool: if v != nil { 
pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(boolType()) } case float64: pb.Kind = &proto3.Value_NumberValue{NumberValue: v} pt = floatType() case []float64: if v != nil { pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(floatType()) } case NullFloat64: if v.Valid { return encodeValue(v.Float64) } case []NullFloat64: if v != nil { pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(floatType()) } case time.Time: pb.Kind = stringKind(v.UTC().Format(time.RFC3339Nano)) pt = timeType() case []time.Time: if v != nil { pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(timeType()) } case NullTime: if v.Valid { return encodeValue(v.Time) } case []NullTime: if v != nil { pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(timeType()) } case civil.Date: pb.Kind = stringKind(v.String()) pt = dateType() case []civil.Date: if v != nil { pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(dateType()) } case NullDate: if v.Valid { return encodeValue(v.Date) } case []NullDate: if v != nil { pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] }) if err != nil { return nil, nil, err } pt = listType(dateType()) } case GenericColumnValue: // Deep clone to ensure subsequent changes to v before // transmission don't affect our encoded value. pb = proto.Clone(v.Value).(*proto3.Value) pt = proto.Clone(v.Type).(*sppb.Type) default: return nil, nil, errEncoderUnsupportedType(v) } return pb, pt, nil } // encodeValueArray encodes a Value array into a proto3.ListValue. 
func encodeValueArray(vs []interface{}) (*proto3.ListValue, error) { lv := &proto3.ListValue{} lv.Values = make([]*proto3.Value, 0, len(vs)) for _, v := range vs { pb, _, err := encodeValue(v) if err != nil { return nil, err } lv.Values = append(lv.Values, pb) } return lv, nil } // encodeArray assumes that all values of the array element type encode without error. func encodeArray(len int, at func(int) interface{}) (*proto3.Value, error) { vs := make([]*proto3.Value, len) var err error for i := 0; i < len; i++ { vs[i], _, err = encodeValue(at(i)) if err != nil { return nil, err } } return listProto(vs...), nil } func spannerTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { if s := t.Get("spanner"); s != "" { if s == "-" { return "", false, nil, nil } return s, true, nil, nil } return "", true, nil, nil } var fieldCache = fields.NewCache(spannerTagParser, nil, nil) golang-google-cloud-0.9.0/spanner/value_test.go000066400000000000000000000440321312234511600215160ustar00rootroot00000000000000/* Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ package spanner import ( "math" "reflect" "testing" "time" "cloud.google.com/go/civil" "github.com/golang/protobuf/proto" proto3 "github.com/golang/protobuf/ptypes/struct" sppb "google.golang.org/genproto/googleapis/spanner/v1" ) var ( t1 = mustParseTime("2016-11-15T15:04:05.999999999Z") // Boundaries t2 = mustParseTime("0000-01-01T00:00:00.000000000Z") t3 = mustParseTime("9999-12-31T23:59:59.999999999Z") // Local timezone t4 = time.Now() d1 = mustParseDate("2016-11-15") d2 = mustParseDate("1678-01-01") ) func mustParseTime(s string) time.Time { t, err := time.Parse(time.RFC3339Nano, s) if err != nil { panic(err) } return t } func mustParseDate(s string) civil.Date { d, err := civil.ParseDate(s) if err != nil { panic(err) } return d } // Test encoding Values. func TestEncodeValue(t *testing.T) { var ( tString = stringType() tInt = intType() tBool = boolType() tFloat = floatType() tBytes = bytesType() tTime = timeType() tDate = dateType() ) for i, test := range []struct { in interface{} want *proto3.Value wantType *sppb.Type }{ // STRING / STRING ARRAY {"abc", stringProto("abc"), tString}, {NullString{"abc", true}, stringProto("abc"), tString}, {NullString{"abc", false}, nullProto(), nil}, {[]string{"abc", "bcd"}, listProto(stringProto("abc"), stringProto("bcd")), listType(tString)}, {[]NullString{{"abcd", true}, {"xyz", false}}, listProto(stringProto("abcd"), nullProto()), listType(tString)}, // BYTES / BYTES ARRAY {[]byte("foo"), bytesProto([]byte("foo")), tBytes}, {[]byte(nil), nullProto(), nil}, {[][]byte{nil, []byte("ab")}, listProto(nullProto(), bytesProto([]byte("ab"))), listType(tBytes)}, {[][]byte(nil), nullProto(), nil}, // INT64 / INT64 ARRAY {7, intProto(7), tInt}, {[]int{31, 127}, listProto(intProto(31), intProto(127)), listType(tInt)}, {int64(81), intProto(81), tInt}, {[]int64{33, 129}, listProto(intProto(33), intProto(129)), listType(tInt)}, {NullInt64{11, true}, intProto(11), tInt}, {NullInt64{11, false}, nullProto(), nil}, {[]NullInt64{{35, 
true}, {131, false}}, listProto(intProto(35), nullProto()), listType(tInt)}, // BOOL / BOOL ARRAY {true, boolProto(true), tBool}, {NullBool{true, true}, boolProto(true), tBool}, {NullBool{true, false}, nullProto(), nil}, {[]bool{true, false}, listProto(boolProto(true), boolProto(false)), listType(tBool)}, {[]NullBool{{true, true}, {true, false}}, listProto(boolProto(true), nullProto()), listType(tBool)}, // FLOAT64 / FLOAT64 ARRAY {3.14, floatProto(3.14), tFloat}, {NullFloat64{3.1415, true}, floatProto(3.1415), tFloat}, {NullFloat64{math.Inf(1), true}, floatProto(math.Inf(1)), tFloat}, {NullFloat64{3.14159, false}, nullProto(), nil}, {[]float64{3.141, 0.618, math.Inf(-1)}, listProto(floatProto(3.141), floatProto(0.618), floatProto(math.Inf(-1))), listType(tFloat)}, {[]NullFloat64{{3.141, true}, {0.618, false}}, listProto(floatProto(3.141), nullProto()), listType(tFloat)}, // TIMESTAMP / TIMESTAMP ARRAY {t1, timeProto(t1), tTime}, {NullTime{t1, true}, timeProto(t1), tTime}, {NullTime{t1, false}, nullProto(), nil}, {[]time.Time{t1, t2, t3, t4}, listProto(timeProto(t1), timeProto(t2), timeProto(t3), timeProto(t4)), listType(tTime)}, {[]NullTime{{t1, true}, {t1, false}}, listProto(timeProto(t1), nullProto()), listType(tTime)}, // DATE / DATE ARRAY {d1, dateProto(d1), tDate}, {NullDate{d1, true}, dateProto(d1), tDate}, {NullDate{civil.Date{}, false}, nullProto(), nil}, {[]civil.Date{d1, d2}, listProto(dateProto(d1), dateProto(d2)), listType(tDate)}, {[]NullDate{{d1, true}, {civil.Date{}, false}}, listProto(dateProto(d1), nullProto()), listType(tDate)}, // GenericColumnValue {GenericColumnValue{tString, stringProto("abc")}, stringProto("abc"), tString}, {GenericColumnValue{tString, nullProto()}, nullProto(), tString}, // not actually valid (stringProto inside int list), but demonstrates pass-through. 
{ GenericColumnValue{ Type: listType(tInt), Value: listProto(intProto(5), nullProto(), stringProto("bcd")), }, listProto(intProto(5), nullProto(), stringProto("bcd")), listType(tInt), }, } { got, gotType, err := encodeValue(test.in) if err != nil { t.Fatalf("#%d: got error during encoding: %v, want nil", i, err) } if !reflect.DeepEqual(got, test.want) { t.Errorf("#%d: got encode result: %v, want %v", i, got, test.want) } if !reflect.DeepEqual(gotType, test.wantType) { t.Errorf("#%d: got encode type: %v, want %v", i, gotType, test.wantType) } } } // Test decoding Values. func TestDecodeValue(t *testing.T) { for i, test := range []struct { in *proto3.Value t *sppb.Type want interface{} fail bool }{ // STRING {stringProto("abc"), stringType(), "abc", false}, {nullProto(), stringType(), "abc", true}, {stringProto("abc"), stringType(), NullString{"abc", true}, false}, {nullProto(), stringType(), NullString{}, false}, // STRING ARRAY { listProto(stringProto("abc"), nullProto(), stringProto("bcd")), listType(stringType()), []NullString{{"abc", true}, {}, {"bcd", true}}, false, }, {nullProto(), listType(stringType()), []NullString(nil), false}, // BYTES {bytesProto([]byte("ab")), bytesType(), []byte("ab"), false}, {nullProto(), bytesType(), []byte(nil), false}, // BYTES ARRAY {listProto(bytesProto([]byte("ab")), nullProto()), listType(bytesType()), [][]byte{[]byte("ab"), nil}, false}, {nullProto(), listType(bytesType()), [][]byte(nil), false}, //INT64 {intProto(15), intType(), int64(15), false}, {nullProto(), intType(), int64(0), true}, {intProto(15), intType(), NullInt64{15, true}, false}, {nullProto(), intType(), NullInt64{}, false}, // INT64 ARRAY {listProto(intProto(91), nullProto(), intProto(87)), listType(intType()), []NullInt64{{91, true}, {}, {87, true}}, false}, {nullProto(), listType(intType()), []NullInt64(nil), false}, // BOOL {boolProto(true), boolType(), true, false}, {nullProto(), boolType(), true, true}, {boolProto(true), boolType(), NullBool{true, true}, 
false}, {nullProto(), boolType(), NullBool{}, false}, // BOOL ARRAY {listProto(boolProto(true), boolProto(false), nullProto()), listType(boolType()), []NullBool{{true, true}, {false, true}, {}}, false}, {nullProto(), listType(boolType()), []NullBool(nil), false}, // FLOAT64 {floatProto(3.14), floatType(), 3.14, false}, {nullProto(), floatType(), 0.00, true}, {floatProto(3.14), floatType(), NullFloat64{3.14, true}, false}, {nullProto(), floatType(), NullFloat64{}, false}, // FLOAT64 ARRAY { listProto(floatProto(math.Inf(1)), floatProto(math.Inf(-1)), nullProto(), floatProto(3.1)), listType(floatType()), []NullFloat64{{math.Inf(1), true}, {math.Inf(-1), true}, {}, {3.1, true}}, false, }, {nullProto(), listType(floatType()), []NullFloat64(nil), false}, // TIMESTAMP {timeProto(t1), timeType(), t1, false}, {timeProto(t1), timeType(), NullTime{t1, true}, false}, {nullProto(), timeType(), NullTime{}, false}, // TIMESTAMP ARRAY {listProto(timeProto(t1), timeProto(t2), timeProto(t3), nullProto()), listType(timeType()), []NullTime{{t1, true}, {t2, true}, {t3, true}, {}}, false}, {nullProto(), listType(timeType()), []NullTime(nil), false}, // DATE {dateProto(d1), dateType(), d1, false}, {dateProto(d1), dateType(), NullDate{d1, true}, false}, {nullProto(), dateType(), NullDate{}, false}, // DATE ARRAY {listProto(dateProto(d1), dateProto(d2), nullProto()), listType(dateType()), []NullDate{{d1, true}, {d2, true}, {}}, false}, {nullProto(), listType(dateType()), []NullDate(nil), false}, // STRUCT ARRAY // STRUCT schema is equal to the following Go struct: // type s struct { // Col1 NullInt64 // Col2 []struct { // SubCol1 float64 // SubCol2 string // } // } { in: listProto( listProto( intProto(3), listProto( listProto(floatProto(3.14), stringProto("this")), listProto(floatProto(0.57), stringProto("siht")), ), ), listProto( nullProto(), nullProto(), ), nullProto(), ), t: listType( structType( mkField("Col1", intType()), mkField( "Col2", listType( structType( mkField("SubCol1", 
floatType()), mkField("SubCol2", stringType()), ), ), ), ), ), want: []NullRow{ { Row: Row{ fields: []*sppb.StructType_Field{ mkField("Col1", intType()), mkField( "Col2", listType( structType( mkField("SubCol1", floatType()), mkField("SubCol2", stringType()), ), ), ), }, vals: []*proto3.Value{ intProto(3), listProto( listProto(floatProto(3.14), stringProto("this")), listProto(floatProto(0.57), stringProto("siht")), ), }, }, Valid: true, }, { Row: Row{ fields: []*sppb.StructType_Field{ mkField("Col1", intType()), mkField( "Col2", listType( structType( mkField("SubCol1", floatType()), mkField("SubCol2", stringType()), ), ), ), }, vals: []*proto3.Value{ nullProto(), nullProto(), }, }, Valid: true, }, {}, }, fail: false, }, { in: listProto( listProto( intProto(3), listProto( listProto(floatProto(3.14), stringProto("this")), listProto(floatProto(0.57), stringProto("siht")), ), ), listProto( nullProto(), nullProto(), ), nullProto(), ), t: listType( structType( mkField("Col1", intType()), mkField( "Col2", listType( structType( mkField("SubCol1", floatType()), mkField("SubCol2", stringType()), ), ), ), ), ), want: []*struct { Col1 NullInt64 StructCol []*struct { SubCol1 NullFloat64 SubCol2 string } `spanner:"Col2"` }{ { Col1: NullInt64{3, true}, StructCol: []*struct { SubCol1 NullFloat64 SubCol2 string }{ { SubCol1: NullFloat64{3.14, true}, SubCol2: "this", }, { SubCol1: NullFloat64{0.57, true}, SubCol2: "siht", }, }, }, { Col1: NullInt64{}, StructCol: []*struct { SubCol1 NullFloat64 SubCol2 string }(nil), }, nil, }, fail: false, }, // GenericColumnValue {stringProto("abc"), stringType(), GenericColumnValue{stringType(), stringProto("abc")}, false}, {nullProto(), stringType(), GenericColumnValue{stringType(), nullProto()}, false}, // not actually valid (stringProto inside int list), but demonstrates pass-through. 
{ in: listProto(intProto(5), nullProto(), stringProto("bcd")), t: listType(intType()), want: GenericColumnValue{ Type: listType(intType()), Value: listProto(intProto(5), nullProto(), stringProto("bcd")), }, fail: false, }, } { gotp := reflect.New(reflect.TypeOf(test.want)) if err := decodeValue(test.in, test.t, gotp.Interface()); err != nil { if !test.fail { t.Errorf("%d: cannot decode %v(%v): %v", i, test.in, test.t, err) } continue } if test.fail { t.Errorf("%d: decoding %v(%v) succeeds unexpectedly, want error", i, test.in, test.t) continue } got := reflect.Indirect(gotp).Interface() if !reflect.DeepEqual(got, test.want) { t.Errorf("%d: unexpected decoding result - got %v, want %v", i, got, test.want) continue } } } // Test error cases for decodeValue. func TestDecodeValueErrors(t *testing.T) { for i, test := range []struct { in *proto3.Value t *sppb.Type v interface{} }{ {nullProto(), stringType(), nil}, {nullProto(), stringType(), 1}, } { err := decodeValue(test.in, test.t, test.v) if err == nil { t.Errorf("#%d: want error, got nil", i) } } } // Test NaN encoding/decoding. func TestNaN(t *testing.T) { // Decode NaN value. 
f := 0.0 nf := NullFloat64{} // To float64 if err := decodeValue(floatProto(math.NaN()), floatType(), &f); err != nil { t.Errorf("decodeValue returns %q for %v, want nil", err, floatProto(math.NaN())) } if !math.IsNaN(f) { t.Errorf("f = %v, want %v", f, math.NaN()) } // To NullFloat64 if err := decodeValue(floatProto(math.NaN()), floatType(), &nf); err != nil { t.Errorf("decodeValue returns %q for %v, want nil", err, floatProto(math.NaN())) } if !math.IsNaN(nf.Float64) || !nf.Valid { t.Errorf("f = %v, want %v", f, NullFloat64{math.NaN(), true}) } // Encode NaN value // From float64 v, _, err := encodeValue(math.NaN()) if err != nil { t.Errorf("encodeValue returns %q for NaN, want nil", err) } x, ok := v.GetKind().(*proto3.Value_NumberValue) if !ok { t.Errorf("incorrect type for v.GetKind(): %T, want *proto3.Value_NumberValue", v.GetKind()) } if !math.IsNaN(x.NumberValue) { t.Errorf("x.NumberValue = %v, want %v", x.NumberValue, math.NaN()) } // From NullFloat64 v, _, err = encodeValue(NullFloat64{math.NaN(), true}) if err != nil { t.Errorf("encodeValue returns %q for NaN, want nil", err) } x, ok = v.GetKind().(*proto3.Value_NumberValue) if !ok { t.Errorf("incorrect type for v.GetKind(): %T, want *proto3.Value_NumberValue", v.GetKind()) } if !math.IsNaN(x.NumberValue) { t.Errorf("x.NumberValue = %v, want %v", x.NumberValue, math.NaN()) } } func TestGenericColumnValue(t *testing.T) { for _, test := range []struct { in GenericColumnValue want interface{} fail bool }{ {GenericColumnValue{stringType(), stringProto("abc")}, "abc", false}, {GenericColumnValue{stringType(), stringProto("abc")}, 5, true}, {GenericColumnValue{listType(intType()), listProto(intProto(91), nullProto(), intProto(87))}, []NullInt64{{91, true}, {}, {87, true}}, false}, {GenericColumnValue{intType(), intProto(42)}, GenericColumnValue{intType(), intProto(42)}, false}, // trippy! :-) } { // We take a copy and mutate because we're paranoid about immutability. 
inCopy := GenericColumnValue{ Type: proto.Clone(test.in.Type).(*sppb.Type), Value: proto.Clone(test.in.Value).(*proto3.Value), } gotp := reflect.New(reflect.TypeOf(test.want)) if err := inCopy.Decode(gotp.Interface()); err != nil { if !test.fail { t.Errorf("cannot decode %v to %v: %v", test.in, test.want, err) } continue } if test.fail { t.Errorf("decoding %v to %v succeeds unexpectedly", test.in, test.want) } // mutations to inCopy should be invisible to gotp. inCopy.Type.Code = sppb.TypeCode_TIMESTAMP inCopy.Value.Kind = &proto3.Value_NumberValue{NumberValue: 999} got := reflect.Indirect(gotp).Interface() if !reflect.DeepEqual(got, test.want) { t.Errorf("unexpected decode result - got %v, want %v", got, test.want) } // Test we can go backwards as well. v, err := newGenericColumnValue(test.want) if err != nil { t.Errorf("NewGenericColumnValue failed: %v", err) continue } if !reflect.DeepEqual(*v, test.in) { t.Errorf("unexpected encode result - got %v, want %v", v, test.in) } // If want is a GenericColumnValue, mutate its underlying value to validate // we have taken a deep copy. 
if gcv, ok := test.want.(GenericColumnValue); ok { gcv.Type.Code = sppb.TypeCode_TIMESTAMP gcv.Value.Kind = &proto3.Value_NumberValue{NumberValue: 999} if !reflect.DeepEqual(*v, test.in) { t.Errorf("expected deep copy - got %v, want %v", v, test.in) } } } } func runBench(b *testing.B, size int, f func(a []int) (*proto3.Value, *sppb.Type, error)) { a := make([]int, size) for i := 0; i < b.N; i++ { f(a) } } func BenchmarkEncodeIntArrayOrig1(b *testing.B) { runBench(b, 1, encodeIntArrayOrig) } func BenchmarkEncodeIntArrayOrig10(b *testing.B) { runBench(b, 10, encodeIntArrayOrig) } func BenchmarkEncodeIntArrayOrig100(b *testing.B) { runBench(b, 100, encodeIntArrayOrig) } func BenchmarkEncodeIntArrayOrig1000(b *testing.B) { runBench(b, 1000, encodeIntArrayOrig) } func BenchmarkEncodeIntArrayFunc1(b *testing.B) { runBench(b, 1, encodeIntArrayFunc) } func BenchmarkEncodeIntArrayFunc10(b *testing.B) { runBench(b, 10, encodeIntArrayFunc) } func BenchmarkEncodeIntArrayFunc100(b *testing.B) { runBench(b, 100, encodeIntArrayFunc) } func BenchmarkEncodeIntArrayFunc1000(b *testing.B) { runBench(b, 1000, encodeIntArrayFunc) } func BenchmarkEncodeIntArrayReflect1(b *testing.B) { runBench(b, 1, encodeIntArrayReflect) } func BenchmarkEncodeIntArrayReflect10(b *testing.B) { runBench(b, 10, encodeIntArrayReflect) } func BenchmarkEncodeIntArrayReflect100(b *testing.B) { runBench(b, 100, encodeIntArrayReflect) } func BenchmarkEncodeIntArrayReflect1000(b *testing.B) { runBench(b, 1000, encodeIntArrayReflect) } func encodeIntArrayOrig(a []int) (*proto3.Value, *sppb.Type, error) { vs := make([]*proto3.Value, len(a)) var err error for i := range a { vs[i], _, err = encodeValue(a[i]) if err != nil { return nil, nil, err } } return listProto(vs...), listType(intType()), nil } func encodeIntArrayFunc(a []int) (*proto3.Value, *sppb.Type, error) { v, err := encodeArray(len(a), func(i int) interface{} { return a[i] }) if err != nil { return nil, nil, err } return v, listType(intType()), nil } 
func encodeIntArrayReflect(a []int) (*proto3.Value, *sppb.Type, error) { v, err := encodeArrayReflect(a) if err != nil { return nil, nil, err } return v, listType(intType()), nil } func encodeArrayReflect(a interface{}) (*proto3.Value, error) { va := reflect.ValueOf(a) len := va.Len() vs := make([]*proto3.Value, len) var err error for i := 0; i < len; i++ { vs[i], _, err = encodeValue(va.Index(i).Interface()) if err != nil { return nil, err } } return listProto(vs...), nil } golang-google-cloud-0.9.0/speech/000077500000000000000000000000001312234511600166125ustar00rootroot00000000000000golang-google-cloud-0.9.0/speech/apiv1/000077500000000000000000000000001312234511600176325ustar00rootroot00000000000000golang-google-cloud-0.9.0/speech/apiv1/doc.go000066400000000000000000000024451312234511600207330ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package speech is an experimental, auto-generated package for the // Google Cloud Speech API. // // Google Cloud Speech API. 
package speech // import "cloud.google.com/go/speech/apiv1" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", } } golang-google-cloud-0.9.0/speech/apiv1/mock_test.go000066400000000000000000000247511312234511600221620ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package speech import ( speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1" longrunningpb "google.golang.org/genproto/googleapis/longrunning" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockSpeechServer struct { // Embed for forward compatibility. 
// Tests will keep working if more methods are added // in the future. speechpb.SpeechServer reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockSpeechServer) Recognize(ctx context.Context, req *speechpb.RecognizeRequest) (*speechpb.RecognizeResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*speechpb.RecognizeResponse), nil } func (s *mockSpeechServer) LongRunningRecognize(ctx context.Context, req *speechpb.LongRunningRecognizeRequest) (*longrunningpb.Operation, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*longrunningpb.Operation), nil } func (s *mockSpeechServer) StreamingRecognize(stream speechpb.Speech_StreamingRecognizeServer) error { md, _ := metadata.FromIncomingContext(stream.Context()) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } for { if req, err := stream.Recv(); err == io.EOF { break } else if err != nil { return err } else { s.reqs = append(s.reqs, req) } } if s.err != nil { return s.err } for _, v := range s.resps { if err := stream.Send(v.(*speechpb.StreamingRecognizeResponse)); err != nil { return err } } return nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. 
var clientOpt option.ClientOption var ( mockSpeech mockSpeechServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() speechpb.RegisterSpeechServer(serv, &mockSpeech) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestSpeechRecognize(t *testing.T) { var expectedResponse *speechpb.RecognizeResponse = &speechpb.RecognizeResponse{} mockSpeech.err = nil mockSpeech.reqs = nil mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC var sampleRateHertz int32 = 44100 var languageCode string = "en-US" var config = &speechpb.RecognitionConfig{ Encoding: encoding, SampleRateHertz: sampleRateHertz, LanguageCode: languageCode, } var uri string = "gs://bucket_name/file_name.flac" var audio = &speechpb.RecognitionAudio{ AudioSource: &speechpb.RecognitionAudio_Uri{ Uri: uri, }, } var request = &speechpb.RecognizeRequest{ Config: config, Audio: audio, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.Recognize(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestSpeechRecognizeError(t *testing.T) { errCode := codes.PermissionDenied mockSpeech.err = gstatus.Error(errCode, "test error") var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC var sampleRateHertz int32 = 44100 var languageCode string = "en-US" var config = &speechpb.RecognitionConfig{ Encoding: encoding, SampleRateHertz: sampleRateHertz, LanguageCode: 
languageCode, } var uri string = "gs://bucket_name/file_name.flac" var audio = &speechpb.RecognitionAudio{ AudioSource: &speechpb.RecognitionAudio_Uri{ Uri: uri, }, } var request = &speechpb.RecognizeRequest{ Config: config, Audio: audio, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.Recognize(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestSpeechLongRunningRecognize(t *testing.T) { var expectedResponse *speechpb.LongRunningRecognizeResponse = &speechpb.LongRunningRecognizeResponse{} mockSpeech.err = nil mockSpeech.reqs = nil any, err := ptypes.MarshalAny(expectedResponse) if err != nil { t.Fatal(err) } mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{ Name: "longrunning-test", Done: true, Result: &longrunningpb.Operation_Response{Response: any}, }) var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC var sampleRateHertz int32 = 44100 var languageCode string = "en-US" var config = &speechpb.RecognitionConfig{ Encoding: encoding, SampleRateHertz: sampleRateHertz, LanguageCode: languageCode, } var uri string = "gs://bucket_name/file_name.flac" var audio = &speechpb.RecognitionAudio{ AudioSource: &speechpb.RecognitionAudio_Uri{ Uri: uri, }, } var request = &speechpb.LongRunningRecognizeRequest{ Config: config, Audio: audio, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } respLRO, err := c.LongRunningRecognize(context.Background(), request) if err != nil { t.Fatal(err) } resp, err := respLRO.Wait(context.Background()) if err != nil { t.Fatal(err) } if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { 
t.Errorf("wrong response %q, want %q)", got, want) } } func TestSpeechLongRunningRecognizeError(t *testing.T) { errCode := codes.PermissionDenied mockSpeech.err = nil mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{ Name: "longrunning-test", Done: true, Result: &longrunningpb.Operation_Error{ Error: &status.Status{ Code: int32(errCode), Message: "test error", }, }, }) var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC var sampleRateHertz int32 = 44100 var languageCode string = "en-US" var config = &speechpb.RecognitionConfig{ Encoding: encoding, SampleRateHertz: sampleRateHertz, LanguageCode: languageCode, } var uri string = "gs://bucket_name/file_name.flac" var audio = &speechpb.RecognitionAudio{ AudioSource: &speechpb.RecognitionAudio_Uri{ Uri: uri, }, } var request = &speechpb.LongRunningRecognizeRequest{ Config: config, Audio: audio, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } respLRO, err := c.LongRunningRecognize(context.Background(), request) if err != nil { t.Fatal(err) } resp, err := respLRO.Wait(context.Background()) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestSpeechStreamingRecognize(t *testing.T) { var expectedResponse *speechpb.StreamingRecognizeResponse = &speechpb.StreamingRecognizeResponse{} mockSpeech.err = nil mockSpeech.reqs = nil mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } stream, err := c.StreamingRecognize(context.Background()) if err != nil { t.Fatal(err) } if err := stream.Send(request); err != nil { t.Fatal(err) } if err := stream.CloseSend(); err != nil { t.Fatal(err) } resp, err := 
stream.Recv() if err != nil { t.Fatal(err) } if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestSpeechStreamingRecognizeError(t *testing.T) { errCode := codes.PermissionDenied mockSpeech.err = gstatus.Error(errCode, "test error") var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } stream, err := c.StreamingRecognize(context.Background()) if err != nil { t.Fatal(err) } if err := stream.Send(request); err != nil { t.Fatal(err) } if err := stream.CloseSend(); err != nil { t.Fatal(err) } resp, err := stream.Recv() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } golang-google-cloud-0.9.0/speech/apiv1/speech_client.go000066400000000000000000000231651312234511600227750ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package speech import ( "time" "cloud.google.com/go/internal/version" "cloud.google.com/go/longrunning" lroauto "cloud.google.com/go/longrunning/autogen" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1" longrunningpb "google.golang.org/genproto/googleapis/longrunning" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // CallOptions contains the retry settings for each method of Client. type CallOptions struct { Recognize []gax.CallOption LongRunningRecognize []gax.CallOption StreamingRecognize []gax.CallOption } func defaultClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("speech.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultCallOptions() *CallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &CallOptions{ Recognize: retry[[2]string{"default", "idempotent"}], LongRunningRecognize: retry[[2]string{"default", "non_idempotent"}], StreamingRecognize: retry[[2]string{"default", "idempotent"}], } } // Client is a client for interacting with Google Cloud Speech API. type Client struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. client speechpb.SpeechClient // LROClient is used internally to handle longrunning operations. // It is exposed so that its CallOptions can be modified if required. // Users should not Close this client. LROClient *lroauto.OperationsClient // The call options for this service. CallOptions *CallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewClient creates a new speech client. 
// // Service that implements Google Cloud Speech API. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) if err != nil { return nil, err } c := &Client{ conn: conn, CallOptions: defaultCallOptions(), client: speechpb.NewSpeechClient(conn), } c.SetGoogleClientInfo() c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) if err != nil { // This error "should not happen", since we are just reusing old connection // and never actually need to dial. // If this does happen, we could leak conn. However, we cannot close conn: // If the user invoked the function with option.WithGRPCConn, // we would close a connection that's still in use. // TODO(pongad): investigate error conditions. return nil, err } return c, nil } // Connection returns the client's connection to the API service. func (c *Client) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *Client) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *Client) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // Recognize performs synchronous speech recognition: receive results after all audio // has been sent and processed. func (c *Client) Recognize(ctx context.Context, req *speechpb.RecognizeRequest, opts ...gax.CallOption) (*speechpb.RecognizeResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.Recognize[0:len(c.CallOptions.Recognize):len(c.CallOptions.Recognize)], opts...) 
var resp *speechpb.RecognizeResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.Recognize(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // LongRunningRecognize performs asynchronous speech recognition: receive results via the // google.longrunning.Operations interface. Returns either an // `Operation.error` or an `Operation.response` which contains // a `LongRunningRecognizeResponse` message. func (c *Client) LongRunningRecognize(ctx context.Context, req *speechpb.LongRunningRecognizeRequest, opts ...gax.CallOption) (*LongRunningRecognizeOperation, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.LongRunningRecognize[0:len(c.CallOptions.LongRunningRecognize):len(c.CallOptions.LongRunningRecognize)], opts...) var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.LongRunningRecognize(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return &LongRunningRecognizeOperation{ lro: longrunning.InternalNewOperation(c.LROClient, resp), }, nil } // StreamingRecognize performs bidirectional streaming speech recognition: receive results while // sending audio. This method is only available via the gRPC API (not REST). func (c *Client) StreamingRecognize(ctx context.Context, opts ...gax.CallOption) (speechpb.Speech_StreamingRecognizeClient, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.StreamingRecognize[0:len(c.CallOptions.StreamingRecognize):len(c.CallOptions.StreamingRecognize)], opts...) var resp speechpb.Speech_StreamingRecognizeClient err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.StreamingRecognize(ctx, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, err } return resp, nil } // LongRunningRecognizeOperation manages a long-running operation from LongRunningRecognize. type LongRunningRecognizeOperation struct { lro *longrunning.Operation } // LongRunningRecognizeOperation returns a new LongRunningRecognizeOperation from a given name. // The name must be that of a previously created LongRunningRecognizeOperation, possibly from a different process. func (c *Client) LongRunningRecognizeOperation(name string) *LongRunningRecognizeOperation { return &LongRunningRecognizeOperation{ lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), } } // Wait blocks until the long-running operation is completed, returning the response and any errors encountered. // // See documentation of Poll for error-handling information. func (op *LongRunningRecognizeOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*speechpb.LongRunningRecognizeResponse, error) { var resp speechpb.LongRunningRecognizeResponse if err := op.lro.Wait(ctx, &resp, opts...); err != nil { return nil, err } return &resp, nil } // Poll fetches the latest state of the long-running operation. // // Poll also fetches the latest metadata, which can be retrieved by Metadata. // // If Poll fails, the error is returned and op is unmodified. If Poll succeeds and // the operation has completed with failure, the error is returned and op.Done will return true. // If Poll succeeds and the operation has completed successfully, // op.Done will return true, and the response of the operation is returned. // If Poll succeeds and the operation has not completed, the returned response and error are both nil. 
func (op *LongRunningRecognizeOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*speechpb.LongRunningRecognizeResponse, error) { var resp speechpb.LongRunningRecognizeResponse if err := op.lro.Poll(ctx, &resp, opts...); err != nil { return nil, err } if !op.Done() { return nil, nil } return &resp, nil } // Metadata returns metadata associated with the long-running operation. // Metadata itself does not contact the server, but Poll does. // To get the latest metadata, call this method after a successful call to Poll. // If the metadata is not available, the returned metadata and error are both nil. func (op *LongRunningRecognizeOperation) Metadata() (*speechpb.LongRunningRecognizeMetadata, error) { var meta speechpb.LongRunningRecognizeMetadata if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { return nil, nil } else if err != nil { return nil, err } return &meta, nil } // Done reports whether the long-running operation has completed. func (op *LongRunningRecognizeOperation) Done() bool { return op.lro.Done() } // Name returns the name of the long-running operation. // The name is assigned by the server and is unique within the service from which the operation is created. func (op *LongRunningRecognizeOperation) Name() string { return op.lro.Name() } golang-google-cloud-0.9.0/speech/apiv1/speech_client_example_test.go000066400000000000000000000045041312234511600255430ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package speech_test import ( "io" "cloud.google.com/go/speech/apiv1" "golang.org/x/net/context" speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1" ) func ExampleNewClient() { ctx := context.Background() c, err := speech.NewClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleClient_Recognize() { ctx := context.Background() c, err := speech.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &speechpb.RecognizeRequest{ // TODO: Fill request struct fields. } resp, err := c.Recognize(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleClient_LongRunningRecognize() { ctx := context.Background() c, err := speech.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &speechpb.LongRunningRecognizeRequest{ // TODO: Fill request struct fields. } op, err := c.LongRunningRecognize(ctx, req) if err != nil { // TODO: Handle error. } resp, err := op.Wait(ctx) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleClient_StreamingRecognize() { ctx := context.Background() c, err := speech.NewClient(ctx) if err != nil { // TODO: Handle error. } stream, err := c.StreamingRecognize(ctx) if err != nil { // TODO: Handle error. } go func() { reqs := []*speechpb.StreamingRecognizeRequest{ // TODO: Create requests. } for _, req := range reqs { if err := stream.Send(req); err != nil { // TODO: Handle error. } } stream.CloseSend() }() for { resp, err := stream.Recv() if err == io.EOF { break } if err != nil { // TODO: handle error. } // TODO: Use resp. 
_ = resp } } golang-google-cloud-0.9.0/speech/apiv1beta1/000077500000000000000000000000001312234511600205475ustar00rootroot00000000000000golang-google-cloud-0.9.0/speech/apiv1beta1/SyncRecognize_smoke_test.go000066400000000000000000000037471312234511600261300ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package speech import ( speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" ) import ( "strconv" "testing" "time" "cloud.google.com/go/internal/testutil" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" ) var _ = iterator.Done var _ = strconv.FormatUint var _ = time.Now func TestSpeechSmoke(t *testing.T) { if testing.Short() { t.Skip("skipping smoke test in short mode") } ctx := context.Background() ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) if ts == nil { t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") } projectId := testutil.ProjID() _ = projectId c, err := NewClient(ctx, option.WithTokenSource(ts)) if err != nil { t.Fatal(err) } var languageCode string = "en-US" var sampleRate int32 = 44100 var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC var config = &speechpb.RecognitionConfig{ LanguageCode: languageCode, SampleRate: sampleRate, Encoding: encoding, } var uri string = "gs://gapic-toolkit/hello.flac" var audio = &speechpb.RecognitionAudio{ AudioSource: &speechpb.RecognitionAudio_Uri{ Uri: uri, }, } var request = &speechpb.SyncRecognizeRequest{ Config: config, Audio: audio, } if _, err := c.SyncRecognize(ctx, request); err != nil { t.Error(err) } } golang-google-cloud-0.9.0/speech/apiv1beta1/doc.go000066400000000000000000000024521312234511600216460ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package speech is an experimental, auto-generated package for the // Google Cloud Speech API. // // Google Cloud Speech API. 
package speech // import "cloud.google.com/go/speech/apiv1beta1" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", } } golang-google-cloud-0.9.0/speech/apiv1beta1/mock_test.go000066400000000000000000000242361312234511600230750ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package speech import ( speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" longrunningpb "google.golang.org/genproto/googleapis/longrunning" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockSpeechServer struct { // Embed for forward compatibility. 
// Tests will keep working if more methods are added // in the future. speechpb.SpeechServer reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockSpeechServer) SyncRecognize(ctx context.Context, req *speechpb.SyncRecognizeRequest) (*speechpb.SyncRecognizeResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*speechpb.SyncRecognizeResponse), nil } func (s *mockSpeechServer) AsyncRecognize(ctx context.Context, req *speechpb.AsyncRecognizeRequest) (*longrunningpb.Operation, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*longrunningpb.Operation), nil } func (s *mockSpeechServer) StreamingRecognize(stream speechpb.Speech_StreamingRecognizeServer) error { md, _ := metadata.FromIncomingContext(stream.Context()) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } for { if req, err := stream.Recv(); err == io.EOF { break } else if err != nil { return err } else { s.reqs = append(s.reqs, req) } } if s.err != nil { return s.err } for _, v := range s.resps { if err := stream.Send(v.(*speechpb.StreamingRecognizeResponse)); err != nil { return err } } return nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. 
var clientOpt option.ClientOption var ( mockSpeech mockSpeechServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() speechpb.RegisterSpeechServer(serv, &mockSpeech) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestSpeechSyncRecognize(t *testing.T) { var expectedResponse *speechpb.SyncRecognizeResponse = &speechpb.SyncRecognizeResponse{} mockSpeech.err = nil mockSpeech.reqs = nil mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC var sampleRate int32 = 44100 var config = &speechpb.RecognitionConfig{ Encoding: encoding, SampleRate: sampleRate, } var uri string = "gs://bucket_name/file_name.flac" var audio = &speechpb.RecognitionAudio{ AudioSource: &speechpb.RecognitionAudio_Uri{ Uri: uri, }, } var request = &speechpb.SyncRecognizeRequest{ Config: config, Audio: audio, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.SyncRecognize(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestSpeechSyncRecognizeError(t *testing.T) { errCode := codes.PermissionDenied mockSpeech.err = gstatus.Error(errCode, "test error") var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC var sampleRate int32 = 44100 var config = &speechpb.RecognitionConfig{ Encoding: encoding, SampleRate: sampleRate, } var uri string = "gs://bucket_name/file_name.flac" var audio = &speechpb.RecognitionAudio{ AudioSource: 
&speechpb.RecognitionAudio_Uri{ Uri: uri, }, } var request = &speechpb.SyncRecognizeRequest{ Config: config, Audio: audio, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.SyncRecognize(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestSpeechAsyncRecognize(t *testing.T) { var expectedResponse *speechpb.AsyncRecognizeResponse = &speechpb.AsyncRecognizeResponse{} mockSpeech.err = nil mockSpeech.reqs = nil any, err := ptypes.MarshalAny(expectedResponse) if err != nil { t.Fatal(err) } mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{ Name: "longrunning-test", Done: true, Result: &longrunningpb.Operation_Response{Response: any}, }) var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC var sampleRate int32 = 44100 var config = &speechpb.RecognitionConfig{ Encoding: encoding, SampleRate: sampleRate, } var uri string = "gs://bucket_name/file_name.flac" var audio = &speechpb.RecognitionAudio{ AudioSource: &speechpb.RecognitionAudio_Uri{ Uri: uri, }, } var request = &speechpb.AsyncRecognizeRequest{ Config: config, Audio: audio, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } respLRO, err := c.AsyncRecognize(context.Background(), request) if err != nil { t.Fatal(err) } resp, err := respLRO.Wait(context.Background()) if err != nil { t.Fatal(err) } if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestSpeechAsyncRecognizeError(t *testing.T) { errCode := codes.PermissionDenied mockSpeech.err = nil mockSpeech.resps = append(mockSpeech.resps[:0], 
&longrunningpb.Operation{ Name: "longrunning-test", Done: true, Result: &longrunningpb.Operation_Error{ Error: &status.Status{ Code: int32(errCode), Message: "test error", }, }, }) var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC var sampleRate int32 = 44100 var config = &speechpb.RecognitionConfig{ Encoding: encoding, SampleRate: sampleRate, } var uri string = "gs://bucket_name/file_name.flac" var audio = &speechpb.RecognitionAudio{ AudioSource: &speechpb.RecognitionAudio_Uri{ Uri: uri, }, } var request = &speechpb.AsyncRecognizeRequest{ Config: config, Audio: audio, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } respLRO, err := c.AsyncRecognize(context.Background(), request) if err != nil { t.Fatal(err) } resp, err := respLRO.Wait(context.Background()) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestSpeechStreamingRecognize(t *testing.T) { var resultIndex int32 = 520358448 var expectedResponse = &speechpb.StreamingRecognizeResponse{ ResultIndex: resultIndex, } mockSpeech.err = nil mockSpeech.reqs = nil mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } stream, err := c.StreamingRecognize(context.Background()) if err != nil { t.Fatal(err) } if err := stream.Send(request); err != nil { t.Fatal(err) } if err := stream.CloseSend(); err != nil { t.Fatal(err) } resp, err := stream.Recv() if err != nil { t.Fatal(err) } if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) 
} } func TestSpeechStreamingRecognizeError(t *testing.T) { errCode := codes.PermissionDenied mockSpeech.err = gstatus.Error(errCode, "test error") var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } stream, err := c.StreamingRecognize(context.Background()) if err != nil { t.Fatal(err) } if err := stream.Send(request); err != nil { t.Fatal(err) } if err := stream.CloseSend(); err != nil { t.Fatal(err) } resp, err := stream.Recv() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } golang-google-cloud-0.9.0/speech/apiv1beta1/speech_client.go000066400000000000000000000230351312234511600237060ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package speech import ( "time" "cloud.google.com/go/internal/version" "cloud.google.com/go/longrunning" lroauto "cloud.google.com/go/longrunning/autogen" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" longrunningpb "google.golang.org/genproto/googleapis/longrunning" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // CallOptions contains the retry settings for each method of Client. type CallOptions struct { SyncRecognize []gax.CallOption AsyncRecognize []gax.CallOption StreamingRecognize []gax.CallOption } func defaultClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("speech.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultCallOptions() *CallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &CallOptions{ SyncRecognize: retry[[2]string{"default", "idempotent"}], AsyncRecognize: retry[[2]string{"default", "idempotent"}], StreamingRecognize: retry[[2]string{"default", "non_idempotent"}], } } // Client is a client for interacting with Google Cloud Speech API. type Client struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. client speechpb.SpeechClient // LROClient is used internally to handle longrunning operations. // It is exposed so that its CallOptions can be modified if required. // Users should not Close this client. LROClient *lroauto.OperationsClient // The call options for this service. CallOptions *CallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewClient creates a new speech client. 
// // Service that implements Google Cloud Speech API. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) if err != nil { return nil, err } c := &Client{ conn: conn, CallOptions: defaultCallOptions(), client: speechpb.NewSpeechClient(conn), } c.SetGoogleClientInfo() c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) if err != nil { // This error "should not happen", since we are just reusing old connection // and never actually need to dial. // If this does happen, we could leak conn. However, we cannot close conn: // If the user invoked the function with option.WithGRPCConn, // we would close a connection that's still in use. // TODO(pongad): investigate error conditions. return nil, err } return c, nil } // Connection returns the client's connection to the API service. func (c *Client) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *Client) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *Client) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // SyncRecognize performs synchronous speech recognition: receive results after all audio // has been sent and processed. 
func (c *Client) SyncRecognize(ctx context.Context, req *speechpb.SyncRecognizeRequest, opts ...gax.CallOption) (*speechpb.SyncRecognizeResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.SyncRecognize[0:len(c.CallOptions.SyncRecognize):len(c.CallOptions.SyncRecognize)], opts...) var resp *speechpb.SyncRecognizeResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.SyncRecognize(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // AsyncRecognize performs asynchronous speech recognition: receive results via the // [google.longrunning.Operations] // (/speech/reference/rest/v1beta1/operations#Operation) // interface. Returns either an // `Operation.error` or an `Operation.response` which contains // an `AsyncRecognizeResponse` message. func (c *Client) AsyncRecognize(ctx context.Context, req *speechpb.AsyncRecognizeRequest, opts ...gax.CallOption) (*AsyncRecognizeOperation, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.AsyncRecognize[0:len(c.CallOptions.AsyncRecognize):len(c.CallOptions.AsyncRecognize)], opts...) var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.AsyncRecognize(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return &AsyncRecognizeOperation{ lro: longrunning.InternalNewOperation(c.LROClient, resp), }, nil } // StreamingRecognize performs bidirectional streaming speech recognition: receive results while // sending audio. This method is only available via the gRPC API (not REST). 
func (c *Client) StreamingRecognize(ctx context.Context, opts ...gax.CallOption) (speechpb.Speech_StreamingRecognizeClient, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.StreamingRecognize[0:len(c.CallOptions.StreamingRecognize):len(c.CallOptions.StreamingRecognize)], opts...) var resp speechpb.Speech_StreamingRecognizeClient err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.StreamingRecognize(ctx, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // AsyncRecognizeOperation manages a long-running operation from AsyncRecognize. type AsyncRecognizeOperation struct { lro *longrunning.Operation } // AsyncRecognizeOperation returns a new AsyncRecognizeOperation from a given name. // The name must be that of a previously created AsyncRecognizeOperation, possibly from a different process. func (c *Client) AsyncRecognizeOperation(name string) *AsyncRecognizeOperation { return &AsyncRecognizeOperation{ lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), } } // Wait blocks until the long-running operation is completed, returning the response and any errors encountered. // // See documentation of Poll for error-handling information. func (op *AsyncRecognizeOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*speechpb.AsyncRecognizeResponse, error) { var resp speechpb.AsyncRecognizeResponse if err := op.lro.Wait(ctx, &resp, opts...); err != nil { return nil, err } return &resp, nil } // Poll fetches the latest state of the long-running operation. // // Poll also fetches the latest metadata, which can be retrieved by Metadata. // // If Poll fails, the error is returned and op is unmodified. If Poll succeeds and // the operation has completed with failure, the error is returned and op.Done will return true. 
// If Poll succeeds and the operation has completed successfully, // op.Done will return true, and the response of the operation is returned. // If Poll succeeds and the operation has not completed, the returned response and error are both nil. func (op *AsyncRecognizeOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*speechpb.AsyncRecognizeResponse, error) { var resp speechpb.AsyncRecognizeResponse if err := op.lro.Poll(ctx, &resp, opts...); err != nil { return nil, err } if !op.Done() { return nil, nil } return &resp, nil } // Metadata returns metadata associated with the long-running operation. // Metadata itself does not contact the server, but Poll does. // To get the latest metadata, call this method after a successful call to Poll. // If the metadata is not available, the returned metadata and error are both nil. func (op *AsyncRecognizeOperation) Metadata() (*speechpb.AsyncRecognizeMetadata, error) { var meta speechpb.AsyncRecognizeMetadata if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { return nil, nil } else if err != nil { return nil, err } return &meta, nil } // Done reports whether the long-running operation has completed. func (op *AsyncRecognizeOperation) Done() bool { return op.lro.Done() } // Name returns the name of the long-running operation. // The name is assigned by the server and is unique within the service from which the operation is created. func (op *AsyncRecognizeOperation) Name() string { return op.lro.Name() } golang-google-cloud-0.9.0/speech/apiv1beta1/speech_client_example_test.go000066400000000000000000000045101312234511600264550ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package speech_test import ( "io" "cloud.google.com/go/speech/apiv1beta1" "golang.org/x/net/context" speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" ) func ExampleNewClient() { ctx := context.Background() c, err := speech.NewClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleClient_SyncRecognize() { ctx := context.Background() c, err := speech.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &speechpb.SyncRecognizeRequest{ // TODO: Fill request struct fields. } resp, err := c.SyncRecognize(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleClient_AsyncRecognize() { ctx := context.Background() c, err := speech.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &speechpb.AsyncRecognizeRequest{ // TODO: Fill request struct fields. } op, err := c.AsyncRecognize(ctx, req) if err != nil { // TODO: Handle error. } resp, err := op.Wait(ctx) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleClient_StreamingRecognize() { ctx := context.Background() c, err := speech.NewClient(ctx) if err != nil { // TODO: Handle error. } stream, err := c.StreamingRecognize(ctx) if err != nil { // TODO: Handle error. } go func() { reqs := []*speechpb.StreamingRecognizeRequest{ // TODO: Create requests. } for _, req := range reqs { if err := stream.Send(req); err != nil { // TODO: Handle error. 
} } stream.CloseSend() }() for { resp, err := stream.Recv() if err == io.EOF { break } if err != nil { // TODO: handle error. } // TODO: Use resp. _ = resp } } golang-google-cloud-0.9.0/storage/000077500000000000000000000000001312234511600170075ustar00rootroot00000000000000golang-google-cloud-0.9.0/storage/acl.go000066400000000000000000000162141312234511600201010ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "fmt" "golang.org/x/net/context" raw "google.golang.org/api/storage/v1" ) // ACLRole is the level of access to grant. type ACLRole string const ( RoleOwner ACLRole = "OWNER" RoleReader ACLRole = "READER" RoleWriter ACLRole = "WRITER" ) // ACLEntity refers to a user or group. // They are sometimes referred to as grantees. // // It could be in the form of: // "user-", "user-", "group-", "group-", // "domain-" and "project-team-". // // Or one of the predefined constants: AllUsers, AllAuthenticatedUsers. type ACLEntity string const ( AllUsers ACLEntity = "allUsers" AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers" ) // ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket. type ACLRule struct { Entity ACLEntity Role ACLRole } // ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object. 
type ACLHandle struct { c *Client bucket string object string isDefault bool } // Delete permanently deletes the ACL entry for the given entity. func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error { if a.object != "" { return a.objectDelete(ctx, entity) } if a.isDefault { return a.bucketDefaultDelete(ctx, entity) } return a.bucketDelete(ctx, entity) } // Set sets the permission level for the given entity. func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error { if a.object != "" { return a.objectSet(ctx, entity, role) } if a.isDefault { return a.bucketDefaultSet(ctx, entity, role) } return a.bucketSet(ctx, entity, role) } // List retrieves ACL entries. func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) { if a.object != "" { return a.objectList(ctx) } if a.isDefault { return a.bucketDefaultList(ctx) } return a.bucketList(ctx) } func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { var acls *raw.ObjectAccessControls var err error err = runWithRetry(ctx, func() error { req := a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx) setClientHeader(req.Header()) acls, err = req.Do() return err }) if err != nil { return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err) } return toACLRules(acls.Items), nil } func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role ACLRole) error { acl := &raw.ObjectAccessControl{ Bucket: a.bucket, Entity: string(entity), Role: string(role), } err := runWithRetry(ctx, func() error { req := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx) setClientHeader(req.Header()) _, err := req.Do() return err }) if err != nil { return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) } return nil } func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { err := 
runWithRetry(ctx, func() error { req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx) setClientHeader(req.Header()) return req.Do() }) if err != nil { return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) } return nil } func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { var acls *raw.BucketAccessControls var err error err = runWithRetry(ctx, func() error { req := a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx) setClientHeader(req.Header()) acls, err = req.Do() return err }) if err != nil { return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err) } r := make([]ACLRule, len(acls.Items)) for i, v := range acls.Items { r[i].Entity = ACLEntity(v.Entity) r[i].Role = ACLRole(v.Role) } return r, nil } func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { acl := &raw.BucketAccessControl{ Bucket: a.bucket, Entity: string(entity), Role: string(role), } err := runWithRetry(ctx, func() error { req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx) setClientHeader(req.Header()) _, err := req.Do() return err }) if err != nil { return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) } return nil } func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { err := runWithRetry(ctx, func() error { req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx) setClientHeader(req.Header()) return req.Do() }) if err != nil { return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) } return nil } func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { var acls *raw.ObjectAccessControls var err error err = runWithRetry(ctx, func() error { req := a.c.raw.ObjectAccessControls.List(a.bucket, 
a.object).Context(ctx) setClientHeader(req.Header()) acls, err = req.Do() return err }) if err != nil { return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err) } return toACLRules(acls.Items), nil } func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole) error { acl := &raw.ObjectAccessControl{ Bucket: a.bucket, Entity: string(entity), Role: string(role), } err := runWithRetry(ctx, func() error { req := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx) setClientHeader(req.Header()) _, err := req.Do() return err }) if err != nil { return fmt.Errorf("storage: error updating object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err) } return nil } func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { err := runWithRetry(ctx, func() error { req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)).Context(ctx) setClientHeader(req.Header()) return req.Do() }) if err != nil { return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err) } return nil } func toACLRules(items []*raw.ObjectAccessControl) []ACLRule { r := make([]ACLRule, 0, len(items)) for _, item := range items { r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)}) } return r } golang-google-cloud-0.9.0/storage/bucket.go000066400000000000000000000377561312234511600206350ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "fmt" "net/http" "reflect" "time" "cloud.google.com/go/internal/optional" "golang.org/x/net/context" "google.golang.org/api/googleapi" "google.golang.org/api/iterator" raw "google.golang.org/api/storage/v1" ) // BucketHandle provides operations on a Google Cloud Storage bucket. // Use Client.Bucket to get a handle. type BucketHandle struct { c *Client name string acl ACLHandle defaultObjectACL ACLHandle conds *BucketConditions } // Bucket returns a BucketHandle, which provides operations on the named bucket. // This call does not perform any network operations. // // The supplied name must contain only lowercase letters, numbers, dashes, // underscores, and dots. The full specification for valid bucket names can be // found at: // https://cloud.google.com/storage/docs/bucket-naming func (c *Client) Bucket(name string) *BucketHandle { return &BucketHandle{ c: c, name: name, acl: ACLHandle{ c: c, bucket: name, }, defaultObjectACL: ACLHandle{ c: c, bucket: name, isDefault: true, }, } } // Create creates the Bucket in the project. // If attrs is nil the API defaults will be used. func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error { var bkt *raw.Bucket if attrs != nil { bkt = attrs.toRawBucket() } else { bkt = &raw.Bucket{} } bkt.Name = b.name req := b.c.raw.Buckets.Insert(projectID, bkt) setClientHeader(req.Header()) return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err }) } // Delete deletes the Bucket. 
func (b *BucketHandle) Delete(ctx context.Context) error { req, err := b.newDeleteCall() if err != nil { return err } return runWithRetry(ctx, func() error { return req.Context(ctx).Do() }) } func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) { req := b.c.raw.Buckets.Delete(b.name) setClientHeader(req.Header()) if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil { return nil, err } return req, nil } // ACL returns an ACLHandle, which provides access to the bucket's access control list. // This controls who can list, create or overwrite the objects in a bucket. // This call does not perform any network operations. func (b *BucketHandle) ACL() *ACLHandle { return &b.acl } // DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. // These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. // This call does not perform any network operations. func (b *BucketHandle) DefaultObjectACL() *ACLHandle { return &b.defaultObjectACL } // Object returns an ObjectHandle, which provides operations on the named object. // This call does not perform any network operations. // // name must consist entirely of valid UTF-8-encoded runes. The full specification // for valid object names can be found at: // https://cloud.google.com/storage/docs/bucket-naming func (b *BucketHandle) Object(name string) *ObjectHandle { return &ObjectHandle{ c: b.c, bucket: b.name, object: name, acl: ACLHandle{ c: b.c, bucket: b.name, object: name, }, gen: -1, } } // Attrs returns the metadata for the bucket. 
func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) { req, err := b.newGetCall() if err != nil { return nil, err } var resp *raw.Bucket err = runWithRetry(ctx, func() error { resp, err = req.Context(ctx).Do() return err }) if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { return nil, ErrBucketNotExist } if err != nil { return nil, err } return newBucket(resp), nil } func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) { req := b.c.raw.Buckets.Get(b.name).Projection("full") setClientHeader(req.Header()) if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil { return nil, err } return req, nil } func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (*BucketAttrs, error) { req, err := b.newPatchCall(&uattrs) if err != nil { return nil, err } // TODO(jba): retry iff metagen is set? rb, err := req.Context(ctx).Do() if err != nil { return nil, err } return newBucket(rb), nil } func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) { rb := uattrs.toRawBucket() req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full") setClientHeader(req.Header()) if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil { return nil, err } return req, nil } // BucketAttrs represents the metadata for a Google Cloud Storage bucket. type BucketAttrs struct { // Name is the name of the bucket. Name string // ACL is the list of access control rules on the bucket. ACL []ACLRule // DefaultObjectACL is the list of access controls to // apply to new objects when no object ACL is provided. DefaultObjectACL []ACLRule // Location is the location of the bucket. It defaults to "US". Location string // MetaGeneration is the metadata generation of the bucket. MetaGeneration int64 // StorageClass is the default storage class of the bucket. This defines // how objects in the bucket are stored and determines the SLA // and the cost of storage. 
Typical values are "MULTI_REGIONAL", // "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which // is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on // the bucket's location settings. StorageClass string // Created is the creation time of the bucket. Created time.Time // VersioningEnabled reports whether this bucket has versioning enabled. // This field is read-only. VersioningEnabled bool // Labels are the bucket's labels. Labels map[string]string } func newBucket(b *raw.Bucket) *BucketAttrs { if b == nil { return nil } bucket := &BucketAttrs{ Name: b.Name, Location: b.Location, MetaGeneration: b.Metageneration, StorageClass: b.StorageClass, Created: convertTime(b.TimeCreated), VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, Labels: b.Labels, } acl := make([]ACLRule, len(b.Acl)) for i, rule := range b.Acl { acl[i] = ACLRule{ Entity: ACLEntity(rule.Entity), Role: ACLRole(rule.Role), } } bucket.ACL = acl objACL := make([]ACLRule, len(b.DefaultObjectAcl)) for i, rule := range b.DefaultObjectAcl { objACL[i] = ACLRule{ Entity: ACLEntity(rule.Entity), Role: ACLRole(rule.Role), } } bucket.DefaultObjectACL = objACL return bucket } // toRawBucket copies the editable attribute from b to the raw library's Bucket type. func (b *BucketAttrs) toRawBucket() *raw.Bucket { var acl []*raw.BucketAccessControl if len(b.ACL) > 0 { acl = make([]*raw.BucketAccessControl, len(b.ACL)) for i, rule := range b.ACL { acl[i] = &raw.BucketAccessControl{ Entity: string(rule.Entity), Role: string(rule.Role), } } } dACL := toRawObjectACL(b.DefaultObjectACL) // Copy label map. var labels map[string]string if len(b.Labels) > 0 { labels = make(map[string]string, len(b.Labels)) for k, v := range b.Labels { labels[k] = v } } // Ignore VersioningEnabled if it is false. This is OK because // we only call this method when creating a bucket, and by default // new buckets have versioning off. 
var v *raw.BucketVersioning if b.VersioningEnabled { v = &raw.BucketVersioning{Enabled: true} } return &raw.Bucket{ Name: b.Name, DefaultObjectAcl: dACL, Location: b.Location, StorageClass: b.StorageClass, Acl: acl, Versioning: v, Labels: labels, } } type BucketAttrsToUpdate struct { // VersioningEnabled, if set, updates whether the bucket uses versioning. VersioningEnabled optional.Bool setLabels map[string]string deleteLabels map[string]bool } // SetLabel causes a label to be added or modified when ua is used // in a call to Bucket.Update. func (ua *BucketAttrsToUpdate) SetLabel(name, value string) { if ua.setLabels == nil { ua.setLabels = map[string]string{} } ua.setLabels[name] = value } // DeleteLabel causes a label to be deleted when ua is used in a // call to Bucket.Update. func (ua *BucketAttrsToUpdate) DeleteLabel(name string) { if ua.deleteLabels == nil { ua.deleteLabels = map[string]bool{} } ua.deleteLabels[name] = true } func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { rb := &raw.Bucket{} if ua.VersioningEnabled != nil { rb.Versioning = &raw.BucketVersioning{ Enabled: optional.ToBool(ua.VersioningEnabled), ForceSendFields: []string{"Enabled"}, } } if ua.setLabels != nil || ua.deleteLabels != nil { rb.Labels = map[string]string{} for k, v := range ua.setLabels { rb.Labels[k] = v } if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 { rb.ForceSendFields = append(rb.ForceSendFields, "Labels") } for l := range ua.deleteLabels { rb.NullFields = append(rb.NullFields, "Labels."+l) } } return rb } // If returns a new BucketHandle that applies a set of preconditions. // Preconditions already set on the BucketHandle are ignored. // Operations on the new handle will only occur if the preconditions are // satisfied. The only valid preconditions for buckets are MetagenerationMatch // and MetagenerationNotMatch. 
func (b *BucketHandle) If(conds BucketConditions) *BucketHandle { b2 := *b b2.conds = &conds return &b2 } // BucketConditions constrain bucket methods to act on specific metagenerations. // // The zero value is an empty set of constraints. type BucketConditions struct { // MetagenerationMatch specifies that the bucket must have the given // metageneration for the operation to occur. // If MetagenerationMatch is zero, it has no effect. MetagenerationMatch int64 // MetagenerationNotMatch specifies that the bucket must not have the given // metageneration for the operation to occur. // If MetagenerationNotMatch is zero, it has no effect. MetagenerationNotMatch int64 } func (c *BucketConditions) validate(method string) error { if *c == (BucketConditions{}) { return fmt.Errorf("storage: %s: empty conditions", method) } if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 { return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) } return nil } // applyBucketConds modifies the provided call using the conditions in conds. // call is something that quacks like a *raw.WhateverCall. func applyBucketConds(method string, conds *BucketConditions, call interface{}) error { if conds == nil { return nil } if err := conds.validate(method); err != nil { return err } cval := reflect.ValueOf(call) switch { case conds.MetagenerationMatch != 0: if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) } case conds.MetagenerationNotMatch != 0: if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) } } return nil } // Objects returns an iterator over the objects in the bucket that match the Query q. // If q is nil, no filtering is done. 
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { it := &ObjectIterator{ ctx: ctx, bucket: b, } it.pageInfo, it.nextFunc = iterator.NewPageInfo( it.fetch, func() int { return len(it.items) }, func() interface{} { b := it.items; it.items = nil; return b }) if q != nil { it.query = *q } return it } // An ObjectIterator is an iterator over ObjectAttrs. type ObjectIterator struct { ctx context.Context bucket *BucketHandle query Query pageInfo *iterator.PageInfo nextFunc func() error items []*ObjectAttrs } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if // there are no more results. Once Next returns iterator.Done, all subsequent // calls will return iterator.Done. // // If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will // have a non-empty Prefix field, and a zero value for all other fields. These // represent prefixes. 
func (it *ObjectIterator) Next() (*ObjectAttrs, error) { if err := it.nextFunc(); err != nil { return nil, err } item := it.items[0] it.items = it.items[1:] return item, nil } func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { req := it.bucket.c.raw.Objects.List(it.bucket.name) setClientHeader(req.Header()) req.Projection("full") req.Delimiter(it.query.Delimiter) req.Prefix(it.query.Prefix) req.Versions(it.query.Versions) req.PageToken(pageToken) if pageSize > 0 { req.MaxResults(int64(pageSize)) } var resp *raw.Objects var err error err = runWithRetry(it.ctx, func() error { resp, err = req.Context(it.ctx).Do() return err }) if err != nil { if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { err = ErrBucketNotExist } return "", err } for _, item := range resp.Items { it.items = append(it.items, newObject(item)) } for _, prefix := range resp.Prefixes { it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) } return resp.NextPageToken, nil } // TODO(jbd): Add storage.buckets.update. // Buckets returns an iterator over the buckets in the project. You may // optionally set the iterator's Prefix field to restrict the list to buckets // whose names begin with the prefix. By default, all buckets in the project // are returned. func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { it := &BucketIterator{ ctx: ctx, client: c, projectID: projectID, } it.pageInfo, it.nextFunc = iterator.NewPageInfo( it.fetch, func() int { return len(it.buckets) }, func() interface{} { b := it.buckets; it.buckets = nil; return b }) return it } // A BucketIterator is an iterator over BucketAttrs. type BucketIterator struct { // Prefix restricts the iterator to buckets whose names begin with it. Prefix string ctx context.Context client *Client projectID string buckets []*BucketAttrs pageInfo *iterator.PageInfo nextFunc func() error } // Next returns the next result. 
Its second return value is iterator.Done if // there are no more results. Once Next returns iterator.Done, all subsequent // calls will return iterator.Done. func (it *BucketIterator) Next() (*BucketAttrs, error) { if err := it.nextFunc(); err != nil { return nil, err } b := it.buckets[0] it.buckets = it.buckets[1:] return b, nil } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) { req := it.client.raw.Buckets.List(it.projectID) setClientHeader(req.Header()) req.Projection("full") req.Prefix(it.Prefix) req.PageToken(pageToken) if pageSize > 0 { req.MaxResults(int64(pageSize)) } var resp *raw.Buckets var err error err = runWithRetry(it.ctx, func() error { resp, err = req.Context(it.ctx).Do() return err }) if err != nil { return "", err } for _, item := range resp.Items { it.buckets = append(it.buckets, newBucket(item)) } return resp.NextPageToken, nil } golang-google-cloud-0.9.0/storage/bucket_test.go000066400000000000000000000113711312234511600216550ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package storage import ( "net/http" "reflect" "testing" "time" "cloud.google.com/go/internal/pretty" raw "google.golang.org/api/storage/v1" ) func TestBucketAttrsToRawBucket(t *testing.T) { t.Parallel() attrs := &BucketAttrs{ Name: "name", ACL: []ACLRule{{Entity: "bob@example.com", Role: RoleOwner}}, DefaultObjectACL: []ACLRule{{Entity: AllUsers, Role: RoleReader}}, Location: "loc", StorageClass: "class", VersioningEnabled: false, // should be ignored: MetaGeneration: 39, Created: time.Now(), Labels: map[string]string{"label": "value"}, } got := attrs.toRawBucket() want := &raw.Bucket{ Name: "name", Acl: []*raw.BucketAccessControl{ {Entity: "bob@example.com", Role: "OWNER"}, }, DefaultObjectAcl: []*raw.ObjectAccessControl{ {Entity: "allUsers", Role: "READER"}, }, Location: "loc", StorageClass: "class", Versioning: nil, // ignore VersioningEnabled if flase Labels: map[string]string{"label": "value"}, } msg, ok, err := pretty.Diff(want, got) if err != nil { t.Fatal(err) } if !ok { t.Error(msg) } attrs.VersioningEnabled = true got = attrs.toRawBucket() want.Versioning = &raw.BucketVersioning{Enabled: true} msg, ok, err = pretty.Diff(want, got) if err != nil { t.Fatal(err) } if !ok { t.Error(msg) } } func TestBucketAttrsToUpdateToRawBucket(t *testing.T) { t.Parallel() au := &BucketAttrsToUpdate{VersioningEnabled: false} au.SetLabel("a", "foo") au.DeleteLabel("b") au.SetLabel("c", "") got := au.toRawBucket() want := &raw.Bucket{ Versioning: &raw.BucketVersioning{ Enabled: false, ForceSendFields: []string{"Enabled"}, }, Labels: map[string]string{ "a": "foo", "c": "", }, NullFields: []string{"Labels.b"}, } msg, ok, err := pretty.Diff(want, got) if err != nil { t.Fatal(err) } if !ok { t.Error(msg) } var au2 BucketAttrsToUpdate au2.DeleteLabel("b") got = au2.toRawBucket() want = &raw.Bucket{ Labels: map[string]string{}, ForceSendFields: []string{"Labels"}, NullFields: []string{"Labels.b"}, } msg, ok, err = pretty.Diff(want, got) if err != nil { t.Fatal(err) } if !ok { 
t.Error(msg) } } func TestCallBuilders(t *testing.T) { rc, err := raw.New(&http.Client{}) if err != nil { t.Fatal(err) } c := &Client{raw: rc} const metagen = 17 b := c.Bucket("name") bm := b.If(BucketConditions{MetagenerationMatch: metagen}) for i, test := range []struct { callFunc func(*BucketHandle) (interface{}, error) want interface { Header() http.Header } metagenFunc func(interface{}) }{ { func(b *BucketHandle) (interface{}, error) { return b.newGetCall() }, rc.Buckets.Get("name").Projection("full"), func(req interface{}) { req.(*raw.BucketsGetCall).IfMetagenerationMatch(metagen) }, }, { func(b *BucketHandle) (interface{}, error) { return b.newDeleteCall() }, rc.Buckets.Delete("name"), func(req interface{}) { req.(*raw.BucketsDeleteCall).IfMetagenerationMatch(metagen) }, }, { func(b *BucketHandle) (interface{}, error) { return b.newPatchCall(&BucketAttrsToUpdate{VersioningEnabled: false}) }, rc.Buckets.Patch("name", &raw.Bucket{ Versioning: &raw.BucketVersioning{Enabled: false, ForceSendFields: []string{"Enabled"}}, }).Projection("full"), func(req interface{}) { req.(*raw.BucketsPatchCall).IfMetagenerationMatch(metagen) }, }, } { got, err := test.callFunc(b) if err != nil { t.Fatal(err) } setClientHeader(test.want.Header()) if !reflect.DeepEqual(got, test.want) { t.Errorf("#%d: got %#v, want %#v", i, got, test.want) } got, err = test.callFunc(bm) if err != nil { t.Fatal(err) } test.metagenFunc(test.want) if !reflect.DeepEqual(got, test.want) { t.Errorf("#%d: got %#v, want %#v", i, got, test.want) } } // Error. 
bm = b.If(BucketConditions{MetagenerationMatch: 1, MetagenerationNotMatch: 2}) if _, err := bm.newGetCall(); err == nil { t.Errorf("got nil, want error") } if _, err := bm.newDeleteCall(); err == nil { t.Errorf("got nil, want error") } if _, err := bm.newPatchCall(&BucketAttrsToUpdate{}); err == nil { t.Errorf("got nil, want error") } } golang-google-cloud-0.9.0/storage/copy.go000066400000000000000000000147051312234511600203170ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "errors" "fmt" "golang.org/x/net/context" raw "google.golang.org/api/storage/v1" ) // CopierFrom creates a Copier that can copy src to dst. // You can immediately call Run on the returned Copier, or // you can configure it first. func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier { return &Copier{dst: dst, src: src} } // A Copier copies a source object to a destination. type Copier struct { // ObjectAttrs are optional attributes to set on the destination object. // Any attributes must be initialized before any calls on the Copier. Nil // or zero-valued attributes are ignored. ObjectAttrs // RewriteToken can be set before calling Run to resume a copy // operation. After Run returns a non-nil error, RewriteToken will // have been updated to contain the value needed to resume the copy. 
RewriteToken string // ProgressFunc can be used to monitor the progress of a multi-RPC copy // operation. If ProgressFunc is not nil and CopyFrom requires multiple // calls to the underlying service (see // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then // ProgressFunc will be invoked after each call with the number of bytes of // content copied so far and the total size in bytes of the source object. // // ProgressFunc is intended to make upload progress available to the // application. For example, the implementation of ProgressFunc may update // a progress bar in the application's UI, or log the result of // float64(copiedBytes)/float64(totalBytes). // // ProgressFunc should return quickly without blocking. ProgressFunc func(copiedBytes, totalBytes uint64) dst, src *ObjectHandle } // Run performs the copy. func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) { if err := c.src.validate(); err != nil { return nil, err } if err := c.dst.validate(); err != nil { return nil, err } // Convert destination attributes to raw form, omitting the bucket. // If the bucket is included but name or content-type aren't, the service // returns a 400 with "Required" as the only message. Omitting the bucket // does not cause any problems. rawObject := c.ObjectAttrs.toRawObject("") for { res, err := c.callRewrite(ctx, c.src, rawObject) if err != nil { return nil, err } if c.ProgressFunc != nil { c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize)) } if res.Done { // Finished successfully. 
return newObject(res.Resource), nil } } } func (c *Copier) callRewrite(ctx context.Context, src *ObjectHandle, rawObj *raw.Object) (*raw.RewriteResponse, error) { call := c.dst.c.raw.Objects.Rewrite(src.bucket, src.object, c.dst.bucket, c.dst.object, rawObj) call.Context(ctx).Projection("full") if c.RewriteToken != "" { call.RewriteToken(c.RewriteToken) } if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { return nil, err } if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil { return nil, err } if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { return nil, err } if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil { return nil, err } var res *raw.RewriteResponse var err error setClientHeader(call.Header()) err = runWithRetry(ctx, func() error { res, err = call.Do(); return err }) if err != nil { return nil, err } c.RewriteToken = res.RewriteToken return res, nil } // ComposerFrom creates a Composer that can compose srcs into dst. // You can immediately call Run on the returned Composer, or you can // configure it first. // // The encryption key for the destination object will be used to decrypt all // source objects and encrypt the destination object. It is an error // to specify an encryption key for any of the source objects. func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { return &Composer{dst: dst, srcs: srcs} } // A Composer composes source objects into a destination object. type Composer struct { // ObjectAttrs are optional attributes to set on the destination object. // Any attributes must be initialized before any calls on the Composer. Nil // or zero-valued attributes are ignored. ObjectAttrs dst *ObjectHandle srcs []*ObjectHandle } // Run performs the compose operation. 
func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) { if err := c.dst.validate(); err != nil { return nil, err } if len(c.srcs) == 0 { return nil, errors.New("storage: at least one source object must be specified") } req := &raw.ComposeRequest{} // Compose requires a non-empty Destination, so we always set it, // even if the caller-provided ObjectAttrs is the zero value. req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) for _, src := range c.srcs { if err := src.validate(); err != nil { return nil, err } if src.bucket != c.dst.bucket { return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket) } if src.encryptionKey != nil { return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) } srcObj := &raw.ComposeRequestSourceObjects{ Name: src.object, } if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { return nil, err } req.SourceObjects = append(req.SourceObjects, srcObj) } call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx) if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil { return nil, err } if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { return nil, err } var obj *raw.Object var err error setClientHeader(call.Header()) err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) if err != nil { return nil, err } return newObject(obj), nil } golang-google-cloud-0.9.0/storage/doc.go000066400000000000000000000124201312234511600201020ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package storage provides an easy way to work with Google Cloud Storage. Google Cloud Storage stores data in named objects, which are grouped into buckets. More information about Google Cloud Storage is available at https://cloud.google.com/storage/docs. All of the methods of this package use exponential backoff to retry calls that fail with certain errors, as described in https://cloud.google.com/storage/docs/exponential-backoff. Note: This package is in beta. Some backwards-incompatible changes may occur. Creating a Client To start working with this package, create a client: ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: Handle error. } Buckets A Google Cloud Storage bucket is a collection of objects. To work with a bucket, make a bucket handle: bkt := client.Bucket(bucketName) A handle is a reference to a bucket. You can have a handle even if the bucket doesn't exist yet. To create a bucket in Google Cloud Storage, call Create on the handle: if err := bkt.Create(ctx, projectID, nil); err != nil { // TODO: Handle error. } Note that although buckets are associated with projects, bucket names are global across all projects. Each bucket has associated metadata, represented in this package by BucketAttrs. The third argument to BucketHandle.Create allows you to set the intial BucketAttrs of a bucket. To retrieve a bucket's attributes, use Attrs: attrs, err := bkt.Attrs(ctx) if err != nil { // TODO: Handle error. 
} fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) Objects An object holds arbitrary data as a sequence of bytes, like a file. You refer to objects using a handle, just as with buckets. You can use the standard Go io.Reader and io.Writer interfaces to read and write object data: obj := bkt.Object("data") // Write something to obj. // w implements io.Writer. w := obj.NewWriter(ctx) // Write some text to obj. This will overwrite whatever is there. if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { // TODO: Handle error. } // Close, just like writing a file. if err := w.Close(); err != nil { // TODO: Handle error. } // Read it back. r, err := obj.NewReader(ctx) if err != nil { // TODO: Handle error. } defer r.Close() if _, err := io.Copy(os.Stdout, r); err != nil { // TODO: Handle error. } // Prints "This object contains text." Objects also have attributes, which you can fetch with Attrs: objAttrs, err := obj.Attrs(ctx) if err != nil { // TODO: Handle error. } fmt.Printf("object %s has size %d and can be read using %s\n", objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) ACLs Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of ACLRules, each of which specifies the role of a user, group or project. ACLs are suitable for fine-grained control, but you may prefer using IAM to control access at the project level (see https://cloud.google.com/storage/docs/access-control/iam). To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method: acls, err := obj.ACL().List(ctx) if err != nil { // TODO: Handle error. } for _, rule := range acls { fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) } You can also set and delete ACLs. Conditions Every object has a generation and a metageneration. The generation changes whenever the content changes, and the metageneration changes whenever the metadata changes. 
Conditions let you check these values before an operation; the operation only executes if the conditions match. You can use conditions to prevent race conditions in read-modify-write operations. For example, say you've read an object's metadata into objAttrs. Now you want to write to that object, but only if its contents haven't changed since you read it. Here is how to express that: w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) // Proceed with writing as above. Signed URLs You can obtain a URL that lets anyone read or write an object for a limited time. You don't need to create a client to do this. See the documentation of SignedURL for details. url, err := storage.SignedURL(bucketName, "shared-object", opts) if err != nil { // TODO: Handle error. } fmt.Println(url) Authentication See examples of authorization and authentication at https://godoc.org/cloud.google.com/go#pkg-examples. */ package storage // import "cloud.google.com/go/storage" golang-google-cloud-0.9.0/storage/example_test.go000066400000000000000000000327621312234511600220420ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package storage_test import ( "fmt" "io" "io/ioutil" "log" "os" "time" "cloud.google.com/go/storage" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func ExampleNewClient() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } // Use the client. // Close the client when finished. if err := client.Close(); err != nil { // TODO: handle error. } } func ExampleNewClient_auth() { ctx := context.Background() // Use Google Application Default Credentials to authorize and authenticate the client. // More information about Application Default Credentials and how to enable is at // https://developers.google.com/identity/protocols/application-default-credentials. client, err := storage.NewClient(ctx) if err != nil { log.Fatal(err) } // Use the client. // Close the client when finished. if err := client.Close(); err != nil { log.Fatal(err) } } func ExampleBucketHandle_Create() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } if err := client.Bucket("my-bucket").Create(ctx, "my-project", nil); err != nil { // TODO: handle error. } } func ExampleBucketHandle_Delete() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } if err := client.Bucket("my-bucket").Delete(ctx); err != nil { // TODO: handle error. } } func ExampleBucketHandle_Attrs() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } attrs, err := client.Bucket("my-bucket").Attrs(ctx) if err != nil { // TODO: handle error. } fmt.Println(attrs) } func ExampleBucketHandle_Update() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } // Enable versioning in the bucket, regardless of its previous value. 
attrs, err := client.Bucket("my-bucket").Update(ctx, storage.BucketAttrsToUpdate{VersioningEnabled: true}) if err != nil { // TODO: handle error. } fmt.Println(attrs) } // If your update is based on the bucket's previous attributes, match the // metageneration number to make sure the bucket hasn't changed since you read it. func ExampleBucketHandle_Update_readModifyWrite() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } b := client.Bucket("my-bucket") attrs, err := b.Attrs(ctx) if err != nil { // TODO: handle error. } var au storage.BucketAttrsToUpdate au.SetLabel("lab", attrs.Labels["lab"]+"-more") if attrs.Labels["delete-me"] == "yes" { au.DeleteLabel("delete-me") } attrs, err = b. If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}). Update(ctx, au) if err != nil { // TODO: handle error. } fmt.Println(attrs) } func ExampleClient_Buckets() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } it := client.Bucket("my-bucket") _ = it // TODO: iterate using Next or iterator.Pager. } func ExampleBucketIterator_Next() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } it := client.Buckets(ctx, "my-project") for { bucketAttrs, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(bucketAttrs) } } func ExampleBucketHandle_Objects() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } it := client.Bucket("my-bucket").Objects(ctx, nil) _ = it // TODO: iterate using Next or iterator.Pager. } func ExampleObjectIterator_Next() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. 
} it := client.Bucket("my-bucket").Objects(ctx, nil) for { objAttrs, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(objAttrs) } } func ExampleSignedURL() { pkey, err := ioutil.ReadFile("my-private-key.pem") if err != nil { // TODO: handle error. } url, err := storage.SignedURL("my-bucket", "my-object", &storage.SignedURLOptions{ GoogleAccessID: "xxx@developer.gserviceaccount.com", PrivateKey: pkey, Method: "GET", Expires: time.Now().Add(48 * time.Hour), }) if err != nil { // TODO: handle error. } fmt.Println(url) } func ExampleObjectHandle_Attrs() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } objAttrs, err := client.Bucket("my-bucket").Object("my-object").Attrs(ctx) if err != nil { // TODO: handle error. } fmt.Println(objAttrs) } func ExampleObjectHandle_Attrs_withConditions() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } obj := client.Bucket("my-bucket").Object("my-object") // Read the object. objAttrs1, err := obj.Attrs(ctx) if err != nil { // TODO: handle error. } // Do something else for a while. time.Sleep(5 * time.Minute) // Now read the same contents, even if the object has been written since the last read. objAttrs2, err := obj.Generation(objAttrs1.Generation).Attrs(ctx) if err != nil { // TODO: handle error. } fmt.Println(objAttrs1, objAttrs2) } func ExampleObjectHandle_Update() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } // Change only the content type of the object. objAttrs, err := client.Bucket("my-bucket").Object("my-object").Update(ctx, storage.ObjectAttrsToUpdate{ ContentType: "text/html", ContentDisposition: "", // delete ContentDisposition }) if err != nil { // TODO: handle error. 
} fmt.Println(objAttrs) } func ExampleObjectHandle_NewReader() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } rc, err := client.Bucket("my-bucket").Object("my-object").NewReader(ctx) if err != nil { // TODO: handle error. } slurp, err := ioutil.ReadAll(rc) rc.Close() if err != nil { // TODO: handle error. } fmt.Println("file contents:", slurp) } func ExampleObjectHandle_NewRangeReader() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } // Read only the first 64K. rc, err := client.Bucket("bucketname").Object("filename1").NewRangeReader(ctx, 0, 64*1024) if err != nil { // TODO: handle error. } slurp, err := ioutil.ReadAll(rc) rc.Close() if err != nil { // TODO: handle error. } fmt.Println("first 64K of file contents:", slurp) } func ExampleObjectHandle_NewWriter() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx) _ = wc // TODO: Use the Writer. } func ExampleWriter_Write() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx) wc.ContentType = "text/plain" wc.ACL = []storage.ACLRule{{storage.AllUsers, storage.RoleReader}} if _, err := wc.Write([]byte("hello world")); err != nil { // TODO: handle error. } if err := wc.Close(); err != nil { // TODO: handle error. } fmt.Println("updated object:", wc.Attrs()) } func ExampleObjectHandle_Delete() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } // To delete multiple objects in a bucket, list them with an // ObjectIterator, then Delete them. // If you are using this package on the App Engine Flex runtime, // you can init a bucket client with your app's default bucket name. 
// See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName. bucket := client.Bucket("my-bucket") it := bucket.Objects(ctx, nil) for { objAttrs, err := it.Next() if err != nil && err != iterator.Done { // TODO: Handle error. } if err == iterator.Done { break } if err := bucket.Object(objAttrs.Name).Delete(ctx); err != nil { // TODO: Handle error. } } fmt.Println("deleted all object items in the bucket specified.") } func ExampleACLHandle_Delete() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } // No longer grant access to the bucket to everyone on the Internet. if err := client.Bucket("my-bucket").ACL().Delete(ctx, storage.AllUsers); err != nil { // TODO: handle error. } } func ExampleACLHandle_Set() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } // Let any authenticated user read my-bucket/my-object. obj := client.Bucket("my-bucket").Object("my-object") if err := obj.ACL().Set(ctx, storage.AllAuthenticatedUsers, storage.RoleReader); err != nil { // TODO: handle error. } } func ExampleACLHandle_List() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } // List the default object ACLs for my-bucket. aclRules, err := client.Bucket("my-bucket").DefaultObjectACL().List(ctx) if err != nil { // TODO: handle error. } fmt.Println(aclRules) } func ExampleCopier_Run() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } src := client.Bucket("bucketname").Object("file1") dst := client.Bucket("another-bucketname").Object("file2") // Copy content and modify metadata. copier := dst.CopierFrom(src) copier.ContentType = "text/plain" attrs, err := copier.Run(ctx) if err != nil { // TODO: Handle error, possibly resuming with copier.RewriteToken. } fmt.Println(attrs) // Just copy content. 
attrs, err = dst.CopierFrom(src).Run(ctx) if err != nil { // TODO: Handle error. No way to resume. } fmt.Println(attrs) } func ExampleCopier_Run_progress() { // Display progress across multiple rewrite RPCs. ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } src := client.Bucket("bucketname").Object("file1") dst := client.Bucket("another-bucketname").Object("file2") copier := dst.CopierFrom(src) copier.ProgressFunc = func(copiedBytes, totalBytes uint64) { log.Printf("copy %.1f%% done", float64(copiedBytes)/float64(totalBytes)*100) } if _, err := copier.Run(ctx); err != nil { // TODO: handle error. } } var key1, key2 []byte func ExampleObjectHandle_CopierFrom_rotateEncryptionKeys() { // To rotate the encryption key on an object, copy it onto itself. ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } obj := client.Bucket("bucketname").Object("obj") // Assume obj is encrypted with key1, and we want to change to key2. _, err = obj.Key(key2).CopierFrom(obj.Key(key1)).Run(ctx) if err != nil { // TODO: handle error. } } func ExampleComposer_Run() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } bkt := client.Bucket("bucketname") src1 := bkt.Object("o1") src2 := bkt.Object("o2") dst := bkt.Object("o3") // Compose and modify metadata. c := dst.ComposerFrom(src1, src2) c.ContentType = "text/plain" attrs, err := c.Run(ctx) if err != nil { // TODO: Handle error. } fmt.Println(attrs) // Just compose. attrs, err = dst.ComposerFrom(src1, src2).Run(ctx) if err != nil { // TODO: Handle error. } fmt.Println(attrs) } var gen int64 func ExampleObjectHandle_Generation() { // Read an object's contents from generation gen, regardless of the // current generation of the object. ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. 
} obj := client.Bucket("my-bucket").Object("my-object") rc, err := obj.Generation(gen).NewReader(ctx) if err != nil { // TODO: handle error. } defer rc.Close() if _, err := io.Copy(os.Stdout, rc); err != nil { // TODO: handle error. } } func ExampleObjectHandle_If() { // Read from an object only if the current generation is gen. ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } obj := client.Bucket("my-bucket").Object("my-object") rc, err := obj.If(storage.Conditions{GenerationMatch: gen}).NewReader(ctx) if err != nil { // TODO: handle error. } defer rc.Close() if _, err := io.Copy(os.Stdout, rc); err != nil { // TODO: handle error. } } var secretKey []byte func ExampleObjectHandle_Key() { ctx := context.Background() client, err := storage.NewClient(ctx) if err != nil { // TODO: handle error. } obj := client.Bucket("my-bucket").Object("my-object") // Encrypt the object's contents. w := obj.Key(secretKey).NewWriter(ctx) if _, err := w.Write([]byte("top secret")); err != nil { // TODO: handle error. } if err := w.Close(); err != nil { // TODO: handle error. } } golang-google-cloud-0.9.0/storage/go17.go000066400000000000000000000014141312234511600201130ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// +build go1.7 package storage import ( "context" "net/http" ) func withContext(r *http.Request, ctx context.Context) *http.Request { return r.WithContext(ctx) } golang-google-cloud-0.9.0/storage/iam.go000066400000000000000000000055451312234511600201150ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "cloud.google.com/go/iam" "golang.org/x/net/context" raw "google.golang.org/api/storage/v1" iampb "google.golang.org/genproto/googleapis/iam/v1" ) // IAM provides access to IAM access control for the bucket. func (b *BucketHandle) IAM() *iam.Handle { return iam.InternalNewHandleClient(&iamClient{raw: b.c.raw}, b.name) } // iamClient implements the iam.client interface. 
type iamClient struct { raw *raw.Service } func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) { req := c.raw.Buckets.GetIamPolicy(resource) setClientHeader(req.Header()) var rp *raw.Policy var err error err = runWithRetry(ctx, func() error { rp, err = req.Context(ctx).Do() return err }) if err != nil { return nil, err } return iamFromStoragePolicy(rp), nil } func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error { rp := iamToStoragePolicy(p) req := c.raw.Buckets.SetIamPolicy(resource, rp) setClientHeader(req.Header()) return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do() return err }) } func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { req := c.raw.Buckets.TestIamPermissions(resource, perms) setClientHeader(req.Header()) var res *raw.TestIamPermissionsResponse var err error err = runWithRetry(ctx, func() error { res, err = req.Context(ctx).Do() return err }) if err != nil { return nil, err } return res.Permissions, nil } func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { return &raw.Policy{ Bindings: iamToStorageBindings(ip.Bindings), Etag: string(ip.Etag), } } func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { var rbs []*raw.PolicyBindings for _, ib := range ibs { rbs = append(rbs, &raw.PolicyBindings{ Role: ib.Role, Members: ib.Members, }) } return rbs } func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { return &iampb.Policy{ Bindings: iamFromStorageBindings(rp.Bindings), Etag: []byte(rp.Etag), } } func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { var ibs []*iampb.Binding for _, rb := range rbs { ibs = append(ibs, &iampb.Binding{ Role: rb.Role, Members: rb.Members, }) } return ibs } golang-google-cloud-0.9.0/storage/integration_test.go000066400000000000000000001210231312234511600227170ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "bytes" "compress/gzip" "crypto/md5" "crypto/sha256" "encoding/base64" "flag" "fmt" "hash/crc32" "io" "io/ioutil" "log" "math/rand" "net/http" "os" "reflect" "sort" "strconv" "strings" "testing" "time" "golang.org/x/net/context" "cloud.google.com/go/iam" "cloud.google.com/go/internal/testutil" "google.golang.org/api/googleapi" "google.golang.org/api/iterator" itesting "google.golang.org/api/iterator/testing" "google.golang.org/api/option" ) const testPrefix = "-go-cloud-storage-test" // suffix is a timestamp-based suffix which is added to all buckets created by // tests. This reduces flakiness when the tests are run in parallel and allows // automatic cleaning up of artifacts left when tests fail. var suffix = fmt.Sprintf("%s-%d", testPrefix, time.Now().UnixNano()) func TestMain(m *testing.M) { integrationTest := initIntegrationTest() exit := m.Run() if integrationTest { if err := cleanup(); err != nil { // No need to be loud if cleanup() fails; we'll get // any undeleted buckets next time. log.Printf("Post-test cleanup failed: %v\n", err) } } os.Exit(exit) } // If integration tests will be run, create a unique bucket for them. 
func initIntegrationTest() bool { flag.Parse() // needed for testing.Short() ctx := context.Background() if testing.Short() { return false } client, bucket := config(ctx) if client == nil { return false } defer client.Close() if err := client.Bucket(bucket).Create(ctx, testutil.ProjID(), nil); err != nil { log.Fatalf("creating bucket %q: %v", bucket, err) } return true } // testConfig returns the Client used to access GCS and the default bucket // name to use. testConfig skips the current test if credentials are not // available or when being run in Short mode. func testConfig(ctx context.Context, t *testing.T) (*Client, string) { if testing.Short() { t.Skip("Integration tests skipped in short mode") } client, bucket := config(ctx) if client == nil { t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") } return client, bucket } // config is like testConfig, but it doesn't need a *testing.T. func config(ctx context.Context) (*Client, string) { ts := testutil.TokenSource(ctx, ScopeFullControl) if ts == nil { return nil, "" } p := testutil.ProjID() if p == "" { log.Fatal("The project ID must be set. See CONTRIBUTING.md for details") } client, err := NewClient(ctx, option.WithTokenSource(ts)) if err != nil { log.Fatalf("NewClient: %v", err) } return client, p + suffix } func TestBucketMethods(t *testing.T) { ctx := context.Background() client, bucket := testConfig(ctx, t) defer client.Close() projectID := testutil.ProjID() newBucket := bucket + "-new" b := client.Bucket(newBucket) // Test Create and Delete. 
if err := b.Create(ctx, projectID, nil); err != nil { t.Errorf("Bucket(%v).Create(%v, %v) failed: %v", newBucket, projectID, nil, err) } attrs, err := b.Attrs(ctx) if err != nil { t.Error(err) } else { if got, want := attrs.MetaGeneration, int64(1); got != want { t.Errorf("got metagen %d, want %d", got, want) } if got, want := attrs.StorageClass, "STANDARD"; got != want { t.Errorf("got storage class %q, want %q", got, want) } if attrs.VersioningEnabled { t.Error("got versioning enabled, wanted it disabled") } } if err := client.Bucket(newBucket).Delete(ctx); err != nil { t.Errorf("Bucket(%v).Delete failed: %v", newBucket, err) } // Test Create and Delete with attributes. labels := map[string]string{ "l1": "v1", "empty": "", } attrs = &BucketAttrs{ StorageClass: "NEARLINE", VersioningEnabled: true, Labels: labels, } if err := client.Bucket(newBucket).Create(ctx, projectID, attrs); err != nil { t.Errorf("Bucket(%v).Create(%v, %v) failed: %v", newBucket, projectID, attrs, err) } attrs, err = b.Attrs(ctx) if err != nil { t.Error(err) } else { if got, want := attrs.MetaGeneration, int64(1); got != want { t.Errorf("got metagen %d, want %d", got, want) } if got, want := attrs.StorageClass, "NEARLINE"; got != want { t.Errorf("got storage class %q, want %q", got, want) } if !attrs.VersioningEnabled { t.Error("got versioning disabled, wanted it enabled") } if got, want := attrs.Labels, labels; !reflect.DeepEqual(got, want) { t.Errorf("labels: got %v, want %v", got, want) } } if err := client.Bucket(newBucket).Delete(ctx); err != nil { t.Errorf("Bucket(%v).Delete failed: %v", newBucket, err) } } func TestIntegration_BucketUpdate(t *testing.T) { ctx := context.Background() client, bucket := testConfig(ctx, t) defer client.Close() b := client.Bucket(bucket) attrs, err := b.Attrs(ctx) if err != nil { t.Fatal(err) } if attrs.VersioningEnabled { t.Fatal("bucket should not have versioning by default") } if len(attrs.Labels) > 0 { t.Fatal("bucket should not have labels initially") } 
// Using empty BucketAttrsToUpdate should be a no-nop. attrs, err = b.Update(ctx, BucketAttrsToUpdate{}) if err != nil { t.Fatal(err) } if attrs.VersioningEnabled { t.Fatal("should not have versioning") } if len(attrs.Labels) > 0 { t.Fatal("should not have labels") } // Turn on versioning, add some labels. ua := BucketAttrsToUpdate{VersioningEnabled: true} ua.SetLabel("l1", "v1") ua.SetLabel("empty", "") attrs, err = b.Update(ctx, ua) if err != nil { t.Fatal(err) } if !attrs.VersioningEnabled { t.Fatal("should have versioning now") } wantLabels := map[string]string{ "l1": "v1", "empty": "", } if !reflect.DeepEqual(attrs.Labels, wantLabels) { t.Fatalf("got %v, want %v", attrs.Labels, wantLabels) } // Turn off versioning again; add and remove some more labels. ua = BucketAttrsToUpdate{VersioningEnabled: false} ua.SetLabel("l1", "v2") // update ua.SetLabel("new", "new") // create ua.DeleteLabel("empty") // delete ua.DeleteLabel("absent") // delete non-existent attrs, err = b.Update(ctx, ua) if err != nil { t.Fatal(err) } if attrs.VersioningEnabled { t.Fatal("should have versioning off") } wantLabels = map[string]string{ "l1": "v2", "new": "new", } if !reflect.DeepEqual(attrs.Labels, wantLabels) { t.Fatalf("got %v, want %v", attrs.Labels, wantLabels) } } func TestIntegration_ConditionalDelete(t *testing.T) { ctx := context.Background() client, bucket := testConfig(ctx, t) defer client.Close() o := client.Bucket(bucket).Object("conddel") wc := o.NewWriter(ctx) wc.ContentType = "text/plain" if _, err := wc.Write([]byte("foo")); err != nil { t.Fatal(err) } if err := wc.Close(); err != nil { t.Fatal(err) } gen := wc.Attrs().Generation metaGen := wc.Attrs().Metageneration if err := o.Generation(gen - 1).Delete(ctx); err == nil { t.Fatalf("Unexpected successful delete with Generation") } if err := o.If(Conditions{MetagenerationMatch: metaGen + 1}).Delete(ctx); err == nil { t.Fatalf("Unexpected successful delete with IfMetaGenerationMatch") } if err := 
o.If(Conditions{MetagenerationNotMatch: metaGen}).Delete(ctx); err == nil { t.Fatalf("Unexpected successful delete with IfMetaGenerationNotMatch") } if err := o.Generation(gen).Delete(ctx); err != nil { t.Fatalf("final delete failed: %v", err) } } func TestObjects(t *testing.T) { // TODO(djd): there are a lot of closely-related tests here which share // a common setup. Once we can depend on Go 1.7 features, we should refactor // this test to use the sub-test feature. This will increase the readability // of this test, and should also reduce the time it takes to execute. // https://golang.org/pkg/testing/#hdr-Subtests_and_Sub_benchmarks ctx := context.Background() client, bucket := testConfig(ctx, t) defer client.Close() bkt := client.Bucket(bucket) const defaultType = "text/plain" // Populate object names and make a map for their contents. objects := []string{ "obj1", "obj2", "obj/with/slashes", } contents := make(map[string][]byte) // Test Writer. for _, obj := range objects { c := randomContents() if err := writeObject(ctx, bkt.Object(obj), defaultType, c); err != nil { t.Errorf("Write for %v failed with %v", obj, err) } contents[obj] = c } testObjectIterator(t, bkt, objects) // Test Reader. for _, obj := range objects { rc, err := bkt.Object(obj).NewReader(ctx) if err != nil { t.Errorf("Can't create a reader for %v, errored with %v", obj, err) continue } if !rc.checkCRC { t.Errorf("%v: not checking CRC", obj) } slurp, err := ioutil.ReadAll(rc) if err != nil { t.Errorf("Can't ReadAll object %v, errored with %v", obj, err) } if got, want := slurp, contents[obj]; !bytes.Equal(got, want) { t.Errorf("Contents (%q) = %q; want %q", obj, got, want) } if got, want := rc.Size(), len(contents[obj]); got != int64(want) { t.Errorf("Size (%q) = %d; want %d", obj, got, want) } if got, want := rc.ContentType(), "text/plain"; got != want { t.Errorf("ContentType (%q) = %q; want %q", obj, got, want) } rc.Close() // Check early close. 
buf := make([]byte, 1) rc, err = bkt.Object(obj).NewReader(ctx) if err != nil { t.Fatalf("%v: %v", obj, err) } _, err = rc.Read(buf) if err != nil { t.Fatalf("%v: %v", obj, err) } if got, want := buf, contents[obj][:1]; !bytes.Equal(got, want) { t.Errorf("Contents[0] (%q) = %q; want %q", obj, got, want) } if err := rc.Close(); err != nil { t.Errorf("%v Close: %v", obj, err) } // Test SignedURL opts := &SignedURLOptions{ GoogleAccessID: "xxx@clientid", PrivateKey: dummyKey("rsa"), Method: "GET", MD5: "ICy5YqxZB1uWSwcVLSNLcA==", Expires: time.Date(2020, time.October, 2, 10, 0, 0, 0, time.UTC), ContentType: "application/json", Headers: []string{"x-header1", "x-header2"}, } u, err := SignedURL(bucket, obj, opts) if err != nil { t.Fatalf("SignedURL(%q, %q) errored with %v", bucket, obj, err) } res, err := client.hc.Get(u) if err != nil { t.Fatalf("Can't get URL %q: %v", u, err) } slurp, err = ioutil.ReadAll(res.Body) if err != nil { t.Fatalf("Can't ReadAll signed object %v, errored with %v", obj, err) } if got, want := slurp, contents[obj]; !bytes.Equal(got, want) { t.Errorf("Contents (%v) = %q; want %q", obj, got, want) } res.Body.Close() } obj := objects[0] objlen := int64(len(contents[obj])) // Test Range Reader. 
for i, r := range []struct { offset, length, want int64 }{ {0, objlen, objlen}, {0, objlen / 2, objlen / 2}, {objlen / 2, objlen, objlen / 2}, {0, 0, 0}, {objlen / 2, 0, 0}, {objlen / 2, -1, objlen / 2}, {0, objlen * 2, objlen}, } { rc, err := bkt.Object(obj).NewRangeReader(ctx, r.offset, r.length) if err != nil { t.Errorf("%d: Can't create a range reader for %v, errored with %v", i, obj, err) continue } if rc.Size() != objlen { t.Errorf("%d: Reader has a content-size of %d, want %d", i, rc.Size(), objlen) } if rc.Remain() != r.want { t.Errorf("%d: Reader's available bytes reported as %d, want %d", i, rc.Remain(), r.want) } slurp, err := ioutil.ReadAll(rc) if err != nil { t.Errorf("%d:Can't ReadAll object %v, errored with %v", i, obj, err) continue } if len(slurp) != int(r.want) { t.Errorf("%d:RangeReader (%d, %d): Read %d bytes, wanted %d bytes", i, r.offset, r.length, len(slurp), r.want) continue } if got, want := slurp, contents[obj][r.offset:r.offset+r.want]; !bytes.Equal(got, want) { t.Errorf("RangeReader (%d, %d) = %q; want %q", r.offset, r.length, got, want) } rc.Close() } // Test content encoding const zeroCount = 20 << 20 w := bkt.Object("gzip-test").NewWriter(ctx) w.ContentEncoding = "gzip" gw := gzip.NewWriter(w) if _, err := io.Copy(gw, io.LimitReader(zeros{}, zeroCount)); err != nil { t.Fatalf("io.Copy, upload: %v", err) } if err := gw.Close(); err != nil { t.Errorf("gzip.Close(): %v", err) } if err := w.Close(); err != nil { t.Errorf("w.Close(): %v", err) } r, err := bkt.Object("gzip-test").NewReader(ctx) if err != nil { t.Fatalf("NewReader(gzip-test): %v", err) } n, err := io.Copy(ioutil.Discard, r) if err != nil { t.Errorf("io.Copy, download: %v", err) } if n != zeroCount { t.Errorf("downloaded bad data: got %d bytes, want %d", n, zeroCount) } // Test NotFound. 
_, err = bkt.Object("obj-not-exists").NewReader(ctx) if err != ErrObjectNotExist { t.Errorf("Object should not exist, err found to be %v", err) } objName := objects[0] // Test NewReader googleapi.Error. // Since a 429 or 5xx is hard to cause, we trigger a 416. realLen := len(contents[objName]) _, err = bkt.Object(objName).NewRangeReader(ctx, int64(realLen*2), 10) if err, ok := err.(*googleapi.Error); !ok { t.Error("NewRangeReader did not return a googleapi.Error") } else { if err.Code != 416 { t.Errorf("Code = %d; want %d", err.Code, 416) } if len(err.Header) == 0 { t.Error("Missing googleapi.Error.Header") } if len(err.Body) == 0 { t.Error("Missing googleapi.Error.Body") } } // Test StatObject. o, err := bkt.Object(objName).Attrs(ctx) if err != nil { t.Error(err) } if got, want := o.Name, objName; got != want { t.Errorf("Name (%v) = %q; want %q", objName, got, want) } if got, want := o.ContentType, defaultType; got != want { t.Errorf("ContentType (%v) = %q; want %q", objName, got, want) } created := o.Created // Check that the object is newer than its containing bucket. bAttrs, err := bkt.Attrs(ctx) if err != nil { t.Error(err) } if o.Created.Before(bAttrs.Created) { t.Errorf("Object %v is older than its containing bucket, %v", o, bAttrs) } // Test object copy. copyName := "copy-" + objName copyObj, err := bkt.Object(copyName).CopierFrom(bkt.Object(objName)).Run(ctx) if err != nil { t.Errorf("Copier.Run failed with %v", err) } else if !namesEqual(copyObj, bucket, copyName) { t.Errorf("Copy object bucket, name: got %q.%q, want %q.%q", copyObj.Bucket, copyObj.Name, bucket, copyName) } // Copying with attributes. 
const contentEncoding = "identity" copier := bkt.Object(copyName).CopierFrom(bkt.Object(objName)) copier.ContentEncoding = contentEncoding copyObj, err = copier.Run(ctx) if err != nil { t.Errorf("Copier.Run failed with %v", err) } else { if !namesEqual(copyObj, bucket, copyName) { t.Errorf("Copy object bucket, name: got %q.%q, want %q.%q", copyObj.Bucket, copyObj.Name, bucket, copyName) } if copyObj.ContentEncoding != contentEncoding { t.Errorf("Copy ContentEncoding: got %q, want %q", copyObj.ContentEncoding, contentEncoding) } } // Test UpdateAttrs. metadata := map[string]string{"key": "value"} updated, err := bkt.Object(objName).Update(ctx, ObjectAttrsToUpdate{ ContentType: "text/html", ContentLanguage: "en", Metadata: metadata, ACL: []ACLRule{{Entity: "domain-google.com", Role: RoleReader}}, }) if err != nil { t.Errorf("UpdateAttrs failed with %v", err) } else { if got, want := updated.ContentType, "text/html"; got != want { t.Errorf("updated.ContentType == %q; want %q", got, want) } if got, want := updated.ContentLanguage, "en"; got != want { t.Errorf("updated.ContentLanguage == %q; want %q", updated.ContentLanguage, want) } if got, want := updated.Metadata, metadata; !reflect.DeepEqual(got, want) { t.Errorf("updated.Metadata == %+v; want %+v", updated.Metadata, want) } if got, want := updated.Created, created; got != want { t.Errorf("updated.Created == %q; want %q", got, want) } if !updated.Created.Before(updated.Updated) { t.Errorf("updated.Updated should be newer than update.Created") } } // Delete ContentType and ContentLanguage. 
updated, err = bkt.Object(objName).Update(ctx, ObjectAttrsToUpdate{ ContentType: "", ContentLanguage: "", Metadata: map[string]string{}, }) if err != nil { t.Errorf("UpdateAttrs failed with %v", err) } else { if got, want := updated.ContentType, ""; got != want { t.Errorf("updated.ContentType == %q; want %q", got, want) } if got, want := updated.ContentLanguage, ""; got != want { t.Errorf("updated.ContentLanguage == %q; want %q", updated.ContentLanguage, want) } if updated.Metadata != nil { t.Errorf("updated.Metadata == %+v; want nil", updated.Metadata) } if got, want := updated.Created, created; got != want { t.Errorf("updated.Created == %q; want %q", got, want) } if !updated.Created.Before(updated.Updated) { t.Errorf("updated.Updated should be newer than update.Created") } } // Test checksums. checksumCases := []struct { name string contents [][]byte size int64 md5 string crc32c uint32 }{ { name: "checksum-object", contents: [][]byte{[]byte("hello"), []byte("world")}, size: 10, md5: "fc5e038d38a57032085441e7fe7010b0", crc32c: 1456190592, }, { name: "zero-object", contents: [][]byte{}, size: 0, md5: "d41d8cd98f00b204e9800998ecf8427e", crc32c: 0, }, } for _, c := range checksumCases { wc := bkt.Object(c.name).NewWriter(ctx) for _, data := range c.contents { if _, err := wc.Write(data); err != nil { t.Errorf("Write(%q) failed with %q", data, err) } } if err = wc.Close(); err != nil { t.Errorf("%q: close failed with %q", c.name, err) } obj := wc.Attrs() if got, want := obj.Size, c.size; got != want { t.Errorf("Object (%q) Size = %v; want %v", c.name, got, want) } if got, want := fmt.Sprintf("%x", obj.MD5), c.md5; got != want { t.Errorf("Object (%q) MD5 = %q; want %q", c.name, got, want) } if got, want := obj.CRC32C, c.crc32c; got != want { t.Errorf("Object (%q) CRC32C = %v; want %v", c.name, got, want) } } // Test public ACL. 
publicObj := objects[0] if err = bkt.Object(publicObj).ACL().Set(ctx, AllUsers, RoleReader); err != nil { t.Errorf("PutACLEntry failed with %v", err) } publicClient, err := NewClient(ctx, option.WithHTTPClient(http.DefaultClient)) if err != nil { t.Fatal(err) } slurp, err := readObject(ctx, publicClient.Bucket(bucket).Object(publicObj)) if err != nil { t.Errorf("readObject failed with %v", err) } else if !bytes.Equal(slurp, contents[publicObj]) { t.Errorf("Public object's content: got %q, want %q", slurp, contents[publicObj]) } // Test writer error handling. wc := publicClient.Bucket(bucket).Object(publicObj).NewWriter(ctx) if _, err := wc.Write([]byte("hello")); err != nil { t.Errorf("Write unexpectedly failed with %v", err) } if err = wc.Close(); err == nil { t.Error("Close expected an error, found none") } // Test deleting the copy object. if err := bkt.Object(copyName).Delete(ctx); err != nil { t.Errorf("Deletion of %v failed with %v", copyName, err) } // Deleting it a second time should return ErrObjectNotExist. if err := bkt.Object(copyName).Delete(ctx); err != ErrObjectNotExist { t.Errorf("second deletion of %v = %v; want ErrObjectNotExist", copyName, err) } _, err = bkt.Object(copyName).Attrs(ctx) if err != ErrObjectNotExist { t.Errorf("Copy is expected to be deleted, stat errored with %v", err) } // Test object composition. var compSrcs []*ObjectHandle var wantContents []byte for _, obj := range objects { compSrcs = append(compSrcs, bkt.Object(obj)) wantContents = append(wantContents, contents[obj]...) 
} checkCompose := func(obj *ObjectHandle, wantContentType string) { rc, err := obj.NewReader(ctx) if err != nil { t.Fatalf("NewReader: %v", err) } slurp, err = ioutil.ReadAll(rc) if err != nil { t.Fatalf("ioutil.ReadAll: %v", err) } defer rc.Close() if !bytes.Equal(slurp, wantContents) { t.Errorf("Composed object contents\ngot: %q\nwant: %q", slurp, wantContents) } if got := rc.ContentType(); got != wantContentType { t.Errorf("Composed object content-type = %q, want %q", got, wantContentType) } } // Compose should work even if the user sets no destination attributes. compDst := bkt.Object("composed1") c := compDst.ComposerFrom(compSrcs...) if _, err := c.Run(ctx); err != nil { t.Fatalf("ComposeFrom error: %v", err) } checkCompose(compDst, "application/octet-stream") // It should also work if we do. compDst = bkt.Object("composed2") c = compDst.ComposerFrom(compSrcs...) c.ContentType = "text/json" if _, err := c.Run(ctx); err != nil { t.Fatalf("ComposeFrom error: %v", err) } checkCompose(compDst, "text/json") } func namesEqual(obj *ObjectAttrs, bucketName, objectName string) bool { return obj.Bucket == bucketName && obj.Name == objectName } func testObjectIterator(t *testing.T, bkt *BucketHandle, objects []string) { ctx := context.Background() // Collect the list of items we expect: ObjectAttrs in lexical order by name. 
names := make([]string, len(objects)) copy(names, objects) sort.Strings(names) var attrs []*ObjectAttrs for _, name := range names { attr, err := bkt.Object(name).Attrs(ctx) if err != nil { t.Errorf("Object(%q).Attrs: %v", name, err) return } attrs = append(attrs, attr) } msg, ok := itesting.TestIterator(attrs, func() interface{} { return bkt.Objects(ctx, &Query{Prefix: "obj"}) }, func(it interface{}) (interface{}, error) { return it.(*ObjectIterator).Next() }) if !ok { t.Errorf("ObjectIterator.Next: %s", msg) } // TODO(jba): test query.Delimiter != "" } func TestACL(t *testing.T) { ctx := context.Background() client, bucket := testConfig(ctx, t) defer client.Close() bkt := client.Bucket(bucket) entity := ACLEntity("domain-google.com") rule := ACLRule{Entity: entity, Role: RoleReader} if err := bkt.DefaultObjectACL().Set(ctx, entity, RoleReader); err != nil { t.Errorf("Can't put default ACL rule for the bucket, errored with %v", err) } acl, err := bkt.DefaultObjectACL().List(ctx) if err != nil { t.Errorf("DefaultObjectACL.List for bucket %q: %v", bucket, err) } else if !hasRule(acl, rule) { t.Errorf("default ACL missing %#v", rule) } aclObjects := []string{"acl1", "acl2"} for _, obj := range aclObjects { c := randomContents() if err := writeObject(ctx, bkt.Object(obj), "", c); err != nil { t.Errorf("Write for %v failed with %v", obj, err) } } name := aclObjects[0] o := bkt.Object(name) acl, err = o.ACL().List(ctx) if err != nil { t.Errorf("Can't retrieve ACL of %v", name) } else if !hasRule(acl, rule) { t.Errorf("object ACL missing %+v", rule) } if err := o.ACL().Delete(ctx, entity); err != nil { t.Errorf("object ACL: could not delete entity %s", entity) } // Delete the default ACL rule. We can't move this code earlier in the // test, because the test depends on the fact that the object ACL inherits // it. 
if err := bkt.DefaultObjectACL().Delete(ctx, entity); err != nil { t.Errorf("default ACL: could not delete entity %s", entity) } entity2 := ACLEntity("user-jbd@google.com") rule2 := ACLRule{Entity: entity2, Role: RoleReader} if err := bkt.ACL().Set(ctx, entity2, RoleReader); err != nil { t.Errorf("Error while putting bucket ACL rule: %v", err) } bACL, err := bkt.ACL().List(ctx) if err != nil { t.Errorf("Error while getting the ACL of the bucket: %v", err) } else if !hasRule(bACL, rule2) { t.Errorf("bucket ACL missing %+v", rule2) } if err := bkt.ACL().Delete(ctx, entity2); err != nil { t.Errorf("Error while deleting bucket ACL rule: %v", err) } } func hasRule(acl []ACLRule, rule ACLRule) bool { for _, r := range acl { if r == rule { return true } } return false } func TestValidObjectNames(t *testing.T) { ctx := context.Background() client, bucket := testConfig(ctx, t) defer client.Close() bkt := client.Bucket(bucket) validNames := []string{ "gopher", "Гоферови", "a", strings.Repeat("a", 1024), } for _, name := range validNames { if err := writeObject(ctx, bkt.Object(name), "", []byte("data")); err != nil { t.Errorf("Object %q write failed: %v. Want success", name, err) continue } defer bkt.Object(name).Delete(ctx) } invalidNames := []string{ "", // Too short. strings.Repeat("a", 1025), // Too long. "new\nlines", "bad\xffunicode", } for _, name := range invalidNames { // Invalid object names will either cause failure during Write or Close. if err := writeObject(ctx, bkt.Object(name), "", []byte("data")); err != nil { continue } defer bkt.Object(name).Delete(ctx) t.Errorf("%q should have failed. 
Didn't", name) } } func TestWriterContentType(t *testing.T) { ctx := context.Background() client, bucket := testConfig(ctx, t) defer client.Close() obj := client.Bucket(bucket).Object("content") testCases := []struct { content string setType, wantType string }{ { content: "It was the best of times, it was the worst of times.", wantType: "text/plain; charset=utf-8", }, { content: "My first page", wantType: "text/html; charset=utf-8", }, { content: "My first page", setType: "text/html", wantType: "text/html", }, { content: "My first page", setType: "image/jpeg", wantType: "image/jpeg", }, } for i, tt := range testCases { if err := writeObject(ctx, obj, tt.setType, []byte(tt.content)); err != nil { t.Errorf("writing #%d: %v", i, err) } attrs, err := obj.Attrs(ctx) if err != nil { t.Errorf("obj.Attrs: %v", err) continue } if got := attrs.ContentType; got != tt.wantType { t.Errorf("Content-Type = %q; want %q\nContent: %q\nSet Content-Type: %q", got, tt.wantType, tt.content, tt.setType) } } } func TestZeroSizedObject(t *testing.T) { t.Parallel() ctx := context.Background() client, bucket := testConfig(ctx, t) defer client.Close() obj := client.Bucket(bucket).Object("zero") // Check writing it works as expected. w := obj.NewWriter(ctx) if err := w.Close(); err != nil { t.Fatalf("Writer.Close: %v", err) } defer obj.Delete(ctx) // Check we can read it too. body, err := readObject(ctx, obj) if err != nil { t.Fatalf("readObject: %v", err) } if len(body) != 0 { t.Errorf("Body is %v, want empty []byte{}", body) } } func TestIntegration_Encryption(t *testing.T) { // This function tests customer-supplied encryption keys for all operations // involving objects. Bucket and ACL operations aren't tested because they // aren't affected customer encryption. 
ctx := context.Background() client, bucket := testConfig(ctx, t) defer client.Close() obj := client.Bucket(bucket).Object("customer-encryption") key := []byte("my-secret-AES-256-encryption-key") keyHash := sha256.Sum256(key) keyHashB64 := base64.StdEncoding.EncodeToString(keyHash[:]) key2 := []byte("My-Secret-AES-256-Encryption-Key") contents := "top secret." checkMetadataCall := func(msg string, f func(o *ObjectHandle) (*ObjectAttrs, error)) { // Performing a metadata operation without the key should succeed. attrs, err := f(obj) if err != nil { t.Fatalf("%s: %v", msg, err) } // The key hash should match... if got, want := attrs.CustomerKeySHA256, keyHashB64; got != want { t.Errorf("%s: key hash: got %q, want %q", msg, got, want) } // ...but CRC and MD5 should not be present. if attrs.CRC32C != 0 { t.Errorf("%s: CRC: got %v, want 0", msg, attrs.CRC32C) } if len(attrs.MD5) > 0 { t.Errorf("%s: MD5: got %v, want len == 0", msg, attrs.MD5) } // Performing a metadata operation with the key should succeed. attrs, err = f(obj.Key(key)) if err != nil { t.Fatalf("%s: %v", msg, err) } // Check the key and content hashes. if got, want := attrs.CustomerKeySHA256, keyHashB64; got != want { t.Errorf("%s: key hash: got %q, want %q", msg, got, want) } if attrs.CRC32C == 0 { t.Errorf("%s: CRC: got 0, want non-zero", msg) } if len(attrs.MD5) == 0 { t.Errorf("%s: MD5: got len == 0, want len > 0", msg) } } checkRead := func(msg string, o *ObjectHandle, k []byte, wantContents string) { // Reading the object without the key should fail. if _, err := readObject(ctx, o); err == nil { t.Errorf("%s: reading without key: want error, got nil", msg) } // Reading the object with the key should succeed. got, err := readObject(ctx, o.Key(k)) if err != nil { t.Fatalf("%s: %v", msg, err) } gotContents := string(got) // And the contents should match what we wrote. 
if gotContents != wantContents { t.Errorf("%s: contents: got %q, want %q", msg, gotContents, wantContents) } } checkReadUnencrypted := func(msg string, obj *ObjectHandle, wantContents string) { got, err := readObject(ctx, obj) if err != nil { t.Fatalf("%s: %v", msg, err) } gotContents := string(got) if gotContents != wantContents { t.Errorf("%s: got %q, want %q", gotContents, wantContents) } } // Write to obj using our own encryption key, which is a valid 32-byte // AES-256 key. w := obj.Key(key).NewWriter(ctx) w.Write([]byte(contents)) if err := w.Close(); err != nil { t.Fatal(err) } checkMetadataCall("Attrs", func(o *ObjectHandle) (*ObjectAttrs, error) { return o.Attrs(ctx) }) checkMetadataCall("Update", func(o *ObjectHandle) (*ObjectAttrs, error) { return o.Update(ctx, ObjectAttrsToUpdate{ContentLanguage: "en"}) }) checkRead("first object", obj, key, contents) obj2 := client.Bucket(bucket).Object("customer-encryption-2") // Copying an object without the key should fail. if _, err := obj2.CopierFrom(obj).Run(ctx); err == nil { t.Fatal("want error, got nil") } // Copying an object with the key should succeed. if _, err := obj2.CopierFrom(obj.Key(key)).Run(ctx); err != nil { t.Fatal(err) } // The destination object is not encrypted; we can read it without a key. checkReadUnencrypted("copy dest", obj2, contents) // Providing a key on the destination but not the source should fail, // since the source is encrypted. if _, err := obj2.Key(key2).CopierFrom(obj).Run(ctx); err == nil { t.Fatal("want error, got nil") } // But copying with keys for both source and destination should succeed. if _, err := obj2.Key(key2).CopierFrom(obj.Key(key)).Run(ctx); err != nil { t.Fatal(err) } // And the destination should be encrypted, meaning we can only read it // with a key. checkRead("copy destination", obj2, key2, contents) // Change obj2's key to prepare for compose, where all objects must have // the same key. 
Also illustrates key rotation: copy an object to itself // with a different key. if _, err := obj2.Key(key).CopierFrom(obj2.Key(key2)).Run(ctx); err != nil { t.Fatal(err) } obj3 := client.Bucket(bucket).Object("customer-encryption-3") // Composing without keys should fail. if _, err := obj3.ComposerFrom(obj, obj2).Run(ctx); err == nil { t.Fatal("want error, got nil") } // Keys on the source objects result in an error. if _, err := obj3.ComposerFrom(obj.Key(key), obj2).Run(ctx); err == nil { t.Fatal("want error, got nil") } // A key on the destination object both decrypts the source objects // and encrypts the destination. if _, err := obj3.Key(key).ComposerFrom(obj, obj2).Run(ctx); err != nil { t.Fatalf("got %v, want nil", err) } // Check that the destination in encrypted. checkRead("compose destination", obj3, key, contents+contents) // You can't compose one or more unencrypted source objects into an // encrypted destination object. _, err := obj2.CopierFrom(obj2.Key(key)).Run(ctx) // unencrypt obj2 if err != nil { t.Fatal(err) } if _, err := obj3.Key(key).ComposerFrom(obj2).Run(ctx); err == nil { t.Fatal("got nil, want error") } } func TestIntegration_NonexistentBucket(t *testing.T) { t.Parallel() ctx := context.Background() client, bucket := testConfig(ctx, t) defer client.Close() bkt := client.Bucket(bucket + "-nonexistent") if _, err := bkt.Attrs(ctx); err != ErrBucketNotExist { t.Errorf("Attrs: got %v, want ErrBucketNotExist", err) } it := bkt.Objects(ctx, nil) if _, err := it.Next(); err != ErrBucketNotExist { t.Errorf("Objects: got %v, want ErrBucketNotExist", err) } } func TestIntegration_PerObjectStorageClass(t *testing.T) { const ( defaultStorageClass = "STANDARD" newStorageClass = "MULTI_REGIONAL" ) ctx := context.Background() client, bucket := testConfig(ctx, t) defer client.Close() bkt := client.Bucket(bucket) // The bucket should have the default storage class. 
battrs, err := bkt.Attrs(ctx) if err != nil { t.Fatal(err) } if battrs.StorageClass != defaultStorageClass { t.Fatalf("bucket storage class: got %q, want %q", battrs.StorageClass, defaultStorageClass) } // Write an object; it should start with the bucket's storage class. obj := bkt.Object("posc") if err := writeObject(ctx, obj, "", []byte("foo")); err != nil { t.Fatal(err) } oattrs, err := obj.Attrs(ctx) if err != nil { t.Fatal(err) } if oattrs.StorageClass != defaultStorageClass { t.Fatalf("object storage class: got %q, want %q", oattrs.StorageClass, defaultStorageClass) } // Now use Copy to change the storage class. copier := obj.CopierFrom(obj) copier.StorageClass = newStorageClass oattrs2, err := copier.Run(ctx) if err != nil { log.Fatal(err) } if oattrs2.StorageClass != newStorageClass { t.Fatalf("new object storage class: got %q, want %q", oattrs2.StorageClass, newStorageClass) } // We can also write a new object using a non-default storage class. obj2 := bkt.Object("posc2") w := obj2.NewWriter(ctx) w.StorageClass = newStorageClass if _, err := w.Write([]byte("xxx")); err != nil { t.Fatal(err) } if err := w.Close(); err != nil { t.Fatal(err) } if w.Attrs().StorageClass != newStorageClass { t.Fatalf("new object storage class: got %q, want %q", w.Attrs().StorageClass, newStorageClass) } } func TestIntegration_BucketInCopyAttrs(t *testing.T) { // Confirm that if bucket is included in the object attributes of a rewrite // call, but object name and content-type aren't, then we get an error. See // the comment in Copier.Run. 
ctx := context.Background() client, bucket := testConfig(ctx, t) defer client.Close() bkt := client.Bucket(bucket) obj := bkt.Object("bucketInCopyAttrs") if err := writeObject(ctx, obj, "", []byte("foo")); err != nil { t.Fatal(err) } copier := obj.CopierFrom(obj) rawObject := copier.ObjectAttrs.toRawObject(bucket) _, err := copier.callRewrite(ctx, obj, rawObject) if err == nil { t.Errorf("got nil, want error") } } func TestIntegration_NoUnicodeNormalization(t *testing.T) { t.Parallel() ctx := context.Background() client, _ := testConfig(ctx, t) defer client.Close() bkt := client.Bucket("storage-library-test-bucket") for _, tst := range []struct { nameQuoted, content string }{ {`"Caf\u00e9"`, "Normalization Form C"}, {`"Cafe\u0301"`, "Normalization Form D"}, } { name, err := strconv.Unquote(tst.nameQuoted) if err != nil { t.Fatalf("invalid name: %s: %v", tst.nameQuoted, err) } got, err := readObject(ctx, bkt.Object(name)) if err != nil { t.Fatal(err) } if g := string(got); g != tst.content { t.Errorf("content of %s is %q, want %q", tst.nameQuoted, g, tst.content) } } } func TestIntegration_HashesOnUpload(t *testing.T) { // Check that the user can provide hashes on upload, and that these are checked. if testing.Short() { t.Skip("Integration tests skipped in short mode") } ctx := context.Background() client, bucket := testConfig(ctx, t) if client == nil { t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") } defer client.Close() obj := client.Bucket(bucket).Object("hashesOnUpload-1") data := []byte("I can't wait to be verified") write := func(w *Writer) error { if _, err := w.Write(data); err != nil { w.Close() return err } return w.Close() } crc32c := crc32.Checksum(data, crc32cTable) // The correct CRC should succeed. w := obj.NewWriter(ctx) w.CRC32C = crc32c w.SendCRC32C = true if err := write(w); err != nil { t.Fatal(err) } // If we change the CRC, validation should fail. 
w = obj.NewWriter(ctx) w.CRC32C = crc32c + 1 w.SendCRC32C = true if err := write(w); err == nil { t.Fatal("write with bad CRC32c: want error, got nil") } // If we have the wrong CRC but forget to send it, we succeed. w = obj.NewWriter(ctx) w.CRC32C = crc32c + 1 if err := write(w); err != nil { t.Fatal(err) } // MD5 md5 := md5.Sum(data) // The correct MD5 should succeed. w = obj.NewWriter(ctx) w.MD5 = md5[:] if err := write(w); err != nil { t.Fatal(err) } // If we change the MD5, validation should fail. w = obj.NewWriter(ctx) w.MD5 = append([]byte(nil), md5[:]...) w.MD5[0]++ if err := write(w); err == nil { t.Fatal("write with bad MD5: want error, got nil") } } func TestIntegration_BucketIAM(t *testing.T) { ctx := context.Background() client, bucket := testConfig(ctx, t) defer client.Close() bkt := client.Bucket(bucket) // This bucket is unique to this test run. So we don't have // to worry about other runs interfering with our IAM policy // changes. member := "projectViewer:" + testutil.ProjID() role := iam.RoleName("roles/storage.objectViewer") // Get the bucket's IAM policy. policy, err := bkt.IAM().Policy(ctx) if err != nil { t.Fatalf("Getting policy: %v", err) } // The member should not have the role. if policy.HasRole(member, role) { t.Errorf("member %q has role %q", member, role) } // Change the policy. policy.Add(member, role) if err := bkt.IAM().SetPolicy(ctx, policy); err != nil { t.Fatalf("SetPolicy: %v", err) } // Confirm that the binding was added. policy, err = bkt.IAM().Policy(ctx) if err != nil { t.Fatalf("Getting policy: %v", err) } if !policy.HasRole(member, role) { t.Errorf("member %q does not have role %q", member, role) } // Check TestPermissions. // This client should have all these permissions (and more). 
perms := []string{"storage.buckets.get", "storage.buckets.delete"} got, err := bkt.IAM().TestPermissions(ctx, perms) if err != nil { t.Fatalf("TestPermissions: %v", err) } sort.Strings(perms) sort.Strings(got) if !reflect.DeepEqual(got, perms) { t.Errorf("got %v, want %v", got, perms) } } func writeObject(ctx context.Context, obj *ObjectHandle, contentType string, contents []byte) error { w := obj.NewWriter(ctx) w.ContentType = contentType if contents != nil { if _, err := w.Write(contents); err != nil { _ = w.Close() return err } } return w.Close() } func readObject(ctx context.Context, obj *ObjectHandle) ([]byte, error) { r, err := obj.NewReader(ctx) if err != nil { return nil, err } defer r.Close() return ioutil.ReadAll(r) } // cleanup deletes the bucket used for testing, as well as old // testing buckets that weren't cleaned previously. func cleanup() error { if testing.Short() { return nil // Don't clean up in short mode. } ctx := context.Background() client, bucket := config(ctx) if client == nil { return nil // Don't cleanup if we're not configured correctly. } defer client.Close() if err := killBucket(ctx, client, bucket); err != nil { return err } // Delete buckets whose name begins with our test prefix, and which were // created a while ago. (Unfortunately GCS doesn't provide last-modified // time, which would be a better way to check for staleness.) const expireAge = 24 * time.Hour projectID := testutil.ProjID() it := client.Buckets(ctx, projectID) it.Prefix = projectID + testPrefix for { bktAttrs, err := it.Next() if err == iterator.Done { break } if err != nil { return err } if time.Since(bktAttrs.Created) > expireAge { log.Printf("deleting bucket %q, which is more than %s old", bktAttrs.Name, expireAge) if err := killBucket(ctx, client, bktAttrs.Name); err != nil { return err } } } return nil } // killBucket deletes a bucket and all its objects. 
func killBucket(ctx context.Context, client *Client, bucketName string) error {
	bkt := client.Bucket(bucketName)
	// Bucket must be empty to delete.
	it := bkt.Objects(ctx, nil)
	for {
		objAttrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		if err := bkt.Object(objAttrs.Name).Delete(ctx); err != nil {
			return fmt.Errorf("deleting %q: %v", bucketName+"/"+objAttrs.Name, err)
		}
	}
	// GCS is eventually consistent, so this delete may fail because the
	// replica still sees an object in the bucket. We log the error and expect
	// a later test run to delete the bucket.
	if err := bkt.Delete(ctx); err != nil {
		log.Printf("deleting %q: %v", bucketName, err)
	}
	return nil
}

// randomContents returns a small pseudo-random payload (the MD5 sum of a
// randomized string) for use as throwaway object contents in tests.
func randomContents() []byte {
	h := md5.New()
	io.WriteString(h, fmt.Sprintf("hello world%d", rand.Intn(100000)))
	return h.Sum(nil)
}

// zeros is an io.Reader that reports every Read as fully successful without
// writing to the buffer — an endless stream of whatever the buffer held.
type zeros struct{}

func (zeros) Read(p []byte) (int, error) { return len(p), nil }
golang-google-cloud-0.9.0/storage/invoke.go000066400000000000000000000025151312234511600206340ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"cloud.google.com/go/internal"

	gax "github.com/googleapis/gax-go"

	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
)

// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
func runWithRetry(ctx context.Context, call func() error) error {
	return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
		err = call()
		if err == nil {
			return true, nil
		}
		e, ok := err.(*googleapi.Error)
		if !ok {
			// Not an HTTP error from the API; treat as non-retryable.
			return true, err
		}
		// Retry on 429 and 5xx, according to
		// https://cloud.google.com/storage/docs/exponential-backoff.
		if e.Code == 429 || (e.Code >= 500 && e.Code < 600) {
			return false, nil
		}
		return true, err
	})
}
golang-google-cloud-0.9.0/storage/invoke_test.go000066400000000000000000000030371312234511600216730ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"errors"
	"testing"

	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
)

func TestInvoke(t *testing.T) {
	t.Parallel()
	ctx := context.Background()
	// Time-based tests are flaky. We just make sure that invoke eventually
	// returns with the right error.
	for _, test := range []struct {
		count     int   // number of times to return retryable error
		retryCode int   // error code for retryable error
		err       error // error to return after count returns of retryCode
	}{
		{0, 0, nil},
		{0, 0, errors.New("foo")},
		{1, 429, nil},
		{1, 429, errors.New("bar")},
		{2, 518, nil},
		{2, 599, &googleapi.Error{Code: 428}},
	} {
		counter := 0
		call := func() error {
			counter++
			if counter <= test.count {
				return &googleapi.Error{Code: test.retryCode}
			}
			return test.err
		}
		got := runWithRetry(ctx, call)
		if got != test.err {
			t.Errorf("%v: got %v, want %v", test, got, test.err)
		}
	}
}
golang-google-cloud-0.9.0/storage/not_go17.go000066400000000000000000000014301312234511600207710ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !go1.7

package storage

import (
	"net/http"
)

// withContext returns r unchanged; request-scoped contexts only exist on
// Go 1.7+ (see the go1.7 build variant of this file).
func withContext(r *http.Request, _ interface{}) *http.Request {
	// In Go 1.6 and below, ignore the context.
	return r
}
golang-google-cloud-0.9.0/storage/reader.go000066400000000000000000000041101312234511600205760ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"fmt"
	"hash/crc32"
	"io"
)

// crc32cTable is the Castagnoli polynomial table used to compute the CRC32c
// checksums that GCS reports for object contents.
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)

// Reader reads a Cloud Storage object.
// It implements io.Reader.
type Reader struct {
	body         io.ReadCloser
	remain, size int64
	contentType  string
	checkCRC     bool   // should we check the CRC?
	wantCRC      uint32 // the CRC32c value the server sent in the header
	gotCRC       uint32 // running crc
}

// Close closes the Reader. It must be called when done reading.
func (r *Reader) Close() error {
	return r.body.Close()
}

// Read reads from the underlying body, tracking the remaining byte count and
// folding the bytes read into the running CRC32c when checking is enabled.
func (r *Reader) Read(p []byte) (int, error) {
	n, err := r.body.Read(p)
	if r.remain != -1 {
		r.remain -= int64(n)
	}
	if r.checkCRC {
		r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
		// Check CRC here. It would be natural to check it in Close, but
		// everybody defers Close on the assumption that it doesn't return
		// anything worth looking at.
		if r.remain == 0 && r.gotCRC != r.wantCRC {
			return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", r.gotCRC, r.wantCRC)
		}
	}
	return n, err
}

// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.
func (r *Reader) Size() int64 {
	return r.size
}

// Remain returns the number of bytes left to read, or -1 if unknown.
func (r *Reader) Remain() int64 {
	return r.remain
}

// ContentType returns the content type of the object.
func (r *Reader) ContentType() string { return r.contentType } golang-google-cloud-0.9.0/storage/storage.go000066400000000000000000001033351312234511600210070ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "bytes" "crypto" "crypto/rand" "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/base64" "encoding/pem" "errors" "fmt" "io" "io/ioutil" "net/http" "net/url" "reflect" "strconv" "strings" "time" "unicode/utf8" "google.golang.org/api/option" "google.golang.org/api/transport" "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/version" "golang.org/x/net/context" "google.golang.org/api/googleapi" raw "google.golang.org/api/storage/v1" ) var ( ErrBucketNotExist = errors.New("storage: bucket doesn't exist") ErrObjectNotExist = errors.New("storage: object doesn't exist") ) const userAgent = "gcloud-golang-storage/20151204" const ( // ScopeFullControl grants permissions to manage your // data and permissions in Google Cloud Storage. ScopeFullControl = raw.DevstorageFullControlScope // ScopeReadOnly grants permissions to // view your data in Google Cloud Storage. ScopeReadOnly = raw.DevstorageReadOnlyScope // ScopeReadWrite grants permissions to manage your // data in Google Cloud Storage. 
ScopeReadWrite = raw.DevstorageReadWriteScope ) var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo) func setClientHeader(headers http.Header) { headers.Set("x-goog-api-client", xGoogHeader) } // Client is a client for interacting with Google Cloud Storage. // // Clients should be reused instead of created as needed. // The methods of Client are safe for concurrent use by multiple goroutines. type Client struct { hc *http.Client raw *raw.Service } // NewClient creates a new Google Cloud Storage client. // The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { o := []option.ClientOption{ option.WithScopes(ScopeFullControl), option.WithUserAgent(userAgent), } opts = append(o, opts...) hc, ep, err := transport.NewHTTPClient(ctx, opts...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } rawService, err := raw.New(hc) if err != nil { return nil, fmt.Errorf("storage client: %v", err) } if ep != "" { rawService.BasePath = ep } return &Client{ hc: hc, raw: rawService, }, nil } // Close closes the Client. // // Close need not be called at program exit. func (c *Client) Close() error { c.hc = nil return nil } // SignedURLOptions allows you to restrict the access to the signed URL. type SignedURLOptions struct { // GoogleAccessID represents the authorizer of the signed URL generation. // It is typically the Google service account client email address from // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". // Required. GoogleAccessID string // PrivateKey is the Google service account private key. It is obtainable // from the Google Developers Console. // At https://console.developers.google.com/project//apiui/credential, // create a service account client ID or reuse one of your existing service account // credentials. 
Click on the "Generate new P12 key" to generate and download // a new private key. Once you download the P12 file, use the following command // to convert it into a PEM file. // // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes // // Provide the contents of the PEM file as a byte slice. // Exactly one of PrivateKey or SignBytes must be non-nil. PrivateKey []byte // SignBytes is a function for implementing custom signing. // If your application is running on Google App Engine, you can use appengine's internal signing function: // ctx := appengine.NewContext(request) // acc, _ := appengine.ServiceAccount(ctx) // url, err := SignedURL("bucket", "object", &SignedURLOptions{ // GoogleAccessID: acc, // SignBytes: func(b []byte) ([]byte, error) { // _, signedBytes, err := appengine.SignBytes(ctx, b) // return signedBytes, err // }, // // etc. // }) // // Exactly one of PrivateKey or SignBytes must be non-nil. SignBytes func([]byte) ([]byte, error) // Method is the HTTP method to be used with the signed URL. // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. // Required. Method string // Expires is the expiration time on the signed URL. It must be // a datetime in the future. // Required. Expires time.Time // ContentType is the content type header the client must provide // to use the generated signed URL. // Optional. ContentType string // Headers is a list of extention headers the client must provide // in order to use the generated signed URL. // Optional. Headers []string // MD5 is the base64 encoded MD5 checksum of the file. // If provided, the client should provide the exact value on the request // header in order to use the signed URL. // Optional. MD5 string } // SignedURL returns a URL for the specified object. Signed URLs allow // the users access to a restricted resource for a limited time without having a // Google account or signing in. 
For more information about the signed // URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs. func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { if opts == nil { return "", errors.New("storage: missing required SignedURLOptions") } if opts.GoogleAccessID == "" { return "", errors.New("storage: missing required GoogleAccessID") } if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { return "", errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") } if opts.Method == "" { return "", errors.New("storage: missing required method option") } if opts.Expires.IsZero() { return "", errors.New("storage: missing required expires option") } if opts.MD5 != "" { md5, err := base64.StdEncoding.DecodeString(opts.MD5) if err != nil || len(md5) != 16 { return "", errors.New("storage: invalid MD5 checksum") } } signBytes := opts.SignBytes if opts.PrivateKey != nil { key, err := parseKey(opts.PrivateKey) if err != nil { return "", err } signBytes = func(b []byte) ([]byte, error) { sum := sha256.Sum256(b) return rsa.SignPKCS1v15( rand.Reader, key, crypto.SHA256, sum[:], ) } } u := &url.URL{ Path: fmt.Sprintf("/%s/%s", bucket, name), } buf := &bytes.Buffer{} fmt.Fprintf(buf, "%s\n", opts.Method) fmt.Fprintf(buf, "%s\n", opts.MD5) fmt.Fprintf(buf, "%s\n", opts.ContentType) fmt.Fprintf(buf, "%d\n", opts.Expires.Unix()) if len(opts.Headers) > 0 { fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n")) } fmt.Fprintf(buf, "%s", u.String()) b, err := signBytes(buf.Bytes()) if err != nil { return "", err } encoded := base64.StdEncoding.EncodeToString(b) u.Scheme = "https" u.Host = "storage.googleapis.com" q := u.Query() q.Set("GoogleAccessId", opts.GoogleAccessID) q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) q.Set("Signature", string(encoded)) u.RawQuery = q.Encode() return u.String(), nil } // ObjectHandle provides operations on an object in a Google Cloud Storage bucket. 
// Use BucketHandle.Object to get a handle. type ObjectHandle struct { c *Client bucket string object string acl ACLHandle gen int64 // a negative value indicates latest conds *Conditions encryptionKey []byte // AES-256 key } // ACL provides access to the object's access control list. // This controls who can read and write this object. // This call does not perform any network operations. func (o *ObjectHandle) ACL() *ACLHandle { return &o.acl } // Generation returns a new ObjectHandle that operates on a specific generation // of the object. // By default, the handle operates on the latest generation. Not // all operations work when given a specific generation; check the API // endpoints at https://cloud.google.com/storage/docs/json_api/ for details. func (o *ObjectHandle) Generation(gen int64) *ObjectHandle { o2 := *o o2.gen = gen return &o2 } // If returns a new ObjectHandle that applies a set of preconditions. // Preconditions already set on the ObjectHandle are ignored. // Operations on the new handle will only occur if the preconditions are // satisfied. See https://cloud.google.com/storage/docs/generations-preconditions // for more details. func (o *ObjectHandle) If(conds Conditions) *ObjectHandle { o2 := *o o2.conds = &conds return &o2 } // Key returns a new ObjectHandle that uses the supplied encryption // key to encrypt and decrypt the object's contents. // // Encryption key must be a 32-byte AES-256 key. // See https://cloud.google.com/storage/docs/encryption for details. func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle { o2 := *o o2.encryptionKey = encryptionKey return &o2 } // Attrs returns meta information about the object. // ErrObjectNotExist will be returned if the object is not found. 
func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) { if err := o.validate(); err != nil { return nil, err } call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx) if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { return nil, err } if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { return nil, err } var obj *raw.Object var err error setClientHeader(call.Header()) err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { return nil, ErrObjectNotExist } if err != nil { return nil, err } return newObject(obj), nil } // Update updates an object with the provided attributes. // All zero-value attributes are ignored. // ErrObjectNotExist will be returned if the object is not found. func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error) { if err := o.validate(); err != nil { return nil, err } var attrs ObjectAttrs // Lists of fields to send, and set to null, in the JSON. var forceSendFields, nullFields []string if uattrs.ContentType != nil { attrs.ContentType = optional.ToString(uattrs.ContentType) forceSendFields = append(forceSendFields, "ContentType") } if uattrs.ContentLanguage != nil { attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) // For ContentLanguage It's an error to send the empty string. // Instead we send a null. 
if attrs.ContentLanguage == "" { nullFields = append(nullFields, "ContentLanguage") } else { forceSendFields = append(forceSendFields, "ContentLanguage") } } if uattrs.ContentEncoding != nil { attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) forceSendFields = append(forceSendFields, "ContentType") } if uattrs.ContentDisposition != nil { attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) forceSendFields = append(forceSendFields, "ContentDisposition") } if uattrs.CacheControl != nil { attrs.CacheControl = optional.ToString(uattrs.CacheControl) forceSendFields = append(forceSendFields, "CacheControl") } if uattrs.Metadata != nil { attrs.Metadata = uattrs.Metadata if len(attrs.Metadata) == 0 { // Sending the empty map is a no-op. We send null instead. nullFields = append(nullFields, "Metadata") } else { forceSendFields = append(forceSendFields, "Metadata") } } if uattrs.ACL != nil { attrs.ACL = uattrs.ACL // It's an error to attempt to delete the ACL, so // we don't append to nullFields here. forceSendFields = append(forceSendFields, "Acl") } rawObj := attrs.toRawObject(o.bucket) rawObj.ForceSendFields = forceSendFields rawObj.NullFields = nullFields call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx) if err := applyConds("Update", o.gen, o.conds, call); err != nil { return nil, err } if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { return nil, err } var obj *raw.Object var err error setClientHeader(call.Header()) err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { return nil, ErrObjectNotExist } if err != nil { return nil, err } return newObject(obj), nil } // ObjectAttrsToUpdate is used to update the attributes of an object. // Only fields set to non-nil values will be updated. // Set a field to its zero value to delete it. 
// // For example, to change ContentType and delete ContentEncoding and // Metadata, use // ObjectAttrsToUpdate{ // ContentType: "text/html", // ContentEncoding: "", // Metadata: map[string]string{}, // } type ObjectAttrsToUpdate struct { ContentType optional.String ContentLanguage optional.String ContentEncoding optional.String ContentDisposition optional.String CacheControl optional.String Metadata map[string]string // set to map[string]string{} to delete ACL []ACLRule } // Delete deletes the single specified object. func (o *ObjectHandle) Delete(ctx context.Context) error { if err := o.validate(); err != nil { return err } call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx) if err := applyConds("Delete", o.gen, o.conds, call); err != nil { return err } setClientHeader(call.Header()) err := runWithRetry(ctx, func() error { return call.Do() }) switch e := err.(type) { case nil: return nil case *googleapi.Error: if e.Code == http.StatusNotFound { return ErrObjectNotExist } } return err } // NewReader creates a new Reader to read the contents of the // object. // ErrObjectNotExist will be returned if the object is not found. // // The caller must call Close on the returned Reader when done reading. func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { return o.NewRangeReader(ctx, 0, -1) } // NewRangeReader reads part of an object, reading at most length bytes // starting at the given offset. If length is negative, the object is read // until the end. 
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) {
	if err := o.validate(); err != nil {
		return nil, err
	}
	if offset < 0 {
		return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
	}
	if o.conds != nil {
		if err := o.conds.validate("NewRangeReader"); err != nil {
			return nil, err
		}
	}
	// Reads go through the XML API endpoint, with generation/preconditions
	// encoded as query parameters.
	u := &url.URL{
		Scheme:   "https",
		Host:     "storage.googleapis.com",
		Path:     fmt.Sprintf("/%s/%s", o.bucket, o.object),
		RawQuery: conditionsQuery(o.gen, o.conds),
	}
	verb := "GET"
	if length == 0 {
		// A zero-length read only needs the object's metadata (size, headers).
		verb = "HEAD"
	}
	req, err := http.NewRequest(verb, u.String(), nil)
	if err != nil {
		return nil, err
	}
	req = withContext(req, ctx)
	if length < 0 && offset > 0 {
		// Open-ended range starting at offset.
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	} else if length > 0 {
		// Closed range; the Range header's end index is inclusive.
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
	}
	if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
		return nil, err
	}
	var res *http.Response
	err = runWithRetry(ctx, func() error {
		res, err = o.c.hc.Do(req)
		if err != nil {
			return err
		}
		if res.StatusCode == http.StatusNotFound {
			res.Body.Close()
			return ErrObjectNotExist
		}
		if res.StatusCode < 200 || res.StatusCode > 299 {
			// Best effort: include the response body in the error.
			// The ReadAll error is deliberately ignored.
			body, _ := ioutil.ReadAll(res.Body)
			res.Body.Close()
			return &googleapi.Error{
				Code:   res.StatusCode,
				Header: res.Header,
				Body:   string(body),
			}
		}
		// A ranged request that is answered with 200 instead of 206 means the
		// server ignored the Range header; treat that as a failure.
		if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
			res.Body.Close()
			return errors.New("storage: partial request not satisfied")
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	var size int64 // total size of object, even if a range was requested.
	if res.StatusCode == http.StatusPartialContent {
		// For 206 responses the total size comes from the Content-Range
		// header ("bytes start-end/total"), not Content-Length.
		cr := strings.TrimSpace(res.Header.Get("Content-Range"))
		if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
		}
		size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
		}
	} else {
		size = res.ContentLength
	}
	remain := res.ContentLength
	body := res.Body
	if length == 0 {
		// HEAD request: there is nothing to read; substitute an empty body.
		remain = 0
		body.Close()
		body = emptyBody
	}
	var (
		checkCRC bool
		crc      uint32
	)
	// Even if there is a CRC header, we can't compute the hash on partial data.
	if remain == size {
		crc, checkCRC = parseCRC32c(res)
	}
	return &Reader{
		body:        body,
		size:        size,
		remain:      remain,
		contentType: res.Header.Get("Content-Type"),
		wantCRC:     crc,
		checkCRC:    checkCRC,
	}, nil
}

// parseCRC32c extracts the object's CRC32C checksum from the X-Goog-Hash
// response headers, if present. The second return value reports whether a
// checksum was found.
func parseCRC32c(res *http.Response) (uint32, bool) {
	const prefix = "crc32c="
	for _, spec := range res.Header["X-Goog-Hash"] {
		if strings.HasPrefix(spec, prefix) {
			c, err := decodeUint32(spec[len(prefix):])
			if err == nil {
				return c, true
			}
		}
	}
	return 0, false
}

// emptyBody is substituted for the response body on zero-length (HEAD) reads.
var emptyBody = ioutil.NopCloser(strings.NewReader(""))

// NewWriter returns a storage Writer that writes to the GCS object
// associated with this ObjectHandle.
//
// A new object will be created unless an object with this name already exists.
// Otherwise any previous object with the same name will be replaced.
// The object will not be available (and any previous object will remain)
// until Close has been called.
//
// Attributes can be set on the object by modifying the returned Writer's
// ObjectAttrs field before the first call to Write. If no ContentType
// attribute is specified, the content type will be automatically sniffed
// using net/http.DetectContentType.
//
// It is the caller's responsibility to call Close when writing is done.
func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { return &Writer{ ctx: ctx, o: o, donec: make(chan struct{}), ObjectAttrs: ObjectAttrs{Name: o.object}, ChunkSize: googleapi.DefaultUploadChunkSize, } } func (o *ObjectHandle) validate() error { if o.bucket == "" { return errors.New("storage: bucket name is empty") } if o.object == "" { return errors.New("storage: object name is empty") } if !utf8.ValidString(o.object) { return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) } return nil } // parseKey converts the binary contents of a private key file // to an *rsa.PrivateKey. It detects whether the private key is in a // PEM container or not. If so, it extracts the the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. func parseKey(key []byte) (*rsa.PrivateKey, error) { if block, _ := pem.Decode(key); block != nil { key = block.Bytes } parsedKey, err := x509.ParsePKCS8PrivateKey(key) if err != nil { parsedKey, err = x509.ParsePKCS1PrivateKey(key) if err != nil { return nil, err } } parsed, ok := parsedKey.(*rsa.PrivateKey) if !ok { return nil, errors.New("oauth2: private key is invalid") } return parsed, nil } func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl { var acl []*raw.ObjectAccessControl if len(oldACL) > 0 { acl = make([]*raw.ObjectAccessControl, len(oldACL)) for i, rule := range oldACL { acl[i] = &raw.ObjectAccessControl{ Entity: string(rule.Entity), Role: string(rule.Role), } } } return acl } // toRawObject copies the editable attributes from o to the raw library's Object type. 
func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
	acl := toRawObjectACL(o.ACL)
	// Only fields a caller may set are copied; read-only fields (Size, MD5,
	// CRC32C, timestamps, ...) are intentionally omitted.
	return &raw.Object{
		Bucket:             bucket,
		Name:               o.Name,
		ContentType:        o.ContentType,
		ContentEncoding:    o.ContentEncoding,
		ContentLanguage:    o.ContentLanguage,
		CacheControl:       o.CacheControl,
		ContentDisposition: o.ContentDisposition,
		StorageClass:       o.StorageClass,
		Acl:                acl,
		Metadata:           o.Metadata,
	}
}

// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
type ObjectAttrs struct {
	// Bucket is the name of the bucket containing this GCS object.
	// This field is read-only.
	Bucket string

	// Name is the name of the object within the bucket.
	// This field is read-only.
	Name string

	// ContentType is the MIME type of the object's content.
	ContentType string

	// ContentLanguage is the content language of the object's content.
	ContentLanguage string

	// CacheControl is the Cache-Control header to be sent in the response
	// headers when serving the object data.
	CacheControl string

	// ACL is the list of access control rules for the object.
	ACL []ACLRule

	// Owner is the owner of the object. This field is read-only.
	//
	// If non-zero, it is in the form of "user-<userId>".
	Owner string

	// Size is the length of the object's content. This field is read-only.
	Size int64

	// ContentEncoding is the encoding of the object's content.
	ContentEncoding string

	// ContentDisposition is the optional Content-Disposition header of the object
	// sent in the response headers.
	ContentDisposition string

	// MD5 is the MD5 hash of the object's content. This field is read-only.
	MD5 []byte

	// CRC32C is the CRC32 checksum of the object's content using
	// the Castagnoli93 polynomial. This field is read-only.
	CRC32C uint32

	// MediaLink is an URL to the object's content. This field is read-only.
	MediaLink string

	// Metadata represents user-provided metadata, in key/value pairs.
	// It can be nil if no metadata is provided.
	Metadata map[string]string

	// Generation is the generation number of the object's content.
	// This field is read-only.
	Generation int64

	// Metageneration is the version of the metadata for this
	// object at this generation. This field is used for preconditions
	// and for detecting changes in metadata. A metageneration number
	// is only meaningful in the context of a particular generation
	// of a particular object. This field is read-only.
	Metageneration int64

	// StorageClass is the storage class of the object.
	// This value defines how objects in the bucket are stored and
	// determines the SLA and the cost of storage. Typical values are
	// "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD"
	// and "DURABLE_REDUCED_AVAILABILITY".
	// It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL"
	// or "REGIONAL" depending on the bucket's location settings.
	StorageClass string

	// Created is the time the object was created. This field is read-only.
	Created time.Time

	// Deleted is the time the object was deleted.
	// If not deleted, it is the zero value. This field is read-only.
	Deleted time.Time

	// Updated is the creation or modification time of the object.
	// For buckets with versioning enabled, changing an object's
	// metadata does not change this property. This field is read-only.
	Updated time.Time

	// CustomerKeySHA256 is the base64-encoded SHA-256 hash of the
	// customer-supplied encryption key for the object. It is empty if there is
	// no customer-supplied encryption key.
	// See // https://cloud.google.com/storage/docs/encryption for more about
	// encryption in Google Cloud Storage.
	CustomerKeySHA256 string

	// Prefix is set only for ObjectAttrs which represent synthetic "directory
	// entries" when iterating over buckets using Query.Delimiter. See
	// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
	// populated.
	Prefix string
}

// convertTime converts a time in RFC3339 format to time.Time.
// If any error occurs in parsing, the zero-value time.Time is silently returned.
func convertTime(t string) time.Time { var r time.Time if t != "" { r, _ = time.Parse(time.RFC3339, t) } return r } func newObject(o *raw.Object) *ObjectAttrs { if o == nil { return nil } acl := make([]ACLRule, len(o.Acl)) for i, rule := range o.Acl { acl[i] = ACLRule{ Entity: ACLEntity(rule.Entity), Role: ACLRole(rule.Role), } } owner := "" if o.Owner != nil { owner = o.Owner.Entity } md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash) crc32c, _ := decodeUint32(o.Crc32c) var sha256 string if o.CustomerEncryption != nil { sha256 = o.CustomerEncryption.KeySha256 } return &ObjectAttrs{ Bucket: o.Bucket, Name: o.Name, ContentType: o.ContentType, ContentLanguage: o.ContentLanguage, CacheControl: o.CacheControl, ACL: acl, Owner: owner, ContentEncoding: o.ContentEncoding, Size: int64(o.Size), MD5: md5, CRC32C: crc32c, MediaLink: o.MediaLink, Metadata: o.Metadata, Generation: o.Generation, Metageneration: o.Metageneration, StorageClass: o.StorageClass, CustomerKeySHA256: sha256, Created: convertTime(o.TimeCreated), Deleted: convertTime(o.TimeDeleted), Updated: convertTime(o.Updated), } } // Decode a uint32 encoded in Base64 in big-endian byte order. func decodeUint32(b64 string) (uint32, error) { d, err := base64.StdEncoding.DecodeString(b64) if err != nil { return 0, err } if len(d) != 4 { return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d) } return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil } // Encode a uint32 as Base64 in big-endian byte order. func encodeUint32(u uint32) string { b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)} return base64.StdEncoding.EncodeToString(b) } // Query represents a query to filter objects from a bucket. type Query struct { // Delimiter returns results in a directory-like fashion. // Results will contain only objects whose names, aside from the // prefix, do not contain delimiter. 
Objects whose names, // aside from the prefix, contain delimiter will have their name, // truncated after the delimiter, returned in prefixes. // Duplicate prefixes are omitted. // Optional. Delimiter string // Prefix is the prefix filter to query objects // whose names begin with this prefix. // Optional. Prefix string // Versions indicates whether multiple versions of the same // object will be included in the results. Versions bool } // contentTyper implements ContentTyper to enable an // io.ReadCloser to specify its MIME type. type contentTyper struct { io.Reader t string } func (c *contentTyper) ContentType() string { return c.t } // Conditions constrain methods to act on specific generations of // objects. // // The zero value is an empty set of constraints. Not all conditions or // combinations of conditions are applicable to all methods. // See https://cloud.google.com/storage/docs/generations-preconditions // for details on how these operate. type Conditions struct { // Generation constraints. // At most one of the following can be set to a non-zero value. // GenerationMatch specifies that the object must have the given generation // for the operation to occur. // If GenerationMatch is zero, it has no effect. // Use DoesNotExist to specify that the object does not exist in the bucket. GenerationMatch int64 // GenerationNotMatch specifies that the object must not have the given // generation for the operation to occur. // If GenerationNotMatch is zero, it has no effect. GenerationNotMatch int64 // DoesNotExist specifies that the object must not exist in the bucket for // the operation to occur. // If DoesNotExist is false, it has no effect. DoesNotExist bool // Metadata generation constraints. // At most one of the following can be set to a non-zero value. // MetagenerationMatch specifies that the object must have the given // metageneration for the operation to occur. // If MetagenerationMatch is zero, it has no effect. 
MetagenerationMatch int64 // MetagenerationNotMatch specifies that the object must not have the given // metageneration for the operation to occur. // If MetagenerationNotMatch is zero, it has no effect. MetagenerationNotMatch int64 } func (c *Conditions) validate(method string) error { if *c == (Conditions{}) { return fmt.Errorf("storage: %s: empty conditions", method) } if !c.isGenerationValid() { return fmt.Errorf("storage: %s: multiple conditions specified for generation", method) } if !c.isMetagenerationValid() { return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) } return nil } func (c *Conditions) isGenerationValid() bool { n := 0 if c.GenerationMatch != 0 { n++ } if c.GenerationNotMatch != 0 { n++ } if c.DoesNotExist { n++ } return n <= 1 } func (c *Conditions) isMetagenerationValid() bool { return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0 } // applyConds modifies the provided call using the conditions in conds. // call is something that quacks like a *raw.WhateverCall. 
func applyConds(method string, gen int64, conds *Conditions, call interface{}) error { cval := reflect.ValueOf(call) if gen >= 0 { if !setConditionField(cval, "Generation", gen) { return fmt.Errorf("storage: %s: generation not supported", method) } } if conds == nil { return nil } if err := conds.validate(method); err != nil { return err } switch { case conds.GenerationMatch != 0: if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) { return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) } case conds.GenerationNotMatch != 0: if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) { return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) } case conds.DoesNotExist: if !setConditionField(cval, "IfGenerationMatch", int64(0)) { return fmt.Errorf("storage: %s: DoesNotExist not supported", method) } } switch { case conds.MetagenerationMatch != 0: if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) } case conds.MetagenerationNotMatch != 0: if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) } } return nil } func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error { if gen >= 0 { call.SourceGeneration(gen) } if conds == nil { return nil } if err := conds.validate("CopyTo source"); err != nil { return err } switch { case conds.GenerationMatch != 0: call.IfSourceGenerationMatch(conds.GenerationMatch) case conds.GenerationNotMatch != 0: call.IfSourceGenerationNotMatch(conds.GenerationNotMatch) case conds.DoesNotExist: call.IfSourceGenerationMatch(0) } switch { case conds.MetagenerationMatch != 0: call.IfSourceMetagenerationMatch(conds.MetagenerationMatch) case conds.MetagenerationNotMatch != 0: 
call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch) } return nil } // setConditionField sets a field on a *raw.WhateverCall. // We can't use anonymous interfaces because the return type is // different, since the field setters are builders. func setConditionField(call reflect.Value, name string, value interface{}) bool { m := call.MethodByName(name) if !m.IsValid() { return false } m.Call([]reflect.Value{reflect.ValueOf(value)}) return true } // conditionsQuery returns the generation and conditions as a URL query // string suitable for URL.RawQuery. It assumes that the conditions // have been validated. func conditionsQuery(gen int64, conds *Conditions) string { // URL escapes are elided because integer strings are URL-safe. var buf []byte appendParam := func(s string, n int64) { if len(buf) > 0 { buf = append(buf, '&') } buf = append(buf, s...) buf = strconv.AppendInt(buf, n, 10) } if gen >= 0 { appendParam("generation=", gen) } if conds == nil { return string(buf) } switch { case conds.GenerationMatch != 0: appendParam("ifGenerationMatch=", conds.GenerationMatch) case conds.GenerationNotMatch != 0: appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch) case conds.DoesNotExist: appendParam("ifGenerationMatch=", 0) } switch { case conds.MetagenerationMatch != 0: appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch) case conds.MetagenerationNotMatch != 0: appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch) } return string(buf) } // composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods // that modifyCall searches for by name. type composeSourceObj struct { src *raw.ComposeRequestSourceObjects } func (c composeSourceObj) Generation(gen int64) { c.src.Generation = gen } func (c composeSourceObj) IfGenerationMatch(gen int64) { // It's safe to overwrite ObjectPreconditions, since its only field is // IfGenerationMatch. 
	c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{
		IfGenerationMatch: gen,
	}
}

// setEncryptionHeaders attaches the customer-supplied encryption key to
// headers. When copySource is true the headers carry the "copy-source-"
// prefix, so the key applies to the source object of a copy rather than
// the destination. A nil key is a no-op.
func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error {
	if key == nil {
		return nil
	}
	// TODO(jbd): Ask the API team to return a more user-friendly error
	// and avoid doing this check at the client level.
	if len(key) != 32 {
		return errors.New("storage: not a 32-byte AES-256 key")
	}
	var cs string
	if copySource {
		cs = "copy-source-"
	}
	headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256")
	headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key))
	// The server verifies the key against this SHA-256 digest.
	keyHash := sha256.Sum256(key)
	headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:]))
	return nil
}

// TODO(jbd): Add storage.objects.watch.
golang-google-cloud-0.9.0/storage/storage_test.go000066400000000000000000000472051312234511600220510ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"net/http"
	"net/http/httptest"
	"reflect"
	"strings"
	"testing"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
	raw "google.golang.org/api/storage/v1"
)

// TestSignedURL pins SignedURL's output for an RSA private key against a
// golden URL (including the exact base64 signature).
func TestSignedURL(t *testing.T) {
	t.Parallel()
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		PrivateKey:     dummyKey("rsa"),
		Method:         "GET",
		MD5:            "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:        expires,
		ContentType:    "application/json",
		Headers:        []string{"x-header1", "x-header2"},
	})
	if err != nil {
		t.Error(err)
	}
	want := "https://storage.googleapis.com/bucket-name/object-name?" +
		"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
		"ZMw18bZVhySNYAMEX87RMyuZCUMtGLVi%2B2zU2ByiQ0Rxgij%2BhFZ5LsT" +
		"5ZPIH5h3QXB%2BiSb1URJnZo3aF0exVP%2FYR1hpg2e65w9HHt7yYjIqcg" +
		"%2FfAOIyxriFtgRYk3oAv%2FFLF62fI8iF%2BCp0fWSm%2FHggz22blVnQz" +
		"EtSP%2BuRhFle4172L%2B710sfMDtyQLKTz6W4TmRjC9ymTi8mVj95dZgyF" +
		"RXbibTdtw0JzndE0Ig4c6pU4xDPPiyaziUSVDMIpzZDJH1GYOGHxbFasba4" +
		"1rRoWWkdBnsMtHm2ck%2FsFD2leL6u8q0OpVAc4ZdxseucL4OpCy%2BCLhQ" +
		"JFQT5bqSljP0g%3D%3D"
	if url != want {
		t.Fatalf("Unexpected signed URL; found %v", url)
	}
}

// TestSignedURL_PEMPrivateKey exercises the PEM-container key path of parseKey.
func TestSignedURL_PEMPrivateKey(t *testing.T) {
	t.Parallel()
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		PrivateKey:     dummyKey("pem"),
		Method:         "GET",
		MD5:            "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:        expires,
		ContentType:    "application/json",
		Headers:        []string{"x-header1", "x-header2"},
	})
	if err != nil {
		t.Error(err)
	}
	want := "https://storage.googleapis.com/bucket-name/object-name?" +
		"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
		"gHlh63sOxJnNj22X%2B%2F4kwOSNMeqwXWr4udEfrzJPQcq1xzxA8ovMM5SOrOc%" +
		"2FuE%2Ftc9%2Bq7a42CDBwZff1PsvuJMBDaPbluU257h%2Bvxx8lHMnb%2Bg1wD1" +
		"99FiCE014MRH9TlIg%2FdXRkErosVWTy4GqAgZemmKHo0HwDGT6IovB9mdg%3D"
	if url != want {
		t.Fatalf("Unexpected signed URL; found %v", url)
	}
}

// TestSignedURL_SignBytes verifies that a caller-supplied SignBytes callback
// is used verbatim: the signature is just base64 of the callback's output.
func TestSignedURL_SignBytes(t *testing.T) {
	t.Parallel()
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		SignBytes: func(b []byte) ([]byte, error) {
			return []byte("signed"), nil
		},
		Method:      "GET",
		MD5:         "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:     expires,
		ContentType: "application/json",
		Headers:     []string{"x-header1", "x-header2"},
	})
	if err != nil {
		t.Error(err)
	}
	want := "https://storage.googleapis.com/bucket-name/object-name?" +
		"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
		"c2lnbmVk" // base64('signed') == 'c2lnbmVk'
	if url != want {
		t.Fatalf("Unexpected signed URL\ngot:  %q\nwant: %q", url, want)
	}
}

// TestSignedURL_URLUnsafeObjectName checks percent-encoding of non-ASCII and
// URL-unsafe characters in the object name.
func TestSignedURL_URLUnsafeObjectName(t *testing.T) {
	t.Parallel()
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	url, err := SignedURL("bucket-name", "object name界", &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		PrivateKey:     dummyKey("pem"),
		Method:         "GET",
		MD5:            "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:        expires,
		ContentType:    "application/json",
		Headers:        []string{"x-header1", "x-header2"},
	})
	if err != nil {
		t.Error(err)
	}
	want := "https://storage.googleapis.com/bucket-name/object%20name%E7%95%8C?" +
		"Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" +
		"LSxs1YwXNKOa7mQv1ZAI2ao0Fuv6yXLLU7%2BQ97z2B7hYZ57OiFwQ72EdGXSiIM" +
		"JwLisEKkwoSlYCMm3uuTdgJtXXVi7SYXMfdeKaonyQwMv531KETCBTSewt8CW%2B" +
		"FaUJ5SEYG44SeJCiqeIr3GF7t90UNWs6TdFXDaKShpQzBGg%3D"
	if url != want {
		t.Fatalf("Unexpected signed URL; found %v", url)
	}
}

// TestSignedURL_MissingOptions asserts on the exact validation error text for
// each incomplete SignedURLOptions. Note the messages are load-bearing here:
// SignedURL's implementation must keep them verbatim.
func TestSignedURL_MissingOptions(t *testing.T) {
	t.Parallel()
	pk := dummyKey("rsa")
	expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00")
	var tests = []struct {
		opts   *SignedURLOptions
		errMsg string
	}{
		{
			&SignedURLOptions{},
			"missing required GoogleAccessID",
		},
		{
			&SignedURLOptions{GoogleAccessID: "access_id"},
			"exactly one of PrivateKey or SignedBytes must be set",
		},
		{
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				SignBytes:      func(b []byte) ([]byte, error) { return b, nil },
				PrivateKey:     pk,
			},
			"exactly one of PrivateKey or SignedBytes must be set",
		},
		{
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				PrivateKey:     pk,
			},
			"missing required method",
		},
		{
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				SignBytes:      func(b []byte) ([]byte, error) { return b, nil },
			},
			"missing required method",
		},
		{
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				PrivateKey:     pk,
				Method:         "PUT",
			},
			"missing required expires",
		},
		{
			&SignedURLOptions{
				GoogleAccessID: "access_id",
				PrivateKey:     pk,
				Method:         "PUT",
				Expires:        expires,
				MD5:            "invalid",
			},
			"invalid MD5 checksum",
		},
	}
	for _, test := range tests {
		_, err := SignedURL("bucket", "name", test.opts)
		if !strings.Contains(err.Error(), test.errMsg) {
			t.Errorf("expected err: %v, found: %v", test.errMsg, err)
		}
	}
}

// dummyKey loads a fixture key from testdata; kind is "rsa" or "pem".
// It aborts the test binary if the fixture is missing.
func dummyKey(kind string) []byte {
	slurp, err := ioutil.ReadFile(fmt.Sprintf("./testdata/dummy_%s", kind))
	if err != nil {
		log.Fatal(err)
	}
	return slurp
}

// TestCopyToMissingFields verifies the validation errors produced when
// source/destination bucket or object names are empty.
func TestCopyToMissingFields(t *testing.T) {
	t.Parallel()
	var tests = []struct {
		srcBucket, srcName, destBucket, destName string
		errMsg                                   string
	}{
		{
			"mybucket", "", "mybucket", "destname",
			"name is empty",
		},
		{
			"mybucket", "srcname", "mybucket",
			"", "name is empty",
		},
		{
			"", "srcfile", "mybucket", "destname",
			"name is empty",
		},
		{
			"mybucket", "srcfile", "", "destname",
			"name is empty",
		},
	}
	ctx := context.Background()
	client, err := NewClient(ctx, option.WithHTTPClient(&http.Client{Transport: &fakeTransport{}}))
	if err != nil {
		panic(err)
	}
	for i, test := range tests {
		src := client.Bucket(test.srcBucket).Object(test.srcName)
		dst := client.Bucket(test.destBucket).Object(test.destName)
		_, err := dst.CopierFrom(src).Run(ctx)
		if !strings.Contains(err.Error(), test.errMsg) {
			t.Errorf("CopyTo test #%v:\ngot err  %q\nwant err %q", i, err, test.errMsg)
		}
	}
}

// TestObjectNames checks percent-encoding of object names in signed URLs for
// a range of tricky inputs (control characters, non-Latin scripts, Unicode
// normalization forms, the maximum legal name length).
func TestObjectNames(t *testing.T) {
	t.Parallel()
	// Naming requirements: https://cloud.google.com/storage/docs/bucket-naming
	const maxLegalLength = 1024

	type testT struct {
		name, want string
	}
	tests := []testT{
		// Embedded characters important in URLs.
		{"foo % bar", "foo%20%25%20bar"},
		{"foo ? bar", "foo%20%3F%20bar"},
		{"foo / bar", "foo%20/%20bar"},
		{"foo %?/ bar", "foo%20%25%3F/%20bar"},

		// Non-Roman scripts
		{"타코", "%ED%83%80%EC%BD%94"},
		{"世界", "%E4%B8%96%E7%95%8C"},

		// Longest legal name
		{strings.Repeat("a", maxLegalLength), strings.Repeat("a", maxLegalLength)},

		// Line terminators besides CR and LF: https://en.wikipedia.org/wiki/Newline#Unicode
		{"foo \u000b bar", "foo%20%0B%20bar"},
		{"foo \u000c bar", "foo%20%0C%20bar"},
		{"foo \u0085 bar", "foo%20%C2%85%20bar"},
		{"foo \u2028 bar", "foo%20%E2%80%A8%20bar"},
		{"foo \u2029 bar", "foo%20%E2%80%A9%20bar"},

		// Null byte.
		{"foo \u0000 bar", "foo%20%00%20bar"},

		// Non-control characters that are discouraged, but not forbidden, according to the documentation.
		{"foo # bar", "foo%20%23%20bar"},
		{"foo []*? bar", "foo%20%5B%5D%2A%3F%20bar"},

		// Angstrom symbol singleton and normalized forms: http://unicode.org/reports/tr15/
		{"foo \u212b bar", "foo%20%E2%84%AB%20bar"},
		{"foo \u0041\u030a bar", "foo%20A%CC%8A%20bar"},
		{"foo \u00c5 bar", "foo%20%C3%85%20bar"},

		// Hangul separating jamo: http://www.unicode.org/versions/Unicode7.0.0/ch18.pdf (Table 18-10)
		{"foo \u3131\u314f bar", "foo%20%E3%84%B1%E3%85%8F%20bar"},
		{"foo \u1100\u1161 bar", "foo%20%E1%84%80%E1%85%A1%20bar"},
		{"foo \uac00 bar", "foo%20%EA%B0%80%20bar"},
	}

	// C0 control characters not forbidden by the docs.
	var runes []rune
	for r := rune(0x01); r <= rune(0x1f); r++ {
		if r != '\u000a' && r != '\u000d' {
			runes = append(runes, r)
		}
	}
	tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%01%02%03%04%05%06%07%08%09%0B%0C%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20bar"})

	// C1 control characters, plus DEL.
	runes = nil
	for r := rune(0x7f); r <= rune(0x9f); r++ {
		runes = append(runes, r)
	}
	tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%20bar"})

	opts := &SignedURLOptions{
		GoogleAccessID: "xxx@clientid",
		PrivateKey:     dummyKey("rsa"),
		Method:         "GET",
		MD5:            "ICy5YqxZB1uWSwcVLSNLcA==",
		Expires:        time.Date(2002, time.October, 2, 10, 0, 0, 0, time.UTC),
		ContentType:    "application/json",
		Headers:        []string{"x-header1", "x-header2"},
	}

	for _, test := range tests {
		g, err := SignedURL("bucket-name", test.name, opts)
		if err != nil {
			t.Errorf("SignedURL(%q) err=%v, want nil", test.name, err)
		}
		if w := "/bucket-name/" + test.want; !strings.Contains(g, w) {
			t.Errorf("SignedURL(%q)=%q, want substring %q", test.name, g, w)
		}
	}
}

// TestCondition drives ObjectHandle operations through a stub HTTP server and
// asserts the exact method + request URI each generation/metageneration
// precondition produces.
// NOTE(review): the local variable `close` shadows the builtin, and the
// `if err != nil` check inside the loop re-tests NewClient's error — both are
// pre-existing quirks left untouched.
func TestCondition(t *testing.T) {
	t.Parallel()
	gotReq := make(chan *http.Request, 1)
	hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) {
		io.Copy(ioutil.Discard, r.Body)
		gotReq <- r
		w.WriteHeader(200)
	})
	defer close()
	ctx := context.Background()
	c, err := NewClient(ctx, option.WithHTTPClient(hc))
	if err != nil {
		t.Fatal(err)
	}

	obj := c.Bucket("buck").Object("obj")
	dst := c.Bucket("dstbuck").Object("dst")
	tests := []struct {
		fn   func()
		want string
	}{
		{
			func() { obj.Generation(1234).NewReader(ctx) },
			"GET /buck/obj?generation=1234",
		},
		{
			func() { obj.If(Conditions{GenerationMatch: 1234}).NewReader(ctx) },
			"GET /buck/obj?ifGenerationMatch=1234",
		},
		{
			func() { obj.If(Conditions{GenerationNotMatch: 1234}).NewReader(ctx) },
			"GET /buck/obj?ifGenerationNotMatch=1234",
		},
		{
			func() { obj.If(Conditions{MetagenerationMatch: 1234}).NewReader(ctx) },
			"GET /buck/obj?ifMetagenerationMatch=1234",
		},
		{
			func() { obj.If(Conditions{MetagenerationNotMatch: 1234}).NewReader(ctx) },
			"GET /buck/obj?ifMetagenerationNotMatch=1234",
		},
		{
			func() { obj.If(Conditions{MetagenerationNotMatch: 1234}).Attrs(ctx) },
			"GET /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationNotMatch=1234&projection=full",
		},
		{
			func() { obj.If(Conditions{MetagenerationMatch: 1234}).Update(ctx, ObjectAttrsToUpdate{}) },
			"PATCH /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationMatch=1234&projection=full",
		},
		{
			func() { obj.Generation(1234).Delete(ctx) },
			"DELETE /storage/v1/b/buck/o/obj?alt=json&generation=1234",
		},
		{
			func() {
				w := obj.If(Conditions{GenerationMatch: 1234}).NewWriter(ctx)
				w.ContentType = "text/plain"
				w.Close()
			},
			"POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=1234&projection=full&uploadType=multipart",
		},
		{
			func() {
				w := obj.If(Conditions{DoesNotExist: true}).NewWriter(ctx)
				w.ContentType = "text/plain"
				w.Close()
			},
			"POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=0&projection=full&uploadType=multipart",
		},
		{
			func() {
				dst.If(Conditions{MetagenerationMatch: 5678}).CopierFrom(obj.If(Conditions{GenerationMatch: 1234})).Run(ctx)
			},
			"POST /storage/v1/b/buck/o/obj/rewriteTo/b/dstbuck/o/dst?alt=json&ifMetagenerationMatch=5678&ifSourceGenerationMatch=1234&projection=full",
		},
	}

	for i, tt := range tests {
		tt.fn()
		select {
		case r := <-gotReq:
			got := r.Method + " " + r.RequestURI
			if got != tt.want {
				t.Errorf("%d. RequestURI = %q; want %q", i, got, tt.want)
			}
		case <-time.After(5 * time.Second):
			t.Fatalf("%d. timeout", i)
		}
		if err != nil {
			t.Fatal(err)
		}
	}

	// Test an error, too:
	err = obj.Generation(1234).NewWriter(ctx).Close()
	if err == nil || !strings.Contains(err.Error(), "NewWriter: generation not supported") {
		t.Errorf("want error about unsupported generation; got %v", err)
	}
}

// TestConditionErrors checks that empty and mutually-exclusive Conditions are
// rejected by validate.
func TestConditionErrors(t *testing.T) {
	t.Parallel()
	for _, conds := range []Conditions{
		{GenerationMatch: 0},
		{DoesNotExist: false}, // same as above, actually
		{GenerationMatch: 1, GenerationNotMatch: 2},
		{GenerationNotMatch: 2, DoesNotExist: true},
		{MetagenerationMatch: 1, MetagenerationNotMatch: 2},
	} {
		if err := conds.validate(""); err == nil {
			t.Errorf("%+v: got nil, want error", conds)
		}
	}
}

// Test object compose.
func TestObjectCompose(t *testing.T) { t.Parallel() gotURL := make(chan string, 1) gotBody := make(chan []byte, 1) hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { body, _ := ioutil.ReadAll(r.Body) gotURL <- r.URL.String() gotBody <- body w.Write([]byte("{}")) }) defer close() ctx := context.Background() c, err := NewClient(ctx, option.WithHTTPClient(hc)) if err != nil { t.Fatal(err) } testCases := []struct { desc string dst *ObjectHandle srcs []*ObjectHandle attrs *ObjectAttrs wantReq raw.ComposeRequest wantURL string wantErr bool }{ { desc: "basic case", dst: c.Bucket("foo").Object("bar"), srcs: []*ObjectHandle{ c.Bucket("foo").Object("baz"), c.Bucket("foo").Object("quux"), }, wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json", wantReq: raw.ComposeRequest{ Destination: &raw.Object{Bucket: "foo"}, SourceObjects: []*raw.ComposeRequestSourceObjects{ {Name: "baz"}, {Name: "quux"}, }, }, }, { desc: "with object attrs", dst: c.Bucket("foo").Object("bar"), srcs: []*ObjectHandle{ c.Bucket("foo").Object("baz"), c.Bucket("foo").Object("quux"), }, attrs: &ObjectAttrs{ Name: "not-bar", ContentType: "application/json", }, wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json", wantReq: raw.ComposeRequest{ Destination: &raw.Object{ Bucket: "foo", Name: "not-bar", ContentType: "application/json", }, SourceObjects: []*raw.ComposeRequestSourceObjects{ {Name: "baz"}, {Name: "quux"}, }, }, }, { desc: "with conditions", dst: c.Bucket("foo").Object("bar").If(Conditions{ GenerationMatch: 12, MetagenerationMatch: 34, }), srcs: []*ObjectHandle{ c.Bucket("foo").Object("baz").Generation(56), c.Bucket("foo").Object("quux").If(Conditions{GenerationMatch: 78}), }, wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json&ifGenerationMatch=12&ifMetagenerationMatch=34", wantReq: raw.ComposeRequest{ Destination: &raw.Object{Bucket: "foo"}, SourceObjects: []*raw.ComposeRequestSourceObjects{ { Name: "baz", Generation: 56, }, { Name: "quux", ObjectPreconditions: 
&raw.ComposeRequestSourceObjectsObjectPreconditions{ IfGenerationMatch: 78, }, }, }, }, }, { desc: "no sources", dst: c.Bucket("foo").Object("bar"), wantErr: true, }, { desc: "destination, no bucket", dst: c.Bucket("").Object("bar"), srcs: []*ObjectHandle{ c.Bucket("foo").Object("baz"), }, wantErr: true, }, { desc: "destination, no object", dst: c.Bucket("foo").Object(""), srcs: []*ObjectHandle{ c.Bucket("foo").Object("baz"), }, wantErr: true, }, { desc: "source, different bucket", dst: c.Bucket("foo").Object("bar"), srcs: []*ObjectHandle{ c.Bucket("otherbucket").Object("baz"), }, wantErr: true, }, { desc: "source, no object", dst: c.Bucket("foo").Object("bar"), srcs: []*ObjectHandle{ c.Bucket("foo").Object(""), }, wantErr: true, }, { desc: "destination, bad condition", dst: c.Bucket("foo").Object("bar").Generation(12), srcs: []*ObjectHandle{ c.Bucket("foo").Object("baz"), }, wantErr: true, }, { desc: "source, bad condition", dst: c.Bucket("foo").Object("bar"), srcs: []*ObjectHandle{ c.Bucket("foo").Object("baz").If(Conditions{MetagenerationMatch: 12}), }, wantErr: true, }, } for _, tt := range testCases { composer := tt.dst.ComposerFrom(tt.srcs...) if tt.attrs != nil { composer.ObjectAttrs = *tt.attrs } _, err := composer.Run(ctx) if gotErr := err != nil; gotErr != tt.wantErr { t.Errorf("%s: got error %v; want err %t", tt.desc, err, tt.wantErr) continue } if tt.wantErr { continue } url, body := <-gotURL, <-gotBody if url != tt.wantURL { t.Errorf("%s: request URL\ngot %q\nwant %q", tt.desc, url, tt.wantURL) } var req raw.ComposeRequest if err := json.Unmarshal(body, &req); err != nil { t.Errorf("%s: json.Unmarshal %v (body %s)", tt.desc, err, body) } if !reflect.DeepEqual(req, tt.wantReq) { // Print to JSON. wantReq, _ := json.Marshal(tt.wantReq) t.Errorf("%s: request body\ngot %s\nwant %s", tt.desc, body, wantReq) } } } // Test that ObjectIterator's Next and NextPage methods correctly terminate // if there is nothing to iterate over. 
func TestEmptyObjectIterator(t *testing.T) { t.Parallel() hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { io.Copy(ioutil.Discard, r.Body) fmt.Fprintf(w, "{}") }) defer close() ctx := context.Background() client, err := NewClient(ctx, option.WithHTTPClient(hClient)) if err != nil { t.Fatal(err) } it := client.Bucket("b").Objects(ctx, nil) _, err = it.Next() if err != iterator.Done { t.Errorf("got %v, want Done", err) } } // Test that BucketIterator's Next method correctly terminates if there is // nothing to iterate over. func TestEmptyBucketIterator(t *testing.T) { t.Parallel() hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { io.Copy(ioutil.Discard, r.Body) fmt.Fprintf(w, "{}") }) defer close() ctx := context.Background() client, err := NewClient(ctx, option.WithHTTPClient(hClient)) if err != nil { t.Fatal(err) } it := client.Buckets(ctx, "project") _, err = it.Next() if err != iterator.Done { t.Errorf("got %v, want Done", err) } } func TestCodecUint32(t *testing.T) { t.Parallel() for _, u := range []uint32{0, 1, 256, 0xFFFFFFFF} { s := encodeUint32(u) d, err := decodeUint32(s) if err != nil { t.Fatal(err) } if d != u { t.Errorf("got %d, want input %d", d, u) } } } func newTestServer(handler func(w http.ResponseWriter, r *http.Request)) (*http.Client, func()) { ts := httptest.NewTLSServer(http.HandlerFunc(handler)) tlsConf := &tls.Config{InsecureSkipVerify: true} tr := &http.Transport{ TLSClientConfig: tlsConf, DialTLS: func(netw, addr string) (net.Conn, error) { return tls.Dial("tcp", ts.Listener.Addr().String(), tlsConf) }, } return &http.Client{Transport: tr}, func() { tr.CloseIdleConnections() ts.Close() } } golang-google-cloud-0.9.0/storage/testdata/000077500000000000000000000000001312234511600206205ustar00rootroot00000000000000golang-google-cloud-0.9.0/storage/testdata/dummy_pem000066400000000000000000000042271312234511600225440ustar00rootroot00000000000000Bag Attributes friendlyName: privatekey 
localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 Key Attributes: -----BEGIN RSA PRIVATE KEY----- MIICXQIBAAKBgQCtCWMoJ2Bok2QoGFyU7A6IlGprO9QfUTT0jNrLkIbM5OWNIuDx 64+PEaTS5g5m+2Hz/lmd5jJKanAH4dY9LZzsaYAPq1K17Gcmg1hEisYeKsgOcjYY kwRkV+natCTsC+tfWmS0voRh0jA1rI1J4MikceoHtgWdEuoHrrptRVpWKwIDAQAB AoGAKp3uQvx3vSnX+BwP6Um+RpsvHpwMoW3xue1bEdnVqW8SrlERz+NxZw40ZxDs KSbuuBZD4iTI7BUM5JQVnNm4FQY1YrPlWZLyI73Bj8RKTXrPdJheM/0r7xjiIXbQ 7w4cUSM9rVugnI/rxF2kPIQTGYI+EG/6+P+k6VvgPmC0T/ECQQDUPskiS18WaY+i Koalbrb3GakaBoHrC1b4ln4CAv7fq7H4WvFvqi/2rxLhHYq31iwxYy8s7J7Sba1+ 5vwJ2TxZAkEA0LVfs3Q2VWZ+cM3bv0aYTalMXg6wT+LoNvk9HnOb0zQYajF3qm4G ZFdfEqvOkje0zQ4fcihARKyda/VY84UGIwJBAIZa0FvjNmgrnn7bSKzEbxHwrnkJ EYjGfuGR8mY3mzvfpiM+/oLfSslvfhX+62cALq18yco4ZzlxsFgaxAU//NECQDcS NN94YcHlGqYPW9W7/gI4EwOaoqFhwV6II71+SfbP/0U+KlJZV+xwNZEKrqZcdqPI /zkzL8ovNha/laokRrsCQQCyoPHGcBWj+VFbNoyQnX4tghc6rOY7n4pmpgQvU825 TAM9vnYtSkKK/V56kEDNBO5LwiRsir95IUNclqqMKR1C -----END RSA PRIVATE KEY----- Bag Attributes friendlyName: privatekey localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 subject=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com issuer=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com -----BEGIN CERTIFICATE----- MIICXTCCAcagAwIBAgIIHxTMQUVJRZ0wDQYJKoZIhvcNAQEFBQAwVDFSMFAGA1UE AxNJMTA3OTQzMjM1MDY1OS1udm9nMHZtbjlzNnBxcjNrcjR2MmF2YmM3bmtob2Ex MS5hcHBzLmdvb2dsZXVzZXJjb250ZW50LmNvbTAeFw0xNDExMjQxODAwMDRaFw0y NDExMjExODAwMDRaMFQxUjBQBgNVBAMTSTEwNzk0MzIzNTA2NTktbnZvZzB2bW45 czZwcXIza3I0djJhdmJjN25raG9hMTEuYXBwcy5nb29nbGV1c2VyY29udGVudC5j b20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAK0JYygnYGiTZCgYXJTsDoiU ams71B9RNPSM2suQhszk5Y0i4PHrj48RpNLmDmb7YfP+WZ3mMkpqcAfh1j0tnOxp gA+rUrXsZyaDWESKxh4qyA5yNhiTBGRX6dq0JOwL619aZLS+hGHSMDWsjUngyKRx 6ge2BZ0S6geuum1FWlYrAgMBAAGjODA2MAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/ BAQDAgeAMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMCMA0GCSqGSIb3DQEBBQUAA4GB ACVvKkZkomHq3uffOQwdZ4VJYuxrvDGnZu/ExW9WngO2teEsjxABL41TNnRYHN5T 
lMC19poFA2tR/DySDLJ2XNs/hSvyQUL6HHCncVdR4Srpie88j48peY1MZSMP51Jv qagbbP5K5DSEu02/zZaV0kaCvLEN0KAtj/noDuOOnQU2 -----END CERTIFICATE----- golang-google-cloud-0.9.0/storage/testdata/dummy_rsa000066400000000000000000000032171312234511600225460ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK 1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9 /E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt 3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn 2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3 nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK 6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf 5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1 M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y 1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw 5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84 OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ 5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg== -----END RSA PRIVATE KEY----- 
golang-google-cloud-0.9.0/storage/writer.go000066400000000000000000000130271312234511600206550ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "encoding/base64" "errors" "fmt" "io" "unicode/utf8" "golang.org/x/net/context" "google.golang.org/api/googleapi" raw "google.golang.org/api/storage/v1" ) // A Writer writes a Cloud Storage object. type Writer struct { // ObjectAttrs are optional attributes to set on the object. Any attributes // must be initialized before the first Write call. Nil or zero-valued // attributes are ignored. ObjectAttrs // SendCRC specifies whether to transmit a CRC32C field. It should be set // to true in addition to setting the Writer's CRC32C field, because zero // is a valid CRC and normally a zero would not be transmitted. SendCRC32C bool // ChunkSize controls the maximum number of bytes of the object that the // Writer will attempt to send to the server in a single request. Objects // smaller than the size will be sent in a single request, while larger // objects will be split over multiple requests. The size will be rounded up // to the nearest multiple of 256K. If zero, chunking will be disabled and // the object will be uploaded in a single request. // // ChunkSize will default to a reasonable value. Any custom configuration // must be done before the first Write call. 
ChunkSize int // ProgressFunc can be used to monitor the progress of a large write. // operation. If ProgressFunc is not nil and writing requires multiple // calls to the underlying service (see // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload), // then ProgressFunc will be invoked after each call with the number of bytes of // content copied so far. // // ProgressFunc should return quickly without blocking. ProgressFunc func(int64) ctx context.Context o *ObjectHandle opened bool pw *io.PipeWriter donec chan struct{} // closed after err and obj are set. err error obj *ObjectAttrs } func (w *Writer) open() error { attrs := w.ObjectAttrs // Check the developer didn't change the object Name (this is unfortunate, but // we don't want to store an object under the wrong name). if attrs.Name != w.o.object { return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) } if !utf8.ValidString(attrs.Name) { return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) } pr, pw := io.Pipe() w.pw = pw w.opened = true if w.ChunkSize < 0 { return errors.New("storage: Writer.ChunkSize must non-negative") } mediaOpts := []googleapi.MediaOption{ googleapi.ChunkSize(w.ChunkSize), } if c := attrs.ContentType; c != "" { mediaOpts = append(mediaOpts, googleapi.ContentType(c)) } go func() { defer close(w.donec) rawObj := attrs.toRawObject(w.o.bucket) if w.SendCRC32C { rawObj.Crc32c = encodeUint32(attrs.CRC32C) } if w.MD5 != nil { rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5) } call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj). Media(pr, mediaOpts...). Projection("full"). 
Context(w.ctx) if w.ProgressFunc != nil { call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) }) } if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil { w.err = err pr.CloseWithError(w.err) return } var resp *raw.Object err := applyConds("NewWriter", w.o.gen, w.o.conds, call) if err == nil { setClientHeader(call.Header()) // We will only retry here if the initial POST, which obtains a URI for // the resumable upload, fails with a retryable error. The upload itself // has its own retry logic. err = runWithRetry(w.ctx, func() error { var err2 error resp, err2 = call.Do() return err2 }) } if err != nil { w.err = err pr.CloseWithError(w.err) return } w.obj = newObject(resp) }() return nil } // Write appends to w. It implements the io.Writer interface. // // Since writes happen asynchronously, Write may return a nil // error even though the write failed (or will fail). Always // use the error returned from Writer.Close to determine if // the upload was successful. func (w *Writer) Write(p []byte) (n int, err error) { if w.err != nil { return 0, w.err } if !w.opened { if err := w.open(); err != nil { return 0, err } } return w.pw.Write(p) } // Close completes the write operation and flushes any buffered data. // If Close doesn't return an error, metadata about the written object // can be retrieved by calling Attrs. func (w *Writer) Close() error { if !w.opened { if err := w.open(); err != nil { return err } } if err := w.pw.Close(); err != nil { return err } <-w.donec return w.err } // CloseWithError aborts the write operation with the provided error. // CloseWithError always returns nil. func (w *Writer) CloseWithError(err error) error { if !w.opened { return nil } return w.pw.CloseWithError(err) } // Attrs returns metadata about a successfully-written object. // It's only valid to call it after Close returns nil. 
func (w *Writer) Attrs() *ObjectAttrs { return w.obj } golang-google-cloud-0.9.0/storage/writer_test.go000066400000000000000000000073011312234511600217120ustar00rootroot00000000000000// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package storage import ( "bytes" "crypto/sha256" "encoding/base64" "fmt" "io/ioutil" "net/http" "reflect" "strings" "testing" "golang.org/x/net/context" "google.golang.org/api/option" ) type fakeTransport struct { gotReq *http.Request results []transportResult } type transportResult struct { res *http.Response err error } func (t *fakeTransport) addResult(res *http.Response, err error) { t.results = append(t.results, transportResult{res, err}) } func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) { t.gotReq = req if len(t.results) == 0 { return nil, fmt.Errorf("error handling request") } result := t.results[0] t.results = t.results[1:] return result.res, result.err } func TestErrorOnObjectsInsertCall(t *testing.T) { t.Parallel() ctx := context.Background() doWrite := func(hc *http.Client) *Writer { client, err := NewClient(ctx, option.WithHTTPClient(hc)) if err != nil { t.Fatalf("error when creating client: %v", err) } wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx) wc.ContentType = "text/plain" // We can't check that the Write fails, since it depends on the write to the // underling fakeTransport failing which is racy. 
wc.Write([]byte("hello world")) return wc } wc := doWrite(&http.Client{Transport: &fakeTransport{}}) // Close must always return an error though since it waits for the transport to // have closed. if err := wc.Close(); err == nil { t.Errorf("expected error on close, got nil") } // Retry on 5xx ft := &fakeTransport{} ft.addResult(&http.Response{ StatusCode: 503, Body: ioutil.NopCloser(&bytes.Buffer{}), }, nil) ft.addResult(&http.Response{ StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader("{}")), }, nil) wc = doWrite(&http.Client{Transport: ft}) if err := wc.Close(); err != nil { t.Errorf("got %v, want nil", err) } } func TestEncryption(t *testing.T) { t.Parallel() ctx := context.Background() ft := &fakeTransport{} hc := &http.Client{Transport: ft} client, err := NewClient(ctx, option.WithHTTPClient(hc)) if err != nil { t.Fatalf("error when creating client: %v", err) } obj := client.Bucket("bucketname").Object("filename1") key := []byte("secret-key-that-is-32-bytes-long") wc := obj.Key(key).NewWriter(ctx) // TODO(jba): use something other than fakeTransport, which always returns error. 
wc.Write([]byte("hello world")) wc.Close() if got, want := ft.gotReq.Header.Get("x-goog-encryption-algorithm"), "AES256"; got != want { t.Errorf("algorithm: got %q, want %q", got, want) } gotKey, err := base64.StdEncoding.DecodeString(ft.gotReq.Header.Get("x-goog-encryption-key")) if err != nil { t.Fatalf("decoding key: %v", err) } if !reflect.DeepEqual(gotKey, key) { t.Errorf("key: got %v, want %v", gotKey, key) } wantHash := sha256.Sum256(key) gotHash, err := base64.StdEncoding.DecodeString(ft.gotReq.Header.Get("x-goog-encryption-key-sha256")) if err != nil { t.Fatalf("decoding hash: %v", err) } if !reflect.DeepEqual(gotHash, wantHash[:]) { // wantHash is an array t.Errorf("hash: got\n%v, want\n%v", gotHash, wantHash) } } golang-google-cloud-0.9.0/trace/000077500000000000000000000000001312234511600164415ustar00rootroot00000000000000golang-google-cloud-0.9.0/trace/apiv1/000077500000000000000000000000001312234511600174615ustar00rootroot00000000000000golang-google-cloud-0.9.0/trace/apiv1/ListTraces_smoke_test.go000066400000000000000000000031731312234511600243260ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package trace import ( cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" ) import ( "strconv" "testing" "time" "cloud.google.com/go/internal/testutil" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" ) var _ = iterator.Done var _ = strconv.FormatUint var _ = time.Now func TestTraceServiceSmoke(t *testing.T) { if testing.Short() { t.Skip("skipping smoke test in short mode") } ctx := context.Background() ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) if ts == nil { t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") } projectId := testutil.ProjID() _ = projectId c, err := NewClient(ctx, option.WithTokenSource(ts)) if err != nil { t.Fatal(err) } var projectId2 string = projectId var request = &cloudtracepb.ListTracesRequest{ ProjectId: projectId2, } iter := c.ListTraces(ctx, request) if _, err := iter.Next(); err != nil && err != iterator.Done { t.Error(err) } } golang-google-cloud-0.9.0/trace/apiv1/doc.go000066400000000000000000000032471312234511600205630ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package trace is an experimental, auto-generated package for the // Stackdriver Trace API. // // Send and retrieve trace data from Stackdriver Trace. Data is generated and // available by default for all App Engine applications. 
Data from other // applications can be written to Stackdriver Trace for display, reporting, // and analysis. // // Use the client at cloud.google.com/go/trace in preference to this. package trace // import "cloud.google.com/go/trace/apiv1" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/trace.append", "https://www.googleapis.com/auth/trace.readonly", } } golang-google-cloud-0.9.0/trace/apiv1/mock_test.go000066400000000000000000000203041312234511600217770ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package trace import ( emptypb "github.com/golang/protobuf/ptypes/empty" cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockTraceServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. cloudtracepb.TraceServiceServer reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockTraceServer) ListTraces(ctx context.Context, req *cloudtracepb.ListTracesRequest) (*cloudtracepb.ListTracesResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*cloudtracepb.ListTracesResponse), nil } func (s *mockTraceServer) GetTrace(ctx context.Context, req *cloudtracepb.GetTraceRequest) (*cloudtracepb.Trace, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*cloudtracepb.Trace), nil } func (s *mockTraceServer) PatchTraces(ctx context.Context, req *cloudtracepb.PatchTracesRequest) (*emptypb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := 
md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*emptypb.Empty), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. var clientOpt option.ClientOption var ( mockTrace mockTraceServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() cloudtracepb.RegisterTraceServiceServer(serv, &mockTrace) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestTraceServicePatchTraces(t *testing.T) { var expectedResponse *emptypb.Empty = &emptypb.Empty{} mockTrace.err = nil mockTrace.reqs = nil mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) var projectId string = "projectId-1969970175" var traces *cloudtracepb.Traces = &cloudtracepb.Traces{} var request = &cloudtracepb.PatchTracesRequest{ ProjectId: projectId, Traces: traces, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.PatchTraces(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } } func TestTraceServicePatchTracesError(t *testing.T) { errCode := codes.PermissionDenied mockTrace.err = gstatus.Error(errCode, "test error") var projectId string = "projectId-1969970175" var traces *cloudtracepb.Traces = &cloudtracepb.Traces{} var request = &cloudtracepb.PatchTracesRequest{ ProjectId: projectId, Traces: traces, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } err = c.PatchTraces(context.Background(), request) if st, ok := 
gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } } func TestTraceServiceGetTrace(t *testing.T) { var projectId2 string = "projectId2939242356" var traceId2 string = "traceId2987826376" var expectedResponse = &cloudtracepb.Trace{ ProjectId: projectId2, TraceId: traceId2, } mockTrace.err = nil mockTrace.reqs = nil mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) var projectId string = "projectId-1969970175" var traceId string = "traceId1270300245" var request = &cloudtracepb.GetTraceRequest{ ProjectId: projectId, TraceId: traceId, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetTrace(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestTraceServiceGetTraceError(t *testing.T) { errCode := codes.PermissionDenied mockTrace.err = gstatus.Error(errCode, "test error") var projectId string = "projectId-1969970175" var traceId string = "traceId1270300245" var request = &cloudtracepb.GetTraceRequest{ ProjectId: projectId, TraceId: traceId, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.GetTrace(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } func TestTraceServiceListTraces(t *testing.T) { var nextPageToken string = "" var tracesElement *cloudtracepb.Trace = &cloudtracepb.Trace{} var traces = []*cloudtracepb.Trace{tracesElement} var expectedResponse = &cloudtracepb.ListTracesResponse{ 
NextPageToken: nextPageToken, Traces: traces, } mockTrace.err = nil mockTrace.reqs = nil mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) var projectId string = "projectId-1969970175" var request = &cloudtracepb.ListTracesRequest{ ProjectId: projectId, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListTraces(context.Background(), request).Next() if err != nil { t.Fatal(err) } if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } want := (interface{})(expectedResponse.Traces[0]) got := (interface{})(resp) var ok bool switch want := (want).(type) { case proto.Message: ok = proto.Equal(want, got.(proto.Message)) default: ok = want == got } if !ok { t.Errorf("wrong response %q, want %q)", got, want) } } func TestTraceServiceListTracesError(t *testing.T) { errCode := codes.PermissionDenied mockTrace.err = gstatus.Error(errCode, "test error") var projectId string = "projectId-1969970175" var request = &cloudtracepb.ListTracesRequest{ ProjectId: projectId, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.ListTraces(context.Background(), request).Next() if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } golang-google-cloud-0.9.0/trace/apiv1/trace_client.go000066400000000000000000000174161312234511600224550ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package trace import ( "math" "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // CallOptions contains the retry settings for each method of Client. type CallOptions struct { PatchTraces []gax.CallOption GetTrace []gax.CallOption ListTraces []gax.CallOption } func defaultClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("cloudtrace.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultCallOptions() *CallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 1000 * time.Millisecond, Multiplier: 1.2, }) }), }, } return &CallOptions{ PatchTraces: retry[[2]string{"default", "idempotent"}], GetTrace: retry[[2]string{"default", "idempotent"}], ListTraces: retry[[2]string{"default", "idempotent"}], } } // Client is a client for interacting with Stackdriver Trace API. type Client struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. client cloudtracepb.TraceServiceClient // The call options for this service. 
CallOptions *CallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewClient creates a new trace service client. // // This file describes an API for collecting and viewing traces and spans // within a trace. A Trace is a collection of spans corresponding to a single // operation or set of operations for an application. A span is an individual // timed event which forms a node of the trace tree. Spans for a single trace // may span multiple services. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) if err != nil { return nil, err } c := &Client{ conn: conn, CallOptions: defaultCallOptions(), client: cloudtracepb.NewTraceServiceClient(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *Client) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *Client) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *Client) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // PatchTraces sends new traces to Stackdriver Trace or updates existing traces. If the ID // of a trace that you send matches that of an existing trace, any fields // in the existing trace and its spans are overwritten by the provided values, // and any new fields provided are merged with the existing trace data. If the // ID does not match, a new trace is created. 
func (c *Client) PatchTraces(ctx context.Context, req *cloudtracepb.PatchTracesRequest, opts ...gax.CallOption) error { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.PatchTraces[0:len(c.CallOptions.PatchTraces):len(c.CallOptions.PatchTraces)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.client.PatchTraces(ctx, req, settings.GRPC...) return err }, opts...) return err } // GetTrace gets a single trace by its ID. func (c *Client) GetTrace(ctx context.Context, req *cloudtracepb.GetTraceRequest, opts ...gax.CallOption) (*cloudtracepb.Trace, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.GetTrace[0:len(c.CallOptions.GetTrace):len(c.CallOptions.GetTrace)], opts...) var resp *cloudtracepb.Trace err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.GetTrace(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return resp, nil } // ListTraces returns of a list of traces that match the specified filter conditions. func (c *Client) ListTraces(ctx context.Context, req *cloudtracepb.ListTracesRequest, opts ...gax.CallOption) *TraceIterator { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.ListTraces[0:len(c.CallOptions.ListTraces):len(c.CallOptions.ListTraces)], opts...) it := &TraceIterator{} it.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtracepb.Trace, string, error) { var resp *cloudtracepb.ListTracesResponse req.PageToken = pageToken if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 } else { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.ListTraces(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, "", err } return resp.Traces, resp.NextPageToken, nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) if err != nil { return "", err } it.items = append(it.items, items...) return nextPageToken, nil } it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // TraceIterator manages a stream of *cloudtracepb.Trace. type TraceIterator struct { items []*cloudtracepb.Trace pageInfo *iterator.PageInfo nextFunc func() error // InternalFetch is for use by the Google Cloud Libraries only. // It is not part of the stable interface of this package. // // InternalFetch returns results from a single call to the underlying RPC. // The number of results is no greater than pageSize. // If there are no more results, nextPageToken is empty and err is nil. InternalFetch func(pageSize int, pageToken string) (results []*cloudtracepb.Trace, nextPageToken string, err error) } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. func (it *TraceIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if there are no more // results. Once Next returns Done, all subsequent calls will return Done. func (it *TraceIterator) Next() (*cloudtracepb.Trace, error) { var item *cloudtracepb.Trace if err := it.nextFunc(); err != nil { return item, err } item = it.items[0] it.items = it.items[1:] return item, nil } func (it *TraceIterator) bufLen() int { return len(it.items) } func (it *TraceIterator) takeBuf() interface{} { b := it.items it.items = nil return b } golang-google-cloud-0.9.0/trace/apiv1/trace_client_example_test.go000066400000000000000000000040221312234511600252140ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package trace_test import ( "cloud.google.com/go/trace/apiv1" "golang.org/x/net/context" "google.golang.org/api/iterator" cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" ) func ExampleNewClient() { ctx := context.Background() c, err := trace.NewClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleClient_PatchTraces() { ctx := context.Background() c, err := trace.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &cloudtracepb.PatchTracesRequest{ // TODO: Fill request struct fields. } err = c.PatchTraces(ctx, req) if err != nil { // TODO: Handle error. } } func ExampleClient_GetTrace() { ctx := context.Background() c, err := trace.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &cloudtracepb.GetTraceRequest{ // TODO: Fill request struct fields. } resp, err := c.GetTrace(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } func ExampleClient_ListTraces() { ctx := context.Background() c, err := trace.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &cloudtracepb.ListTracesRequest{ // TODO: Fill request struct fields. } it := c.ListTraces(ctx, req) for { resp, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } // TODO: Use resp. 
_ = resp } } golang-google-cloud-0.9.0/trace/grpc.go000066400000000000000000000075441312234511600177350ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "encoding/hex" "fmt" "cloud.google.com/go/internal/tracecontext" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/grpc" "google.golang.org/grpc/metadata" ) const grpcMetadataKey = "grpc-trace-bin" // GRPCClientInterceptor returns a grpc.UnaryClientInterceptor that traces all outgoing requests from a gRPC client. // The calling context should already have a *trace.Span; a child span will be // created for the outgoing gRPC call. If the calling context doesn't have a span, // the call will not be traced. // // The functionality in gRPC that this feature relies on is currently experimental. func GRPCClientInterceptor() grpc.UnaryClientInterceptor { return grpc.UnaryClientInterceptor(grpcUnaryInterceptor) } func grpcUnaryInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { // TODO: also intercept streams. span := FromContext(ctx).NewChild(method) defer span.Finish() if span != nil { tc := make([]byte, tracecontext.Len) // traceID is a hex-encoded 128-bit value. // TODO(jbd): Decode trace IDs upon arrival and // represent trace IDs with 16 bytes internally. 
tid, err := hex.DecodeString(span.trace.traceID) if err != nil { return invoker(ctx, method, req, reply, cc, opts...) } tracecontext.Encode(tc, tid, span.span.SpanId, byte(span.trace.globalOptions)) md, ok := metadata.FromOutgoingContext(ctx) if !ok { md = metadata.Pairs(grpcMetadataKey, string(tc)) } else { md = md.Copy() // metadata is immutable, copy. md[grpcMetadataKey] = []string{string(tc)} } ctx = metadata.NewOutgoingContext(ctx, md) } err := invoker(ctx, method, req, reply, cc, opts...) if err != nil { // TODO: standardize gRPC label names? span.SetLabel("error", err.Error()) } return err } // GRPCServerInterceptor returns a grpc.UnaryServerInterceptor that enables the tracing of the incoming // gRPC calls. Incoming call's context can be used to extract the span on servers that enabled this option: // // span := trace.FromContext(ctx) // // The functionality in gRPC that this feature relies on is currently experimental. func GRPCServerInterceptor(tc *Client) grpc.UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { md, _ := metadata.FromIncomingContext(ctx) if header, ok := md[grpcMetadataKey]; ok { traceID, spanID, opts, ok := tracecontext.Decode([]byte(header[0])) if !ok { return handler(ctx, req) } // TODO(jbd): Generate a span directly from string(traceID), spanID and opts. header := fmt.Sprintf("%x/%d;o=%d", traceID, spanID, opts) span := tc.SpanFromHeader("", header) defer span.Finish() ctx = NewContext(ctx, span) } return handler(ctx, req) } } // EnableGRPCTracing automatically traces all outgoing gRPC calls from cloud.google.com/go clients. // // The functionality in gRPC that this relies on is currently experimental. // // Deprecated: Use option.WithGRPCDialOption(grpc.WithUnaryInterceptor(GRPCClientInterceptor())) instead. 
var EnableGRPCTracing option.ClientOption = option.WithGRPCDialOption(grpc.WithUnaryInterceptor(GRPCClientInterceptor())) golang-google-cloud-0.9.0/trace/grpc_test.go000066400000000000000000000045101312234511600207620ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "log" "net" "testing" pb "cloud.google.com/go/trace/testdata/helloworld" "golang.org/x/net/context" "google.golang.org/grpc" ) func TestGRPCInterceptors(t *testing.T) { tc := newTestClient(&noopTransport{}) incomingCh := make(chan *Span, 1) addrCh := make(chan net.Addr, 1) go func() { lis, err := net.Listen("tcp", "") if err != nil { t.Fatalf("Failed to listen: %v", err) } addrCh <- lis.Addr() s := grpc.NewServer(grpc.UnaryInterceptor(GRPCServerInterceptor(tc))) pb.RegisterGreeterServer(s, &grpcServer{ fn: func(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { incomingCh <- FromContext(ctx) return &pb.HelloReply{}, nil }, }) if err := s.Serve(lis); err != nil { t.Fatalf("Failed to serve: %v", err) } }() addr := <-addrCh conn, err := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithUnaryInterceptor(GRPCClientInterceptor())) if err != nil { log.Fatalf("Did not connect: %v", err) } defer conn.Close() c := pb.NewGreeterClient(conn) span := tc.NewSpan("parent") outgoingCtx := NewContext(context.Background(), span) _, err = c.SayHello(outgoingCtx, &pb.HelloRequest{}) if err != nil { 
log.Fatalf("Could not SayHello: %v", err) } incomingSpan := <-incomingCh if incomingSpan == nil { t.Fatalf("missing span in the incoming context") } if got, want := incomingSpan.TraceID(), span.TraceID(); got != want { t.Errorf("incoming call is not tracing the outgoing trace; TraceID = %q; want %q", got, want) } } type grpcServer struct { fn func(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) } func (s *grpcServer) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { return s.fn(ctx, in) } golang-google-cloud-0.9.0/trace/http.go000066400000000000000000000062701312234511600177540ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build go1.7 package trace import ( "net/http" ) // Transport is an http.RoundTripper that traces the outgoing requests. // // Transport is safe for concurrent usage. type Transport struct { // Base is the base http.RoundTripper to be used to do the actual request. // // Optional. If nil, http.DefaultTransport is used. Base http.RoundTripper } // RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. // The created span can follow a parent span, if a parent is presented in // the request's context. 
func (t Transport) RoundTrip(req *http.Request) (*http.Response, error) { span := FromContext(req.Context()).NewRemoteChild(req) resp, err := t.base().RoundTrip(req) // TODO(jbd): Is it possible to defer the span.Finish? // In cases where RoundTrip panics, we still can finish the span. span.Finish(WithResponse(resp)) return resp, err } // CancelRequest cancels an in-flight request by closing its connection. func (t Transport) CancelRequest(req *http.Request) { type canceler interface { CancelRequest(*http.Request) } if cr, ok := t.base().(canceler); ok { cr.CancelRequest(req) } } func (t Transport) base() http.RoundTripper { if t.Base != nil { return t.Base } return http.DefaultTransport } // HTTPHandler returns a http.Handler from the given handler // that is aware of the incoming request's span. // The span can be extracted from the incoming request in handler // functions from incoming request's context: // // span := trace.FromContext(r.Context()) // // The span will be auto finished by the handler. func (c *Client) HTTPHandler(h http.Handler) http.Handler { return &handler{traceClient: c, handler: h} } type handler struct { traceClient *Client handler http.Handler } func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { traceID, parentSpanID, options, optionsOk, ok := traceInfoFromHeader(r.Header.Get(httpHeader)) if !ok { traceID = nextTraceID() } t := &trace{ traceID: traceID, client: h.traceClient, globalOptions: options, localOptions: options, } span := startNewChildWithRequest(r, t, parentSpanID) span.span.Kind = spanKindServer span.rootSpan = true configureSpanFromPolicy(span, h.traceClient.policy, ok) defer span.Finish() r = r.WithContext(NewContext(r.Context(), span)) if ok && !optionsOk { // Inject the trace context back to the response with the sampling options. // TODO(jbd): Remove when there is a better way to report the client's sampling. 
w.Header().Set(httpHeader, spanHeader(traceID, parentSpanID, span.trace.localOptions)) } h.handler.ServeHTTP(w, r) } golang-google-cloud-0.9.0/trace/http_test.go000066400000000000000000000077131312234511600210160ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build go1.7 package trace import ( "io/ioutil" "net/http" "net/http/httptest" "strings" "testing" ) type noopTransport struct{} func (rt *noopTransport) RoundTrip(req *http.Request) (*http.Response, error) { resp := &http.Response{ Status: "200 OK", StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader("{}")), } return resp, nil } type recorderTransport struct { ch chan *http.Request } func (rt *recorderTransport) RoundTrip(req *http.Request) (*http.Response, error) { rt.ch <- req resp := &http.Response{ Status: "200 OK", StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader("{}")), } return resp, nil } func TestNewHTTPClient(t *testing.T) { rt := &recorderTransport{ ch: make(chan *http.Request, 1), } tc := newTestClient(&noopTransport{}) client := &http.Client{ Transport: &Transport{ Base: rt, }, } req, _ := http.NewRequest("GET", "http://example.com", nil) t.Run("NoTrace", func(t *testing.T) { _, err := client.Do(req) if err != nil { t.Error(err) } outgoing := <-rt.ch if got, want := outgoing.Header.Get(httpHeader), ""; want != got { t.Errorf("got trace header = %q; want none", got) } }) t.Run("Trace", func(t *testing.T) 
{ span := tc.NewSpan("/foo") req = req.WithContext(NewContext(req.Context(), span)) _, err := client.Do(req) if err != nil { t.Error(err) } outgoing := <-rt.ch s := tc.SpanFromHeader("/foo", outgoing.Header.Get(httpHeader)) if got, want := s.TraceID(), span.TraceID(); got != want { t.Errorf("trace ID = %q; want %q", got, want) } }) } func TestHTTPHandlerNoTrace(t *testing.T) { tc := newTestClient(&noopTransport{}) client := &http.Client{ Transport: &Transport{}, } handler := tc.HTTPHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { span := FromContext(r.Context()) if span == nil { t.Errorf("span is nil; want non-nil span") } })) ts := httptest.NewServer(handler) defer ts.Close() req, _ := http.NewRequest("GET", ts.URL, nil) _, err := client.Do(req) if err != nil { t.Fatal(err) } } func TestHTTPHandler_response(t *testing.T) { tc := newTestClient(&noopTransport{}) p, _ := NewLimitedSampler(1, 1<<32) // all tc.SetSamplingPolicy(p) handler := tc.HTTPHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) ts := httptest.NewServer(handler) defer ts.Close() tests := []struct { name string traceHeader string wantTraceHeader string }{ { name: "no global", traceHeader: "0123456789ABCDEF0123456789ABCDEF/123", wantTraceHeader: "0123456789ABCDEF0123456789ABCDEF/123;o=1", }, { name: "global=1", traceHeader: "0123456789ABCDEF0123456789ABCDEF/123;o=1", wantTraceHeader: "", }, { name: "global=0", traceHeader: "0123456789ABCDEF0123456789ABCDEF/123;o=0", wantTraceHeader: "", }, { name: "no trace context", traceHeader: "", wantTraceHeader: "", }, } for _, tt := range tests { req, _ := http.NewRequest("GET", ts.URL, nil) req.Header.Set(httpHeader, tt.traceHeader) res, err := http.DefaultClient.Do(req) if err != nil { t.Errorf("failed to request: %v", err) } if got, want := res.Header.Get(httpHeader), tt.wantTraceHeader; got != want { t.Errorf("%v: response context header = %q; want %q", tt.name, got, want) } } } 
golang-google-cloud-0.9.0/trace/httpexample_test.go000066400000000000000000000030741312234511600223660ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build go1.7 package trace_test import ( "log" "net/http" "cloud.google.com/go/trace" ) var traceClient *trace.Client func ExampleHTTPClient_Do() { client := http.Client{ Transport: &trace.Transport{}, } span := traceClient.NewSpan("/foo") // traceClient is a *trace.Client req, _ := http.NewRequest("GET", "https://metadata/users", nil) req = req.WithContext(trace.NewContext(req.Context(), span)) if _, err := client.Do(req); err != nil { log.Fatal(err) } } func ExampleClient_HTTPHandler() { handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { client := http.Client{ Transport: &trace.Transport{}, } req, _ := http.NewRequest("GET", "https://metadata/users", nil) req = req.WithContext(r.Context()) // The outgoing request will be traced with r's trace ID. if _, err := client.Do(req); err != nil { log.Fatal(err) } }) http.Handle("/foo", traceClient.HTTPHandler(handler)) // traceClient is a *trace.Client } golang-google-cloud-0.9.0/trace/sampling.go000066400000000000000000000066521312234511600206130ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( crand "crypto/rand" "encoding/binary" "fmt" "math/rand" "sync" "time" "golang.org/x/time/rate" ) type SamplingPolicy interface { // Sample returns a Decision. // If Trace is false in the returned Decision, then the Decision should be // the zero value. Sample(p Parameters) Decision } // Parameters contains the values passed to a SamplingPolicy's Sample method. type Parameters struct { HasTraceHeader bool // whether the incoming request has a valid X-Cloud-Trace-Context header. } // Decision is the value returned by a call to a SamplingPolicy's Sample method. type Decision struct { Trace bool // Whether to trace the request. Sample bool // Whether the trace is included in the random sample. Policy string // Name of the sampling policy. Weight float64 // Sample weight to be used in statistical calculations. } type sampler struct { fraction float64 skipped float64 *rate.Limiter *rand.Rand sync.Mutex } func (s *sampler) Sample(p Parameters) Decision { s.Lock() x := s.Float64() d := s.sample(p, time.Now(), x) s.Unlock() return d } // sample contains the a deterministic, time-independent logic of Sample. func (s *sampler) sample(p Parameters, now time.Time, x float64) (d Decision) { d.Sample = x < s.fraction d.Trace = p.HasTraceHeader || d.Sample if !d.Trace { // We have no reason to trace this request. return Decision{} } // We test separately that the rate limit is not tiny before calling AllowN, // because of overflow problems in x/time/rate. if s.Limit() < 1e-9 || !s.AllowN(now, 1) { // Rejected by the rate limit. 
if d.Sample { s.skipped++ } return Decision{} } if d.Sample { d.Policy, d.Weight = "default", (1.0+s.skipped)/s.fraction s.skipped = 0.0 } return } // NewLimitedSampler returns a sampling policy that randomly samples a given // fraction of requests. It also enforces a limit on the number of traces per // second. It tries to trace every request with a trace header, but will not // exceed the qps limit to do it. func NewLimitedSampler(fraction, maxqps float64) (SamplingPolicy, error) { if !(fraction >= 0) { return nil, fmt.Errorf("invalid fraction %f", fraction) } if !(maxqps >= 0) { return nil, fmt.Errorf("invalid maxqps %f", maxqps) } // Set a limit on the number of accumulated "tokens", to limit bursts of // traced requests. Use one more than a second's worth of tokens, or 100, // whichever is smaller. // See https://godoc.org/golang.org/x/time/rate#NewLimiter. maxTokens := 100 if maxqps < 99.0 { maxTokens = 1 + int(maxqps) } var seed int64 if err := binary.Read(crand.Reader, binary.LittleEndian, &seed); err != nil { seed = time.Now().UnixNano() } s := sampler{ fraction: fraction, Limiter: rate.NewLimiter(rate.Limit(maxqps), maxTokens), Rand: rand.New(rand.NewSource(seed)), } return &s, nil } golang-google-cloud-0.9.0/trace/testdata/000077500000000000000000000000001312234511600202525ustar00rootroot00000000000000golang-google-cloud-0.9.0/trace/testdata/helloworld/000077500000000000000000000000001312234511600224255ustar00rootroot00000000000000golang-google-cloud-0.9.0/trace/testdata/helloworld/helloworld.pb.go000066400000000000000000000132031312234511600255260ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package helloworld is a generated protocol buffer package. It is generated from these files: helloworld.proto It has these top-level messages: HelloRequest HelloReply */ package helloworld import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // The request message containing the user's name. 
type HelloRequest struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *HelloRequest) Reset() { *m = HelloRequest{} } func (m *HelloRequest) String() string { return proto.CompactTextString(m) } func (*HelloRequest) ProtoMessage() {} func (*HelloRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // The response message containing the greetings type HelloReply struct { Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` } func (m *HelloReply) Reset() { *m = HelloReply{} } func (m *HelloReply) String() string { return proto.CompactTextString(m) } func (*HelloReply) ProtoMessage() {} func (*HelloReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func init() { proto.RegisterType((*HelloRequest)(nil), "helloworld.HelloRequest") proto.RegisterType((*HelloReply)(nil), "helloworld.HelloReply") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for Greeter service type GreeterClient interface { // Sends a greeting SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) } type greeterClient struct { cc *grpc.ClientConn } func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { return &greeterClient{cc} } func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { out := new(HelloReply) err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, c.cc, opts...) 
if err != nil { return nil, err } return out, nil } // Server API for Greeter service type GreeterServer interface { // Sends a greeting SayHello(context.Context, *HelloRequest) (*HelloReply, error) } func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { s.RegisterService(&_Greeter_serviceDesc, srv) } func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(HelloRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(GreeterServer).SayHello(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/helloworld.Greeter/SayHello", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) } return interceptor(ctx, in, info, handler) } var _Greeter_serviceDesc = grpc.ServiceDesc{ ServiceName: "helloworld.Greeter", HandlerType: (*GreeterServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "SayHello", Handler: _Greeter_SayHello_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "helloworld.proto", } func init() { proto.RegisterFile("helloworld.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 174 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0xcd, 0xc9, 0xc9, 0x2f, 0xcf, 0x2f, 0xca, 0x49, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88, 0x28, 0x29, 0x71, 0xf1, 0x78, 0x80, 0x78, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x92, 0x1a, 0x17, 0x17, 0x54, 0x4d, 0x41, 0x4e, 0xa5, 0x90, 0x04, 0x17, 0x7b, 0x6e, 0x6a, 0x71, 0x71, 0x62, 0x3a, 0x4c, 0x11, 0x8c, 0x6b, 0xe4, 0xc9, 0xc5, 0xee, 0x5e, 0x94, 0x9a, 0x5a, 0x92, 0x5a, 0x24, 0x64, 0xc7, 0xc5, 0x11, 0x9c, 0x58, 0x09, 0xd6, 0x25, 0x24, 0xa1, 0x87, 0xe4, 0x02, 0x64, 0xcb, 
0xa4, 0xc4, 0xb0, 0xc8, 0x00, 0xad, 0x50, 0x62, 0x70, 0x32, 0xe0, 0x92, 0xce, 0xcc, 0xd7, 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x4b, 0xad, 0x48, 0xcc, 0x2d, 0xc8, 0x49, 0x2d, 0x46, 0x52, 0xeb, 0xc4, 0x0f, 0x56, 0x1c, 0x0e, 0x62, 0x07, 0x80, 0xbc, 0x14, 0xc0, 0x98, 0xc4, 0x06, 0xf6, 0x9b, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb7, 0xcd, 0xf2, 0xef, 0x00, 0x00, 0x00, } golang-google-cloud-0.9.0/trace/testdata/helloworld/helloworld.proto000066400000000000000000000021311312234511600256620ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; option java_multiple_files = true; option java_package = "io.grpc.examples.helloworld"; option java_outer_classname = "HelloWorldProto"; package helloworld; // The greeting service definition. service Greeter { // Sends a greeting rpc SayHello (HelloRequest) returns (HelloReply) {} } // The request message containing the user's name. message HelloRequest { string name = 1; } // The response message containing the greetings message HelloReply { string message = 1; } golang-google-cloud-0.9.0/trace/trace.go000066400000000000000000000612231312234511600200720ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package trace is a Google Stackdriver Trace library. // // This package is still experimental and subject to change. // // See https://cloud.google.com/trace/api/#data_model for a discussion of traces // and spans. // // To initialize a client that connects to the Stackdriver Trace server, use the // NewClient function. Generally you will want to do this on program // initialization. // // import "cloud.google.com/go/trace" // ... // traceClient, err = trace.NewClient(ctx, projectID) // // Calling SpanFromRequest will create a new trace span for an incoming HTTP // request. If the request contains a trace context header, it is used to // determine the trace ID. Otherwise, a new trace ID is created. // // func handler(w http.ResponseWriter, r *http.Request) { // span := traceClient.SpanFromRequest(r) // defer span.Finish() // ... // } // // SpanFromRequest and NewSpan returns nil if the *Client is nil, so you can disable // tracing by not initializing your *Client variable. All of the exported // functions on *Span do nothing when the *Span is nil. // // If you need to start traces that don't correspond to an incoming HTTP request, // you can use NewSpan to create a root-level span. // // span := traceClient.NewSpan("span name") // defer span.Finish() // // Although a trace span object is created for every request, only a subset of // traces are uploaded to the server, for efficiency. By default, the requests // that are traced are those with the tracing bit set in the options field of // the trace context header. 
Ideally, you should override this behaviour by // calling SetSamplingPolicy. NewLimitedSampler returns an implementation of // SamplingPolicy which traces requests that have the tracing bit set, and also // randomly traces a specified fraction of requests. Additionally, it sets a // limit on the number of requests traced per second. The following example // traces one in every thousand requests, up to a limit of 5 per second. // // p, err := trace.NewLimitedSampler(0.001, 5) // traceClient.SetSamplingPolicy(p) // // You can create a new span as a child of an existing span with NewChild. // // childSpan := span.NewChild(name) // ... // childSpan.Finish() // // When sending an HTTP request to another server, NewRemoteChild will create // a span to represent the time the current program waits for the request to // complete, and attach a header to the outgoing request so that the trace will // be propagated to the destination server. // // childSpan := span.NewRemoteChild(&httpRequest) // ... // childSpan.Finish() // // Alternatively, if you have access to the X-Cloud-Trace-Context header value // but not the underlying HTTP request (this can happen if you are using a // different transport or messaging protocol, such as gRPC), you can use // SpanFromHeader instead of SpanFromRequest. In that case, you will need to // specify the span name explicility, since it cannot be constructed from the // HTTP request's URL and method. // // func handler(r *somepkg.Request) { // span := traceClient.SpanFromHeader("span name", r.TraceContext()) // defer span.Finish() // ... // } // // Spans can contain a map from keys to values that have useful information // about the span. The elements of this map are called labels. Some labels, // whose keys all begin with the string "trace.cloud.google.com/", are set // automatically in the following ways: // // - SpanFromRequest sets some labels to data about the incoming request. 
// // - NewRemoteChild sets some labels to data about the outgoing request. // // - Finish sets a label to a stack trace, if the stack trace option is enabled // in the incoming trace header. // // - The WithResponse option sets some labels to data about a response. // You can also set labels using SetLabel. If a label is given a value // automatically and by SetLabel, the automatically-set value is used. // // span.SetLabel(key, value) // // The WithResponse option can be used when Finish is called. // // childSpan := span.NewRemoteChild(outgoingReq) // resp, err := http.DefaultClient.Do(outgoingReq) // ... // childSpan.Finish(trace.WithResponse(resp)) // // When a span created by SpanFromRequest or SpamFromHeader is finished, the // finished spans in the corresponding trace -- the span itself and its // descendants -- are uploaded to the Stackdriver Trace server using the // *Client that created the span. Finish returns immediately, and uploading // occurs asynchronously. You can use the FinishWait function instead to wait // until uploading has finished. // // err := span.FinishWait() // // Using contexts to pass *trace.Span objects through your program will often // be a better approach than passing them around explicitly. This allows trace // spans, and other request-scoped or part-of-request-scoped values, to be // easily passed through API boundaries. Various Google Cloud libraries will // retrieve trace spans from contexts and automatically create child spans for // API requests. // See https://blog.golang.org/context for more discussion of contexts. // A derived context containing a trace span can be created using NewContext. // // span := traceClient.SpanFromRequest(r) // ctx = trace.NewContext(ctx, span) // // The span can be retrieved from a context elsewhere in the program using // FromContext. // // func foo(ctx context.Context) { // span := trace.FromContext(ctx).NewChild("in foo") // defer span.Finish() // ... 
// } // package trace // import "cloud.google.com/go/trace" import ( "crypto/rand" "encoding/binary" "encoding/json" "fmt" "log" "net/http" "runtime" "strconv" "strings" "sync" "sync/atomic" "time" "golang.org/x/net/context" api "google.golang.org/api/cloudtrace/v1" "google.golang.org/api/gensupport" "google.golang.org/api/option" "google.golang.org/api/support/bundler" "google.golang.org/api/transport" ) const ( httpHeader = `X-Cloud-Trace-Context` userAgent = `gcloud-golang-trace/20160501` cloudPlatformScope = `https://www.googleapis.com/auth/cloud-platform` spanKindClient = `RPC_CLIENT` spanKindServer = `RPC_SERVER` spanKindUnspecified = `SPAN_KIND_UNSPECIFIED` maxStackFrames = 20 labelHost = `trace.cloud.google.com/http/host` labelMethod = `trace.cloud.google.com/http/method` labelStackTrace = `trace.cloud.google.com/stacktrace` labelStatusCode = `trace.cloud.google.com/http/status_code` labelURL = `trace.cloud.google.com/http/url` labelSamplingPolicy = `trace.cloud.google.com/sampling_policy` labelSamplingWeight = `trace.cloud.google.com/sampling_weight` ) const ( // ScopeTraceAppend grants permissions to write trace data for a project. ScopeTraceAppend = "https://www.googleapis.com/auth/trace.append" // ScopeCloudPlatform grants permissions to view and manage your data // across Google Cloud Platform services. ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" ) type contextKey struct{} type stackLabelValue struct { Frames []stackFrame `json:"stack_frame"` } type stackFrame struct { Class string `json:"class_name,omitempty"` Method string `json:"method_name"` Filename string `json:"file_name"` Line int64 `json:"line_number"` } var ( spanIDCounter uint64 spanIDIncrement uint64 ) func init() { // Set spanIDCounter and spanIDIncrement to random values. nextSpanID will // return an arithmetic progression using these values, skipping zero. We set // the LSB of spanIDIncrement to 1, so that the cycle length is 2^64. 
binary.Read(rand.Reader, binary.LittleEndian, &spanIDCounter) binary.Read(rand.Reader, binary.LittleEndian, &spanIDIncrement) spanIDIncrement |= 1 // Attach hook for autogenerated Google API calls. This will automatically // create trace spans for API calls if there is a trace in the context. gensupport.RegisterHook(requestHook) } func requestHook(ctx context.Context, req *http.Request) func(resp *http.Response) { span := FromContext(ctx) if span == nil || req == nil { return nil } span = span.NewRemoteChild(req) return func(resp *http.Response) { if resp != nil { span.Finish(WithResponse(resp)) } else { span.Finish() } } } // nextSpanID returns a new span ID. It will never return zero. func nextSpanID() uint64 { var id uint64 for id == 0 { id = atomic.AddUint64(&spanIDCounter, spanIDIncrement) } return id } // nextTraceID returns a new trace ID. func nextTraceID() string { id1 := nextSpanID() id2 := nextSpanID() return fmt.Sprintf("%016x%016x", id1, id2) } // Client is a client for uploading traces to the Google Stackdriver Trace server. type Client struct { service *api.Service projectID string policy SamplingPolicy bundler *bundler.Bundler } // NewClient creates a new Google Stackdriver Trace client. func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { o := []option.ClientOption{ option.WithScopes(cloudPlatformScope), option.WithUserAgent(userAgent), } o = append(o, opts...) hc, basePath, err := transport.NewHTTPClient(ctx, o...) if err != nil { return nil, fmt.Errorf("creating HTTP client for Google Stackdriver Trace API: %v", err) } apiService, err := api.New(hc) if err != nil { return nil, fmt.Errorf("creating Google Stackdriver Trace API client: %v", err) } if basePath != "" { // An option set a basepath, so override api.New's default. 
apiService.BasePath = basePath } c := &Client{ service: apiService, projectID: projectID, } bundler := bundler.NewBundler((*api.Trace)(nil), func(bundle interface{}) { traces := bundle.([]*api.Trace) err := c.upload(traces) if err != nil { log.Printf("failed to upload %d traces to the Cloud Trace server: %v", len(traces), err) } }) bundler.DelayThreshold = 2 * time.Second bundler.BundleCountThreshold = 100 // We're not measuring bytes here, we're counting traces and spans as one "byte" each. bundler.BundleByteThreshold = 1000 bundler.BundleByteLimit = 1000 bundler.BufferedByteLimit = 10000 c.bundler = bundler return c, nil } // SetSamplingPolicy sets the SamplingPolicy that determines how often traces // are initiated by this client. func (c *Client) SetSamplingPolicy(p SamplingPolicy) { if c != nil { c.policy = p } } // SpanFromHeader returns a new trace span, based on a provided request header // value. See https://cloud.google.com/trace/docs/faq. // // It returns nil iff the client is nil. // // The trace information and identifiers will be read from the header value. // Otherwise, a new trace ID is made and the parent span ID is zero. // // The name of the new span is provided as an argument. // // If a non-nil sampling policy has been set in the client, it can override // the options set in the header and choose whether to trace the request. // // If the header doesn't have existing tracing information, then a *Span is // returned anyway, but it will not be uploaded to the server, just as when // calling SpanFromRequest on an untraced request. // // Most users using HTTP should use SpanFromRequest, rather than // SpanFromHeader, since it provides additional functionality for HTTP // requests. In particular, it will set various pieces of request information // as labels on the *Span, which is not available from the header alone. 
func (c *Client) SpanFromHeader(name string, header string) *Span { if c == nil { return nil } traceID, parentSpanID, options, _, ok := traceInfoFromHeader(header) if !ok { traceID = nextTraceID() } t := &trace{ traceID: traceID, client: c, globalOptions: options, localOptions: options, } span := startNewChild(name, t, parentSpanID) span.span.Kind = spanKindServer span.rootSpan = true configureSpanFromPolicy(span, c.policy, ok) return span } // SpanFromRequest returns a new trace span for an HTTP request. // // It returns nil iff the client is nil. // // If the incoming HTTP request contains a trace context header, the trace ID, // parent span ID, and tracing options will be read from that header. // Otherwise, a new trace ID is made and the parent span ID is zero. // // If a non-nil sampling policy has been set in the client, it can override the // options set in the header and choose whether to trace the request. // // If the request is not being traced, then a *Span is returned anyway, but it // will not be uploaded to the server -- it is only useful for propagating // trace context to child requests and for getting the TraceID. All its // methods can still be called -- the Finish, FinishWait, and SetLabel methods // do nothing. NewChild does nothing, and returns the same *Span. TraceID // works as usual. func (c *Client) SpanFromRequest(r *http.Request) *Span { if c == nil { return nil } traceID, parentSpanID, options, _, ok := traceInfoFromHeader(r.Header.Get(httpHeader)) if !ok { traceID = nextTraceID() } t := &trace{ traceID: traceID, client: c, globalOptions: options, localOptions: options, } span := startNewChildWithRequest(r, t, parentSpanID) span.span.Kind = spanKindServer span.rootSpan = true configureSpanFromPolicy(span, c.policy, ok) return span } // NewSpan returns a new trace span with the given name. // // A new trace and span ID is generated to trace the span. // Returned span need to be finished by calling Finish or FinishWait. 
func (c *Client) NewSpan(name string) *Span { if c == nil { return nil } t := &trace{ traceID: nextTraceID(), client: c, localOptions: optionTrace, globalOptions: optionTrace, } span := startNewChild(name, t, 0) span.span.Kind = spanKindUnspecified span.rootSpan = true configureSpanFromPolicy(span, c.policy, false) return span } func configureSpanFromPolicy(s *Span, p SamplingPolicy, ok bool) { if p == nil { return } d := p.Sample(Parameters{HasTraceHeader: ok}) if d.Trace { // Turn on tracing locally, and in child requests. s.trace.localOptions |= optionTrace s.trace.globalOptions |= optionTrace } else { // Turn off tracing locally. s.trace.localOptions = 0 return } if d.Sample { // This trace is in the random sample, so set the labels. s.SetLabel(labelSamplingPolicy, d.Policy) s.SetLabel(labelSamplingWeight, fmt.Sprint(d.Weight)) } } // NewContext returns a derived context containing the span. func NewContext(ctx context.Context, s *Span) context.Context { if s == nil { return ctx } return context.WithValue(ctx, contextKey{}, s) } // FromContext returns the span contained in the context, or nil. func FromContext(ctx context.Context) *Span { s, _ := ctx.Value(contextKey{}).(*Span) return s } func traceInfoFromHeader(h string) (traceID string, spanID uint64, options optionFlags, optionsOk bool, ok bool) { // See https://cloud.google.com/trace/docs/faq for the header format. // Return if the header is empty or missing, or if the header is unreasonably // large, to avoid making unnecessary copies of a large string. if h == "" || len(h) > 200 { return "", 0, 0, false, false } // Parse the trace id field. slash := strings.Index(h, `/`) if slash == -1 { return "", 0, 0, false, false } traceID, h = h[:slash], h[slash+1:] // Parse the span id field. 
spanstr := h semicolon := strings.Index(h, `;`) if semicolon != -1 { spanstr, h = h[:semicolon], h[semicolon+1:] } spanID, err := strconv.ParseUint(spanstr, 10, 64) if err != nil { return "", 0, 0, false, false } // Parse the options field, options field is optional. if !strings.HasPrefix(h, "o=") { return traceID, spanID, 0, false, true } o, err := strconv.ParseUint(h[2:], 10, 64) if err != nil { return "", 0, 0, false, false } options = optionFlags(o) return traceID, spanID, options, true, true } type optionFlags uint32 const ( optionTrace optionFlags = 1 << iota optionStack ) type trace struct { mu sync.Mutex client *Client traceID string globalOptions optionFlags // options that will be passed to any child requests localOptions optionFlags // options applied in this server spans []*Span // finished spans for this trace. } // finish appends s to t.spans. If s is the root span, uploads the trace to the // server. func (t *trace) finish(s *Span, wait bool, opts ...FinishOption) error { for _, o := range opts { o.modifySpan(s) } s.end = time.Now() t.mu.Lock() t.spans = append(t.spans, s) spans := t.spans t.mu.Unlock() if s.rootSpan { if wait { return t.client.upload([]*api.Trace{t.constructTrace(spans)}) } go func() { tr := t.constructTrace(spans) err := t.client.bundler.Add(tr, 1+len(spans)) if err == bundler.ErrOversizedItem { err = t.client.upload([]*api.Trace{tr}) } if err != nil { log.Println("error uploading trace:", err) } }() } return nil } func (t *trace) constructTrace(spans []*Span) *api.Trace { apiSpans := make([]*api.TraceSpan, len(spans)) for i, sp := range spans { sp.span.StartTime = sp.start.In(time.UTC).Format(time.RFC3339Nano) sp.span.EndTime = sp.end.In(time.UTC).Format(time.RFC3339Nano) if t.localOptions&optionStack != 0 { sp.setStackLabel() } sp.SetLabel(labelHost, sp.host) sp.SetLabel(labelURL, sp.url) sp.SetLabel(labelMethod, sp.method) if sp.statusCode != 0 { sp.SetLabel(labelStatusCode, strconv.Itoa(sp.statusCode)) } apiSpans[i] = &sp.span 
} return &api.Trace{ ProjectId: t.client.projectID, TraceId: t.traceID, Spans: apiSpans, } } func (c *Client) upload(traces []*api.Trace) error { _, err := c.service.Projects.PatchTraces(c.projectID, &api.Traces{Traces: traces}).Do() return err } // Span contains information about one span of a trace. type Span struct { trace *trace spanMu sync.Mutex // guards span.Labels span api.TraceSpan start time.Time end time.Time rootSpan bool stack [maxStackFrames]uintptr host string method string url string statusCode int } func (s *Span) tracing() bool { return s.trace.localOptions&optionTrace != 0 } // NewChild creates a new span with the given name as a child of s. // If s is nil, does nothing and returns nil. func (s *Span) NewChild(name string) *Span { if s == nil { return nil } if !s.tracing() { return s } return startNewChild(name, s.trace, s.span.SpanId) } // NewRemoteChild creates a new span as a child of s. // // Some labels in the span are set from the outgoing *http.Request r. // // A header is set in r so that the trace context is propagated to the // destination. The parent span ID in that header is set as follows: // - If the request is being traced, then the ID of s is used. // - If the request is not being traced, but there was a trace context header // in the incoming request for this trace (the request passed to // SpanFromRequest), the parent span ID in that header is used. // - Otherwise, the parent span ID is zero. // The tracing bit in the options is set if tracing is enabled, or if it was // set in the incoming request. // // If s is nil, does nothing and returns nil. 
func (s *Span) NewRemoteChild(r *http.Request) *Span { if s == nil { return nil } if !s.tracing() { r.Header[httpHeader] = []string{spanHeader(s.trace.traceID, s.span.ParentSpanId, s.trace.globalOptions)} return s } newSpan := startNewChildWithRequest(r, s.trace, s.span.SpanId) r.Header[httpHeader] = []string{spanHeader(s.trace.traceID, newSpan.span.SpanId, s.trace.globalOptions)} return newSpan } // Header returns the value of the X-Cloud-Trace-Context header that // should be used to propagate the span. This is the inverse of // SpanFromHeader. // // Most users should use NewRemoteChild unless they have specific // propagation needs or want to control the naming of their span. // Header() does not create a new span. func (s *Span) Header() string { if s == nil { return "" } return spanHeader(s.trace.traceID, s.span.SpanId, s.trace.globalOptions) } func startNewChildWithRequest(r *http.Request, trace *trace, parentSpanID uint64) *Span { name := r.URL.Host + r.URL.Path // drop scheme and query params newSpan := startNewChild(name, trace, parentSpanID) if r.Host == "" { newSpan.host = r.URL.Host } else { newSpan.host = r.Host } newSpan.method = r.Method newSpan.url = r.URL.String() return newSpan } func startNewChild(name string, trace *trace, parentSpanID uint64) *Span { spanID := nextSpanID() for spanID == parentSpanID { spanID = nextSpanID() } newSpan := &Span{ trace: trace, span: api.TraceSpan{ Kind: spanKindClient, Name: name, ParentSpanId: parentSpanID, SpanId: spanID, }, start: time.Now(), } if trace.localOptions&optionStack != 0 { _ = runtime.Callers(1, newSpan.stack[:]) } return newSpan } // TraceID returns the ID of the trace to which s belongs. func (s *Span) TraceID() string { if s == nil { return "" } return s.trace.traceID } // SetLabel sets the label for the given key to the given value. // If the value is empty, the label for that key is deleted. // If a label is given a value automatically and by SetLabel, the // automatically-set value is used. 
// If s is nil, does nothing. // // SetLabel shouldn't be called after Finish or FinishWait. func (s *Span) SetLabel(key, value string) { if s == nil { return } if !s.tracing() { return } s.spanMu.Lock() defer s.spanMu.Unlock() if value == "" { if s.span.Labels != nil { delete(s.span.Labels, key) } return } if s.span.Labels == nil { s.span.Labels = make(map[string]string) } s.span.Labels[key] = value } type FinishOption interface { modifySpan(s *Span) } type withResponse struct { *http.Response } // WithResponse returns an option that can be passed to Finish that indicates // that some labels for the span should be set using the given *http.Response. func WithResponse(resp *http.Response) FinishOption { return withResponse{resp} } func (u withResponse) modifySpan(s *Span) { if u.Response != nil { s.statusCode = u.StatusCode } } // Finish declares that the span has finished. // // If s is nil, Finish does nothing and returns nil. // // If the option trace.WithResponse(resp) is passed, then some labels are set // for s using information in the given *http.Response. This is useful when the // span is for an outgoing http request; s will typically have been created by // NewRemoteChild in this case. // // If s is a root span (one created by SpanFromRequest) then s, and all its // descendant spans that have finished, are uploaded to the Google Stackdriver // Trace server asynchronously. func (s *Span) Finish(opts ...FinishOption) { if s == nil { return } if !s.tracing() { return } s.trace.finish(s, false, opts...) } // FinishWait is like Finish, but if s is a root span, it waits until uploading // is finished, then returns an error if one occurred. func (s *Span) FinishWait(opts ...FinishOption) error { if s == nil { return nil } if !s.tracing() { return nil } return s.trace.finish(s, true, opts...) } func spanHeader(traceID string, spanID uint64, options optionFlags) string { // See https://cloud.google.com/trace/docs/faq for the header format. 
return fmt.Sprintf("%s/%d;o=%d", traceID, spanID, options) } func (s *Span) setStackLabel() { var stack stackLabelValue lastSigPanic, inTraceLibrary := false, true for _, pc := range s.stack { if pc == 0 { break } if !lastSigPanic { pc-- } fn := runtime.FuncForPC(pc) file, line := fn.FileLine(pc) // Name has one of the following forms: // path/to/package.Foo // path/to/package.(Type).Foo // For the first form, we store the whole name in the Method field of the // stack frame. For the second form, we set the Method field to "Foo" and // the Class field to "path/to/package.(Type)". name := fn.Name() if inTraceLibrary && !strings.HasPrefix(name, "cloud.google.com/go/trace.") { inTraceLibrary = false } var class string if i := strings.Index(name, ")."); i != -1 { class, name = name[:i+1], name[i+2:] } frame := stackFrame{ Class: class, Method: name, Filename: file, Line: int64(line), } if inTraceLibrary && len(stack.Frames) == 1 { stack.Frames[0] = frame } else { stack.Frames = append(stack.Frames, frame) } lastSigPanic = fn.Name() == "runtime.sigpanic" } if label, err := json.Marshal(stack); err == nil { s.SetLabel(labelStackTrace, string(label)) } } golang-google-cloud-0.9.0/trace/trace_test.go000066400000000000000000000663531312234511600211420ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package trace import ( "encoding/json" "errors" "fmt" "io/ioutil" "math/rand" "net/http" "reflect" "regexp" "strings" "sync" "testing" "time" "cloud.google.com/go/datastore" "cloud.google.com/go/internal/testutil" "cloud.google.com/go/storage" "golang.org/x/net/context" api "google.golang.org/api/cloudtrace/v1" compute "google.golang.org/api/compute/v1" "google.golang.org/api/iterator" "google.golang.org/api/option" dspb "google.golang.org/genproto/googleapis/datastore/v1" "google.golang.org/grpc" ) const testProjectID = "testproject" type fakeRoundTripper struct { reqc chan *http.Request } func newFakeRoundTripper() *fakeRoundTripper { return &fakeRoundTripper{reqc: make(chan *http.Request)} } func (rt *fakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { rt.reqc <- r resp := &http.Response{ Status: "200 OK", StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader("{}")), } return resp, nil } func newTestClient(rt http.RoundTripper) *Client { t, err := NewClient(context.Background(), testProjectID, option.WithHTTPClient(&http.Client{Transport: rt})) if err != nil { panic(err) } return t } type fakeDatastoreServer struct { dspb.DatastoreServer fail bool } func (f *fakeDatastoreServer) Lookup(ctx context.Context, req *dspb.LookupRequest) (*dspb.LookupResponse, error) { if f.fail { return nil, errors.New("lookup failed") } return &dspb.LookupResponse{}, nil } // makeRequests makes some requests. // span is the root span. rt is the trace client's http client's transport. // This is used to retrieve the trace uploaded by the client, if any. If // expectTrace is true, we expect a trace will be uploaded. If synchronous is // true, the call to Finish is expected not to return before the client has // uploaded any traces. func makeRequests(t *testing.T, span *Span, rt *fakeRoundTripper, synchronous bool, expectTrace bool) *http.Request { ctx := NewContext(context.Background(), span) // An HTTP request. 
{ req2, err := http.NewRequest("GET", "http://example.com/bar", nil) if err != nil { t.Fatal(err) } resp := &http.Response{StatusCode: 200} s := span.NewRemoteChild(req2) s.Finish(WithResponse(resp)) } // An autogenerated API call. { rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)} hc := &http.Client{Transport: rt} computeClient, err := compute.New(hc) if err != nil { t.Fatal(err) } _, err = computeClient.Zones.List(testProjectID).Context(ctx).Do() if err != nil { t.Fatal(err) } } // A cloud library call that uses the autogenerated API. { rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)} hc := &http.Client{Transport: rt} storageClient, err := storage.NewClient(context.Background(), option.WithHTTPClient(hc)) if err != nil { t.Fatal(err) } var objAttrsList []*storage.ObjectAttrs it := storageClient.Bucket("testbucket").Objects(ctx, nil) for { objAttrs, err := it.Next() if err != nil && err != iterator.Done { t.Fatal(err) } if err == iterator.Done { break } objAttrsList = append(objAttrsList, objAttrs) } } // A cloud library call that uses grpc internally. 
for _, fail := range []bool{false, true} { srv, err := testutil.NewServer() if err != nil { t.Fatalf("creating test datastore server: %v", err) } dspb.RegisterDatastoreServer(srv.Gsrv, &fakeDatastoreServer{fail: fail}) srv.Start() conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), grpc.WithUnaryInterceptor(GRPCClientInterceptor())) if err != nil { t.Fatalf("connecting to test datastore server: %v", err) } datastoreClient, err := datastore.NewClient(ctx, testProjectID, option.WithGRPCConn(conn)) if err != nil { t.Fatalf("creating datastore client: %v", err) } k := datastore.NameKey("Entity", "stringID", nil) e := new(datastore.Entity) datastoreClient.Get(ctx, k, e) } done := make(chan struct{}) go func() { if synchronous { err := span.FinishWait() if err != nil { t.Errorf("Unexpected error from span.FinishWait: %v", err) } } else { span.Finish() } done <- struct{}{} }() if !expectTrace { <-done select { case <-rt.reqc: t.Errorf("Got a trace, expected none.") case <-time.After(5 * time.Millisecond): } return nil } else if !synchronous { <-done return <-rt.reqc } else { select { case <-done: t.Errorf("Synchronous Finish didn't wait for trace upload.") return <-rt.reqc case <-time.After(5 * time.Millisecond): r := <-rt.reqc <-done return r } } } func TestHeader(t *testing.T) { tests := []struct { header string wantTraceID string wantSpanID uint64 wantOpts optionFlags wantOK bool }{ { header: "0123456789ABCDEF0123456789ABCDEF/1;o=1", wantTraceID: "0123456789ABCDEF0123456789ABCDEF", wantSpanID: 1, wantOpts: 1, wantOK: true, }, { header: "0123456789ABCDEF0123456789ABCDEF/1;o=0", wantTraceID: "0123456789ABCDEF0123456789ABCDEF", wantSpanID: 1, wantOpts: 0, wantOK: true, }, { header: "0123456789ABCDEF0123456789ABCDEF/1", wantTraceID: "0123456789ABCDEF0123456789ABCDEF", wantSpanID: 1, wantOpts: 0, wantOK: true, }, { header: "", wantTraceID: "", wantSpanID: 0, wantOpts: 0, wantOK: false, }, } for _, tt := range tests { traceID, parentSpanID, opts, _, ok := 
traceInfoFromHeader(tt.header) if got, want := traceID, tt.wantTraceID; got != want { t.Errorf("TraceID(%v) = %q; want %q", tt.header, got, want) } if got, want := parentSpanID, tt.wantSpanID; got != want { t.Errorf("SpanID(%v) = %v; want %v", tt.header, got, want) } if got, want := opts, tt.wantOpts; got != want { t.Errorf("Options(%v) = %v; want %v", tt.header, got, want) } if got, want := ok, tt.wantOK; got != want { t.Errorf("Header exists (%v) = %v; want %v", tt.header, got, want) } } } func TestOutgoingReqHeader(t *testing.T) { all, _ := NewLimitedSampler(1, 1<<16) // trace every request tests := []struct { desc string traceHeader string samplingPolicy SamplingPolicy wantHeaderRe *regexp.Regexp }{ { desc: "Parent span without sampling options, client samples all", traceHeader: "0123456789ABCDEF0123456789ABCDEF/1", samplingPolicy: all, wantHeaderRe: regexp.MustCompile("0123456789ABCDEF0123456789ABCDEF/\\d+;o=1"), }, { desc: "Parent span without sampling options, without client sampling", traceHeader: "0123456789ABCDEF0123456789ABCDEF/1", samplingPolicy: nil, wantHeaderRe: regexp.MustCompile("0123456789ABCDEF0123456789ABCDEF/\\d+;o=0"), }, { desc: "Parent span with o=1, client samples none", traceHeader: "0123456789ABCDEF0123456789ABCDEF/1;o=1", samplingPolicy: nil, wantHeaderRe: regexp.MustCompile("0123456789ABCDEF0123456789ABCDEF/\\d+;o=1"), }, { desc: "Parent span with o=0, without client sampling", traceHeader: "0123456789ABCDEF0123456789ABCDEF/1;o=0", samplingPolicy: nil, wantHeaderRe: regexp.MustCompile("0123456789ABCDEF0123456789ABCDEF/\\d+;o=0"), }, } tc := newTestClient(nil) for _, tt := range tests { tc.SetSamplingPolicy(tt.samplingPolicy) span := tc.SpanFromHeader("/foo", tt.traceHeader) req, _ := http.NewRequest("GET", "http://localhost", nil) span.NewRemoteChild(req) if got, re := req.Header.Get(httpHeader), tt.wantHeaderRe; !re.MatchString(got) { t.Errorf("%v (parent=%q): got header %q; want in format %q", tt.desc, tt.traceHeader, got, re) } } } 
func TestTrace(t *testing.T) { t.Parallel() testTrace(t, false, true) } func TestTraceWithWait(t *testing.T) { testTrace(t, true, true) } func TestTraceFromHeader(t *testing.T) { t.Parallel() testTrace(t, false, false) } func TestTraceFromHeaderWithWait(t *testing.T) { testTrace(t, false, true) } func TestNewSpan(t *testing.T) { const traceID = "0123456789ABCDEF0123456789ABCDEF" rt := newFakeRoundTripper() traceClient := newTestClient(rt) span := traceClient.NewSpan("/foo") span.trace.traceID = traceID uploaded := makeRequests(t, span, rt, true, true) if uploaded == nil { t.Fatalf("No trace uploaded, expected one.") } expected := api.Traces{ Traces: []*api.Trace{ { ProjectId: testProjectID, Spans: []*api.TraceSpan{ { Kind: "RPC_CLIENT", Labels: map[string]string{ "trace.cloud.google.com/http/host": "example.com", "trace.cloud.google.com/http/method": "GET", "trace.cloud.google.com/http/status_code": "200", "trace.cloud.google.com/http/url": "http://example.com/bar", }, Name: "example.com/bar", }, { Kind: "RPC_CLIENT", Labels: map[string]string{ "trace.cloud.google.com/http/host": "www.googleapis.com", "trace.cloud.google.com/http/method": "GET", "trace.cloud.google.com/http/status_code": "200", "trace.cloud.google.com/http/url": "https://www.googleapis.com/compute/v1/projects/testproject/zones", }, Name: "www.googleapis.com/compute/v1/projects/testproject/zones", }, { Kind: "RPC_CLIENT", Labels: map[string]string{ "trace.cloud.google.com/http/host": "www.googleapis.com", "trace.cloud.google.com/http/method": "GET", "trace.cloud.google.com/http/status_code": "200", "trace.cloud.google.com/http/url": "https://www.googleapis.com/storage/v1/b/testbucket/o", }, Name: "www.googleapis.com/storage/v1/b/testbucket/o", }, &api.TraceSpan{ Kind: "RPC_CLIENT", Labels: nil, Name: "/google.datastore.v1.Datastore/Lookup", }, &api.TraceSpan{ Kind: "RPC_CLIENT", Labels: map[string]string{"error": "rpc error: code = Unknown desc = lookup failed"}, Name: 
"/google.datastore.v1.Datastore/Lookup", }, { Kind: "SPAN_KIND_UNSPECIFIED", Labels: map[string]string{}, Name: "/foo", }, }, TraceId: traceID, }, }, } body, err := ioutil.ReadAll(uploaded.Body) if err != nil { t.Fatal(err) } var patch api.Traces err = json.Unmarshal(body, &patch) if err != nil { t.Fatal(err) } if len(patch.Traces) != len(expected.Traces) || len(patch.Traces[0].Spans) != len(expected.Traces[0].Spans) { got, _ := json.Marshal(patch) want, _ := json.Marshal(expected) t.Fatalf("PatchTraces request: got %s want %s", got, want) } n := len(patch.Traces[0].Spans) rootSpan := patch.Traces[0].Spans[n-1] for i, s := range patch.Traces[0].Spans { if a, b := s.StartTime, s.EndTime; a > b { t.Errorf("span %d start time is later than its end time (%q, %q)", i, a, b) } if a, b := rootSpan.StartTime, s.StartTime; a > b { t.Errorf("trace start time is later than span %d start time (%q, %q)", i, a, b) } if a, b := s.EndTime, rootSpan.EndTime; a > b { t.Errorf("span %d end time is later than trace end time (%q, %q)", i, a, b) } if i > 1 && i < n-1 { if a, b := patch.Traces[0].Spans[i-1].EndTime, s.StartTime; a > b { t.Errorf("span %d end time is later than span %d start time (%q, %q)", i-1, i, a, b) } } } if x := rootSpan.ParentSpanId; x != 0 { t.Errorf("Incorrect ParentSpanId: got %d want %d", x, 0) } for i, s := range patch.Traces[0].Spans { if x, y := rootSpan.SpanId, s.ParentSpanId; i < n-1 && x != y { t.Errorf("Incorrect ParentSpanId in span %d: got %d want %d", i, y, x) } } for i, s := range patch.Traces[0].Spans { s.EndTime = "" labels := &expected.Traces[0].Spans[i].Labels for key, value := range *labels { if v, ok := s.Labels[key]; !ok { t.Errorf("Span %d is missing Label %q:%q", i, key, value) } else if key == "trace.cloud.google.com/http/url" { if !strings.HasPrefix(v, value) { t.Errorf("Span %d Label %q: got value %q want prefix %q", i, key, v, value) } } else if v != value { t.Errorf("Span %d Label %q: got value %q want %q", i, key, v, value) } } for key 
:= range s.Labels { if _, ok := (*labels)[key]; key != "trace.cloud.google.com/stacktrace" && !ok { t.Errorf("Span %d: unexpected label %q", i, key) } } *labels = nil s.Labels = nil s.ParentSpanId = 0 if s.SpanId == 0 { t.Errorf("Incorrect SpanId: got 0 want nonzero") } s.SpanId = 0 s.StartTime = "" } if !reflect.DeepEqual(patch, expected) { got, _ := json.Marshal(patch) want, _ := json.Marshal(expected) t.Errorf("PatchTraces request: got %s want %s", got, want) } } func testTrace(t *testing.T, synchronous bool, fromRequest bool) { const header = `0123456789ABCDEF0123456789ABCDEF/42;o=3` rt := newFakeRoundTripper() traceClient := newTestClient(rt) span := traceClient.SpanFromHeader("/foo", header) headerOrReqLabels := map[string]string{} headerOrReqName := "/foo" if fromRequest { req, err := http.NewRequest("GET", "http://example.com/foo", nil) if err != nil { t.Fatal(err) } req.Header.Set("X-Cloud-Trace-Context", header) span = traceClient.SpanFromRequest(req) headerOrReqLabels = map[string]string{ "trace.cloud.google.com/http/host": "example.com", "trace.cloud.google.com/http/method": "GET", "trace.cloud.google.com/http/url": "http://example.com/foo", } headerOrReqName = "example.com/foo" } uploaded := makeRequests(t, span, rt, synchronous, true) if uploaded == nil { t.Fatalf("No trace uploaded, expected one.") } expected := api.Traces{ Traces: []*api.Trace{ { ProjectId: testProjectID, Spans: []*api.TraceSpan{ { Kind: "RPC_CLIENT", Labels: map[string]string{ "trace.cloud.google.com/http/host": "example.com", "trace.cloud.google.com/http/method": "GET", "trace.cloud.google.com/http/status_code": "200", "trace.cloud.google.com/http/url": "http://example.com/bar", }, Name: "example.com/bar", }, { Kind: "RPC_CLIENT", Labels: map[string]string{ "trace.cloud.google.com/http/host": "www.googleapis.com", "trace.cloud.google.com/http/method": "GET", "trace.cloud.google.com/http/status_code": "200", "trace.cloud.google.com/http/url": 
"https://www.googleapis.com/compute/v1/projects/testproject/zones", }, Name: "www.googleapis.com/compute/v1/projects/testproject/zones", }, { Kind: "RPC_CLIENT", Labels: map[string]string{ "trace.cloud.google.com/http/host": "www.googleapis.com", "trace.cloud.google.com/http/method": "GET", "trace.cloud.google.com/http/status_code": "200", "trace.cloud.google.com/http/url": "https://www.googleapis.com/storage/v1/b/testbucket/o", }, Name: "www.googleapis.com/storage/v1/b/testbucket/o", }, &api.TraceSpan{ Kind: "RPC_CLIENT", Labels: nil, Name: "/google.datastore.v1.Datastore/Lookup", }, &api.TraceSpan{ Kind: "RPC_CLIENT", Labels: map[string]string{"error": "rpc error: code = Unknown desc = lookup failed"}, Name: "/google.datastore.v1.Datastore/Lookup", }, { Kind: "RPC_SERVER", Labels: headerOrReqLabels, Name: headerOrReqName, }, }, TraceId: "0123456789ABCDEF0123456789ABCDEF", }, }, } body, err := ioutil.ReadAll(uploaded.Body) if err != nil { t.Fatal(err) } var patch api.Traces err = json.Unmarshal(body, &patch) if err != nil { t.Fatal(err) } if len(patch.Traces) != len(expected.Traces) || len(patch.Traces[0].Spans) != len(expected.Traces[0].Spans) { got, _ := json.Marshal(patch) want, _ := json.Marshal(expected) t.Fatalf("PatchTraces request: got %s want %s", got, want) } n := len(patch.Traces[0].Spans) rootSpan := patch.Traces[0].Spans[n-1] for i, s := range patch.Traces[0].Spans { if a, b := s.StartTime, s.EndTime; a > b { t.Errorf("span %d start time is later than its end time (%q, %q)", i, a, b) } if a, b := rootSpan.StartTime, s.StartTime; a > b { t.Errorf("trace start time is later than span %d start time (%q, %q)", i, a, b) } if a, b := s.EndTime, rootSpan.EndTime; a > b { t.Errorf("span %d end time is later than trace end time (%q, %q)", i, a, b) } if i > 1 && i < n-1 { if a, b := patch.Traces[0].Spans[i-1].EndTime, s.StartTime; a > b { t.Errorf("span %d end time is later than span %d start time (%q, %q)", i-1, i, a, b) } } } if x := rootSpan.ParentSpanId; x 
!= 42 { t.Errorf("Incorrect ParentSpanId: got %d want %d", x, 42) } for i, s := range patch.Traces[0].Spans { if x, y := rootSpan.SpanId, s.ParentSpanId; i < n-1 && x != y { t.Errorf("Incorrect ParentSpanId in span %d: got %d want %d", i, y, x) } } for i, s := range patch.Traces[0].Spans { s.EndTime = "" labels := &expected.Traces[0].Spans[i].Labels for key, value := range *labels { if v, ok := s.Labels[key]; !ok { t.Errorf("Span %d is missing Label %q:%q", i, key, value) } else if key == "trace.cloud.google.com/http/url" { if !strings.HasPrefix(v, value) { t.Errorf("Span %d Label %q: got value %q want prefix %q", i, key, v, value) } } else if v != value { t.Errorf("Span %d Label %q: got value %q want %q", i, key, v, value) } } for key := range s.Labels { if _, ok := (*labels)[key]; key != "trace.cloud.google.com/stacktrace" && !ok { t.Errorf("Span %d: unexpected label %q", i, key) } } *labels = nil s.Labels = nil s.ParentSpanId = 0 if s.SpanId == 0 { t.Errorf("Incorrect SpanId: got 0 want nonzero") } s.SpanId = 0 s.StartTime = "" } if !reflect.DeepEqual(patch, expected) { got, _ := json.Marshal(patch) want, _ := json.Marshal(expected) t.Errorf("PatchTraces request: got %s \n\n want %s", got, want) } } func TestNoTrace(t *testing.T) { testNoTrace(t, false, true) } func TestNoTraceWithWait(t *testing.T) { testNoTrace(t, true, true) } func TestNoTraceFromHeader(t *testing.T) { testNoTrace(t, false, false) } func TestNoTraceFromHeaderWithWait(t *testing.T) { testNoTrace(t, true, false) } func testNoTrace(t *testing.T, synchronous bool, fromRequest bool) { for _, header := range []string{ `0123456789ABCDEF0123456789ABCDEF/42;o=2`, `0123456789ABCDEF0123456789ABCDEF/42;o=0`, `0123456789ABCDEF0123456789ABCDEF/42`, `0123456789ABCDEF0123456789ABCDEF`, ``, } { rt := newFakeRoundTripper() traceClient := newTestClient(rt) var span *Span if fromRequest { req, err := http.NewRequest("GET", "http://example.com/foo", nil) if header != "" { req.Header.Set("X-Cloud-Trace-Context", 
// TestSample is a deterministic test of the sampler logic: it feeds the
// sampler a fixed alternating pseudo-random sequence and a synthetic clock,
// so the number of sampled calls is exactly predictable.
func TestSample(t *testing.T) {
	// A deterministic test of the sampler logic.
	type testCase struct {
		rate   float64
		maxqps float64
		want   int
	}
	// delta is the simulated inter-arrival time: 25ms => 40 calls per second.
	const delta = 25 * time.Millisecond
	for _, test := range []testCase{
		// qps won't matter, so we will sample half of the 80 calls
		{0.50, 100, 40},
		// with 1 qps and a burst of 2, we will sample twice in second #1, once in the partial second #2
		{0.50, 1, 3},
	} {
		sp, err := NewLimitedSampler(test.rate, test.maxqps)
		if err != nil {
			t.Fatal(err)
		}
		s := sp.(*sampler)
		sampled := 0
		tm := time.Now()
		for i := 0; i < 80; i++ {
			// The "random" value alternates 0 and 1, so with rate 0.50
			// exactly every other call is below the sampling threshold.
			if s.sample(Parameters{}, tm, float64(i%2)).Sample {
				sampled++
			}
			tm = tm.Add(delta)
		}
		if sampled != test.want {
			t.Errorf("rate=%f, maxqps=%f: got %d samples, want %d", test.rate, test.maxqps, sampled, test.want)
		}
	}
}
} { wg.Add(1) go func(test testCase) { rt := newFakeRoundTripper() traceClient := newTestClient(rt) traceClient.bundler.BundleByteLimit = 1 p, err := NewLimitedSampler(test.rate, test.maxqps) if err != nil { t.Fatalf("NewLimitedSampler: %v", err) } traceClient.SetSamplingPolicy(p) ticker := time.NewTicker(25 * time.Millisecond) sampled := 0 for i := 0; i < 79; i++ { req, err := http.NewRequest("GET", "http://example.com/foo", nil) if err != nil { t.Fatal(err) } span := traceClient.SpanFromRequest(req) span.Finish() select { case <-rt.reqc: <-ticker.C sampled++ case <-ticker.C: } } ticker.Stop() if test.expectedRange[0] > sampled || sampled > test.expectedRange[1] { t.Errorf("rate=%f, maxqps=%f: got %d samples want ∈ %v", test.rate, test.maxqps, sampled, test.expectedRange) } wg.Done() }(test) } wg.Wait() } func TestBundling(t *testing.T) { t.Parallel() rt := newFakeRoundTripper() traceClient := newTestClient(rt) traceClient.bundler.DelayThreshold = time.Second / 2 traceClient.bundler.BundleCountThreshold = 10 p, err := NewLimitedSampler(1, 99) // sample every request. if err != nil { t.Fatalf("NewLimitedSampler: %v", err) } traceClient.SetSamplingPolicy(p) for i := 0; i < 35; i++ { go func() { req, err := http.NewRequest("GET", "http://example.com/foo", nil) if err != nil { t.Fatal(err) } span := traceClient.SpanFromRequest(req) span.Finish() }() } // Read the first three bundles. <-rt.reqc <-rt.reqc <-rt.reqc // Test that the fourth bundle isn't sent early. select { case <-rt.reqc: t.Errorf("bundle sent too early") case <-time.After(time.Second / 4): <-rt.reqc } // Test that there aren't extra bundles. 
select { case <-rt.reqc: t.Errorf("too many bundles sent") case <-time.After(time.Second): } } func TestWeights(t *testing.T) { const ( expectedNumTraced = 10100 numTracedEpsilon = 100 expectedTotalWeight = 50000 totalWeightEpsilon = 5000 ) rng := rand.New(rand.NewSource(1)) const delta = 2 * time.Millisecond for _, headerRate := range []float64{0.0, 0.5, 1.0} { // Simulate 10 seconds of requests arriving at 500qps. // // The sampling policy tries to sample 25% of them, but has a qps limit of // 100, so it will not be able to. The returned weight should be higher // for some sampled requests to compensate. // // headerRate is the fraction of incoming requests that have a trace header // set. The qps limit should not be exceeded, even if headerRate is high. sp, err := NewLimitedSampler(0.25, 100) if err != nil { t.Fatal(err) } s := sp.(*sampler) tm := time.Now() totalWeight := 0.0 numTraced := 0 seenLargeWeight := false for i := 0; i < 50000; i++ { d := s.sample(Parameters{HasTraceHeader: rng.Float64() < headerRate}, tm, rng.Float64()) if d.Trace { numTraced++ } if d.Sample { totalWeight += d.Weight if x := int(d.Weight) / 4; x <= 0 || x >= 100 || d.Weight != float64(x)*4.0 { t.Errorf("weight: got %f, want a small positive multiple of 4", d.Weight) } if d.Weight > 4 { seenLargeWeight = true } } tm = tm.Add(delta) } if !seenLargeWeight { t.Errorf("headerRate %f: never saw sample weight higher than 4.", headerRate) } if numTraced < expectedNumTraced-numTracedEpsilon || expectedNumTraced+numTracedEpsilon < numTraced { t.Errorf("headerRate %f: got %d traced requests, want ∈ [%d, %d]", headerRate, numTraced, expectedNumTraced-numTracedEpsilon, expectedNumTraced+numTracedEpsilon) } if totalWeight < expectedTotalWeight-totalWeightEpsilon || expectedTotalWeight+totalWeightEpsilon < totalWeight { t.Errorf("headerRate %f: got total weight %f want ∈ [%d, %d]", headerRate, totalWeight, expectedTotalWeight-totalWeightEpsilon, expectedTotalWeight+totalWeightEpsilon) } } } type 
// alwaysTrace is a SamplingPolicy that chooses to trace every request.
type alwaysTrace struct{}

// Sample reports Trace: true for every request, ignoring the parameters.
func (a alwaysTrace) Sample(p Parameters) Decision {
	return Decision{Trace: true}
}

// neverTrace is a SamplingPolicy that never chooses to trace a request.
type neverTrace struct{}

// Sample reports Trace: false for every request, ignoring the parameters.
func (a neverTrace) Sample(p Parameters) Decision {
	return Decision{Trace: false}
}
child requests, should be different", s2, s3) } if !trace && (s2 != 0 || s3 != 0) { t.Errorf("got span IDs %d %d in child requests, want zero", s2, s3) } } else { if trace && (s2 == s1 || s3 == s1 || s2 == s3) { t.Errorf("parent span IDs in input and outputs should be all different, got %d %d %d", s1, s2, s3) } if !trace && (s2 != s1 || s3 != s1) { t.Errorf("parent span ID in input, %d, should have been equal to parent span IDs in output: %d %d", s1, s2, s3) } } expectTraceOption := policy == alwaysTrace{} || (o1&1) != 0 if expectTraceOption != ((o2&1) != 0) || expectTraceOption != ((o3&1) != 0) { t.Errorf("tracing flag in child requests should be %t, got options %d %d", expectTraceOption, o2, o3) } } } } golang-google-cloud-0.9.0/translate/000077500000000000000000000000001312234511600173405ustar00rootroot00000000000000golang-google-cloud-0.9.0/translate/examples_test.go000066400000000000000000000037061312234511600225520ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package translate_test import ( "fmt" "cloud.google.com/go/translate" "golang.org/x/net/context" "golang.org/x/text/language" ) func Example_NewClient() { ctx := context.Background() client, err := translate.NewClient(ctx) if err != nil { // TODO: handle error. } // Use the client. // Close the client when finished. if err := client.Close(); err != nil { // TODO: handle error. 
} } func Example_Translate() { ctx := context.Background() client, err := translate.NewClient(ctx) if err != nil { // TODO: handle error. } translations, err := client.Translate(ctx, []string{"Le singe est sur la branche"}, language.English, &translate.Options{ Source: language.French, Format: translate.Text, }) if err != nil { // TODO: handle error. } fmt.Println(translations[0].Text) } func Example_DetectLanguage() { ctx := context.Background() client, err := translate.NewClient(ctx) if err != nil { // TODO: handle error. } ds, err := client.DetectLanguage(ctx, []string{"Today is Monday"}) if err != nil { // TODO: handle error. } fmt.Println(ds) } func Example_SupportedLanguages() { ctx := context.Background() client, err := translate.NewClient(ctx) if err != nil { // TODO: handle error. } langs, err := client.SupportedLanguages(ctx, language.English) if err != nil { // TODO: handle error. } fmt.Println(langs) } golang-google-cloud-0.9.0/translate/internal/000077500000000000000000000000001312234511600211545ustar00rootroot00000000000000golang-google-cloud-0.9.0/translate/internal/translate/000077500000000000000000000000001312234511600231515ustar00rootroot00000000000000golang-google-cloud-0.9.0/translate/internal/translate/v2/000077500000000000000000000000001312234511600235005ustar00rootroot00000000000000golang-google-cloud-0.9.0/translate/internal/translate/v2/README000066400000000000000000000003151312234511600243570ustar00rootroot00000000000000translate-nov2016-api.json is a hand-modified version of translate-api.json. It correctly reflects the API as of 2016-11-15. 
Differences: - Change to base URL - Addition of OAuth scopes To generate: golang-google-cloud-0.9.0/translate/internal/translate/v2/regen.sh000077500000000000000000000016571312234511600251500ustar00rootroot00000000000000#!/bin/bash -e (cd $GOPATH/src/google.golang.org/api; make generator) $GOPATH/bin/google-api-go-generator \ -api_json_file translate-nov2016-api.json \ -api_pkg_base cloud.google.com/go/translate/internal \ -output translate-nov2016-gen.nolicense cat - translate-nov2016-gen.nolicense > translate-nov2016-gen.go <" + s + "" } tr = translate(htmlify(test.input), test.target, nil) if got, want := tr.Text, htmlify(test.output); got != want { t.Errorf("html: got %q, want %q", got, want) } // Using the HTML format behaves the same. tr = translate(htmlify(test.input), test.target, &Options{Format: HTML}) if got, want := tr.Text, htmlify(test.output); got != want { t.Errorf("html: got %q, want %q", got, want) } } } // This tests the beta "nmt" model. func TestTranslateModel(t *testing.T) { ctx := context.Background() c := initTest(ctx, t) defer c.Close() trs, err := c.Translate(ctx, []string{"Hello"}, language.French, &Options{Model: "nmt"}) if err != nil { t.Fatal(err) } if len(trs) != 1 { t.Fatalf("wanted one Translation, got %d", len(trs)) } tr := trs[0] if got, want := tr.Text, "Bonjour"; got != want { t.Errorf("text: got %q, want %q", got, want) } if got, want := tr.Model, "nmt"; got != want { t.Errorf("model: got %q, want %q", got, want) } } func TestTranslateMultipleInputs(t *testing.T) { ctx := context.Background() c := initTest(ctx, t) defer c.Close() inputs := []string{ "When you're a Jet, you're a Jet all the way", "From your first cigarette to your last dying day", "When you're a Jet if the spit hits the fan", "You got brothers around, you're a family man", } ts, err := c.Translate(ctx, inputs, language.French, nil) if err != nil { t.Fatal(err) } if got, want := len(ts), len(inputs); got != want { t.Fatalf("got %d Translations, wanted %d", got, 
want) } } func TestTranslateErrors(t *testing.T) { ctx := context.Background() c := initTest(ctx, t) defer c.Close() for _, test := range []struct { ctx context.Context target language.Tag inputs []string opts *Options }{ {ctx, language.English, nil, nil}, {ctx, language.Und, []string{"input"}, nil}, {ctx, language.English, []string{}, nil}, {ctx, language.English, []string{"input"}, &Options{Format: "random"}}, } { _, err := c.Translate(test.ctx, test.inputs, test.target, test.opts) if err == nil { t.Errorf("%+v: got nil, want error", test) } } } func TestDetectLanguage(t *testing.T) { ctx := context.Background() c := initTest(ctx, t) defer c.Close() ds, err := c.DetectLanguage(ctx, []string{ "Today is Monday", "Aujourd'hui est lundi", }) if err != nil { t.Fatal(err) } if len(ds) != 2 { t.Fatalf("got %d detection lists, want 2", len(ds)) } checkDetections(t, ds[0], language.English) checkDetections(t, ds[1], language.French) } func checkDetections(t *testing.T, ds []Detection, want language.Tag) { for _, d := range ds { if d.Language == want { return } } t.Errorf("%v: missing %s", ds, want) } // A small subset of the supported languages. 
var supportedLangs = []Language{ {Name: "Danish", Tag: language.Danish}, {Name: "English", Tag: language.English}, {Name: "French", Tag: language.French}, {Name: "German", Tag: language.German}, {Name: "Greek", Tag: language.Greek}, {Name: "Hindi", Tag: language.Hindi}, {Name: "Hungarian", Tag: language.Hungarian}, {Name: "Italian", Tag: language.Italian}, {Name: "Russian", Tag: language.Russian}, {Name: "Turkish", Tag: language.Turkish}, } func TestSupportedLanguages(t *testing.T) { ctx := context.Background() c := initTest(ctx, t) defer c.Close() got, err := c.SupportedLanguages(ctx, language.English) if err != nil { t.Fatal(err) } want := map[language.Tag]Language{} for _, sl := range supportedLangs { want[sl.Tag] = sl } for _, g := range got { w, ok := want[g.Tag] if !ok { continue } if g != w { t.Errorf("got %+v, want %+v", g, w) } delete(want, g.Tag) } if len(want) > 0 { t.Errorf("missing: %+v", want) } } golang-google-cloud-0.9.0/videointelligence/000077500000000000000000000000001312234511600210345ustar00rootroot00000000000000golang-google-cloud-0.9.0/videointelligence/apiv1beta1/000077500000000000000000000000001312234511600227715ustar00rootroot00000000000000golang-google-cloud-0.9.0/videointelligence/apiv1beta1/doc.go000066400000000000000000000026701312234511600240720ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
// Package videointelligence is an experimental, auto-generated package for the // Google Cloud Video Intelligence API. // // Google Cloud Video Intelligence API. // // Use the client at cloud.google.com/go/videointelligence in preference to this. package videointelligence // import "cloud.google.com/go/videointelligence/apiv1beta1" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", } } golang-google-cloud-0.9.0/videointelligence/apiv1beta1/mock_test.go000066400000000000000000000117771312234511600253250ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package videointelligence import ( videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1" longrunningpb "google.golang.org/genproto/googleapis/longrunning" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockVideoIntelligenceServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. videointelligencepb.VideoIntelligenceServiceServer reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockVideoIntelligenceServer) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest) (*longrunningpb.Operation, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*longrunningpb.Operation), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. 
var clientOpt option.ClientOption

var (
	mockVideoIntelligence mockVideoIntelligenceServer
)

// TestMain starts an in-process gRPC server backed by the mock
// VideoIntelligence service, points clientOpt at it, and then runs
// the package's tests.
func TestMain(m *testing.M) {
	flag.Parse()

	serv := grpc.NewServer()
	videointelligencepb.RegisterVideoIntelligenceServiceServer(serv, &mockVideoIntelligence)

	// Port 0 lets the OS pick any free port, avoiding collisions.
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	go serv.Serve(lis)

	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	clientOpt = option.WithGRPCConn(conn)

	os.Exit(m.Run())
}
&longrunningpb.Operation_Error{ Error: &status.Status{ Code: int32(errCode), Message: "test error", }, }, }) var inputUri string = "inputUri1707300727" var features []videointelligencepb.Feature = nil var request = &videointelligencepb.AnnotateVideoRequest{ InputUri: inputUri, Features: features, } c, err := NewClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } respLRO, err := c.AnnotateVideo(context.Background(), request) if err != nil { t.Fatal(err) } resp, err := respLRO.Wait(context.Background()) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } golang-google-cloud-0.9.0/videointelligence/apiv1beta1/video_intelligence_client.go000066400000000000000000000200071312234511600305050ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package videointelligence import ( "time" "cloud.google.com/go/internal/version" "cloud.google.com/go/longrunning" lroauto "cloud.google.com/go/longrunning/autogen" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1" longrunningpb "google.golang.org/genproto/googleapis/longrunning" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // CallOptions contains the retry settings for each method of Client. type CallOptions struct { AnnotateVideo []gax.CallOption } func defaultClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("videointelligence.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultCallOptions() *CallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 1000 * time.Millisecond, Max: 120000 * time.Millisecond, Multiplier: 2.5, }) }), }, } return &CallOptions{ AnnotateVideo: retry[[2]string{"default", "idempotent"}], } } // Client is a client for interacting with Google Cloud Video Intelligence API. type Client struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. client videointelligencepb.VideoIntelligenceServiceClient // LROClient is used internally to handle longrunning operations. // It is exposed so that its CallOptions can be modified if required. // Users should not Close this client. LROClient *lroauto.OperationsClient // The call options for this service. CallOptions *CallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewClient creates a new video intelligence service client. // // Service that implements Google Cloud Video Intelligence API. 
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) if err != nil { return nil, err } c := &Client{ conn: conn, CallOptions: defaultCallOptions(), client: videointelligencepb.NewVideoIntelligenceServiceClient(conn), } c.SetGoogleClientInfo() c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn)) if err != nil { // This error "should not happen", since we are just reusing old connection // and never actually need to dial. // If this does happen, we could leak conn. However, we cannot close conn: // If the user invoked the function with option.WithGRPCConn, // we would close a connection that's still in use. // TODO(pongad): investigate error conditions. return nil, err } return c, nil } // Connection returns the client's connection to the API service. func (c *Client) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *Client) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *Client) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // AnnotateVideo performs asynchronous video annotation. Progress and results can be // retrieved through the `google.longrunning.Operations` interface. // `Operation.metadata` contains `AnnotateVideoProgress` (progress). // `Operation.response` contains `AnnotateVideoResponse` (results). 
func (c *Client) AnnotateVideo(ctx context.Context, req *videointelligencepb.AnnotateVideoRequest, opts ...gax.CallOption) (*AnnotateVideoOperation, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.AnnotateVideo[0:len(c.CallOptions.AnnotateVideo):len(c.CallOptions.AnnotateVideo)], opts...) var resp *longrunningpb.Operation err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.AnnotateVideo(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { return nil, err } return &AnnotateVideoOperation{ lro: longrunning.InternalNewOperation(c.LROClient, resp), }, nil } // AnnotateVideoOperation manages a long-running operation from AnnotateVideo. type AnnotateVideoOperation struct { lro *longrunning.Operation } // AnnotateVideoOperation returns a new AnnotateVideoOperation from a given name. // The name must be that of a previously created AnnotateVideoOperation, possibly from a different process. func (c *Client) AnnotateVideoOperation(name string) *AnnotateVideoOperation { return &AnnotateVideoOperation{ lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}), } } // Wait blocks until the long-running operation is completed, returning the response and any errors encountered. // // See documentation of Poll for error-handling information. func (op *AnnotateVideoOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { var resp videointelligencepb.AnnotateVideoResponse if err := op.lro.Wait(ctx, &resp, opts...); err != nil { return nil, err } return &resp, nil } // Poll fetches the latest state of the long-running operation. // // Poll also fetches the latest metadata, which can be retrieved by Metadata. // // If Poll fails, the error is returned and op is unmodified. If Poll succeeds and // the operation has completed with failure, the error is returned and op.Done will return true. 
// If Poll succeeds and the operation has completed successfully, // op.Done will return true, and the response of the operation is returned. // If Poll succeeds and the operation has not completed, the returned response and error are both nil. func (op *AnnotateVideoOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*videointelligencepb.AnnotateVideoResponse, error) { var resp videointelligencepb.AnnotateVideoResponse if err := op.lro.Poll(ctx, &resp, opts...); err != nil { return nil, err } if !op.Done() { return nil, nil } return &resp, nil } // Metadata returns metadata associated with the long-running operation. // Metadata itself does not contact the server, but Poll does. // To get the latest metadata, call this method after a successful call to Poll. // If the metadata is not available, the returned metadata and error are both nil. func (op *AnnotateVideoOperation) Metadata() (*videointelligencepb.AnnotateVideoProgress, error) { var meta videointelligencepb.AnnotateVideoProgress if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { return nil, nil } else if err != nil { return nil, err } return &meta, nil } // Done reports whether the long-running operation has completed. func (op *AnnotateVideoOperation) Done() bool { return op.lro.Done() } // Name returns the name of the long-running operation. // The name is assigned by the server and is unique within the service from which the operation is created. func (op *AnnotateVideoOperation) Name() string { return op.lro.Name() } golang-google-cloud-0.9.0/videointelligence/apiv1beta1/video_intelligence_client_example_test.go000066400000000000000000000027061312234511600332650ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package videointelligence_test import ( "cloud.google.com/go/videointelligence/apiv1beta1" "golang.org/x/net/context" videointelligencepb "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta1" ) func ExampleNewClient() { ctx := context.Background() c, err := videointelligence.NewClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleClient_AnnotateVideo() { ctx := context.Background() c, err := videointelligence.NewClient(ctx) if err != nil { // TODO: Handle error. } req := &videointelligencepb.AnnotateVideoRequest{ // TODO: Fill request struct fields. } op, err := c.AnnotateVideo(ctx, req) if err != nil { // TODO: Handle error. } resp, err := op.Wait(ctx) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } golang-google-cloud-0.9.0/vision/000077500000000000000000000000001312234511600166525ustar00rootroot00000000000000golang-google-cloud-0.9.0/vision/annotations.go000066400000000000000000000540701312234511600215440ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package vision import ( "image" "golang.org/x/text/language" pb "google.golang.org/genproto/googleapis/cloud/vision/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // Annotations contains all the annotations performed by the API on a single image. // A nil field indicates either that the corresponding feature was not requested, // or that annotation failed for that feature. type Annotations struct { // Faces holds the results of face detection. Faces []*FaceAnnotation // Landmarks holds the results of landmark detection. Landmarks []*EntityAnnotation // Logos holds the results of logo detection. Logos []*EntityAnnotation // Labels holds the results of label detection. Labels []*EntityAnnotation // Texts holds the results of text detection. Texts []*EntityAnnotation // FullText holds the results of full text (OCR) detection. FullText *TextAnnotation // SafeSearch holds the results of safe-search detection. SafeSearch *SafeSearchAnnotation // ImageProps contains properties of the annotated image. ImageProps *ImageProps // Web contains web annotations for the image. Web *WebDetection // CropHints contains crop hints for the image. CropHints []*CropHint // If non-nil, then one or more of the attempted annotations failed. // Non-nil annotations are guaranteed to be correct, even if Error is // non-nil. 
Error error } func annotationsFromProto(res *pb.AnnotateImageResponse) *Annotations { as := &Annotations{} for _, a := range res.FaceAnnotations { as.Faces = append(as.Faces, faceAnnotationFromProto(a)) } for _, a := range res.LandmarkAnnotations { as.Landmarks = append(as.Landmarks, entityAnnotationFromProto(a)) } for _, a := range res.LogoAnnotations { as.Logos = append(as.Logos, entityAnnotationFromProto(a)) } for _, a := range res.LabelAnnotations { as.Labels = append(as.Labels, entityAnnotationFromProto(a)) } for _, a := range res.TextAnnotations { as.Texts = append(as.Texts, entityAnnotationFromProto(a)) } as.FullText = textAnnotationFromProto(res.FullTextAnnotation) as.SafeSearch = safeSearchAnnotationFromProto(res.SafeSearchAnnotation) as.ImageProps = imagePropertiesFromProto(res.ImagePropertiesAnnotation) as.Web = webDetectionFromProto(res.WebDetection) as.CropHints = cropHintsFromProto(res.CropHintsAnnotation) if res.Error != nil { // res.Error is a google.rpc.Status. Convert to a Go error. Use a gRPC // error because it preserves the code as a separate field. // TODO(jba): preserve the details field. as.Error = grpc.Errorf(codes.Code(res.Error.Code), "%s", res.Error.Message) } return as } // A FaceAnnotation describes the results of face detection on an image. type FaceAnnotation struct { // BoundingPoly is the bounding polygon around the face. The coordinates of // the bounding box are in the original image's scale, as returned in // ImageParams. The bounding box is computed to "frame" the face in // accordance with human expectations. It is based on the landmarker // results. Note that one or more x and/or y coordinates may not be // generated in the BoundingPoly (the polygon will be unbounded) if only a // partial face appears in the image to be annotated. BoundingPoly []image.Point // FDBoundingPoly is tighter than BoundingPoly, and // encloses only the skin part of the face. 
Typically, it is used to // eliminate the face from any image analysis that detects the "amount of // skin" visible in an image. It is not based on the landmarker results, only // on the initial face detection, hence the fd (face detection) prefix. FDBoundingPoly []image.Point // Landmarks are detected face landmarks. Face FaceLandmarks // RollAngle indicates the amount of clockwise/anti-clockwise rotation of // the face relative to the image vertical, about the axis perpendicular to // the face. Range [-180,180]. RollAngle float32 // PanAngle is the yaw angle: the leftward/rightward angle that the face is // pointing, relative to the vertical plane perpendicular to the image. Range // [-180,180]. PanAngle float32 // TiltAngle is the pitch angle: the upwards/downwards angle that the face is // pointing relative to the image's horizontal plane. Range [-180,180]. TiltAngle float32 // DetectionConfidence is the detection confidence. The range is [0, 1]. DetectionConfidence float32 // LandmarkingConfidence is the face landmarking confidence. The range is [0, 1]. LandmarkingConfidence float32 // Likelihoods expresses the likelihood of various aspects of the face. 
Likelihoods *FaceLikelihoods } func faceAnnotationFromProto(pfa *pb.FaceAnnotation) *FaceAnnotation { fa := &FaceAnnotation{ BoundingPoly: boundingPolyFromProto(pfa.BoundingPoly), FDBoundingPoly: boundingPolyFromProto(pfa.FdBoundingPoly), RollAngle: pfa.RollAngle, PanAngle: pfa.PanAngle, TiltAngle: pfa.TiltAngle, DetectionConfidence: pfa.DetectionConfidence, LandmarkingConfidence: pfa.LandmarkingConfidence, Likelihoods: &FaceLikelihoods{ Joy: Likelihood(pfa.JoyLikelihood), Sorrow: Likelihood(pfa.SorrowLikelihood), Anger: Likelihood(pfa.AngerLikelihood), Surprise: Likelihood(pfa.SurpriseLikelihood), UnderExposed: Likelihood(pfa.UnderExposedLikelihood), Blurred: Likelihood(pfa.BlurredLikelihood), Headwear: Likelihood(pfa.HeadwearLikelihood), }, } populateFaceLandmarks(pfa.Landmarks, &fa.Face) return fa } // An EntityAnnotation describes the results of a landmark, label, logo or text // detection on an image. type EntityAnnotation struct { // ID is an opaque entity ID. Some IDs might be available in Knowledge Graph(KG). // For more details on KG please see: // https://developers.google.com/knowledge-graph/ ID string // Locale is the language code for the locale in which the entity textual // description (next field) is expressed. Locale string // Description is the entity textual description, expressed in the language of Locale. Description string // Score is the overall score of the result. Range [0, 1]. Score float32 // Confidence is the accuracy of the entity detection in an image. // For example, for an image containing the Eiffel Tower, this field represents // the confidence that there is a tower in the query image. Range [0, 1]. Confidence float32 // Topicality is the relevancy of the ICA (Image Content Annotation) label to the // image. For example, the relevancy of 'tower' to an image containing // 'Eiffel Tower' is likely higher than an image containing a distant towering // building, though the confidence that there is a tower may be the same. 
// Range [0, 1]. Topicality float32 // BoundingPoly is the image region to which this entity belongs. Not filled currently // for label detection. For text detection, BoundingPolys // are produced for the entire text detected in an image region, followed by // BoundingPolys for each word within the detected text. BoundingPoly []image.Point // Locations contains the location information for the detected entity. // Multiple LatLng structs can be present since one location may indicate the // location of the scene in the query image, and another the location of the // place where the query image was taken. Location information is usually // present for landmarks. Locations []LatLng // Properties are additional optional Property fields. // For example a different kind of score or string that qualifies the entity. Properties []Property } func entityAnnotationFromProto(e *pb.EntityAnnotation) *EntityAnnotation { var locs []LatLng for _, li := range e.Locations { locs = append(locs, latLngFromProto(li.LatLng)) } var props []Property for _, p := range e.Properties { props = append(props, propertyFromProto(p)) } return &EntityAnnotation{ ID: e.Mid, Locale: e.Locale, Description: e.Description, Score: e.Score, Confidence: e.Confidence, Topicality: e.Topicality, BoundingPoly: boundingPolyFromProto(e.BoundingPoly), Locations: locs, Properties: props, } } // TextAnnotation contains a structured representation of OCR extracted text. // The hierarchy of an OCR extracted text structure looks like: // TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol // Each structural component, starting from Page, may further have its own // properties. Properties describe detected languages, breaks etc. type TextAnnotation struct { // List of pages detected by OCR. Pages []*Page // UTF-8 text detected on the pages. 
Text string } func textAnnotationFromProto(pta *pb.TextAnnotation) *TextAnnotation { if pta == nil { return nil } var pages []*Page for _, p := range pta.Pages { pages = append(pages, pageFromProto(p)) } return &TextAnnotation{ Pages: pages, Text: pta.Text, } } // A Page is a page of text detected from OCR. type Page struct { // Additional information detected on the page. Properties *TextProperties // Page width in pixels. Width int32 // Page height in pixels. Height int32 // List of blocks of text, images etc on this page. Blocks []*Block } func pageFromProto(p *pb.Page) *Page { if p == nil { return nil } var blocks []*Block for _, b := range p.Blocks { blocks = append(blocks, blockFromProto(b)) } return &Page{ Properties: textPropertiesFromProto(p.Property), Width: p.Width, Height: p.Height, Blocks: blocks, } } // A Block is a logical element on the page. type Block struct { // Additional information detected for the block. Properties *TextProperties // The bounding box for the block. // The vertices are in the order of top-left, top-right, bottom-right, // bottom-left. When a rotation of the bounding box is detected the rotation // is represented as around the top-left corner as defined when the text is // read in the 'natural' orientation. // For example: // * when the text is horizontal it might look like: // 0----1 // | | // 3----2 // * when it's rotated 180 degrees around the top-left corner it becomes: // 2----3 // | | // 1----0 // and the vertice order will still be (0, 1, 2, 3). BoundingBox []image.Point // List of paragraphs in this block (if this blocks is of type text). Paragraphs []*Paragraph // Detected block type (text, image etc) for this block. BlockType BlockType } // A BlockType represents the kind of Block (text, image, etc.) type BlockType int const ( // Unknown block type. UnknownBlock BlockType = BlockType(pb.Block_UNKNOWN) // Regular text block. TextBlock BlockType = BlockType(pb.Block_TEXT) // Table block. 
TableBlock BlockType = BlockType(pb.Block_TABLE) // Image block. PictureBlock BlockType = BlockType(pb.Block_PICTURE) // Horizontal/vertical line box. RulerBlock BlockType = BlockType(pb.Block_RULER) // Barcode block. BarcodeBlock BlockType = BlockType(pb.Block_BARCODE) ) func blockFromProto(p *pb.Block) *Block { if p == nil { return nil } var paras []*Paragraph for _, pa := range p.Paragraphs { paras = append(paras, paragraphFromProto(pa)) } return &Block{ Properties: textPropertiesFromProto(p.Property), BoundingBox: boundingPolyFromProto(p.BoundingBox), Paragraphs: paras, BlockType: BlockType(p.BlockType), } } // A Paragraph is a structural unit of text representing a number of words in // certain order. type Paragraph struct { // Additional information detected for the paragraph. Properties *TextProperties // The bounding box for the paragraph. // The vertices are in the order of top-left, top-right, bottom-right, // bottom-left. When a rotation of the bounding box is detected the rotation // is represented as around the top-left corner as defined when the text is // read in the 'natural' orientation. // For example: // * when the text is horizontal it might look like: // 0----1 // | | // 3----2 // * when it's rotated 180 degrees around the top-left corner it becomes: // 2----3 // | | // 1----0 // and the vertice order will still be (0, 1, 2, 3). BoundingBox []image.Point // List of words in this paragraph. Words []*Word } func paragraphFromProto(p *pb.Paragraph) *Paragraph { if p == nil { return nil } var words []*Word for _, w := range p.Words { words = append(words, wordFromProto(w)) } return &Paragraph{ Properties: textPropertiesFromProto(p.Property), BoundingBox: boundingPolyFromProto(p.BoundingBox), Words: words, } } // A Word is a word in a text document. type Word struct { // Additional information detected for the word. Properties *TextProperties // The bounding box for the word. 
// The vertices are in the order of top-left, top-right, bottom-right, // bottom-left. When a rotation of the bounding box is detected the rotation // is represented as around the top-left corner as defined when the text is // read in the 'natural' orientation. // For example: // * when the text is horizontal it might look like: // 0----1 // | | // 3----2 // * when it's rotated 180 degrees around the top-left corner it becomes: // 2----3 // | | // 1----0 // and the vertice order will still be (0, 1, 2, 3). BoundingBox []image.Point // List of symbols in the word. // The order of the symbols follows the natural reading order. Symbols []*Symbol } func wordFromProto(p *pb.Word) *Word { if p == nil { return nil } var syms []*Symbol for _, s := range p.Symbols { syms = append(syms, symbolFromProto(s)) } return &Word{ Properties: textPropertiesFromProto(p.Property), BoundingBox: boundingPolyFromProto(p.BoundingBox), Symbols: syms, } } // A Symbol is a symbol in a text document. type Symbol struct { // Additional information detected for the symbol. Properties *TextProperties // The bounding box for the symbol. // The vertices are in the order of top-left, top-right, bottom-right, // bottom-left. When a rotation of the bounding box is detected the rotation // is represented as around the top-left corner as defined when the text is // read in the 'natural' orientation. // For example: // * when the text is horizontal it might look like: // 0----1 // | | // 3----2 // * when it's rotated 180 degrees around the top-left corner it becomes: // 2----3 // | | // 1----0 // and the vertice order will still be (0, 1, 2, 3). BoundingBox []image.Point // The actual UTF-8 representation of the symbol. 
Text string } func symbolFromProto(p *pb.Symbol) *Symbol { if p == nil { return nil } return &Symbol{ Properties: textPropertiesFromProto(p.Property), BoundingBox: boundingPolyFromProto(p.BoundingBox), Text: p.Text, } } // TextProperties contains additional information about an OCR structural component. type TextProperties struct { // A list of detected languages together with confidence. DetectedLanguages []*DetectedLanguage // Detected start or end of a text segment. DetectedBreak *DetectedBreak } // Detected language for a structural component. type DetectedLanguage struct { // The BCP-47 language code, such as "en-US" or "sr-Latn". Code language.Tag // The confidence of the detected language, in the range [0, 1]. Confidence float32 } // DetectedBreak is the detected start or end of a structural component. type DetectedBreak struct { // The type of break. Type DetectedBreakType // True if break prepends the element. IsPrefix bool } type DetectedBreakType int const ( // Unknown break label type. UnknownBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_UNKNOWN) // Regular space. SpaceBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_SPACE) // Sure space (very wide). SureSpaceBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_SURE_SPACE) // Line-wrapping break. EOLSureSpaceBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_EOL_SURE_SPACE) // End-line hyphen that is not present in text; does not co-occur with SPACE, LEADER_SPACE, or LINE_BREAK. HyphenBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_HYPHEN) // Line break that ends a paragraph. LineBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_LINE_BREAK) ) func textPropertiesFromProto(p *pb.TextAnnotation_TextProperty) *TextProperties { var dls []*DetectedLanguage for _, dl := range p.DetectedLanguages { tag, _ := language.Parse(dl.LanguageCode) // Ignore error. If err != nil the returned tag will not be garbage, // but a best-effort attempt at a parse. 
At worst it will be // language.Und, the documented "undefined" Tag. dls = append(dls, &DetectedLanguage{Code: tag, Confidence: dl.Confidence}) } var db *DetectedBreak if p.DetectedBreak != nil { db = &DetectedBreak{ Type: DetectedBreakType(p.DetectedBreak.Type), IsPrefix: p.DetectedBreak.IsPrefix, } } return &TextProperties{ DetectedLanguages: dls, DetectedBreak: db, } } // SafeSearchAnnotation describes the results of a SafeSearch detection on an image. type SafeSearchAnnotation struct { // Adult is the likelihood that the image contains adult content. Adult Likelihood // Spoof is the likelihood that an obvious modification was made to the // image's canonical version to make it appear funny or offensive. Spoof Likelihood // Medical is the likelihood that this is a medical image. Medical Likelihood // Violence is the likelihood that this image represents violence. Violence Likelihood } func safeSearchAnnotationFromProto(s *pb.SafeSearchAnnotation) *SafeSearchAnnotation { if s == nil { return nil } return &SafeSearchAnnotation{ Adult: Likelihood(s.Adult), Spoof: Likelihood(s.Spoof), Medical: Likelihood(s.Medical), Violence: Likelihood(s.Violence), } } // ImageProps describes properties of the image itself, like the dominant colors. type ImageProps struct { // DominantColors describes the dominant colors of the image. DominantColors []*ColorInfo } func imagePropertiesFromProto(ip *pb.ImageProperties) *ImageProps { if ip == nil || ip.DominantColors == nil { return nil } var cinfos []*ColorInfo for _, ci := range ip.DominantColors.Colors { cinfos = append(cinfos, colorInfoFromProto(ci)) } return &ImageProps{DominantColors: cinfos} } // WebDetection contains relevant information for the image from the Internet. type WebDetection struct { // Deduced entities from similar images on the Internet. WebEntities []*WebEntity // Fully matching images from the Internet. // They're definite neardups and most often a copy of the query image with // merely a size change. 
FullMatchingImages []*WebImage // Partial matching images from the Internet. // Those images are similar enough to share some key-point features. For // example an original image will likely have partial matching for its crops. PartialMatchingImages []*WebImage // Web pages containing the matching images from the Internet. PagesWithMatchingImages []*WebPage } func webDetectionFromProto(p *pb.WebDetection) *WebDetection { if p == nil { return nil } var ( wes []*WebEntity fmis, pmis []*WebImage wps []*WebPage ) for _, e := range p.WebEntities { wes = append(wes, webEntityFromProto(e)) } for _, m := range p.FullMatchingImages { fmis = append(fmis, webImageFromProto(m)) } for _, m := range p.PartialMatchingImages { pmis = append(fmis, webImageFromProto(m)) } for _, g := range p.PagesWithMatchingImages { wps = append(wps, webPageFromProto(g)) } return &WebDetection{ WebEntities: wes, FullMatchingImages: fmis, PartialMatchingImages: pmis, PagesWithMatchingImages: wps, } } // A WebEntity is an entity deduced from similar images on the Internet. type WebEntity struct { // Opaque entity ID. ID string // Overall relevancy score for the entity. // Not normalized and not comparable across different image queries. Score float32 // Canonical description of the entity, in English. Description string } func webEntityFromProto(p *pb.WebDetection_WebEntity) *WebEntity { return &WebEntity{ ID: p.EntityId, Score: p.Score, Description: p.Description, } } // WebImage contains metadata for online images. type WebImage struct { // The result image URL. URL string // Overall relevancy score for the image. // Not normalized and not comparable across different image queries. Score float32 } func webImageFromProto(p *pb.WebDetection_WebImage) *WebImage { return &WebImage{ URL: p.Url, Score: p.Score, } } // A WebPage contains metadata for web pages. type WebPage struct { // The result web page URL. URL string // Overall relevancy score for the web page. 
// Not normalized and not comparable across different image queries. Score float32 } func webPageFromProto(p *pb.WebDetection_WebPage) *WebPage { return &WebPage{ URL: p.Url, Score: p.Score, } } // CropHint is a single crop hint that is used to generate a new crop when // serving an image. type CropHint struct { // The bounding polygon for the crop region. The coordinates of the bounding // box are in the original image's scale, as returned in `ImageParams`. BoundingPoly []image.Point // Confidence of this being a salient region. Range [0, 1]. Confidence float32 // Fraction of importance of this salient region with respect to the original // image. ImportanceFraction float32 } func cropHintsFromProto(p *pb.CropHintsAnnotation) []*CropHint { if p == nil { return nil } var chs []*CropHint for _, pch := range p.CropHints { chs = append(chs, cropHintFromProto(pch)) } return chs } func cropHintFromProto(pch *pb.CropHint) *CropHint { return &CropHint{ BoundingPoly: boundingPolyFromProto(pch.BoundingPoly), Confidence: pch.Confidence, ImportanceFraction: pch.ImportanceFraction, } } golang-google-cloud-0.9.0/vision/apiv1/000077500000000000000000000000001312234511600176725ustar00rootroot00000000000000golang-google-cloud-0.9.0/vision/apiv1/BatchAnnotateImages_smoke_test.go000066400000000000000000000041261312234511600263220ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. 
DO NOT EDIT. package vision import ( visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" ) import ( "strconv" "testing" "time" "cloud.google.com/go/internal/testutil" "golang.org/x/net/context" "google.golang.org/api/iterator" "google.golang.org/api/option" ) var _ = iterator.Done var _ = strconv.FormatUint var _ = time.Now func TestImageAnnotatorSmoke(t *testing.T) { if testing.Short() { t.Skip("skipping smoke test in short mode") } ctx := context.Background() ts := testutil.TokenSource(ctx, DefaultAuthScopes()...) if ts == nil { t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") } projectId := testutil.ProjID() _ = projectId c, err := NewImageAnnotatorClient(ctx, option.WithTokenSource(ts)) if err != nil { t.Fatal(err) } var gcsImageUri string = "gs://gapic-toolkit/President_Barack_Obama.jpg" var source = &visionpb.ImageSource{ GcsImageUri: gcsImageUri, } var image = &visionpb.Image{ Source: source, } var type_ visionpb.Feature_Type = visionpb.Feature_FACE_DETECTION var featuresElement = &visionpb.Feature{ Type: type_, } var features = []*visionpb.Feature{featuresElement} var requestsElement = &visionpb.AnnotateImageRequest{ Image: image, Features: features, } var requests = []*visionpb.AnnotateImageRequest{requestsElement} var request = &visionpb.BatchAnnotateImagesRequest{ Requests: requests, } if _, err := c.BatchAnnotateImages(ctx, request); err != nil { t.Error(err) } } golang-google-cloud-0.9.0/vision/apiv1/README.md000066400000000000000000000004571312234511600211570ustar00rootroot00000000000000Auto-generated vision v1 clients ================================= This package includes auto-generated clients for the vision v1 API. Use the handwritten client (in the parent directory, cloud.google.com/go/vision) in preference to this. This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. 
golang-google-cloud-0.9.0/vision/apiv1/client.go000066400000000000000000000142441312234511600215040ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vision import ( gax "github.com/googleapis/gax-go" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/cloud/vision/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // AnnotateImage runs image detection and annotation for a single image. func (c *ImageAnnotatorClient) AnnotateImage(ctx context.Context, req *pb.AnnotateImageRequest, opts ...gax.CallOption) (*pb.AnnotateImageResponse, error) { res, err := c.BatchAnnotateImages(ctx, &pb.BatchAnnotateImagesRequest{ Requests: []*pb.AnnotateImageRequest{req}, }, opts...) if err != nil { return nil, err } return res.Responses[0], nil } // Called for a single image and a single feature. func (c *ImageAnnotatorClient) annotateOne(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, ftype pb.Feature_Type, maxResults int, opts []gax.CallOption) (*pb.AnnotateImageResponse, error) { res, err := c.AnnotateImage(ctx, &pb.AnnotateImageRequest{ Image: img, ImageContext: ictx, Features: []*pb.Feature{{Type: ftype, MaxResults: int32(maxResults)}}, }, opts...) if err != nil { return nil, err } // When there is only one image and one feature, the response's Error field is // unambiguously about that one detection, so we "promote" it to the error return // value. 
// res.Error is a google.rpc.Status. Convert to a Go error. Use a gRPC // error because it preserves the code as a separate field. // TODO(jba): preserve the details field. if res.Error != nil { return nil, grpc.Errorf(codes.Code(res.Error.Code), "%s", res.Error.Message) } return res, nil } // DetectFaces performs face detection on the image. // At most maxResults results are returned. func (c *ImageAnnotatorClient) DetectFaces(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.FaceAnnotation, error) { res, err := c.annotateOne(ctx, img, ictx, pb.Feature_FACE_DETECTION, maxResults, opts) if err != nil { return nil, err } return res.FaceAnnotations, nil } // DetectLandmarks performs landmark detection on the image. // At most maxResults results are returned. func (c *ImageAnnotatorClient) DetectLandmarks(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.EntityAnnotation, error) { res, err := c.annotateOne(ctx, img, ictx, pb.Feature_LANDMARK_DETECTION, maxResults, opts) if err != nil { return nil, err } return res.LandmarkAnnotations, nil } // DetectLogos performs logo detection on the image. // At most maxResults results are returned. func (c *ImageAnnotatorClient) DetectLogos(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.EntityAnnotation, error) { res, err := c.annotateOne(ctx, img, ictx, pb.Feature_LOGO_DETECTION, maxResults, opts) if err != nil { return nil, err } return res.LogoAnnotations, nil } // DetectLabels performs label detection on the image. // At most maxResults results are returned. 
func (c *ImageAnnotatorClient) DetectLabels(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.EntityAnnotation, error) { res, err := c.annotateOne(ctx, img, ictx, pb.Feature_LABEL_DETECTION, maxResults, opts) if err != nil { return nil, err } return res.LabelAnnotations, nil } // DetectTexts performs text detection on the image. // At most maxResults results are returned. func (c *ImageAnnotatorClient) DetectTexts(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, maxResults int, opts ...gax.CallOption) ([]*pb.EntityAnnotation, error) { res, err := c.annotateOne(ctx, img, ictx, pb.Feature_TEXT_DETECTION, maxResults, opts) if err != nil { return nil, err } return res.TextAnnotations, nil } // DetectDocumentText performs full text (OCR) detection on the image. func (c *ImageAnnotatorClient) DetectDocumentText(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.TextAnnotation, error) { res, err := c.annotateOne(ctx, img, ictx, pb.Feature_DOCUMENT_TEXT_DETECTION, 0, opts) if err != nil { return nil, err } return res.FullTextAnnotation, nil } // DetectSafeSearch performs safe-search detection on the image. func (c *ImageAnnotatorClient) DetectSafeSearch(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.SafeSearchAnnotation, error) { res, err := c.annotateOne(ctx, img, ictx, pb.Feature_SAFE_SEARCH_DETECTION, 0, opts) if err != nil { return nil, err } return res.SafeSearchAnnotation, nil } // DetectImageProperties computes properties of the image. func (c *ImageAnnotatorClient) DetectImageProperties(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.ImageProperties, error) { res, err := c.annotateOne(ctx, img, ictx, pb.Feature_IMAGE_PROPERTIES, 0, opts) if err != nil { return nil, err } return res.ImagePropertiesAnnotation, nil } // DetectWeb computes a web annotation on the image. 
func (c *ImageAnnotatorClient) DetectWeb(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.WebDetection, error) { res, err := c.annotateOne(ctx, img, ictx, pb.Feature_WEB_DETECTION, 0, opts) if err != nil { return nil, err } return res.WebDetection, nil } // CropHints computes crop hints for the image. func (c *ImageAnnotatorClient) CropHints(ctx context.Context, img *pb.Image, ictx *pb.ImageContext, opts ...gax.CallOption) (*pb.CropHintsAnnotation, error) { res, err := c.annotateOne(ctx, img, ictx, pb.Feature_CROP_HINTS, 0, opts) if err != nil { return nil, err } return res.CropHintsAnnotation, nil } golang-google-cloud-0.9.0/vision/apiv1/client_test.go000066400000000000000000000143441312234511600225440ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package vision import ( "fmt" "reflect" "testing" "github.com/golang/protobuf/proto" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/cloud/vision/v1" "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) var batchResponse = &pb.BatchAnnotateImagesResponse{ Responses: []*pb.AnnotateImageResponse{{ FaceAnnotations: []*pb.FaceAnnotation{ {RollAngle: 1}, {RollAngle: 2}}, LandmarkAnnotations: []*pb.EntityAnnotation{{Mid: "landmark"}}, LogoAnnotations: []*pb.EntityAnnotation{{Mid: "logo"}}, LabelAnnotations: []*pb.EntityAnnotation{{Mid: "label"}}, TextAnnotations: []*pb.EntityAnnotation{{Mid: "text"}}, FullTextAnnotation: &pb.TextAnnotation{Text: "full"}, SafeSearchAnnotation: &pb.SafeSearchAnnotation{Spoof: pb.Likelihood_POSSIBLE}, ImagePropertiesAnnotation: &pb.ImageProperties{DominantColors: &pb.DominantColorsAnnotation{}}, CropHintsAnnotation: &pb.CropHintsAnnotation{CropHints: []*pb.CropHint{{Confidence: 0.5}}}, WebDetection: &pb.WebDetection{WebEntities: []*pb.WebDetection_WebEntity{{EntityId: "web"}}}, }}, } // Verify that all the "shortcut" methods use the underlying // BatchAnnotateImages RPC correctly. 
func TestClientMethods(t *testing.T) { ctx := context.Background() c, err := NewImageAnnotatorClient(ctx, clientOpt) if err != nil { t.Fatal(err) } mockImageAnnotator.resps = []proto.Message{batchResponse} img := &pb.Image{Source: &pb.ImageSource{ImageUri: "http://foo.jpg"}} ictx := &pb.ImageContext{LanguageHints: []string{"en", "fr"}} req := &pb.AnnotateImageRequest{ Image: img, ImageContext: ictx, Features: []*pb.Feature{ {Type: pb.Feature_LABEL_DETECTION, MaxResults: 3}, {Type: pb.Feature_FACE_DETECTION, MaxResults: 4}, }, } for i, test := range []struct { call func() (interface{}, error) wantFeatures []*pb.Feature wantRes interface{} }{ { func() (interface{}, error) { return c.AnnotateImage(ctx, req) }, req.Features, batchResponse.Responses[0], }, { func() (interface{}, error) { return c.DetectFaces(ctx, img, ictx, 2) }, []*pb.Feature{{pb.Feature_FACE_DETECTION, 2}}, batchResponse.Responses[0].FaceAnnotations, }, { func() (interface{}, error) { return c.DetectLandmarks(ctx, img, ictx, 2) }, []*pb.Feature{{pb.Feature_LANDMARK_DETECTION, 2}}, batchResponse.Responses[0].LandmarkAnnotations, }, { func() (interface{}, error) { return c.DetectLogos(ctx, img, ictx, 2) }, []*pb.Feature{{pb.Feature_LOGO_DETECTION, 2}}, batchResponse.Responses[0].LogoAnnotations, }, { func() (interface{}, error) { return c.DetectLabels(ctx, img, ictx, 2) }, []*pb.Feature{{pb.Feature_LABEL_DETECTION, 2}}, batchResponse.Responses[0].LabelAnnotations, }, { func() (interface{}, error) { return c.DetectTexts(ctx, img, ictx, 2) }, []*pb.Feature{{pb.Feature_TEXT_DETECTION, 2}}, batchResponse.Responses[0].TextAnnotations, }, { func() (interface{}, error) { return c.DetectDocumentText(ctx, img, ictx) }, []*pb.Feature{{pb.Feature_DOCUMENT_TEXT_DETECTION, 0}}, batchResponse.Responses[0].FullTextAnnotation, }, { func() (interface{}, error) { return c.DetectSafeSearch(ctx, img, ictx) }, []*pb.Feature{{pb.Feature_SAFE_SEARCH_DETECTION, 0}}, batchResponse.Responses[0].SafeSearchAnnotation, }, { func() 
(interface{}, error) { return c.DetectImageProperties(ctx, img, ictx) }, []*pb.Feature{{pb.Feature_IMAGE_PROPERTIES, 0}}, batchResponse.Responses[0].ImagePropertiesAnnotation, }, { func() (interface{}, error) { return c.DetectWeb(ctx, img, ictx) }, []*pb.Feature{{pb.Feature_WEB_DETECTION, 0}}, batchResponse.Responses[0].WebDetection, }, { func() (interface{}, error) { return c.CropHints(ctx, img, ictx) }, []*pb.Feature{{pb.Feature_CROP_HINTS, 0}}, batchResponse.Responses[0].CropHintsAnnotation, }, } { mockImageAnnotator.reqs = nil res, err := test.call() if err != nil { t.Fatal(err) } got := mockImageAnnotator.reqs[0] want := &pb.BatchAnnotateImagesRequest{ Requests: []*pb.AnnotateImageRequest{{ Image: img, ImageContext: ictx, Features: test.wantFeatures, }}, } if !testEqual(got, want) { t.Errorf("#%d:\ngot %v\nwant %v", i, got, want) } if got, want := res, test.wantRes; !testEqual(got, want) { t.Errorf("#%d:\ngot %v\nwant %v", i, got, want) } } } func testEqual(a, b interface{}) bool { if a == nil && b == nil { return true } if a == nil || b == nil { return false } t := reflect.TypeOf(a) if t != reflect.TypeOf(b) { return false } if am, ok := a.(proto.Message); ok { return proto.Equal(am, b.(proto.Message)) } if t.Kind() != reflect.Slice { panic(fmt.Sprintf("testEqual can only handle proto.Message and slices, got %s", t)) } va := reflect.ValueOf(a) vb := reflect.ValueOf(b) if va.Len() != vb.Len() { return false } for i := 0; i < va.Len(); i++ { if !testEqual(va.Index(i).Interface(), vb.Index(i).Interface()) { return false } } return true } func TestAnnotateOneError(t *testing.T) { ctx := context.Background() c, err := NewImageAnnotatorClient(ctx, clientOpt) if err != nil { t.Fatal(err) } mockImageAnnotator.resps = []proto.Message{ &pb.BatchAnnotateImagesResponse{ Responses: []*pb.AnnotateImageResponse{{ Error: &status.Status{Code: int32(codes.NotFound), Message: "not found"}, }}, }, } _, err = c.annotateOne(ctx, &pb.Image{Source: &pb.ImageSource{ImageUri: 
"http://foo.jpg"}}, nil, pb.Feature_LOGO_DETECTION, 1, nil) if grpc.Code(err) != codes.NotFound { t.Errorf("got %v, want NotFound") } } golang-google-cloud-0.9.0/vision/apiv1/doc.go000066400000000000000000000030301312234511600207620ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. // Package vision is an experimental, auto-generated package for the // Google Cloud Vision API. // // Integrates Google Vision features, including image labeling, face, logo, // and landmark detection, optical character recognition (OCR), and detection // of explicit content, into applications. // // Use the client at cloud.google.com/go/vision in preference to this. package vision // import "cloud.google.com/go/vision/apiv1" import ( "golang.org/x/net/context" "google.golang.org/grpc/metadata" ) func insertXGoog(ctx context.Context, val []string) context.Context { md, _ := metadata.FromOutgoingContext(ctx) md = md.Copy() md["x-goog-api-client"] = val return metadata.NewOutgoingContext(ctx, md) } // DefaultAuthScopes reports the authentication scopes required // by this package. func DefaultAuthScopes() []string { return []string{ "https://www.googleapis.com/auth/cloud-platform", } } golang-google-cloud-0.9.0/vision/apiv1/examples_test.go000066400000000000000000000045211312234511600231000ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vision_test import ( "fmt" "os" vision "cloud.google.com/go/vision/apiv1" "golang.org/x/net/context" pb "google.golang.org/genproto/googleapis/cloud/vision/v1" ) func Example_NewImageFromReader() { f, err := os.Open("path/to/image.jpg") if err != nil { // TODO: handle error. } img, err := vision.NewImageFromReader(f) if err != nil { // TODO: handle error. } fmt.Println(img) } func Example_NewImageFromURI() { img := vision.NewImageFromURI("gs://my-bucket/my-image.png") fmt.Println(img) } func ExampleImageAnnotatorClient_AnnotateImage() { ctx := context.Background() c, err := vision.NewImageAnnotatorClient(ctx) if err != nil { // TODO: Handle error. } res, err := c.AnnotateImage(ctx, &pb.AnnotateImageRequest{ Image: vision.NewImageFromURI("gs://my-bucket/my-image.png"), Features: []*pb.Feature{ {Type: pb.Feature_LANDMARK_DETECTION, MaxResults: 5}, {Type: pb.Feature_LABEL_DETECTION, MaxResults: 3}, }, }) if err != nil { // TODO: Handle error. } // TODO: Use res. _ = res } func Example_FaceFromLandmarks() { ctx := context.Background() c, err := vision.NewImageAnnotatorClient(ctx) if err != nil { // TODO: Handle error. } resp, err := c.BatchAnnotateImages(ctx, &pb.BatchAnnotateImagesRequest{ Requests: []*pb.AnnotateImageRequest{ { Image: vision.NewImageFromURI("gs://bucket/image.jpg"), Features: []*pb.Feature{{ Type: pb.Feature_FACE_DETECTION, MaxResults: 5, }}, }, }, }) if err != nil { // TODO: Handle error. 
} res := resp.Responses[0] if res.Error != nil { // TODO: Handle error. } for _, a := range res.FaceAnnotations { face := vision.FaceFromLandmarks(a.Landmarks) fmt.Println(face.Nose.Tip) fmt.Println(face.Eyes.Left.Pupil) } } golang-google-cloud-0.9.0/vision/apiv1/face.go000066400000000000000000000120041312234511600211140ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vision import ( "log" pb "google.golang.org/genproto/googleapis/cloud/vision/v1" ) // FaceLandmarks contains the positions of facial features detected by the service. type FaceLandmarks struct { Eyebrows Eyebrows Eyes Eyes Ears Ears Nose Nose Mouth Mouth Chin Chin Forehead *pb.Position } // Eyebrows represents a face's eyebrows. type Eyebrows struct { Left, Right Eyebrow } // Eyebrow represents a face's eyebrow. type Eyebrow struct { Top, Left, Right *pb.Position } // Eyes represents a face's eyes. type Eyes struct { Left, Right Eye } // Eye represents a face's eye. type Eye struct { Left, Right, Top, Bottom, Center, Pupil *pb.Position } // Ears represents a face's ears. type Ears struct { Left, Right *pb.Position } // Nose represents a face's nose. type Nose struct { Left, Right, Top, Bottom, Tip *pb.Position } // Mouth represents a face's mouth. type Mouth struct { Left, Center, Right, UpperLip, LowerLip *pb.Position } // Chin represents a face's chin. 
type Chin struct { Left, Center, Right *pb.Position } // FaceFromLandmarks converts the list of face landmarks returned by the service // to a FaceLandmarks struct. func FaceFromLandmarks(landmarks []*pb.FaceAnnotation_Landmark) *FaceLandmarks { face := &FaceLandmarks{} for _, lm := range landmarks { switch lm.Type { case pb.FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW: face.Eyebrows.Left.Left = lm.Position case pb.FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW: face.Eyebrows.Left.Right = lm.Position case pb.FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW: face.Eyebrows.Right.Left = lm.Position case pb.FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW: face.Eyebrows.Right.Right = lm.Position case pb.FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT: face.Eyebrows.Left.Top = lm.Position case pb.FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT: face.Eyebrows.Right.Top = lm.Position case pb.FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES: face.Nose.Top = lm.Position case pb.FaceAnnotation_Landmark_NOSE_TIP: face.Nose.Tip = lm.Position case pb.FaceAnnotation_Landmark_UPPER_LIP: face.Mouth.UpperLip = lm.Position case pb.FaceAnnotation_Landmark_LOWER_LIP: face.Mouth.LowerLip = lm.Position case pb.FaceAnnotation_Landmark_MOUTH_LEFT: face.Mouth.Left = lm.Position case pb.FaceAnnotation_Landmark_MOUTH_RIGHT: face.Mouth.Right = lm.Position case pb.FaceAnnotation_Landmark_MOUTH_CENTER: face.Mouth.Center = lm.Position case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT: face.Nose.Right = lm.Position case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT: face.Nose.Left = lm.Position case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER: face.Nose.Bottom = lm.Position case pb.FaceAnnotation_Landmark_LEFT_EYE: face.Eyes.Left.Center = lm.Position case pb.FaceAnnotation_Landmark_RIGHT_EYE: face.Eyes.Right.Center = lm.Position case pb.FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY: face.Eyes.Left.Top = lm.Position case pb.FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER: face.Eyes.Left.Right = 
lm.Position case pb.FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY: face.Eyes.Left.Bottom = lm.Position case pb.FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER: face.Eyes.Left.Left = lm.Position case pb.FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY: face.Eyes.Right.Top = lm.Position case pb.FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER: face.Eyes.Right.Right = lm.Position case pb.FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY: face.Eyes.Right.Bottom = lm.Position case pb.FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER: face.Eyes.Right.Left = lm.Position case pb.FaceAnnotation_Landmark_LEFT_EYE_PUPIL: face.Eyes.Left.Pupil = lm.Position case pb.FaceAnnotation_Landmark_RIGHT_EYE_PUPIL: face.Eyes.Right.Pupil = lm.Position case pb.FaceAnnotation_Landmark_LEFT_EAR_TRAGION: face.Ears.Left = lm.Position case pb.FaceAnnotation_Landmark_RIGHT_EAR_TRAGION: face.Ears.Right = lm.Position case pb.FaceAnnotation_Landmark_FOREHEAD_GLABELLA: face.Forehead = lm.Position case pb.FaceAnnotation_Landmark_CHIN_GNATHION: face.Chin.Center = lm.Position case pb.FaceAnnotation_Landmark_CHIN_LEFT_GONION: face.Chin.Left = lm.Position case pb.FaceAnnotation_Landmark_CHIN_RIGHT_GONION: face.Chin.Right = lm.Position default: log.Printf("vision: ignoring unknown face annotation landmark %s", lm.Type) } } return face } golang-google-cloud-0.9.0/vision/apiv1/face_test.go000066400000000000000000000155431312234511600221660ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package vision import ( "testing" "cloud.google.com/go/internal/pretty" pb "google.golang.org/genproto/googleapis/cloud/vision/v1" ) func TestFaceFromLandmarks(t *testing.T) { landmarks := []*pb.FaceAnnotation_Landmark{ { Type: pb.FaceAnnotation_Landmark_LEFT_EYE, Position: &pb.Position{X: 1192, Y: 575, Z: 0}, }, { Type: pb.FaceAnnotation_Landmark_RIGHT_EYE, Position: &pb.Position{X: 1479, Y: 571, Z: -9}, }, { Type: pb.FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW, Position: &pb.Position{X: 1097, Y: 522, Z: 27}, }, { Type: pb.FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW, Position: &pb.Position{X: 1266, Y: 521, Z: -61}, }, { Type: pb.FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW, Position: &pb.Position{X: 1402, Y: 520, Z: -66}, }, { Type: pb.FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW, Position: &pb.Position{X: 1571, Y: 519, Z: 10}, }, { Type: pb.FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES, Position: &pb.Position{X: 1331, Y: 566, Z: -66}, }, { Type: pb.FaceAnnotation_Landmark_NOSE_TIP, Position: &pb.Position{X: 1329, Y: 743, Z: -137}, }, { Type: pb.FaceAnnotation_Landmark_UPPER_LIP, Position: &pb.Position{X: 1330, Y: 836, Z: -66}, }, { Type: pb.FaceAnnotation_Landmark_LOWER_LIP, Position: &pb.Position{X: 1334, Y: 954, Z: -36}, }, { Type: pb.FaceAnnotation_Landmark_MOUTH_LEFT, Position: &pb.Position{X: 1186, Y: 867, Z: 27}, }, { Type: pb.FaceAnnotation_Landmark_MOUTH_RIGHT, Position: &pb.Position{X: 1484, Y: 857, Z: 19}, }, { Type: pb.FaceAnnotation_Landmark_MOUTH_CENTER, Position: &pb.Position{X: 1332, Y: 894, Z: -41}, }, { Type: pb.FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT, Position: &pb.Position{X: 1432, Y: 750, Z: -26}, }, { Type: pb.FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT, Position: &pb.Position{X: 1236, Y: 755, Z: -20}, }, { Type: pb.FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER, Position: &pb.Position{X: 1332, Y: 783, Z: -70}, }, { Type: 
pb.FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY, Position: &pb.Position{X: 1193, Y: 561, Z: -20}, }, { Type: pb.FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER, Position: &pb.Position{X: 1252, Y: 581, Z: -1}, }, { Type: pb.FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY, Position: &pb.Position{X: 1190, Y: 593, Z: -1}, }, { Type: pb.FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER, Position: &pb.Position{X: 1133, Y: 584, Z: 28}, }, { Type: pb.FaceAnnotation_Landmark_LEFT_EYE_PUPIL, Position: &pb.Position{X: 1189, Y: 580, Z: -8}, }, { Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY, Position: &pb.Position{X: 1474, Y: 561, Z: -30}, }, { Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER, Position: &pb.Position{X: 1536, Y: 581, Z: 15}, }, { Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY, Position: &pb.Position{X: 1481, Y: 590, Z: -11}, }, { Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER, Position: &pb.Position{X: 1424, Y: 579, Z: -6}, }, { Type: pb.FaceAnnotation_Landmark_RIGHT_EYE_PUPIL, Position: &pb.Position{X: 1478, Y: 580, Z: -18}, }, { Type: pb.FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT, Position: &pb.Position{X: 1181, Y: 482, Z: -40}, }, { Type: pb.FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT, Position: &pb.Position{X: 1485, Y: 482, Z: -50}, }, { Type: pb.FaceAnnotation_Landmark_LEFT_EAR_TRAGION, Position: &pb.Position{X: 1027, Y: 696, Z: 361}, }, { Type: pb.FaceAnnotation_Landmark_RIGHT_EAR_TRAGION, Position: &pb.Position{X: 1666, Y: 695, Z: 339}, }, { Type: pb.FaceAnnotation_Landmark_FOREHEAD_GLABELLA, Position: &pb.Position{X: 1332, Y: 514, Z: -75}, }, { Type: pb.FaceAnnotation_Landmark_CHIN_GNATHION, Position: &pb.Position{X: 1335, Y: 1058, Z: 6}, }, { Type: pb.FaceAnnotation_Landmark_CHIN_LEFT_GONION, Position: &pb.Position{X: 1055, Y: 882, Z: 257}, }, { Type: pb.FaceAnnotation_Landmark_CHIN_RIGHT_GONION, Position: &pb.Position{X: 1631, Y: 881, Z: 238}, }, } want := &FaceLandmarks{ Eyebrows: Eyebrows{ Left: 
Eyebrow{ Top: &pb.Position{X: 1181, Y: 482, Z: -40}, Left: &pb.Position{X: 1097, Y: 522, Z: 27}, Right: &pb.Position{X: 1266, Y: 521, Z: -61}, }, Right: Eyebrow{ Top: &pb.Position{X: 1485, Y: 482, Z: -50}, Left: &pb.Position{X: 1402, Y: 520, Z: -66}, Right: &pb.Position{X: 1571, Y: 519, Z: 10}, }, }, Eyes: Eyes{ Left: Eye{ Left: &pb.Position{X: 1133, Y: 584, Z: 28}, Right: &pb.Position{X: 1252, Y: 581, Z: -1}, Top: &pb.Position{X: 1193, Y: 561, Z: -20}, Bottom: &pb.Position{X: 1190, Y: 593, Z: -1}, Center: &pb.Position{X: 1192, Y: 575, Z: 0}, Pupil: &pb.Position{X: 1189, Y: 580, Z: -8}, }, Right: Eye{ Left: &pb.Position{X: 1424, Y: 579, Z: -6}, Right: &pb.Position{X: 1536, Y: 581, Z: 15}, Top: &pb.Position{X: 1474, Y: 561, Z: -30}, Bottom: &pb.Position{X: 1481, Y: 590, Z: -11}, Center: &pb.Position{X: 1479, Y: 571, Z: -9}, Pupil: &pb.Position{X: 1478, Y: 580, Z: -18}, }, }, Ears: Ears{ Left: &pb.Position{X: 1027, Y: 696, Z: 361}, Right: &pb.Position{X: 1666, Y: 695, Z: 339}, }, Nose: Nose{ Left: &pb.Position{X: 1236, Y: 755, Z: -20}, Right: &pb.Position{X: 1432, Y: 750, Z: -26}, Top: &pb.Position{X: 1331, Y: 566, Z: -66}, Bottom: &pb.Position{X: 1332, Y: 783, Z: -70}, Tip: &pb.Position{X: 1329, Y: 743, Z: -137}, }, Mouth: Mouth{ Left: &pb.Position{X: 1186, Y: 867, Z: 27}, Center: &pb.Position{X: 1332, Y: 894, Z: -41}, Right: &pb.Position{X: 1484, Y: 857, Z: 19}, UpperLip: &pb.Position{X: 1330, Y: 836, Z: -66}, LowerLip: &pb.Position{X: 1334, Y: 954, Z: -36}, }, Chin: Chin{ Left: &pb.Position{X: 1055, Y: 882, Z: 257}, Center: &pb.Position{X: 1335, Y: 1058, Z: 6}, Right: &pb.Position{X: 1631, Y: 881, Z: 238}, }, Forehead: &pb.Position{X: 1332, Y: 514, Z: -75}, } got := FaceFromLandmarks(landmarks) msg, ok, err := pretty.Diff(want, got) if err != nil { t.Fatal(err) } if !ok { t.Error(msg) } } golang-google-cloud-0.9.0/vision/apiv1/image.go000066400000000000000000000022741312234511600213100ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vision import ( "io" "io/ioutil" pb "google.golang.org/genproto/googleapis/cloud/vision/v1" ) // NewImageFromReader reads the bytes of an image from r. func NewImageFromReader(r io.Reader) (*pb.Image, error) { bytes, err := ioutil.ReadAll(r) if err != nil { return nil, err } return &pb.Image{Content: bytes}, nil } // NewImageFromURI returns an image that refers to an object in Google Cloud Storage // (when the uri is of the form "gs://BUCKET/OBJECT") or at a public URL. func NewImageFromURI(uri string) *pb.Image { return &pb.Image{Source: &pb.ImageSource{ImageUri: uri}} } golang-google-cloud-0.9.0/vision/apiv1/image_annotator_client.go000066400000000000000000000107461312234511600247360ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. 
package vision import ( "time" "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" "google.golang.org/api/option" "google.golang.org/api/transport" visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) // ImageAnnotatorCallOptions contains the retry settings for each method of ImageAnnotatorClient. type ImageAnnotatorCallOptions struct { BatchAnnotateImages []gax.CallOption } func defaultImageAnnotatorClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("vision.googleapis.com:443"), option.WithScopes(DefaultAuthScopes()...), } } func defaultImageAnnotatorCallOptions() *ImageAnnotatorCallOptions { retry := map[[2]string][]gax.CallOption{ {"default", "idempotent"}: { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, Max: 60000 * time.Millisecond, Multiplier: 1.3, }) }), }, } return &ImageAnnotatorCallOptions{ BatchAnnotateImages: retry[[2]string{"default", "idempotent"}], } } // ImageAnnotatorClient is a client for interacting with Google Cloud Vision API. type ImageAnnotatorClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. imageAnnotatorClient visionpb.ImageAnnotatorClient // The call options for this service. CallOptions *ImageAnnotatorCallOptions // The metadata to be sent with each request. xGoogHeader []string } // NewImageAnnotatorClient creates a new image annotator client. // // Service that performs Google Cloud Vision API detection tasks over client // images, such as face, landmark, logo, label, and text detection. The // ImageAnnotator service returns detected entities from the images. 
func NewImageAnnotatorClient(ctx context.Context, opts ...option.ClientOption) (*ImageAnnotatorClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultImageAnnotatorClientOptions(), opts...)...) if err != nil { return nil, err } c := &ImageAnnotatorClient{ conn: conn, CallOptions: defaultImageAnnotatorCallOptions(), imageAnnotatorClient: visionpb.NewImageAnnotatorClient(conn), } c.SetGoogleClientInfo() return c, nil } // Connection returns the client's connection to the API service. func (c *ImageAnnotatorClient) Connection() *grpc.ClientConn { return c.conn } // Close closes the connection to the API service. The user should invoke this when // the client is no longer required. func (c *ImageAnnotatorClient) Close() error { return c.conn.Close() } // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *ImageAnnotatorClient) SetGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", version.Go()}, keyval...) kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } // BatchAnnotateImages run image detection and annotation for a batch of images. func (c *ImageAnnotatorClient) BatchAnnotateImages(ctx context.Context, req *visionpb.BatchAnnotateImagesRequest, opts ...gax.CallOption) (*visionpb.BatchAnnotateImagesResponse, error) { ctx = insertXGoog(ctx, c.xGoogHeader) opts = append(c.CallOptions.BatchAnnotateImages[0:len(c.CallOptions.BatchAnnotateImages):len(c.CallOptions.BatchAnnotateImages)], opts...) var resp *visionpb.BatchAnnotateImagesResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.imageAnnotatorClient.BatchAnnotateImages(ctx, req, settings.GRPC...) return err }, opts...) 
if err != nil { return nil, err } return resp, nil } golang-google-cloud-0.9.0/vision/apiv1/image_annotator_client_example_test.go000066400000000000000000000025621312234511600275050ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package vision_test import ( "cloud.google.com/go/vision/apiv1" "golang.org/x/net/context" visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" ) func ExampleNewImageAnnotatorClient() { ctx := context.Background() c, err := vision.NewImageAnnotatorClient(ctx) if err != nil { // TODO: Handle error. } // TODO: Use client. _ = c } func ExampleImageAnnotatorClient_BatchAnnotateImages() { ctx := context.Background() c, err := vision.NewImageAnnotatorClient(ctx) if err != nil { // TODO: Handle error. } req := &visionpb.BatchAnnotateImagesRequest{ // TODO: Fill request struct fields. } resp, err := c.BatchAnnotateImages(ctx, req) if err != nil { // TODO: Handle error. } // TODO: Use resp. _ = resp } golang-google-cloud-0.9.0/vision/apiv1/mock_test.go000066400000000000000000000102071312234511600222110ustar00rootroot00000000000000// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // AUTO-GENERATED CODE. DO NOT EDIT. package vision import ( visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" ) import ( "flag" "fmt" "io" "log" "net" "os" "strings" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" "google.golang.org/api/option" status "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" gstatus "google.golang.org/grpc/status" ) var _ = io.EOF var _ = ptypes.MarshalAny var _ status.Status type mockImageAnnotatorServer struct { // Embed for forward compatibility. // Tests will keep working if more methods are added // in the future. visionpb.ImageAnnotatorServer reqs []proto.Message // If set, all calls return this error. err error // responses to return if err == nil resps []proto.Message } func (s *mockImageAnnotatorServer) BatchAnnotateImages(ctx context.Context, req *visionpb.BatchAnnotateImagesRequest) (*visionpb.BatchAnnotateImagesResponse, error) { md, _ := metadata.FromIncomingContext(ctx) if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) } s.reqs = append(s.reqs, req) if s.err != nil { return nil, s.err } return s.resps[0].(*visionpb.BatchAnnotateImagesResponse), nil } // clientOpt is the option tests should use to connect to the test server. // It is initialized by TestMain. 
var clientOpt option.ClientOption var ( mockImageAnnotator mockImageAnnotatorServer ) func TestMain(m *testing.M) { flag.Parse() serv := grpc.NewServer() visionpb.RegisterImageAnnotatorServer(serv, &mockImageAnnotator) lis, err := net.Listen("tcp", "localhost:0") if err != nil { log.Fatal(err) } go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { log.Fatal(err) } clientOpt = option.WithGRPCConn(conn) os.Exit(m.Run()) } func TestImageAnnotatorBatchAnnotateImages(t *testing.T) { var expectedResponse *visionpb.BatchAnnotateImagesResponse = &visionpb.BatchAnnotateImagesResponse{} mockImageAnnotator.err = nil mockImageAnnotator.reqs = nil mockImageAnnotator.resps = append(mockImageAnnotator.resps[:0], expectedResponse) var requests []*visionpb.AnnotateImageRequest = nil var request = &visionpb.BatchAnnotateImagesRequest{ Requests: requests, } c, err := NewImageAnnotatorClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.BatchAnnotateImages(context.Background(), request) if err != nil { t.Fatal(err) } if want, got := request, mockImageAnnotator.reqs[0]; !proto.Equal(want, got) { t.Errorf("wrong request %q, want %q", got, want) } if want, got := expectedResponse, resp; !proto.Equal(want, got) { t.Errorf("wrong response %q, want %q)", got, want) } } func TestImageAnnotatorBatchAnnotateImagesError(t *testing.T) { errCode := codes.PermissionDenied mockImageAnnotator.err = gstatus.Error(errCode, "test error") var requests []*visionpb.AnnotateImageRequest = nil var request = &visionpb.BatchAnnotateImagesRequest{ Requests: requests, } c, err := NewImageAnnotatorClient(context.Background(), clientOpt) if err != nil { t.Fatal(err) } resp, err := c.BatchAnnotateImages(context.Background(), request) if st, ok := gstatus.FromError(err); !ok { t.Errorf("got error %v, expected grpc error", err) } else if c := st.Code(); c != errCode { t.Errorf("got error code %q, want %q", c, errCode) } _ = resp } 
golang-google-cloud-0.9.0/vision/doc.go000066400000000000000000000100741312234511600177500ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package vision provides a client for the Google Cloud Vision API. Google Cloud Vision allows easy integration of vision detection features into developer applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. For more information about Cloud Vision, read the Google Cloud Vision API Documentation at https://cloud.google.com/vision/docs. Note: This package is in beta. Some backwards-incompatible changes may occur. Creating Images The Cloud Vision API supports a variety of image file formats, including JPEG, PNG8, PNG24, Animated GIF (first frame only), and RAW. See https://cloud.google.com/vision/docs/image-best-practices#image_types for the complete list of formats. Be aware that Cloud Vision sets upper limits on file size as well as on the total combined size of all images in a request. Reducing your file size can significantly improve throughput; however, be careful not to reduce image quality in the process. See https://cloud.google.com/vision/docs/image-best-practices#image_sizing for current file size limits. Creating an Image instance does not perform an API request. 
Use NewImageFromReader to obtain an image from any io.Reader, such as an open file: f, err := os.Open("path/to/image.jpg") if err != nil { ... } defer f.Close() img, err := vision.NewImageFromReader(f) if err != nil { ... } Use NewImageFromURI to refer to an image in Google Cloud Storage or a public URL: img := vision.NewImageFromURI("gs://my-bucket/my-image.png") Annotating Images Client.Annotate is the most general method in the package. It can run multiple detections on multiple images with a single API call. To describe the detections you want to perform on an image, create an AnnotateRequest and specify the maximum number of results to return for each detection of interest. The exceptions are safe search and image properties, where a boolean is used instead. resultSlice, err := client.Annotate(ctx, &vision.AnnotateRequest{ Image: img, MaxLogos: 5, MaxTexts: 100, SafeSearch: true, }) if err != nil { ... } You can pass as many AnnotateRequests as desired to client.Annotate. The return value is a slice of an Annotations. Each Annotations value may contain an Error along with one or more successful results. The failed detections will have a nil annotation. result := resultSlice[0] if result.Error != nil { ... } // some detections failed for _, logo := range result.Logos { ... } for _, text := range result.Texts { ... } if result.SafeSearch != nil { ... } Other methods on Client run a single detection on a single image. For instance, Client.DetectFaces will run face detection on the provided Image. These methods return a single annotation of the appropriate type (for example, DetectFaces returns a FaceAnnotation). The error return value incorporates both API call errors and the detection errors stored in Annotations.Error, simplifying your logic. faces, err := client.DetectFaces(ctx, 10) // maximum of 10 faces if err != nil { ... } Here faces is a slice of FaceAnnotations. 
The Face field of each FaceAnnotation provides easy access to the positions of facial features: fmt.Println(faces[0].Face.Nose.Tip) fmt.Println(faces[0].Face.Eyes.Left.Pupil) Authentication See examples of authorization and authentication at https://godoc.org/cloud.google.com/go#pkg-examples. */ package vision // import "cloud.google.com/go/vision" golang-google-cloud-0.9.0/vision/examples_test.go000066400000000000000000000045331312234511600220630ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vision_test import ( "fmt" "os" "cloud.google.com/go/vision" "golang.org/x/net/context" ) func ExampleNewClient() { ctx := context.Background() client, err := vision.NewClient(ctx) if err != nil { // TODO: handle error. } // Use the client. // Close the client when finished. if err := client.Close(); err != nil { // TODO: handle error. } } func Example_NewImageFromReader() { f, err := os.Open("path/to/image.jpg") if err != nil { // TODO: handle error. } img, err := vision.NewImageFromReader(f) if err != nil { // TODO: handle error. } fmt.Println(img) } func Example_NewImageFromURI() { img := vision.NewImageFromURI("gs://my-bucket/my-image.png") fmt.Println(img) } func ExampleClient_Annotate_oneImage() { ctx := context.Background() client, err := vision.NewClient(ctx) if err != nil { // TODO: handle error. 
} annsSlice, err := client.Annotate(ctx, &vision.AnnotateRequest{ Image: vision.NewImageFromURI("gs://my-bucket/my-image.png"), MaxLogos: 100, MaxTexts: 100, SafeSearch: true, }) if err != nil { // TODO: handle error. } anns := annsSlice[0] if anns.Logos != nil { fmt.Println(anns.Logos) } if anns.Texts != nil { fmt.Println(anns.Texts) } if anns.SafeSearch != nil { fmt.Println(anns.SafeSearch) } if anns.Error != nil { fmt.Printf("at least one of the features failed: %v", anns.Error) } } func ExampleClient_DetectFaces() { ctx := context.Background() client, err := vision.NewClient(ctx) if err != nil { // TODO: handle error. } img := vision.NewImageFromURI("gs://my-bucket/my-image.png") faces, err := client.DetectFaces(ctx, img, 10) if err != nil { // TODO: handle error. } fmt.Println(faces[0].Face.Nose.Tip) fmt.Println(faces[0].Face.Eyes.Left.Pupil) } golang-google-cloud-0.9.0/vision/face.go000066400000000000000000000122171312234511600201020ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vision import ( "log" "github.com/golang/geo/r3" pb "google.golang.org/genproto/googleapis/cloud/vision/v1" ) // FaceLandmarks contains the positions of facial features detected by the service. 
// TODO(jba): write doc for all type FaceLandmarks struct { Eyebrows Eyebrows Eyes Eyes Ears Ears Nose Nose Mouth Mouth Chin Chin Forehead *r3.Vector } type Eyebrows struct { Left, Right Eyebrow } type Eyebrow struct { Top, Left, Right *r3.Vector } type Eyes struct { Left, Right Eye } type Eye struct { Left, Right, Top, Bottom, Center, Pupil *r3.Vector } type Ears struct { Left, Right *r3.Vector } type Nose struct { Left, Right, Top, Bottom, Tip *r3.Vector } type Mouth struct { Left, Center, Right, UpperLip, LowerLip *r3.Vector } type Chin struct { Left, Center, Right *r3.Vector } // FaceLikelihoods expresses the likelihood of various aspects of a face. type FaceLikelihoods struct { // Joy is the likelihood that the face expresses joy. Joy Likelihood // Sorrow is the likelihood that the face expresses sorrow. Sorrow Likelihood // Anger is the likelihood that the face expresses anger. Anger Likelihood // Surprise is the likelihood that the face expresses surprise. Surprise Likelihood // UnderExposed is the likelihood that the face is under-exposed. UnderExposed Likelihood // Blurred is the likelihood that the face is blurred. Blurred Likelihood // Headwear is the likelihood that the face has headwear. 
Headwear Likelihood } func populateFaceLandmarks(landmarks []*pb.FaceAnnotation_Landmark, face *FaceLandmarks) { for _, lm := range landmarks { pos := &r3.Vector{ X: float64(lm.Position.X), Y: float64(lm.Position.Y), Z: float64(lm.Position.Z), } switch lm.Type { case pb.FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW: face.Eyebrows.Left.Left = pos case pb.FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW: face.Eyebrows.Left.Right = pos case pb.FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW: face.Eyebrows.Right.Left = pos case pb.FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW: face.Eyebrows.Right.Right = pos case pb.FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT: face.Eyebrows.Left.Top = pos case pb.FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT: face.Eyebrows.Right.Top = pos case pb.FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES: face.Nose.Top = pos case pb.FaceAnnotation_Landmark_NOSE_TIP: face.Nose.Tip = pos case pb.FaceAnnotation_Landmark_UPPER_LIP: face.Mouth.UpperLip = pos case pb.FaceAnnotation_Landmark_LOWER_LIP: face.Mouth.LowerLip = pos case pb.FaceAnnotation_Landmark_MOUTH_LEFT: face.Mouth.Left = pos case pb.FaceAnnotation_Landmark_MOUTH_RIGHT: face.Mouth.Right = pos case pb.FaceAnnotation_Landmark_MOUTH_CENTER: face.Mouth.Center = pos case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT: face.Nose.Right = pos case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT: face.Nose.Left = pos case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER: face.Nose.Bottom = pos case pb.FaceAnnotation_Landmark_LEFT_EYE: face.Eyes.Left.Center = pos case pb.FaceAnnotation_Landmark_RIGHT_EYE: face.Eyes.Right.Center = pos case pb.FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY: face.Eyes.Left.Top = pos case pb.FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER: face.Eyes.Left.Right = pos case pb.FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY: face.Eyes.Left.Bottom = pos case pb.FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER: face.Eyes.Left.Left = pos case 
pb.FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY: face.Eyes.Right.Top = pos case pb.FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER: face.Eyes.Right.Right = pos case pb.FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY: face.Eyes.Right.Bottom = pos case pb.FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER: face.Eyes.Right.Left = pos case pb.FaceAnnotation_Landmark_LEFT_EYE_PUPIL: face.Eyes.Left.Pupil = pos case pb.FaceAnnotation_Landmark_RIGHT_EYE_PUPIL: face.Eyes.Right.Pupil = pos case pb.FaceAnnotation_Landmark_LEFT_EAR_TRAGION: face.Ears.Left = pos case pb.FaceAnnotation_Landmark_RIGHT_EAR_TRAGION: face.Ears.Right = pos case pb.FaceAnnotation_Landmark_FOREHEAD_GLABELLA: face.Forehead = pos case pb.FaceAnnotation_Landmark_CHIN_GNATHION: face.Chin.Center = pos case pb.FaceAnnotation_Landmark_CHIN_LEFT_GONION: face.Chin.Left = pos case pb.FaceAnnotation_Landmark_CHIN_RIGHT_GONION: face.Chin.Right = pos default: log.Printf("vision: ignoring unknown face annotation landmark %s", lm.Type) } } } golang-google-cloud-0.9.0/vision/geometry.go000066400000000000000000000017571312234511600210460ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package vision import ( "image" pb "google.golang.org/genproto/googleapis/cloud/vision/v1" ) func pointFromProto(v *pb.Vertex) image.Point { return image.Point{X: int(v.X), Y: int(v.Y)} } func boundingPolyFromProto(b *pb.BoundingPoly) []image.Point { if b == nil { return nil } var ps []image.Point for _, v := range b.Vertices { ps = append(ps, pointFromProto(v)) } return ps } golang-google-cloud-0.9.0/vision/image.go000066400000000000000000000062031312234511600202640ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vision import ( "io" "io/ioutil" pb "google.golang.org/genproto/googleapis/cloud/vision/v1" ) // An Image represents the contents of an image to run detection algorithms on, // along with metadata. Images may be described by their raw bytes, or by a // reference to a a Google Cloude Storage (GCS) object. type Image struct { // Exactly one of content and gcsURI will be non-zero. content []byte // raw image bytes uri string // URI of the form "gs://BUCKET/OBJECT", or public URL // Rect is a rectangle on the Earth's surface represented by the // image. It is optional. Rect *LatLngRect // LanguageHints is a list of languages to use for text detection. In most // cases, leaving this field nil yields the best results since it enables // automatic language detection. For languages based on the Latin alphabet, // setting LanguageHints is not needed. 
In rare cases, when the language of // the text in the image is known, setting a hint will help get better // results (although it will be a significant hindrance if the hint is // wrong). Text detection returns an error if one or more of the specified // languages is not one of the supported languages (See // https://cloud.google.com/translate/v2/translate-reference#supported_languages). LanguageHints []string } // NewImageFromReader reads the bytes of an image from rc, then closes rc. // // You may optionally set Rect and LanguageHints on the returned Image before // using it. func NewImageFromReader(r io.ReadCloser) (*Image, error) { bytes, err := ioutil.ReadAll(r) if err != nil { return nil, err } if err := r.Close(); err != nil { return nil, err } return &Image{content: bytes}, nil } // NewImageFromURI returns an image that refers to an object in Google Cloud Storage // (when the uri is of the form "gs://BUCKET/OBJECT") or at a public URL. // // You may optionally set Rect and LanguageHints on the returned Image before // using it. func NewImageFromURI(uri string) *Image { return &Image{uri: uri} } // toProtos converts the Image to the two underlying API protos it represents, // pb.Image and pb.ImageContext. func (img *Image) toProtos() (*pb.Image, *pb.ImageContext) { var pimg *pb.Image switch { case img.content != nil: pimg = &pb.Image{Content: img.content} case img.uri != "": pimg = &pb.Image{Source: &pb.ImageSource{ImageUri: img.uri}} } var pctx *pb.ImageContext if img.Rect != nil || len(img.LanguageHints) > 0 { pctx = &pb.ImageContext{ LatLongRect: img.Rect.toProto(), LanguageHints: img.LanguageHints, } } return pimg, pctx } golang-google-cloud-0.9.0/vision/image_test.go000066400000000000000000000023531312234511600213250ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vision import ( "reflect" "testing" pb "google.golang.org/genproto/googleapis/cloud/vision/v1" ) func TestImageToProtos(t *testing.T) { const url = "https://www.example.com/test.jpg" langHints := []string{"en", "fr"} img := NewImageFromURI("https://www.example.com/test.jpg") img.LanguageHints = langHints goti, gotc := img.toProtos() wanti := &pb.Image{Source: &pb.ImageSource{ImageUri: url}} if !reflect.DeepEqual(goti, wanti) { t.Errorf("got %+v, want %+v", goti, wanti) } wantc := &pb.ImageContext{ LanguageHints: langHints, } if !reflect.DeepEqual(gotc, wantc) { t.Errorf("got %+v, want %+v", gotc, wantc) } } golang-google-cloud-0.9.0/vision/latlng.go000066400000000000000000000031141312234511600204610ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package vision import ( pb "google.golang.org/genproto/googleapis/cloud/vision/v1" llpb "google.golang.org/genproto/googleapis/type/latlng" ) // A LatLng is a point on the Earth's surface, represented with a latitude and longitude. 
type LatLng struct { // Lat is the latitude in degrees. It must be in the range [-90.0, +90.0]. Lat float64 // Lng is the longitude in degrees. It must be in the range [-180.0, +180.0]. Lng float64 } func (l LatLng) toProto() *llpb.LatLng { return &llpb.LatLng{ Latitude: l.Lat, Longitude: l.Lng, } } func latLngFromProto(ll *llpb.LatLng) LatLng { return LatLng{ Lat: ll.Latitude, Lng: ll.Longitude, } } // A LatLngRect is a rectangular area on the Earth's surface, represented by a // minimum and maximum latitude and longitude. type LatLngRect struct { Min, Max LatLng } func (r *LatLngRect) toProto() *pb.LatLongRect { if r == nil { return nil } return &pb.LatLongRect{ MinLatLng: r.Min.toProto(), MaxLatLng: r.Max.toProto(), } } golang-google-cloud-0.9.0/vision/testdata/000077500000000000000000000000001312234511600204635ustar00rootroot00000000000000golang-google-cloud-0.9.0/vision/testdata/README.md000066400000000000000000000006231312234511600217430ustar00rootroot00000000000000The following files were copied from https://github.com/GoogleCloudPlatform/cloud-vision/tree/master/data: cat.jpg face.jpg faulkner.jpg mountain.jpg no-text.jpg eiffel-tower.jpg is from https://commons.wikimedia.org/wiki/File:Tour_Eiffel_Wikimedia_Commons_(cropped).jpg. google.png is from the Google home page: https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png. golang-google-cloud-0.9.0/vision/testdata/cat.jpg000066400000000000000000003574531312234511600217550ustar00rootroot00000000000000      M" A!1AQ"aq2BR#br3C$S0!1AQa"q2#3S ?8JvX~_a>8,AE]'fru*̥8^G}y:?¯ߤ| ?E^>qQHdo$UK{! SNв9bsA eNI'e9~F \Ghi;`DĂduByu/+$ )U}MG?OF{ϗm6ag9Ys+S丑Fts}@6g<~zO8qپcHiz۸Uo=Y _5f<{|!N^T x9Wt"xZG9UH#Éo-x!h8:{nn-3 D?eI㻖 qS$I>g7fg*Gqn&HSxʨmCqcO qV#碨L-9Tyi n17>R*[NJj۪[Y'Zqpܙ)Ueʺu.k@1RiD&:E.hٗ`='&H/M9 Lap8ĩu \{@SS8IH#}P9-<. 
50Vuz7(F v [Ls7emA -2g済Ns(?.JOn@(sG=n\ҮsDI0rNpx?e4T%/ȯom9#WXؙ'${%iy!03YW%Wx q=ʤApi?%r|{Ħ:09QFZZC/Ý(~wJ\'W48_x3]Eac>V}仪)7Eژ֏G8?0O⾱ o{1\/ p V/glWZ,•˔!FKqPBBjBJq~-b휶m\zĭy7gHUUVܢ T*Zc5w rkSXH\rI4^D`\,U .eT'n taigYJMY굔3?TmgtN1S* *hh\ƴ$W=YXib|vR/vHiJل8 VwFS&;[ЃkgY#J2ᥦ{@hRcFe9+¢-3s (z~ Bu_7V0 ȍ8Km#<'Sੴ D>{/N+L8S+4Dz :9ISߢxS8c^m*"'OOPЏ2Creݐ.ER; CR@A C j^T4,6Q r*a4H ʗ4*cSGz(N4#k9S! .4vSf4Ȃ ,P#עmJ 6>}0KzwQɫP\LG[4 PSfaS}"}5)SlH'-bתSiLǠcq4 I*|I^|pbf%WuWWta{J=.Bv+OZ{5j/Y_eB֗V@^M+Siͩ"’5B#$!p m]mͫ%5j G-77jdEJ@ܳWqU{V\%<-%r%ntE͂7U+{|nR*Sk.d_$R0xLce,xVe)s40B=JHW%<.AoNk ?\ y>J桃1Yg}8㏾QY8F>};T)l~ .>U :2~;P;&'98IZ˞gW4R@<NcZp#3%+zBwXƼX8#]f}WTmNV;kM+tmC'ԯU->YdciyCMkEŸ;33.?7Z n:Y&9ZK9g9=~ʫVcXq,dyIZ"D04cۢxeV?j2DʒtA5َgQwVj`w)_>~R:ۙ=l~p0}TUF{y>ju)Jt 07~YRf~yK0jHsYloé Z!6F3lop\H.zv}:P4IH=JX XB2G3PrA(:Q&9L0*=(ZJ!0 ,ZT8R(NtSuB.kOE5G i e)c\u\aCJ]Ʌ&EA8TQ=S1MD J`"i4OIPETH4owt~\8YlpQ e G-JF;d!aU-(IAx]N9(e6N1ͥ$$:P(5'!\@u3BP')s.@wh#pS+-r3ߺkCp)!.fOweP y1>I(-\H"TJ*x5q Jm3w&=r0YLKT,Ho>"LTk:de[NӝQiL4BT}:>V7K {m"˅KB=׾m|8JF=J<##^_f.$/ \A .[d*vOW͂]J* 2mM!D&mW-޵rٳrµr״r=%hD`^URU799,hBF,N nV W=}.[EXycM119J]k*()]S[LbTb) S+jEwk6g =v£Z2:snI _Dg~cQn~2^NmMdϿyۚre/Of&2Ok4ˬ >X҅<љǯ-;vZ6 ~L hYUqih#cS9'Z5o9i1=W_/'?(DgGixW֫kjdCE;*c/HZO!akV*QMը^էwkܳ=D1xʬcsLho4D9 MK8T)} WAĈtTk`~@@ԶKxRq QB]T)gDreT s3*OJX5w=\>}ELADDQ!pvc {}TT)]  |A%5舭tJK1?6A02p@m9( %DN%O '@꟰@~nt8Q\HZ^pOB[R.~;N 294!A(؆ =V(F[(w"g_T8*:BRQhCS  sTtM>(PvvTlIV 3꤁8R|<*IO'e3>XS% ptw%K qj0aA^% Wi*h!.@ ӯ)2uP@2N#} tVCA&9*G%{=Byr$%el6FI8I)uu6+cJ *O.9JյҤiJF< i#%VJр6|AMe@Ӌ-#GKx\&--y zxnQ9)qF΅W+^|I/K;?ʕ'-tf:v2j)O.O'=X. 
dã܈ThShhef^ ?'==A꫻A3ݱ;s <UͫG9U''?zYxm_Eqsg#oa+}cŨZkQjF%$}Vp=?esi;d"Cc}BޟEX3Qד+-%dP O?$ǹHWKoe8Xz ,4gT_=JS *%U)ӧ!șS ]pOOzEJ:}JTkIp>t >Q 9OHHHw~`0.y8Pס$#[I5YpesF>In`Mi%nS!ޞԹKS5 =U*߲=MjSS.LnMuL,-)kviESsNW9ZCQ} '4BXiDqbxPDL.i r= rT hS?xV섨OTN09\Ba@%%b)rS[S(TJT0!$IW(2)LBe^hK!6 EDN( &5u6+$QsHDJΧ4iS ZMdhI >h6@eMF.K=1k՘m@{A5ĜcwׄhFTs'T/uwdI>KOҜit}ײ67Ν: mEՏnmwh{]& h`5Wq F j[Kr.4(z1͝p _2 ^!swwok𽕛/vc{2-rVR4"Hc] D"дzٶrrشz޲vVrݴr"lj)ҫQ)맚ª EvUj5tFdȡ A?4ޢF㭏GN^Uv[-JYAV+2zՉ1]se-)agMQlYӉ[ڍBɡ@EV aiRM.kl1$'>E.-+.֨hi{@UKn_Yt*+Nd&#=+&2=}YT5ʔFH8nH겮 EY$=z+[1.DV&8MuGSMhyj 3<=:Ҙ'3t;g spky\^9 : A_@Io c-R~CM Q 9p:s 5cpG_X&2OXDz{NUZcG?ou @'ӯ\,0#;9̭F>~HeI^LZnq d>LsTZ7LKg#JuBf^z'$+7T˜;n ?g0:OXG첵4/ yE_Q}NIGLMH*-mU9^JSe&2ݖn]UTK^݋.-:n^OuX D9Z~= Ѧ)91/ejU<7Z,W[pYϧ|5sU_$ -l\=P\fԑBB8P T' V׵rƢKG,hdm\^*ܢU*4 L:P «T.ƪ9BcFjŠow?]e ٔ͋Ui7/T>Ѹ5n hUtRp+4+~ꘂnff%98)̺i]>y OEuCyI m@dII$IPa$F?*78VpӦUAC@gZNlbe47Aq9ꂦO'oei|rs 0:qX *hc5HJ}=WL1R1UHSU L~@?e󎉟4@K~#SM&8]x }iDR$NԉEPrh qv~H B—xRHLS};#kW9'5\Va9KG+] j˚ҁ((S@-?e3PD,$ASޅEtz\O4 ~&Hg<Ϣ h ?LeK)JzQw 88\ lʀb4r?6$лwD!)kA+A ^̩G/?e&5aP/ڛp.򽆏 [1ĉ:{+Y4}M6@=`e5]2O\lagݮ_8()DGNkP2cԫ-Nv֏1mg^ wJ=eslg [R寮4OT+=/8?d%!+ ZhTh=_EFt96Uv`_P lO e}o G-]³g8Z*XABPQ!8 $#*JCUG**ͣSנrٳzr״rĵ譏 BYVi+Nk.Ul*ںc*Y?[—)4Լ%lWj`U71jJʸ|GSH:a_pijwnvӅzlV~-wJ%g$RnWFYcՌjaemzPc;*՚GUjzFpAPH|}.hTwth28??Ӫ$XoRncQW2ϲҢ"%fѤfbx-g?,^YyzҤK*7yȈfa צH%{j HsfGZWwq<]fmoUsbm מpvsGdϯ]<^j &@}>77O'1촯z'T.Dm1ItB1GÎ,<t,dʬ@c\GMO!Vvd~T(ܫAE`ԕ\9#ɪ/8e&BM & s\J\G`XF tB禒CO#ߌ\?Nj#PS&:er18Im1,.m*7ۄ;)APZՍ49RuܘB^$CQ1ދ!-Zb %(kц CJI)0ģ AM")MpPJ VШQcER&Bjj8bjiX; =:YzCP\Gӟ.(\g*WuR}4sKϢ ޟ4WuQzAT'O!S]Fs3Ujg;OZUmR'1s*¶%;V+_Bk8YO\`Ә\v껝+4̈rt}GP#j[sV2#<)Ll!Rw@T;R(H>3`P% & ʃ22gmbD(`9\qꧭ'4JK\4SBiĢh-JGɻ.5zSK\D.r:neBQSj'#-M32PrI*1P# PRֈRr&-SDR'A-z()FIQVULz)е!@(w 0ChB5't'?t-,-^wtTꢤ"MK[I츩O۔ K XEwPDž^2;w)~՝1%(ﲀZB&R#2cI9D"E&>~SJ 㿢ȥ^ºЧ3;JC7]JfwRc$ PqܨЍ?F~kZ*xn{/;qucMRɶZSX<'Sqt7J/OL,=*úMdW?W(ROjH*5yt"`FZX5!j .WfQƓTS(Eiu<~|× :e}B6pΜ,/.\N +Pr`+.)S֕ͫիT7l-ܼZbEt窵ZBUkJW>MR.Pj`ee J[RR]6eGei2ʨ̭;(솸\2BέK%eJ( j-g?Q8Yڃ1# B9Yڙa_7jq$)hi>ZqrJ %GP:%XcO rDGvȟA' z$jG)˜EV>.i_*#V^eDV 
.[֮sڍlOdr;[nO^Q[3uC DNe\̻E=?^rNH;GBG1\3N~j`s#c=}VejFyO_ex7`qe۬ڞb9T]{}VѢGϯ1Vۧ@w)՚ O0GIU>=ǨOG={%%o!H**#zTϦeB糏2B#) k[{>@s#8)xRяdes'D셭&}}BT}%Ta x쉎bЮN8CPr캝B0p2\E5y%Q;D.MC+B)wPCҚiAOɦ k2 4E(wRw)#,R\!+nJsE#jPd,o3€0'O-IP+ʒGE% i H ǪQ1S@rrRT `\]P=6>'S;p4CAv3&bsa@C1)it!k즃iqfS+0  a[OIV]L }U [i֜Z-sQ,E*Ǣoh$wZtpW]kTciB.L$WScAV393Gq^(H]l "C09[E=SK/_gNY:=Ew Uy oj~+9 )5+==enUZR)Zm*%9Ueghpz ^Hz Eg>W ^}#E.QL#r (-ABBɔڅbZXLt-{:l6םr۲~ThmWYWUg5m*ͧVV]맮F.`GT!cTh0vCU[K^Cϓ~*5jN|SvT+0Ĺ fUrQ|O{]'Hy@z+0Y3RfBW.m*bW:pPeS̕:u@]*:=A!s iG*WfUn3)EȪ=r&9=>jFL)g6?En=՛phR\sKG]ƿQ0'aD2Iy}1FTڬ9'%[zIw3RkN`)B1$DYy0lh{B$fg외t[qy{}kY[@Z b]/@䷅U\MsPB-T5+BLcR4t¢9ijAI}W÷k4w^[%iz@%fֶ*#^ZN@viN=Tj\媅KDp9#q־ux@h챫z1&aQoK%6XtsVj$g?dkVMxs`yHG\GUIpWV=zC~8|+g-hsj L۪zלIǹXėyu]S7tב}=wdI(s1b@YUFIHkqǙzTF?-m@"+#S`@3趍F}I8A0Ukȃ._J}<ϧ4OU TW-ATg䨩PPؿPZR̨ 5=Or*hMQ&iIUHS^=(R';"DE. RrsD*cjrΎQN-(FeJSM1@JhA'DSgD(n w聸}>T ?YPs\?k~!M*`MGBH,(KqZϿѥCTj6UR}QM@`#eHiL-(>9"(hQUBr k,Na.c%Ml|EBP-M/-%̦Ƹ;j6L/~p F U1B0tTRւ?E?BcJ ,[\h%әN ~hߏD*rasZ9H&:u63ק?EZ֦8nNT >5ur|0w8V$KSSh^ga`i4/!gѨ;G[Ӭ2U[KܞW!GW>ob#+Ѷux j(Fhaq{;JYO-K,+\LtʵLh(`a\BW >qtçմwk,  P! a PKN jOS(;)hQU+jʢ۵z.[vYSzw, 'g"&iĊ%ZkV2-O ޽rkʔW4&JLw IpsRjU15_.5:Cp m2!\UVs8Fe_s8QRU-|S[)w=v[GB:{D,zd2p=VZ(dͅ^u~*= \Bc-CT\vI?$u<*tCVCUJ]!΃UMz3E/0{5 蚭Qj [Y9Ӻéjz=Ǣg9c\H߆u6"uFdEqLdF>g}K˺LcCFSH"D%R{$-*G3㢨"8<3 f.&dv RF!.(+GIT$|8tKcߒ7-0h gK[9=z𦡟nm)AӺ2g R'M(88ꦫ)t? 7.c 'UێyFT=v2H#$ 8BNr?zM-턧zzb|`~˜yt5BJG}Tj/cJTSQm-lsaH,[`s%|ԠN'`]>J˦~j=}I7U ;T(KD{qBm?U;T!&W>˝J11)HE>=>U=z a8P-l{eS s\L&V#9-%CJD](F!(~Z?u>4$S A-c7칆@AxLcDrPf2ADDzPG=;$JonIm~ݗ(:%rG 7VC@1*eu6c*nSCI#r2`T ќrx`sUґt#I^WG.꾿" /?Y^9ק4jZطJs9Zr*D#腮xG ռQK%8$ppҚгArd(ڠQVi]9EJ "”ѫA ~|N/xrm\pݛzHLBBBq%!!05zv[9n=E8޴r۶w ڹm?lrśnL5*5$9^ZUU5RQ u5VPYV Sֻ hj]aoAV<},9KZR蕧Q+ܫlFU .zX:QY hJZxV,d; z!S#~5M2*;)URX}8+ʙ# UF<.?{@U* ഀ j xNhf~f<]zygB}g ۼ-@}ouę8.nA ~|~K)qzw펾j\t$PƃauɪA.}לϢθ I~ZeMIf}dW4Lvjq0$T3ϷqVL{p&%>;tĤ`dM B]10s?$xPQ>iuT.o 1DA>#r]JSLwq PD{ nwDuE*MP4cvqUjM94&SqTS(;T).k@:xPԩdDƐ U#@h_BZLO搪)Lt9V JD4"! 
# ;>[n8@|pz"uL -Hx9`D Aʇĵ 4*n#0PuD\IK q=0!Dzo•*Kx17PtZHZ}7s24a#) 9]2vSAh+B?鶤곥Mzro @ $vU݇tM^!M xWksϿ.WL7ԭ %ܪ̶'t$}S[d ^~"%}+Go O_rU[pv_tk۽*i" !t#i \iKxZjkT"jU!p(!52K SIa.j;Gy~o+hVϧ|7q6V@_$ %n=9:z [b$"PBe(HDTB%Е,M"l,ڽl9yW Z4߅EFL!8w8p]NRԊ4H$u$+j^^nWѵfXp?U8jGEWnVj˯Ek:RܫhUk6 г@8Yܴm=!3-2(ؙe:,NSKOE#~YZ^:ͨ2Rg1Z'GNVKѕVekϪѱ-ЖRta["sx8{_0 Aхi1zj׺g2=?Ƹ޼w}?uCqb/0H<{<V먈-=GVep^zO춝6ߏ0#&d|T0 IoZ=?_VF=}E]U7HP$O_]>U F}շ4;gYcXR^=h}{?$'OEZZ6q~,y\gFsOF̟o q\`EDw0}:! !k`I rq)@ti%)M(T f{ h1襭S#ts)+*>hZ4cOTi쀄ENxTFʐk Xm h48FSn|!EZZfd}  xh":{1G(SZZXx.o :ƺ8H+2\s*vaq;ZfbTةT'P`Ru~"6񓒀gbL=$a}X#^'סаl/Cb^1h1³]o3}Mbb58`)ڈG rSڬnUŊZZ*sON=*B/U!]sr6BZ$civ pϔ&Tt7*I.WEX 0iTGE,N~x8`꼕 ׬vz"" NOj'ޮ2P̅^ϲu13\f?DhAF;uBƅ+^Lda$OwY*ʠ2{cų2T77ɔƨ) lB 3(GAYѠshZDND=rz%>Uo*6d:Z=I%g2GE9c<:nyOhx]\펈įh3#et$~U80sEJ3qx_Uuƀ sP>o^H91b@3<>ȩo1BQAt'J}y{רDs|=~ħ# d^עJD)`E;eW#C ~L)u*SqYKTB=>9ϸXHUϩP@AfPN{`}M"V@*v~SgLgY?s) 8HU1>U"D;)[}h m? BTA8/NDs-Y:SDQ8FzMn$,IҡRWmʰ"HE:t3=p英.U5XP7ПtgN#)*X9N?E쑎~+7 5zN?);}R.QMW?u%& }S)u)UH; D&t:pzqVmO@NLXMp@@+4@깢##ZzM 2@AR v?u*=uُKThUB*OK#@_4C}iQ|1tRH_OƝasy:9鴻X[iм?'[^<+4HZѻhEaYnxYt^~\`^U T.jmfZUVQ @BAq B4ҭR*UiCcKF_8_*ߕp>^^9z0#@ #r D% Uo[Ur۱Ľ ;g+H߈VԗO5M!S*5Q̅^xX֧5 L"U`Uԕ5QVgMR 4X%s>,GSOOZm=Ϭ1=?%tϪ3"0'J;_Y43c)̏ԓJ <ۧ4i dg ~-A'88IXsHax‚s#MwʭUcT@ ?/D}sS 07TOH9=rF{BEP otN82> 0F:w u+i3]YAW:?RE:s7 =; D3K2 x'k_(^qTu\#S@ 0!]NЗ}дJk@\vA`ghR{vORnn=?(Ȁ~#ϧ :чG22̑uƏ\U÷rCB!o ジ@GF_'YB7 b>aM7-KIL`DLpQJ8D9u$qGʐ2:q:v@Х<pچz~d ͵8grMe"=g*ͳ?Z 0De9稞iDg0g 07[ #;}GIꛫ^hpisY1tMBAɫPɘV[+ogя荖ʢ  *f[:UӏE7ᛎ>Ku\/xj돒[C ZrBeA TcaQc > j^Rmh \$=ӂM@yaT+5hN[rAuT4Jcڮ b6JWjF(e1 L[kMfe2Si bL&9)0VhXTTq`<6Rꕟ^*UJU prvEZkUzJ8^ mN&GWd* rE룇?s^[VX.h}O^¾ei]2Dtys% 촓ČR0'&VGk]<ןsLu$DGڻ1GV䴉<ǁ]]QT_N A#1Ar>HG}Sg3s\d}T 4G3ʰfD  NϢ Ѓr`T&=Un TNG?(B!|6DQB N ze1Tx8S<-Ǫ7ˎ4<;q=7"!"M!= FP9P- '!TJhb(4-j-j.PQBu\"] -,U+_S Y{V*Vb'WjM5*[`U:e]c ̫TN"p,UaʷMZo 2Tbe2`Hh=PV D(tkZH S6Vr&mF@NfEQ5n+.jx/AX^%qvs\uǹ37 4w*BmәDGWO*1ˌnj`QZAȪ T"sD29Sa!(j6:Jr>q '3d(2 81%%zB)Ԉ;Qa܈meCPU?~ kH.4ֈfN*⌡{JjaDAN G+B(젶H0>Ę$ 4tMI=-JC'8=U9vJp{GZr3У 4, #W,(w*?eJ V)Cdڐ -A ]]&eg5.Q 
LjiB_tHYw+E+^Nt/QmB!x>o'o8Fv$.;]r"{MEQJfH-G,fKW)E]Hr1/nX7l]2cU:%=8D.IPe rX&1(&0 t\4²Wre^@}OW/_R +}Y +F L,A(IR% \ r.L¡PZڢӥ_ Eq(LnMuE2L-R4w ZW26^w3*jp^jѕ.-IbKjYnSZ" W1T#*V*t\(p\OPФk5TyUk58rEu'JZC Mb@&@}4}U*5@$BUl}fZ}ݲZu|Yt .XDyfpOaeRV !.isckPFT~r0;KDEUCIB::uʪ$+bHTPS09?EEp3#qa\X* s2 GM' 9TCΪ*esNrS> wN2TUAɠUXA'UaP@=f>OT摁~AVq}.T;s埔c.~# e &~H_cF!ߺˊf$qR $ ||/TxĠpoRy\j##PV(f1+KM/Ae7Q)Q!sZV nKè?+J/mE \ JaeA*J B2DPD& ܮT\W0aIjvARІ 50 xZD(U+R,ZuQ+ `5:YEF:QYBYnVV,WaZ+M XB*a e%{U;r)]Y*KujZgbЁR"!5@eGL+gY49ocx[H+Z"%zW><XW+2utaCLr^!cfUUf.1C5U}Rܤgʮ(RkS([McX2! BmgW 9T/u¬?$AVxTNҸsqu)R{ W)Ӏ36wQ#)_H(AJ (۲)TI솽WdӂAc5({DoD&%KUƈ3TnɂiU2=SKR~H!4 u@آ.1HϢ RҨ.CqID ̫wCLk?%Ӆn})S*KBh}V`OuVSvlaU7'SA}TI~;s"f>jY`t йzEoDUm.lrOUzژ2XmVH2rK%>O #ci%R$/SĐꯗ𞕀WҴ8/;/SBmvpB;E[]yk~zuT:mznMcPJp.UU6kjVXvo[/ګ*B;rP,DpPйW. j c%ZZfrҝL"IaL4ge{d/ׯS\p1u} ʤ ]pV.^VTP1( 2.D@A@JP C+PJp"V;-SIF0^Cڌ ]Tېgj{*U@-էpAڢ]peR蔊8,놭 TL&WX0SW".j:*\4B,ۦ-Z|*wLD ahTʻnPcڸOK{JW0qNV.rQ+LJ\rE4E* X(P(Ƹe@ +uR=wNSMm#ۄ@$ҝD|U;1 AR>?ە&ch ews̤Ң":.I'ߐh:A?dlp>nս2c8tOIwY\v^HVsci(DcwW.!H!yU}_h+ī N%29Ke%gn} h0/%@Է+ӗs3yJuSZ<.urի) =޿EP2졩xUiEm,+9es5"WL*[xGM{M`[VǪC ҋƍ}TZ.e|펩%{*fi5N6)ݱ]&3םzՏYHDh%)Q!HzqzY(!P )%5N6JD[aNiHS"VƋ[!csL>}KB /xv珒npۛiB"%6.%+BJP (J{!zQz8SCl-ݪCS*5S*]T5i T!SMtT+8HB媻rYJ[!UD*+TZ !sB",YptJ* Ƭ!6]5"J!9R/FІU UQ{&1,t! 
>?|By@OdZuq>.)%H@J=UC@J!Ry*( 'M{RBLc;|?3>)t t)ZӄNqWC83'VibA5\oE454S^d:w^N[N֑U{ۑx( tyJUۏ=RBeezނM+l+D wKZxyl;跬4jvOe}}VKh 5gd.epOSU},k z=!= I[/qud ȺӖjBmu %ftNu npk*k_Uc[֬Kz/r'9-)Brm!oJqLzY +HQRҤi*24$gA *ZQQcxv}KW{{IdVj箘\;Mk\JWFYB\*UKF,֡+B%_jJR4*j!7>MI(S7 O(+ =pJ`T$  ʊ칯S5NI8C<-O`:Jz{D+<šotVjQFC ?=O?,!°%ŸeZrW11M)J\;}O[d흴q{Mc$^ދm>VZ2[-C}`H#09<ۥ<^e`aniV1Jtx4AVllr|5Bį/xc7ꭀ oW%g^^Cl\ӥ[ R0(hI5E.jS)\i:/Q [v˘Z6ڜu^z!)$-}%Vݶ궬E^#{vNk%n\s,\r )1lAA^9 mHJikVd씵loʕ%y;Koq+\* UUfP B&%K"{\EsIsAqB\~"kX+=uE^UI\ KTAUUV*%mU!j# VzVuFQ(*TΜ@jsU:S(rVr 3nU@DE4&ܫQ),D"+*5Z @54eU1:(!(-PjW![bK r@)=L.@r gm^vB큏eˏ|IYiE?%8_upS~pwnc?5箨-y*ڹ.).Q4TIsP+RjuMhnU 3v!z<"4!XNJL)Fځ0ŠUizO\'S0))9 F Z`Hk~mL?+{/#y֨z>nt"?U|1'Ӆ9^L%bsoQ -?ĉV[Yޔ,2&V߉" p>83hiiiꠧ-(aj`kQcЧZj ,[N-ίRVMmaW~WmTSةw)2H&9*VvyhVKU-R}{R}еIWWj|O^<.WѨUy?Q+j!yei*rU)R cRX76PrּYtOecx7+u WeY o ʶ^ƁD-r .IyNzKմ$(@E eTƔF$SXUv09`9ni|/Sa+NpL.' yyJrʰ,=آZsJ#B&=jplfٰ[+m[i]swƆ &4Z`P9IP. qA;ZrPBJʚw#iI%JAarH(PkZBҟIT׶8_AWtz/}(suD^4V]U ,6EVT5**UVQsސ&I/B^ "$5Zg Kj\¬=TBsSBjUF&1˞},f܅^*J|\zUnZx<U^QQYuT*BGjYE( ׄV >Z5͹z&H]V{zUIaXF GJWTD#)-N1d|ЖA6ODȉF>fFBkx==o[id] SH:`!3a%;1{cyqxrS5 9=ُ(3d/3O E` L1&bG/ArVK;˕m#>+{@##o^xce@l8&:vGe'N+gV=#3\NHEw4884 eayy GQkƹ%1 ]›S듓z?ս۹gze[k!|<b X2; l1q5+A)iEH_R buu_,I/2J. 
8]\ψbxraz\exMFup#P^zeE*2ej {^I˓;H|'wז' ^ +u4A,_tY#JWݵ5 U;'Ѯ" z#+]iiQotEZjmxp58ZKHC++Jwxo ;K=BMxy :pҦr WrS ;(^HF\7 k,-M`VCBˠZki5i5d.\¥ J7%MZ Pt!MjU2֕]GhԂrŕ棯t[VWϴ;̅ljLN[)%[JWBanҭWkusSRpRP)V5WbFʒMVnbV`JSR!ZU6%GLڅQ*BjEj۵fN:GmJ{STYV՛wIk]1i*dmZT{eZ'4(sT%9YڕQTP\Bz< ZWە})Ӭ+j4 cpՏym+I,x 䅷}d2WG7\)tIAV_.zcm[NyB}*0僩iyM!Vq^QW!ZT{Ug9Uj4@B؏r)IPDbieXU+q(1*">-R0AvsqVQsBntߺY>רn$@R@A4L  TpseXR'$w= { R{Oa$n A2"0'r}Q0#@xAr 3?Q;'/?HvXp=g I[$Hɐٌ㞳6 3i:<|b•D8lf z_ytn k&zu^HԌ|G?={ UJ9}}WK«W zq ~ŻL8">Q%Yh2Lqz?xOժ"ڛK?/Թ{X|s/̴e MZ q"aN  }8PFgäme=v6`->a2rDWt0CaO1 ek|SПUߒM=^oxʍR$vW-/@ӿNWWj|\Y;{dr8 QSeQ@ -8}:-=@v1Dz |ɞ=  Qо!av[U ϴ쾡i0`UlHN;,[꺑,0Zr谵x=י+[VЙTX@WW$*2W5kB^Wey]\-U/+x_a_ˈ} um{?H Zց!8sށlZVt[*֔E&5xGÀL/Ԡ Ǿ瀫>Dm\ҽ>u0)=.M>|/Z~oC\d/^4@ M~%QN=KDcp;===?\YKp 'H<-y榓k%ܻ YNVsZeUru5z¢6k#UQ\6ǔΨ)ƮsD5F xTB^UDD׾:r]r5b+TQk{M}_C}G!|gA_Or r9HQs](%Lr])2U*jIK\'%C4WxKz}@Q\\.D▴BjYqH!ubRxSH1^ hVT  `D೧!*U$WN**564֮r$.Nj  rRPJA_U**%QuάեYP3nH^_U^eN(Jڄ/Qi+ą0-l HGo}W=k{JZm=Rni/jin(ifxV+BĬ!Vc{rra=B|0doOiAi0Dxvm'0N 7]'XrYkj<ᡣ%Ğ "6ԧnݍ 5Z#qN 䬺Q?e:-j^Oc]l㿲~xA$dW>\/UCEA@Ϣﻍ4}$e$`cCgu cNH9pz^ wӅiYl!s\}_%@qh#/eȜ`kˊE "'~qW)WD>{Qe Pbݧ cEo=M+܃ ӯ hkَU-QK-k`?ꎡ䏘Dck}ן.v{0^G˕snǢ啯ϩ_CoK&ۜ0xu }tYiH^8LfXZ@(UQ+lPBԕiٴZvI+vA^L@Qz7wEOtt  +|\2 ocOojdsx@ ۭ/""-~!$<6d_=)A vs2 w/g_"^:y蕃|]OP F-J2մ1YY1u: fGeC]+Ϫ处oMYQܛIȰDTU=K UTHTUjWGPZ\_2ru5|?p᫞ƴqݾj֐\*hܣLM1G@[cnKaFPIb`KiF QP/U*EB,(rUB%=Z(k UE=VmV{oMn %yc=UGϹbzgͰ+OWʥ][eqV}EvT#^~VɆ.:bkZ3ThHFc찛VTQ~qbu'1Η c=#=巊V=GoWĪ=WS#HʟӕZ;39뀈lNzE:Fy Oe$n )uC\ $s$I_9YwĦZ?4`-ĻWl#&ZA=>4#I `zہ?~мBO0&Rpn>Q9a0]0<ե~1FffG8>5"Im!if3ب ND'&}}>)U󯢣Z9~4%*`O1T}D:޿OZl ;kg%p 8J܁sK@5hSj -*ާ64ϔc9oiX?~)`k) .1; Q$'ឝq[}Dxk\r yyW-|dZ`$ tSh.sGuVُ.wq#Ds_T;G/`qˈX 8Gs; p .8Nyr%3d{O>+Zf0x`hØ1L2 GLkI}pQVn6ϑMpU&]98=En ACB7o?>z/aci\>iKp}`~rijڔC;H ȑVOڭ@=s9}eަ>}L|'#*Ԯ Jƻэj!Uz2}9_Ap!'kƫwͦ#+{j])ibtbRt›S"U-$kKgo0O5x^BB0 ,v=+K), u>"υmF)~x%}h9|]ݏRI<9}-_w4gS ZBHeY ʊW?gBdBkzCjKlz+tqNn{/5rܕ腤|R]̕h[zߢ&ۻ +.CaW(9PR!M(_ n\# Z!iY(Q65t |O\?Q} ozzu-rS@\UE#KnUT&.J%s\j2Ri'&) (w(TKi)5B DHCɈ\*5nFUZXU/ iNSz%DJN!VURE]ZJTv+/Ki K)A*XSR2Q)B[B fOerUi+t-` 
*U)5ZjSQ6RY@顪՘)(Ǔմ/EM}VʺA躹c+ξ5}c}àּ31<[o#7@ދ%dY^qWJeNx TQTnmemUEKEiO !- k0V˄%ԲUK*5ʏmJ 1sz:&@0tW"dq :{V]#)ʴu? ~8SgvÇIgiEZA'F{5*  I7vp؃"sTH% Ap:+ߏg zN>}*ֱ<Ϧ'<0iy?HAI31suPX5@%0q=>Siq ~_5sw ny瞡V*uN1¬6NDp:ҷeFLym13ʧG]a-kw2 <=oU_M|؉

HèO8^ } N듦ë$>딇,]-'WBk)-mrD)JHTUPQWfU#iP.UN&PZ.HWSMw TqSRT$' +:l1MfZMvNͮ\ ]( 1r4SRB\.zj9)9 l** Rm%Uj: gIX$9YrT5Tr]Qz9CH{SIR}U\'?;,KKb ZqXޭz/=uE۪y;y,M{y+Яí=ǾH=ߞRψ|Ci8+,LhG*RVҳT 4]&[uz~Dp:4A% U{ SRr9FH[9ߢ7 ZNx#:U2:J+;+4{9Vw nNv8J -#LvJ"$ ௢~~ջ{]2IIE}|OH=0zHλuMk"W0,7W a isAXvkfYz P"+1sg{@1јWK<G3|/Lk-#NC{'N !#&b}}T4e"V.lA kd@¢qm'$LL o5MԨ[+&A6AmR a\9ZL83CA\JUĀcDBW%y-#i L."'x^j`0GQO+EJiz0p2Z֓SRCH`97/#6ї:;GCƚ| ӓNd@eؒq:͉W-#dpgR{ r#U k9>v꽕Ǽ:hT;&>V%".6zzbXdw=êIrN91EcZ7AC0 cЁ^zs@_9(!${s+[ׄȀ}}?UR}ߨUڭ[U.ha!8y sCwIDE  >t00]%Y@ZMER TJ̕JBV@SWn!CQ$U1 jI&1H=囧=ڿ7,iWYkw p?dž~5tH~Y:ma~uLp@9S v\}zW2LJPacz.UQ!^̪yA^ꍤz*_|^/]҃{ץw\po_#NZ% -w)% XV.JyMzQ RU`J%\EEYYU r/rYr}E]AXzu=r^}HƁ}6}㗨)ՕA[ w,`yC%U HB)EX ZB rF8D航7n! CKJ̄9\+RʷYVyHBIZUVY:qfЫʡl KYgVZuBͮG"aKk aR9*teKŠiSE FUWShIaN5 ®rU\<-+=INMxCH#%g'bBa jꕧ%WuJp잶(&nVV:pp |we,|T/a^+[ql_*ſZg ^5CsrZVea~|&_UϞ9pa6^2xM|_GXMmP`G?>t JNϺ?ZJsrf=kL =_~Y#1zT ?l&T6!窡B%Ou  J8Æ!G]a0fk!ĶDN _Uzoӂ p N"@I_ mff]A%}-3e1W7^_ CM-h~c=  T< =" vXn\c#$'uA,a{Fc<,z@@kg.v$E-[3@.'q ~HIU+{jC]rZ$yz8-kim$`LD`b2@e]D?99#I[j{K9`?wA乎$'煍gkxW52DuDGQ=^聴 G󕛦Q!s9"TGn?1v'pA&1Ec~%L1nX7hw_TkG1|UjDu{mhK3E0tOEh/Wkm헅Kás^F`S[0 m@Y-z IGW Pjlo&Qm. 
OZa/%,{.nes#;C诽ky!|;Rh <զb={B{K^K)̖2?W\4Csg^.~Թmw?.0{n˿ +j,9eR&an19ܞC(Rk-2W}ZuZq)Bʇ\%gջ'IHi.**p,]V ո|/9UT~#Z4L!2\xN Gw^9N 4(usѻnR$:κe1H{܅BӘzYJ]H*5%QkQ՗ je(I^ rոX?tj)eՒu]k9?fzTEWz{mM- P꾣k[ rүju\X\=BJmT-%ZbL\bδ%)A @\ET+X+W-QքH.x]DyJ* WoFjUL i+jV •nVn >Z ]3 GP.hZJsh’+@uB@U5X=)R)Ұ]U*`MuA_="DDiv9PNsRx˸j]6XS OchZ5k+>ɫV5C 0/ط VnMD#jͽl-MfTOX X9n1nKreƎ;-g-VGXIw@Uݶfm얼,o1Sͭq Lzịkr: nDDlǪy|sI%4S2LwOH_hQiɨ}üeW0y@U쵝S00;zd_%{G:}pAQ]U=ea>I8ḒE,{2`'hU`X+ws\g%_\Ty_\4lŴ9ˎ}f^*KֆNku&4HcL/9sv]څtwsECVuWtCH3xWFJjqI0w#:*]-wcܲ1<`zǸ-Z64G09#'p{$LppA8!uƦi>?Ɛl ',1..oƑ4 ϰIgI2HNWÿė V'AfCG5CA UZ2k^oĞ'e9pI$cĺM&O֪z4ms-2A%;x~4s\ bO$@ſ]{e(<fyO92AH<'>9 ?~,]mBىH9z,O$ATH-C\r3$p ƸQL wUw<ۺp9D=%c?_̼^ ONqq~O_/2FHF`HՎ^~Cm->Jdfr\Q0^;J-A)S~Ҟ+ 2 RВRa #ܪzVzmr˻)nm&W|{ }j0e~C6Zla}7Zpi3j&tƾd-b^7Qyʩ˧_RJϭ~ר<էRҹV+?4ۨ/V[h։T޻'dB/˝E-AXӯ}z#+0}q+*>UH;jGݶȌ.LgQ㔼D]a|dYU!:HruD5[aUMgUKrb[iFE@U URZDURCԭRu"ALT&Uʲ9%CitªjJP+:@iA1^rʧ\+f r$Է59 -&Xe%]&,r&О<6pIjsVuDT T*uC- Q#"-]QժU BHkզ:S-}8CE zZe鏦5-g~0zaUm7d.O^UbfV~i#5R-jAo_^*ZݮOZWRYۛ+gMBlWxwJ."6~J߁rsZe1JG,R!ÏnjFy*pՑun8 QV z.q#8ak \x_xsCt3%k&wr8+q$3q1mzPmu\:& sf'L-,{K]ďHvǗrӧq,P5ni zx]3m m0'l@= Z~"wC˓?$59T|iizj:[iDIpūw㡓3|j x3 8@kGL48u#,21GE+6 p'T&^ZOcg׎@{c)qnCd̪ӢL;x1̔~2:<!39su4fasGWN֖Ǧ8wX!.֎1ʩHz f=3̐~Ju g Oq]C˘83dc>E g#9%i<7qߤJi1IGV^Js 0. ǯU{o5Fwt: &Plvq8C?lp2O֮Dsn3}=ϵd9"O@O#ӄgF{Z|ĎO-RX\lp8pDpDޝXIJ`ݤD~h^Kw\#& h=Ϥ.6ܽ^^w:+:~_-tGSidN?e)?t=dFsd[דz['s<瓞KM:$ u=?0)ցO?< >6G:/“m=Hn"%$:1//M+L'6AD&YQ3zMj=THF\uUbUErzT yo[ =O&/h.!~s5hq.#SOs0-- щ a6@{_+Yΰ+:=ZJREikn6V1.i% Խ-T.Q UVjRXumi%Ml6[ptWrw+S).{;<*42)E1+XZJ}7,\Q% lR<$j @U) e@F‰l)E8Q IrZ*F (*TV"ZۅH+Q4% AUI6UVGXSXSU:1 P8*Z0jIP.LbZҔ&Sث,V"T^zX) `T³Bfja *D T<T -W`*^, QlVVU/iahڄpSI.@:HiP23>?lp7iUPG|cuݦh&rI2s;EnZ eSnA;ǧeo'"=ryj("` Ǹ9=zaApɏE 9#A|& sFNUX <Ñ7AyJWǢĐ?Z:Q.|Uj<;*(ʇp9Ԃ%8qq[[yIs#NLq,Z$H2 LIOJ49e<88Eʱ4Q/s1mvq~KvTm2CCIs]ѤHHvAGFQV H8n`S%1%fR@!,un #uo 9a.9G0'i3R0D2A>XMc?Uv'/끠o}3&~ޫWŦּ6q`^Þ%DTbuڧcrs俜R%΢;l;𒝝0͂x8^_4ρCʝ1y1}jEv\J˟^O}hPm5-b,KT5ϦqKSQ BijFupvU rw-F%jzȿ+Iݓ+~;D2R a>5MH-^=/ܽ~ >#e~X4m u?wO &

K1=9tfuy<f /d̅ZמԧC-ZLԕw5[YXS&Uj$OjKڅ [b]Ej-VT^Tʞ: i0ʴt[yt}*7[X(reIr)Ju53 \&S6"P0z䊁=JV%ZW-690$"YO)i+Fbb^VHJEjUm'aAPTS5*P*!McPmzZƕYNiY7I*I@TUUjnOjf`*2RST2j[*`*9WZp, UkYIZZTs\-T@j"£ZkQYvRMw&; vGt,=CrPY6kbsFA]ҵ72B/AC M/;my5r揧>=Wtݴ醎N1FǗyӎOԯsbV.6u12$ zt#qZdbgS*pN'"ш=2@+=70uK2cSgah^zoS0GDB^xu:|1=4"gMQMi7a9^ɖxY!{{/-[s3$zcYBO~Ds'98U]O#:n4MuuF9k}y'{/~0Ru1}k-8={*dDLGN=c#9i!yaܐLqjd 06Hf8!eViSj >VG ٭lD'f7=@ Bj,h{r A;±T4@hq;nFI1>d1zOC' ָH `T%/EGk\8yn|ю־hK9@HG"-OIrlɞplkK hF9X.&`5'ăDm!ɑ찫hiRF1OnZ\0/Cp8ס[Z1;cVks K=HUA>e- :{ jnD DǜS#/h`nW~fGە$KNwB!b`|,{ rO˕m4UsC m)>wW)N"\ʷGNBaHRdФ+Kd0 {R*6j)ʭf-ePXŹl,ۗ.RkؙKlM\{/]V]ޮ_YA0EFIh_qd4 it_BhXہ ;R[PZ+M`YIp WǼ]NLW EU>N|ϯ  j^44ou; l|o˭%|Qׄ"eOO-1 l6jןmef⥵^tW\"M}@Έz[9N!eku_Hj[?qyya{~xb+_PБIXbLaSTb2MHFJ(<*Vj*NZ T@@R)%JmD`JLVib*iQQMঙ\(]n UXj5Pj:GyDz]:e\eL#}R_vHȂ vW"*)ϧp:wF1,\ " zq0`-wq_L*SqT{Sjhho\~:\Cdr+U&'.[ۆgj/uF[R$=* 4H7K^lH?/;ԷG l .r ~ ]8K ueIA׉QB5d~w<73wO$ֶs5J7X.":a| v9[7t/z~:;S7 6cR&JNyn{d ?ݘ?o [TS9nT:y|򖃓 8E[>λTk7;M3l #-\Hqq_]1ձxdn8]Y0'9@^Y1`S{}n?2Iq>‹  ]'zjg ph>+oN ϔd#DFzt1*+9zcyV[ծI[`@~\d2I"uYi.\@geJmsvhd̸4w3 \LHB9!_m 3c.>LPxe H8f\-3#:Ho>SLl9$U/L;5N0&?u1|y@8|VZ斴q]`aFUilzK}(S0[CO'@@1H2;ue 0cO t6DHjuN3$6i@$ 21&&c:# yzB/9$ QU5sq0GN>i123aSЬ %Tm&Cr ;L4=c 4޳N0\N1;ZQx-<8O.eg Ie;Nc?EiWF=0cgL[WpLcƍ}&s_-GѼFOUHٔ@1

P{ҟ6~\UH?m/uN߆WK U}USY'LZeeTuKe?_U!C¾zqςKJZJwy{>- ɝM+~O>5Wk9 5cQg ;5UBPDou׽/2UotA^L"2gzV!kz/Xx:Z~++ؿN*:EZa^KDҔa*hDB"STERV*uUJJHM$\RZ0OaOMauE *jT@ҍ*TSrʡnp%zrNJ/j`DFci!#FBY(=&B:PU0M%:V0RHhN-4%U CP)WzT))% [YiUn'RrmvJ&Tҵbczҷ NYzzS^Q^] 9 !f\ao:dXthʿ^KOս.5yzЯTZߒ:Ty[Zsԫ\oe*opG'ErzUTO1`㑁YWZp{uZIJi\!#MpOg\jZ$HF]v,t%=AϪ U{t]7c=^j׿9"=9:q2@1F$qg?_蟁^ y?<="sMDIuoÍ%JyYjm9s-{2A2,_ usnN^;'Dlhyqkr) u-֑O-CӦ﫿3FjiEguZ{F֗4 Lp(mkq'I?2I-^FTnĐ\$=$e{x.ӐpH_m#ɆDg LX^-?t6p>eRxCjI#":_Ujk_֓ 8qz_*|7;H39_Zu;>( %EcXߵw\t9߅BQ-FG$DW)k&|H#Ơ]Qy"]6̂'yXzkֱ-. y $1ߩžBx'w zKGiQLFdY$kBR$dNğC0!lh qgpObQӬhKZ"c.=nN1'0ZwvH8q =E;b9=9Y7#s4JlÉV_l}A9锴,j:d90F;,mNݔ<{m>hLrb6Y;g&FAv-kL??DPW&IAj\`8Yq- q_|s[za~epo_7@$o[5Ǫ jh\OV*U`TMU)A:p EWV/~ k0D/[ӎ5 ,/7U-$_ԭH+TQT l*+V3Z,nTi$Mm>-J0Pzc+ rҹZ %+8eX_ U)j_>@"iSL棔Ď 2I(RUZÕwH/UP \'495(m(ņK[J?dS*>.(TYr³hzKU:[!S*IA5$9>ZJS4`*%⒑I$e#e$%Wi#{T@+u檔JHjYLaF!фz\VU9QJbϨ̭:WJUD)kO<,Z:}Na7ZYQnUVūӏz|/+qNO{U*SߒC:MgDzf!ncDê+G ޜ+{Rn,i-a-qK W),.p48=L[i&N2V֣c[WWvTi1qЈǿf[1Ș&9yixMGu}ê?=U'ix+uז? %i"z8C9'8Ȉ>);wcxrĸBNVi{{NqqZ11Uh ùbd+ڵ}GT3$?P9k'~*v^)82w`r8gXD88Lv/ϟ2mj=,rƊͰ\IhO_@&>Kqp؍ c/ҚV:~b%͗6~1siSPTɩ^.hh$O0~V)6#赃O\$qoeiݼ|O==+T$H=j`2̈''yˋ"xs&NLwhCI^C28"s/EkF#$^{5#  +ֵC =K}E_ݝZ$rgam] 0HlL/-HTD@ qo2жO~I8ӆ zI k;Iig3Z !y=n9c$Nт0'= '*Z%-az%9#iNv&b7`Zt1ѹ(:tZȉrK+ [aCF_c£e@kX2|$ GQ _y&y됫 8 d;g@nsHMZڏM$dvVP5@c>^Cv"|1H%YzCF CsԎ*$|R7qֆa%t {YtYV$c)ϠXVIh0`'$Sq8Xvpß9`ϧe[ WWht,o };f6jCܾwoF?s=,8I}8V&OENȡTUzEk^U 'j>( Ug^قğ\ݯ!|;BxQs~I⍕ 5Pm3>t[A(sڐ#\HкQ.Xv'hڰ48)7 ߈W_lBp8 WqV~XJ 6- G qus*%-J uP=T*zqȔ~Uk:M0Wm[&{M2ʹz~fj\VR9ULPZkӁT7 iKU-kOrQD!Ws;HCQDUbD…KSҎRQʥQWyNUg :.Yt(9agܽ\qT.VE+4\ru+(RnSI).L,#a@XiNeM^RLr]"䓪 ¶ UG§QXX(x@Tu@N3Tz!&U(ٰQbm:9JRڪֲMݥH+*oEZ+H%zK=-R;By;F}w0\`UH+V~L}zas GFyxhohvkZ9 ?ZasBOsu= {Z$-zdI\E_f*4x9'MrGӰ3/kۯ*nG9^J^-s ;t}e$pOH<DzNt&<p#0UAc6жlZӴyOP"Gyo-htN~olg6\\v78f{^H c;=?uQ/@ ?EUe7HIq= 8-+"ׇ6ÁdN a 9 {n6_jla RswK+ʵm\j45wc@#2/Ceq+Ck0KOw`H5{[VQ_yOgEci:!8VtH'M2y<4@Fg/Hy$F;%s/S%|=>3YӬWwä948_oa\5ݎ={m4J\`IJGpnAqgDH s9Pn ?1;Z hY~#@q"5-]9d486˙8Ú1+YcKHnsUcP/lId@s;yj6KH 0F ,-KigӐgݽcZTll,"NH$zep^ZÈ:d t1ӅUe ll܁趪FƵ7' 
EN78``;<6Dlہ90|,+]89ooP?0" $z+7pAAr93=QMփ&x ]ǝϖ0":9p|$b>.Շvyc wpDGu25ejwt5Il2@kCyf {UQv\" H&I#X/(3!izXn<@얟~Vxb{ >1+z}IlAH$دMEKy0fznOqCZM: g9Yu782&&NYԏ.&2 p>kjs;q$fg#d)Qxiw0}uxkI5s7 \ -FN '+D}o3 x^ٝہ$|Up> zi$0e|e_-py^FR̗~} ~ .ZʿEZ+ǃ׉IgG[ql< oo[Џϩ_y"3Ϡ :Dzxik@ 2C l>YOH??=k,8VlL#; Y>&қZ{_>wݧi7;Ӟ(~Du_zD/nh'}v Ajb%uuUȨTl_7k|-P7M SiY&JrUپRTݓ&# pA_0񯇃 \ӷ4kJ[Hs/VI_|/!6x8^G< &dw9w\ \^ԡ[[6\55JcxkҊ6$k:Y―XQ5USXTP{UtFjThԤM%ɮFѺjϨVj@BJ&p HTZRZ4AP*v-O=ZJ)pJaMIN NOsRe-QM(DҀ˜«4BJT[ }DRTYfoIEE4b.*Tr5jAa[TZv 7)PT%\v\֐o:vYZh)h}I_.WM']+GX>_|KEa~g2z6J`=O ~3cts~q=Y_7nuj$* ֩;a_0Ψhܲ<W|wh[pO<6}}|8U_M-)vnAs`=>Ypƺw9͖#)n՟$ZvGӁyU@B{*Z]8z@Tx!ٲ1=;pg9Nysdrd|@3Xsw48HB5{kIȃtʷslڹ"Ci$7Si?rp2^jnp5Y4A. c]|pWԩQi.!;9@>O p5dI397׵Ě@ᦠxx0^2`wK)Cs/;Ty3D|;:L:10ȏE^ߌXw<1O&`\Onۓ=_.,SPtm19"=̣~5V#';YNh!]-yÏn[82gcK~65=H$$\:&ʪ-4`dz*X5QGUfnV=B0VOV)x}V'\INxT\GXURȿGUߙ?9G<+RrqF3'R#́꾽Vݺ{V57H~*G}Wo4^GSeap?f?sYk!V ~Čh7[yK/Ե^*UU 9 U@>.Фw_<3]6j<5icy{0uW㷗)(L)֩jYsRMJNJle_\~-r+ \ +"*!X@McJOYӤQ^ .ˬu\*KJ! H{ Rj"U*Z5]BAʠ  \FT(VrriLk]TŒ䚥LyIp.R2P!51)TE29_VtUQ+B*Q,MrKNSD-)Z AUyЬj5jܫ+: 6,hXZ+.~lz}]ZʘLw_y%*qL-Ʈ>xʗOp?x Ù 힨kKYKOGfd|R3}o 7OFO l-$_a`)psCWX3LGA@>/kjE{Sla;DJowu'p9=U;0PT4EBw Fq<,y^N) w>k1~FNzA!qT{Lv SU~e1;hb;g3"e[Wotx`Lb9ꯞtִ-HZNG<4. =WXc!gm; ؈wӰ=v< Ьg pc&?EsV#<ԟ|1TĞ΋HZ.3HA>m[p-ya1.hCxaku6'8H@1U~;j䪱y=J̯#sFGa7EK1߯x@}A"S t>]֚C h^נ%܃ +g8^"Wq. i'tN N'l9-ݠOQ <=T#+:lwˇ1WsݰvɌGY59 ovXK\:Kq+饠@$L`F}0khN-Û2'0s ix$ c2H3JqM Jk5p8k0Md# }jus K@{v$$<7:Lˇ<T089vf?7l\!b躁fg%0se i.//c mti$cyjfKztLsks`c8퉪99lK\yp |zobc

i.!l])/Ƽg/>U+K[.wf={ B\@%4ZS&$4}8$|( (KJ" (\CD aMaHiLaPU=89#kJQ = c֑#aNiUڜ©CrX+%*S@(\4ʼ%I4JeWxOrS%w($W DRӄ=-5-1C(BSQҌK4EhQ*0r\=( :_U j9x*OS0($ "̯Ԫp54)D %lK_cm+˨Vպ,.N(dԤaxįa]+*v{b^®:U՞%AL(3lx{$k÷k=^O ppljj4TO$_758m{N:+WOFW564q1!͌p2֪xb>@rCSq"GPZ{.3gbU+<Hqk{i!c6"Ah޽4K]&"y~ƂL&浟E]F0}ADÿ)pD::{`ChvVm-z~sď>sD{int:g}IK g# ΥLK_x;we~H8nk};^\iut sHp/&N QL11/Ԗ8+hbD[o n?1:]GQ[Q z sZ6gy5j 9tg-_>Kۏ-sOx}u[2g>kjckO3ҺCj43:1iXlZLۜяN3yu n:ѹ<42YG=NpLePf2;Gh*rF]s;t*ɣdѹϨ>79>@8-a$MǨJau @#T+P\i~#mMZp}Uk:` ,r%iyY/0XiQ}cz[+;g;89$YYKC*``NqǵZZ5$5$y31 k!rD7#'=(5͗Cl8(= ݬsR&zKy̓9"s_qQ͟3`I9wIu~~熎@〵)Yøw@D-:7 @hhn# ,t]!'4ڛ\[?ӵLXԵBw9. ' 9S, ٟ +g!#˱Ie0Ip"8wU8yI {1=B_̓'c*Z68mnAOQV )ۉtI.Ș~.oTSf:r t%W]E与95?^$L}#  @+-R`zEcQ1; s–=} pN S^8Fj|2< ":Gjt 9VNd'~Waj+R+)$cJx5nㅛNu3 ;Yf$-:V {s+AZTvtSnGu*D`֎3o&WA}ƯJ%Z.ڍ>ļ_ .kr_ o+s}B^S(zߊ0Rw & EATt.P&&QU(~YʿAIa.O%%rpjR M2L%"M%&xDT+6ehۇ-y W,E\V]I.ФVTv; 23uϱ^ZB+jymE+{e{?iqm* 3+fZ lK~p-p_8֙n?_BnqWwg y^OoUuCAۡVtw/ 5^>i9{ƕ^z-`̱J6dDփϠ^Αu98vn:K Zv <91>Kf.R7m-IiZNьA&= 4_U튟 u8dm覴 yVV>-m0pGCFesd3U#8d}GQ@}m.pUY|@5H +^ 2ǔtuKa.gXa #'r$@ ``s.܎Z&Ƀ?QV7G/Dv0'# k#4\Fx2+Khj7 #t] Tmc'0T|P8I_F8ݠA3+\x., Ǵ`P1 08G1=)`hKKm~݊u8e.f=PR7nqww`N= Yʤ{ UA&C[Ǘ͹ UhܒǨlLA!y7˵$5+^X ܖ!''<I iO#3?agxXv$7AW iq G0Ss*u-64ǼpOQN{_Cŭ 2я, z%TmJgdp׶~|#0J$$cKU@8BmV`n42H=`X? sa͌dr2ߴ|G>ifqBס%ԹimNȐ[Kw2nq`$*B&'0#{Oy U+.0 Z_j.{iꞾM f|hoÀfDaҀ4\q#QFv'נ}2Dl2wI## զMIwq׏eZcl<9 Tv;*;Ke@G.¡7 3\$sx;z0t220rvR1c+xv'BG9GdH}GC+lפ8yL Uo0GtVd!ijUժe*FRuk$ӷ{z7DYZii[lezPrVmΩꌀf\U9ej֬ HJgnJ-k^ʭ1 9>m.\ ׫tpbK&13BdFys9Pi1۠u*ҩ˶KD qq^:LH.%Ovq$9p偠8ni1u0㵽C. 
,kP#yM^]|ē1=p#䳵-Q~a3!+G@ =U C`p>~aiS/u$pA)IyV[D`D ONoiZp7;N:<X`";u qrbqLwhi~i0`gowQy}2ݍ-'z6ʋˈ#LG=/som 2;HSCA|0ONj6OncY583:k=Zxf7 n#IZZW.k&%-&:59QV34  7h{4H<6ycu:%Pnݷi8{Ȟӄ4nGԙq2eLz$t8J] #Ȏ$ 7S,"Kx$>|ec*+:m62I9qr%k}&dH^Z4 p  z^f--y]Pk=y1 y>f[1 3Z4g4f}8L`cd_IU-^L9kV w ni8vx=)mTO1C<ۣlc$4,D:yㅟ[Wr}:нVo-Z9 NYy䴒C9[ɇR[3{ߞѝv65LbfÃzG!hYjf2 8BH§s}8C݇0yng7̪7r)ר sZ~i'zvvk[P[ý3y -S}3+^fQj pEoZҵh!]jbup'k`CP;zc u죾3Cx[[d c'lu;Ge?74Ug^_ Vދ[߲1IQhz +W<8_`-^ߤ7Y]iPp12=$혌* :$IN{-Zd {,Qڎ>>[Zyl=pM8p>Us0LԵzBdSu Nmu^﨑ND=!OG~r'KֆݴPg,ہA/&2 璨\a`cqD U$' u _.! LĘWl&DHv'تڻָnhc#;Hs,.&A @<`a:rɘ:2:{GJŻG H?EZk1.vH0,Z4 H"d6<&INqӲM !$O<+4CLn rD|>./>X#ZNfȁ-xdL6?Es}`FyGUBxhq% $qُE]gi㵻 u##H0`J/sY/!b ]UGCݤ h2 =>J{GK֐ƟN6YN>mzC 6<& 1%Z֭C G{ w~u[a':] ǘ x[H8# ;n>#@ =$s \XBHq+χ/eV sGU1ĐYT']?X5K7N lz:1P g rw7: on@c|@ 0sbZV=rq/갰myc<<9oxq G;0&q 7oHp{vz62q{>k~@ts}@cu?1ۉY*y Yap2G\F[HI3V˥fBр9V)>hZs菿 X/8^Ë1˻,z4w8{2렑ϹmKc'ܘ+> 3v~Krփ/Ox[7q-A Go~} r _c.XS lѸs-8+Op'SˋG~5*.i Vw_ KI }vZA+6X`բXnb:u`RTj6B4+|A VS+~˶x\i^/pU/ÝIodx8@$/xoO /]cF;/]oLa\B{T^6V k_45W }~Nt ?ia sQ2u{MoF-8Ya_V`W|AɅ3cc|bvģ{:A_= 濸s+*[4" VsP yL*TKyC(e{~aLIaMܾgIPJ\KD RHaPMF* 4.LOMhdYT*UUʠ WU n,j pMU[90aRLrMeUѱA QA%ɲ 8)Iz 1)LbJHV)v HS\\R/)Or**EYio[BZ mrtO BĕcfwE %YvZUu@^u~B+ZBEՊʾZ,tsV+X*[h^PTZ5-U q!MN2Xp+Hq?~KFF$L40H?Zgkx# Jm$DW zҐ gi+{eݏ<{~K:cDɈ$s`Ǻ6 t-$gDs;/jiGL/k:s7t1%Wֵײ~ 9z۽EtY. L8 $@ O@#'uN}2>PN'۟,p- ytLdX$@qUajְ2'0N@3C¯&Cx=1ekQɧ5(׉v'nd 6.sG `$ L9cKp &;ڵܸ4dpp Б+o^|wxeCAQ!XJtZ79p_u!J얼?1sA$и15Bv1=eOxƓe$psӪ -;+ZZHH| xIBR]1.$NDpy{2cޥ 9kH{KOr u_(I2k*4KqƩHϋ2 7}˼Wn] qHl=A䑕Am#LJ<dGat۹sŮIaLs@N8켿5g\MZvg2=L2 q>~wk AA|ם_b9_5Zٌ?qJnjNX{y _LӨ$|"`/c yt#rVݿԵ O􁏗d }>@= >6kEL]:l;DVշ]V fFNc<̭iL 㤘 oXy-'t n@cz#3#t/{h,h2:hS DŽԴOƕ`A_M,_:uT y'PDr|5@"xU:sXo̒O 5ܶWԿ-yR~9OdJytɫ|赞jozዉ?0 v^i/CcfF=- ZF[tޣrK]rn[V.v_Y.lys2j5JEоY5k^h!8Lb5 V~kτ-PBz +U bYSo%F@$U|V m[GH^7GԾ%  -D=5 lAV:͏Sr%2˛PD.zu"[A;{uksN{/|0bzqOhh#D`^NkK2^:&Ix'춪xH ۳8:0`Wxw۹ѼN1? 
8=Wl6Cᴘa$ ~}n> 1߉2wlf әS v]fb,M^K[N>~+1yHN݁#Hթd@FG9;*: dagHm\4dە^Ti[ЅEIeA" Un/cp Y0[߈A1eegsG|OPDЯ9N8 @QzۑbShncbg>I~C``G0}zB-!s?R[E $GX[̩Oa%Š9ʳU`R#͐dhxL $;s'@źu@s& '+kzz07ho^I/1I 4{] 䵴= "9sN`w+_tO?7,}"d7܌h*BC[3{A_|uRo^-p/j<Ҫ"aӇg~iOcjq%&~9꫚۴zO_O G{5z>+qvG%yP۬i-|^Fi=jS\+Ƹ K_*jN]\ @)+ښ R4FR2)$`4%,RځQ 4AU\V UzQLzY+H.USi*Tޢi=ܑtV(Eʝ2)S. LJZW)CJ7%$@%9Ta@Js\*05nVC6iI$r"*JkS+YJeC_&#+y[m&24@#-^,Rc_"tI?5u,OHjv$/|X Z>zq^(5iU bKE7ʲ'XpIsE!|G0=Vk;Z˘Y>x՛Mx o z>qLJΙUsqp#y] sp XQ|N`O93tKK&#&䁕/k6˜89=#Omėy&+\i`x#^NGc rݲO1f3+]kRNF DO3˽z-X?׹yN"pqKTpC̘0b!lG˙iXqב${>Ƶlld:3c+iBŮpH㬕V-/ >\G+ ^TZ"zgWw1}}{IKYu210#$L+7>+sq#//ap!ߛ=a{$`{Zz-hvgo2Ѣ]gw-h=J])g?Kg2"  Ul1 ye\=@g$A0Ѱ5pk^ r- W 9#!LTnsOkLKݖC5J.Xp"~SַA'2ܳK)CA³sKvcqkeL6Ip~b{L ,m%Ϥ`U/H#{pHz}?ZkKgV=}NcЕcQHCD˚]X&'d=¶L*6 i`A#j[ذ2]h=d%f#{dD -K'@|yv7y?UxGmzA]+꾁A1i_I|l1%a }+rΝ6=eax' adVx P X)we'TezZAJyZy5Fm_GmJן?Ci_0YjG ~_$.MCJuEVk}8ճVVWR-W*ܭ n~ KDo]Z`(KjFQ)!FPD[Nbဦ*LƧ1%=>t18)"*UʹQR )n(ޒP-ȚAvDfǦnFUzA(`YcՆIVQA JDMS+T\9)CaW<)˚PA)*1qKl*'* *"T%@rUV6J5Ӵ/CKG Յk<O9wd8^nWWoў*wAaաPm"5AWJSԁM?5E菀=+jM#JMB +"RX,U޴n,mWy^w0ҷ:+08* cJEI=ϛ#PK'oYw_R^CZ\`D&dGVVi 5knvƵ{6 ho+*銭 $q08Z^ac[$睸+cJIq=meHg堏l1x0=׿hnym6ݠvc+&}Ju'k#ZNj߄^D{ǧX:<\ 8&F$0CXOעWv7os\9 |r`4:T6 >&QlbV$2FpD}^LK@??c^x6k;uF:X:h yvt{rgaY`pd?01~IMiۛ>R 9AS_,sGvs0'8=isZ% ѭI1 Fk6vK@p۷NkH$gtnc'Ĵ`wU-JUf#NCgVՀ+0'+Wľ&J~KUŁ)mBV`ں][m[B<1S֧hz*& YQ֯vj 1H(b% +5$$31NiUsk=9Uڌ9I-zק^UZsJ, ).LRIU!*RS"$9YxU'-*'* tVUGԷ)HQr<\$*M9Is9.zrЗ$Qh=6V)SM}VPnTj.C+4:ki/)d|-߈Rjt5R/׌nԮy{hſXOuPrS+ FELjV)^_U"{=du^O_|:O/cWH{zFD/Ϟqi, ,[Y7QcWz[^2Rj5N+;Q݀RmK굝ʵOPD ߈[P4Hi8fUx>\D0HWO=Wu:$-q'GUa],$N\1_trXz?,K 0skhnm{A0:G*:adƨxf9-}@Lf:?uxa\?ӿlzNIĄ{G})pH3sׄ=ffÁ8}ז5;E-h? 
i?0I끌vxu SǸ;zBۡ[ q2gīn;Xhh'c88&://gXӧled9~i vYMNDyGHg5[=뜭[vO$@{TƣRƼmp x>\qn q>"P 0Hȁ0TXL9< @:|jp= i(U#XG]Ԣ75V =oWۚO{A}eP9 qcIz¿F:::Zuw5H#%׭k9hv  z$n% gɈv ӯs jP0CAo%ߘ 'ϧ6V`4x#zzu uLVh$DGAaZgy^P|\#;}x~J40{Izo [ZC8CI @{ap->]G=dx~:`6z}|#i~^LO/=oSt5kF8u/9`2enoi$FI`@%sʥpvɎ1 =wcP׸ol09EInDOo%o&)rq$?,t B99Og:DCA0v >U \4ejBCoVmJV'zm{71cZ-V.u/pHҦեoEfYUfW/QOd"iGr6j׬X7Wc]5;l//}fΡO S]3'qfT^Q֋ty$įL4Y*OԵ*VE[%}hwOe=@[$ u1okxu0,<\G_LrOFk"REzK]/`XKD N >z_0|IgwR$t虮" t _CC7GU[i6Z^wb- B´"@r"}ڕ#V $<ű2L0Le@n 4[Q {'VҪ01'.""W<=&x<@Ui s?q僐JBF%OLGL4N~e\҄=s ʹ468}ކi U3N ;ۄDFL RLsH`{, 6!@#Թ7t hҧhyq012 GI$־ y|3yBe1q?jU.2p >юa[x2cx`2&:tWehR!y!6Iw^Wkˉ3tu/ kHf PgUL}gL$vcqv72yvU[ֲa:{U e.driusX , ۏ3uU*Dyu6Uw&;S*to[}zdqUjgh$N%ž"}{}P t'ۻ0F搞/ԉʱioㅓmRDD-+ZŪ:Tz SpƑI^\]2ݷ^F¾VEmrNbxY7Kb *'0BW]:{RTH.]ܣwB*& µ5yIlY5dTPNS>Tm UYLc!(XURM~eVn5`gFX_>^)Ȝw_Vs} p0:/`j>"/'Z P5=)֒kLkpƹ|;SZUf9=HPPz5p\S\B m2LkM)),)ST!pLJ2Rj9TJB6*4Je )rTحR@5ʵP$VNm|'-J ZKu4YQsQd\➪ +/oi:~&4tJ*ir˼lqMdތ,=VPTV( a]#T *}mfV"gS{K28E.^>GXeԢּ7R$c#!̒\@Ng `ZF:Lz(Қ Aǔ̒yӠVQ#|ۢ㜌wP}&Æc&3s7|0n0@3ߘ"1^k\#xSSl;3:1|Fnל|2ւ܎zGOT;;L2O~; F܇|z7-83(=ani' 峥xWIt'$sHZ斺--by£{k6DLQ {FWk׵d{'9/=sZHhis_-݀#y zl)\$) AV<\Lʍ/t59awu3=df?Uuj?p6Ę2>Q@N3rpAa^1.x `{3IkLđ n1$>.i$D@d(irXpZ :椹Z y` DryYl?eUh98#0m#@j%2֝i{4'"#q*|ocU9TeƸv- 9/ 0OzJU[y}~^ Lg\ L" vsD˰i{4`$RӷEj# ͽqM"md0Vd545|zi5rK reS+Y+TU«񐋅QՖ6BϾjudsاo߽yYRRGUuzO\}*K36u4W2*^b3f;uS\'#8>.ä @1W=>C.&} 3!(nt1LѸdDI!<=wGvA ܏~E x1 pkA$H"E;Gnk`}O5{Xh$$z<=qIn;i85&DҮic#@IѶuInֲ!7 n$N/_T|ς{Kv|ycTux2ׂh= ͑IÁ; 尧eqʀp Â; smk.h.a\MZ NEI0H1{ytL7tKn"V*RR%x? "ʏH7@ Ď*v{Ua/kkCkug=fgV(aksaHmԺ5vDm3yjiil2 Hv3 ac"N@D7]Q]!9>DzQLy3.Z%.9&}#o>(YS/;r"AiK4!s\F bABY-̷8#'dm .Ifn"iO=Rfds=_%pgj828=qz*4U~8<Oxv#.&|ް}=.>q.0e^K409<@9@M(?] 
;@P}}rtg =0v-FAA^8:a cI,3cm-{o&b3'.[*7W"裹*.ya/GP9WWa[ƛa}N2a~sV/x'ZY_UV!gW[WVS/ <䮼~3(8CRLGXP0;gHU@Ϡdop FzvY9$м#괃4'.$OXY.$=lm->29 (11wdp_Y `$y$#%i8#'g욎jM?4n"'%"s=֍PF%׻ky-"PlDu#?©hcF߸?018NםNwdz@CVT|0#ϲ*[ͽӽ:ǯpo-nHt LM?u(a $Y1 /IZ27m.F?3x*w iqlj '=̒6prkR.`=U K77i{Ǣ ̓fi/h}n~mig8OEBΉ."zG1YWui'g9/=kĎqɌXT Wnt0$v E_$;nRyU7n 9 ڼI?Έun̸Zq탴CtP8H~.uGC\@C1pWv87"bc'|A=eUDǘ`sDałݮ'Ep?" F!xZ5<>ǿm?F ps9}U ;9@2A;O4{d}%_]?,O\UĽĐ0?AFqZ{o>i%UE:R ״D*]5:ӄнayo豰ExK ׶%m[r^e8< TM74^]sXWi^c׍PR_ymZז~UpwnUTsuNނDkFʊ 4(քVצȼf^MXvx ת "uǹP Y\*"HsUON|V-?T_Xi_Vޱ erʛ2Fw_GP9WW5c(&ǩiO Js ҅ 0SZP")n(P *e-ȩ_*'+tʛA*`*BB^Q!馔JvQUNUbH-rn䆔T DԪZMt؞-b3M9M U(Pui:YBb*Yڥw5"V^Uj- uJQr<q(%q*\=(#\^˕zWw cV)l+LZ"\MɀٜG/f~q{+'a6ERҦ*i}*\XVXTLyaܯV򼆥O5Qnui[Tug5fr^+-dݎUPsH Sd'b_bv*W*c='a: A弦6Yـ^r^4;3Z@Tu12 ak ZԸ8Iss€6c|O*KLm #-Jv$p0~%cb񞹏ϯrsg0̑Ns>xhs&cOag_k âd>^U|tA{iI-#j]ۼߖ%>KǷLcÝh'Ȼ'diRq@I0\wv d1YbLFs>ɤn JZ4nn:s$&6H"2ILO(Bt +OI0tqXm.Gr8wG]xݎTuH0\y>U)o2 dt9Ǣҫ ə+Lco(NDt8ʘj7?bC0`w_. O'Ӝ nXA &28kev[v:jV#`H$zx$$8N'tȁV[A-cnv Z-<5،neM { @JW`+FϤ%}aS/l>%1c譶i2# KsV[3׷:wVtSUdyBj}3[]3 \X8,VV!xL=pAhx0*@!3ĵ<=L4jVJϊrJ&A+E9SRSZܹ U{ձViS~4趩R nW cL)V_~+f%:ATQ8':!+LpݭYX*,[5XׅLQi6N}}>SY}ajVդ{O!oPJeУ^HB _:Nb}5YXaX՞ TDҘ @)rj5 +rΩXP!knW(fzJXzk2ZPuU(M \r `}gWTאƾ=VSzj]jkjתԵU5oz|_oJu+kNZ^>/,oez+J^'Hw Z 5Q1Yª}*1$I.U*YJ.*W#{zA*RVC!@{j9kPKsЇp, T5**)HV.UaWmL15tk7紶EACAzjꕝxU׹RRLA_oڼ>V9{Q`[Z^z.gT|ʫYvYUJj**& ^bzmQb}K԰ =Jaz7]7s u3Ez@ N ]qK ikjVB Ut8괵f8$QDDL.difgbF%H<- ʤ퍖ya?*-HתYAgX dl N8d%c5$Y6ZDDBLaЅ^V8&DA D թf0${3Vqma:$a]r:z$oX@$qp>Ay >^xp2 fF D ehi#9A#D\OS=b52bcv:nAqĖq8hɎ[մZ"$pFA6CC*d|Km8cf~хfvr=`!h9zLzCN$8=u!.Ldn>%- 9*5Z 233$jתvg0Ȏ5 g鞑0k{!hն~auYV5tlCȅTmpp8.Q2Mr(-.8Uuq WPާUcj3^+TֽWlzV׽W%PTWtFrW/!{}s,uqFM;҈[تQ>*}  z`rN{O*R=ʥRBT)UJIpVS]UyHSRj$KEG%-QV9-z)@W*W*m)bxYJԴw lz5jL/;[4X,d9+檯]_ G x+zyRΠXwWbĻ 1 2椫gTjv7*np>>r[W^? 
);J?/"Eu=_0/cj;JEk2OPc^P\Ӆ6'Hl 8q3jUz:Ui0$UL$q+U ¼v\"`z|S'lM9<+IV[Yۇ$coi^^9qvR_ 3}}x[sMI􅑯]I$H Up@({K31=S4v @p^kD$רCF $8UF@fޜ #[8=7>J[M8=C~78r@#\j܌x"q.,qr.(83iz }s?or?i=ǿeF5lNG'U=F{Kx29WSTi`޾B) LA8(|70`DإY}jg,&\oea1'q3+^% nLVu;1p3&y,=^ ߃;#No5ӵ8Ďn:d~xq&c#9X->Jobx1' jւCItKC@ǡVN/ÈH3ZgiF 71]JR^: Scj;@r&{J :s%p^Gҗ5yߓɓ{^kb > YRt 90K2J}kHl#)\ [{#9h<$];83 _eaa]դ ̿p3}BӭF$OB ƝuR*5gV:.v`/Զ.i+kUF}.| 6ʷ.@]񡾫䔧Щ+*=(k"VEXrX ڟIZ4[IIK S(j*uj+KuDP qK%! %I!cZ *RרqBʊ ¨"kv5 Z\ܵX^W(TJq腛YTEZ9^攅^*vZWl9NcPƹ*)]ELP\FIې{9W9 klzIr(hSrnV7$TyqVJrj+7V r>樾j+xT^fQ)W^rZ} X9^Gws= +#C{}2 w[&> JhoOYgnN%[jɶ$Z{zSa*BV*W#)5=.)ҞUjNUZ[H R7&Tr-a,ҫR)UTTEօUYYi^nPvr uNNFDQ*羽9jj,Vi=\6V5{V}W%-cX4b޺[-g*̭Ywm)-`fU*8 b\+7𶭞X+:WϨ]BZ]rVoҤ@\{rX8.@,lXȞOEWݿ%M| iִh*+B ձ"UH̪ 6:mbHɓ߫@=б`W(yӐ NSEi.1k]==/;\F#u9֓9Qq'ײWiY_g&'GF[VnĸnU{Hݼi@C^Ҽ"g>BVgzUy74%Ѽ@1d ޿Pdh|ӮH b}M/LO_^vqKhuA.ahfeʳ09s#'V@7;n&kgvFwYUdP$KH 5<9ۃb!QӫH$Lw晦0y~ߩAx 8^GR);@v# >Um ps3eҽM t`ps ߹nF [E^k~o:xx~ml zSS OKNjTNu#Q7mQk[XDc:ܫDUa)*W+UUr8KCP9eTrv zκl:S *WUiEuUw>ۥetʶS ̨XL{K_^P4宩-ۺkە{ms]%tBP'5Ri` :ފз TaZZE2oH(ljM'{Buq :^0idsIIiWp8YaciT?)'Wsdu`5 S` fOXZ/2Zru;F}SccV@DJـ0O{<8CG =Vz#eg7o'W~ 6?%p3iX5G.#gRFp@ɞ쪐@93g'+5=B vTtɂ>QOc,ڴ8G'2QM.$`v*-VFNy'YuFG_u(@Uk8|M 掣YU\ ϔ n#M{=1Ӫm'e9= qqNdHr}id3=|GKVzu=#zU8TkN[{ފMo.1I~.v :-Rɣ9NI?^60)-(a͹-`:JsI&Ԋ_oXJmÄBɫS*%Un+qr]*V,ZaZS.*N)/*\%R sEJfPa-P4("(% B0qAP'&MD ]*:D WBzװa1[ Jajא~_\^r\tMjק,5j.Q+(TnB%ܖ\#OVi[M kRM%TβAYd/]ϧɵ|{Omvӕ~W>IVUx]CO+mZV /';q_h/U뭯 6꽖ax_**rFW,SJRTOjޢֶekU+cTrU*8zST8UVruW*zrQgd<&EU֨Ԭ]Q[eENJ4~"p?V*rCބUU }ōbѲMQYU&4*9;P5X,ʬuZ9[WXr˲Vtd_SnUYWD8%5ڼ]~{n/+tRVʈRÔ[6mКIM6L UjI+Q2=׺x[j,i Ǝ N|"nO S:(grRt+HI6iYv9[Lag*MDVP:cK0u,iUIxV.OOC-XBԠf?9ϢSjAB@#$dR谶LUSs^Bx cK^AeAO=Ҭ-4kC]{Uh dEiLL?r=+SnOPi48m'fj` $vV|\63a&Z@`_13Dy;=Wv斀G$X`Naj t^/PV"$q3xܜe+.vZ0zO!:>k1?Ӟ <$<ź<I@㷤**xͤtPK挐x2; B9}OB0SSNl"gNkqAJ}y[,5)xJGE>IΐBk9sOeaP}r7pMA}7VVuJL,ˇbI([`sT$)o)(j>S*PS E5:’D@QaWJ qTPZvp"0b@qr @JsQ4.QaTnJR՝74˕X+‹m~/CU_)Y>UzM雕Z5%rRS`qP,Yo)DcOTf OV}'!0\/3љ^djVt1-խy_Yׁ֬l|P-JߕX/O/DJmu Uez;R4c[Bܱb7Ǯ{}jZ+^+v6z: e 
2ayrڪ׵Ž״XRz:QfPO5iZW]S]^4QT}T몏E>^*ՖkYk#UgU5ruvuJB]Bk6UWSz-A*^9 %ݥl%ɔEcWyK|^¨]zRuNkb[Xj]ByBb7^nWo+)ez;uVYCYlV >d5:uyXK[VgB˨=uA j֐!y{S}.h*a:tEwe&StFUZ-UUB!*ϣO &еVae]tĭ7VsWzKVpyZ@ eyJ8XG00Lp e:@Ng5zNF{i02NxZ 薦K֑̪zc8-B8u{q  ׸?S{T @U+?dWK`9Z@ҬGS>(x1 =Vk"0;ay`}T5'[~j,-bFޘW?&3Q.;~DCGz}z5"p2̑.t#l#VEvR;Z]E1󲡬Ydʯq~̶Al]^ 00:*@#3\ѵ v$O v!R_B,djZZsOyN9{ ;B^ITs?TOЧ]Y).RG54*eS xqҦ&PTx쪮H)HC hArڸIz!Q(jQ\J6RjBi)-6ԖENNKL8;UA(7" ] %pLBNP# kf ZmhG[Qʳ' Ǥ#QXUQ*VWV}WZVv *ir9FrgP]BkIV)MvEiYrMArp*^oUl/+].+^Q~rFbǘpB\Q\urBИoYݳԗYh]î5=妦muk..n[]-Jj~~P\=r} tm}UxRֹT\n:JZ,ոU_p O۫*j(j=&EVYBe' w,!hۥu˜JK jA! ANځDQJY.J06٨T.יsiUV9o/Mgr㭫>C;R|Mdo%o]R&Ke}Ezp)-9 n-;TU(-T)WI%a *zI-b4^F VW'ql&V պ$,:Uu-B 6 s3O.¨ *)4 Ts*a>*-CpZsMuDq(gji8-ָZ&F畺G vKO=F嵷 @*4BC=efܸ9ZVసrޤR>Iv 3L0 ȹZ:`xf}m81etQ`>2`HokGiUT u_PN@EJ o PwUwX9DkO:ZZư^>dPis:,P#5AQqWwr^~@4&xUu˜TjK*R%' &Jqu8Y+I  +BkR9Y9Z/UKR[M@t);TJ"JQ :J$/A+vw*Z4.U:u89fj5 {ʴSZDи+RJ@*\ KTaNW!QڲP+C.=|VڟD(,O6^VUnX1){-%+BYY8#A4Sz .17*ޭSr\Dœ lI[UR۞ck:^Wu B:^Lc/icUGHy}CL^s^^\\Vu%1SBT\oV.ltvݲ.t 7R^PW/Ξҫb6qSxk:k?ek(VEDIeDD9=*TXu5vܪaZPFRŎ49 "r9 KTa* ,znɭMk.թ[G*Th[Z V蕥iYQc!|,ףWVh]-+0Kl%vŇqIz;k[rbV %jC3+6mBF! p hQ Wd$ju#8JP>eUNYCwK ЍWuGcTB nࢲod$^U.Ue60|3 H:NV~!#ME ee[ݹ4p(ӷ-$4 A8YA=ad6~kܴw(ș^Bgcn8T9NGh}UJi1 m7\~)znñ*Jrg^3s3<>F˂ Yk91PUsBU@+HSiTZM1!(+ ʑX$ .㧢]Jb' W G*o([B J<+4)!?beVcSe uʮ ΕC#Y rkPB&+BrSR,z$z)Z&T5BY!Y6(a\΄D.{r-P$ a2=ij Xr {5j;B H@Ba)e|FMĨ]R4"R&=)$.%@(IVV*ʱUʛטѵ꼣iN[jb]܎v6%=8IUȽl9Uդ Ǔ4WtoE/tvx̬|I j84Wk+UBl!aZ[-˥KڀH9VTXS*,=jQWi]jeXl65+-[QgrSYYY]+IztUcRVSv*rUzkѵ{ө=EMZOWiBУQcyks"$/ga'Oi] Iuң-[apՎ>RQvԬQMjwwNxXm>!薷"=ud,z8*mԚJPɯIRn/ʹI.Qjin! @P)ɤ^Ugd,u`-=Br] JUUNX\̣e f%^̵R/7-xP %:wX X;5\$,K0*, +έzZ#W2^r@YMRΆQϾ$+BI[@mmR o 2I) I^wUSUY)Mӷ`>ޠ-#Za-9d^UΜz- p"H̡Xk F[UiMϢQrsU[¼Z\U>4JRUR6䐪5 ֲLU@NИBZK 8 jӯKu0{,Q0 ZT<! 
H)M:HRQ5vC \a'biEn0+P԰(Er2B 5irӚapP[+q(T@t!C7rU F֮( P!._D m45@[aMܫNܕ(^ 4rYr`j]5UWD=rFҊG0rk\q>UUaehX o T)h)RS&u0T10^؛wzx+j:?8^Nz-~KcZiSyKG^͡YWeqsְCQ*6*)rҏr@z8Zxʖ¢1RV,JuUدjj#OeߊePΨ=}E.ԕ_QkiU"UV61#z8[U5GAB2 ,̎Znn|h%ojDm*6wr~F+Aa ^u0WKN[V2+l5lbAFTʯQע')tAZk6plDUʾ*}W%F3TmX;ͥj\\H Huqoe/R+.eZ }^B'$*DP-a.+Qk@ZaMJPYɒ=MDU䫜P\JO ) RǨqNv),\*)1Aj;(\-K `i-͂UGAJ AXS nKF"TR椠ɁA Z%KJE]\QbZ7`)we Q9fDmRINTJ2BtE@l-*B5+[U'SU˔nK%tǮf9W1F9zC\,8JޡKޏU z;'V{^YZ9!k z'S+*MNVFW)*)u R9gCCT*)r'Uz`iz7*ߞCG^rI_R{eqG\_s*lJ +>yc?Cҕjbwa KuTaeXP.jZUERaV)q]2PQ&5AFЌ=XmDֽW`V)4ȃWBsnU%=Wܪn*ѹX\ArۅIe;^s&'ꦇV Bg^OTXOSUGeYXnATCӅT+e>2ďi.q Mf .*^ZuEU ;iOu!e]]pR:duHGUCdU*ÏA? ueMV)+XW(!3+~Tb݃2KZ@ӫX SWt_vO3+nϗ"V%?O\KTk1>L=p̥rZH4 73[ntԼ9Kh`Dz }ְLeu&3WeR-[eզUKsVt!QF@)ڍi)rRk)k52ǫiV iVRҬ^Q!W܀bQbAT&5EPz甁L5˞QM +A%@rmU52JۗnY` R hpPҍ!Q$-zXа+BĂMGe+s/{X=z\%=2UV=49( k&P:JJCl܀uSRuKN@/mK6H^ V n.ūrPTO }Ղ+ANxo̙L]LJ*̒YW 8{*bUmNM+M65n*S]^tN%bu*Je-0JUUje[M@j(!)<W).EQrǤ X:\ZTHyr {BKRWZYzFMi8RvաRqMhԱNaL *hhnl*F*UDR\#BS .p MY ZTX ܖJP፥r@Ci¡-EA(V1@ t)STpe("pF^JSrQj=vc˞C\NǶv!ȚmNkJe7zpI@JBܻrC)X%8p (BQVMZVVj5ZōA W< iJNP W%%*Jk\.\ g^cܓPN)PҠ՗nVQjhQqHK{Vrq4+QYuWgeN;.{GְU*Zes,Ky^iPkbeKWqۦEv%BoKtl.@ZS f9EVALz)ӦѠѶF*3֥f5*-?l2]`,m3_z-jaY˟ՌֳhPt~"muz}5EUT MA\j`2YW.*y]AV9MU,Z?T:R~h)V$,MRj0&˄ "T=қvJ)Ýte\cd*V*&5 !:@FzYV] vMLJYuR|āCXj]:6%I}T/rU:TQ"؄W4JFJT! 
2"C))%N(^r$ cZB%$u4ʈK.r-5w!4U(qSRPMZW.\JRZp.PBRЗ.È#jEP9DpTEQ !&6ʨ4Za6e P4,b!MKNUYrP%,z܉(MrǨZֹVkZ1JBPo\`8% A(%C 3UERkNQirşI"B%*RZHU-$nF|DMz,5ɭ**xr\:eO+RN U :-*vE-KG,VPW0rƥVYڴUN31!V^b0.(2]\XɩnwۅQ!]2@ڄ"+Q6KDYum%G`%V@(:qdحvۦ:*WVTl%ZSFj[* V)jm-F[ҤTm%fR:nQ~FV6*)6# D2)SYV*% +Vw*JVWeFJVk5RTUV-B(p(EVg]\%UJl%T[VU 24ij+qHSru:H+VzP*IIi/RNjբl)PƒTj/zgDZ)(Rĵ%M2GBP5 $@RQ `sPB7 a)DK% rZR oLܸ*KPD礵R.zT(+4NSi1iѬBN@$BaZuI8Tj)`T)s8)cUP,b:F&Z"%6+Mr6pFہ,hNs‡uĀH.s)h.hFU!IJ8 z9xȃ,zœҩǬ-=AP8z$K\\PJS.(w&4m*7*zTirТAB/Au-ZSϹ*݅BX^^*r{ A\!+mBuB&EiZ=dzj6%MR(V659[TPTGP8UVug+Yᝪ Y(RKT)S)DSru\a\%1IQ*S4”rZ*-jY!o BjFUW%gcnMrlSW-Tr8k\)=fSWGQ+AXAܹd-͸]^6RPU LFM@V5^5Y]T+U)3χ!=MH %R/৬*-CR+MM͔3Ue$EM6Vc: ^Q_]i$V %rԂoh.j[JՖOI*Z 0' (,GNC-|c"T3 JaM!ꤌځ@ÔNb>"(L!6P 9)ohe  *0S h-ʱ(l4QÑUE$&P8]M"FBX%ؤ7)LSQa`>!*k;&4"=Us\)JK)>&$#^N;ܤ=(9FUnTZfyNUSÖv "RQ9N*2 `s8%-TKs!zZj=PcՆ=EUW)UYMzFƟHW5*85NUQs+WQ9Ti=OrmrYpN)rcܫ" yP^),zF)J[vVwgm+"PrT*0+)eK [U {U:_xT5i!ZYTUXhK4ERUE39-I{AɁʰzc8{YʠTҝTTUwFTšQhz [Gé9i9crж떳S,7Ca W&TCY-ڨU.uTwQfծU[S]\xޚu*69gkQʽ[넣P-_}~&պJ @B1>ށZ5V-$Fv@)[† Ќ*\ZHr&-jhpH yDbQ+"EMДB0p :jt"mE:VBnjpN\%sMrB[S4T֘w*SIŠ!F"h]P+ٔldٕ`R8LڙEMPW{{!kHL9KE%\}[AHe-d(8W 艍#)jN&OKSt2L4E+GB3M5r he0\ʔk]PV ޔr!WÑ$J0vےːW!jJIʓRrV4I:NOY\P=,8W rXb%-P^U Cz Bņr#PkA*!鴪)M^DE0ɸ*]ʅG!UEV)Ulr.%H{EćV NIGL ;w+rηr¹f]eFYP.)=DWDZ*ZmvUBЮm"uNIr}EYk!Lt04VIT%Ur;Pת~=W.Rs5J&T9LEj֍]aJ}eNUYJJ*wu 1@5?TuSaՊ0OuRUJ0>)/P9ܺphTB8PBΥeҁMm]t+ ]]!B T+IFP8)\Sѭv'S|TAUȨDgi U (2%Aih>ate&p`-d**6&Dmz™r] KP\cժnTXbŴi=YT)9[c?P(TC-ލ3UwFfKYSn aՐ|uQ|DUTQVU%h k9M5! Z \ M! 
+$\2SЀ" H(-W(+PP"I )RH^ADHjʊ b7kUX7-PB U&b!jD \W- lRQV%=qbaDHWi+4)2R 4<(#j2D4Bxh}5W0![W B)@\[*EDС]R%f j[s<0&5\И@b-ThDtT*ʇTv 4T!Q`e0$pSeTF N^C$m<$'ajH,=СPf1r"-Šhk9P1 Dj0]LRʍZ25ÒB0W;Bz5'!$mjNVFMorS@^R^Q9/rRR\\I(W9=2RSFPF Y~E- nSy5:Veڼ]NTO jQrQz*($T+DF.WnLcSUK+#.kwWA+kIUF/z'ZnVP|ԽZskI).YoH}rzuZfVrpW+*5k>CQWޣrkA* 0{ JxNBt&RqSD%Rg@)!HP˗.O %B * ( Q.4 !ĠqE 6 K%T\K\j) IPҚBB"P0Bj%s ,j"y ^W5B}\!r61B5\B/(aQTb2TK(&soշJĚH^ PS٪N.3r} ҏ+*zIx&OdPBR-E:JthPi #a@'X CNW\TR=k!/LDQF w> IJcX:lR<ʸ5%HԪ2 Ԕ{1uvBġ-DSyi**4ʕ語0İ2ښ{*oV|}ǩ܌= `X89))$iR[UfSzII\\钹:ǔ 9%q( Z%PrBZLUÑʽLzI7UW)Jzިݔ_[' 9:'TD_PէY;ㅒ=St],\:kF YY]?T7hM5P~ݤ:TކS]TFj%PN( 2V#0P)2 )Ԩ  B )r BTq (\Qt(*U(!%\ W%!*eAMQ T"jJ U^7J-D(EK:B%.'p] \'-$%8>ˆ Cb B4 KPmVVK)s-(Hʐ)M/jz9@BUs\@6d*kB,*JEZU]?5LOQEWmr45@EuU\T6t7RA+H~`GҧwDK[2@=õ-ΞJUʰ4>ք#P%NT6jK"TjH0%*R&>POb鄿QSs"|E#1p&8(X1UVU,lJ& (asC\UMz2*'+\*U#.r缞IyR*zR.zQzTJ/ZNY޶:5U՗IueUҾ:x\Uw+'#m0n**FTK&UD x-Z:@( F2N%3z@*je (`(%s]+K*BB#.KqL *.\ rr$(F˔)pbgDr2Ij@r ((ӎBKPB%˓%ru Bʴr1Q.F,d 0 4+#PT"5q*a $ ]DBd *–*DsҀ;+*21q R4" BФBxhsGR d7(a* \%-)l4QK.urVl M4TW j0 ?-GPzaz[i){Wj7`z({ 6V[N$z)O*Je $\MWӫ bDT sO2 !n9ꉤ)enB+ qD 0;P Mg Qk;8L|@)?hYbkY-t%9(cB6BSJMUoT (o)'z K-(\P(.A\OWiUYrMֳk!}uDV@pj$AF"k% 'zRj$TUj95Ja06[(AȀKbsBJB%J܌6t*)H@PB-%  ]*(%va.B\)rZ)%D%H)'PB:ZkrR7ܺS*˗"9Jr!AD(J!AE*H^&BN.E U(-DZar] t;QN(TW&XW"M,B$I !.hRJ97jXj9\җ'`v !=)h)$&GM8@fyPXm<%=j!+#÷%IKwd)’ W GIV_NU* tN}3' D*MJ)pmvJPb7 jgҩM.}/]Q(J{ I@NjL 6"cUC@E9sTʳ-4b$ a6T&= FuLE- *Ms"]LJ HMKMvH{.]/]VֶmzST6nK*NT+DҚפ@MDepP r˓eh;T@z&TrBKjp LNP+#h@55B RPDܺSĺT(\Jtb PԴ S8=KJQT$8Bs"BS! 
5!]R DT$)\5r"ZˊP9r\BDT 9r7.\r\ \Hj!r!L˔B*TJiW9sJ"Y+0q\TrLA \֩rX\Ю'Ũe:i%p aqJ&z9BV7rP#cWARAWT3-9ɴꀮ%MT=†@%^%a x)53aSTӨUL1Չ0p6I0eLp1CXe+DLA$& id "LˁRjTJJcoM1E$NyDjJഗAj;StiFhb3݅C['SzOSM*icG)HKBRX$*$5)` 钄 ^z%P˜RUbhF) ULj!PTj (F&P$q)H9r e(H\W"ABJBJ@$R+W.*Tk%QJ /FJB.)c.qJ*Zr(t.H8*T!)\A(\*$+AQ*% *W% ) %@Kr rA !xT.A( Ա-TN@]*U%~*bhj:I &mB,lS֤-Ķ9pK:ACSYRQAʂH'E/U5Qbq(G1YeN*uT".xob;ws9KJB*lqz9Uf_DR3#*a$a0/ݹmrxe`: }NS*P%yzT`L "(8 2 <$E܊x+Ed{#{&: 5)؜8q Ԓ]VOkXU[ .05+{1("}ѽuW`W)Nz'$]LHEkDBtJDi`܃$"(Q@*ABJQK{$TH9( `m\&5X8R j%rj(JT$HE0D rA(%t%BmP9*mE -rP8JB1%BJAP $%DE%B75I(T@ R ↀ!tBSPPU(BQ+*JQNR.JrPq .\!(e;rPT(#P z W.( C)Pz'K֒(sT!^)m4+Ố aRQLa#1-дUj04e*7 eMrFUZ SAPtZI3XJ*Ri,Ari7U kb gx*:7V2UUȨjE'V6hLe5:8k C Xقap2ͫ^RYEOgi+W+$(ˡ*A-uR !7`/r2BXW4ЕT.RE<IcN$B0L0:hUٞW69 C๨\ڌ V .ܤ"/JW sPrLu=="Q a)PԖ&JCv@KH Av%.)n*K˗ rTڄ[ qP.\TTr+\rJAP $P.\!+.@rBT rW "% p9AR\5!!Zn#@r!RJ'%q+] Ԯ!r2T9T%t%p)Hw.܁ˁNqBFՅi( &!B@Щةh+*U 4% @ g!W*4yBiaHr[KJ5W ;B\!D iTB!Q,TʩK9@FZNa&+B>Ĉ;>W 8rT UlhWKل#kt`*ԟקEer/dzMF1M2JBcZ)Lx)i (SBiJK},K)azs1&"|\GtSAzF[6֦0)ANJRS(k2P!Cڐ%4ZyR,%A*q*A@*k(QB-%1CQnF#%EB**U[*9sZjЄmBФ)\ ڄ# I҄r \r * P Cr.$\-+ *%% %p+nLBDk!ܧr+)AA* C5 rːr.\J\ĨH*!I %IBT*n\E!P"ԒrAF0T\RjTM$H$.qV(HPQ!pKE\h %I*XQB82SSÒ S 8 \=Czj8(00J8. RW\А m+Вm0 JPZ(!pP%T%#nQ|4#]8 8g*h;aCY Ԇ`M6 u>TS:Tedon9W.Q(L=T q,dAMX. 
,1=ES3RhLYѷw2zсNYiMKtêبmA%fW #c>, ?8N /%g;?T nBD:\k((Cc$8@+W+N>͎L%Q/.6P<**!Jg/ѾQ9ʽzTgolang-google-cloud-0.9.0/vision/testdata/eiffel-tower.jpg000066400000000000000000000152601312234511600235610ustar00rootroot00000000000000JFIF * %!2!%*+/..383-7(-.+  -%%.------/5--+--+-5-----+----+---------+------------"":!1"AQ#2aqBb3R$C4, !1AQ23"#Baq ?QZ#QNU*T":("E(EJ"E(F)EJ((E*TPFqN*N袀N*ThDT4aw$ =Cj p98?aǙZ|#E:**"*E:)B1DT*R1N(ETQEXS!$ c ?*4"`0A#!"{#!tbVIeYyx HlH F|A*ݏjݔt=z9E:+QDSEN*ThE* QE:(q@EQEH)"?:tT]+ <0>u# b"(QQ@*(#EJ@8EP#R*T N()0ڥDNCf6[\_v;1gvl0C7'0 (%D܏|̆>&YXʜA/M;TEhʜS"(*QN(!E"NPO"Ɩ4E"Ɩ4-\Qf4 (EN˨eDU1 u'nQ6H۬GW߂E^3 fo((D(F >Kq_N:0wKIimH{`bq[,,:1G'$6:YI>5wg.tQ8LD `T-*`QЊQVEJ-\R(QmXэYE]EXҊEj8Ҋ"(QDUDPW@XfPd|b~1ߵQLYm OJ+o+<'~-.^8/ -WXTX?>wmɧP-:!HBQ:UEf.1a= U; $WU66G@3C( dQB)R`P(*QN(!"*ȥ-(QVU\R)EDUHEVEAV5XqC@ϩ}EqVjai2cm ĉUX؎nL_:H"B6ضvx@='pjւNd,UsJ%XҖnl LIbz'qF͙W']6,9xg!,mDVen8vىTpS (#B)EYJ*qDT э],j)ƔUiTQ"+Q+P*"ƔQ*xЁVTD+Z,KmK}[=-jAP@t q돪8WM[BHEYAsq cs1Sw6/u-0p.|bpd:.]bC[ݫJ02A+ժ1wk*&S ?~Nri,ٷEF\05n{W3[R`UCt՛їvgnƳaiY׫(( ∩WQAƌjicGkAZE(rD|icWxj(rwYYؒS*_%PԂ }kk63ldSī\ 'hMMWW0A+"vOS5ލ>>f$vAD7 ˫smS_ 8+vlxԂK Ɂo;zn6MF훨s H%\7BEx_ݳmРXk:huxw1tZ\R~԰n( xE?Sfx׎.˙AR zU9[KBVvKr9v~zMr`MZ䞦 isiH謱,֬~7,gy͈VSտ=¬. 
Hx$WOK/} r{*O ߊ 5ɮ~&]{>\:Y;*S"+a% :0@T$FLI#ֻێX8ז9Ds.WqARq*:ohAV_, ړ0 }6/[!lቶ!_ G VPʪX2$$1TИF5{Rx.d*,ZcZcfz*],kjqVƖ4F4B]H*iEVqAZ)E,TV5i~*/^fGAe/向d޹_zAOh 1's]qHl@JȊgC}\Bb p &F)lk÷s]P!WV0ժ5` B|f0` [5K_0PZ+1PT3[:` O،b>vj+gYqt8* x0U PlwZ2x(vm֡Z gL4;-xU MVz|]"[Q.dm6≳̴CZ(IVݿ*,sgh\YzZ1% }€ [UȆE"oڼɘ YfJ-pxGb H;\z}Pfkv<,^|~)޽qA0$JzlX$NLGÉ=Z28čʒ7!I'V#)rxLՑF5lxƭQNQZXդR3cubH^cq^/qع,F?uŮě `nw}}^Xq.D}"{NBpg15?Z틈~Dm*} vkpyyeaai&9JUʖBI`IS$\&t2n,0U5W5먹loG>o5N+t s̟sڄ6UTf%F6XN\ 0ȶ.\f ARwٔ" EKE/ {c9m?S J$m pgy&@~]km 8w;o^,rnܓU`@O@%gob_htWAjd*:^1 ^`p`z^,2=`Vv(-K=oh0w܎qouZ.ܴ֭qlXv,Iq<'\k䃾KAȯ<=p|NY|m򳳦2\$Bb j:B8!B !DS5  ,2RnJ1 ;MK0lLR6 r81ީ KcF5g\Pb0`A%A?@~:Ehk ݚgU,j!eA&lXs&W9iY‚`̱4ǙV0fUQ,Oz秎i̟3"A㽼H6I; h޼C`2=` =_z&9zAv`Q&GnO wm(~(DFVZIYe3"D}Uj/bO mɬ燲s}onXb&&&>S밬H1x_O-.$\{]&@o1}Us:䖕!^HT$;'IlNL9R Bb?{۔OM\ս-\ȒnߐYd@]4k- .`F,+]gt7 nِ11eisŋm2QMloۏssox'.unG_&;V FUGK/cf8?XGWcRK\vBBª?X;6(H ET4w0b~OT]vHu;bfvҖC+2a$ |?g">[s(L>~D/ilk"uTqx5퟉XDdp2XrŃ@P}An&񫷘v[vU)Vtm8;z VQbw& ' ;^F߉"ff.U9XPZ&݂1f+;d-iګ33Aei* x/6rD .ey8–P;OCy6h Q|h@${-OF)nrw1`m\ 0WpIU4$ehvp;D톨%ݥ 3Uy;l3 Xm[rb_69Ou-iSˑ zCDWoHʣZ1#] W*A30dބׅl;\0pg DЫm nЕipq܈ bF7>P-]k IYUAb'"k2˪嘕ADyMi>-6n ݙ0+$G˷ʥ3) fwzLZ&b^V.D7cBskj.i aLTr xzU򤼇؅*q]17>0`҃ a@ܺfY=BXD;כ.:d);1>)tq8߷-@K\NXHش뚷Rw"{L]U[IhM7_7.qTI#skM2gn}+tKn$lGq[cGb%唀Q 9P x`wJB(O*؏19%A[=CQߠRV)bPXZC15GD~>ߥ1P -m@vϭ2~__zm z}w;Rx4 zxi ;E)a?1J~OR'@վ3w?Q# im7 =&?,+?_Ҧ3 2015-08-21T09:37:20.513136-07:00 Cheese UVC Camera (046d:0823) 72 72 Inch Cheese 3.10.2 2015:08:21 09:37:20 Exif Version 2.3 FlashPix Version 1.0 Internal error (unknown value 65535) 2015-08-21T09:37:20.513136-07:00 UVC Camera (046d:0823) Cheese 3.10.2 C     C    2}1FKJǦ!Y(yʟEZi\Tt7iIԨrJHCi~jM{UH H0  :8efQtjv!,ߖgӢy2U 04 ,io4UHj)J۹˞v-0(]1+V>r   @㫉hgg>}^u)Ss4}dggSυm]z ;jdYiѪ\Y:7(Vi['}S5l@@  f=kz<04oOHn6o5KǓ7K7љS޺3EhM)@psT|ZrR]B@ $aKJκ9|?y+DWVt)M*oSE4GzD7OgtbYOX 2OGEsNjf{hy͒9[U=jz6Ra|Ozk{[q| *gҗ>ɂE&2מSe{V)W@$O-?E\/󨱵;5ڊBy~%LOֱlS O [І;iM0=WNy~W-};}g9GX$0@A O^u^c99ubdG"Zc?>͓ONNhFE̖=s\5vgq_P>IR.b@<7iz2xslboq[yxf:3 xi q#F{Cr Dn;rP2v^N4g@ s r6?yx[o$5BϡZ:Ɣ 
BkGΪ2GtA;͖V\fc'袰y=0'in>ot0%(4i.u)1vt}Y$~IkS+ܽ6ήbWS=<ϕeң4IOcy=quDg!,rw5}ks!'γ>vx,IG՗=2ѾT -uF!vye) y~P>5)e[=էFP{eU`86m= y|zma&@M(8y:+O8Pu3þmJޑZ`piAb@pLaBI\힏% h):i[oj$hcwo9mP$מrKPu`dj'\t_/ʂ1~/z\-y:yu]A%iz=iXΕkZf°ieYw}uAp6"tq@x Ru,ulЙWaP65xR}[ie3Y/_G(JmWʯ̇\fi4_/DW&WSoMC:gDWu4LUfYƉNc;".39Efܦ"4)n|KT-(3yͺj喵O^]]sleNF$M9L#]zӜAsRݘFueaYLest}5ti8/dzz)FMbY.&ᶙ %LVVj|LA7wc?~/Isca楲՗>:xyRc1^Cq8u֜:uZ@qy"l5Q{Dqiw',KIrTJ.e&=uG !yЍ%Kt;MFP,QahT1¾/biro菞+7foyf=zX>$+R{6Oe>3>o2tn 6rߑPsAsD?=m}HBw7{`=õP/hjSWNH1v7<>6qs9]-mn_e7&~--Z*n=u:s>1=f^^S[{_&{ rj㳬|ulfNN{AVq$\[^z3rཛྷ4}sZ%cK2~JI&Q^nӸ|K64_?NM"R}}О=w1L MR!NiT^cP};%O=O8C HKvϲ6}'ͱdӯDB;1IMK)+>gFͭ+za}|_Ncz;}7'WOnV|MJ;o_5k=㪐ԺN^<B]9!m+6!Zm5vە M]Fި׻4N@\ #^[涏sxYVeXro\پ:tYSo6JMo5+z -0D&1c9]sTA$IYrԗ20vs^2N[Dy㳁ɱ=_ΩLjc 4kҩZi-S|q<ꁲ3"e3;J)Nf59}>3=9h kdlN,6{ͬj&̶q|@4ӄTt7k=^;Sij;Ӻf_mG\ruxn'<>oÑqj:sgrmF@guII6ɡN9:MRmksԌԞu%mb}?Pjf.֗W1FL o͠M&e7= +kR,S(8&ݳwO&+)eJESnR -#\/+DyƼf@M)8ʏzc徍n #K;:ޘz^q[+"Dg. k4[4-IDvW='B3E9mއ鹊8 汧$KP#佇ô|,$Rh:JZR5_M<7.l\uY'Am.D`(<HNDL-7xyuL*;|gGS80 f'rWҝ|:0z ʞaTo/mg+?s[yi6TZj2,VNz<|[:Eab27-3#nܕ2p!K\6[L]@#+ŁETF-F\DOo4G,/raW/z%Ig7Oqrt9Q%X]NV;5:ZedkLK6̂c*Z)eGg0d_;u*Q= xRYͲºFrUw'ӝZ*2̺AN_hm_kڜ{sIt;VghQ'M' k-f槁6#8EX5 kGt6G}5^+xNOB7*x%q[L\/G|sQ[Y8U;rmǢe}I˩PǷwߔ;ܯ.gm#P>4dv٦-ɵc$.I,zdP SRXdduZ0Ihţ- 4z7mpuhYwcgUo5)HQΡѝ tOtW=7C?^yhIm(+. 
$Q,ݳlMZOCub@ђzy.w 4mYhkЏ.r0뛍pwu% z})G_;mDmf<чAl5Cyxy Fcgm7)*p3r!Re.LCS1o4kMsL&Sw H G[Ȅ9Kjiƌy@g+pj-{Op>WlKNYFe Aq DD+ThO IvܬW*(v6 O`]28ןT9Mg]sV{)J0j|:LGr2i.u3YmThC*2 Nȍ-k*4q-r$9=Zw=ZXTaϞyV\jҸ\aP":b5)D rե+QxjDש*MBK\$DNEøӳ޹ؓ g&UrO(p'.YMe0.UoĦOb˿k#|C(S-tWbJ{:]m" Ũ҂E05ZI5i'ė1ٙuVM4sR^ˀX*]ZgxmcmU)onB n"ȗ 緹7EVs}aj?7CD o$.MM.0Md=;jDd`$1Vr3mMAT\Ǩ8XTlH\bk9my-RidBBPjҒk+Z(;VpGh!#Fw.W\S8]׃;@84ҘiβTu>D:DUFBiH"cNz* :iܛSFNOTi4!"1 2#$0345@AB%6&unjj0Q5CAn)#90XoP8a[FIf|' 'ك0'&\/Sy#w*ж#t\gޒ]H͆2̟pZoܨ}, sh4,8TVc^kF̈EKY"mEY;5D(᧒/bX|Hmgx5c TxlI3+`o;\l揌qvq݁G'DȢgMX7,qgrg`VX^{tjxsއ1G \_Ju5W>B*%`~ɢiմ&w;+ex2{6ϻFW*'ASK "R|ȾS5S墰p-eib3)+ wXZUVGvoH~ߨu|NtJb%;v[-{I$N8c^U ,2 ϽZ~<8䐤~9l\ݬh }B佅'eVXV2U=uT/F}n-;"[5$(ŁFH ]ݝ27uov9#]I I8$m̲c‚O$5~ Y\Y 7 (l:"$R.K^Hn ;]etBIW\o'd3w+5'd'IY۫(/Y"N|LCvٔ|QN*q(~@RzPhHw/&f~?W;ԯPg,nj!xaJrŴ6L)V`{1õI*;MSQKe rM=v~2qsG2RRI1-b&*W|Ⱦ2?TRq:~7Ԝ!?TAJGfB|79bW'*|}BrΉI܋MdDT4˥y3E8??OZէ Giѩ#vMJ[hLQ#zu^(A13.|OhOӽ2ZذԘGeP*O-q‡ i|1#f0q81=)gЁ=hvD?ҹl盹>괋pt.,EI7ۭtj*F݉7fN›.JH\X$wO!w};MkLf1+-ҥR9>Q~a@M$t2@VH!zQArثZ5l-E@Wu'mgy m%GJtCkCƁ*j|e؜z qxIJx$L׎M\fuiɑÔ I9X;a4Uj !)# ո'9Y#-/(w` ]CGV> tNM8S,irO⋆O]qxs_n1}>Ĺ-ӟmvEv vV jɅZ1A,ErLk@fCt䀙kȡ/#kѱ5;~v ]՟Ɔs՝>k`b"LbԎn_5,M+38=*j=ݎ=l7Ά&*r2h.Ml^Ԓ $e&BIEvN&آM'G?2Ꭵ~Hdq\w:"Q(DjlcqsB7E\HXVr^9!TNtVUU"p^Y AKB9nvO)r*tP;NvYh,p;핎(\b@sQ6 wzf>F;n$W@ld63b6mKy.JB)ߋnʍܡ6V1U|,mLGgY >;,'ջ6 7 J< [tnFM3ױn"啬Clv3c":5{a+qU:+fS(6Ҟ0uR{&g-'I>좓vLȭsSFC18y.Jݡcy\^Y.8{jD6n+ó꜄#7MK+尕N*UM%o&'`^_Q+" ̆7RWvN0amRN֚zΥUFGe싏-Nm}XQES/EѿÊy녂MbF11qnoT%" s4,|Tu\6PvNRng9my`mqY_EܓVU1Ul11bw.ʐv7Jg;=nv^hϵJֲ%yǐOH25OLJWM\48IԌ@uoN1ڶR{3E=Iesu ~3u"B.pڲ6q_O8"mzwINi8H$8Y2u!1c1W-f%#6z-aJtz'ؙ+}Hۊq+ճ_x{YYuu|f>Ms_c6(5BEoQ>9װ2/ ɨk۔*bq-:y e,+d'1u }ޛw%ŗ \ݭ2:ZFBw39J_I1ۦt~3[S8a >*7Pi( 5\ޤj 䡊}XUrx| 8 /u?țxɹŊ~\kdVh,YG"ehq+a9 *gdzWnrrQySKƶGAƆ3Fój%ɕ\,jVweEU-iE\ӷzڕ࿦4~}I5 ]`/_2[H+Mv1he؀H<^Vm2rV}v72Fw9" L[}ޫhw@LAΤ[lwcu_ڡw+@O?Rm&۹7)!o؁]~M\Ƥ=L~ZTg=bQUjݝDgk"Q+.,(@ ɳZ&~Q@e,InG+2jɝoH։׶{x\}B~jfLjuQb&zs2>[aXiba&[7q/ݏGN~'5trnD1o(Q&ԑ8@Y:rI'#~̣*r)F'z>-0ٙs؄2} r淽O*d-C/Β0EVcubYORR.꙳F \ѳ ?d[kmM' |§yZk8a'RBB5=kt1r ~` 3e,M<\y>:Ni|~JZ]cFlf 
Tb.gq3aGl\fl0n5vn|ZlO2,P"r!M " hbN<1cD}Gu4[P(I~I~IlyAc޴;^?UoB }VҦod Btbu'Ez" Q]E3ё"}Yv=ن凭 1;,NKٿ&hAFN~3xZ+O;Zlo.ep;jVֽIsb"_#7M.S͟T'^YfO}ԔlGYp9/tشlv2F"nB ;;wdZ{JoRIn+oEn7u8 F/YH_lto[go%#zgqzŘϔvrV8D{v-aȴ٬Rg5WfNBݱ+B7P?)ǟswk(e|}!'IC-8 _RK\Ӿ~L2uBIQ qz7E3?flCԧ2Zw4̱ю+7%[wJy%'o䛕>QE.)wY$%M/( ⸭3ٺ:")9F9?0v[(!f0'shJN-xҭqz]!a\8'~bGWJ_z:o δ qxcH4;ʈrNC1zV j[#nR{mT]v><'n;y_/wdf'm"򟤿=e ?X'*b03v)eznD-P$7Q-mV^3 ͑gx^-/s?ڵƋ#m+EO v[9@::to,Uc/P죗p3ݘ1ș7WQ>GwiX~Q\[m3)šߤSe,Rд^4Pբ,o M:bdm&N-~L/ˣ}ۭ2V-jOIaݻliT'tO/ߡɲ̜ȇ!`BJ$ 7sΤ'"@.y}ڍ[/8}~[nXG?59_ofr'[ߏ,!1 A0"2@Qa#B3?Dh ȏ铖:QD?b^*++(Eh?χXtD93|0/DGQ(B^Ѭoi緲Z)Y~hy6⍹(| <_]V:DG&"=H"YU pN#֊UBllӞ>#ן"uSGR5{¢W)m]C"/Ў%'CvXʋavj/dz# bbKP6GN; %d>b\D:Ј,!:̱<1D&pV"4* IQ SdyY$8Fa/P e?Y|aȮD(T/JCg&"J ŤNдѽhK=δk{r"鏫$;?V"=Ѽ$:Ң$21It'БMMKȑHm6t}nߌ9mpQ<3%qf,1[] JĄPD4dQ./[BcQ#2OUI%5ד'%ZGgHosE*CRCXj7ˆ-Q]%3k+o%""ێ,ˬjGvGNL%V(>GKo\kt\GC= CG  ^.{xdDRY 7.K-k}5;=| s$ "J:t۟I7(ɡ86B^L--:펢>NKOtwabˣ[, p9Y_ CK~pT=piUM$n'B8쎛\ *%MEwŗVm6)X8Y%heb"3J{xW#_O*6X\an7n73qYx]j>DP$)>HctQ8:xy٠Cu9(to^[${} ^bbFdvXYbи'/B̗9z*f}. 
śb $Y.)n?5UO=M,?b}b*eb^((?)b/ '/xrإ?Oq%Ic솎gؔG/#б,G/4Efnd AҢҌf"=fxYf~/(XpbϹCHyXxh֞b=S+3,ˬE<, |x}Li1x|٧v,K3^#bvǸk+tDļ{.caEcWi'E1wmi蛹w{ |O\/~s~.\Q#[AH bXCޱ-3ew/?o<{MN}=_=!śϥ{r7Ѽ+Gs{}˿(;Q$QC)\JwK369vKM YCi>$t#Gާ'Nbk D_'d #آY'#S#ɡ>[Y!)5GY^#݋F2(dRvoboqБEnٱ*G:'-lB؆%DBljHV{z;4tެZj;qgʴ%XEVYJb^qbEcGx'/=yfGwʑYo㒬Gj}Q7G $?"k.!1 A0"2Qa@Bq#3C?|OZKn%}ǵX, ._%Bڿ^쁵?1K#i/Zj\e萋/1v7$GfWEfu;!Cl>2^dŗn]a˒/ _j+"(Ȥ4%lG,L~׋_eBBj!>+\QSWw,#JݟW=/]aM@Prx:eYb6'|j p /j8'"Kuj}֨RHNx$d$hJ6ԛa )26b6 2FjR"UC\ D_n.^~< $;| eᢈ\GǖN)!)>" KF#Z+!zش((o%׬&J]28>Oi2\en;X!J*Rg;"MMHi~CZ3n=#"O1OKb[B}b%+4aOcgX}#OHq32xEf=gNU$3GCgkk_JǍnNEgOQ7PĹ.qj.p>IߢUd7 FjE(m"yƍ8) U+#ɭf Kq6M#}o~WJ7VEQJ#3슱J*T6Cǥm#-DŌڣJ*4=(\xõHlvvGk%5푤n-U*mc\[BL3iQ)94#IƨL xO $tﲨc#6y<=*VI}eL͏$?q'$y{nG8yN,&d;%Ejtt38c.A5 :5O)ҲRGϯXB*Yxx>R7}c5svh~ǍY>GC$IԍN| m׿V$Iez,UljɎo+Ŗ7~F5u:Cv7I∏Ċچv5_֊4Ȝ/%Yةcv6j-#[sQ)͚S~s45{Heb?(LKעtJXhG_hZ+%4#ٴqPD^}+~IsCİ%~(*ƫvInCb\:䞼$-uiGC>oY Ny^gg*~Cv.IXZ41$KѲ/d'J[%#x%"iڏi\y"Ƅ7(vBzt'$W<,!&<%d#4ydžy^Sgq!CcV}z!e!(Ct6Is71y+bTy|hQnK ]cpĖ(EU׫-C.Hl7O5 4ҳON0% . GyLr$CqQ%f" b7B,{2.I!Vѿo~GD%XC}dB (O⩞zXײZ0ŮU3GtI+ ИUT}HkLv[Ů~Tޭmx*C}L"s@/qQzϺ;.S71YY1oY+*Yi eC}U y:q)f >sUf$>2:'7z}*C1;xlMٝ~LXZ?n;:ocYZ黳͟? 娝|@r#ϲt=ih1;Uo~ <*ұ@:kw oG!Ah7[XuFb 5ou!^)Yƍzǭ (&:$qYqf?&IRd [tqJ>2Tm5vNq4i pVWnNTcb@Rv8/{. ٝj׳ULT].>Z\?(-- iby ˒6N;5 OF_4 :u@k YcY8s '+gG3Q4v*_U֛T.&qiʣ^kP-Do-!.j6 侏'zH}Q]wzA@Ue孢洰qlN9ٮ׊q֌JH =ئg(i~whwd v\@ UknTZSԯeZ WyF#jLs%9]->l֕X?/aL.cwshY;ce~kC2R\ F4n[%hٕd{]IhzSPޓeRp=eF6ZEŬs]&xw%>֭Rjm! 
wveh`tdA3}kĉ DV);1ge{{K]s~|)YW nYɍ8[Z;TӚkNmn8SOg+kXl`ĻHָ`|L.8侏'zDٟhu9v GzStAhPYj/=NYTSҟzR`n>MC7vtfHN}涋p~ITJ,EV>!89^hB&p*} 7IE-#3]i -EZ0cuj Z'V0kC7T+*M0V.sA*}K}+Vw4o~0è=^ѿGzNium4)cThA:aM5 ĔڶE=~h5h4@X M?Q ?Pk@kF :[EFҧ1yJu*Zk2<$n k=&1!Z[{*Un}& 8bgQB 8zH-u,7}aoZ(7wTw5Y6N,vέL3Vflri=P]j4{Aߐczh[]>3S24WaU.57ob:Q 58@TG6!z[̬*02uT[9ѩMΝseiL9Tc+87o8܍ZU-" !R}"?CFtT;`viv+yoU""[KH]ƌ_Ooxw2 v{:ِe_ӻ~^ؘT*G'7+CBNCZ8iLY^빣g9U9tZ9sl:2PT9o Aњ/1^6V[nܕߴ{^_5vMAsљ+i1GecWmtq胱ٌs@!p(?UhN0cj݄lFCo*`TA$5TR}}:06wF\!Ym^ɫZOW5oF5tS1)nTlFPN:أFFj[yI's]G2jk6z&=vӚչqP)9!w *TQKmJNi)6 t*yuV9՝ZQ:T7⩊iء=DTl(; in;ר]Ĺ}FOeZ5ii J\ӌԥr6xmLuZoxs yJuF@wd+e<1mP Gg5E@/}=w7clt֏v:H"pHoFc .{++;%5k.~2yhsI|nSfDQv.>N;oJѰ4~g]--\zjt31r[ L0qRA䵋G%nK`mVjNff,+O \Ek5|S 7K=ևTqW)*,jO oі %4=8Li*VPf! m+ԫxCuYj2xࠉR*Ug1^8 :I̩i 2f9gI~S?U aY\ E ,]S{Nu@2$+ V_\ Z+Hufub˼'%9f wijkӢ)tMo>}(oANON.>ޡsщ'buZs_M}zA&>[:+-, M5gpV"<>)ϭfku];綑YUŹitjn[ 9_L{L.7sFE[c˛vGt:u^;TNj0q*Uqmu7#ܵ]S~Mh5EB)ʾzxRm|0 ԥ^9:u@^ד(2t8=MtU*۠Ь^|T).+: x"{??4ϧ\p7ǽ}գ~km_q_?55oge_c? ߪ?lY بد:˷O#v\Ϋ[O7 s0>Bl\/Pϵ.}&#sPv,Sw+ GJƵZ>mWvsc~*u 쨶Z"։ʯfʁݓ~4 6ccيT~Ncp7jt5W'38]ZSfʥGctjt C  F1ۊ.DQX,0٠Sn~Ph#e5F3y Iʎrk5 4Cy l3RIl}7 (uShhvj EXB7E+[- .-UsT0 QrZ'1} ڗs9)Ic8F2P qެ[ 0U݄},>|NS;B0dǂ'7$Q{X`EF9W ǁQtFW`xZBu](mr;Ucҕ Ķ 2uX+&qĦ:Weߒw-]DD4vECi"F$T`8k|cp{wÖP8e}?Kn?x]7qT(ކtqPJxIڏq2qU>w5=g%֏v^ 8|S uNu6akY]-vҪ=rUob,yڃ=ࡹr*7D+DhhDoyȍz!TnNF-rݣJ<L 0XBmZM7FriV;=z $.ϲU*MhhNoj'yN{N>Ό{l"t#[F@YuF̌ m 9wkPz#"vBu)iŮ@>Si!P%n)JڭVz΂Jq8MP$b&qNw i]:VAYo;*j6zŭYݤQ;hUiԊH*=Jk\FЧ5/bTS%"C4'h;x={U*h-[+NG *4 ~JB]cCpMft*%Ś}k7iը:AA奪?XPotkI)ʋ% &:B@z.۔>*֌ùHqP@uNivn`Nx 8 #H7Cs\BFjtqTѰ 52k9 g\ a}(4pUj}i%4l;x/Aviܭmg%7q4qH*5;€t|tgΰZ_rH:&$劒rǹIǓ7Nѹ\ @8"ZpPsPh@l="7fo!jm ^1pXqMbgr| m .љLVtfT[b<!ߣzLx2[88[ gC]1sv@8|@6KS$sNPHcѸ{Qq딛z.C_xZ8p_x9PNz+8JIcZC:CI2Y+bgA]'Ydd2L;\hfB'bt??2kDA0Y]gV %8oEν'ڝ{SYQ8pp;YG֤ᵻUs~NVg6:n4$ L|z7PwҨsU-*ېD)VGjL,ժv74GT.gXdo4 C98c8qMG iDulg#OX^BV#㊤2yu,HIWxhDz/-' Ӂ*_|WlaT&Ms lu-S 悛^ ;>a_&Ӧ7& ;89~xh :OǨ?3-xڻ% јܮCs#ܜҌ%nK)X{[|k&-n*3ȇ|9݆bUSRWhvA=Lp`ZXy9}M̚u5Ҩ 0#i[VZcA\_ KsC0aOctT6AUGG rzFGa|G18&Wp% 
éD[<21X%=.E0A26v:)Awf5M=vҢr0ivu7rj/~KyCb(0!5+w~xr^Kg'?40:$wkpxÎn5tcܾфsM0'Q8_zp=P/4}YFVEMk. ] 󆉌7ɉ~{!jހ#ꇰ{Ԯܧ~+ a:<T M]o^ed7ʨ۬M`^wv;Ցquv ##0UVn/n??S\~Cr2)6bkS) N)/aif xzh{dnkserG::SMTqяW{MF sX<ޣ]N+wRoԝÂ#OwEHM al:ʂ i)y6jeFĴN յn_0Ͻj4,J!ys4n2ўTo4\Iq8ʾ *ikNs<ԥF`y@>nys_X~~FH8!CvG 7fvBt*N}"fwF@ 4=r3ca'RK.k5Q6VЏYBv~~gmq%\̵5N3$IĕrUWȩ`H>j>MaN98i;}Wێo Oinj "jj;?;s;zbWi಍k8ʂqd[4*E›~*o;thE2dn%yލz9HaZ;,d xz׷)ȁsYۧ8YghSZM_  uF1ת@Fp'}zZΧS P2w>:8螣,wIX^޲ fԛAƍ3*#NGISt9rFpt4bJ 0kwy^:; ԩ6|՞/p`p] L**jq ]xpYy>)=uN?2`Z!:#x4NbN-}S %V{Sbm!w*~^!C1i%Hoςm x祔ۙ*oӊhj>LUD6~zI܇vVYʭLM:9zkS )(h❩1QJFHk蛴!\Qs;s$8kK(y2vSl]NF @$Bo"zoLd{yM;-í 74*ptE٧6NZ;УM:7]%&1RLA@T+0wȍwiNQ!R.Gi_c_)!1AQaq 0@?!QE[Rc﯈CFps+=Oy[jy?WEL19ņkZI1B;@]ot/z1S]ÄRsfdsC{c _j;\tscR]s+%-b!JEӺ~􏡩*Z:3;S>ln̞lKDb<ǣhXS*N쀜f\*cWDg V j5詍8.Q1dnΰQՠIdV)QR2 vE p2FAWhbeQ4>!d7+XA͹K[`q^Yz`a.H樭y0q)Q:SSC]q*X$V˂_%LK+,Q@k X}K\0P]7/ ×8)lU xX_N*f:fmG3R 8T\d)89x{f9 |K =3 4zUR?ep,u؂4>R!Aڸ% rT.oM'XAD7j yYSӑ91ZۂAb甪TS Ae@b j!UkSԕ. uQu*s>'Ka/z%9*Vt3cD]+gU0A^lC V}Uz0:>ML{>r/>+Y}}xŔ8}o5n%۫vLgzS"fޞPR_wgTd7k\QBA 59aU'<}hs–iK{r" z=4[S4Sh;K+ %t̨kHmƺjn;' "e5SC{/ (W'Y/x8 ^0J=Dc%A}ŚDʳVJ\}f5xcYJN> .טxXn*'P/X4 k !s!_;ȯe4w2?hcL? )X+j 락$#BAε{(IpK7Y +)<>i33\ػשIy}-"w8~!S^XF"W~BagWJ1( J#f=|SKbZк;;gS)1en#]s7CEZu`J2_z3gw[sq4Eb?ʿD52WwPŌ1)"eDŽ28}0)7f<x< zX針ǼMɫ>?K!zC, ؉燜ѴMer Pyԧl_lM0цhv$@n9׭Zܾ+@h J-ZF.M.r.\_=1}u ?S1.Xg? 
@iPambj`gų!9@%N,ǒZX/9IiS71 UαC.Jkbj@* Y՘J7[rkÂ*ӅU/3/#iØZ@c#2xuNϓLs&;1aEڏ:9 Cr<YpD@n̤YqcEKgk`?b/@ǬL!E{13j\j7ju2{wj UQL5Y@l]]0f@Așg Jv0o(q)9j+&[VՔti1њC9$QW~t@+3HP\wm qZ @gvt ;)!1f-vj40 eKMaeC5"ҏADQ64x\H&+U~򆅅A]p:Ex*65/2x՗k f A})ymt(q^cݬS,Tþ!/WK[F9DJǣsSH @8%5eK,;mdb!t &u[YHo%`hHK^%msgl3#7un3DTyTul]ԍIm#q̪`Ҙw:B^C~0Xe)4宿d]6sJV9t m,6zpn?)_QDkAQp+uMX,Dz_aH9ֻԺGU/Uk:Iu#F"QR/X&G˄Qw|<#@%Uv Wҗ0B`@8]|*lF.оt|QlQ~xaؕ RP\!Ǜ;vh8t:GLaOawwcl~` &[Ue#S599Ye>pWd ),ڲ1XX:bdfнSupλE}DzɆ%:L |گ2 `!4:'Lʫ\\ʓi:|CD}?Dup%G tP(νc r B뼲w ep&Cszq 4=eʍB.SYJzGEB>XW%D%a{#k jG{lD?쨭z@,Ñ؈(!k0)=&i[;D(tY@CBV:۴y)q3]dzi=5#)%U(jemr'I>h8"Ud`wi)h[-NS]ô.= [n&d<üMGRYIkB(o  z -8u^x6Mg5;0>yTkjm}Κ~{g2Ǖuo,9>P JfG+dv-&ӼЖ@3~'?qH1SsBD@ue>8֚+%^{~Pb 8R>#0Iǎ ߗ`zr,+&FXݮnh,KC%zB5jֺ>S YQ:&bV^Ys3B߼NW6߳/ݒ]fUnnhuV5tuG={(W^Ҏw^9302%nHz+O &LKi;mBuMеit(`C iPSPګR>&Exd3V6a# ̴+Ly}gfYΌGPq)r3-@`|BX+s1 g](aGd.ӤfL\ݫ%s"U8lL3}m0hn=?-,twJ1E"V7lV%ѷ64ysPhn h\gU@fo}zh!U]0m6uCb u՞`Vҕ=(,cj5z Fu)ͼ_/X'%ķIۜĴNs3zGVÍBڬAd[wj_iRLN VJx>#p(Âέǟc)tZQ58B*nZVUXXcҦBUtX_. |G.PhsNylJ=#r`0WC .T݃,HU5_:>@,OF;-= NRPl|τŷLDt2~3X57\Xk_B`٣ INLj0(@5j^>A ng+ *nws(%9_G^F%93prMͳzLRҥ"iݐq*⎿R^8e^6+#Ո1}=n\xkP/_X9d;Z3}f& upJ_ cATY&fkTU^n}F4+*͋~dHcԹWbe@4m:aSJY2uXE Up(j'9k -A.M ġ< 2+هneʵWDΊkN:{ƹ7,ٷ|h1[pw0WEJ-he<zBj\0X ?#eCd {; B Zqі;ĸt<9om'%Hв܏^+b]-hsS􇱑۫T2".,0U`ltk"־U+J?e_C<1 *,\^=V}`p:ˇ~]-:?)9 `Tb AYYc@Zl\ÎbN+m<QҕJ<[Ϥf< ô#z]DoCp? S^+ifh<&.*gXIy**uTFlH03Qy׭ΐ'H񧴹ͷḘؘ̌(ft.;N~'8)2#pbe'*KVtjVqmØ}WyXݗ%~O6 ܝI ݋q*z2Zz\v/2lLʀe!1k7 [0. K_VoN&et! JKKeLDU !nyċ.ʗF˚^`6)|xip u'ˏlNKVW&Tj;{N~P")¶\"赻ˡ.Jֿh,E37\;2+hk`<ub;ͦ-75nh?h꺉GFZT2Qos,u:{y&Sj򈽜M9F’ߘctSۧe -/b\} SAj KU[TL7fX80<;?NޜwG-G?M<i&jh{:? 
!j9K%]!BDn;}.EDo1^Ľ6UzfPPtsۂF%ޒزT5Ϡ,s83H!ۈ/ tkx9<ȣ;ǃ_>pE\|Emj[mWF\Wt< Ps}Sjļ&0,52,CʻK6s(hfU]4=e91jEUҚ1f.P;z${2i:wTZѩ:̾lm1LO˗H`J£>%)ySVEY]aqtVe) \`֨qW*XzztqADJq,6#_(5Psʉ.oI/ԍ6W-$ew;xqYM"X:} ,7X]܆ٵ QršrȜEg((S_FfM{羷,U?AOc k+}=o´?#ܘBJ.H죟}+< &~˛KJ(1g_JE Oq.IX(~`?Cl-><,P "WJ)!1AQa q0@ᑱ?;p2xS}`_v#vGqil>72~\Vزg'~[omommiSlD ."G#.^hWnwHq2ivI;k$QK5j53 xp X;~61Y{ smX%݄G|a'~7]lع?_\"ʧܼ.FZih_s]y~|+v取vu`6Eeaߎ:XPO[&(y^ ̿yz;b"p24CJy'BիcNg(&GcDeͮeJ[AN=vE0'K6Kxryd!/,E`M VW!9~s-l3$O&\1/mۗ>]no72۷SVZ:/Ĕ{!KB͸_i|bhB [;t|yVl;mk/gO^Lΐ[YC7/ $cb/-cL$~ XŐPoLu+&Mwq/@&AͬM0&dn0udq ΅%wkF xI~urc'9pIj[E_!7ޖ3?$ u/pFLO\y πo//: >p6ӄѰ=` c+/c5ǰ}_v }/x|%y.^\02w[e_'" >#d}ݴe/'9=g~.\Y;:^IO/?-?3$k L'h3ٰ&^^O_)<m\P/(#S@xvz6ٟfOnÐ /˱Ay$ qy."HBY3\Bn]이9 zdG| f~pıX\ԏ2/gC<%Xy>>Fq,F@7HPy'{.vrkVf[v7tlA6Yc~/LG@M@gA5x@ap%.,ן 2fg]2^~Gђ@pH'm` EgV"{Ap̆q?hpy#̺ܰ\c=SF]jϟP~ ɞ~7G9qxOggX3t^KYIvݴv178%2>r}E@tqr]Rё693Hl 8ǖc)yc&s%3rZ6$3mAZC}Og9A$C% ͂UvZ%bHr;nGo#!Rˡ7pb~4%D{#D3.}^k<;s6No\3,<GE~ߐ'ߩsg2mCHcQ[Bc!r]~ƀݎ*پq7D%;`Y#P&1u?YLi~gmظGe|n} ->؜oA<2L僭#X9=K#xot;gܻɣgK>,X6'oacgVgg݇5ry8/ 7H:'`a{}yg\NJOKEͯmeԖg(k.o|Fd~eR_{K쑂d(=d2ls9s8N0{8OɁgO/Dmߏð[a'rߨ:۾^`[{ؐ9&xvp$Lu>X[? }_P,y'p#6c}yz? 3;=#X1|ec/^|m><8Hb^mNBs&kYЈ೷ic-˺H_|>"93yqI^omϗ]$HYecg\r4.t6#nl.2;n݉o8I^woCu팹˾݌܏2ap Xٶ- b\gxެ4618ۖܝO! E.#8݆:FGoUx`\enr^GvxNژx2RlG9qdm=M#|K]# *=8K&Z _G{JoKjGLx\6rY-ݕaȷHۖ-pCywF.CC݈_Rl!!mլvW=B1 `2}䷐0$|>)m:ii.! eNs`ٶ̠%e2Gi:vӶ,ayoe7VXrC\φd4Ix۶߫oͻpyxF'єd{1zec,I.snC{c&a? υH_%' -[r9=n%lmؿ-q b3|em s=Dn{m˛pu ~-np/[>eɿ|Ϗ]2{o$3 2> 0_ޛ}}ۤW_L輺n]v>gaٰ3%όg>o.,}}8bdY,~?<dy2Y~hq3 罹9:z-k9/#_r ; h~ _hQ.|/2G0bhdGnl ɾɏ.L t=q/rn.=| a!yy>=o}wo;7kb[;unH69> e{?_/rh=r[9{jw^O؜xC#^1i{v GXy݃ sۓ#`%;hs{gͷZNDZsoѾ@&f#f!`l[`d3/]xG#wq7^7^ؗ.?'~̱n.%Py|525p'>'N|G/8cvg{uˤ;y,g7&c9<\;/=9=M,yڬ-Ǥ. |g8YavŜov|e=FY= ;gq2O3#N_ς6&!1AQaq ?F! z9{d . 
#'=ϩg 27# w#/Q0 ƣ/Z[-w&͑%/6wZ:ACDv/n7cŗk7"Hg,K8 Qi7vfm޾2>/#l]]o{e3IK ?К:m4GRۅ| =<_w2eYͳ- d@0f"7Ahws\yt P};}Gs\urIPFo&)k l_7nZByu%h/XM)+_N7Ūp"dI @yec$ِ*K"pm5e6[Fmm$$,%,>m{%Rg ;*8V/rE-f{%:k <E?퇟Y_g_Q#xJYF+Kjeܝs}v͘L˥$"mf2>cxKܶ^he`m~X5ư8_96.D cݤ0~S˶c3)>Gdc-bvߤ#CNlv nuh=l&;vu  V@zƭ]~X l{߅`ۗ#nּA9o[-Ֆ ˆ|s1鍘- `G볯Q`_p|/܃I:|x;#Ib1PJJ+_尋O0 !]*5p}ɀ#'n^{!<'=3&pS|Hy}HY:{g92SWF{pZQգ ڃ\]pfC􇁓7ׯ͔ԞC?3dm^Xhf Ǿհπl !c}ZCgc9'$`o pe}H1#?ݟ%>n 8Ιb|%C\ `C?el}ɅZq2la/:gb~r`J\R՗^_驈Xf??ݳaD%z?F{۲t. b,m un$}ڐɻ.Ì`2 ClD[DYBm'Q`_rif^ƍʻct [laY}ܶYv Zağq#@ί3ہVOQ!,Y;vlHShp8qԟ Xvq/HGSix%Gn_sb/̳ ;g L<@99=z(I[%I˃}=d$Ԍז~\6pl Կ߆r \cȽ؟Pv{*&a4_RuaI?e8!܃e36_ZlW@%D2]Yn;}lw20kg>dgC@< 8uzsYwe!ݏk d s?Hk or}mA=\׭cA^e̽+e!D7b0xCf ۇ-MdC.%n;42"GXc6B<3'6Szl3wg[ 叻Ǘܺs,@z@/'}NZ3^7l5lCVorY,/1cE_vpܐgo=ܹ/';&$m8v/LHs1̏0 ˘2oAks_}sot[_B㍣Jf daR6ӗX<"q!OB(ķ >[8\Ӷ XY#݆n倗?nm{ܶ1p!b<2vDtM%ߩ.N='yY_3p@>K+˞쾡;g܆&1:O>8ϸG/Pg>anߴE3o{ , ;rB( 2g8^ۦY-<}J*>u%3es|Dgm!!R LvیRY/!g׳] շ &2/ 4#L#^6).$|0Ǘy?>HF&x9{n>G 䝗sl%]qp`V8m/_v\t*=4"ۅh2޹`L~{ ;%y>Mik>mwԲ66pJo1twLo] /o,.h٥W/"VWgeyRl]?r{;6"{%?Iu3yo-}|8v`B^ٿ 7%tއ;9.q܌vb@OEQJٽ{āɎ[ݵ͞nn-t|kAe+uPk6\1wMٚ^%vpo}}pIk>F}s~2r2^R9?ZmĚ.c/8Jϒ/<$.[{>|8|\<}ˌ0˷irM\͘!b܈ A!Z>y?﷒6vH >c[ G.?eo˶~aL߆{{g '"T]q[$N12gY u,H-.?l)}3rvLgG8v亸M卟v?V Tvܟ AR䵑__, my&? xd}6i;fhVC;> ۞r$],L0~[jOc^˰^9>/gN}e 9;;9jAe>ga9MȔk-v}[I<ۦHB߸6~~ps~<\^X}I}Yӗ e X#v`d剓X+~O˔o/p,dtt9O,#˒|s |A_aӗVP8M5|k]m\K-vqĹzq\s>޿F /m?1#.Y,6B#0 ϢCӤyr*K).q x}Mt2?~!~Xt9t-~6'rρ 4fv|_Z#*:yv7=!7fDI`tnU9f{?ۖ巌ȍeB6זg~_5D. ss:Hq)!1AQaq0 @?{)= ILWry\p:έÔG;MM@T#A9) ̺5:9) |IWO9H}}8ֲťA8 )g_tQ}ͥ{P{j`I $铼)ciuXHA0zV> >d2βqFH? oC$4ڻY H7#dǩ xAG_⁂! |ktSȴ{E4?3$4 )j2;+ )y&?p ^Qk$CWicn7͂86k1j(! 4Yy b{7|Z':xBE=fMo*@: 1DDH\Ql"CnZh#W}N5ScTn^ a*=JT9dv&W$ptPN*^$Q.U*񄼑 yD])h VUmvV DHXjJ3>!% iLWG#_(91@5c;BI{ znc:PݗY 90 ٞ ]xϗu .a&,wUCOX8M;$ÕfBDwS~{:njpxlC>|db5tqi+eпL=VK)t#ZwE+NGֲ T)gvVyu8pc~`/'8]ُۅ^Ͷ? 
e,LG^qQ `f*!UP&%I77QR$~0aðp!'|84 0SX B =1)n{>s+gdua;6P<ӌe|,PRFC 5‘4@@8'MdHͯ4Et BL8K?^?sK ה uS.dӷ&G]K9<'K lC(E]r&DPIR`ό;xI6fdO}` 6jSk6·4)^W Bkؠ4<@({i)=nD%qoMgĀ<DmX4)p_ sv`+ʏ @@x="@r-ODmey9^?,.=!Q:sGx0-PE|X&S$f{D ?@-~Ff 8h'J;о?w=|`\T<p#\Hn5ۈW\hla8mڕ[d ;4cve{]ֿs|aVa)A5/ $Y9OVpVW93췼0 NNy8$@1¾]BHq|(F+Ч%,ýдB*qRjr"hTK`bޫc9j C:{#Oa HS֟G˜Qש}B 92 p"Wo>S<*X+,Kh UAhSC󋪝r|tb)A_dL|̊mxF Ś'@]?0BN!񔴠2D#q T$O;w|fb `+0+X z0*3/xkcsVg5pWN&;lu  ' D=$Jh=/V=rY);ؗ֩=Dk|<]DU8?6{dA?LkU(w,Ր?wf=liv=qf{miIh_@zƜ"Gm4J_v/*jÛCYߐOZŝŴ -^ 7aDEvށsnb CG':5AfmmZ1'PAF(xvc8w87\/X#?&A2k6O-kϳ4lgw8j7b$4 a0%7}#y^`*^ZuHYB M#? fofV9[xP Mǀ- TXBkm~8g9+r;u82<8XS&'pV{kzxvSxvş]>t`P3`P+ͬ[T m5n7i FVfp@-yG/x pl%蟹N : |-6(z hbx^M {/(ƓS.Nx/w|?o9+vU盼.y͹uaL_o8=hƝ^sB)t Tw,WvpFU !l _lqJi' x DSO7 :_ ^~*F*8a3x\'rW`*|cB =Td*gS|K pKCU OPe3K~z?i\g(c XXhv=569.|<"bz8HP%0)RhLV'EiX |&(ܘ͞l\!UwYU~0V4ua VG!Pס }`2#hM&{d+7񞯧9j;"#ٷ@UW'[!IұB> j ӯ\ Qaŵ%0 #D3{|_C 05Ko%/8=c!c?͢S;Ċʕ0:88SD:$ZEӃ"䙳i$| IJcp El,pJdNIK ]=-ixG NjߣfRՀyĹTg.: n" ӛ3ɶS!\Z|r)|r=RDU,&[/D r'G%ps[ +}'6QH dCsUlbc!+8XJ(zN;#r8np  V$TJRkKY݌mY+VקG^F'5q4FpFڰc GɃulc>847 l`_lqwP*⍏oBo‰) gX ŵ D!Ua Uڼ/wYw&Kġv"t'TqvLKЂ<̳`j ;ߜb} Âm)A(v:%BUVP0 JԵi]eE H?cFdF|3e<}P(P*X=u +Xo/p郁倜 $Ӝ&%ƒÂK^sT|a`<`|򐪙Yj dX;(B_79p;[Np%@hs @ =p黊fμcRK;&7%YZ}~.#6X~h*R^ '>0Z`{94&iUT1NT"]À23HYt) :vTy[\XW^4`BH`^Q 9?1#iHuc{wPueySA ;(vN1ڈ!+/ Gbq6 FB(m^7JLJ8taÝNXڧs ;ʶH@֩"n5EC:j]a?)rXvq "TN6s;H 9:L)q-5'=p$J,z|*% 0w7А9~1ؘ}<$+fH_6Hhfor ǎr8VR#H!x G(h|(6z>'Y01"m:f߭@B 4wa 7n seؗq}H>'_QXn9^!Cم* -w"UC{9>3_ s]|ؓKr[ͩOЕ$ﱛtAT$wN҂ D()A$"(z{|5'0 ~% X 3X vAH>KGFDǂ_k5e|9h0?kэphWD?Ol)Tt*z,oDiaBm_{o1rP]p|bxd[g%-p,Ǥ٬BƋ:_ ] ) 02yX9dh-z7.0f/cš8ʕALt;LwQN=\778nlȼ]~@YPf!}'[1";a^Lƛ|8eXˣ6鋥=]"Iwy( Yε~T: }b jmj8©\sz4ڌ&.N9*2VU zfx ozP4`@΀N1dcV7pDx &+IdM nͽZno>;fNj;qրNGb ,anؐG1i$fWNX SXA*n\0YL'bBj4ޜ\Zc @PU 9*fӤr9q]k,iJnM,;0Q9TӉ)w na@}9RÜejA/խxO\uq)iI=S ^7lFDwAGGF\6X ׯ'd'Z`~\1!u|Sln I}0qˣ@q'O+Zˋ!6h6/@)! 
Eӯ_=%|f3|}r w1 #وF3E85JcsQևGo`ùE/94w/Cxhz|@zRo12WD b.Ě͘ιWbtF4Wd U]zG,xyͤ+K+"7D[ܯgy:D\Oj`}p `D nKl,Ӷko0hܚ`1UmND[T0˧h[:0=aXv'])NȧpX)3I W {c-ʓ_"׀#,=a3ظqj/3'>3( 1 7NoZ0Fh{GV @Gd!19zM&ZXtaC "$'ZXH@:r%߶9BoWpc -6VLkHZy|hߟb' qܢ"o^ޱJP;aRZ)|'&F[?d!UyNIa@H7բ<:5u>1(pO)7)Q}ayMjMge$a.jk*%`Ojj55(;<jC4:j-Bz 3Sw3ތGϮ;}/܊Hk:" ==|EW?9zN/>s]O/X W/э] CZ:`^yOo]}~0LCǂ*!XB^o.Y4x,In1uuyԚaz?YhboTQ# B^Y=phݫ8H n؜N*b?0o ʏjGfP+ vƮBt!*}~jx}8q7x)PY^^W= ΝlƬqċC`g"еSE9?g1 /Q5<A%Ÿ_Ȋ,ޮ|dn=pMo0Woߌ78ߝ\%0/Gh{ 6|E6؁o6RӉq!szRGmDx(U}[H#_9 iCBᯯP$Ǧ:I4gb]r4u8ާ}3k7\icIghx&B;5^4`v _Xkpqڋ/?*`N0%JD뱯\S|fOM~,)cOM{ boH+{Y.%5sJ^vvB|3|LAv2=Iֶ ʻtx4CjkK+]0[ePlh ѭQQgGo9rAez(ng!RS~qB!&20^&sR FzJ= =xyx_lxϓ6LP~_39|G7=;| HY| /Bh ^Z"#/׷| 9 %/%$ q*%įϜDqD??0)iQW.Ϗ`[QSKM.cH+_p#Cg?NHjx ĥ(:{"PJghA:4{+$ B:f콸2"ՂC'gw/'y}\5cB-3$JX7iU% G}C ^%(}=iC"F ug{<'Ozw OtBXHi!6vqVGQ" p=ݟyAz|ӆQ-}jJ!qڝw=p( /+C&jm4s mB퀯-Pb*#Wx8yP?8O&V}SfT| $K $7W&[Drj(/? 5 }kuUմ6r*| _`.7~!#v6f2oxwI|av[m\/k`2s aa 1֠.E 1!#cI؋GEOb1 WC]+qXLexO^#\L~֌T( 0{/Zj L+iZfRp*tӥI5!v}U#(vG}(!lsƓoIRۆ`s-^᳷\7B#FA/]RS14x!o~2QCxn F,ȭv|8W )wHB|`AĈ;`xS"B8q&mp]Y+G*Ц²d|3?~I} 0aQFi 3XU5W*͙ƀs%CD#aჁuZ1P4 gbRfS ȴU9FDs ▷C@P6LM6=c. 
n=?:(#y0LUR-"$58pXXscNʈ/ºnj`HS'9~71{Gehvv=Ζ tl4?T)k4!8uM"Cj ߍ%R.?.^XnwqAX6f%͌AIw/\'DZq"Sƪp3f*O%?Y&t}3-=YXkqbs6#Sw"?!a3'(S >1olÀb :Qr%f[OW`o>Z-4Nh+s`SY:F4 D@ֻ N{k7g߃~B%q: C*؆R2#h l:SCbP]iVK\*A:w 9{>3&^5gr$H3OLA.;Eǫp!:˻l=nGj[rko ~ @0۸ cw(gkM:!^~z ,̡e?c Pg;TZN )to:u"1;W q~dkApYҲ>uՍEC_\i=b=SSWqJю*)"p3\3lّM%  ^(n@v-tl-\: %}XL񨫺zy`[2|kYZzr,w=XpqhM\93ț8* $hYmZ"(Dv"ʺMo7)M8D cF.]8uP5>4 #!W_| f J:mn"*r$b D{/,dV/,e""}ӆBl.R1<$C(yDCS˱uG|rSМW%sNVMd!t gFNmMMKlo{R٣t!P*8 _`W4֞{G (؏M"T:ۗ,o{Z~F @C[#;FGEHGNnL-f@^1"a :"_lG(\s-%>׀;,yx;KXrh,z7.Է0Ɍ2~5F%/[rǹy98z42):P|PQ 8GE1@k4xp,w̩|* }o -4NG^w\o",c``@$z8MkA31BwDl #CM;Y.`*P$P7ӓƽqb.xWyv {wl֥{fa2\)yYPIlN;OOW6qO=(AϺl{\$A6!xpCم< 2"^X'B;Mire/5Je[+^oɄW=_ؐ$@wLXtA_$l2~8Y}09GQlw"fP=SX]  -qh|]Ɏ!89M= D ~N2% IGBKrK^ ]h,Xmޝ+G\tH9P-jr٦1( vB3\Z $dvMcmJ'0^lkx_b "v.?8JP Z1iZ,n0ү!^zS3OÏMjElQPΟ0 YxȆD'Sߊp $o` mAj?Ŷ4]rj27x\.g#фtr~_^1p!ȿiDTLAY1=d> J#{ U  5S`1d2Y{mtZeo2y!ޮI&xA>r)KgUN=#A+an>^YyR6`bDEZ޽Q;CJ"&~"(_$1q?1׌ܑj #q tS!o"{}"-'DO Fw j"mL/pSYq0Gף8/7yJo|B FLiA7 9AظB,fD A:HqB.c?B}fƿ ,fB|i @tҚ'R8 W\-Wz Ub?p&"%::~w l?8>wqgBGJ ٩05_:X-炅yv_MT߹\x se96] q\<dI/U,?~L W~1 j`LMgZ߮v>qXyrYGX>y׮X7!gڛ j+[r\4+jI"+pY31y LK|PC:Vk^jU}BfMD9%x3ѪNų^&j5=Ib+.w]L1x[:c+ر)ԥb]8 jNHa}-RXܮ%[kPŮ[BIʔ{j㏃҇[c.l1qk 72 72 Inch Google Exif Version 2.1 FlashPix Version 1.0 Internal error (unknown value 65535) C        C       ?уuWrযҀ }8=:Mi_@#ҀQ'XzFLJs``Pbc{|Z֖5|IZM,)=DP ^C)nJtcYi04&4'R:=>G~]@Y`Z0\0#I.1I#¤O5x:-qeTrޯ9>w]X}xfy[\ZoN^6k.ciҦ\ffWnEK= "b#"ƵϿPs~8s.ܖ/NRz4H"lW|֝rɭ}۬FG4VU׌w>;qkWo(Hgu-dt^cK5o^w<Ӳ;kܴM@ `&.Q;gE+m~Sjk_MO7^I^H規Ռ6άW~w3}{aϪֵxvʝFWeLt5Ykw C+d*cizn_CewsӋx5msϧbi`^u3v?'ѧFo2ߣZG%Tzղ>?Ot}s1z\} u|[|AU[g>{KY[e&Xy&pWJINUgdwF7~.dKs#^}nXH[ϙUZ%`ҷJ3_nx*GW~W$+ӕZϣ0H@Jb9n7{:9ZD/ߖsl_EZgj媶SI.<ޭM\BMג~;z8T\INVzӧYp׆_qy{`:Y˜L~TOt%5 mO;rϘVU>Ct".[LN]xnt~qz5RDDz} ZFzwEWMz<ҵ6L6Rͳݟ5?;u~z2SUҍ.s:_[#E8^W6sHhQU&مZըDq{?BT>WM#lQuiÂ>ciisUu=_:av ;su>uV\~Yܵ>4(p)،o`euӋ6sgVmY^nKfJwdsquN$̺^W<׍xY0(LG v3o4",`}sg;+k; 㼭}חHvB7*zӝ?ynpqvq/=xyXIBmyZ$qnLeSXBCı[[<2gotDg BœC J٘7aSm5UNg3^m7^2r:>̛½SA45v#k:ng]>Ӣ{2*vJtm-$*yꐃ~ 
8;997|zsZR%ӧhהaCyDvfR!y="MDIg.ɆV|&J֦۴UyyרFLRsDҧK+MOK5,8;N_.ґSPpEsThnK\uOBL&yK fQ Ϫ])mj 7?./gjfuN~nozMb{4R黛Sh5|U!V짌Y-&;&E;i]VzےfQW4jC:ޑyh4CD_=m!,#'hSfӿ5vSuSb.Ol5KmWFD!r~5llĮ$%vzF3 Njҗ6וJ9o**fS-*kW4puY!}Sh!jWMc[58xlB`]ZX[]3N9 2JgW'}/i~ bIJ )Բ\8#VӤbդNZ,HMns&-S%(F}jy:XjըrJn1Fg)$ns,KtNxWafWEG4sVSVl ;CٜM9ТꪯB1~T5 S-E-+] ҽd(Y9 0Q TO78Eg.* $NԜ!;I }GYE2KL;jduF~Y{Q/ KJd[U@U]}kUJ`d,~Wֺ/9ru 67cSX{ZΣ.nB C"Ө"E(RJҦFs&*"IAsx+by,y7JQ90b!VWg"rU8ڴ!פ 4kbஏeVUvgQؖE3V{ҩE[,!ZJsYnt& j׷UVy3::rɽ.g3qIe.?Ÿ*K wu$y5]'it:snjIJlVg!Yͥ\ޙKGK̸+9mSTX0&[hfnٝx r򚷕\]bD.jhIW異-Juڶt~2p2s7}gȿSQgNja7]I1eSmenmC-2И53uo6MS){u;{ORX(EZSZסܳşs^j'֢_jLJ+ѱfULO-VԵW%so_Y:%+22&ʴwٌ%7(гz]<퀢j2d?:ƒFBj*u&l~ib骜r,zRҹň,nJDt <ŵM+:/gjoZ>j|՘.5DtPĈΎ%%o^g+ۺh%"FzdϻRPĴLgY)eu|[Qo)e:y:ռsμ!ttQgO"sJ?3GYo}}Ei6Q5jxYTP|xC< .3/owB3d?{-\"Ƥתs5L2LfVY^ﯙǚBq$tYcR%̉oY[g6%=sesk ƭbu>:`zӼoCi!~X0fmݤjr|?|/4FPSaZԳ.vWX6Y)0ơU6dkv'<ѡS$ɥjhTXmZvj썎837Q+F j-Cܢڒii1CG'D- e}"LYS;66fcUr$lVhFAN`?aTUmQ*s6ӎN׹wLbI Xg%U'qwio!,Wlfh;hyU.؏hUnsbO [VeU=׫*Mlnr]iV%z yPNe6"$E7fi[2E*TcۜJͦL&R3M͆yn۱)4t{\ F{} vZtor;Z?%y?z:L*y*۠7,:wTMklB!֟6 3zXBaѥ1&bݣΗ|[_mKNvTqFfklWMkֲή>=zm{L]_NX˙~ۦ+F3ͭQ/V{ӑn_vfr9\_'У_uso 0?.00?At=0?_zd|9fΡ-ruLcIk dyz͚ѡL)78Y]w]ZUvFY̛фZGa 괹")r^GVrGuswiK&63ҿ{9w ET6g5:%5ϕoϫsR|Ӣ~S}W@ucϹ| K-/_g8;WKTsg](l7ά{ #s'w^?cke'3AFn&C[Ț‹M259fcm'e]?x[RSgtO>{-}#'Z2%^gqMΎnÞT/N޸Gk=ȶ&џ:_2OOT _/o1L0i[j5sŭtyofi*niy]̦Z9yPyTS3 tgѣ]zbjٚ-^Ff^\nR5w?uF>7sgTeOo Z& T:N_;\sTsjܖX/w5#q;eUV_ƆT ATQ4CiWZ]Zy[eN?S8ep'ZGzYH\gV _@SMJhƒ.43ƞzdG4>K5mks훤s_/;N2%%pܗҷ81یN.l̟{"2LosciuqfЫ*Xet:1T敽lHw\KMiieh7p{2Oᑩʹzen2t<Ɏg8z+{Ng^XcgFK lVy8\cHcѩ/'/s՘CJۼj,:<.)2480Gݮ\)y3fLaSz\'2Hop˺yRms]LO7c5tx շo;YMk9bx7z9lu`Ί_|t]N\Vg2=?Ng" vwWNTޓK<׶c5ө+5BZcRF#ϩ{DYkwv񙺧j_VJzQfƜҫq3o&JϐK ܡ973ܫ\ fUݯ*aoY̩z*9ӛj쭾\ -ۍ2~&g-]E;WKYWQ];Rsp}USg(IW>k$uvthnp.bܹOS'%2OwSjRJz,\ zJTN9V/էUͪՓfx{r=]z.6yeTcUӣ%N_]^y3%ծ>k?\١~yKmhV 1CK1C&Ԙ >/OOCɟ=$}ޏJ!9ykKƇe|{F*\Lk FYr6[l;ưډG\_ϖw,μƷt1eu|y:QJ$QfoSXӦ^a{xs饼=319]z7uFFWS ʭk]7lCV쏿zq`z>fjt5~T_9w#LχؗѢ-(յ֠>99rZ9x~ZU$W;xfgAٲ[NAkkԴAǝWsQluc32[BX9yݗxߖfss_kV~(*brYպLij 
˯|lYE,vTF.18KK:<:6;t)h2x<(rBgپnkJs=l3wt犝JGyhw3Mb+Sqiiqd]\W[=oz?MT)Pf0!͒YƔo=2>sN+pw5:]g7k:6XN1m3sJWTQb1+y.m+JFygǼcrTKm9i~9-sLrŝ}#\yXf Zkqgusᓻiuz!mbMI|:VDJTe8;$VOz6858r=5&_l;JUr hܑ,(°7Eޮy]|nT1(M}\ϲy{wZ-q㳝#TΦk[M;r3%")grb)qy?㾖eSmnץ+Dt;i~aKzMʞ֔80cVd,3μ̷Mmj ipVrniN`z?Ѻ60ryNn-spӸy㧽׹Oީ9xsk ѕZ8Hs{6\9Ly~aTƆVŒ!n~ly,݇,0*t{7Ӗo+`91!"12#A 3@$0BP`Oc,k|BgjG(`"NiyvjH㸮?aYZ','"%Waw²Eͳɱ$&} 8doǓɅ*1Ȣy=#vڭODdq;Bwǐa,CkQ!+r \F!e`ty #ZY2kܒe!AY!^-g&gE"qPo d_vэ[3$zNMQ; o98Kc :[v; vs+ יLQE9,T9΅^k"9?Xԛ|ҥ̎'na/gc")mUބ΋rXgѼN|҃ Z2(fcUhoՒ|?2K 4#r28vpSe+fptNDS?U3lIjr0^^19UB;}>d˪#wۧ*9^GYQ;k\ T+s(zF哔eW~B;D|ډ}XH33| ŘlN94I"V)07<2pʤܒ l뽦"A&]?o%]L*~2v%{4㬏x/V!d"!MJʥn[wyoJ7,?F#kcVӼso],bUEX9F3`wfC 9,x*N69+) ?e:RrOKp׋o;#2.L+$Cz̓dW )۾pV 0hiF#TNE:gp8sG4mcMƟ:W~2GkEI"T=De5Ĺ=B}1)#7k aiJ1%n7sTL=DeWiB)&O܉d Oįv4(̝Ƽu$%#ytF$ìeY)$`Yrt+mFVfՐIrRYM14{>QL-A3 MHSEG+'ǟ/qK2ڈ:wY: .w%OY^Am$tGHy["E&>|&*$v;}9?_s"r3N$ɑe\#sxs5^n-oeHyD\8ǯh445MX8'Jihߍ3/OÓ=/ BjWʃZ1DygbVn -UCSjW|?XY3YXiy(XcNj PU5$cD"FqSrJAЉðt?M|wlcYA5NI(C:m><;?gk6y$Ypq= 40EN!=mZ%Aۜ_ LH-G'9]a+ۑFGIe ш0zoQe?ϓ%hƴlb4|GT:~1c<Ay4??#RDǧFCVæHEg+0=dWsY6;k^eT &N>@ϐ*D\?A n TclQ#:i>u0#SNq*dXp,DG,+wwM\vUJ#o z%a; gG`D-Meʑpxo]'*]qh=m ui,Z^ԗv9+ZG$U217ذgcuSCݸHЍO('MXv:iĮ)NHaɷ7I9ֽ/P_k/!Łt2G*$TΔ{²Ʈ? KxSZ8zVL#^_1-?64l$˓Iu`e+̹G$y,KjI>l`ueǔQL=ǗJRMeJ#B96_#GPIciSh놮N$ &?ɆG?Y\rOqW9ώƛk1q0[໤wIzB*L!j(*(-m9}߁;+2Gܥ;(Th7yhILQ{9Bt;̈́(EI'UUttyTI0|yl&XGij͕Iu #b VX ΅۹*4^HHdw.r>G_*01Yjw$9&6ry9;%?O՟)aN-(Ljb-m:u簶Gu_Ezf& zzRk^D5-~Q#On:&!zɕ<>̤w(qQ~#UG'!H\~ynDU;fu_Ni&.8I`ya8r2im3Hgܮ^=peL~$UpCe0O5}6V21It4q|c;L>Ui2%nv޺b%N֫GP&]hLb_W%>M_" E[{TR7eid>R[8^c\d&ϻ)[$R¢F'[s cS)#>=_ՏL)eUVKJ笸Q(9IH3[iɽgSNI6Re! 
!,JSq%R/KW*pIȢ'LDYw$j>InܸmWeΧ ͊/A5߄I'( {&-1ןJw+"jgۧ=ǿ~ЧRX,G!c0rQ2@2Y}\l{ 72<~ᩤeTskveuqEe=dBU\V;"v D$fYLe:D#r$U{02"4q݋n+* zEa %ka"2c[صb H`<]{6hN5{11\=pIu%'![N6G-W.4?`B#qe$q$K:zl՟xcMU[lejh"s$+M2Ԇ=ݸc -Zɢd~kUGY0D`tWepgrĝq?]OꟌZRgGH 5 U1yOꊮV< $lR«wɼW!mI7T]Iz2Fc(V'F9AO&ur0j&jf4-L$Q]Yd.nv#rLUU؇85bMC NsO`.7,W/XGTT$9'L?E[2u>4DpNupHG ByJSC F ƕ;ٶZkxu*qڐΣ,ZLqTI~ó WAk ?a"zaGPd.rd(xɦ/LQFw,H#IYb11L+jjܝH[z`E/UXrZ9>"&M<[e缯 "+tBAޥAP[+tԹÖ Bw%0֑I&± gW$>7-X7E! ex*%9y ZJ?Oa+F0z!9h B)'֤vUo%|OGD[.\l$5FziB6 O!܎Cg t(I}pAjfr@2•qE1I;-kd"Xͅ՚{ٱvXU ]ٗ*q4R8=dR2i~NZMq}`я)_^N9̘PED^:1ҩU?Q?muEVn%eƜ6&n~A*rIge`E[{K*ZEt_{=ہm!E+!1 AQ0"a2Bq@`pPR? 96S+Š(Kvq"+R_VrG?!͜?ȳ7E/eL$lԐ/ Wb] #EY9h|-1A,6J(SO:4&)&|:c-M_iy,,r9G3_d搤,b>(CGͳIt+L_wr"ٍchm1Iĕ5h|n'6)O bVq8NyJ=#.:g? dl/DTb,EfЕ;5}Dg!|hI&&g#_4&_𲬬PG"GIXg3򜈔wnƼ!q#+DwIɣN%~YKƘbĈŖYG#)ؤ-t8Vk*M+Nf賑e~Beb]dQe^_$9b$Df}XzgEvV(xDJ?(Q(r!%b,^) ^/,Ƴ,%c5h/VՔ(Z,̾;eqNrEɽhG+/7Tc-%>og[$GgV~E%TQGBXZ^;>5EbQ8賕t^N!h#ب oIY#(yP@F"+ xՈ~/9_̾Ea4kC[ (k 5kâ]"qcYB>TS)P(g-%^(!3_/!~X?G?8ГJbEJ%GQBB(Ԇz]bYt'<"YHdC*LlWxOƉtG yC,LwXN,Dz[~%<6u".ݚ$=RŊE[ hb콈2c_,b$((Ċ8㑾!G"hx}tZD\Sr/,_XKY=x^l/Ǥ{GIHgB;8mECc"ٻ9X-Mn zdaĽ ^:~.#wY$V/OZ,}1iVLB-BV(ecHƕW F$t,]31tz9+3/;=Gc$uФ-Йc,^RE^/Drؼ.?)mI!{$"rElC:]LG{*1!ő_f:Ogb^C9 /YTr GoΊƄE=Gefd${z"?2(nlb7DdmYC%'-᳡/xe+Z8 f)(-=vK8lhoR劏e:/诳OZE&94:s9.ccd^s>6)h+"XvXbĬb?Q5glxņI:C/o(L=졢D"f#-D{$[ ,b=ZB"XXcKAU#Q9>'hkDdG~ݕQx."˲ǏdFCDDPEa~Rbg)Cgc\CcdMgŦZnR(EU٫ʲP(YchWSEP6I߂%  k _+k /B-X,D 9 hrhogУ6hjXy%C"4J["Loū*v] 򄛕NT˴=D헱e%⏉] dμ%:{m}%*d _+,Fz"{%ggt䐾VHl_:XRbϐ[>N'z)_CE5z,hzdq"_2q>NB>CZ:]]؝4V#"F/DZ9r^iأ/$Ev=]쓼S# q@q: C?ԈnU;*DY+#8h,UqފEְ{/ $NVǭ!+!DBՐtJC'[d). (xS:!|.Z؝mEz?d3EQeB/v;./r,I ~.2B{{({zŜKdccQ'ĻĻ䄅*/>lxy{ >?6x*3Tv(.t6|^RՌiv%{8 $bqm]H"Ye,!$߲'Ix$12d)Qc./=/c=h^J&-ggvʮ";#2˽M!wLǢyK_xw#!Lr9PtBo+H~MC|k_6nIZ$}bI D%l迲Jȴ?ȫg- 1=xkcQY^7u":ˢOFV'΋cr5[ƚDE~:9yt.n='bxhbT{(1zV_^L.H_b{8 ޏFW*?!rX9GDH,LxY~i {yJ^ʦ4^eՑWn9%hBo㖨L'5Š%!l~E-%zd|]_L,?Bx{=ab%P.tGd.q'HA8 b8 PEQEV$uLGذ^i¼?#C=Bb/C.4ɣ6ЖđG D6Eq^†"߳Ћ)J/1ؾ]ة;%|vDCr>-D~Ǧ>g-aaK ^ER? 
KŠinOg5ЏblM)P͉ZO_UО|!:& d~?'aϐ!y?ؼ=|$HB$"gdI#/=z='BCu3!1"AQ 2aBq#03R@CP?񤕍5~ ~xcHMf{rFmᯢ驏ӳِ:T-7We E"^2䗡ќ9_Bfl?OWEAnF\&$kB:YLFm#݂Pqw!jWl=Pֿ}H,m(g=#6ь!҉|8"B*e5_Ac$(9-QO86;7m](Q4tw'd呎i/&J!z| ZқOc(&{%>X?hk\Tcig̔ٴfGr#>جl%LtjC~Q$m%iihծiݛTbi$Q!闒~&q'?"}uy#QnT{34iҌh|3d6"ZKH{lStCK-UC./͆-FG-oվ(ғ% `^1'tb1H]mn$cLS8:[&Hjڟ "F`2VM CM'\DPɵL `=4F^2#Bu2zq|ҾZ2R(hHF2Yw"mOmSfFF 2IУt}<ZpM<k1!6ov9xJy"ԏ)MxMP)X;.ĐEiԓD9=%-ܒԥkwؽRߑMrW*7Qf+%;s%,cbwc=FR7 m2E%NWGWi/P*FK7 Ǫ-kBb$ Jr}zhFՓNPd}eGZT{%͎M{٧<ƞ ܿgiyP](E.qX?~2ŏdtR4%^MWo?E HKwD$WIܳSc7=Fr)sv!i%7rQpiNG,5d?nMI(BɻvNJQEt],ȏ"]+ԇ۴|JuFQMYfnFuc&rTJm4CW_qFJV}}(1 VY\ї\ugہ}(YW,]n.6Ӂ}VobZDVn,wKL#u/5oޞ'fo$D|nł2niH}C"F&&Z]WK}[,ne)S,Leos Ǒ`]5K"<#)gɛ5K+#x&"Ƅ%ج}DW/$V~!t_M[c(p&n r!%ќ\#tPЇE%eJMLv6. _HF%?qeحȱj#eo},]W]QeBM."=(E$-.Hz>zЉu+#] КvZ7[eK6&Y) +̞G6!2-)c,C8G'+k+XfHL Y,[,eabenM U"E)Q7(R2.ċ7XT9tL\ CHs^Mv2y7Ӣ'>etHHetq)"OߢX/+' krI&&7Fn^]Bɹ'h_,+k˴J9R,近=75ݒY3nzWg9&dM#n,NƼ2.%4OX"R|>tN $:"FEKp 2M2y~1v]>dX=FSu;l݌q&Zl\N辋9(QdgJ,de|{Q9 Q,$ߑH!p> GK$$~ pEg Ji<):% ļJ%g7xdQ9vn9< yMhx"J'pEۦ,"rNyfTo\ i8VMhI4nTK["Dw uGDUc8^ LnN.)˻!Y5J͢PEr8#bbvdpm+%9Ҩ(Y"Q~h,arF/߁E!7pڈ^G36c)rQ./ojɽXٷq6IbRi6G[lJF4F-̟$4)O4KCq;Lnls{p=_'r# 47hӓݸQ>F#t]碖F:jeѵՐMdbڤjZtxM""i3BTk/ o"ql_mF>M4I][$.D6n,fmJM-GdѨ :5%MԔ\M2R%r4ec#5' $ƻ*D.CLI)d=D ڍpӻ%3,#/hM7&v_#hUedkF %~<i"I#D1Pi4JKIHE5QV/#M0zmI$hrqe|Qnדű#"ɈM!ȳqbH%@pOC'*pRY"JEⅧ(cGN-d܍&8sD9|X [pE^O6:E7҈J94LrZ7Y/^B-ZLR~Q({d>q:KQ<M9Z#) e ,TA&>z'Cb%l5y5(TVNhٌ"8RHț؟oi7A8tQ6%$n7UB~ ʶiweۘӥd&iVG s{R=KMnUf$jͬQ ,䌑qΞT#ς-W#u*F$"+#rN|u#"/t>EYE sDkn(J^$ݐ}ħsi #ݞIv;4i6jj*MRLխhM=4KKNʡeYVod#3d#֗r"Wmv4jiJxl ccQB eG*n[M=;٧FH 1Y.LBElpF'JD:')5u]R4**ɦ>eEc%%#ob&[ i<|4VJvhȞ[!ѹy߶KQȊ||Ll#u1ݔSi>DIRHPGI;vBN\HdȒID';mU"ZOknX{ n|y7 BC]Sm#TFI!hԿj&9d^MOWqݲ TmדWܜ5"٥<*MVZ*Ҕm[9fJ5h'Hӹw!IY[Lڒ4[Ĺ"HHM,44y aU܄.srrJzy\OmiU*bj&ԼJݲ0ȴ6Dz1fF|KPֆEQcF=ZA)dz^ԏs*pOM,qN*Ds_VN7=!C|ѩܔ;_R_%҇[VMTIԢE_)?"vͶ.]BőE8j}Dlk$SjVͷI7ڍ>c[hi9";,qYse~QwFz|oqH]D&1"ͨhqY~VH҉F#CJMMj6D(M(qm[!I}=ɋWkSl%QF9d6B{GO/j|lSۃR)pRDRd#-DtGMNnW} -SpfdM<3'ou6 =5|D{#͞ BB%K=a]!jm!O{=PMdjʨ}%#VV[ScFi66#S"X7<1(qSHjN8٧ŏ!*7 
FCWnɵ44rۏ#ɤ?Q5$]}5q#1wfT%-CӦeO{IqB"Y9l#3֔w[%6K6HQvrJ{V+{vJ7-M%7E:7SCȣ7ds.DoI/VFӳMdt850ǒ-I#{rMzM>|."( n1TyX[HsiI(#~ z{|B4n:f1OGy,R"YXIHS5Uѽ7Wˊ=f2Rl\[7*#2NW krOSb%qi$&%*M}oq8F&hG|mô{O~1OstF`\/ЦɯWh%mWoi‰7Qq]V;}UG%F6QEtY8E"1c,oٸCtjFdm]< σ|n}%cID)F5QDV\S [&i Fdef}܈U(ٱ'ĶDqG}RiF;i6imF^~RZ4VԊ[G$ڕjd`K{~H;5e?ѥjRtEc$bڦ䂨WIa.+X$!p/ӟd& nZ|>Hx.B@JKMMFJ=63ǣ.=*5$PY<=ӷI5~\x$EJJDv?GN({2A\Mv[rIY`K%Q(Yd~_$d Y\Qqj^J_͊JsK;gqxf,OR*; [ɧ,Ը"5xc']dx%*EвA15 i%))iCbmmrԹ=ۚ4ҏtMfU|vKN*D ԓ\p''O,Y3{ɽFtM-"jNwodqfGF[5dԙqYVqd٫ZoqQh(6GFz}NS&hHmաJKSwɨlWPwXlӒrl`mA.F1d65gl4vL߁m]U{lQDp B5'fPR*I8`$Sb{!|uq{[M=2OIiTME=,^F._W.]֥%Vu£B[Vi鸴E.z?O2螫ombtjGqlՉDt}kRA4NVTT[c,Q"qgpk%)nFĊfD]\Y'*%#BnC1Ǹ|I˴n͂EZYmQۢ0vM9̛GR2[XTkh|~o*[ѧQcd4*!n-Z=ص04 Vyb+"k+4᎖"}u(mM5"ZwћFh\S$*DDxcwu ~ӲIFn_(4ꭹ5&Q٧i(^ERk6Evj+4Edӿ܎Q:1Y5Q |ӥR*-9qJ7 M"oj5dciI\b&iD!Ɔ4"U`mWM%ƴv}5IjTB_S%-ݨ\PWɩ,*)K-t:,wY(ֶ՛탣بQڬ7w/$kxcMM'tGXT)bO W!ME"Lԕ;hjǴ{k#PKQ/Bm:ZbJwdM)&z}-si(R4iLOֺ>Eѱ1+!iݐJW7Ģ5~!.E\$e2:,VfhZvkܢG64 MQ}Or\R[{~ 996ص'5}V*$U5r96MFcGM!OVUCDc{2#dQCFBE}ȤX݋]uRR#C.HiHcid6E7*\VG]?/14mm%Xd`WF7sYD-4䜮M6O#8miV#jDe,<"L2:+'!rF>F9?keP vĢ6JQAaNo:[~G,Y($ڈ$~. ((0K54୧ vc ܰiBleBHܨRLŢEgnoq/1f$fk;zjEF;pLzU'%M7ݷ-fTͻUө#vHd:<LP$l d$#RJ$aJ)ӷc~HׁD_7T.D>HJ6n%(˴{hG[Պ_Etc(H}]=D^-9Ow%LznDJ*J٤wK4UEPw/&k7j;M1c gn |IquzJVrjQd6PN 'EdUa7G)-7D4M5K ~D`Vi*mc+/#nFPWEPl=m!4vH{|v$h&j/nPDHɵ+-f RONAʩ ,(G"i="(w(X4ӎ#H$3!nF,й$~|~E]W"BF2#58<3/Q}؍C`4 5 ?ɣ4"oKEF}y!A`M]5$3H!1AQaq"2BRr b0#@3CSsPc? 4\ )^m4>DZgBi&@[+ʏKE'SG9)"DD nQ$ /U 5udQW t#qՠFནjOጚ.?h5s:y3<)W=ؒ:^>ZCȊꓭ⢒XA/Tj5uR@R(h:r. 
îQJAj$VBچ˅:haɼ\bVH|W0z1ZL2׻k_n-4^fV H{\ЃF-gu1qk?}b ҉ #Äv:_IםpHuMRq+?I〠svu%a-=fya>̺D|A.LA2sW7RRlfK|d55bQAt̎L0`H@# ٫=tL}ɴWwMOFϟrnZҍ4d7wh'|Zl{-[M!D]!4y ݥRiK>@s q^y*<)eMV.X+CpђaB* #%%,vO^y\:^E ?^];7OE>dRYMR}*&WzQvfO쯡BܙO .yPƕwgh CחXl~ھHB XT(\_g{@\yR^U|G4DG[(}$,]y4!ǝ8Jڐ46 =?t)`GJ%RuࣴuCz.0zR(HHf JDNqJTO/ $ _~,G^u?C?A<;@R/!/JhޢG؇d` c\ҥZUj_6) SԌHP-y]!@LfNz \7xj[V&o=:GRz<}}U7@.¯9N*R㮎Α>]E:t %1eHQ]xj괂!{Pfؼ),D@Dž^0B?B*f A&~Vv{{(~7@NxU8% H0yBMiTImZWc8y4y( IP:@'h!B ʨThgH2 hGrCշz5\}46*6KZ{CO☪D1^p@_a0)XӍ@K3t+͜$@56BE#n+M2ѸU]ŝZד,V66cvS:@KsT IۓN~z|Vj .gcSSR#%cgqVyxtApFZJuY]$L B֢ERD 4.H0.M* [rɻ٣vTäHM@/Pϳ/&d0/ 5%@] +(JW~# UH|D(Ix*Kκ/AԮ-ȞKEg<`g \w(]'5)KߛQ]rJM~ʻs֭??wzSn-.+|W d S\[z1Tct'QEoVB]"u&Q'w>Yꚉ*2LHk&4wۋ;y}ƌYf<uc.;!`Գehwzwcgq=&!a 8](J+g6$ޏH~T Vۣg& Zh=f(8@+)%2C=@DhKhؗCXPZvӺ.F :#yٲVj&*L=_]ɢX _SrzpGwe0JZu;J)V"J*RՉ-ѧ|6:Ud2[K VL5`HK[;F"EF6&v6ut8 $xQ[a aR(ı;*!xwyʻލ1p pa& ~maq;;0yrnR%ph*`Ŷ;d, ġA{d-K{i~;k0ځ;ݭ6wT}o̚ohXW\|xRSQY*-=3wYwWxx Q&L2\*:d2#_&x-RwD\IS4||<7!)AxDXbO$~LUo1a>nyoِV)vlY/ѷsXÊ7n;> $jKnߧї&VO.w$@}} d!|CѳSu},OZ:uuhݜ]>W52%Ίw<,Yi^|ُ[l &;z1 FLTG ,w_f݇ѮVFlORAme94Z/Fʹ6SnM,od'Ͱhe̓oͅSΊׯ{ǝ:<ܝ۵oy154ElNZ4kMHnû|F[I4o)i=o!#/l6>luqͰxԢfVOHVJ6Bڬqp.0niܺ1V;}k̀pOT׺;nwKH{yi4lWy-^k[_+Ozw˟vUqcuܕɳW,vLcݽvݻ9OJ_T#U63uO66 L4 Ql05XlfVBnӺ?fՓ!L Y7x~ 46ܘ17Ѡ۷j+߸hm `-{F:&agNqui[뇛_59F.WyaT.~]鸰ѷY-[x7>ݵߓ&ݍ$ın-~VFnхkю\y#/rgnHoD$tZ4b~pbdmRk»̭d߅e#%z]7/r٦MFYOѓe/&)aBy5t7Wu]ћn-5_NF~[M3(nM%m罘ة1-N;Hsc>`@5g6 W5фaXm#僣{,Tmwfl߯vV;练nQvovTߺK vu[Gɯ^Xk-Eڶm{ûXnsll|/Xlףn%MmXw}ݒ3M{QKsMh?Δquj7XҾ 4}Bw75jKtU(0;;蛴=X$)lErbDmZտO)7-skbrIHC15ժE+t'>LMRkFŔ'8ݣf6;lucC#mK-ncgyѠ4[QmëDٔ:2^6M:5ceX1w-^Wmߙ!گ2 VU֪dWV_a9(UHr`gZ",eWͭ,J̮T5E[& v67X43薶?l)h"XVazr-<쿗X=±kpB7),  "Up>E~ FwBauكnϕݷR/Sr ajj{.ɂƊlV\ jVa/ >l1snAJ)#I%Z "kvIW}ul.kgEJ;Ɇ&/hr%S< XYEafC$/is[cl/Eo|)y4+.0m#^#,A+nB[!0tbL+}lpNlNʣ au۷:@yGJ]XD?xE"F^O['8f̶aG{[i#WtwT?NlSٽ*9;up穬+Τ?׫ɍd,*L$_DOo]|#<>͘ c}9XծH^562޺;/2H/ņd)ҎK+u5J4nٷ_ٹ6{5 !)ݞR)]xqgP )O!$uУ=?. 
a C]1aT,DxLyYXk]E3CD _ZMH I( T|;ciIwBWM1izO_6E@/˛sgRU<эƋf a[x];Vݾe~l7W;tP5};a%Y} Y/J[nMH¯ݽݽţ]$Z6]fŅQPXКr&6}E_mLn"Fj=^~(CKn{ȸy4gVK~X:%oĴ /͟.6Nxc&q=+O/&Z}Mi]k͝vm(`Qf"ݕF=V||^5## bQ%cEwh:B?}$~л˴84)Ҏ_tL7]]bA`"]6&k5)\ȈJѸi*]oGNjES#([!{ faZn/II0LmG{]IΩG&MD+Ʉ1ϸhE47o_rZWjVpx5߳6ɍi[`yqe`}>͐c S`ba07&#m[u5շn2Sw oɹrhmO.MVJde Rh#!7~l( :Os;v*^l-ξa>j}1`+>D>Lm^K anz#&{sj$ƾ>lMz,[rߛg0N{iyYK@[Nv uYAxeq} 1 zXN1]}.gw6>ųaT&FNf,4k!ȶcr{ܿyYkmeυ1=T/\Hw ! PlNneM Ԕ9MsS|na0+&l}61 탬#`Վ\ϫg⩒c ўu$F7Sf#ՃRIK=e?^V[wJ3 Ϧл@ GpD ɗ9ɻ{zo6ߓ^҉kGɏԚe}^[:7>LtŷYYqߗxM=ׯ%mǸsmsow0'Qs=Fдsy.4.J=X9}v5wr?|9o&*Q/vmZӋ[jNs}uϒɵaaQ *:gɆޭF:2 RXnF͒Sq peͲՓrjν[w6ovŷno$ѳx7<"y zu<zsdMu[ m8Qj3;F)_VȽzyP;|-d(#g{qV1ܡ6-eB X W41npѝyƔ,t"ThEr-M:_Oa[" wSC* 0HqoFkn]03CV۳ySiYзo_VS_%nW3+M'I6X6C˔qc^f nBdj;ū*(l!R`@{IJ@_gERfw6z|O #=g\X͝3Ou߄eh z ak7kFl&_"K8JHoա V×Ih\ F L͏,v0\90,M].LF^`z1DѰ\U 26;k6K<+]Ѱ)M=.NO6݆g:?瘜72МzKZ;ڴDQ/"Zx,}~sՆ0Ʌ@U,?1҇U+s?39eGn?)>Z!/BiѠ:фمcm-H}MgSE*(jdY2+ ܳJGYN$cUG۴vNwCO uAT0w}1I &ߕ<\pZ7Sg{A#<{ҽNaGC\7s*&]q"֮Eubgu`.k&,p'ڋ ,Z0v#o~sI2y^qOH2l&4CYlĜb8ކ7anі0_j[X6p$OQ:ˣsrqeڈ1>{݌. =ɲB۰o6'5k/*Ug٥ۨٴ -SG0ciT@nҤCA^L5'bk |c<G|rg^ ZyO DկxC!ɢ K\o k U3dQoڂt$2VN_n!#lOW'R氲La̰ta/, v16/2;-bKX.j*׵+kM+unlKYV~)F}?*X]bٙfP+}[Z:hE}qBL76BS~cts^fA4t/Qey RٯOB:j)+ cCϣh{Ų^Kю{EwTwzY虵ٝtRXѓ?nMċz2ٲ-8<R6%rc@!&b(RPE `.*ꬻ]|$ (i6\nTeR?M(lj4Hq6h!>('vʘR(rT`@fV rV𬖂?+cX}1J>OHik J8 yCL'VQv&Tjt YA )J1nt֥ûzN;HjKD:V^m<:'3^Aw}Yc+S^&0V5*{U:Y t5t9K `νXOv;/1б< 't R*2) £Mny1)pʗ-]\Kfr+#,R' h,mc2`  A2Y[fTgT{J; jUFv6A!=޷1~ V[#6]&깼۰]0ewPSM 4Obl`WHAufBaḷ9\۽-Md}bɄs5!/RC>s{Buj.kI *ea(R6/gS.d\bk؍#!",E9c$,p/n 0U#d)|VZ,81)iEOR)2݌+zE7k֣/)VݝJsx ѐHҋҫsgR>(T9V *a+_o#5O;Z-f$܍FSyaYJڙz I(2 XnVɞt^{!YMqdbjAͯ- i#Ȋ\#իHQ P@1{CYg/ -e!*5cUx*s E.ar" Ic/ʜ^T(+qHo%pQh& MZ5eBN\*2$e "JPhLuFH }*pPY%b ]P>[A!4RTtg66.  G=V= FEŹ6mGOMHЬϹ-gˑFV= 3B0JFu|De!##y0 0*'C?b3c5Ùe1J Τ)1Ǹl,^RI6yoVxpBl8 ! Bib!") &0Y~"GlTbso#({,b5Ƅ ҍ'`l5B[lcHyS,,tbgTɢylyķ}JT!Fb.Kt~3&ʮp֦dJ\kղVMx^sΏˌE .9x]HT"-*֩giR(  ^:_ "9 (̒/VJlb@VJ$cO?-?3s-B8;Pζ-{d|%Ab,t=3ǵpzl L:j4? 
dX/R0խHFE!KXW\R#LKT#Z5s6̥Ѝ&"^nT%Zlj*ID"Xɬ N5}'RJ~Rtj񀌊$ICݙE #DC$329hp %Cu ޖ\ZbztD�Ky*z0Ǒ;ɝ3Osc |IЫY~ʭi6IʪòBc˓_sj"f>,X'B raTɖGɡ}chj*&pMl+mXP!eRruҊhAYCɄM*tJ *%(Vf]0[2ZyP5vtKНZP gr%(azk5,DKEy@@ܷe`/<RC5zp1/n vG-mŮ3{t!J`}ju ՞av7-X[/srp[ۃtdC~-T߼0el,73)VDgѲ$@,,;J* Ucаm}zG5֜UBWh1vK[kiUX 9ySUԳD0?QMfb65^.|#@/EH3;cdIAZ ̩Q&0jUro 2"%7-0;߳ jѭU[(H:<kQ +PX2;THXȊ)ZzzTU3a=d"0@3 {m{XSbϟ\ O`ERoX2x1$vBp$Gή[4T$Bp"i|voGSj·,Vu2h/=8h<&~uƌ^]yQ~͌}cWW,J (PdnS:ՂxF7ZZQ̕7)eFKݐB@Kdcd+ܮ,j2>:MvD<~+) 뵨\((ͱ[tUFA*&WumR\  Lul JYmxܴ{\TPU֪`ݧҜ@Eμ%&3]GZ=cDd_bV:Ԛ8?F.TcPpU3BuՃu)7z@%i繶Czգb:׻K|NSQ]\ 򭲌w%pg 2k*ȇQLKߓ>dˈ(gb 9ѳ\NT͖NJF&Y-8LDב? ^t'enWUkGlRp^jzYw:2ݭ5*w{%AF76.V{!լ8uܷnAlgEN3GP԰KՄVIpyyЅmT;FAAS U,U*͐@snъ~nʼn:D)|4,>(;, K[V=zT*Lf)&.,+5F.-j[3ߺGNlHadU9ō6A."fF!ZA,v盳hwh+&d%ICE&=B·+Ftى,MsU>L[:,n`:`{ /$Zma`-庩'U8T hnpuoXX u!7QD8dp X^/;S+_]NY]ep>o(^qԖ3moՀtV9[,I)g R6-Aܢ08طY#py(jZ8c dƸ&KV?Hp A!MWiP )\€''@ BoVG /d=R[|4BR FFYʸ!5^rHWB>퐪0&0vX 3O!&~W+,u>\#eJ1߈UX)+$^H/M&|4 U(`4s" j!mǚ-]U$~0X!Z'>U`*dQ *OvJКtV@⢫F 53R "KEg" ]RsGjY)$ Z*Xm⊬Q @E)n^)QЧ0t>cf)VcT4 j!PQĀň).ۈT(kQ6uψOe0 tâa @|FߔTlKL * tFKW倳խiў `Ր%քEQϙ~%[bofɷLloa+\5Յ#**f5 bJ:=]1=&;ȵ<kbfi ʣQٸ8`~ `QvTh8ĵ|4ļѸEytBZ! IJ=~hI贼, (c^ɔFp稃)KvBXB3leu2C.XU $#Y5 eb*Jϧ{\( Ce[QV|TB`V((^%pk^6cOXe[TF)̂QA'NJ6!;m{'vESE/+ *;]q0p'4Z=iZg+Zbb,I2(Ub;؟ kk>'2oF? OF6>}JhHMcuHŒ(.wcI̔cp:d(x ۆ^VzJN:DIN}7/{_׵8eltaje"EhwO0 xB@[\-J=1(ʏ$j4X k}@eIWjΣM~J"I8M>wR4HIfW箌^R@#eX&U? 0ҩsIo,V9ɇ+ڿ jL`,EpD y]S(ˆSZ3!ڃ,z!*]/;1$-ET-tM_F H=N_Q/$1Y Rʼ@8zq,`E!Axq':5G4eKGYx] (ֵ1 %~lϪ0Jp}\B|_wb( (f%HQF)~fΙ#]_ؖ$o\pKTYTuUTo*,_@\8(уD)tA~")u![ A3Ja`y/K $'ٍ\&ZNGQI@ E)5#2LRV9I1 J͒4*EMپu^*(XHN JBźs\?h jDJlHAR1vB~AxoER)۸dԻ^$\-0&^.-'bD @T<2 Rui|Nupt©,x@!/85*1p!R R~e-D2EbvGPC*cXUqD$fDX;nSf/;E:OdOOFYWRQ(PoH27q ]%<&⣥lu*hz96Ln# fcjٝ*I#eO~ѴIs-7 H/C*VԎX `Ph3,]w7q2gU,;Hh&Rpލ#Qc(Y)QL8펏Q {';^ԙxJ95zRAҫ ?F"A% .-7fI@&Dx`'QEC`> ! 
$QJ[^N13ټ1B&5/a0ǵzT?`<|) rB3,U蘅ƶU(c$ #h&IZˎ8A6!JG|MlcGRTMd\M$XQ$(T*1Rjr<.KTD-F]<Ó:lYY!Tʠ.%"Wn04kdŪ0abֶ@K*++tJ׈,5e VTOCE'&u@La0eLX]ZeA@5i@`Sm-/B?IG )*d'WvDE- Y8ZAeM,#P-)ʴʼר)lV6"K8y/;l ic(1pLAhC<"'Y'?8IK Yh?^w$E3%$ںPEdFXV `1<ͳgpgoJEw Ce{l]L?g~Kߨua XYaRK-tϛ=FymgbŕP;a_d:Xb]/&y4nZvaw/R[O ~ ,X@0`_lŀ0g՜yѠ2ǍZ1m!,!1AQaq 0@`Pp?!dQ'FFG':222?"r2?L2?U?( >P{{_>Q2u4̔9pnX;X7GIc{ 'r"Hy?orFd8?~ݟߌoy?ːBq(;rE4iSX[e߿|vw3?L-5;B?çA6"|y?d9U)a^ ?C5C~~Ɉ;>C&NIB=8}>7t^__ ?좠YԄRo%@Ula_ROC/}4H)~ df,$ M`(%K6 P!?`FΛ=7y~>'xz??w>{~3cO o)FV"}~ I|x쟜1 wǃ패GMcnO zij8ݳL *4g\E@ho,j/SJU/SDb;f kP{!,uF,$>c_7y:9X1B|\ W 2$!K_7qۙ'y>sœ<&J;1 Qf? ܘN %F @)Q)LJp=cA(SL;@=bBDh*nyFH} pP(*rz~φu/ix='8;yՆ'? &{Or47$nO6X]y|lSe?XvS[ qdֳR.mp${wo?9k8=o8>@ Ιp`Tme⾟/X<")7Y=[o{L+8~?S"m7"볬󾙼>=t9տc:~?t0!D/(,d)Sk@Ѩ]CBd3#ub5 SMpJdYrC ABXQݵ$!x0}R^?WGCxxO)B*nH_49?o D+#%TSE$Sdû@8wҴ.Lͦ4 (P7$K$<*aY[Z<\DDd@"jh"v"TGp\)][IvB-ƜuOV??O'^d3qb}Jj]龰;[߿xQ|.E ~Y%[8yY)ff>|x Y*tARكDɀD2HEd"ƌ 3,>G;g"0]}4lqSwd4EPۏ}αP2T;ks*$6w*zLڀE?44 y%kBQH2j  3{r64aO5@⑆Abn_a{/sAy3Eذ`%SGX:Vk8Y?(rCi<@@%Lv6ܖNaC& Nđ>C?1|hE= Z\yHEFW:M?3Ph)ԙ:A[W~1"_OQ>Voq1%μ{:dN-@# d`lHEx*yE{h$4`,H.=Sa?&G|_ӗ-E/Q{%\`i_}E 0u(4|l]JTun+@bVgv:AD%b}#PHX5$4GN1^ L9L]22,F4_;.QH%gcjfbV²Є  BSo@4Ua:K*Qvrx[8c2T!6 jĥbR+TME"B S'ջe `#@c G @k;,&bG#QCKgYQygM*~Q_m_/_~tRq&m EY'9a+*IJGnCCo'/_~;퐀*~y8:[U#rZ-YTwY8#Wy$Mp;,3,G/Z;OG-3M\M5&A IrE=U܃K /td lpVw]r;#>b 8'e*>D ݜBAIoOsNwWb3FTC KM\L@RPuDc*G)71(| mt":m^ELuEP`5.JM|GU󐆥pw%(a3 ݵ{fZyZtqGj֣EYK&0k|Z>|dz>;5LǭF>w=v뉛<?g= %M8edDUtD0sÃUN̹e`q%Ym\*%B,8@9=k,)֥{5\:oM *u^H×RToF0 &03ԗxT)J^r0d Q+#51.*FsP"'Qo~?l + 58dѲ@jϛG5UdF?ٜ$G2"EFz{'xs5ˍ;zh{^G{}Bg5k>\dj;9*>Dj@^)6퉾$#U1:„\5?~sW[22@8!fWo7>O(eE0;}52K=!XIb4F9v%+~xRaTD"&{Fյ >~;mq:g5ÔD*<5 HWD([#U/gġ{ hvqc AR 䇳R 4l=FmIF`WX0CeLkedY4 뛂k'3cxq:j9i<}5\?GE/8RZjg}Xq!H)Wk?\$̽> a!as }/W~8)K1j5D,.6)F=JSD<AѮ5B֭f| 2a"'.̉M}XNG.go0\fFI.H@l(hԜ*!Qb#9QH!2 H@ L m:Wſt+>Orsb76 tt1DY2F% *H Dd vR4[u˶^R+~coS >ؙ&f|qr'?}'Z؉3*N%:hleڇ&l}Q;9Fgu!'!W5GBxYjsW2':p0G7}y<_?9̈K=w滜MH9MKo"%I߿y/Y?8z'9-<7/G4 T?"6I߷%ur`+%ps>9!G"[OZ!_/TWj(P4b 
}E*^t]Ay|Y(#s뎿5j9W4ϓUsMfFɚZfؙa(#y0Suqrir;+6g0}?}qØ 鿟͇7ɉ) W>9A_˓d<\6%c|OOɥAa6yL& *5r#+P1?:ڎ|)_>1jcKKPG0tF 2afB -E Wqm xR5Jݎ;k;ɘ~#f_xas}Myٔzxyj=.*wV[zDw_Z̩wO}Cξ~i3;TtzgxȞ&X_qu씩Kz#!ZKU}<Wɼ5e_\j8Sۅ"m-MzUGE^p~l?-<~dc<n*RWxɺww:*/z~5D+ӿ'#d ƅzXfR6R=wdD# /qѯDaO-x=ͫqxWIքԁ |`W3;~^?}JAڒjd%(h֣;oм~q=sU4w'1Ҽ!8=]fˆfW^&+4<~Y>d& kE.g5(υ0|B 쁆;@s#1H2k`ȼr`Wªs=\r!6DcvsBf{S@--p8,m#̛cz/M Nf%sCau0HCOeLl([;|WevRDeN| $U-gyN>fg<+P&Y53fHœÞ*=菜^I0j#:Ի9" ߆/.#B_i**<;-ܣA@%=gyrŵό OK#Bvvt 8,bhe ɱB7"#+_X %%x᝹>§ <}?7} #a>xNUNM~ӏ y0t|ywrt{o}=/ {9ODHOG|/uls_ln$𧈽.2!u}"wq#wGqCӜPI,(n: Kbw>׷). MS?#>yN A&/ ]Ls$gHuHX&߈85 oevFk)c߼߁/+/Q*^^r}q?qzD@N'h|K?%?/xgukGЊ3ηJ*T Bjo:?lM?|s83DMzYXy:u󖉍] Fd g#r#yo_Z+U 3pw_<~u?~־/yK>8ςwIz07:w (+ ҹ @b7䳺vo~LC:]R|Eo*RU4j NK',q d Peg:0bP8a@ci3_YjJWHi\z^-M4ɲ;7%`>#ԇ3IȜI\ڷQ(nGG: ?` (q3UpPN>>HOamS+(;ҷ~4ckZ/~?Z!~eX/Ǘ؟E;;oy7ke+Q)-sr͵'$.FOƣy'op+4!w6|O n0]Ӟr-լ·E~Ѡ0T#z4olܟ?]t?lk?ίI`yq< ݽQ)UIm子~R: i-DRXQ Ѯ@LrΠ}Hjrukpe#-(7!il7&&2ÁS0f Y^Z0bY ǷG}AkrnGC gބd?0e@YL|`{mzgrOu)OWOHOևGn7$ZQe%ƛf4)˸UBb&`LF,7 *{|eZX!lf |9px. [u+eiz6K޺\ilߴ¤4Q$.0Ap"ޡsVx梈,]Pc/-:bd(4O]30+!ـTJ hqS9jHJ9RN6E_< a5>T|&lJ~--l6vq@ !~ t^zN7jf3]^"MF'|ß)mEv+s8vg>buY%~)_wkWrx:K_v:_3So}3?N(O YQkFސzF-0ZĻYLM,O鱊}aN[Ѯ ulɢRӏ'39HEz>6m#և$Pgq; 5jbi>Oa6FAsľ8uܴ=`fOL"C"|9c@L) 5f)`Tm*O̟ ۖܽ rKb6HeEߓl> H-+p(Uߵ;UJfGo0l ]n ܟ*08JX&њ2P{"a ։whH2GSS BgfBdG)9mPu{pk,$o({^c1 % r\!՟Rx+떠5bjY |M`3Q_ʘWnvid\EF [$@lɲGDPJ8tK,mf ꧰G8# )SS=%Pȝ ȱpnbn QhW ^L|qj84yݚ_]wW߿YK"uⓗfx?$OBLQf[o+REl\"/ .YG$AHO5#K8W].x0 Y $`ͼRF;ѓ:jjBJ6\u(//a# (3#d2laW:~a C7?o? ofob7%w4"`C$~τzo"-rst?t߮BB~}LswWy͠f'{ߋVi vzf_cQb5Y/rp0A:rM:KeՇ )sPIg_Ȁvr zt IY/әC4\˓npODQ(C#w0{Nx9GHr G"6!]ʃׇ"$笍S$6(TUqN} /& Í !vkzb=ZnןAM^nWoleU!j**XcQQ1ɘ"A".@nr>oiO_$ifp8 .JH#FrS$yVJPͶAF\_JB6ET[pK&xtMa^9p"RRj8 *<wT BlSjz$+>M~OXB[MT^=IW~>"@ -nü/PB%%4>lpBM$Ab!DK$4.ۄR%W>fD1?}z$u OBw&>xŶ{kQGS,{BM'Kk u,fjW,Yr %ܫDd#\g]LGl]Af ޸x 0L@ܱZ ʳiaЫc|мh,'Nֱ8ܞ9M?cHWArC\5x_=zw7 @T~ G=<9p"ID/=&Ȓrs'>R/]<ɼ,z1fxQS:hʰ.#CQ` W b !&i'}k6W꒿:Yۼ槊SD>-i'.s_-{_{[gdAevuv`拔SQ%n( [G t<EVpqa/vT"mebx'=Z8AXHhAޚ87Hv#Υ5\E~fF>O5.$Fj'֧ɪXGN'_@!;}Xw! 
N W7-/J d\< oQ?N{rO{^j7Rt'u}O-gZlJ%5q |ZDЭpu|[)E2Ҋ6_rT)yۛ|rG,,.a&t棙&kU,+ CwjrT d P,1D"&bN$~[mϟ$,:gԋiV3Qz%_C)0=G Ri-{8$:*<APםŽks7_`mo> 9vu0ۗ©P0veSivrAͷW:Uм8D7n1C bo8|@cmդnb>h䧑D8i1;^dx&FBђq! N8~rY:Rw:/W2)O'$: H AL\\]tVnʹM$HO2C$91SSn=Ed5b<$1ْޫF#dLu s&+| xnbɇO37ŹDⵠ9;w*[MNϊU6/oO/n) ˸U_FJ u%w0_$7\R7QEL:7{Ƨ*h=,ou%x[1 UO8)+5|U3$C ʔU9oPjx~uQI)[ yP Jp*Vd!I-i.M 1|nYt+m7騑YE*B* 1&.*l{ yү"K>hNn  'VП=^<(?aG#A|0jgfRJO|pF3jDsp4Aʾ_奟(X*RD)\x9!,< ʐr{S Hv" Na|BDشDKa*]}<a]WNz\w[L  қlt( 80|U'$x{F8R96nNJ3wΚ`+brA zS0=OdxOQly1iMpiWlw{4hMn# ..ƴ`;'gi ^^TU@14GI׈| 3*y+%)-F $!-3T"_J܉U;"E#i0؍Ig@. GLܶ#Ǽ@+ZξLߋBM!@CP$plZ%Y*tsU۴T;&6ν~HLDžx.u7I-K601?RA2el a^L_| $I)n%FR1s9cٞ:;O|B;б;&/ EҜpP?',yD9?O-aз4K(IB)1hKkFaϮD^$#|2~qog>+QW!:1D$v`(|,dj`W= dQtS,)V47v=#ⵝ#c|Ed;DTh<\SI;9gCgƒg{URrq>9q)#RZEׄϳ0_O.@S3 sZ Va}D$n4iQ$.prR*Ew#en^zeŷ yš:f D !]ػEojAF"x_՜x.' ?%,|DEԩH0g"ۼ*LJ*3R_1c`$ b y2 '0L T>c%Q4$ϠIr$"Ǿ|(gD}0 F'<)w)sO BHW/y&}LQ|g^]e)x5l-Oc$.%'ga ᄱ/i~Do'O^|{a'|`:yyfoH~3ow<3u! S rrg'bJ`O0)ux:|$q0>Kwd$"O~y$r:̚_*o)0ď^VVK{ ;d5Ow?W#c6[.$R-`{ z,HXMɯ{9ƨedin >9'FD6JXp==h|n$rEX'com9xZf?-P'ɘ|cƬc}G I#z&BWZ?Z6Ņ'Jwz~74|y<#>`W5oΞd|~;O7珮=ƿ? HCYq>V yy?g}/uJHpGFn_t?~8R -s8q O kpvcz:)3U(4663–smk; 0vDx06e+Ktǐ\ehDkG?E81aD\h95]&fwP|Y)& S_%6K>j|þ5H0%U~7&HANb3,=k>Cр<orc7OXӤenB6ōB%SNmpؖF[MeKn?B}/nN4X~V|j:ZxZ`[goI\Jd*C|FD$>L2aG L'<8)IDn)IxY@ϳz^-5E2Dn|x kq)옝I32o02GL8i+h=򕗴v5A&1pLKc m~PrnlHyUYxew;pK`w4&dNW`"bҵ|R|9*uRDDE8BJYldX_=Tߜ~ dRl7``[FM5EAWHJr!%@xHvoR02XnLm=̻La0^']cZCջ$E:$Jr 2$h`^14O#q ۓd֚u#}m'. Z$@@JyL{˴/;̵Q0wϥS 3>>#-%(R5֗ssV;L +2{l5!+C]&s )Zg-7:O f?' %f!˾8_:Bo/6Q(pѺ*x/×DJm]{O!>OX;&$pp+>D~W9l_9!oED\n gK=aYR|c*?Ow? xڡ_G\]dm!޻IR'״ybzGR~UyP?,vxHbq)N^Bܤx|Wee7 Lr#\AK x2z.2HU~7UGcd3㎾}Ux}bn+=ƮCB|~D j Lo8:0P &B$CHuY A7z|MEK* S??Nh|?1t>>WۄDTrdQE7aBXTDF* a+ 127;p zY"YRWMҼPeP^c̊8lAXJăɦ6Ebd4YxRIj,XbX8pLas QzSRGAGqD݈G[E*$++QMa{EEhOe`ÊP[{}qruo9v9.}XC_}/;Xi ++Ji~.B^&VIqo_G C: G0854[A6Ɂ 4YV&IvW((N킜PFKmLйN列J E5+œi0++4UvLpUѐ$#9S#t+Ҏ"2 \Am,k ~MN%TPeg#ھCϾ'̽Gjz/p/Cc 'jhOJEӆ\t4CS23t? 
oF2huG7 Nj' %d ~P>^dysBA1o'؟ K34ğ |,~ H +%B\LT=|P%sW1!m`\,ߎ(|Ukt*@H-ǗU=1/U$9tYr0O.H"VIHԅ58һ@SOS od Nοp'7ZW;-G7pZojfh*zLT\A(Z_];ފlXŹ"t R1햚N%꓉C`rP*K=ɘ 'VF$ Vl "Б7XGs(ս*V"&]*9 JAqsրqO;rϪ{p7OBl`=`K +,Wp$y#񈠒d\׫ʹ D&3.|d+ee|9:Y`47k x XL8BRxVb%&5G>P$AVҞ5;,5͆ FFs: @\~@D&p"SK -8LR ݄TӶ/}J\K3}s ]+4^1T:%{0q-tUĨ&R6b *O$NC$' tiqIO0y96'{g~:Ga(h99IZb,^9e#.J++Ϯ51)n܍WĂ@ }bX< S2gdtz%_+7nyW4xn"(^D0j A(AkuFS#o?OF=5 Lj_jEB"'oDN>mzdGE?ф8仏ǩGA$}E%ۊ{+t-7ʑ7L7c5$~C/f%zcQG&:w߳>S[>q;7sFqie"$'6p c4/w*/%aa4H”] J@yڞ¤+$d(B`%=s: @0vB:&IQp]v+d-?dԍG1bJ&.j_lHW94C^~n4fqi{aED$A ԑ#"A_NcA'LI!D|=Y:6G4ѰB:BRND@#i( *#w|/$fo];/K81xif: 'h,@x #|qVc\jy19K|'[DEfU]җG125qo JKH "M7bbLu!LGTY߂(|ؖX}7nE ,\CeIxn J,ӻYY8d"4rC$ 7!M|Udi (NY2ɤjҁZHID`QrHI?@װ/Gar,pnSd&ͤӢˮ :ءe`q%#hp/&-&5 D`A tLudd4^-=2{~(<+C(@{ $.8mПgbQ{2-u'rO3c뼮R N0,E=T^2q{ ~>vgn;2hYb#]F<\q̾ I/2=g}}zR&NIY)H(AyqIxz;FIh@sNLm1&;b%AjM)EӨDY 9/5')#jeWef<@H҉[xQ"T*IlI^5KB[%"IKHy3ځb, #c0?qT X @~BcUjloOpy~RQm MʛN$B)]6ڀf ]@ȔU!-Wq5}]T cPKs "cGz˅5!n̓x7tlcfQ,NNVjRNkS -c%ž^|alvpBZrDut7Kou$Cb1 G\$Bx§ri3zTޡ!l]fVJm I&ZTiȰ97lbs@IM $H.Qƨ d^@x-VF͠$;,!"%)1!<)!@EpO3q0YSRQT'3Dl"XYKJB4I;;fAG)v66~ZѮ:mdkq':/Q6,HX4܀sa.G'2X}<[G_Y0 v6wS53*R˚C;Yʅ,y@q?tC1Ϯg' zJNj5SED _oH)*Oh*m!&")G"EBl ԱH{|3in;[H#R MrgШL ySSWm14L"'Q|E:*dbXJN'6Ede*-s\ThHvhTK$!ݳP %AyBi-R%""P" 4 HPCT7x% j4nhδp5QAxQ6F !d: #l3H^]≒-T jD! 48ThʆD$&QºFDJt"Gyq^ NpqY1.ȀTmX0̈́,oLdiʪ$Ik-iQV4z6jnHI]椚b[nCl1fUAXƉDrL3l]lS( a5El %Y B LM:X DZnoQ jWJ_"& ibhkXn31P;anL e L\9L<Ke|N~>߼ Uxy eAGO҇Ncb3T'˧}Mq /p0%/R=\>/81@~񒫠O(RE5 ҹI/xG< eé_-<xXۆGY@,`%%,K!vhHv” oF푘"xEL&#A&#Վ+mehn8#=´bVSJ'PY KN hQ`MG"V#?+K@Dβx+Y " FGe'6D0XIzMhZL 40D<-ː^EL$l€\X;33!ESV ԴmEj p@B$$Rh#0|9`%LA%V3r@@{],@)S-@J&+ V0hJ*Zg0t@%.)#w)JaBD[DC´^lL*XEL%d}'UG}[-o#A >ԃO|1t̘Wu ?#(gO|}^!M U9Ix“k%u5y{G5;4dGC~PF,ĊC06H+@) TeaHK{KgJ\֘$A2 R0(5TЧ<7 mH/NRcLꆩA PXDB J ߌ-Bnc@0& G%N!IcE!Z"JPzmys`SR&UH),+$'S8d X<;ћp!ZFiCnhts@Qa2K&+cŪ 䐌EDŽ\narC$ITpo8V$䷹%ICcrrnt9%|0obe K&"ȉc}K3'NxQ;g2{P"ٓl[,͉Q>N̏HPô2R@Q=pwy]VP! 
6ѩD1b/gOVAT Loj eZd.x!l'oParIXm@&,>^aMJ(W,3jpl ,W 0 "'Lr6>фDv@ġ(K'ds!+1$5n6Fzۼr^BVN݌ ğ51iYGQʣJo^$1!TKkhcդܰ ,'WE7yu"+kf$)@y" )q(\J~_-g9B?#? bb{۬qIʮ59 y |&LN8_׽CD+}yeX=}D~kXopf>\!y4g;Odna%98 %PI,p.D+'lDԄCcJ"2GITDI 1J<`JTcIU@ tKZRAY(Z2 BhJ:'qD< Q:m_AS4"hfD@BWp v ګ xW$9U(5 $HP˰-O;`lp ^="D3,mDpv\ x;KLXVIɰ})F<'%!XbohJ $-=1p)14ؤ(/R-vШ9s]xi0Hr-V 0p-Cv.\ Is(QR YiHi36q4Ha$=Ijj+8*ȍfQx m$iӳ{/*nRY|pf[!J\\)G3 ,J9$qy,o o# rW\FL'*Xo) (ڑ'R!$8҈$PmKDhֵt6dh9`?L*G ?~~ ߀8aU,6" \Pue*CFiω(A ^i vؼJP˫IXa.̆LI)D 6op%# QU-[iй $^9ަ9c'=@əww~e8#+ /NBUwǟ~?C/kb&C8D7=q*| ɾ ;f&Xp,3GHU2ES#< 8G8ҵh&pa00ɲ( ̓k1B F`6^b Xp@Sw>.@"YR_ ypo$d40=$Fr=56Е2'-Լ*62"[.(E}r! UElpٵfK)1T;م')L`ANypԉ8g0+奋" yDqxSڝwB0rrhmر/np< v!z)h X8 Xc] 4K6 q@I!1ٻ8+Figcl\b4t>L̒XEO0nTrx""q5+zo` <Q)2 f0tddH)[ +gCO)B"R bL) H-x G؁e룦 }D9( ]{X?,HʨFYY"R<Ӄ>aX $|P"wM(3#ŵҟ*AI@"Q"3s >TL.l d:D)@w NKQgy0b6N"5Ԓ .a  5 A$`6N~ 1o1LMU >OS!؏I>y 觠SKG[2~}G.|qzeTO,g缇RvQTt?:f aC*\ľͯMU=KK`ʶ5$@@,q K xQڊ͜p(G 504ĝ&]Oef9`䕁RlX"q0lAR(`Z\Qm*6dC )R`J=~;Ax>aq*i`X75QiIoL\ DZ"$&cHeHK%8 bo; b6a0 (4`ڭD@-û&JXhFy%;xd>Ç[ K0<M8(4HiSq8 2bFJޙZɃyD^FfLa f.g`gP- MJd9vtbFdH!hN"| *JCf?aM .,O`׻q= o{ ]$c3DZ^#bRI#*QbLSnD3 !kQ%A FM=c! Jf2hP -ijN -I@&=A.ADWi;&'VNHJٱhڶJQl jxR!|-KrTFۍ0B~ QC^޳=gG)?\QizضS~q ;$݂)}}xqHU $l ?W*jmwGҿ4[ ?(b)Qu-046@[D(e n`$39Y^DFX)[S*#a(zJS1gLdS.v l@K"vL*) l2(ҰF(K]9h9f S&0Lc,i%*b+$&<]E& *YYC 1J*S;X]p&#f̠4D3@^em& n %tD|$f0gB΃B4q4$dTF CT.O&&R[#(. *H"% }*dV\U29J2d RTIBDrxhCLMQ}7a'W{X/H4Pisi9 0-4"fu ]Ė=I,`]0( Bmm_ 5$Rl5TzsQL[3C @F)aRl4!@cX0|^NmD;mRewY)2Q,d'LqE3 =lR2k86S2fl@!A&pdIZWl]^ao<@@Q\<Ϩ ~J v`ٌwgƧ=Oϱ4g`TH0Y4🸐8P/㘼~_NKG} Z9mDj4v +un ^YSVrkB!$'n#mc9\jO3ծAId- AtDQ {J#lQk:XIKHZͬ0 M"j"Q&6ԑ1Bh[07%DFV^vය;Oj̟@hrDbPD0nԄJ R!'hB D.$*թԕ2UAB  ql$)2 ͒v)0j  40s]ng4Bx4_h#X&mL\)B*v@- !Ȃ_CFX;QbcQN-Pcg F ~?v~~8Տ2Z{D/k!-I OW1[\:C1K'ȑ. tKhƧ$H%譙jȫ2Sr$)&R'h/x 珡\LNI4--u@5b ȉ E#! 
[2n1DPӼH6 w7;SiU7%[vk I6fD1i, Fӵɦ|E z?c,$K+ұL1YWfOd",Q PC/N2MNrvTV {^~ZޚBHiSR%wq5"6dd=!Kx# n{Q+1&6Tj Ѫ+G lRI;}\9H$X-l&ZuÂ"0iEP8ʉ 5$ս Z&Hy%64(JI3$ȝ J첈n!9C,3Ȓg[1ʓ%T0'e638V5cjn(Z +Ŋu ̂M.ȜH V9*nT}`4)蕇߰t@7E`0T4c؁(1vLxaNy4/2= \sQIp&Dtј% &e E`bKrv2Z{'IOhn+O9&(!^YOV\UVCE=( (4ՍAch} )/Æ@5ńUGN/eU6'("AWb '%5X19 KZ`ȽL)@25ubk`0|-ᰓfXQBF8L O$'4]SS$Rc5vhNB5'z2a SHڂ-%a腶a"qʣmF47с$i*+Å@)RS1RApEmwfN*oSPp. ,30㶋Y!b024ӥ1B%# xPclbIټZR@y{; Q,b>nyY1\pe2;'锇sӳ_rcd'9?#=egcyF&zN;_P^x!>' O?J4<[yWL}I,?SFQ &.HL%r1I*a&)/á^Z=mڼRͱb>঳We{q͸2I%J 0쑪׀@DqEX&5 B> TuV@M&<4( A @:jD#C1T7aS;@ٚ @B od׳ѣm-3C#}b͔@Nb7Ȫ8@&,%Ъd<ÄSV=Q.bOpMp9T!pv U6l\dCjG"MB 4Aj 3 Z->2yfN @iTR D1Xr ,5xVյ.t%i+2Cncw’~@34l k (\a2^"7", 6]Q$I4xY*T bpNYmd 4s%`#00enbf\)A^lLZ+QH0(mE! iVK.a9ZS.?6PQ%y@B;LhJBT>w"WjT"Cr7d ( P(M(%/4C˦"ezlNC d޿9 GK(Rgߏ93|I\7S"o8>e.JGF-??ϯiV?ߌ7d~|bI@AC<;Ȟo-J)%$d/!/dO&F,m =NN: E(Q%8>v"&(ĩLI!B64<$y+*lV" ̅b (ÀG"%:脍-r6ǔP`H6pdexZCيτPJBA Hx9%* Z]A% L)H ؉F%$d'Q$O!3@쎐h)B p$I!"J&JJRZ\O%SImoNF,*Njpă h7&Nʪ q74"$M9!hpr!ti m)Q%Q ±#.o:ՠ;nX ld XX<8Zo ^6A扴Si= ]>GLK)!<7ܝء^yo^ߞKdI\0Ljv&#Zt!\\Dhv8S3DȀ\h*4ܢڜQMӄD* 1ˈR,`6$ptFY 6 ␮D/ t͡%F@ǤPӂCȘL㻢a"P,3I4M2t"̖'chY@fy0zg>h?(>q#9!XOl`/di93~\5} _dOWm@|!&~Ad1|"CKڎ<  E+V!4X"@j%V R"q2F +&⭚Dfrf@ C. A/b!j?, aefBQF/Q- cfNlZ"!$n|2ŕilGn! P&9E{ $NlA0n2 s SodȢ98*2E4:pkjVI)`Z*`-UFl[ D;9f ; h4O13%* BH5HJ8 . 1BR$y2 ݀5\ 0tIHL hQzJ 1*{|T'8 4a!Н t)N8, lK i_0;jOM ,Հ$$` KSD"}F1,bXc6Y3 lLԠ[vX @< Jy ױ0M%X4DMM4#3 F0x)js&`V1Ob`cn,ĉn$MT1'6_xY#0A$L 7MY8]!^`\1Ĩ˨ 4f(d ]PxuQ:oʑƁP Tzk+<3?c7I q`>o$C:{~۵;_~z Ib3#Ck Đo}2 R lB&xV!(I/VQCF W"6 ##AǸmѷ@*ʊΞ{]ĒX:BJ{.gTMq P*ɰZ'f.PP &boOpC`JBAM2s`In`Bwb E3YnT)SK5DCOy V\؊W&Yp#FxMDB$.5J4'=R o1QI,F%oa#ԆM*J/滖L" (h˃+HDC`Fvtv. HHnJn}lr@x#Y\'JNѝEe)#N[gMQ$$Zg:$H2Cj&eC@ô$LhDa ǴUOK )a/'B ) ۖVS44TI0b,4愝cTBfS[;,")4gc~AQK}'Npy~|dvW⾜>ϣ_[ǒY3frYr9F0ؠtōAUC(P+H#K0N2 ll˄4L'3rn,̌ c+\͘ P+!%E"I^"qȾSU֮ q4"42G5Tٜ -n$NGHbv t2"ʍ)InEqCkȅ7c63nHqIFҐ!Ab)aF`b$`Ec%xc Q$qO,)X $\ #q)h-Pb  aM@ 3bS&#x _r]‘-/i=4֔BoLQ&(BA/kRgV]SXpJ 9,A"nn6G*! 
l!e eRh TJF[N<{s)ii;+.0D%m7% \w@"PKɋ3{A3aPmY9Қ1šrX~&0d<H8 Hʤ5-P}'y)Y51pnОcx(DȇdZL W;#`'wwq))2B8&-m%V-0 $*צD0`9BkI\~p.> /l=O޽ޱ]̂=¬+ >y?uFo2?ߜJ! 1#hbCI DHnГU$R `Mn5f(n1i "Ή817R) ;,h! PR 1KbEf&Z'M%a"b.ldUR5\!DP 1!. W}%LAfT@YPh^QEEFqҙ@ @H7:2I9`-%dH-Z D6T;ũfPIщMKYvkku/zsC (=Աg(,!kHJnBóeT PE1!6D 'Zb/f= <k=DdHSX ΃4ZچBa UN|iIQ1DكeIK~C 0'1C20lEd tp["Oqd2%dLhAĥ ^&ښ&7p ̏4dBl%f)SCS MBFh88K$.XyCH uѻ>Zxal?;@-|{v淒OF @c<;ndP% fulrHFľ?Qe5e|m,䯧OO }YKQ~o*s.lj|~߽2WŖo+҇#j@)i0A(R^D AXcJ#a(FpՂN6|@d8quB T<|W6V%܈/SvF.;Qݤuam7!VTLJ$С!2XT r3$є~"bߝ&(naԡgH@v"$0HEL~a&TJʻNٖ-UMkBf0(2G(oe1ȰʓK6Kj AcK&D2;7BuSƙӣKRlΰؔT4@nDL|e`aR=dO}9.ko8{\]2a0_0Q߼9x/=`CZP1@E&?H#0Qʲ[s/=Q\</9`Ex:€/c7~f |(@Uv~ Xj5+o~rΓ#!`N3O1IgwayaÚ]q\1zy"H:pDQsn9E-:ٮ_Op(t._Ϸ!~ M'\,k| tI$I$I$I$I$I$I$ VQ%@ ^I$I$I$I$I$I$I$DI$I$I$I$I$I$I$I$P*>_I$I$I$I$I$I$I$I$HЖ$I$I$I$I$I$I$I$I DI$I$I$I$I$I$I$I$I 5I$I$I$I$I$I$I$I$I$I$H$I$I$I$I$I$I$I$I$I$I$I$I$I$$I$$I$I$I$I$Hץ$b$T(E$I$I$I Ap%JSol)(K2I$I$D&+4&@.д-Hkfz;9M$I%2í&,"!= u\$g $l| zS cft{: 7I$rzd~e:m* z@rm{6qVXȓc02E-kdo#4'ёX?_o %D4\'"™>+? 
Ĉ[ K7B!N%)KdIRediWJ_*`@{CX>\ 2?B5hL!c!G*j*E:x/;XWe@W(;d5,F{N|K]ysTchlH~ilz$<ʃgL^g,/^v: 8 |BfY$RֱO1Z,t2WdVF_wƜuopO9C IVYLꙟ̰Y/̋*MUnz `X&cF4h7xHHKFV[˹,lT@?'N?tMR Šs<*Z>RZFx\2 ,uC(yhIs 7x˿jsW 9ϑGM#Ơ^ ]`2Ub!JDy=!zLHŐv<^d:L}|鈳&Yv)3qI`ӸUť۵Oiy6(3.gY kH`Oo)JrY3~ e.%dz4}ވe{nBDE53_~EBP[m^*PE6nUsهjy#]p[ThE=jCCN nNYdh.)[B7p%;V5YZZhr;6#C~ҥ3fQjfv v+bfǫ.^yRѵ" *irSQ>1.*fT8,N< 2/Bm笸;)6AlbB&W<i#+!1AQaq 0@P`?W/) ^>c~D ,K,,,SB dYAl,@ rO/_DW_ g/7Csm5f]O$XH6@lw2U\7;)SC߅.Ch% =qaԯs,> J&2'w`l{?hp^-s6EW5$g?0ݿ킿)]?/9Oa~[H9)EmOt8epsϴf?j9i>~='!x?aNOK `Jst,[X# KLYi-+ZTve r@f߂'>]CʙVKO,?"q/o䬜C3>y[ca 3ԇط#W'D|X ^vЗo?ͳ)a^IXO}ͅac%[p?V?Gq 733m~Hteϝ{1<ſ0 B^HpVі}.oh} )1^&2Yз*.oy)6e酿~ma{'\%ܯ?}d קB}cY8R-GG,_:Dײ>jdG#\K()Bm ,M3w{dǬoAݳ'r@6f21;p4"4n ~QodN}l'`82 v11\xdJ`<Lr$zH8]0YZὀ=e4r7c~0L1{K|W_V^W1q(dbl?Vf#n]meg#Uϫw叻/dOlkdS fks?pI8Y͍i˻p~~3<WK8<$[|7E}38cˢ]rzglO _~~Au y@rMnl3'6=il8!0!Kal3;ODidj~yo!2y}2'tNb J|e/eL-|͜A|nf,ds3X7۩` lM=Z1y#2d 2}gÖvKݱ $>J6$A&Bc9{ǫ_ a6;"9`q8e||'C$!A{*QgԞ%])޲/ܦ{"8Cn.~,߁vbɰ~Z2X}vYplA7߁V .s`cC]%dn]u%' 8Xyľ2a$I8ee@ u$A`:-”D0 $$f6[cRY6og6q0a\K|u2eeIwe2Cİ۲G?m"/ԧv(Ƿ5< ɀngN} rPgm2aƾ_c'09:ͿǒYyw|0إ~]s.[~,K#oH'^KDDCǜ n 8aZ{=cxX1Hl<%@ | j–\,=R^Ð`[s!'{9=%\dL0y`Ѷ҃KKDgp[a>coPi<܍D>KrIux%H }7E5l$, sᄅ}ϣqo:GkÓj!c˞_f^ϒ/!ϋfX$r{D& 䓷 IsI N[cua 6#=^.ab3/ Vgz؜8Fٿ,}IM y>)0-؃/I,6v 0&w~/̿l-[gٌ!g0&Vnayo!zL]OTk$ۮ=-W%nB] io䌹a2/, 糵w8V^^v7b'ߩX &l)={JcɍXI]D8Lb3d(9sY 'k嶛a6W۲vo.yb-ul_)۹_PANR]w{ yxFu, >?Dr ~؍w`޻䵤ܥ^AwlZN <~X)CWœʗ1u." 
,e,!Gٟel/m< |lmբ 0d\qԼ-OCvdػɗvɶ#`1970E`|7p`߸s؈ݣi<%7F|+_Ϲ7[;>c M厃zHƣ .6#&8@%irZeպ˜AGa&Uf.lӱ̝I>1{wۣ?,al|Y<"~l.'ˆ]K䍼r1썔9$󐥍٫&MHX^k;ozXij=sZpFNyXvY!b1ML?S啻RW:Gwv+I_ co/9lE,8G,V6z~;f 8ޤfHky}a^ZI3db:-PI2A:ޏKxEk`I{6C% #:{zwBd&l6 􈘝],O!zys l1Bg' g͑vDr <85zL ':ȉls(rsMq=,}&V|wAG%lّeF%} 4 9q&BOd/8Fn?ͷ>uo/<[~) {.߉m6eF.ey % ~Y={=l&/S|,;~Cx˦/?.G&WIzd 4r \O}!h3e<`Ϗodsal0Kk lfF#.veN}#/V[32˗̜r'KAfhY<#$1 ߍ!xo9Q$ Cԓ@SO ip.0_At63rܷ-vG6 ܇ ـ鬅M ѥ,DL'2 vzi{9K'$; '\J6e7h墱>㌺ȾDYߊ萲!\2G+@JCXNrKא]RO9Ph~.c\R`N7\=Bz|De- '@v3 #}vEDǬ#d߹2v +DX䄡SN@^kY20gl-˘m>0x,nF[^KOO3.cL#0XCv޶鍞 -_K .F!md-;@:%XW3#Ifx%\nfl `*?w<ԳR!MIxH5Qz7ĝ7 W d@+>1u}c.r}Ё~ 7t4q䮐#dLs''k|BG}%6`_7^ƭf:ye k[==> trL^7=w48JN?`ÿ荌B+hә Mv1&iN2NգBu])L`d,,r mf[;pJ`mxYS~XQ_"⚻&5,xlo$WԹ$Ix]ZspxΌ MclmLN7?Rtw%d?^0O`a>˽}0rK>,-'ynF.Oh0mdGEۮC]? ~ &agKk?69_B  l=.\?x-;{n?K-9Lx3 %x<`v ;%Ce / Fp@ `6 HLc#%3-4I2BG }p =_dI<̽?-g>"rʰ=,w{cr?r'anDOvټ2u@շg/L[;dבk 3H,zŃ13̵%&xZ쓒@u_Y1 8'GM2R%/d)nl r==!]\2HĻc> mjģϕA<5w˪h/mɧ,pMn?]`6:NgvňT L p_|puf/R!DXpb~7xɟ ~EL{`ݻ #nWF<z.}AJm,mi܄%|p? {_[3ϸGm !`#$I~6X%τ;X˳{(!m+Kg;H.k dy]8~r< L #;k#Ġ^H@f.6Fqs7Ne%쵽9#-m l0Xrȥp7Ď]`i=6!՜ X~/::9`H,q<,<=y=x.g=fR!+l4Kyz7Yn4`\cavㅰ,o!\dƾ2cVKhHN!pN|mmt@vӟVHxyi/ >޶ڵm [=$|͆/,UPa W݋P'mAI.X^k9"=;hpXwH)}Q 2u6OS p]q,#xc;zYhp~#[o XXg<2=d KH+_a%߇%cwtL3a~63mMg-Ė{2`-??l'`wkrw[\bf]Hz6O͘mvHkZ4l`9>!rͰXO8CĨl3_5Nd<{=!='T!hȋ~r,K?c s8a7OO6,2۶ļ|G}c=||kq~-l lO]@I^A0{q,-N?adB uN[n64'>7C6ϻ~:cY%ugƦLMYteu ;-C~QI|;!c.x.t{#3Yo'a. 
ޜ14#";?e5 .?rN^ĉ:E&y[\Eo+Y?KBd^bCgbwDd_e^ [6[,cR[Iuߨ1-^YCM/ ޙ_Be`ӳmКr8uYu簗:fvsKĸ=l,Gɶcm6Lf].eL>BL \y*r<cLLGHOW'&+>;ks- I }ZNq~^ٚA?MÞYd ˴%4cd34͆M8%Dyj 6ڌEdrm  >`mdVGѸ%υoStbJ)RF62=º,Wm.ÜZ{;aav5K{rTꯃ> m/ԑ&sϔaLd:y?'_ni_߶}<$m9tGkd=TSAxk,lu5+l> o3_cۙsn 6 LŷaoY$ !~m 'IΟ {$d o}D!U-vBMB1[Fs%zD6>2l' K|}[m"ԻdznZm^߹ q>gN o ۏe_.},HGY#Y Jc I!M<9D mgXOc3g3!?tZK]ؽ\8m{z叢E" ,Ll?on;r F4bcvb{q"9 vBXw4{ b0Sqox&1fC}9= C龾>Gț1}x}x`!χ<_ ?ᾉ?|Kzwz>o_&<,!1AQaq 0@`Pp?Q*TT*Q0K?zr˗ /K/\1+ۗ_+/_]4xQS:wȉiPf[r,E &"+⣏L1-ܧ,sh VT0&aJ3q~}4`SbbrEP1q%A.> G҄^5DL&\B+ cwgYcWq~b&A|5DwPDm $10\rd%P:1(b,B{:2fQKf3fN~"ZH\\ Y,9(^v-I-bfgqf(:eƏ_0VԨ63 ͓09Y2 2bT%\\D;Y,sF8%5<7 | EC?jZ3IqdF;_ D JXF%%MhͣL&?8BUCq,3 ܯeeSGFBf:bšaB(\YY!9DM }ak"fBc$%%ejPo]U UD1 Q"XMeKeEăPN?s|AlP&Eu/Ie_yK:en1Wq ~T1\Yɇh Z@&b.Apظ;bZf D3OC)6ť+jk\BwmWpX9+a 2.J,HlaΡ"?q0̳-Z1@NgpAi)Vʆ s J>ьm6-UTa)T+rNBQkD#C+ iW$&#hLs/QZhALԖrIH7Pn?Duq*1c%P@ V[2J]L@x,̘wph騌jRG=B S- aIXChCPF+$a13ziu蔭Sc[`%/eFl%<SeD,\+9Dm "X_1/G#8,*n ˖aXueI~u,|]+x\E|Н@vFM12.sRxA(t[FWZ:pwFôp AJmi *#׮Z*s#,Y1hF=P9A3wjf}`&xu/fB7Ti9KVGFr TUsԯ\/Mj_O)iB# s+RAԡ^"K,G 66)dU0AK a57( T:(1,P;c@{>3̈/ N bZRZ6 )EW*JQT]xCAb0kn.bޠS7-Y7Q)8+Yn )bh!@ ˍE%^b^"5s*Q*☬@*p H.bz,! n Q g`ʻG$pnY^nXe쑮؞&E"wwA @(KAY1\tLV!sEs)AĶx@-&k0 3 8 M˜ WӃ+b73(QM^*.%-1[X6NXTj̼ṛ+iyn\Q :a %tpK q-2[X1nEUC ;ĀzeL .TRqS @I dc4tPF#KQ'oZ8E[lbU."U .T^jbUG g-P[N+(+7 dheF(`B z,˨}haWv]bfl#KBflFHƢ!Q852.8]rEEY{IYL30-+:jJJVL5[#zX#nm@O[,jP#LaCxbuQ# sЛNOۗq-5(8@`EZ03&OHW`3&1(p 85w'p] 0֗xK&ۘH2mQHR,Q/o-t+GzBUe m<*.r<@o;#f^[+ybTx|̀f9Kag9g,*mԈڢx5EA9v=~ґhë  Mjt;b8FU5PQ茭ac%ELGUļ,Nh(QK$]QZ4*YflsR⛇6, b}0Gґ1.]ex=.!:1V .R=zDA7[(48j ab[,d~z1)o6qןz8qOjh)P_ӹQS@7ϹnaY,! 
K c:BcD61JP ^(ZaPKu)1A1p,!1me,70Δ]Zb V~#d].{8K{yA 0EB5Eey(E<5^"[]%K<#qQ/b"itL.%*jfS550]"3K(25EcGS GذEUX:% zMQ8 *aψb `}², cp:yHc(2~ImKtpcixLZϷ!q4[x/0%q9 RW+Jh=Am0!ltD3R~wK%naBV0)R+muV[oQzq}^tگ.# yDR&12jS.\WbaP]B!962b( 0䠡F45D7pP w(TDS!p"6)gF1ֆ:|f=H /6SQqw4 ^%5ݽFbfx">{B%sX^(0xB="]5OXξy~} ,3!cgΠ?2 m`ݟ{;V fTB@xYNd|P︰#*J`V%L_(tŭCLfgqڳOdk KqP,Nk`w\y[ֈŚ)# fq3yfc~^o2OOo\vPX-#1[Z#)@xbg`(TƲ%H2leewq)\T#;Bho^F6~߼L`~|flM`/bW8yo0k(5ewƢ7qϴh5&1Ī\5zYg' w1d!B^/Lkr##=exnK#O'Gj"6DtL+qWFfk8yl1̹ )VUXNx :GD~K#1'7so*e"$hΥ1ufbH= f s-{v<[T8owC}yc:z@XQ{}:OTiq5\ a"o1\.+jbIؔ.3C[솚pNK/}D̸EaɅlP]a|[:TX{:,7OϤUn?YkKrU3O9瘦F)ܮBF,tU@Fy":w<Úpp{t!.Q8V{ g4xk4wŅdǟtG' (_V?Y^јMt1۪\)=7 !KQ#ب~bj2& T$rZs~LRy+LKuYBy^҇ KskxqPڔdNEaR/0xRʖic;g\812|%`Ym)efT$.0]22D ~ddzYU* /$pw~yD m uޱo;CSc m]t6j>|ĺq^.]Od,Jau濟y7>?'u\(g+M/^7M=yg߯<1^c;(q#8*3KwWb30RVTᨨ<f0ybVyP߯oN~AՊ 5)OJz oTtFf|mw37CLuoQY-tL`$+j%ܸ2踘f>Q77Zz˅ pօBXfgİ "J q1T`.Sߴ%G)h4G)S Ahb]GI70wyK! kF -M>7O<;6`Rc 4n,۶YV6B:UR};@[VW,<"4`'76VWLkxV|DQqS(S,ZMRxG\ L B vGE3,m_N0cTuc{vy3@hyes ׏l_X.cSVŃ{7Mbg<+WB; A7jޮWTK-WuZƫ%8|wjk_oorн9k kp\CD_a:/x#wq]"YxATn0upwUH>H)2j"Dcu fݲyP6UP2>h$LןhK5Ei8Mb8#D,-}3K>#H|KQ, ɞ}8Tbg O3v*y[o]0 Oonj%m~;k/{^?<` ׎MYOx[yu0f[$2f~|q^Á)Ik|ƪRY:òHao; ,8)2_jT y:io/OOnݻfUúY l]KZ/ï9r oW7K~~uW3 ׻s |>*ϽXU^cZi߾|<z{kHɌ%-epJlCU1W.ê^%K͑-VQ|Tp-je9Z8ߗ_O6]t ^yTݭ=wme(x1aJ/KKs·x1RytSDZ_O+u =R gh+R}{xMFW-M늿1S4˻=x"tMa\+Ǜh"|^U7C<4 s_?8S8QS+M?11pPkTbC~t$L3@`%-YǶx!f"aǶSP*Q) hrֳ*%"#0Hj2%Č4j j+.6!yQ^}9oL kUy 75XӜWs9!=cnavxFkfy|Njlx{m%T钟!U5]s_-:0x*!o?p\xykq8k3Sg AO^2sPΕl+*drgp 3DXV.^+=:ԧY^HQ)0, _3%5-5ChF05ۈ)Tt;秴W[P7)tf۬4EN޻ܥD7,/}h<_7:Gmsd *( X]RF D Y5HS~ږEYLygˆ|=b8ez?|B0{ OL0::5 <eB"54fSQpv_ODwiGS{KaG=^wŎWx5?2ߡo|oO/F;3A}xy==!eyɸ)d%w{ߌ%GOܿP'3,qQ8 e(%B z-qh5>=k arӤxg^ktw…ٻ\h!cPjn0X7T 0 afVf*p mT ~x웈~*"PlD=!ˌX5\urѕ kUr@Βyrሥ; h\zм!M0f4ߝNԺ/WyO;+@d \~_}iK'p w9ߔ#WBQK(L邈k XW(~f=#럓FG*ߕ+ ?hڹ\̅`8מw|ON>Ђj-,*eqV>@˯LRQ.԰;Jq]88mSs0ϿQ5rC2F?>gn%@bЂ7>y۔qsb|-[T\rH@Q_XUwd߿2 f &}S1~13ƒuǟʸ `}0C #/1d҂gJ GyPZj\ѹbY.0抣7ĥ%c,v*޺$W%ӭMG# !:cTD`<- D/T ȜtD@+={8 IxJ@ 
neh֓썲DpA.Yū%_A`fhb$bqs<jPN^<#k{u2oK}!zWqU1`s( AJUu13}?ٰ-O{AKOcPHp %s)A,oƍ޻Lb)zyu!w\J[xAf7 2EpYgUR o R\D?|/+3Ia9(1q. [wA%`13T>fq$Kk{ 5(zR/~:7o53 9+U0R?1@-5=#lgxEL"}g}媻e벾eWS$kk˂N^3G^l)^ AkyXOm\5<7=*5 148ly8cB4nt~Q_ԣH*sY3 X PsSsa71ᬪ:asD2p]O?^ r8=0ރ;`eWoh%-5!B'Q :Gܸ<֣E;|#;U:@1\Tn pN f߽;G.2Q|A |43o`8zń߬Sμ~2@y1!<0w @oOE?/x"+}QJ} P. qE031f0fT%_CPG_\F_\IՌ|@@0.?Q%ȇk[kYq̻r؆XY̲31\K.dB5Bd?f<+⢜]RPzw yD̡2ź5.UZ긔x%$Lez)PApJ.2t.Z"Ć!!ܭex}<P3, ;B*\Ra2f @~}1_; Gܯ.f{,.hX08`Wp1^gPТTr@Hd/K%EGcD, >|e pCpvsH7}sr0q-s)U.j\[m*TcԔQ*g81kǞfid<3}-sxcqh\se0΢qfJQX~e 9SZ<|yq^pkrFO _'-z.k~ מ"z  X*K*;ԵG8vk/o,"{@RRaj`Öd:\U& !1G_Pqpoc4xi6D~>*}Dupc!WIϣcm섍|ƢwBg~7@SQ0 ]?Q̜c}ߖE/)WI? OV/+!1AQaq 0@`P?^_?ў;͝~x}7+3O@Zp8?ppbBG=~y}ތ~o]g[?ɣuݕ*4},҇믇/0w7Ϭolvv߭a$_v=qc쨙GO.޿TM!~-v +S$$bCGe){k=Rdwhu|O]fgS><{Q#$ Tu ~ :U%)j;IHZv\ʺ(^hh :GH4 $=_q;B"p@-1 ${ZA#2cR$RP"Tѷ*arX"RREnrDwZ,t$@WH&U]Y؋YT r>DTʘ<ױAȟ3"BIcsn&6pFSvҭ&}6#TB:gK7;֢~y/BF:yAH PJgt9򤐭(UP[ϣj+_\AV)0KݢڻJShs T"x.yrDS^!igX$֡77us~3tqOڿs?R{BӮw3sovur\S3tq;;?_s:I:/%[-Ga,`[ 0@1 (%Ql5B梧[_lNтvC?ءƢs/˝ T5>Ҕ Ⱦܪjנ . nפ Ogst-(8 O] QN <#Hҥ藈8, !@'GM!0fU7D 6r  Ɔ4OZ â5#96zւJ09t=AU*g`w%UMZ><$Tc_ R?ݜ^şijóm~R l?\r)Je?!C\f/B!T^T ~b3$ѯ:r]_3_ێ0{ 8:5FZW#̇W8i,.s?/|*It_;tF9y^1E\̋RFCtM IrƠe3`-t(H%(( Uޝ)MO>(aƲW#D5@KrNQ|DL˽CyFCŢZbn=?uNLݣǺI{J ]]d9AM)*>pcL`fQ}xrfg*R ޭnȌD)| ?g(t(jt=BMLˑe@HL gXޔHhV8'i$,[w+YQHF2J(5dWBWB yдi㳉B3MO=7rHE[tIp+!^ W'O E5sHbEZl߱!㷸5\-Pu#nS{bu׶>*]͓0K ()P䩦 @&3Ӹc3hQ"zhxd-o,֚FٝokG#MϜ0Fh,?tόq;"V;{+)7}E)y:՞umAܢq:O^Gjbz@wrc_l/Κ'@x_kR~|׿/ώ]?ұD?rvf_ 'C^*aSP  B PXТH^@С?2e0^ @kR{^t?eg6?熭o6RD~UjdӳPi8͗MV ƼL J{cogВp336.)?dЧ+Hۮw<|V7+8~~9anKo,+B=@R)dpt|BJjʬd :{-_Hmgp(^G6̙ J'\ϸu"IT@}=|hhZB :tsƢ C2ח4~l ¢Z`yb*)arޣzccV/@¥@ r\a)oi"' :Ǵsm~7tL: ~Qs'yIJW] ?=7I0 ~C`'׹P|`-/֝HZ40ˎPMUF8euB@Y Έn=!رH8ӊ2#. 
h \TK$kD)TѮء*/PSҔ24ɘ# B&f팠+QŽNDAɊ QTqJvKF}`jvQ^PPM1h|7äHYl Q^40'P*8 V(:|T:V!g 6XHS GtAf3}4NJKPmJ v)kgyI\Cs1zȽ"AP⋉bfB4GR0BbA ĉS=@&.w+^=JZYi&??Sp\Hoo=SރHHWbԩ;R:Ox3Guuϟ>>O%srZ$+grr2;Z ZK$:XXҶAÜL̩$;a= "tT`sQ(<\)jQ"jVĐaE\SAQU@q)Fz Pt(K8=Ԧ@7K` `j )ߖqP$ *@d1(`gfaq x$$F50QIM3![ϳ V/P;HzJMjtz2*4N!<7ED4ت cQDS"I7] pWeCf44iA Jf ȧQS+|n}d!ј;|!t!5'>[@:1*Ņؠ>Z;0\XaX_.9eKMPxe!,2q"] :ǁASvh= v+9$0L"KTLI':1,ЂKR:u#"^hSX+ Lp (0EA#5c*.=+ͥ"krfNhՕbnO"b&Lo~LhӘ}W(Dx4¯*ډƱ򉢠` p!s80yrPu`|eEM'D앀3!"$r>f;IzHQD޴Hl}U$tiq >`}dcz9t t^6;,`&Dħ,5UMfd|s{EM/L!UO$EH8#0D< ӝdU ("԰5@/lUY@# : A(cB P T=zICM*;{PhJWপH W4BР a@M\ ؗR)bB (%C`, ˦PR9G5Rb`BS{[bŽHoAv fv_y}x(Ey0 "EN-@,4)"sD, R I'Z1t2C-ic婰AF':[Tʿ*H0`id6*"f;0 hx}OX1N" 4 ~Ώ(KJ}YL!!Z0Z)q +T0BZ%kNl8sdE*uUڴY4v9'GQ R Kp{,HE; K6uۏPkT I$Q<-GSG Gi#0* 9D *(<:݈a*XatA!l4hPXNtEpJqZ4Qm8b%Q@V  3`V@0+4jRCUxLnRMS#Նec@6C D6v4c*CըQ5C@Av{)j n5t&dĀe#Ze `+X`["Lc F8 ^Y ̶`;C!gt#RoH#J@AlIE *H۶hQYJ]zm#P#S(kŁp61.P+4EH 1N+ hå8tZҘ1ƘӘ%;-1G$&N9"Gb)  aލRڋ (dF4 :phQ'ЫPX6UQ;*1hX D58 `;bJT@GQE:hR@ a%drH%w%X`wqt"X\,E[c,bFmA>]8FTPz½1MʛJ(3AbPѢA`o*¤Pc׌' J,BpQ$S[(TzraipAJk@ŝYtv; j奔 摝"â:;o|`{'1?)XvP!~.DנU0lԅ*+z]{@~~_S> xjjA苈no]:b+8]JTX`#5#X"(jݸbaVaJE D+% k=I'A*P">J”J`8Jkb 2sg26v1Y %9Ґ(9V#VBU-q'XB\ 3$Lk,ZbP"Ђr`ZI~FAt ިTxN .< cF1nʨuPYxE-^t;|F@_(s؄؟]--b,ooe.~1e;e4ǯp1F  :g~GD,4 ht0qQMZMx( āT-*cH,"")mb tJEE+"аA5@Iwɮ" G=W 1/uH  AqCw@ el%Ab|x*"_RhU]xe[X*BhPݬlu$x訪I_ ^`E5 إ_%Gh_*`"lI n!Tm Rd,"7/@ h$SE0!Z`Pz4 @pov!R:\j$SΆ(VX~$ =yc7kOqa+હxko#R(%p%" hZ4ڂȸ'P ONI@xV oNe ЈAæ,.bPPzT+[wIKh1 AW@4aԈ- T- 0$ Ć (/E(`NA"&PU]LǢv]DW@@A3i-, 1Qj4bQYP2:vow8.*| dž(0@cK q+:4.yƪ=i » 耤`ULcZ);C`@BP vg.TڃMZ&D%JЈR8h= ٤-J԰Y"=D4!<{nQ;u`ThX* A-ȑ}$iWV)qfA*)#ՠ%Aoh~ZEDBܥ4@F(V<.0F 66`a{Q-gɂ" lghwg0X[Tm(QPT XJ4E?;JW$Gh1ZYXC . 
'y1[˫:la@%E䄴f_4U4z9 U (RE [ j~6BW”ip쪼,%!9I'tM%%[&D)z PҀsD "\9xS 8¢YǛ)" &ٱ}P, 45-*.@ hN/.@TzRR aM4 KHLEAA&Np, * @#(Q&Lze pR(W+C b<B Zb m" HDy2`0ST iBGvE*2lIHIh=zV1: Dn_[*R(+"p nP(4E|Q(D LanFU*18Ώ(9t ,Ćp;4 H#I ,:փkHET!EP@J@ ˳ !: Bаd% a Ѕ`*WLbֵCT?,;8*&PRcx90"O}p,`NKv./ƒ|86Οza۪GK i.8z*U"[!N#b1 'ki0A Q,b^d{TdDJ 0PWeI0f:wHzvPy}%RS}YzɃB17|$%IVT6CqL($uEqAqlˆЧt>`- UPuSצ6^REHP M' XF K',0f/.D;OC>N6vEAiGEDRFZj4jκxp-N=L`FFVQuyFX3!VQ i`Ӓvd.*Bt9KR>W/p F\H> oE@1Ti0[Qx!SFPƨe۸p2!lh]T)puPP[QmI Ga5B3cͨ88(٨U"i4GʼnV'Fpt 굁dF1pC>:@ MXt)JS.*&j5b]6#U4]G M'ީp~ %pv*JL1n) tFA%V)$Ej\xpTBD3@qJڡD-($ {$7 %Cj h} *)BK]*vDzP@VtIGba*pi 1Nh2JG<&pYEdpCt\ ̂@G+*{:DEw">F@AIX%I"Nu4U hJ@/YmHxV.Gt(ԨBM8:] Gtiд =pV.ε A@[!Srl"?$+6H0V>$:X`n P*x>Ab9+C VC4LoP={*IRDCJ W sSBűtpѬ?gʂGDL::]H$CʍxI E@ZhT4Q4E nA #t \ ( v`52D  qiLEKZ!I0DDK<ōA 6 뮻DPDZ4*9^`EqP!lJ9#"U6GĵϴXpZ h iP5S%R-)) #TaWwv|%\h߇̜ #m)ڼ]|tD"+)A̩ë_D 踎teW׃8΁44,voQ;Zti:"PD+I!N15 b`HJ{\kw֎*E!-bI޸)()Na@# Qaʓ뀨 t@c!**J/ ,L\JBPAVTH; (Vȅ%UT- P:T0H1GHY 5Uk`5q#oA!"OB Pv+@i@`UkhA6$P އA ӓlE^z -;5P_Y32CK)d(#ώ@WD u%&J_AC0ف`PPrm!+ qeZXjp0 ! EtA B' |Ap`"]9Q@W Dk  R  (G Ć (e>*-f(ޘMQ~BBE!P"xSDX$.`BtЅ F>{H₀|g lIVt(\,dC%Z^P/iV ljQYAP!AZrfHa3b!1Z@[wkD+[TjT)D@]HI $%GBA: z"v.4i V)J{UGA̸Z".(+LX &>a b|8^D({"QJ2x *蒋z5:M a=O*L1Qrb mEw&V cK+,H4څZuXDU/g,*6I|( !PQZ::n]xp|T"̉;!G,Za$}9Bw(D+[`@8K.։P, ])#јMiPa$xPvPxd;ujV:z| Ji:(Ul2@3c(!Sjl~I,VU!)|#~0A=kV=m! $D+ȱI 0^OhҁBa: Wt(@; 0A Ӫ1WPFbW oBEBeR(r1Y^( rf # ( ̸`-pt0 FcQL<0 B_Ee'xx4)b.1JΈPZ*"*/L,47 u<2=V #PQ,pr!1q 2B61#-H+l՝e@sL`c\JEB G.ѐi] S%^UxD%Aʈs@^Ђ7Q!!ctNY8$hJKL-bO8pƫKX F)H8ڒ \VX !-)V]K"(~A'.T N*.65' PhX"QTŠ Rq$=rvP؊j .\lĠ;Z0Dq^%5 We͉q  -L@ELQXaobbm nӂ9(!ed#BW2ᕹ% A@  )Uh@@5qSQHm3(s"DTHhEoejE D-VfV<]bUڂ\(" 4q,I uqDddJ%E׀'gɪh[Ke6 ikkJ-aJp*Ќ.ę$IhYA( [%`*50HkW"ZxBF IDrB,W$Da(VJ2 *(A f$1 =U!G؋D4`"%QeЉ~ EHqJ5Nh4T$n~&@r5!( T=` $阆y{r@V*b DB.M| xTJֺ'd? P0nVaE@JhQ؇bV[VÌ${>څ`7$%%Ō 7To\:Ҳ;j){(O(K҈R `JjS60HtH?i(hvB0P ŎYzvY Ut!@9 @4EO= CJ qm\,*A. 
8j`!\<5 7jb I P>3!B`m!=.g0`eCFif Q+NGj ):9cKZZm@  0@$8A|\$`l/K@&f- u 谤STA '(TRHa0"*D'45"t/=5(L"(ɏ]"ml 8Y)YA+* ,n3: HmcңLZc F[b]PTzEרaD#E#\ĉ`5q1He; ICIp#%*A!=ȖZ6d"RgEYKaXVQ4< Bx A 4Rb̯,@ *JE%e3Q$žR:Td0AuBH[`B"B"F03"F,wZqC>h{@Ctl^t%6:O̘oq;K eBul]E$+Dr)dFӞQ(Z(ވ28g5d`K ccۃ!Dؒ:42i 0m$Tz]%T0"BH0X -:W$ A( CO@P 1(ЂEQk@$8&,8)dJ 0tjuӡ@O`AT @TC: 14JV< j^]CQeAh{7'HJ%`{0d@6Xx'Nu0wS 0ޞ Q.CZ~ l PQ B9}*h^/8Ђ \EbMPH!y ÂB҄K ̂8UiE0 PD:*8hPB5'r:Q Iy_J"4fh @e@!;Q!EKA$cx1~ĻK+5~`V+Lr7nV0 GQVGxljJ> D B@ 0 [$Eҥv7&e) QtAJZ$a%TrXB1`0ˍԩTxlU$JO 19 9eֻa\4B<+"H*&{TDBJ䑢H~ֈAC#eZO$QzRZ+R6 .B@F"EJ|[!%t`8M4 ȟCǀ}fBDZ!Q@!`(Y:XX^I((  & @$@/}+h+ .f(%(A!, F^E Qq HiL H{=) :`94N s ԔPUŒ<a{~lGM;4hUZ!3 u!,UB (8,{RAфp%`*(""tVU`dVJwPTpX̪c@,R. K 8#Mbt~ ;ҙpQ{@%lr:$iHt{= ep*j)@nDp]{I-Aߌa^( XXC+M*.H^W"-oW0(nڠF$uj@%qCU`@K[&F`Q0 v0؁N)UAj"ð'1-jE7ta!]¦Xj0Zyd05bNH SKKP5@ x4z)B(SaAD5+z\Q~ X* |UTG тbÄ^7BP4M"FK.YPy;I`UHz@ ̌ JEu$x\2Ūn@C58 hXEXxXQ_Gୂ]ixv#+ft 4'I"UCTv`_&fCm * e;`"~$IrBF˦%s xT^ ȪV!0U&B;8HUlvJ MnV ]|n X14lr&q)@bV3JÈpZPmWE^ҴۢgӴBb@t52dtRƁE?HJvԍp5d¨3A0x:NBA"qTe74CEuD* +HX0H#^EehU$+DpvWg0RZ:!B8_2ڣH.,܁ qXh6 CX"]] FBiN]Kb;8bP`' (-,L` ) LIm P 2DbDP Of>@C'k`qJHPDkiv=$LSs- z 0*NqYӚ0 A+X{w@N!7(kk$8mi* %N@w`6X$2`@|xm$ (PeGaz-=NɪaKۃb\8.AкEpbqjk AzzauX ` 5ݕ9k` Q:5@' i@+^l:ƂQDVx HnńUJNBI \캚A Q@(?!i`KE*ZZ CJ eR }* x("FTYfI'v!@u ?U=&) 40:?&KG$NP Ԃ{s8Eq{1zJHA`kk"-cX<rYQDRI2EEصTyDH b9 F% BZ)fDUgEp$ U)ƜfE,G 4B58& ,4 )A 8ʓPi.j9 ) rZ"(h!AZ&â8H”zB!bCr* -q+RzF`B)' xB&l&c$0cz\ "$K(]IDQD6u1(I]"@ 1Q)BQfç1́"ۛ*="C )Ad CӁ򶮕D0iy.NH;;FU2ba^Òx9ocR E (EvB1D#(.o IqdJŪP(d5$D@KJfI b $‡EBdE%KgA'fBzU(BAn2 Aa)#R;T{{]WTWLuNvKXAD0#g+6 @ 4{XpWGJe) 8HHEv6o(L^(dcP=2°s-)jd݀pb!aeBN^bI蕣W1"E‡dSDPs t 4RJt@TEԂBmYM +D`2bU$BU+T;RKWl6BJJ/Kc었CB@lp2*,DC^: Ѐ)4h8mPd :WуvY,`=EmǻN2GS}.dB0 ^L{Е"ݭa r.Fb|-!l&U#Y 7D5ø7/&.ʨe. pnxyG)SB  &CaFT\*.S`&̀y e 2R^5`u𺂥w(V UB*K)B Q_P`TyxV*B18R{)IwDGF(ۘ 'B^` ٴp)xoJAkMtcPAF!98 C[ aziHlxt5L(P[H *(A Ed(GQÙ4l:E`z*:aHseP֘pQHK$A?wޟZ? 
tiv<O?$ާߜ  dE ѓő# Έ) uwUfEjУ!)P[2c`ntD  PvfDE=zdJ A?&Xc IxP5 >O )u84T2Kei)4(pHenD4)ځKͰU^!҂]>M=[a !{!]>D75`*(V!.4HDE"rD̾HJ-탡S(TO P(|48jp+ Pl7Fmyg@2N$4 VՉʀ&Մ؄Dj8j:"ޤf.4 5S_FNj7o7fJ1Ÿq9؍7A;xA#=cC:|BS{Bg_?< PwO^gc&^wJϟ:FҊ@#Uվ7 }p`oTUBsA9h\ P@h X@ˆT<*RɸB zNUi ObK"ƫd>)57{q@*P1)bEQ nBkQIUK@-$sdo<(!v# N!AP E6r!R lLT IL' EjI]Uc8SЄpisCGUQ6Jn  zҘqo)D&^%h Ά늱t"0T cHoCm@zihX3NPxwþ5D >֏@ p{:(7q`"qRG`NA=d EP# @A8Z1d 9CAÜpH/I[#j2w[ݪ9²SL q&QƫuPQFVS@2.Uq#Wf œ>tiF.х@` 2%X͠H!@0` a0.%$Bj1 1PZkcP36DwiSA. PH:+Q)jl+a^SBJ!B<*hQ(WE) H"f|5eќr3ЍA\@X،.]CXFG| D-S+@=(K@Dyn :ō@sZ!DQH0jG=¯ LT4̕\@/GnMoS>go2||׀EWFLw t_?"1guי?kݕ|NjB&M?_aŵS냶[_򞔣P<jHʁEƺ9uR^PCk"X` j 1:T`t(& F.tEQb !‘R1AE0#BQ  `.K=L8* sh.`ck:r_Pv0Z4. RJE ]pA "]T3+ }\zU*khgD!OP('D`0 IYuI-l :$@*a[(2 Z8MBR]4%B8"! A%% 6O/*H #0e,b`*`h "t- BZ L 4{!qUjͣ5Hc&@ حR`*'xcLGBӱkCUV]N#at e갨%8JSѦ IU @))+*|,-HiG(>`lQG h OymTl-UTFP&`Q$irɵ >i@Ah+darw{ZRX\MDm JPA L _MD' D(jLP9@.6!PtvA~qr!jAR"XZ #2]pf" 14 8 V h)8;5O@0)_T~NWAGpNٲpS'㒥V+yT"c\JQΙ:p,J=p&;q*.'{Z: [ސcL4:H@C 0Ey3#P@T*rQ$HE Ej*h[eڐ P ̣GHEwB F UQbBՁX5 "p?! !H`pFa*HsHG%|DŽz'abę_%!aIQڈKavFK0V7zP؜g%0$cP,UC uh]V_ ӃEI6@՝4HdhQv: @ **b# (JZbRNq; MKI0U=E탵XceHB&NF̖*(3`D5C_]AjB: A_q"]kDM(_XԼ 0T(iJt5 hP-&yO'K 7Oqs]NJ>!rhW=6IN9B4cXִ2A:'IPX0ƈ0 ODCAa082A %ڰLH\cRg/҃ EZLCO\㖅"{ƌ aM}##hkVruQŐH;nA->ensVo8a?pq 6v4 $aH )̱EDoh(DA) V M8h9@q!GYD|))v誄Xr 0 ҃9Ztim`RuP H[1CHX>Q8QIQJH+Jn s î\9b*v\B :?$!d: ih(q9 Ԙy*w<6 KK - l%b ex5.#f33vzTf?pv6 YSBLu(`6Atƹ+ sהF 59%xŕpPMbE DLS 'i0MIû( ,WhzN< DŁ!!Rx( 3D 4c|TE,,* .҃Tpv &C=*5 ”WΩuP5G3WՔd/hpt GEȅLJz!Qy1ŢSV%LAUHZVB!`0 y -Xc'iB04G;I( E堲R @!BrBu(DqӄN*-$Ij%VL*?/q dQPHAv}=$*/`mtJugvD~ $sLADRA~޷ԟ(rz,cӧcQ6P+:;={߷fp(02r3zS:7j J!:9H}pD a$[€(Ti&GbUψ}~`P ,҅F@,m GeJIP5 b9x}Xhp|A!JB[)H) d$IM*|\`*6+"(PDP'̑ʀp4&-)|4i;Ro(:aPBQȸSk l"ׄ$C%4 iaG !$ ζB1,!@ [ǺVFvޝmY%g*P5Մ 2Xh2 6+`` yЧ0 |@ 彁:ApVUX Q@X &^2P;G "cdHV5(q$I i+)m$֫3@S7pʂ&[Os&øb$b618DԧD*`9gY@SUYIJ(ƒfÛADS! 
dVj1LA,x')t 5!E[Ɠ-dzpkl]r*N9L-V$4dPb9@ %xJUJMQs''\Br s ;EaL-к/$Yj<ȝpiBPŁ* Ai(xA&YbvB'L?xOS5m-&Kը8pѕ_{J8}W8NU742#O o`@$q8r}EEЛ7o5G~|4ӿP%Gz\Ԯ,?0PZ#Ks {)F=^Qh2]wâVH߇UBG ٿ#iuVz] ߵj }_OP)Q5B%Q$UѺ "8GE[n%xp"J)Hb@% G  tQA3PZ# (ux5Yb+3PNHM &S9:}A7l J@sP#1D4u>DAa -KfLva`E6c/3("@ABD&; 0 wh[vI,Qmճ I8a D,e=(n?EVF\A]E0Z7Ê$9!J}"!4%QC/( AT1PcH%%X謅FM{XŐC䊨Z3OR L#N( e*Q6`LVNp2֠`V`AX)D<[cg*ԛ ~>&Ea(!RrOf7f"1!}HYH#R; :1Gȁ6#ȥo BPE+Np ijW[rj04* {8ĸTr:k%4('3PzbH>5 GJc # )K@yjqCO/:Ա-? M߁͗KBq?}:'x~WYX*"| BO耑0MqN*@}}kk~",`|vJ"o_{{v}¨FN..'r߯?y-|JrAJlgj(մ'n{M&M׆[Z4;~:AivDjPf` QJ5IPY"8@n¡8VԔ>J\AJ[BKr*~ă9CgG GWp3% gjG$H ?/씝Wkـ"S0jQB N$ 5!g"ؔ3% HÀ\i n%i"(+ ["*3SV .pR t0FX$‘]I^bMz!(ra]g(eaq@e¬`S.5FfWY4:htJj 5a[!"P[U=:V5J$$h:\+;JU4Wb4 >/+`+r Zn&9I ykr\SoET%!SO!v*=o,ӥH ڂ#<($ Ǧ'Y80>CN-l@Y p׆@+0d,jT}_$:w[=p}UY$zd<:XV8@Z:9O@/jĶP <dx &ŽIšiEG0G(AIؚ 8B!èD785O,^ I E @ I^U46 Cm 2S`$p^ E wo{:%o~$q6}< g(^SZ]cC2N%hj.RC,IY@.ZԁޠmZ,o>xBUẀVYhnu_ӣ3hqO;~x}0OhSLgFpBy:~L ⥁̟k-x71e]GFp:cxB +a.%绗Њ%OWor"X\ESw-Lp`1s(y:V{Aٌ(㔑L$Tq?ҁ -E@i*e(STmeASD*;ͦ&<`ٔ_$H#tȢՀ±U kW UJ Px Q@Ri,0%/8r^M$)OBQ @ \ zL @TjUH#\C  Y-^ZdQ"%q\rX ܴr m->Rf|]@\0 @N uaeU9Z.8xZMPft{@ tJ+U`.6D 0$e0Z,qD^0HS`#vÕ xҕ&/] E.Hg()] ։ X [`YK ,|6ZDcLA&XT^SW]EPJDB 49 Ps  Pp"$ (ͮylhe &I4[eǀGz*-Zb#!XUFJh<9 `M"2iDHkTR$g܈ N2Dx*p=阠'] O,C~DT)(t-_KKIkBGҨ>H1p@1єӲ`Ʒֆgfsh6; ψeL5g>g|biou8*Nzs%ҧR=R ꘰oeJ\[ؔ|?5Ps<:=$ -U5Y^ܮc>3륈( 3;qz7 H*,fx&̓K q`  B!Q@r|h4+'XG5R}d$/nH j$ (ޗxx$O'(`n5,PQ0M1Q)bh"HeU -oN4-3A!VE1mRj0U֌cIA&-/ַ"B{UBEp%Ɂy1{An j]Z WR$N7TcKVyPyrW`O}̂ZC4[by"1LWW Bl1܎.JWp # \b42ձ9sZ"@o K(`* ]u7JEn +VNWF׃L'|[ 0%s G+MpxqAME^ ,QHOR m -hedEilKm -hE3AG@S9 @,v䵉]ωhktF4ܵR*1F5@i>B!"$Ա. KA/j6/mb8 uwN_/^ llstޮ(@ E)hrzB@l]t:fz宊~I;p\`w9‡jإf#2x[AD1B%`Ѕ3N֢8MNހZ߸!{y>8~g\ >KtR )BjXg7{ jf2Ԋ5. rp~%Yu@@Vg'cE*`G*4$#VLsGNw.L(bUPЃ( Ɍ,J@ ,/2 Jp&5 B! 
VDEѡ )(fPY.3L7\-ezY\0sVPE^Vpܙ^D"$n%[ SFhB$@E`R!uqv q/%sB$eC]Kyʡ>vQUf5meTxlL02kQ q+*F1I@h*\iSNHns@ Hxw3zG$K ( 98ZH 6Hq V,/-7RQ&0HfbrJUQ۴- (:eA4*P{c$#B1G` :ZRhcCK68N)SוI?Pd1!6F@I@F0Ӱ%+)FLa@AEʼn$P̋Cj0&T T$e So`:`YFGxFpl/XZ y:M}~Kh@¾+ӒC_e:_p,@S HU!M?b hNA&lp' [txWG\>okq ;4HJgL (.^624μv)HF I>T;VƾgD`wڿ9z2$ƚcw'0G/ z$7O-AUp ?rщA6dےqa2 ъpl3-)P A2Xh8y)E61m2tuxH_)$0ƕwDZ !* BgL, "J%)VS E[j^%|r(V j%)EIÈ$B<)Qh"&a @PG"U4QقvlRyqx[a18Ѐ, p  $Vwv&!CxȢ zE }2(2藀`j4PTEV!RH$*.\!^y!6t;` L" Ty . <BNti/uHT $p>+a p00̫|D#hDS1O'ă:B8'T +E㥼ET^t.b4JƷD*U( CQIq"WE-Z FN'T,YtS҇ ĖITMB QŠ1C$q k[l~j~e mK]T]|mlCCb< 3gxjRn 0a j*V[GHش h+((Dm2=x!-B}0oM? ;oycTeGNLn|oM<2▅)>T TYTBUQ2gJ |@X:FqbXDM̨"" *\,F ʱT$X .Hd P+8 PIBx )8%|J(04D<[\pyԜp""Rpؖ(Ez6"h۝19ei6el5[/kF0IªRq%jod@bp$p 3,&.U1,f#Cib%TTxm44 .zE0 %>uP\o0ivdL2uQ}X*b L;)$52dXs(H&ݡQGK$i(('b)pc~ 1 SދE$7UqLTh:()>%;U&`+)ŹxQ\(2ÀEXŤʾ.,h ZB'Bb !qjh|Za"@ y4`-Ru1i(p"016AvHѣbˢE'^q}kjܡ! j:.>}_\pv{R[ 2GF>ak2*@h%`Q kÔ8 d"lЦBaS8j% FOo )ЀpD7H{!PB07}Z<>~'x*bzv9C=G ܀ʼn&PB/Nn E|%zMpRuk$#QU]G]',‘F$JLסmGTM9-_JE#F iN2.bBǦ`@I!/%[A:$iU \PBlc DPLNZ;M!HZ $T%S̳K* QHlYռt&M )F$FRe 5lʚ(0gV4a87q0e"A "i.Nj\$ hPI4aBdlj;^ @d%4F.X``%!*-HA8Ef@ vB"d$<&řJJa 浖@:He-JE(H8Hz]Ǣ2}½ړ'(۬Ƀ9A:/<VÖ H6U(FHIHc8qn5`^%8 gG93F =?8N޻p3B>mRI?߮Hh\ ^ юm{>e"3$ cb`:G)^%b=׵d-{yA_'ydœp>m В4#W٘+B;'){+3LG6'Rγ:'|@S?VYw >~Qjd{ˍ:5$YB&@ )$r0  +AR@Di(@Z 5>|mWMΈC ´i%*BKM Lǐ7RD;Q8EɌ'  Q #C4bvHWV1E zDL%E4v2 i샴&IQ BND,PGtrZ3t6Ԫ ` GF@XZ@C#4ܞD!4-bk!*O,7s`ԣ!Q -Q8YmU*6 Pm9A$lL9U*Eiz`:=iށFǁMI@1@ub%'IDakCQb"?_(lp:2/%:,Eì@&Ib,AҗCGn`;TiX Z9oI^%Q-jjy$*!Q.uLf'׿<`+ 8e>\QEQ@_#Y 22~o} rwm'մgt}SKH}Jr!K܅HTsnw7x-{ -k܍W7A.nکN'=:߮ ?9gƏ}A~iqi :A_V xvAþ-w42Xj-.J`cA+Wݻ#2EYb#*Qk/Qy@iqvE2E̼ @F}X[cj+3dV:QNYzEL@""8 [J [)8H)&Xp2PΊH{Mw [\)e&({RBJKj"Mh,J2 0.+CC3V@rtmغ@2(PKs ^&G: BiAs@!HT#^} NOI'tt<,^iv plB?㜀=oH 2,*mR*_c a/!@6,*Ё詈0!*G2U.M`\3E 2]$b NbT@ 0I]Q8 6p) q!"Zu3HEzO=yQj0P3 Qw-h+MP/pm*JC0OYʱJ&CIEa3f& H pH @`|[YF:??{1>GSoB߀!Ɩo9]ǹ{T:ƀю;NfP'4 c^t`+}^y}8 ҮP1ov,U-AQE&Za<[X`5?zB{;8c<4_u}7mH[]8h~1[~9ȈZi$?*T@ऐפB"ي^nH^* 0_$WH;U N$ :5;# 8 S$+ٖ dc\ 
%F@h3|d8OXfH@-B w@^ CN(#s葉8DK`i .N+b1Z: t1 g 3\*L"q2&kԴœ&Q<6\J5b ӞA;A  $6`jt!#hbㆴODWpIGXYh4; IA@Q ХjJ*=c, $P+2䨕X'zNtbAqbjDM` kP\Exh~njP@)5i;S"qIKwA¼F;)P9\- T[BGhD1Ў5D d2z898ْ AWKɱF ` tBX ApF6ghB@   7qH"2`BziX*ECS# )%+"!61``Z"5 E_wqIkxs q=<@vKy8?:D2'DWç~8> /3xx"iӑ3C V)iv,=-QPB*@5 m6 ?( g@mF(-`}3dF0 # ?<@$;SC?7QO:t/?>|P/R@DK*PAI!PcOL`<s&W/LnXzVh5B0.Wq,2EWdx!^fB PpqLRDBDTJ^FLVŠGULA!]Szp> LXRȚbi p[Da )p:#_i&J|$%"Fz"e1臀H)%^/ 1]H ,sI@D2*MBW>C$wTAH!))8 (Ul Us$BJUэJ+m2SnGRQu+Y(- `@G8/a!wq8G${%D"%Ne~xg?ƣ؏V12)ppz,> OkB Qv-3^3y!e|h߼|oXZ}(}Z#Y6bT$_޾RKr|a0*{](X>~-}"w7[o\tf>`/ UqT@hΐ,;U0)$x儒h|F"Ei-dUX]ŕ!>I(x ǫHD1Wp{JsRIL)V8:itPE)R`F5 ^Ă㦳y% EHܩT 5}+&xJ8jRDF)1I}X$o#o2R-Yʫ:5}v)#L<а"KūDZօr8ͤx\ %1pF*mNVwtgpvrh9b %QwE\E(J dlj Gf@Ӑ)IEBQqhc adǹۇW1ZAј\V CF)S)rYڊؖH[D"ؐQz.&KcvG7]I'i A4DB^:|eTCh#}HPt!B$C^rI*!TYJpX`hőPF]PX7FĐa?Hbdj<}n WE"y}pNouy_G/~܊@|r)T`P֥fŘy>›|yr'IgS)~ G_ Vu|'ٿ@ ~Q" |xvta!4 ~>z??_z| >%bKe*>H Q@AR$뗾P[@0 :,oo{Sfþ wwΣ}Ivq[?ߞW,x,6rC34[aX0xT^~d,z.pM[ hU{>3<=a ;>iS?!܅ /:2GO._EO.^_r ;eX-ό}y_;Ƛ?p$/+;IKgf_}a/_jNwX0+BId>*UbMy T9VH!n"cDT KIY * ctV )VU1U`!XCyDRpWѮD7W.NJ AeQgX+!AA.*H_Z 8(`zGU4Pxtgv >2"W87 {.lN|E,6]ct|'.`XVȿa}ЌG9A_Ӂ TR*\?`v{{BCPċSaFO?g?G2GoS}. ~-{48>1V@.<p0޾[sX|f;һ_*P/i?pO/C{9^ ?9ߞd{%8 ŭ^w:[6rA]N "#J" AS J"RcB*>!xv3K!Ԇ.l0@2J4GH(mL@S袊T0VikaNXZ5 w\g!^&@V+f#$q"S N)6blorqTRŠmp a~UyO;J "CϪmT%Nы@M  MiAP8M1Q*Z5 AuƟl8HJ`rDoEFk4`H=h. 
F1,Bx@!U$Qע~8e[*ڀ8hSxAءMjy@_0!x!4bఱi:rͳ8Q=\D",7 )~  ;+  vH%cڔ{{Y'@x#ތiE.vò8/hoJpa/ùd n>4}u 2>]it맫'9qo{~Ȫݟ5׀w?t$;N盃!']4G??<<;>6_gW}ߝoS_weo]~{iΫߙ׎/5+EC+{&7: EUêE:h1a#ri:C*Ab`Ct.#TEUrሐnK7\K83EgtP!< P $6eZ0GsdܔjSYS(aqP4xC% !UD% pE~15qcE(^m8IGqE;U0{Ȋ$ h{v -%H؃zp$i46%2rݿ$đ MV%U[ֲG@<( HK] *h'#h΅hS5Æ{ 0QCʐ0,(@Y\oTMxx&& M {M*Q@Cihh]]wsL<1 M >/ylyhhYhynHiW!4444p'8G<֡4444 !$'_`&h=@8 TAo 4444#iq'tեu ލT+jASyjTu(fyŬuw-eBɶ*5W.m~5-|_">j־c[om+KvTak_"\<u},02'hv^ӡsA Ctw+y̞5KE%n5bmտ,cw*ƃEnY#~_Iw R֜(.wK*I/,닝q,l]:n7u "Bv[7"XNb1oECgWjr@i NDf^O9 ըiE6 vEp(:&у(VɂD&-o?5|c~޸79CStL\Dlk b?v$/1]fW\qod):+c)nxUK]C`?8D<t'@ޜe(W^?K=KML(ū^IF)gY]\Bdf78w'Ur I"bzqTy^^u&vC(q뜂ml/Y ?hgEM-qs7k rTp@rq!@Khl<4V\c[Z qCǵmbw ؗ4.dMk9*Z)S74oȣƽY+T+֕pǽ-2p(w2;@iz Qow##;:d7FKB6čDѶV%FΩЍHE'\Ķ kc_W^ Dm[>IsО {} Jx`r b\rm*pa7yoQc;v( iHCثh[79OZ{⇋ܜ#ηφ N% Dd/![#}Dbouw~E$H}o||.WX4WqLe=5Fpן#I$O}mʔeM *l[Br'ʰ >0\gVЇ,ʚ"uͫh5yW}Z}~|O Rx]H0 &P=zu LEL @"P`U3{tzwrpQ'O"|p?2vnvH6m{@&uF͏eŸ#cQg^C#r̖1N#q ZTw@ KD# YQ\HBő6,D\<̅\*q!F5O$~mlhbıFYOx$k;U<;|2nV |b_VQU:%&Q_bw_g5(&~(d'2hjCllcE ؍E_"/;j2bv!Zxu Œ8 ̍/ >eVЀWZݽAvğj.Y/vx=E~ȡm4ěx8"ĖqGQ> Gӕ,B)/.)dGu1IZ] *z}Zb Ma~):s1V*GL$F?s~X}6j4w*%熄;2"w ޟGTAfp^h4mV ĸDQvLoA{ p ".Wȼy\Da < sۮOS*>1浉J%zTJ%}x0]y=w X CJ]< S~ɪMJq"PXLq;tbdq)d  J3 63~u) TgPGgܠH'|=k=MxW͏)pZ ><`7Gܸ?ܸ1@~Ay1hV|F +<%1=0՘FM܄1 ,"CWc7P]s15^51*BBHöx1y3Oj,4!_"HԹjgk(l%6HF]US=м+ΡLii[d#BF pP3GΓ~S@ Wᾚ##r'Sg!?O0@)4Ia \~ 7K(Q&{b{`l|T-F :.I Ծꂎ7:F˸7 ybT.C Wa[.hwu gհC>оfn`[3fso%Mx"4ݔ 㚧/B`hѿ3mޗ:V>mEqŁ,>|)Bnwddw= :?C!]0@Jr˪m,zQoG1r䃉]tReԶ'ջL~%Z|~WDrԣ|w;O+Qs)!/q{0_&\|@zneb̮u{q7xD"zzz,jb9k~IQ3ݿ7;B$sM*ss?NbVuQql6 i1QR1k}o.0pX>< a1`F~Ӈ@d96 K@LTYL`02B"h@)5èOYq﬘>%C$As"yS1\AIt-+ XEnEj<*_$0'I$ ' 7erǣaT  A@Y4p!q$d7:c TG#}}H֦} 'u^Dqfǩ$-хze5GE"NG2IʈR ~4con|^FaH\hϦyjJdbL#sq1^D>I9J$0K@0Kzʶ^"?H,K2ϮăF,7owGzU6Io_GsRH*t4]oMÛvsyo|`# {Əb_m7襐Xu YL/ɀn[Ke$ylbt3lD K1x!2Dk6HV|.Fr!<(6q[h xL''oulI!`mO}w̾F4"- NLfq~XުäybjJvD#-w*7I&U2ű'"O$OZR=wr78:$PӓE^;'-B^;R/|01*āa>m2uKpm9OTc)>Oaq]Iõ&D&o;<[+3Np|B7WXo<9(xia!)F\{r2vjD{e)!!T{h)xCnps$08"oH9ff@B,`v.dEn] ?7BJ 
fAR*`[P@jȧ5q5?T,TVps\uD')-Q>[LD'I" گ8FLX`+`?U`TUuxy0AED-y7 ʥ֬C~YQ3yʪQΕ敪D>b/,xH3alVTTc@WB 68s4'v[#&m!NpNnzS ĭnD$FŬP=čhDfwCQ;C^%-rQF `9kde䁹g  Dy`J8=w*sЈ0;uQ;C +61W4$CZF?֠oSb ";# f-PڮDg0ffodkN~h5hhhhhhhhԈiIENDB`golang-google-cloud-0.9.0/vision/testdata/mountain.jpg000066400000000000000000001215741312234511600230310ustar00rootroot00000000000000JFIFC  !"$"$CXX" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?TUcRuJ-b'HZ6Wg$۲,ms+(~=5I "/_z4_qҟ/_>|aim&m8>իxEto2e 0j֧'e$ߨJU_jH}gij okG袹X>?DI(SlJmiSӠ)2EK-Rw%6ejڅMj7 ok$Fp?Rs :MrUai4'=b:}6hnedFrH jp+B b1J€#4SC Ta*|,u@Ercyi B@'bgDqsT \U\5覓I (HʰqZn%%t'fEA.(e$f>*F*Ҹ)v(EaZjԌ)qL6kYZyH3 W`nȰ-rR?1|ي,N_KލU 8N,`'H!**2؅V X9j;.Ug!BSy)*vc);E\aGUj@Kr#2yMA ֶh%xՆ)iU~U-wXtK!{]-a,ǠI MWnȳ*5 c Z̊>S{qS9BP}"wh4Cߥ+ } ITT5SOѬS[kueRc$z MƓnȵ7*}U@f=I)u "KKYNA1 .*rĬ&>8J*C.ړoc$4)T21A($ L݇;$iQXv6ơ} dK$GNe8T39BQJHE*@SqR"#)JӸȍ.߭H*f@'ziH-LVI4.{ԎB&ua d\) 4e~&u EŖ{MIsXkp q[Fg%b:)hW5 ϤX_:JYkW(FH,p=œ&Oo jͧmtX[ěJnRBGc'I,Ev;9ZFP DF5埳3JЮ;#o?aеe~>Vkn5eV\yiW5x"A*2؀k_GY𦑤5=/1UTGv8q&ȹ?MLՍ[1v~}h7B俺ݭ>`HE+~2p022y'I7q⮿Mf7ux2MA΃;Mh1g8r:V>VRwq 7,mX^Mm-ݕ%;Y]A/Nkȏ6E5xOgvt~UbuM PD3wk$W8k*Y=ѵ03?~*j 9ћN}6Kd# M<6Ó |߆VNmpaaT8s7&J!QhL`%ou ^?iÑZg[/Z5މhyXTirj=LJ-CI4 lc7dîv&y<: 43k>ޝyA83_fn03W$?|-%릋 {q߰.nl&6Xphb7P\5A;\xYcm$jnv'GWdVU] f![1쭫 ٟ|]O/OG:iK!dKtT3^-otKjmyCg F,PWOxjv(֑tVK,<3.Uq2;ꮼ[<͑!vƾ?Ƶ?o*O!X0P"g?OK1ҳ1|>$@𕧊3 w"n#6TBq)Һ٢mR/xZLYDemiӵtGK^qKV?gǺG}jEchn-_2 ʬz592xKFRn i^KQO VNm[I 7yAf< /(~=+O|3xZ߆4O kOPe+<b@Y3'Xxͦc|++{ses $(BpqP֍e}o v Ct`0e9 7t=hMKƭ{DOm"7 I d)EsG:a]J*`(8+1wV7.5kY$! 
)MW?KfƁ@thp:I@=~ o i6 &pgcXrk[Χ W;"RS8ԥrvr*6jFGB#XA_OhGi3Vzr; 845AvF(D$lz'4+w&ue-4w2br x'UWs J4N.~׍y [>6fO/ ?;q;Mۛ:֗uYtG#2FWA5x&U|Ta˭^對[1ľQ{;ڟ|=|3gWdq⬟rKBN~g $.|+OzjK#D4`QprҴ ^9ur'7-y#}DT;ߴ7ecXi+b?2K0qO?-ub*J2sablw^7?Z~ luK[25el]De(J)Hy<>6E IĒS"S|jR1s^T>wwÕ[m^MSğQhyY6)\ ^;}࿌^υ=@M?N${^(x7`?7={'~/sFkmBNv@&78xkS>{,𖵩M_j%̘|@IJJ?U 5\}6:]߈5]N=`gCeIok_?fxڵA>l+b*UiɛӤ>=t5?N^*|OHHZ{R^Ю5hF%(kL*p+'}ƭiRx:ZψR)0*<@ s^.Ӽ?GW-5^ ~rR"F㝡SܟZjjQEgKN"K0ċL< =ZJ}6ymV={B.t i'ҥ]&P3̻fs^ž!:{_4LdUh# $I$q\Wmi|iơ'đ[Jì.]T?՜\d}JjQVh>*ka⸵^}n&ĭ#G0O dgEy'_h+MMm%ֲaLb%Dv'_J|g=O<\jƕzosj!Vk|K4w~ ml|4_K#ˑAOkόw6\Glu5>i,g #R*Â5߲G5"]kĺߝN/9)$Wp=\D|`Ƞ<9Aʢx"7tʆ9)EY{8|%eu4e;1P!$^wm3 ţS[kXGd=lWy[wsq8mp ⇍4M /n<5"ou1Ȱ2rAE,UJP冃YOSǵ |XŤ]'Yo hua |z)'ڱ+yOֿ}7WR6́GlB[[kP$X(Zƺd\c6 bYǁs5rHKT( @BNEoXxV+K]>uSnx#r5}d~^;ֶ?+\TǴADWNŽZ2[ ҝWMR?)+h˽w>ak84{{k,a"4V<(8ux(.<3OO{r[+> ;dyu")+hO_{S>Z̆5=Rl*6UƂI؟6DFQHB2+j^𖗬:$GE||~9o?sה=,sq\@ThpKx m&*I: |`4 C>8j?QZ#h/cVӄ^~;wdt?߇):v\ IjpNq_:?ۿ'Ȼ?Mbܑ٣^j|Yv+r(0b8'?m杦Kc Kȍqr}FXdsSR.VU:y̫ p[X'Wi?_/՞(⮓_Q-8Ck^rHCYkzn r\"I2>$Z1Ŷ;&yRq?릝i^?RY>ώ2kPd.X:햧yoyk$xIFY8e897liRgGM<+YUkᐜ0$WIJ^!*?z8H8,5j/__Mk=Ֆq4zp!yx<NgWeǡtҴpvWDcp Chp+#k8't(Ju=[r68s#p*9u0~f<;~.8;i,/:9saՁKBԼ|:k]Ah`M8Y4NBn8$bQdWǫ.AL ZIvcMӭ?+~gxEa(E-Ces֟qc:rxN.52 EP `IzM4ƚj_c?O=m3V/ֿU5sfuxǗ6sXŭM X[NeKHdLY; I:7ǝf]_Tӣ\,A #&Zq$O.B+M?ۋ_е{o'Zu-n_-v'-s_F_xp<]Lm5(}?ޯ)zs6#W\[#N_SqF/{BcRcKKxm_,~w3sZhWFf>ا<`@lzQM{hPdFJ[ ڼCԑOj"K0eu7r:^YgGz&ȵ/My)/$ɨD-H-A?x[?o:l!ѼW}mih#gyuø*9tѼ[hx{@եkx(F>^W*xg#:idou*iT33''k(z=|0Ѿ=ZmCDm8Z=Bd2yV$*`pHnD?ࢿE4K>,xZEɅV 4oA郎2(>x*UNc $g-k]׋k]@ji6f ˴+Ǧ3^2ˮx{Ki&cJ^= ~5O]KcRa\6#7VHoRqzשφ[fx3ƚ}ŏ1Ѵ{k / XZ S$xDE*zc: 1h?» "_mu McWQ naA,9g (o${Ɵ/;ɴx?\Tխ>M {hSM.zƚL$ڹ^gO j>$I 雙HʖpA+xc[0O\ > A{G妣|i໵A$o>pÃ}EuW?O䃧?1T+'cі~|3+Fyo.Yagb̒a^3޼᷌t-SOgmg_\LGuh@e.u Mrݞ*VO'c.#FYF<A? 
x;QL:EZJлbQӭq4Sׇ櫧k:nR9qS1©'?^uO|EixO}WӥnHʾCpG" .i*43ux{X$-XG^=2*FřI&O~h]A>ao[ynIdX]du ľ_ҭ[F[&' GOQHgENu?JWA`ɶ ^xHMOG{$;@78\t_7-Fa](#(/F&>'߆N|3`O }h֍m6H&ӕ.ۙ`B+7Dºo3kx LQs xZ&*TI`WTп_] .ߥ>H9w0%k|ۏ hI.,cc0Jb>@|3A[Am]ZiZ\aA/ @xkFb1)*x zJ] ?h`s~֍4r%L%kJOmVNRH4 UJ);'&lVS>dU۽ribʤ!xOuK 4eeIlM7 2]̀H+nڹV5r(7PЃֹ7|#n<~XuJ)q]2e9ԥN- 8"p+#S_;}oIc[Tfcq](UГi3_xNG/'4GcԵD5vnm [(Kn1±)>PjJ\PA'fFh&j5ͲJTzRд}3Eleemj1P8@Ua;Zk[kwx@ s 8|%# 04n(E*"T`<-?m&*# No3\N-+!fd5EѴoivJ[B}N5V*N&ٗhZ6ŬiV:/0,~~GJW5\)_G]O.J"c ~hڳ#jMF0%*="mlax_vCk&"]zJ/?*/o.*9c9/g%j-yz$NGE1JgVK|P6oVMʘ-={ތ$ʤpvkOo)Zx*\_]ʙV KXmmX8Q xwA.ƥi$I`N+Sm;jMs/ ^(/  lc9y<HYbƈ*a BӱS.: 1fI9$sōvv[0DGVQIl}vqg;'d\oqm d`OvҨ@E/ hr;h>fNI:|D|)vDZ\cxwBғJtl#l4.0:Px# hmMdȮ-!8'KgÚugp 8q f{AԦ3j:&y)$ jmҽgiVDzunۙ-XRiއk,ΕeF*O\RekX\9ؼ? h%%MdzGZ/ Z"֍ʻ$ cF=. 3*#F:~֍4c3RJ[Ҕ`s׼/ayOzd\7D>#:v=~ǿ-ao"EE-d1E?˥djUilmKRj/sO XwK,26I'9xQ]Fa4'I1蠎:R8EY#)T2 xsN[?@ҭ.;e8g<%_ iDidFf>ƺ J]8Fְ{FO hrY"$Y'yc9|7:D ƭc3<->Xyw0"R7<3I,bOӤѡ6 GcЂ[괄R`s7V4^ڮi9B(>`k3NO4ۥΑo:h? iД3 3OͩX7qȹc %՗[9Ia~ Vn&3SNUq\,^!Z%jnCV^0 k-L!V(Z6ӸRaEe Z )\DxԄRd;~*rӸETR\i RerSBi Љm6ө"D!J#)Z@)\=j\Pj]m؋)اJCqZ-(ZW(R@+Rl- JQWijlRmr)Jih0RV W ZBZP)iBҸbSe< S֐"R7J Ji!"']ZӸ@E?oҞE# .2")qONNG\RD[hR<ܠӸeQi6(DJEK.R#+H"oҋzҕ6RqT^.ڕE%EMSbTiBp ⋁ZB*b)Qp-.*P)QqX-hȊm6p" Iyt;heS22q)HR\D>]&*p(jBQqXnB&)~6Q JqZ&WG Z1E2J򴊴n)v @b-(ЫRAJ.TbIqmUQW-SNҋZ6T(p<\,@;-~\,A.(;DE)HV)viiCvXT))\C%IVxT})NMKi\ЫRKT)hbZP*m+;aZM*(DRTaE V\,BEjl}i´µ.1M0 aK+IE[hehGLTPV@RmRA m&w *ҕi\,GI+J;!(+S FJW b.QEʱTRmrlGN O+JV"-2PVa؇m(Zm Z.MK\b-H-&aiqOi\meMQp JSHE)"+FxZpZ."H‹RRm6)HHe *]\,WUjFRRҐ.Ei6*rݴ\RFiBE&*meq؆&()EG*]mBB-#%+l(Y(/]aRO+JP+C-;vъJ6ҸXm4 V`Sv쿺a ӊӸX7mN“\,E|ʘ-&)s!Zw "RSM)E1EBR-+f/MehdEؤ+Jb-vҁNbZyZ p".ڗmi\6TihXJEahvѲ1ZM\,7┊."֜? 
.XLEhX.ڕEhBV8Z@(X *R)6QqX- )Qpd;)TPEđZZ5[heJE[(I<.!"mLH  RQq؄6*q֐ӸG%J .23EK.+mQV!(Z-!Zw H¥+E{h RJ.;2EHh Eb2)K- PEK\Qp_TRmqHQq؄.ʐ%)hj]f)*lSv)\ ր ZhZwB-(E0Zf>+ RbwTivҸDZmhX B*]}\v"(ZpZ ԁh+҅1Kw-j\Rq؋Jh6)BӸC\}*BQp R*Pi\,DETiӸX-.ړm.(DK1Eb}h RDXRK.SD8S젥\)6B#5IEpY)Qp6B)V#+F*EQpZ)QJb(֟@,31@+cF>P;MLM " m!Z.#+HaHE;mRIJZ1RINLT~hXj]m!NeqXV*`”zRm0J⋕b (RF.+Qm(R2b2(bAZP(XqAZW2b0( PRbPmR1Bq،jR(Jb"R֋C-K .+PRe"Bؤ N´TiHq؈- 6)+ IShd# W@+NbJ1Rb%;H)Eb-T(se(Z-}+XJ}\,3),G@})qJb6Z1HE Th=hXoSEi@ w %JV-b W bm m RbX-J(DV(Z6UqA)hr Rl+E{hO (XaZ]JbQRSmSqJ(+R+E`Rb JVQ~) ӸXiZiZhXo֝{ijcHE+q+SeC*I6S3mSVƑ)1Sm;Z%(Z."=(6Eݴ4E`"Q\JF)4-HREf@Xn(-;AbTZ.+T}") IMXn)HKJb1HEH&)bJE&1N;o҃JҐ SN7SȤqnZv)vQp)~EҴE),Rbst9FJVA@R@&(+N.i\cqF)sF(&)6SL9FAH("VPhXhړmpZ6Ԙ\M !Zx b-Kړ\MV); ғ&j6Qp}(ҥ^\,G&(b"iv,T-i3%ʥʟ^+hb'ٗ-D ݫ16m, [_syԗQ/o³%"?[֬i'FGj~*БUv[ѻ9Wϥmi PjP&mmP^NX?іdH .("4֍pKn)V1Lm\hf}RIRR4S(4ԎIRi,)4fiۨ E4)1KEks +@6ӸXB)1NJi\)W8S$n1NhRzP;N&b&"P\v++LUb),D–i7m*fpE.vR@A4@ J҃J +J;41@Ti-'M/JJ"C C%2 OҹVB$ک8{I y?Wm,ze+\kN?Q-=J͕:=]JQjed{S5$j ~ZQhiMV?**Jv]M%i!'5"TF*h+Hho$tvVVܻkr=A5;2AJAֆv4x?_-CawQ\*$QCqH*:P+ )qKEm֗m+(RNaF(ъpJP\v#ʴH@OMUqri0za1F)i RX1HQqMSE+b@ku4L-4!nR-:2@PQHh"h&\v~K7}iXwiOJvhLc)BR7Zpza4ҩV\Թ7Q\P)NE%R撕 i@9i15%&פ_(RyQ Iˤ'|{iܢ1Z_Ji6ͮ5R/#CډCIwI?*emzλ̹ؕ6s%ȗ:D])Ai?{j}ԫ[GWM*{)J%1][̈F2.ϿM14=ELiMl!Lt]\_>?U{%ٲ0EVu9 7ZcDT42dU$K&.jS\wҟRdԊ^Ӽ( z]H'M;Fi΢º$ 5 t2PSTFޝM5qXLъU \SOZZBh (W}hQqX\RR3HbRQ֘h"(^-!4f4 44Z4E)c@ MњJQBPh, ҃A^baSJL-Own>D~l"(K#7)|r(``xT:Vlf|>5KХY#(aؕ$V H%D}R#U竦X>eJ2kںYPH*Nt}M-Rf\1"j*LZ3&BTbt{uSU6ɲ1eO.ZZ%#Q_.OWe%a#gFRHyVC㩅n%;맛RZyuM3̪'Ӛg&/O;QX&ڨdQsOP-.P,%TCב-S-ST nښH]*5jb9D0i'$1\\(gA5-V{_SCt4gAZiW%M4E34ҀP QTQLNJ㰔ZC@E.h h KPV~CSHC5 yD>m~!"=Ybj7R;(w(SkCS_ ƥ?Yp/jiF@6\gi?LaWʈf=IVDru*.*rdsOٗ g?>?Q⢓}\cb%1S.'gJY*G13]TsKcM$'+7҆/Q&Z e?t piœ-LM+}j:m BC*K Ji.\Eofju9Qa6 ZEFM8)E&JMqkP)H zM";}yt&ʁzNå+:*HܥA;V"Q?*Km[ W#XzAmQ]n>RVK7Q;)Nf GNYO=_'ؘgSMvQG쑋,.M ZCJP%0Z N%LX]|&M ߻LL i4F)+AXu)(oҊC~CәvT@=+~~Gˢ,kOņ:PqBM U6;%?wT҄ʒ#vJEyTҬɓ!JKҰ\GQ3VF4>b]JpJ1NA䨉#"}#ԑCEQzRjI?L e1U7Ubc^õ̠*SVZҠ0Q.[ X P"~cL H ̣u>y*Dh 
[%XsN:r&fnmk7}RKf2R%Z*Мq^ʗ~(tO!D*;r"8Jhbզ)6P&kAJ[_2,ڤKَ=/[Ir^=ucpߒqiZliKimHUUO%+?^)je!wNlR*Ϻ\lsHJ&MXj?j[e \ gYGc?\FvHީUIS&ZK@j ԌDg`߾M+HGLLqN:i:RF:oV-)*APp+'#U-rWJt|ҤJͳD@Jh*Ð^o#L[O.1S{%4*TlG3mg.!.\V!Tzn$!i?,W(JWqXiZFmӶ%G=+FeN*iʓJi۞غXNU39j|TlE9*”)Աz !eIa#S#SwU"Y dH:zimX@*CDf2TRBG?mGS|E22EQP&"TdlG}"]h}IU9)X%W+G˦4))(loW8J[*)mj'Wm7e5QFLz]Mk\1UY>&S?ۭi{i[ [40D& M2ܵmisSș&C-ȿ[dfUN^X)k%5=><EtrES$uQ^W4(iB%8 F1mԌi7RX4 'RHEE`jq@ؔih0y R3T(`Ph42-3Qi%P@LR ~CM/Z2@)¡/@i1L K())׭ M'}1Jnt3禙h8&(&yjizc1Z)P; :Jhi)RnRmJ!j@ih+T PPiq x1Hv#.~";niivҠ; ޔN~Ԯ!o&bBNBRJ."Z&@h&SAa ѺS5QF}]i*7˧RO HV )*0vSP1HiwREXi ԉ)*qLҸ zcmM HJ#z]G0DХ7ȩzR9b/e)3K,1R)VK=qX)vՂR;ӱV)[iʵ7Ji ӶH)J`U^ILIR3=D!1AAX(zRS !)_iѺHJaSiwwR@ IN6UM$/iP1ъBi2Lњpjb;e5AB Rh%1RPZp@Z8-.b=ԅO.=.jzk)DEC҄n CEMTݴ( ZpHX@(7{ӸZiOJ.;LHvRs(QZ% RF\u%hErI*x)Xw.,yJlܪZV pRo)N~0)٨Sw+e$7*}+ JB\a.JMF*#֋45qZsERV;"TbJwXqyhBQpWu^KuZUtTЙz7TM3J!B&Rf AϿRP)*1(-G,uRR+P6U&LQR2*74:ե;+ЫLZ}DH֋C]RR lq؍^%.IQ)UZU*d:_MPyʑq#}A}?_94t~wboSiH?YܪQ]PsSѪK}N| KEgYoq󞦎tQ,:fijzg6tgJ5}-"}Um6MNиí^~?>_UE}du`z/^|XW?OՊ`s_?VUOo U= 26W㩭۩q{ovGiT%Y4zape;ƆI ҄?SȧR3ʐFBpPv})oMnD*t1# :a>]BTbZʟP1"zwنhqw&c &YYH.)X-֨,\\)R ErFmn4H~̧_ܨzyJUfzwJ8<]hG,ixJzƕmAŜrMMB{˷Q뗱uiO>j]MןhYHw9ZYJI٨7{WJ_W"Gb>*95?2Bu٫y.IUJEw.4ߨ J"dLRr&Ko"HG?J?_\H VuKdY #Qp幔:ӗMGը4%osBTٌ-g`ŦZ )Vhe*iH~殉-?Hҳu٢ }/Uv=;Z)kVl43OJ*j*}/ L8RΠ-f;JH U9C~QLfOLJ*T4GWC%D}7rI-L~JbJo̻R |Ĵ`Ut|fnn+QzUegjlGEsR=Sڭit,MˢJiE^+k􆨝e]<4ӧ5]dY]rŠ8̩Pb$UY.6J]U#~}+h'ܨYw1YwZ'Fd9R}b҂HQ{WԚXO/UU}ߥU+n-fOQk.+彬/Sd)T$t}}>@,n"*Q%+{VNyCN=nJMNmx]!ߵv "q/́]dVqLurJ,_Vu_Vt"VnhqiiǕϵX?+%'e{4RLMdO?lVUt_SJ(NQ\]M7}ͺo'jW?j6ѯO#ԖylGRho^o$~EVϓupܿ֒?*n*o}u5\[۬VG"=J3Hwּ:t7D..R?_\͓-UˬQ+ܫP'5"^f:;q.%up?ZY~`yu=Ȍ쪓[iu>DnMLosU+"]قmUWEyMuBn{CUwUw9tK_}k$rqRD'U"iu/(OMsViu ˜mLʫ=/X_ u_ffQқXeߝG^bW ^8OЮ~Rs)Y)^Ywj&OfQk>=/[zB{57'}Gwuoj)5ثQ֬_ZmrIW%icqZV |=M{pi1Eu|7_Lz~upߍu ݯ2&oζm4+XȲod+߷ﺵ?9GGSf lkZᧇyѮ?c]AKTcOe|+ܟδ"/tENTr $TZHjnRXER,U`GN JJk Y}R.;b*LUn*=4Id|nvWx ~/x¶)| ea|Ҙ EI0X~N´5k4R]Gin-X" |x)R>#Hk(ml$u4m#;LlmgoMSO_x_^Ŧ*XEr/ :bgPU02Tw{*۪,uJp*YněDdOtiGlKRɧ?5 O{SZj-uFiM"njyYQ^$VV*ENfA6)X)5kFt ,Njн E7tSY<|UbRž8<= 
;ugs<i2!rҡI2K^$y%؛Vљ*>Z#&Un3ܧܪWI؆⡻*n5WPV𦌥2kꌷ\ҐbtqNUrPo?V؁zTV!E78EGX;o<1W{^>/U}k#O٨hR1W<h,s^v`ִ}P?ilRcZXwӒ:|gk^)t밡79CoÏ7(\:c(Im>-GKf)F0*ڞaجJ)\V+ʔw끚 o|Qau[]V.h.HB@=EN xGW5߀.mt9o&{y5,UI|@vZdO Op!XqJrIN+Eb  LPE8P 7-fۭI+?S-v;^mj?eN~xdq2o覬<+%V6ĭ7 zlq}By[oUc G)$^Ss ž(ӵkT=iOtsLj ,I%3zDuu52[4-^8@2qvh-ƻYZ/m~M.-'}y8ܭ͑A;BY6o* 0NV<V\SAwci#lO*I_H/kV\ˏ? ׋[hYN.uKEa]8Zm;{_P(uMv(##Y0}uzocDİ%v/HlRpH&? A\EssxK+0cm sکIz[JF~Muni7nl@ #VIa3s4mH:2"u=2RKkw O cz:S+?j YihV æjXB#GL ڕoy)lgR˫66noVŏzi%a%pY @%p(W᤺?$+E I#"L'kNj9wyFHPpkҦ&{DPSxg[fg+V*c/X1N ulW៊iV !6 .,g쁹wV|VBr$d'FywQ>:-I/|KwRK5H)Y7H^ot7SFUueG1޺_>թk$KN^+;_ĺ-z;3#Qi\GYx[‰>i:p<}F%ǺƎQ-_D^4uC2^WQQ;x(v_^\:Osx㲄K!o-'$nIj.2 Xܬ2k?m>v<B}2OFuжoױEhS@}K.*kV>X( Ž\#Pj~/д~o4o1ϰjhM&GnF}2FVV49iTǓM|K~xuK]e,b"֦8՜i.xZ~~][2&FXrR5NK?kᥔm{> ס|3Jҵ+;m~om`h#j409' z%G3/!_^7G>i@RgvNqzcމBY}F2V@}W|Wÿ^ ޿M6 X%Օqodf"=q.c&Xt G0Qdžt [:MbHnvt=I O5O]K$J19-3{ Gl'FPW5oPf]G/.4}~)hX`s^VB5sQeZwpdqv85Z7? AC}Ki!E4R!YA#x"3TyW:A# ?g^|D}&\OxOXѵ-" kH&yC[7+˩$c}_*+qzBjF7}xUlwBdefH8dg%0q_GW.} aidR@D%'֋ǟ[rweنQp\$n}~T3>,ү<+ZwY^A-42+* wDmuWr`u "WMz#\mE̅.[I$:͂;zS |Mx]Y4F&tֈƅJܧ{#OW_=i%\\hVOo,-"zu "Zjskz\iI!Pt,m5j~&CIQ z41l5s}}%wccOt dcМx3E/`Xx_Nfny$ V̑2Nd_ {U׉Dpkћ2lfK*=R8ΩPrN^?>4_VaID1[ iP@< xn5m :od"! 
6C&TxIG촨Fyxֈ1W<z/Y3]$b|U}^!;D]w׉#[c`eN;/j_xK:NJ?-V{ktS$AE')z7$+_/藮'k%e}4 Jo|F5о"k)NÝjUӮeK[)b\e#876׼ +[&kCq'VѲK3ā{a/Cճj-m"i1 .@ݡ(TIPWQv:o]Ewgsmzkӫ~G%"Q+"أP4?Qj5=òWdOͺ*@n>R{ZΏPyЃ|=zr})S-w;^mje)ȏ6E5r<*ue`;hփ nkht@?4Z蕭]rom'k%c z$+/蔬/&}.8[$+/蔬/ן7N̍dO.{kZ-uX/#;%!֧_H?_w<7!S0]-A |-w_n cW}%PQ%_xO,EF\/\К:l@`>Jλ xʰƌ3t I?h\h=3ß~x{M?|H*ue#1/|)k K_|Q|ucq"͂8/~O7G'V/j:VoWwP7)$^vGlװi_+ZM[?sI ȳcលw|8ΑwcZ[NV\_Z6>h5 6U>Ekx{M,DQ1v:v8" [-,8-v{OݸA)QWP&߇9, ?'>~F⯰[21?xXzGp?fZx)S-k_BubzC8oJx?@DyY׹W~ǟ)?TIVu[(~ơGm %ԓ[Lc #?OyFDt?t{[pgl1u g?C+P+/'m[~ Ewu.X!LgΨWk|SHյ,s\$C-ԃ(8O]ƗLU[M>&HXߐk|m27έzle6#y &F {׏[iij[/7)$P0$) ?k(>rUtF$wd~k~xnk>C }k~~6ڶJ![.gn5+%s>&|3K[ {@ur(q߭}!^e_H+ߩ^G^zW4?}4oj|h]6Q>1{ٟZ6nI>Zׅ|h-Hs'mRM {>vȚ+(?i/&sw3Kц~A<5@ #_$?z8\7.h/!^:~Dr?/#@= RD(،)ef<& |J_4w@RW˿.=$+*{}Cze}C_+>ӓ# 9=}Q^c 3Ҽ/gMה?x/'J?j!zw+1${/:W9T 뽏@~Ma-?M׮ב|SO!]5垟Q<8E; )~ ^+]sLڒ8 L@u].F=OsYX@0w $'a%8\R@qKE (JI.VO'T3+\ǜ_Wk>G>vv66V6q_zp [@_Bubzfx#h@~_Bubzf< K_/\!ch5O|9x<`}A^%t]"J=KRƔP)jJ )GJ*@ 'K6uip޽ 3(e>>`q2o覮#TO7S-v;^mj7>}c$ &,@JA?O/wD+c/ke'c&ؿeRk1m[ؿeRjZ?mXR8mHTv1]H?_wOSQ 5AYH?_wy/bKdH=/M~xIlq->ޛ!l{JO:K55On6S1kk J!T|I[~;2~WBcGRk!5`mLxGZtkiiV7.?P=+Ӿ x? |98a.L$UՔ)<G.Re2v*5tcH_tg6m]9tcmaU Oq0{W筏@jxşnsWDgO5xQxA7>s?vn<4^ԮP7\vWE>:g6em%y#|r)V2 K}ޤliYp,tRk޴&yE$do]>=1]|ھGrk09/vn=WzotNVӮt{XkE"dY$@ѾJ2du^S=ĿIitc1}B9}77֛5lj7[RuV[# f--w`|/>. +_ iM]jt<߼;Bƃ1.I=~3_ UX|'JWV>.)~q[;KmW{3|x]-al;E |v _Sx@Q˒zA_"ҷ~%xC5ImrPMgp`)e\zko@l4 D[D ;#E 'ڔzTq7ۡ8+>]Ɖku7@|Tn[|gT+8|Nca'.g%+jhi,{zZ`oZ`|e֋b׉:/ȏ6E-xoZ/^&(ܼ ">`~:_5?y;q-G AK80w^ x^kHq}>3uo;yć)|]ɥ($?zx\A? kp׭ehڔ>u1eHo ᡠhyp\eIǩnVOrHI|@m Pz6G^@YUXZ[?5ܘ[̀39iVDžtng!uSGJU?k 5L_2x%?*bd.V7Ny_+>;Fhs`o0> Y&u{x_"ׅ1?)?x/'J?j!zw+1${/:W9T ?뽏@~Ma-?H!]c}ow񎍨H 6Z\b0̪{BW={5tuf&G&=T¾?i? 5? 
J |o&"BrV92kYຶhdE$l]H`GsQuKe}LXgl0%bQj~Ni7LI1nQH*ʋ*]x#Ï?=t߳OE?h֩?x,Pؿ{^ٿG_WgA %u?%.{e|9kW' ׾L+K^'5}VRzevm(a{ޓ=|4QgvO]Ykeʕ,yv>,WW^W7Hσ iV 7mm[KK p@\8"4G^`B'cԚuZiwc/ʅp te5eʋ[C,If[k+x rB"\U` ȥg#S-]Q񝖟gknlV(aPpnMcY[|Ef[+-sY1XCcr)ͨYAxHe| Br5}'{[evnbr|L,OZ]O'۝7Q++^ Y@7YN!$U;߉Z*q&-eJû?W"gڶm֗g^!$+qčbڬ5wgևhW45Ǘ%oh9y=QG?گ~ǦK9gv-IpsřI$Zmek/ᶯu!O3/,q]j _0AsMV9VEnuPib=y- >C>:W-[J}OLmON[AK/`V]4+t/ӭmG늟tx}MmK3#@CQ z&5 c>&kW:l-"ɱBǻ=+iBT%jy]}vB$}2@*|&Unֵ}Jnm^4x\d`zJJhO!.&?Vn?g#Yxk䰧byq/Li#bV&<9ox[O1Q?+:}^ixc)8xvAV9m{}GX|9QCIr#6%LN} >.^-  ᇴmTb<Dqwخm/-\W0f<剜m5|/+7?vijn3,->(ih4VҺʷ$($\0+V2OFm5)-Ycqq\xiC_E55Ep =t|y J:5h,ո $Q؞3'gI?z9w_Uis08ܴ)|IRXUaJWˍNIǥ;Ng"-E$hB?q 8x}>`8y ^_Ǿ*-/#[u1Ş ]u6:F𶪳|f$c23PrNIGlGw?RoWzk%{AoSD& djŹ?N+)آ$x;X#wK` 'VYE;^m7M{k+HR hVKB#UPL MKΥZ|I񎯦5Ė9WPa ʧh Ewt?GgG#E5"Z@ud!HYk"WOju|o_çLz[<ǹD0q9x[hxU݃F4QgE4C<\@+; q-s[{x,O@rhSXJ QWq!3NbXy[6Ew.HeB){OH=+ޑgiY6P'FjwS7*ǝqQ]jvV<WyȌVśԛ4"QESqP];.P?;1=z8@|+Q֮oTkV῅`WC(XݶkI_f kN!"Ҋ*\+NEILThD)*(0إEH袀mP .(P(EEhNPJ((){)Ҋ((G+|})f7/j(En8EP!إҊ(/J(ҕh!(bE SҊ) 1)R( (&(p@*Q@P((/ZUQE golang-google-cloud-0.9.0/vision/testdata/no-text.jpg000066400000000000000000000301311312234511600225610ustar00rootroot00000000000000JFIFHHC      C  "  >!1AQ"aq2 #B$R3 Cb(!1A"Qaq2B ?Ӻmd=Uhʥ mx+vUp~ޑ ^&f]qsd<|7m$.-zV . aad(k~.ԂQE mn$_)bN!EfJ\ZP+{>=3QpXW+Y}-?hܷU H􊊣.pױUK9%h*rü l;CEܝlN;ocuH*7NRzVM)mVUbUC` WXg{ \XG\Z_dQH:Gٳn46"L|vEc>x&Uk'{=likrčz N/#4$$|5AIaf:}eRJ kxҢH+S>m)S(S̷Y| blC,Zl R[դu*WՎ*}⽦%ڻu鄼u8+Q?<AS 1K1NdM =c+u>+OqQGI TԬi,vEDh?XIR]N3hdݏ<{fip.}e>tu%]cHp ƱLj-6OEHKM&Q_H0"?pJ mkגJYȿS"7[iC,o692;{ O2KkI" [R)*q5XT榙 ZZtzƼWe@F3mh/t#jF6RhuMpM^>I@)Xm0Ru$'?.i &,UmM$ U;mu$]A~{kܜhVn{FJHQÒtT4ӓD4[!wXK *g,n tdulY@ bP@Ӱ4ɓw0ŗ3'?1IA^)avY,|3`S!G]E#[H*S4򾫫S}%. 
Xwɀ"dr j͸5 H1V .Owβ1̤3@-sea> 25H }@mC[c]%Vegp~URsHxfv^bq~L%в5 f^MIjy|q`AZe[e7G)^:`R[Z9$i7EqZbk J+3(6BnڋЕ:QR; XN/Xh'Q4@\2N].%R}' ' .In7AQ[Ry޹t=O2آib#U!54Ӊ v]%Gl4s kK"ۑjVz12.tmզ^foUds ^v3+Ўؓk G,DzH hT7] !$zwsC ]M1ZK A=7gvz*nnl4Hp[+{6 %9SGH/1)^F0o9Ӯ i_86Аf^̠sMzzDcHSFTUR=Gmt;DTr5qnc?YXqQ/JZ̢2R{oJ1jb)!S8z@VSPF17p@0?Xb^2ҕr(rfizURvh7 FҾ[ SiQCLwnV+D%CM1*QK5譅u0%i5'C}xSkBRviAZhFBcMc%]Dzs cZ[wʁmOH=3@xSS)*PűQkKCCu;jO/0w9at)A4Bz?~x -$% 'mɽT/<+LHpR&Z)?HAo˂Ը)&J6E]e4j(JO}!6e9,Zq'RO!LJU.A"YEe X* vxt E̒Sm"e%*X'KGQ|lpq#-X#82xqtH7mcnt."廆US#QkDtYTH%ȀTb؋zÑ{YYc uns&$ P7{{mu FiPdEXY!L΀meʁM+ _+o73q+ej[*)CPs%>u JS ~#(UXE:O%;8l]e CjVegB33jn1,1/E#ˡt2ÒM-:x!JrU.pҒ3t晲 M.dnFK9\rA&s8Fst4hR6V&b~L)'. g2:f[*'te٪S:" h5. 3ȘRR 4!BIR)jZ@;ūƜ 5'6Uzc&I(WTs҂ӭ46OXdEزԾH6F\S2B] BӐ(70^uU6v:{0+ʔË歧 J`/qcwƜ=&MmHKyI-."%HsO.92ÙsK)MYtx|:fkWe*NK-m!Y @7 HMekaoW'k w\x~U&AۊȧbwqI`N}a N/E8ⶉTMHEXScQ.j\8 I2Hd$JRRnBbV'E#Mi 4 c(B\=<.:: 4E5sl4(KBr ܷq};x34Ny(E\VXnϚّ훱s׌l? gK#hS)TI fIz/ 2ItehM,M4e/ &,%SDRԐ?x^$S:K3ȴb1'7J6* ??PmYCj_k@x!@!"}aj\\iR/fr@0ќDQX“*hpx4sF9|Qs''.VBong9Ԡ1CRsAYy/$X<;ĉzJ‹{Bzӊ֯akH$hy*.,J6'SpAYl~cSUq;fD'4q-whY'-Qwh+G@Jú/RZN\!>Brt)O:2p@P!;'TvEi/RD<`[]^'m}miʤ-!@8 iG)sYYNQs;E𶯲yX 9Ab|yctC_ YWEOIΥhC(X^ċgڦIYżKn^r\8ѥ9NylCi߆Z-"k\ Jz};Eėt%[|RʖЁdl__T1~$x9TLSË}22Y@2^bN%Xoń߆W~>n3jUZӹwP齯 1_ )nOErj+ẩvܖJˋFB)[[*/~NVyv,RQehX*nR;&(GI90'+:}fe]q(UEM"r|[Er6'gxwpn +6Շe4ʿ m %/ccCp^ < ꘁԩm榦S/)0BJ MɵnJ`;@Ẃ>3W(Ke+2ZJy+cr2cpMTT$<-rVq[9RDd彄edž;4^go\gn"cT Td\Ii?$==JRB}JgKaLNI?pyT 01(- p=a+y'&Z\GIE׾hTCѓ鈩ֹ~d#t$˝BE,T'Zu?"{~SS=&Hȹ،8T.;3atJgIUS] /%t94zi^f(@p 'U[JT%e6(7OLq//7KS  fSl#E(*xЬ}gf)C. e* 64oi 6 7ʕaڵ"K5"uSP\SM] %E+%Di`H8YVqL]\2]2)R6@P l:XVixTLʙT$+S%wS%Yde$},c:'2'H/|S[\yp #XOH(,/sotUxKUI.v/<"a6jAʤ _wS\zcϹBvQ@H'+$ 29f ĺB rvT9Q`2ܪ]4Nlo[ن6'&['Uܘ$,fpߧs^ uT88ܵ@ RU3*)l7+NTQSY)vU\(jEz>׈!B.d2SLv:^. ʚ?OxHq#JV)bC(3\lE#c komQn,H? 
>06R2-:Nۃ:0Tyv6)[^2EOt j W05bj8U6;)$~*8GpvPdW*SN[B#x[m|"85-H@!L 3 `uƖ;B9sf aة%mbs2/=x-$QEC(e'*[2@MN:GrsPTlB.:\)_LK)Lͺ7P ymxnvLftuM%tuXfSD2Q E> VTILL6/=IOa\n64:~ ZW4Z[<]e %6Rv;[R')}G¸mqs!|uj#8x8B9AIB-;+VNYYҬN)'+:+%Ozi l CmN3'Xl #_(5<0`JUMc*w:>!JHb s]5#7@HE}MbShLV8SMSeLO)MA=c?F.`=#(TJMln(Mc2izc):HPR ku[B7v%)7>E+0&Ґn,c !k#Fx'AH)ڞ\,"ޑU[ u 豸Co6TL !s} Zn:kPڪ.^YSjʗUCKCz|:h zS[zi7Mԕ+:+C{9va!n;rX8D>)\f`Ss[x%FT*m%unn) k{(ղ.1 -Rr*^a0lqM 7xxF+0bmª̗fYB4]Y {b<<LyŦ6b:\v􏩊[6,@̝>bO`a,,_7t|yZ$hUviV[al7^R˷ʺR2I;tոUCT9(bawtMRȺ&QQW*R5y :id cs}"v[ʔu$[NЙGe+\ϥ-˂CFòb Z6\;wYP>Z 2;;5s(\%S,!:e IVJ[:`"F}!Xq2ê̕t"2LȔm\:(,n-CB.b paMV*);"RiJ~j,ڻ:,M[ހ "v~jR(vPF`t0 ؝mջ4ꂋ UTsh V5k/^S݊aM.Qq&B h?"TI}vI׍Р aH'B6e&Uk:IDk4O deJ'C TcZx7nj^RٮQ[ؠd8ONvMYZ. 9.S;xJ)y-G2o506 `mEhu*o(]8b qqb/3JH 2^ 4֥M x'&fRmn~&nR܊!K} *Vα EE6S['oXTRROox \ys W$LFRLrɶI/^+L׀z DVTKR5 ND Ћmu?mhuY3l<[iVR5"qB0™mK+m6m>nJ@lr}H? 1 ɕKV\(4~4 %vp.!bJff]?*ۘwU/(CQ7Q \Ϥ)z${I.&[ZOhcVْC9ej%Wʡp=_H WD! d}/!`Ng>sJ[P{4Qk 3Ru1mRA:%2'rY͉긙:iG'9*Jq_}A%:k] k-*T:i ͤ{?SNG=rVic{5=u+6zj%(,+sjRҗ$Ū _fR'y.3$SoԄ5( /I9eH;cw:P0'9)xV}/kz yͤR<3[8YX0OK KJ xZNoOs}mW)3'PmqXaMqUC})^eLH5FX 5,R{!hT ǍȳKehe[jm;5B9Et[0;+l Úęաmභ77&ZSPx( &lP_/WJJȰ2 %:[(O/өә&YmJ#ʯF]ԄJVtu{vkOM<[SLBQ5z@V-Ɔ۬2JI$_6 b. 0{HbW&RZx$`6J\'_W,+֦^3*ilƼT6Ba¥zch0m R]ΗRڿ19/(/ "H} [E b-nUvm-:)>QӚH-5! hO\k!t܋92Pafʝi؞YISsyɒ(N1V&Sqsa U3H:xykli([nLZMҮ}}؝-FM=u(uEI Q[t1'?I{¨yt,ipoKJemŢF^A7ng.LY&8EJRAVD4XWI 7JJeKrui$R'05Z NV2:j RʀG"͝@m`q*|=35'2[&.ig澷^!W;)Xn`$C+<`kU;uBwb"%M;PyAY6tӪy2ؒ k\D&nog_WZ*MmFixm}I-& X\ _b,Ekhu`VhNfsdNa`?Uӭbk4)HIK } 7 /Eh2{)RVD[m$,.:)[-FJ` i}5*:R1kg(&u[v c&DFE_hn _XNy:j)Agk  l% KJԝL9 ʁ8.f[уy/5 &T|WcxL9hg]Ʌ܏ʌȣk6*<Xos$XOHE#23$X'1q0I">@&J"1nM+6d.:G#MkB2[phqmbe_] } L*%T Q nxl R˳golang-google-cloud-0.9.0/vision/vision.go000066400000000000000000000265031312234511600205160ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package vision

import (
	"image/color"
	"math"

	"cloud.google.com/go/internal/version"
	vkit "cloud.google.com/go/vision/apiv1"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
	pb "google.golang.org/genproto/googleapis/cloud/vision/v1"
	cpb "google.golang.org/genproto/googleapis/type/color"
)

// Scope is the OAuth2 scope required by the Google Cloud Vision API.
const Scope = "https://www.googleapis.com/auth/cloud-platform"

// Client is a Google Cloud Vision API client.
type Client struct {
	// client is the underlying generated (GAPIC) image-annotator client
	// that performs the actual RPCs.
	client *vkit.ImageAnnotatorClient
}

// NewClient creates a new vision client.
//
// The returned Client should be closed with Close when no longer needed.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	c, err := vkit.NewImageAnnotatorClient(ctx, opts...)
	if err != nil {
		return nil, err
	}
	// Identify this wrapper library (name and version) in the
	// x-goog-api-client request header.
	c.SetGoogleClientInfo("gccl", version.Repo)
	return &Client{client: c}, nil
}

// Close closes the client.
func (c *Client) Close() error {
	return c.client.Close()
}

// Annotate annotates multiple images, each with a potentially different set
// of features.
//
// The result slice contains one *Annotations per request, in request order.
// The returned error covers only RPC-level failures; a per-image failure is
// reported in the corresponding Annotations value.
func (c *Client) Annotate(ctx context.Context, requests ...*AnnotateRequest) ([]*Annotations, error) {
	// Convert each high-level request to its protobuf form and send them
	// all in a single batch RPC.
	var reqs []*pb.AnnotateImageRequest
	for _, r := range requests {
		reqs = append(reqs, r.toProto())
	}
	res, err := c.client.BatchAnnotateImages(ctx, &pb.BatchAnnotateImagesRequest{Requests: reqs})
	if err != nil {
		return nil, err
	}
	var results []*Annotations
	for _, res := range res.Responses {
		results = append(results, annotationsFromProto(res))
	}
	return results, nil
}

// An AnnotateRequest specifies an image to annotate and the features to look
// for in that image.
type AnnotateRequest struct {
	// Image is the image to annotate.
	Image *Image
	// MaxFaces is the maximum number of faces to detect in the image.
	// Specifying a number greater than zero enables face detection.
	MaxFaces int
	// MaxLandmarks is the maximum number of landmarks to detect in the image.
	// Specifying a number greater than zero enables landmark detection.
	MaxLandmarks int
	// MaxLogos is the maximum number of logos to detect in the image.
	// Specifying a number greater than zero enables logo detection.
	MaxLogos int
	// MaxLabels is the maximum number of labels to detect in the image.
	// Specifying a number greater than zero enables labels detection.
	MaxLabels int
	// MaxTexts is the maximum number of separate pieces of text to detect in the
	// image. Specifying a number greater than zero enables text detection.
	MaxTexts int
	// DocumentText specifies whether a dense text document OCR should be run
	// on the image. When true, takes precedence over MaxTexts.
	DocumentText bool
	// SafeSearch specifies whether a safe-search detection should be run on the image.
	SafeSearch bool
	// ImageProps specifies whether image properties should be obtained for the image.
	ImageProps bool
	// Web specifies whether web annotations should be obtained for the image.
	Web bool
	// CropHints specifies whether crop hints should be computed for the image.
	// A nil value disables crop-hint computation.
	CropHints *CropHintsParams
}

// toProto converts the request into the wire-format AnnotateImageRequest,
// appending one pb.Feature for each detection enabled on the request.
func (ar *AnnotateRequest) toProto() *pb.AnnotateImageRequest {
	img, ictx := ar.Image.toProtos()
	var features []*pb.Feature
	// add appends a feature of the given type, clamping max to the int32
	// range because pb.Feature.MaxResults is an int32.
	add := func(typ pb.Feature_Type, max int) {
		var mr int32
		if max > math.MaxInt32 {
			mr = math.MaxInt32
		} else {
			mr = int32(max)
		}
		features = append(features, &pb.Feature{Type: typ, MaxResults: mr})
	}
	if ar.MaxFaces > 0 {
		add(pb.Feature_FACE_DETECTION, ar.MaxFaces)
	}
	if ar.MaxLandmarks > 0 {
		add(pb.Feature_LANDMARK_DETECTION, ar.MaxLandmarks)
	}
	if ar.MaxLogos > 0 {
		add(pb.Feature_LOGO_DETECTION, ar.MaxLogos)
	}
	if ar.MaxLabels > 0 {
		add(pb.Feature_LABEL_DETECTION, ar.MaxLabels)
	}
	if ar.MaxTexts > 0 {
		add(pb.Feature_TEXT_DETECTION, ar.MaxTexts)
	}
	// Boolean features take no result limit; 0 is passed through unchanged.
	if ar.DocumentText {
		add(pb.Feature_DOCUMENT_TEXT_DETECTION, 0)
	}
	if ar.SafeSearch {
		add(pb.Feature_SAFE_SEARCH_DETECTION, 0)
	}
	if ar.ImageProps {
		add(pb.Feature_IMAGE_PROPERTIES, 0)
	}
	if ar.Web {
		add(pb.Feature_WEB_DETECTION, 0)
	}
	if ar.CropHints != nil {
		add(pb.Feature_CROP_HINTS, 0)
		// Crop-hint parameters travel in the ImageContext, which the image
		// conversion may or may not have produced; create one if needed.
		if ictx == nil {
			ictx = &pb.ImageContext{}
		}
		ictx.CropHintsParams = &pb.CropHintsParams{
			AspectRatios: ar.CropHints.AspectRatios,
		}
	}
	return &pb.AnnotateImageRequest{
		Image:        img,
		Features:     features,
		ImageContext: ictx,
	}
}

// CropHintsParams are parameters for a request for crop hints.
type CropHintsParams struct {
	// Aspect ratios for desired crop hints, representing the ratio of the
	// width to the height of the image. For example, if the desired aspect
	// ratio is 4:3, the corresponding float value should be 1.33333. If not
	// specified, the best possible crop is returned. The number of provided
	// aspect ratios is limited to a maximum of 16; any aspect ratios provided
	// after the 16th are ignored.
	AspectRatios []float32
}

// Called for a single image and a single feature.
func (c *Client) annotateOne(ctx context.Context, req *AnnotateRequest) (*Annotations, error) { annsSlice, err := c.Annotate(ctx, req) if err != nil { return nil, err } anns := annsSlice[0] // When there is only one image and one feature, the Annotations.Error field is // unambiguously about that one detection, so we "promote" it to the error return value. return anns, anns.Error } // TODO(jba): add examples for all single-feature functions (below). // DetectFaces performs face detection on the image. // At most maxResults results are returned. func (c *Client) DetectFaces(ctx context.Context, img *Image, maxResults int) ([]*FaceAnnotation, error) { anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxFaces: maxResults}) if err != nil { return nil, err } return anns.Faces, nil } // DetectLandmarks performs landmark detection on the image. // At most maxResults results are returned. func (c *Client) DetectLandmarks(ctx context.Context, img *Image, maxResults int) ([]*EntityAnnotation, error) { anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxLandmarks: maxResults}) if err != nil { return nil, err } return anns.Landmarks, nil } // DetectLogos performs logo detection on the image. // At most maxResults results are returned. func (c *Client) DetectLogos(ctx context.Context, img *Image, maxResults int) ([]*EntityAnnotation, error) { anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxLogos: maxResults}) if err != nil { return nil, err } return anns.Logos, nil } // DetectLabels performs label detection on the image. // At most maxResults results are returned. func (c *Client) DetectLabels(ctx context.Context, img *Image, maxResults int) ([]*EntityAnnotation, error) { anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxLabels: maxResults}) if err != nil { return nil, err } return anns.Labels, nil } // DetectTexts performs text detection on the image. // At most maxResults results are returned. 
func (c *Client) DetectTexts(ctx context.Context, img *Image, maxResults int) ([]*EntityAnnotation, error) { anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxTexts: maxResults}) if err != nil { return nil, err } return anns.Texts, nil } // DetectDocumentText performs full text (OCR) detection on the image. func (c *Client) DetectDocumentText(ctx context.Context, img *Image) (*TextAnnotation, error) { anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, DocumentText: true}) if err != nil { return nil, err } return anns.FullText, nil } // DetectSafeSearch performs safe-search detection on the image. func (c *Client) DetectSafeSearch(ctx context.Context, img *Image) (*SafeSearchAnnotation, error) { anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, SafeSearch: true}) if err != nil { return nil, err } return anns.SafeSearch, nil } // DetectImageProps computes properties of the image. func (c *Client) DetectImageProps(ctx context.Context, img *Image) (*ImageProps, error) { anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, ImageProps: true}) if err != nil { return nil, err } return anns.ImageProps, nil } // DetectWeb computes a web annotation on the image. func (c *Client) DetectWeb(ctx context.Context, img *Image) (*WebDetection, error) { anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, Web: true}) if err != nil { return nil, err } return anns.Web, nil } // CropHints computes crop hints for the image. func (c *Client) CropHints(ctx context.Context, img *Image, params *CropHintsParams) ([]*CropHint, error) { // A nil AnnotateRequest.CropHints means do not perform CropHints. But // here the user is explicitly asking for CropHints, so treat nil as // an empty CropHintsParams. 
if params == nil { params = &CropHintsParams{} } anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, CropHints: params}) if err != nil { return nil, err } return anns.CropHints, nil } // A Likelihood is an approximate representation of a probability. type Likelihood int const ( // LikelihoodUnknown means the likelihood is unknown. LikelihoodUnknown = Likelihood(pb.Likelihood_UNKNOWN) // VeryUnlikely means the image is very unlikely to belong to the feature specified. VeryUnlikely = Likelihood(pb.Likelihood_VERY_UNLIKELY) // Unlikely means the image is unlikely to belong to the feature specified. Unlikely = Likelihood(pb.Likelihood_UNLIKELY) // Possible means the image possibly belongs to the feature specified. Possible = Likelihood(pb.Likelihood_POSSIBLE) // Likely means the image is likely to belong to the feature specified. Likely = Likelihood(pb.Likelihood_LIKELY) // VeryLikely means the image is very likely to belong to the feature specified. VeryLikely = Likelihood(pb.Likelihood_VERY_LIKELY) ) // A Property is an arbitrary name-value pair. type Property struct { Name string Value string } func propertyFromProto(p *pb.Property) Property { return Property{Name: p.Name, Value: p.Value} } // ColorInfo consists of RGB channels, score and fraction of // image the color occupies in the image. type ColorInfo struct { // RGB components of the color. Color color.NRGBA64 // Score is the image-specific score for this color, in the range [0, 1]. Score float32 // PixelFraction is the fraction of pixels the color occupies in the image, // in the range [0, 1]. PixelFraction float32 } func colorInfoFromProto(ci *pb.ColorInfo) *ColorInfo { return &ColorInfo{ Color: colorFromProto(ci.Color), Score: ci.Score, PixelFraction: ci.PixelFraction, } } // Should this go into protobuf/ptypes? The color proto is in google/types, so // not specific to this API. func colorFromProto(c *cpb.Color) color.NRGBA64 { // Convert a color component from [0.0, 1.0] to a uint16. 
cvt := func(f float32) uint16 { return uint16(f*math.MaxUint16 + 0.5) } var alpha float32 = 1 if c.Alpha != nil { alpha = c.Alpha.Value } return color.NRGBA64{ R: cvt(c.Red), G: cvt(c.Green), B: cvt(c.Blue), A: cvt(alpha), } }