pax_global_header00006660000000000000000000000064136503371630014521gustar00rootroot0000000000000052 comment=754ee590a4f386d0910d887f3b8776354042260b grpc-go-1.29.1/000077500000000000000000000000001365033716300131515ustar00rootroot00000000000000grpc-go-1.29.1/.github/000077500000000000000000000000001365033716300145115ustar00rootroot00000000000000grpc-go-1.29.1/.github/ISSUE_TEMPLATE/000077500000000000000000000000001365033716300166745ustar00rootroot00000000000000grpc-go-1.29.1/.github/ISSUE_TEMPLATE/bug.md000066400000000000000000000007551365033716300200020ustar00rootroot00000000000000--- name: Bug Report about: Create a report to help us improve labels: 'Type: Bug' --- Please see the FAQ in our main README.md, then answer the questions below before submitting your issue. ### What version of gRPC are you using? ### What version of Go are you using (`go version`)? ### What operating system (Linux, Windows, …) and version? ### What did you do? If possible, provide a recipe for reproducing the error. ### What did you expect to see? ### What did you see instead? grpc-go-1.29.1/.github/ISSUE_TEMPLATE/feature.md000066400000000000000000000004471365033716300206560ustar00rootroot00000000000000--- name: Feature Request about: Suggest an idea for gRPC-Go labels: 'Type: Feature' --- Please see the FAQ in our main README.md before submitting your issue. ### Use case(s) - what problem will this feature solve? ### Proposed Solution ### Alternatives Considered ### Additional Context grpc-go-1.29.1/.github/ISSUE_TEMPLATE/question.md000066400000000000000000000002351365033716300210650ustar00rootroot00000000000000--- name: Question about: Ask a question about gRPC-Go labels: 'Type: Question' --- Please see the FAQ in our main README.md before submitting your issue. 
grpc-go-1.29.1/.github/lock.yml000066400000000000000000000000461365033716300161640ustar00rootroot00000000000000daysUntilLock: 180 lockComment: false grpc-go-1.29.1/.github/mergeable.yml000066400000000000000000000023221365033716300171560ustar00rootroot00000000000000version: 2 mergeable: - when: pull_request.* validate: - do: label must_include: regex: '^Type:' fail: - do: checks status: 'failure' payload: title: 'Need an appropriate "Type:" label' summary: 'Need an appropriate "Type:" label' - when: pull_request.* # This validator requires either the "no release notes" label OR a "Release" milestone # to be considered successful. However, validators "pass" in mergeable only if all # checks pass. So it is implemented in reverse. # I.e.: !(!no_relnotes && !release_milestone) ==> no_relnotes || release_milestone # If both validators pass, then it is considered a failure, and if either fails, it is # considered a success. validate: - do: label must_exclude: regex: '^no release notes$' - do: milestone must_exclude: regex: 'Release$' pass: - do: checks status: 'failure' # fail on pass payload: title: 'Need Release milestone or "no release notes" label' summary: 'Need Release milestone or "no release notes" label' fail: - do: checks status: 'success' # pass on fail grpc-go-1.29.1/.github/stale.yml000066400000000000000000000037211365033716300163470ustar00rootroot00000000000000# Configuration for probot-stale - https://github.com/probot/stale # Number of days of inactivity before an Issue or Pull Request becomes stale daysUntilStale: 7 # Number of days of inactivity before an Issue or Pull Request with the stale label is closed. # Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. daysUntilClose: 7 # Only issues or pull requests with all of these labels are check if stale. 
Defaults to `[]` (disabled) onlyLabels: - "Status: Requires Reporter Clarification" # Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable exemptLabels: [] # Set to true to ignore issues in a project (defaults to false) exemptProjects: false # Set to true to ignore issues in a milestone (defaults to false) exemptMilestones: false # Set to true to ignore issues with an assignee (defaults to false) exemptAssignees: false # Label to use when marking as stale staleLabel: "stale" # Comment to post when marking as stale. Set to `false` to disable markComment: > This issue is labeled as requiring an update from the reporter, and no update has been received after 7 days. If no update is provided in the next 7 days, this issue will be automatically closed. # Comment to post when removing the stale label. # unmarkComment: > # Your comment here. # Comment to post when closing a stale Issue or Pull Request. # closeComment: > # Your comment here. # Limit the number of actions per hour, from 1-30. Default is 30 limitPerRun: 1 # Limit to only `issues` or `pulls` # only: issues # Optionally, specify configuration settings that are specific to just 'issues' or 'pulls': # pulls: # daysUntilStale: 30 # markComment: > # This pull request has been automatically marked as stale because it has not had # recent activity. It will be closed if no further activity occurs. Thank you # for your contributions. 
# issues: # exemptLabels: # - confirmed grpc-go-1.29.1/.travis.yml000066400000000000000000000024441365033716300152660ustar00rootroot00000000000000language: go matrix: include: - go: 1.13.x env: VET=1 GO111MODULE=on - go: 1.13.x env: RACE=1 GO111MODULE=on - go: 1.13.x env: RUN386=1 - go: 1.13.x env: GRPC_GO_RETRY=on - go: 1.13.x env: TESTEXTRAS=1 - go: 1.12.x env: GO111MODULE=on - go: 1.11.x env: GO111MODULE=on - go: 1.9.x env: GAE=1 go_import_path: google.golang.org/grpc before_install: - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi install: - try3() { eval "$*" || eval "$*" || eval "$*"; } - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi script: - set -e - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi - if [[ -n "${VET}" ]]; then ./vet.sh; fi - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi - make test grpc-go-1.29.1/AUTHORS000066400000000000000000000000141365033716300142140ustar00rootroot00000000000000Google Inc. grpc-go-1.29.1/CODE-OF-CONDUCT.md000066400000000000000000000002121365033716300155770ustar00rootroot00000000000000## Community Code of Conduct gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). grpc-go-1.29.1/CONTRIBUTING.md000066400000000000000000000060251365033716300154050ustar00rootroot00000000000000# How to contribute We definitely welcome your patches and contributions to gRPC! 
Please read the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements In order to protect both you and ourselves, you will need to sign the [Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). ## Guidelines for Pull Requests How to get your contributions merged smoothly and quickly. - Create **small PRs** that are narrowly focused on **addressing a single concern**. We often times receive PRs that are trying to fix several things at a time, but only one fix is considered acceptable, nothing gets merged and both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. - The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. - For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal). - Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. - Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR. - Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. 
We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity. - Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review). - Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - `make all` to test everything, OR - `make vet` to catch vet errors - `make test` to run the tests - `make testrace` to run tests in race mode - optional `make testappengine` to run tests with appengine - Exceptions to the rules can be made if there's a compelling reason for doing so. grpc-go-1.29.1/Documentation/000077500000000000000000000000001365033716300157625ustar00rootroot00000000000000grpc-go-1.29.1/Documentation/benchmark.md000066400000000000000000000051121365033716300202350ustar00rootroot00000000000000# Benchmark gRPC-Go comes with a set of benchmarking utilities to measure performance. These utilities can be found in the `benchmark` directory within the project's root directory. The main utility, aptly named `benchmain`, supports a host of configurable parameters to simulate various environments and workloads. For example, if your server's workload is primarily streaming RPCs with large messages with compression turned on, invoking `benchmain` in the following way may closely simulate your application: ```bash $ go run google.golang.org/grpc/benchmark/benchmain/main.go \ -workloads=streaming \ -reqSizeBytes=1024 \ -respSizeBytes=1024 \ -compression=gzip ``` Pass the `-h` flag to the `benchmain` utility to see other flags and workloads that are supported. 
## Varying Payload Sizes (Weighted Random Distribution) The `benchmain` utility supports two flags, `-reqPayloadCurveFiles` and `-respPayloadCurveFiles`, that can be used to specify a histograms representing a weighted random distribution of request and response payload sizes, respectively. This is useful to simulate workloads with arbitrary payload sizes. The options takes a comma-separated list of file paths as value. Each file must be a valid CSV file with three columns in each row. Each row represents a range of payload sizes (first two columns) and the weight associated with that range (third column). For example, consider the below file: ```csv 1,32,12.5 128,256,12.5 1024,2048,25.0 ``` Assume that `benchmain` is invoked like so: ```bash $ go run google.golang.org/grpc/benchmark/benchmain/main.go \ -workloads=unary \ -reqPayloadCurveFiles=/path/to/csv \ -respPayloadCurveFiles=/path/to/csv ``` This tells the `benchmain` utility to generate unary RPC requests with a 25% probability of payload sizes in the ranges 1-32 bytes, 25% probability in the 128-256 bytes range, and 50% probability in the 1024-2048 bytes range. RPC requests outside these ranges will not be generated. You may specify multiple CSV files delimited by a comma. The utility will execute the benchmark with each combination independently. That is, the following command will execute four benchmarks: ```bash $ go run google.golang.org/grpc/benchmark/benchmain/main.go \ -workloads=unary \ -reqPayloadCurveFiles=/path/to/csv1,/path/to/csv2 \ -respPayloadCurveFiles=/path/to/csv3,/path/to/csv4 ``` You may also combine `PayloadCurveFiles` with `SizeBytes` options. 
For example: ``` $ go run google.golang.org/grpc/benchmark/benchmain/main.go \ -workloads=unary \ -reqPayloadCurveFiles=/path/to/csv \ -respSizeBytes=1 ``` grpc-go-1.29.1/Documentation/compression.md000066400000000000000000000067571365033716300206640ustar00rootroot00000000000000# Compression The preferred method for configuring message compression on both clients and servers is to use [`encoding.RegisterCompressor`](https://godoc.org/google.golang.org/grpc/encoding#RegisterCompressor) to register an implementation of a compression algorithm. See `grpc/encoding/gzip/gzip.go` for an example of how to implement one. Once a compressor has been registered on the client-side, RPCs may be sent using it via the [`UseCompressor`](https://godoc.org/google.golang.org/grpc#UseCompressor) `CallOption`. Remember that `CallOption`s may be turned into defaults for all calls from a `ClientConn` by using the [`WithDefaultCallOptions`](https://godoc.org/google.golang.org/grpc#WithDefaultCallOptions) `DialOption`. If `UseCompressor` is used and the corresponding compressor has not been installed, an `Internal` error will be returned to the application before the RPC is sent. Server-side, registered compressors will be used automatically to decode request messages and encode the responses. Servers currently always respond using the same compression method specified by the client. If the corresponding compressor has not been registered, an `Unimplemented` status will be returned to the client. ## Deprecated API There is a deprecated API for setting compression as well. It is not recommended for use. However, if you were previously using it, the following section may be helpful in understanding how it works in combination with the new API. 
### Client-Side There are two legacy functions and one new function to configure compression: ```go func WithCompressor(grpc.Compressor) DialOption {} func WithDecompressor(grpc.Decompressor) DialOption {} func UseCompressor(name) CallOption {} ``` For outgoing requests, the following rules are applied in order: 1. If `UseCompressor` is used, messages will be compressed using the compressor named. * If the compressor named is not registered, an Internal error is returned back to the client before sending the RPC. * If UseCompressor("identity"), no compressor will be used, but "identity" will be sent in the header to the server. 1. If `WithCompressor` is used, messages will be compressed using that compressor implementation. 1. Otherwise, outbound messages will be uncompressed. For incoming responses, the following rules are applied in order: 1. If `WithDecompressor` is used and it matches the message's encoding, it will be used. 1. If a registered compressor matches the response's encoding, it will be used. 1. Otherwise, the stream will be closed and an `Unimplemented` status error will be returned to the application. ### Server-Side There are two legacy functions to configure compression: ```go func RPCCompressor(grpc.Compressor) ServerOption {} func RPCDecompressor(grpc.Decompressor) ServerOption {} ``` For incoming requests, the following rules are applied in order: 1. If `RPCDecompressor` is used and that decompressor matches the request's encoding: it will be used. 1. If a registered compressor matches the request's encoding, it will be used. 1. Otherwise, an `Unimplemented` status will be returned to the client. For outgoing responses, the following rules are applied in order: 1. If `RPCCompressor` is used, that compressor will be used to compress all response messages. 1. If compression was used for the incoming request and a registered compressor supports it, that same compression method will be used for the outgoing response. 1. 
Otherwise, no compression will be used for the outgoing response. grpc-go-1.29.1/Documentation/concurrency.md000066400000000000000000000037641365033716300206500ustar00rootroot00000000000000# Concurrency In general, gRPC-go provides a concurrency-friendly API. What follows are some guidelines. ## Clients A [ClientConn][client-conn] can safely be accessed concurrently. Using [helloworld][helloworld] as an example, one could share the `ClientConn` across multiple goroutines to create multiple `GreeterClient` types. In this case, RPCs would be sent in parallel. `GreeterClient`, generated from the proto definitions and wrapping `ClientConn`, is also concurrency safe, and may be directly shared in the same way. Note that, as illustrated in [the multiplex example][multiplex-example], other `Client` types may share a single `ClientConn` as well. ## Streams When using streams, one must take care to avoid calling either `SendMsg` or `RecvMsg` multiple times against the same [Stream][stream] from different goroutines. In other words, it's safe to have a goroutine calling `SendMsg` and another goroutine calling `RecvMsg` on the same stream at the same time. But it is not safe to call `SendMsg` on the same stream in different goroutines, or to call `RecvMsg` on the same stream in different goroutines. ## Servers Each RPC handler attached to a registered server will be invoked in its own goroutine. For example, [SayHello][say-hello] will be invoked in its own goroutine. The same is true for service handlers for streaming RPCs, as seen in the route guide example [here][route-guide-stream]. Similar to clients, multiple services can be registered to the same server. 
[helloworld]: https://github.com/grpc/grpc-go/blob/master/examples/helloworld/greeter_client/main.go#L43 [client-conn]: https://godoc.org/google.golang.org/grpc#ClientConn [stream]: https://godoc.org/google.golang.org/grpc#Stream [say-hello]: https://github.com/grpc/grpc-go/blob/master/examples/helloworld/greeter_server/main.go#L41 [route-guide-stream]: https://github.com/grpc/grpc-go/blob/master/examples/route_guide/server/server.go#L126 [multiplex-example]: https://github.com/grpc/grpc-go/tree/master/examples/features/multiplex grpc-go-1.29.1/Documentation/encoding.md000066400000000000000000000123521365033716300200750ustar00rootroot00000000000000# Encoding The gRPC API for sending and receiving is based upon *messages*. However, messages cannot be transmitted directly over a network; they must first be converted into *bytes*. This document describes how gRPC-Go converts messages into bytes and vice-versa for the purposes of network transmission. ## Codecs (Serialization and Deserialization) A `Codec` contains code to serialize a message into a byte slice (`Marshal`) and deserialize a byte slice back into a message (`Unmarshal`). `Codec`s are registered by name into a global registry maintained in the `encoding` package. ### Implementing a `Codec` A typical `Codec` will be implemented in its own package with an `init` function that registers itself, and is imported anonymously. For example: ```go package proto import "google.golang.org/grpc/encoding" func init() { encoding.RegisterCodec(protoCodec{}) } // ... implementation of protoCodec ... ``` For an example, gRPC's implementation of the `proto` codec can be found in [`encoding/proto`](https://godoc.org/google.golang.org/grpc/encoding/proto). ### Using a `Codec` By default, gRPC registers and uses the "proto" codec, so it is not necessary to do this in your own code to send and receive proto messages. 
To use another `Codec` from a client or server: ```go package myclient import _ "path/to/another/codec" ``` `Codec`s, by definition, must be symmetric, so the same desired `Codec` should be registered in both client and server binaries. On the client-side, to specify a `Codec` to use for message transmission, the `CallOption` `CallContentSubtype` should be used as follows: ```go response, err := myclient.MyCall(ctx, request, grpc.CallContentSubtype("mycodec")) ``` As a reminder, all `CallOption`s may be converted into `DialOption`s that become the default for all RPCs sent through a client using `grpc.WithDefaultCallOptions`: ```go myclient := grpc.Dial(ctx, target, grpc.WithDefaultCallOptions(grpc.CallContentSubtype("mycodec"))) ``` When specified in either of these ways, messages will be encoded using this codec and sent along with headers indicating the codec (`content-type` set to `application/grpc+`). On the server-side, using a `Codec` is as simple as registering it into the global registry (i.e. `import`ing it). If a message is encoded with the content sub-type supported by a registered `Codec`, it will be used automatically for decoding the request and encoding the response. Otherwise, for backward-compatibility reasons, gRPC will attempt to use the "proto" codec. In an upcoming change (tracked in [this issue](https://github.com/grpc/grpc-go/issues/1824)), such requests will be rejected with status code `Unimplemented` instead. ## Compressors (Compression and Decompression) Sometimes, the resulting serialization of a message is not space-efficient, and it may be beneficial to compress this byte stream before transmitting it over the network. To facilitate this operation, gRPC supports a mechanism for performing compression and decompression. A `Compressor` contains code to compress and decompress by wrapping `io.Writer`s and `io.Reader`s, respectively. 
(The form of `Compress` and `Decompress` were chosen to most closely match Go's standard package [implementations](https://golang.org/pkg/compress/) of compressors. Like `Codec`s, `Compressor`s are registered by name into a global registry maintained in the `encoding` package. ### Implementing a `Compressor` A typical `Compressor` will be implemented in its own package with an `init` function that registers itself, and is imported anonymously. For example: ```go package gzip import "google.golang.org/grpc/encoding" func init() { encoding.RegisterCompressor(compressor{}) } // ... implementation of compressor ... ``` An implementation of a `gzip` compressor can be found in [`encoding/gzip`](https://godoc.org/google.golang.org/grpc/encoding/gzip). ### Using a `Compressor` By default, gRPC does not register or use any compressors. To use a `Compressor` from a client or server: ```go package myclient import _ "google.golang.org/grpc/encoding/gzip" ``` `Compressor`s, by definition, must be symmetric, so the same desired `Compressor` should be registered in both client and server binaries. On the client-side, to specify a `Compressor` to use for message transmission, the `CallOption` `UseCompressor` should be used as follows: ```go response, err := myclient.MyCall(ctx, request, grpc.UseCompressor("gzip")) ``` As a reminder, all `CallOption`s may be converted into `DialOption`s that become the default for all RPCs sent through a client using `grpc.WithDefaultCallOptions`: ```go myclient := grpc.Dial(ctx, target, grpc.WithDefaultCallOptions(grpc.UseCompresor("gzip"))) ``` When specified in either of these ways, messages will be compressed using this compressor and sent along with headers indicating the compressor (`content-coding` set to ``). On the server-side, using a `Compressor` is as simple as registering it into the global registry (i.e. `import`ing it). 
If a message is compressed with the content coding supported by a registered `Compressor`, it will be used automatically for decompressing the request and compressing the response. Otherwise, the request will be rejected with status code `Unimplemented`. grpc-go-1.29.1/Documentation/gomock-example.md000066400000000000000000000151221365033716300212150ustar00rootroot00000000000000# Mocking Service for gRPC [Example code unary RPC](https://github.com/grpc/grpc-go/tree/master/examples/helloworld/mock_helloworld) [Example code streaming RPC](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/mock_routeguide) ## Why? To test client-side logic without the overhead of connecting to a real server. Mocking enables users to write light-weight unit tests to check functionalities on client-side without invoking RPC calls to a server. ## Idea: Mock the client stub that connects to the server. We use Gomock to mock the client interface (in the generated code) and programmatically set its methods to expect and return pre-determined values. This enables users to write tests around the client logic and use this mocked stub while making RPC calls. ## How to use Gomock? Documentation on Gomock can be found [here](https://github.com/golang/mock). A quick reading of the documentation should enable users to follow the code below. Consider a gRPC service based on following proto file: ```proto //helloworld.proto package helloworld; message HelloRequest { string name = 1; } message HelloReply { string name = 1; } service Greeter { rpc SayHello (HelloRequest) returns (HelloReply) {} } ``` The generated file helloworld.pb.go will have a client interface for each service defined in the proto file. This interface will have methods corresponding to each rpc inside that service. 
```Go type GreeterClient interface { SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) } ``` The generated code also contains a struct that implements this interface. ```Go type greeterClient struct { cc *grpc.ClientConn } func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error){ // ... // gRPC specific code here // ... } ``` Along with this the generated code has a method to create an instance of this struct. ```Go func NewGreeterClient(cc *grpc.ClientConn) GreeterClient ``` The user code uses this function to create an instance of the struct greeterClient which then can be used to make rpc calls to the server. We will mock this interface GreeterClient and use an instance of that mock to make rpc calls. These calls instead of going to server will return pre-determined values. To create a mock we’ll use [mockgen](https://github.com/golang/mock#running-mockgen). From the directory ``` examples/helloworld/ ``` run ``` mockgen google.golang.org/grpc/examples/helloworld/helloworld GreeterClient > mock_helloworld/hw_mock.go ``` Notice that in the above command we specify GreeterClient as the interface to be mocked. The user test code can import the package generated by mockgen along with library package gomock to write unit tests around client-side logic. ```Go import "github.com/golang/mock/gomock" import hwmock "google.golang.org/grpc/examples/helloworld/mock_helloworld" ``` An instance of the mocked interface can be created as: ```Go mockGreeterClient := hwmock.NewMockGreeterClient(ctrl) ``` This mocked object can be programmed to expect calls to its methods and return pre-determined values. For instance, we can program mockGreeterClient to expect a call to its method SayHello and return a HelloReply with message “Mocked RPC”. 
```Go mockGreeterClient.EXPECT().SayHello( gomock.Any(), // expect any value for first parameter gomock.Any(), // expect any value for second parameter ).Return(&helloworld.HelloReply{Message: “Mocked RPC”}, nil) ``` gomock.Any() indicates that the parameter can have any value or type. We can indicate specific values for built-in types with gomock.Eq(). However, if the test code needs to specify the parameter to have a proto message type, we can replace gomock.Any() with an instance of a struct that implements gomock.Matcher interface. ```Go type rpcMsg struct { msg proto.Message } func (r *rpcMsg) Matches(msg interface{}) bool { m, ok := msg.(proto.Message) if !ok { return false } return proto.Equal(m, r.msg) } func (r *rpcMsg) String() string { return fmt.Sprintf("is %s", r.msg) } ... req := &helloworld.HelloRequest{Name: "unit_test"} mockGreeterClient.EXPECT().SayHello( gomock.Any(), &rpcMsg{msg: req}, ).Return(&helloworld.HelloReply{Message: "Mocked Interface"}, nil) ``` ## Mock streaming RPCs: For our example we consider the case of bi-directional streaming RPCs. Concretely, we'll write a test for RouteChat function from the route guide example to demonstrate how to write mocks for streams. RouteChat is a bi-directional streaming RPC, which means calling RouteChat returns a stream that can __Send__ and __Recv__ messages to and from the server, respectively. We'll start by creating a mock of this stream interface returned by RouteChat and then we'll mock the client interface and set expectation on the method RouteChat to return our mocked stream. ### Generating mocking code: Like before we'll use [mockgen](https://github.com/golang/mock#running-mockgen). From the `examples/route_guide` directory run: `mockgen google.golang.org/grpc/examples/route_guide/routeguide RouteGuideClient,RouteGuide_RouteChatClient > mock_route_guide/rg_mock.go` Notice that we are mocking both client(`RouteGuideClient`) and stream(`RouteGuide_RouteChatClient`) interfaces here. 
This will create a file `rg_mock.go` under directory `mock_route_guide`. This file contains all the mocking code we need to write our test. In our test code, like before, we import the this mocking code along with the generated code ```go import ( rgmock "google.golang.org/grpc/examples/route_guide/mock_routeguide" rgpb "google.golang.org/grpc/examples/route_guide/routeguide" ) ``` Now considering a test that takes the RouteGuide client object as a parameter, makes a RouteChat rpc call and sends a message on the resulting stream. Furthermore, this test expects to see the same message to be received on the stream. ```go var msg = ... // Creates a RouteChat call and sends msg on it. // Checks if the received message was equal to msg. func testRouteChat(client rgb.RouteChatClient) error{ ... } ``` We can inject our mock in here by simply passing it as an argument to the method. Creating mock for stream interface: ```go stream := rgmock.NewMockRouteGuide_RouteChatClient(ctrl) } ``` Setting Expectations: ```go stream.EXPECT().Send(gomock.Any()).Return(nil) stream.EXPECT().Recv().Return(msg, nil) ``` Creating mock for client interface: ```go rgclient := rgmock.NewMockRouteGuideClient(ctrl) ``` Setting Expectations: ```go rgclient.EXPECT().RouteChat(gomock.Any()).Return(stream, nil) ``` grpc-go-1.29.1/Documentation/grpc-auth-support.md000066400000000000000000000062031365033716300217110ustar00rootroot00000000000000# Authentication As outlined in the [gRPC authentication guide](https://grpc.io/docs/guides/auth.html) there are a number of different mechanisms for asserting identity between an client and server. We'll present some code-samples here demonstrating how to provide TLS support encryption and identity assertions as well as passing OAuth2 tokens to services that support it. 
# Enabling TLS on a gRPC client ```Go conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""))) ``` # Enabling TLS on a gRPC server ```Go creds, err := credentials.NewServerTLSFromFile(certFile, keyFile) if err != nil { log.Fatalf("Failed to generate credentials %v", err) } lis, err := net.Listen("tcp", ":0") server := grpc.NewServer(grpc.Creds(creds)) ... server.Serve(lis) ``` # OAuth2 For an example of how to configure client and server to use OAuth2 tokens, see [here](https://github.com/grpc/grpc-go/tree/master/examples/features/authentication). ## Validating a token on the server Clients may use [metadata.MD](https://godoc.org/google.golang.org/grpc/metadata#MD) to store tokens and other authentication-related data. To gain access to the `metadata.MD` object, a server may use [metadata.FromIncomingContext](https://godoc.org/google.golang.org/grpc/metadata#FromIncomingContext). With a reference to `metadata.MD` on the server, one needs to simply lookup the `authorization` key. Note, all keys stored within `metadata.MD` are normalized to lowercase. See [here](https://godoc.org/google.golang.org/grpc/metadata#New). It is possible to configure token validation for all RPCs using an interceptor. A server may configure either a [grpc.UnaryInterceptor](https://godoc.org/google.golang.org/grpc#UnaryInterceptor) or a [grpc.StreamInterceptor](https://godoc.org/google.golang.org/grpc#StreamInterceptor). ## Adding a token to all outgoing client RPCs To send an OAuth2 token with each RPC, a client may configure the `grpc.DialOption` [grpc.WithPerRPCCredentials](https://godoc.org/google.golang.org/grpc#WithPerRPCCredentials). Alternatively, a client may also use the `grpc.CallOption` [grpc.PerRPCCredentials](https://godoc.org/google.golang.org/grpc#PerRPCCredentials) on each invocation of an RPC. 
To create a `credentials.PerRPCCredentials`, use [oauth.NewOauthAccess](https://godoc.org/google.golang.org/grpc/credentials/oauth#NewOauthAccess). Note, the OAuth2 implementation of `grpc.PerRPCCredentials` requires a client to use [grpc.WithTransportCredentials](https://godoc.org/google.golang.org/grpc#WithTransportCredentials) to prevent any insecure transmission of tokens. # Authenticating with Google ## Google Compute Engine (GCE) ```Go conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), grpc.WithPerRPCCredentials(oauth.NewComputeEngine())) ``` ## JWT ```Go jwtCreds, err := oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) if err != nil { log.Fatalf("Failed to create JWT credentials: %v", err) } conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")), grpc.WithPerRPCCredentials(jwtCreds)) ``` grpc-go-1.29.1/Documentation/grpc-metadata.md000066400000000000000000000165051365033716300210240ustar00rootroot00000000000000# Metadata gRPC supports sending metadata between client and server. This doc shows how to send and receive metadata in gRPC-go. ## Background Four kinds of service method: - [Unary RPC](https://grpc.io/docs/guides/concepts.html#unary-rpc) - [Server streaming RPC](https://grpc.io/docs/guides/concepts.html#server-streaming-rpc) - [Client streaming RPC](https://grpc.io/docs/guides/concepts.html#client-streaming-rpc) - [Bidirectional streaming RPC](https://grpc.io/docs/guides/concepts.html#bidirectional-streaming-rpc) And concept of [metadata](https://grpc.io/docs/guides/concepts.html#metadata). ## Constructing metadata A metadata can be created using package [metadata](https://godoc.org/google.golang.org/grpc/metadata). The type MD is actually a map from string to a list of strings: ```go type MD map[string][]string ``` Metadata can be read like a normal map. 
Note that the value type of this map is `[]string`, so that users can attach multiple values using a single key. ### Creating a new metadata A metadata can be created from a `map[string]string` using function `New`: ```go md := metadata.New(map[string]string{"key1": "val1", "key2": "val2"}) ``` Another way is to use `Pairs`. Values with the same key will be merged into a list: ```go md := metadata.Pairs( "key1", "val1", "key1", "val1-2", // "key1" will have map value []string{"val1", "val1-2"} "key2", "val2", ) ``` __Note:__ all the keys will be automatically converted to lowercase, so "key1" and "kEy1" will be the same key and their values will be merged into the same list. This happens for both `New` and `Pairs`. ### Storing binary data in metadata In metadata, keys are always strings. But values can be strings or binary data. To store binary data value in metadata, simply add "-bin" suffix to the key. The values with "-bin" suffixed keys will be encoded when creating the metadata: ```go md := metadata.Pairs( "key", "string value", "key-bin", string([]byte{96, 102}), // this binary data will be encoded (base64) before sending // and will be decoded after being transferred. ) ``` ## Retrieving metadata from context Metadata can be retrieved from context using `FromIncomingContext`: ```go func (s *server) SomeRPC(ctx context.Context, in *pb.SomeRequest) (*pb.SomeResponse, err) { md, ok := metadata.FromIncomingContext(ctx) // do something with metadata } ``` ## Sending and receiving metadata - client side Client side metadata sending and receiving examples are available [here](../examples/features/metadata/client/main.go). ### Sending metadata There are two ways to send metadata to the server. The recommended way is to append kv pairs to the context using `AppendToOutgoingContext`. This can be used with or without existing metadata on the context. When there is no prior metadata, metadata is added; when metadata already exists on the context, kv pairs are merged in. 
```go // create a new context with some metadata ctx := metadata.AppendToOutgoingContext(ctx, "k1", "v1", "k1", "v2", "k2", "v3") // later, add some more metadata to the context (e.g. in an interceptor) ctx := metadata.AppendToOutgoingContext(ctx, "k3", "v4") // make unary RPC response, err := client.SomeRPC(ctx, someRequest) // or make streaming RPC stream, err := client.SomeStreamingRPC(ctx) ``` Alternatively, metadata may be attached to the context using `NewOutgoingContext`. However, this replaces any existing metadata in the context, so care must be taken to preserve the existing metadata if desired. This is slower than using `AppendToOutgoingContext`. An example of this is below: ```go // create a new context with some metadata md := metadata.Pairs("k1", "v1", "k1", "v2", "k2", "v3") ctx := metadata.NewOutgoingContext(context.Background(), md) // later, add some more metadata to the context (e.g. in an interceptor) md, _ := metadata.FromOutgoingContext(ctx) newMD := metadata.Pairs("k3", "v3") ctx = metadata.NewOutgoingContext(ctx, metadata.Join(md, newMD)) // make unary RPC response, err := client.SomeRPC(ctx, someRequest) // or make streaming RPC stream, err := client.SomeStreamingRPC(ctx) ``` ### Receiving metadata Metadata that a client can receive includes header and trailer. 
#### Unary call Header and trailer sent along with a unary call can be retrieved using function [Header](https://godoc.org/google.golang.org/grpc#Header) and [Trailer](https://godoc.org/google.golang.org/grpc#Trailer) in [CallOption](https://godoc.org/google.golang.org/grpc#CallOption): ```go var header, trailer metadata.MD // variable to store header and trailer r, err := client.SomeRPC( ctx, someRequest, grpc.Header(&header), // will retrieve header grpc.Trailer(&trailer), // will retrieve trailer ) // do something with header and trailer ``` #### Streaming call For streaming calls including: - Server streaming RPC - Client streaming RPC - Bidirectional streaming RPC Header and trailer can be retrieved from the returned stream using function `Header` and `Trailer` in interface [ClientStream](https://godoc.org/google.golang.org/grpc#ClientStream): ```go stream, err := client.SomeStreamingRPC(ctx) // retrieve header header, err := stream.Header() // retrieve trailer trailer := stream.Trailer() ``` ## Sending and receiving metadata - server side Server side metadata sending and receiving examples are available [here](../examples/features/metadata/server/main.go). ### Receiving metadata To read metadata sent by the client, the server needs to retrieve it from RPC context. If it is a unary call, the RPC handler's context can be used. For streaming calls, the server needs to get context from the stream. 
#### Unary call ```go func (s *server) SomeRPC(ctx context.Context, in *pb.someRequest) (*pb.someResponse, error) { md, ok := metadata.FromIncomingContext(ctx) // do something with metadata } ``` #### Streaming call ```go func (s *server) SomeStreamingRPC(stream pb.Service_SomeStreamingRPCServer) error { md, ok := metadata.FromIncomingContext(stream.Context()) // get context from stream // do something with metadata } ``` ### Sending metadata #### Unary call To send header and trailer to client in unary call, the server can call [SendHeader](https://godoc.org/google.golang.org/grpc#SendHeader) and [SetTrailer](https://godoc.org/google.golang.org/grpc#SetTrailer) functions in module [grpc](https://godoc.org/google.golang.org/grpc). These two functions take a context as the first parameter. It should be the RPC handler's context or one derived from it: ```go func (s *server) SomeRPC(ctx context.Context, in *pb.someRequest) (*pb.someResponse, error) { // create and send header header := metadata.Pairs("header-key", "val") grpc.SendHeader(ctx, header) // create and set trailer trailer := metadata.Pairs("trailer-key", "val") grpc.SetTrailer(ctx, trailer) } ``` #### Streaming call For streaming calls, header and trailer can be sent using function `SendHeader` and `SetTrailer` in interface [ServerStream](https://godoc.org/google.golang.org/grpc#ServerStream): ```go func (s *server) SomeStreamingRPC(stream pb.Service_SomeStreamingRPCServer) error { // create and send header header := metadata.Pairs("header-key", "val") stream.SendHeader(header) // create and set trailer trailer := metadata.Pairs("trailer-key", "val") stream.SetTrailer(trailer) } ``` grpc-go-1.29.1/Documentation/keepalive.md000066400000000000000000000033651365033716300202570ustar00rootroot00000000000000# Keepalive gRPC sends http2 pings on the transport to detect if the connection is down. If the ping is not acknowledged by the other side within a certain period, the connection will be closed. 
Note that pings are only necessary when there's no activity on the connection. For how to configure keepalive, see https://godoc.org/google.golang.org/grpc/keepalive for the options. ## What should I set? It should be sufficient for most users to set [client parameters](https://godoc.org/google.golang.org/grpc/keepalive) as a [dial option](https://godoc.org/google.golang.org/grpc#WithKeepaliveParams). ## What will happen? (The behavior described here is specific for gRPC-go, it might be slightly different in other languages.) When there's no activity on a connection (note that an ongoing stream results in __no activity__ when there's no message being sent), after `Time`, a ping will be sent by the client and the server will send a ping ack when it gets the ping. Client will wait for `Timeout`, and check if there's any activity on the connection during this period (a ping ack is an activity). ## What about server side? Server has similar `Time` and `Timeout` settings as client. Server can also configure connection max-age. See [server parameters](https://godoc.org/google.golang.org/grpc/keepalive#ServerParameters) for details. ### Enforcement policy [Enforcement policy](https://godoc.org/google.golang.org/grpc/keepalive#EnforcementPolicy) is a special setting on server side to protect server from malicious or misbehaving clients. Server sends GOAWAY with ENHANCE_YOUR_CALM and closes the connection when bad behaviors are detected: - Client sends too frequent pings - Client sends pings when there's no stream and this is disallowed by server config grpc-go-1.29.1/Documentation/log_levels.md000066400000000000000000000030261365033716300204400ustar00rootroot00000000000000# Log Levels This document describes the different log levels supported by the grpc-go library, and under what conditions they should be used. ### Info Info messages are for informational purposes and may aid in the debugging of applications or the gRPC library. 
Examples: - The name resolver received an update. - The balancer updated its picker. - Significant gRPC state is changing. At verbosity of 0 (the default), any single info message should not be output more than once every 5 minutes under normal operation. ### Warning Warning messages indicate problems that are non-fatal for the application, but could lead to unexpected behavior or subsequent errors. Examples: - Resolver could not resolve target name. - Error received while connecting to a server. - Lost or corrupt connection with remote endpoint. ### Error Error messages represent errors in the usage of gRPC that cannot be returned to the application as errors, or internal gRPC-Go errors that are recoverable. Internal errors are detected during gRPC tests and will result in test failures. Examples: - Invalid arguments passed to a function that cannot return an error. - An internal error that cannot be returned or would be inappropriate to return to the user. ### Fatal Fatal errors are severe internal errors that are unrecoverable. These lead directly to panics, and are avoided as much as possible. Example: - Internal invariant was violated. - User attempted an action that cannot return an error gracefully, but would lead to an invalid state if performed. grpc-go-1.29.1/Documentation/proxy.md000066400000000000000000000011331365033716300174630ustar00rootroot00000000000000# Proxy HTTP CONNECT proxies are supported by default in gRPC. The proxy address can be specified by the environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions thereof). ## Custom proxy Currently, proxy support is implemented in the default dialer. It does one more handshake (a CONNECT handshake in the case of HTTP CONNECT proxy) on the connection before giving it to gRPC. If the default proxy doesn't work for you, replace the default dialer with your custom proxy dialer. 
This can be done using [`WithDialer`](https://godoc.org/google.golang.org/grpc#WithDialer).grpc-go-1.29.1/Documentation/rpc-errors.md000066400000000000000000000045511365033716300204070ustar00rootroot00000000000000# RPC Errors All service method handlers should return `nil` or errors from the `status.Status` type. Clients have direct access to the errors. Upon encountering an error, a gRPC server method handler should create a `status.Status`. In typical usage, one would use [status.New][new-status] passing in an appropriate [codes.Code][code] as well as a description of the error to produce a `status.Status`. Calling [status.Err][status-err] converts the `status.Status` type into an `error`. As a convenience method, there is also [status.Error][status-error] which obviates the conversion step. Compare: ``` st := status.New(codes.NotFound, "some description") err := st.Err() // vs. err := status.Error(codes.NotFound, "some description") ``` ## Adding additional details to errors In some cases, it may be necessary to add details for a particular error on the server side. The [status.WithDetails][with-details] method exists for this purpose. Clients may then read those details by first converting the plain `error` type back to a [status.Status][status] and then using [status.Details][details]. ## Example The [example][example] demonstrates the API discussed above and shows how to add information about rate limits to the error message using `status.Status`. 
To run the example, first start the server: ``` $ go run examples/rpc_errors/server/main.go ``` In a separate session, run the client: ``` $ go run examples/rpc_errors/client/main.go ``` On the first run of the client, all is well: ``` 2018/03/12 19:39:33 Greeting: Hello world ``` Upon running the client a second time, the client exceeds the rate limit and receives an error with details: ``` 2018/03/19 16:42:01 Quota failure: violations: exit status 1 ``` [status]: https://godoc.org/google.golang.org/grpc/status#Status [new-status]: https://godoc.org/google.golang.org/grpc/status#New [code]: https://godoc.org/google.golang.org/grpc/codes#Code [with-details]: https://godoc.org/google.golang.org/grpc/status#Status.WithDetails [details]: https://godoc.org/google.golang.org/grpc/status#Status.Details [status-err]: https://godoc.org/google.golang.org/grpc/status#Status.Err [status-error]: https://godoc.org/google.golang.org/grpc/status#Error [example]: https://github.com/grpc/grpc-go/tree/master/examples/features/errors grpc-go-1.29.1/Documentation/server-reflection-tutorial.md000066400000000000000000000077121365033716300236120ustar00rootroot00000000000000# gRPC Server Reflection Tutorial gRPC Server Reflection provides information about publicly-accessible gRPC services on a server, and assists clients at runtime to construct RPC requests and responses without precompiled service information. It is used by gRPC CLI, which can be used to introspect server protos and send/receive test RPCs. ## Enable Server Reflection gRPC-go Server Reflection is implemented in package [reflection](https://github.com/grpc/grpc-go/tree/master/reflection). To enable server reflection, you need to import this package and register reflection service on your gRPC server. 
For example, to enable server reflection in `example/helloworld`, we need to make the following changes: ```diff --- a/examples/helloworld/greeter_server/main.go +++ b/examples/helloworld/greeter_server/main.go @@ -40,6 +40,7 @@ import ( "google.golang.org/grpc" pb "google.golang.org/grpc/examples/helloworld/helloworld" + "google.golang.org/grpc/reflection" ) const ( @@ -61,6 +62,8 @@ func main() { } s := grpc.NewServer() pb.RegisterGreeterServer(s, &server{}) + // Register reflection service on gRPC server. + reflection.Register(s) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } ``` An example server with reflection registered can be found at `examples/features/reflection/server`. ## gRPC CLI After enabling Server Reflection in a server application, you can use gRPC CLI to check its services. gRPC CLI is only available in c++. Instructions on how to use gRPC CLI can be found at [command_line_tool.md](https://github.com/grpc/grpc/blob/master/doc/command_line_tool.md). To build gRPC CLI: ```sh git clone https://github.com/grpc/grpc cd grpc git submodule update --init make grpc_cli cd bins/opt # grpc_cli is in directory bins/opt/ ``` ## Use gRPC CLI to check services First, start the helloworld server in grpc-go directory: ```sh $ cd $ go run examples/features/reflection/server/main.go ``` Open a new terminal and make sure you are in the directory where grpc_cli lives: ```sh $ cd /bins/opt ``` ### List services `grpc_cli ls` command lists services and methods exposed at a given port: - List all the services exposed at a given port ```sh $ ./grpc_cli ls localhost:50051 ``` output: ```sh grpc.examples.echo.Echo grpc.reflection.v1alpha.ServerReflection helloworld.Greeter ``` - List one service with details `grpc_cli ls` command inspects a service given its full name (in the format of \.\). It can print information with a long listing format when `-l` flag is set. This flag can be used to get more details about a service. 
```sh $ ./grpc_cli ls localhost:50051 helloworld.Greeter -l ``` output: ```sh filename: helloworld.proto package: helloworld; service Greeter { rpc SayHello(helloworld.HelloRequest) returns (helloworld.HelloReply) {} } ``` ### List methods - List one method with details `grpc_cli ls` command also inspects a method given its full name (in the format of \.\.\). ```sh $ ./grpc_cli ls localhost:50051 helloworld.Greeter.SayHello -l ``` output: ```sh rpc SayHello(helloworld.HelloRequest) returns (helloworld.HelloReply) {} ``` ### Inspect message types We can use the `grpc_cli type` command to inspect request/response types given the full name of the type (in the format of \.\). - Get information about the request type ```sh $ ./grpc_cli type localhost:50051 helloworld.HelloRequest ``` output: ```sh message HelloRequest { optional string name = 1[json_name = "name"]; } ``` ### Call a remote method We can send RPCs to a server and get responses using `grpc_cli call` command. - Call a unary method ```sh $ ./grpc_cli call localhost:50051 SayHello "name: 'gRPC CLI'" ``` output: ```sh message: "Hello gRPC CLI" ``` grpc-go-1.29.1/Documentation/versioning.md000066400000000000000000000022631365033716300204720ustar00rootroot00000000000000# Versioning and Releases Note: This document references terminology defined at http://semver.org. ## Release Frequency Regular MINOR releases of gRPC-Go are performed every six weeks. Patch releases to the previous two MINOR releases may be performed on demand or if serious security problems are discovered. ## Versioning Policy The gRPC-Go versioning policy follows the Semantic Versioning 2.0.0 specification, with the following exceptions: - A MINOR version will not _necessarily_ add new functionality. - MINOR releases will not break backward compatibility, except in the following circumstances: - An API was marked as EXPERIMENTAL upon its introduction. - An API was marked as DEPRECATED in the initial MAJOR release. 
- An API is inherently flawed and cannot provide correct or secure behavior. In these cases, APIs MAY be changed or removed without a MAJOR release. Otherwise, backward compatibility will be preserved by MINOR releases. For an API marked as DEPRECATED, an alternative will be available (if appropriate) for at least three months prior to its removal. ## Release History Please see our release history on GitHub: https://github.com/grpc/grpc-go/releases grpc-go-1.29.1/GOVERNANCE.md000066400000000000000000000002151365033716300151200ustar00rootroot00000000000000This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md). grpc-go-1.29.1/LICENSE000066400000000000000000000261361365033716300141660ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. grpc-go-1.29.1/MAINTAINERS.md000066400000000000000000000024761365033716300152560ustar00rootroot00000000000000This page lists all active maintainers of this repository. If you were a maintainer and would like to add your name to the Emeritus list, please send us a PR. See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) for governance guidelines and how to become a maintainer. See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) for general contribution guidelines. 
## Maintainers (in alphabetical order) - [canguler](https://github.com/canguler), Google LLC - [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC - [jadekler](https://github.com/jadekler), Google LLC - [menghanl](https://github.com/menghanl), Google LLC - [srini100](https://github.com/srini100), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez), Google LLC - [iamqizhao](https://github.com/iamqizhao), Google LLC - [jtattermusch](https://github.com/jtattermusch), Google LLC - [lyuxuan](https://github.com/lyuxuan), Google LLC - [makmukhi](https://github.com/makmukhi), Google LLC - [matt-kwong](https://github.com/matt-kwong), Google LLC - [nicolasnoble](https://github.com/nicolasnoble), Google LLC - [yongni](https://github.com/yongni), Google LLC grpc-go-1.29.1/Makefile000066400000000000000000000022611365033716300146120ustar00rootroot00000000000000all: vet test testrace build: deps go build google.golang.org/grpc/... clean: go clean -i google.golang.org/grpc/... deps: go get -d -v google.golang.org/grpc/... proto: @ if ! which protoc > /dev/null; then \ echo "error: protoc not installed" >&2; \ exit 1; \ fi go generate google.golang.org/grpc/... test: testdeps go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... testsubmodule: testdeps cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... testappengine: testappenginedeps goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... testappenginedeps: goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... testdeps: go get -d -v -t google.golang.org/grpc/... testrace: testdeps go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... updatedeps: go get -d -v -u -f google.golang.org/grpc/... updatetestdeps: go get -d -v -t -u -f google.golang.org/grpc/... 
vet: vetdeps ./vet.sh vetdeps: ./vet.sh -install .PHONY: \ all \ build \ clean \ deps \ proto \ test \ testappengine \ testappenginedeps \ testdeps \ testrace \ updatedeps \ updatetestdeps \ vet \ vetdeps grpc-go-1.29.1/README.md000066400000000000000000000115731365033716300144370ustar00rootroot00000000000000# gRPC-Go [![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. Installation ------------ To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: ``` $ go get -u google.golang.org/grpc ``` With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in your source code and `go [build|run|test]` will automatically download the necessary dependencies ([Go modules ref](https://github.com/golang/go/wiki/Modules)). If you are trying to access grpc-go from within China, please see the [FAQ](#FAQ) below. Prerequisites ------------- gRPC-Go requires Go 1.9 or later. Documentation ------------- - See [godoc](https://godoc.org/google.golang.org/grpc) for package and API descriptions. - Documentation on specific topics can be found in the [Documentation directory](Documentation/). - Examples can be found in the [examples directory](examples/). 
Performance ----------- Performance benchmark data for grpc-go and other languages is maintained in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). Status ------ General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). FAQ --- #### I/O Timeout Errors The `golang.org` domain may be blocked from some countries. `go get` usually produces an error like the following when this happens: ``` $ go get -u google.golang.org/grpc package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) ``` To build Go code, there are several options: - Set up a VPN and access google.golang.org through that. - Without Go module support: `git clone` the repo manually: ``` git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc ``` You will need to do the same for all of grpc's dependencies in `golang.org`, e.g. `golang.org/x/net`. - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: ``` go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest go mod tidy go mod vendor go build -mod=vendor ``` Again, this will need to be done for all transitive dependencies hosted on golang.org as well. Please refer to [this issue](https://github.com/golang/go/issues/28652) in the golang repo regarding this concern. #### Compiling error, undefined: grpc.SupportPackageIsVersion ##### If you are using Go modules: Please ensure your gRPC-Go version is `require`d at the appropriate version in the same module containing the generated `.pb.go` files. 
For example, `SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: ``` module require ( google.golang.org/grpc v1.27.0 ) ``` ##### If you are *not* using Go modules: Please update proto package, gRPC package and rebuild the proto files: - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` - `go get -u google.golang.org/grpc` - `protoc --go_out=plugins=grpc:. *.proto` #### How to turn on logging The default logger is controlled by the environment variables. Turn everything on by setting: ``` GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info ``` #### The RPC failed with error `"code = Unavailable desc = transport is closing"` This error means the connection the RPC is using was closed, and there are many possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown 1. Keepalive parameters caused connection shutdown, for example if you have configured your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), to allow longer RPC calls to finish. It can be tricky to debug this because the error happens on the client side but the root cause of the connection being closed is on the server side. Turn on logging on __both client and server__, and see if there are any transport errors. grpc-go-1.29.1/attributes/000077500000000000000000000000001365033716300153375ustar00rootroot00000000000000grpc-go-1.29.1/attributes/attributes.go000066400000000000000000000045051365033716300200600ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package attributes defines a generic key/value store used in various gRPC // components. // // All APIs in this package are EXPERIMENTAL. package attributes import "fmt" // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own // types for keys. type Attributes struct { m map[interface{}]interface{} } // New returns a new Attributes containing all key/value pairs in kvs. If the // same key appears multiple times, the last value overwrites all previous // values for that key. Panics if len(kvs) is not even. func New(kvs ...interface{}) *Attributes { if len(kvs)%2 != 0 { panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) } a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} for i := 0; i < len(kvs)/2; i++ { a.m[kvs[i*2]] = kvs[i*2+1] } return a } // WithValues returns a new Attributes containing all key/value pairs in a and // kvs. Panics if len(kvs) is not even. If the same key appears multiple // times, the last value overwrites all previous values for that key. To // remove an existing key, use a nil value. 
func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { if len(kvs)%2 != 0 { panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) } n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} for k, v := range a.m { n.m[k] = v } for i := 0; i < len(kvs)/2; i++ { n.m[kvs[i*2]] = kvs[i*2+1] } return n } // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. func (a *Attributes) Value(key interface{}) interface{} { return a.m[key] } grpc-go-1.29.1/attributes/attributes_test.go000066400000000000000000000023141365033716300211130ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package attributes_test import ( "fmt" "google.golang.org/grpc/attributes" ) func ExampleAttributes() { type keyOne struct{} type keyTwo struct{} a := attributes.New(keyOne{}, 1, keyTwo{}, "two") fmt.Println("Key one:", a.Value(keyOne{})) fmt.Println("Key two:", a.Value(keyTwo{})) // Output: // Key one: 1 // Key two: two } func ExampleAttributes_WithValues() { type keyOne struct{} type keyTwo struct{} a := attributes.New(keyOne{}, 1) a = a.WithValues(keyTwo{}, "two") fmt.Println("Key one:", a.Value(keyOne{})) fmt.Println("Key two:", a.Value(keyTwo{})) // Output: // Key one: 1 // Key two: two } grpc-go-1.29.1/backoff.go000066400000000000000000000035721365033716300151020ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // See internal/backoff package for the backoff implementation. This file is // kept for the exported types and API backward compatibility. package grpc import ( "time" "google.golang.org/grpc/backoff" ) // DefaultBackoffConfig uses values specified for backoff in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // // Deprecated: use ConnectParams instead. Will be supported throughout 1.x. var DefaultBackoffConfig = BackoffConfig{ MaxDelay: 120 * time.Second, } // BackoffConfig defines the parameters for the default gRPC backoff strategy. // // Deprecated: use ConnectParams instead. Will be supported throughout 1.x. 
type BackoffConfig struct { // MaxDelay is the upper bound of backoff delay. MaxDelay time.Duration } // ConnectParams defines the parameters for connecting and retrying. Users are // encouraged to use this instead of the BackoffConfig type defined above. See // here for more details: // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // // This API is EXPERIMENTAL. type ConnectParams struct { // Backoff specifies the configuration options for connection backoff. Backoff backoff.Config // MinConnectTimeout is the minimum amount of time we are willing to give a // connection to complete. MinConnectTimeout time.Duration } grpc-go-1.29.1/backoff/000077500000000000000000000000001365033716300145445ustar00rootroot00000000000000grpc-go-1.29.1/backoff/backoff.go000066400000000000000000000033111365033716300164640ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package backoff provides configuration options for backoff. // // More details can be found at: // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // // All APIs in this package are experimental. package backoff import "time" // Config defines the configuration options for backoff. type Config struct { // BaseDelay is the amount of time to backoff after the first failure. BaseDelay time.Duration // Multiplier is the factor with which to multiply backoffs after a // failed retry. Should ideally be greater than 1. 
Multiplier float64 // Jitter is the factor with which backoffs are randomized. Jitter float64 // MaxDelay is the upper bound of backoff delay. MaxDelay time.Duration } // DefaultConfig is a backoff configuration with the default values specfied // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // // This should be useful for callers who want to configure backoff with // non-default values only for a subset of the options. var DefaultConfig = Config{ BaseDelay: 1.0 * time.Second, Multiplier: 1.6, Jitter: 0.2, MaxDelay: 120 * time.Second, } grpc-go-1.29.1/balancer.go000066400000000000000000000260171365033716300152550ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "context" "net" "sync" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/naming" "google.golang.org/grpc/status" ) // Address represents a server the client connects to. // // Deprecated: please use package balancer. type Address struct { // Addr is the server address on which a connection will be established. Addr string // Metadata is the information associated with Addr, which may be used // to make load balancing decision. Metadata interface{} } // BalancerConfig specifies the configurations for Balancer. // // Deprecated: please use package balancer. May be removed in a future 1.x release. 
type BalancerConfig struct { // DialCreds is the transport credential the Balancer implementation can // use to dial to a remote load balancer server. The Balancer implementations // can ignore this if it does not need to talk to another party securely. DialCreds credentials.TransportCredentials // Dialer is the custom dialer the Balancer implementation can use to dial // to a remote load balancer server. The Balancer implementations // can ignore this if it doesn't need to talk to remote balancer. Dialer func(context.Context, string) (net.Conn, error) } // BalancerGetOptions configures a Get call. // // Deprecated: please use package balancer. May be removed in a future 1.x release. type BalancerGetOptions struct { // BlockingWait specifies whether Get should block when there is no // connected address. BlockingWait bool } // Balancer chooses network addresses for RPCs. // // Deprecated: please use package balancer. May be removed in a future 1.x release. type Balancer interface { // Start does the initialization work to bootstrap a Balancer. For example, // this function may start the name resolution and watch the updates. It will // be called when dialing. Start(target string, config BalancerConfig) error // Up informs the Balancer that gRPC has a connection to the server at // addr. It returns down which is called once the connection to addr gets // lost or closed. // TODO: It is not clear how to construct and take advantage of the meaningful error // parameter for down. Need realistic demands to guide. Up(addr Address) (down func(error)) // Get gets the address of a server for the RPC corresponding to ctx. 
// i) If it returns a connected address, gRPC internals issues the RPC on the // connection to this address; // ii) If it returns an address on which the connection is under construction // (initiated by Notify(...)) but not connected, gRPC internals // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or // Shutdown state; // or // * issues RPC on the connection otherwise. // iii) If it returns an address on which the connection does not exist, gRPC // internals treats it as an error and will fail the corresponding RPC. // // Therefore, the following is the recommended rule when writing a custom Balancer. // If opts.BlockingWait is true, it should return a connected address or // block if there is no connected address. It should respect the timeout or // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast // RPCs), it should return an address it has notified via Notify(...) immediately // instead of blocking. // // The function returns put which is called once the rpc has completed or failed. // put can collect and report RPC stats to a remote load balancer. // // This function should only return the errors Balancer cannot recover by itself. // gRPC internals will fail the RPC if an error is returned. Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) // Notify returns a channel that is used by gRPC internals to watch the addresses // gRPC needs to connect. The addresses might be from a name resolver or remote // load balancer. gRPC internals will compare it with the existing connected // addresses. If the address Balancer notified is not in the existing connected // addresses, gRPC starts to connect the address. If an address in the existing // connected addresses is not in the notification list, the corresponding connection // is shutdown gracefully. Otherwise, there are no operations to take. 
Note that // the Address slice must be the full list of the Addresses which should be connected. // It is NOT delta. Notify() <-chan []Address // Close shuts down the balancer. Close() error } // RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch // the name resolution updates and updates the addresses available correspondingly. // // Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release. func RoundRobin(r naming.Resolver) Balancer { return &roundRobin{r: r} } type addrInfo struct { addr Address connected bool } type roundRobin struct { r naming.Resolver w naming.Watcher addrs []*addrInfo // all the addresses the client should potentially connect mu sync.Mutex addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. next int // index of the next address to return for Get() waitCh chan struct{} // the channel to block when there is no connected address available done bool // The Balancer is closed. } func (rr *roundRobin) watchAddrUpdates() error { updates, err := rr.w.Next() if err != nil { grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) return err } rr.mu.Lock() defer rr.mu.Unlock() for _, update := range updates { addr := Address{ Addr: update.Addr, Metadata: update.Metadata, } switch update.Op { case naming.Add: var exist bool for _, v := range rr.addrs { if addr == v.addr { exist = true grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) break } } if exist { continue } rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) case naming.Delete: for i, v := range rr.addrs { if addr == v.addr { copy(rr.addrs[i:], rr.addrs[i+1:]) rr.addrs = rr.addrs[:len(rr.addrs)-1] break } } default: grpclog.Errorln("Unknown update.Op ", update.Op) } } // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. 
open := make([]Address, len(rr.addrs)) for i, v := range rr.addrs { open[i] = v.addr } if rr.done { return ErrClientConnClosing } select { case <-rr.addrCh: default: } rr.addrCh <- open return nil } func (rr *roundRobin) Start(target string, config BalancerConfig) error { rr.mu.Lock() defer rr.mu.Unlock() if rr.done { return ErrClientConnClosing } if rr.r == nil { // If there is no name resolver installed, it is not needed to // do name resolution. In this case, target is added into rr.addrs // as the only address available and rr.addrCh stays nil. rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) return nil } w, err := rr.r.Resolve(target) if err != nil { return err } rr.w = w rr.addrCh = make(chan []Address, 1) go func() { for { if err := rr.watchAddrUpdates(); err != nil { return } } }() return nil } // Up sets the connected state of addr and sends notification if there are pending // Get() calls. func (rr *roundRobin) Up(addr Address) func(error) { rr.mu.Lock() defer rr.mu.Unlock() var cnt int for _, a := range rr.addrs { if a.addr == addr { if a.connected { return nil } a.connected = true } if a.connected { cnt++ } } // addr is only one which is connected. Notify the Get() callers who are blocking. if cnt == 1 && rr.waitCh != nil { close(rr.waitCh) rr.waitCh = nil } return func(err error) { rr.down(addr, err) } } // down unsets the connected state of addr. func (rr *roundRobin) down(addr Address, err error) { rr.mu.Lock() defer rr.mu.Unlock() for _, a := range rr.addrs { if addr == a.addr { a.connected = false break } } } // Get returns the next addr in the rotation. 
func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { var ch chan struct{} rr.mu.Lock() if rr.done { rr.mu.Unlock() err = ErrClientConnClosing return } if len(rr.addrs) > 0 { if rr.next >= len(rr.addrs) { rr.next = 0 } next := rr.next for { a := rr.addrs[next] next = (next + 1) % len(rr.addrs) if a.connected { addr = a.addr rr.next = next rr.mu.Unlock() return } if next == rr.next { // Has iterated all the possible address but none is connected. break } } } if !opts.BlockingWait { if len(rr.addrs) == 0 { rr.mu.Unlock() err = status.Errorf(codes.Unavailable, "there is no address available") return } // Returns the next addr on rr.addrs for failfast RPCs. addr = rr.addrs[rr.next].addr rr.next++ rr.mu.Unlock() return } // Wait on rr.waitCh for non-failfast RPCs. if rr.waitCh == nil { ch = make(chan struct{}) rr.waitCh = ch } else { ch = rr.waitCh } rr.mu.Unlock() for { select { case <-ctx.Done(): err = ctx.Err() return case <-ch: rr.mu.Lock() if rr.done { rr.mu.Unlock() err = ErrClientConnClosing return } if len(rr.addrs) > 0 { if rr.next >= len(rr.addrs) { rr.next = 0 } next := rr.next for { a := rr.addrs[next] next = (next + 1) % len(rr.addrs) if a.connected { addr = a.addr rr.next = next rr.mu.Unlock() return } if next == rr.next { // Has iterated all the possible address but none is connected. break } } } // The newly added addr got removed by Down() again. if rr.waitCh == nil { ch = make(chan struct{}) rr.waitCh = ch } else { ch = rr.waitCh } rr.mu.Unlock() } } } func (rr *roundRobin) Notify() <-chan []Address { return rr.addrCh } func (rr *roundRobin) Close() error { rr.mu.Lock() defer rr.mu.Unlock() if rr.done { return errBalancerClosed } rr.done = true if rr.w != nil { rr.w.Close() } if rr.waitCh != nil { close(rr.waitCh) rr.waitCh = nil } if rr.addrCh != nil { close(rr.addrCh) } return nil } // pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. 
// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() // returns the only address Up by resetTransport(). type pickFirst struct { *roundRobin } grpc-go-1.29.1/balancer/000077500000000000000000000000001365033716300147205ustar00rootroot00000000000000grpc-go-1.29.1/balancer/balancer.go000066400000000000000000000436641365033716300170330ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package balancer defines APIs for load balancing in gRPC. // All APIs in this package are experimental. package balancer import ( "context" "encoding/json" "errors" "net" "strings" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) var ( // m is a map from name to balancer builder. m = make(map[string]Builder) ) // Register registers the balancer builder to the balancer map. b.Name // (lowercased) will be used as the name registered with this builder. If the // Builder implements ConfigParser, ParseConfig will be called when new service // configs are received by the resolver, and the result will be provided to the // Balancer in UpdateClientConnState. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. 
If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { m[strings.ToLower(b.Name())] = b } // unregisterForTesting deletes the balancer with the given name from the // balancer map. // // This function is not thread-safe. func unregisterForTesting(name string) { delete(m, name) } func init() { internal.BalancerUnregister = unregisterForTesting } // Get returns the resolver builder registered with the given name. // Note that the compare is done in a case-insensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { if b, ok := m[strings.ToLower(name)]; ok { return b } return nil } // SubConn represents a gRPC sub connection. // Each sub connection contains a list of addresses. gRPC will // try to connect to them (in sequence), and stop trying the // remainder once one connection is successful. // // The reconnect backoff will be applied on the list, not a single address. // For example, try_on_all_addresses -> backoff -> try_on_all_addresses. // // All SubConns start in IDLE, and will not try to connect. To trigger // the connecting, Balancers must call Connect. // When the connection encounters an error, it will reconnect immediately. // When the connection becomes IDLE, it will not reconnect unless Connect is // called. // // This interface is to be implemented by gRPC. Users should not need a // brand new implementation of this interface. For the situations like // testing, the new implementation should embed this interface. This allows // gRPC to add new methods to this interface. type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. // If it's in the list, the connection will be kept. // If it's not in the list, the connection will gracefully closed, and // a new connection will be created. 
// // This will trigger a state transition for the SubConn. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() } // NewSubConnOptions contains options to create new SubConn. type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created // SubConn. If it's nil, the original creds from grpc DialOptions will be // used. CredsBundle credentials.Bundle // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool } // State contains the balancer's state relevant to the gRPC ClientConn. type State struct { // State contains the connectivity state of the balancer, which is used to // determine the state of the ClientConn. ConnectivityState connectivity.State // Picker is used to choose connections (SubConns) for RPCs. Picker V2Picker } // ClientConn represents a gRPC ClientConn. // // This interface is to be implemented by gRPC. Users should not need a // brand new implementation of this interface. For the situations like // testing, the new implementation should embed this interface. This allows // gRPC to add new methods to this interface. type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. // Behaviors of the SubConn can be controlled by options. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. RemoveSubConn(SubConn) // UpdateBalancerState is called by balancer to notify gRPC that some internal // state in balancer has changed. // // gRPC will update the connectivity state of the ClientConn, and will call pick // on the new picker to pick new SubConn. 
// // Deprecated: use UpdateState instead UpdateBalancerState(s connectivity.State, p Picker) // UpdateState notifies gRPC that the balancer's internal state has // changed. // // gRPC will update the connectivity state of the ClientConn, and will call pick // on the new picker to pick new SubConns. UpdateState(State) // ResolveNow is called by balancer to notify gRPC to do a name resolving. ResolveNow(resolver.ResolveNowOptions) // Target returns the dial target for this ClientConn. // // Deprecated: Use the Target field in the BuildOptions instead. Target() string } // BuildOptions contains additional information for Build. type BuildOptions struct { // DialCreds is the transport credential the Balancer implementation can // use to dial to a remote load balancer server. The Balancer implementations // can ignore this if it does not need to talk to another party securely. DialCreds credentials.TransportCredentials // CredsBundle is the credentials bundle that the Balancer can use. CredsBundle credentials.Bundle // Dialer is the custom dialer the Balancer implementation can use to dial // to a remote load balancer server. The Balancer implementations // can ignore this if it doesn't need to talk to remote balancer. Dialer func(context.Context, string) (net.Conn, error) // ChannelzParentID is the entity parent's channelz unique identification number. ChannelzParentID int64 // Target contains the parsed address info of the dial target. It is the same resolver.Target as // passed to the resolver. // See the documentation for the resolver.Target type for details about what it contains. Target resolver.Target } // Builder creates a balancer. type Builder interface { // Build creates a new balancer with the ClientConn. Build(cc ClientConn, opts BuildOptions) Balancer // Name returns the name of balancers built by this builder. // It will be used to pick balancers (for example in service config). Name() string } // ConfigParser parses load balancer configs. 
type ConfigParser interface { // ParseConfig parses the JSON load balancer config provided into an // internal form or returns an error if the config is invalid. For future // compatibility reasons, unknown fields in the config should be ignored. ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) } // PickInfo contains additional information for the Pick operation. type PickInfo struct { // FullMethodName is the method name that NewClientStream() is called // with. The canonical format is /service/Method. FullMethodName string // Ctx is the RPC's context, and may contain relevant RPC-level information // like the outgoing header metadata. Ctx context.Context } // DoneInfo contains additional information for done. type DoneInfo struct { // Err is the rpc error the RPC finished with. It could be nil. Err error // Trailer contains the metadata from the RPC's trailer, if present. Trailer metadata.MD // BytesSent indicates if any bytes have been sent to the server. BytesSent bool // BytesReceived indicates if any byte has been received from the server. BytesReceived bool // ServerLoad is the load received from server. It's usually sent as part of // trailing metadata. // // The only supported type now is *orca_v1.LoadReport. ServerLoad interface{} } var ( // ErrNoSubConnAvailable indicates no SubConn is available for pick(). // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). ErrNoSubConnAvailable = errors.New("no SubConn is available") // ErrTransientFailure indicates all SubConns are in TransientFailure. // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure")) ) // Picker is used by gRPC to pick a SubConn to send an RPC. // Balancer is expected to generate a new picker from its snapshot every time its // internal state has changed. 
// // The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). // // Deprecated: use V2Picker instead type Picker interface { // Pick returns the SubConn to be used to send the RPC. // The returned SubConn must be one returned by NewSubConn(). // // This functions is expected to return: // - a SubConn that is known to be READY; // - ErrNoSubConnAvailable if no SubConn is available, but progress is being // made (for example, some SubConn is in CONNECTING mode); // - other errors if no active connecting is happening (for example, all SubConn // are in TRANSIENT_FAILURE mode). // // If a SubConn is returned: // - If it is READY, gRPC will send the RPC on it; // - If it is not ready, or becomes not ready after it's returned, gRPC will // block until UpdateBalancerState() is called and will call pick on the // new picker. The done function returned from Pick(), if not nil, will be // called with nil error, no bytes sent and no bytes received. // // If the returned error is not nil: // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() // - If the error is ErrTransientFailure or implements IsTransientFailure() // bool, returning true: // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() // is called to pick again; // - Otherwise, RPC will fail with unavailable error. // - Else (error is other non-nil error): // - The RPC will fail with the error's status code, or Unknown if it is // not a status error. // // The returned done() function will be called once the rpc has finished, // with the final status of that RPC. If the SubConn returned is not a // valid SubConn type, done may not be called. done may be nil if balancer // doesn't care about the RPC status. Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error) } // PickResult contains information related to a connection chosen for an RPC. 
type PickResult struct { // SubConn is the connection to use for this pick, if its state is Ready. // If the state is not Ready, gRPC will block the RPC until a new Picker is // provided by the balancer (using ClientConn.UpdateState). The SubConn // must be one returned by ClientConn.NewSubConn. SubConn SubConn // Done is called when the RPC is completed. If the SubConn is not ready, // this will be called with a nil parameter. If the SubConn is not a valid // type, Done may not be called. May be nil if the balancer does not wish // to be notified when the RPC completes. Done func(DoneInfo) } type transientFailureError struct { error } func (e *transientFailureError) IsTransientFailure() bool { return true } // TransientFailureError wraps err in an error implementing // IsTransientFailure() bool, returning true. func TransientFailureError(err error) error { return &transientFailureError{error: err} } // V2Picker is used by gRPC to pick a SubConn to send an RPC. // Balancer is expected to generate a new picker from its snapshot every time its // internal state has changed. // // The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). type V2Picker interface { // Pick returns the connection to use for this RPC and related information. // // Pick should not block. If the balancer needs to do I/O or any blocking // or time-consuming work to service this call, it should return // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when // the Picker is updated (using ClientConn.UpdateState). // // If an error is returned: // // - If the error is ErrNoSubConnAvailable, gRPC will block until a new // Picker is provided by the balancer (using ClientConn.UpdateState). // // - If the error implements IsTransientFailure() bool, returning true, // wait for ready RPCs will wait, but non-wait for ready RPCs will be // terminated with this error's Error() string and status code // Unavailable. 
// // - Any other errors terminate all RPCs with the code and message // provided. If the error is not a status error, it will be converted by // gRPC to a status error with code Unknown. Pick(info PickInfo) (PickResult, error) } // Balancer takes input from gRPC, manages SubConns, and collects and aggregates // the connectivity states. // // It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. // // HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed // to be called synchronously from the same goroutine. // There's no guarantee on picker.Pick, it may be called anytime. type Balancer interface { // HandleSubConnStateChange is called by gRPC when the connectivity state // of sc has changed. // Balancer is expected to aggregate all the state of SubConn and report // that back to gRPC. // Balancer should also generate and update Pickers when its internal state has // been changed by the new state. // // Deprecated: if V2Balancer is implemented by the Balancer, // UpdateSubConnState will be called instead. HandleSubConnStateChange(sc SubConn, state connectivity.State) // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to // balancers. // Balancer can create new SubConn or remove SubConn with the addresses. // An empty address slice and a non-nil error will be passed if the resolver returns // non-nil error to gRPC. // // Deprecated: if V2Balancer is implemented by the Balancer, // UpdateClientConnState will be called instead. HandleResolvedAddrs([]resolver.Address, error) // Close closes the balancer. The balancer is not required to call // ClientConn.RemoveSubConn for its existing SubConns. Close() } // SubConnState describes the state of a SubConn. type SubConnState struct { // ConnectivityState is the connectivity state of the SubConn. ConnectivityState connectivity.State // ConnectionError is set if the ConnectivityState is TransientFailure, // describing the reason the SubConn failed. 
Otherwise, it is nil. ConnectionError error } // ClientConnState describes the state of a ClientConn relevant to the // balancer. type ClientConnState struct { ResolverState resolver.State // The parsed load balancing configuration returned by the builder's // ParseConfig method, if implemented. BalancerConfig serviceconfig.LoadBalancingConfig } // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") // V2Balancer is defined for documentation purposes. If a Balancer also // implements V2Balancer, its UpdateClientConnState method will be called // instead of HandleResolvedAddrs and its UpdateSubConnState will be called // instead of HandleSubConnStateChange. type V2Balancer interface { // UpdateClientConnState is called by gRPC when the state of the ClientConn // changes. If the error returned is ErrBadResolverState, the ClientConn // will begin calling ResolveNow on the active name resolver with // exponential backoff until a subsequent call to UpdateClientConnState // returns a nil error. Any other errors are currently ignored. UpdateClientConnState(ClientConnState) error // ResolverError is called by gRPC when the name resolver reports an error. ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. UpdateSubConnState(SubConn, SubConnState) // Close closes the balancer. The balancer is not required to call // ClientConn.RemoveSubConn for its existing SubConns. Close() } // ConnectivityStateEvaluator takes the connectivity states of multiple SubConns // and returns one aggregated connectivity state. // // It's not thread safe. type ConnectivityStateEvaluator struct { numReady uint64 // Number of addrConns in ready state. numConnecting uint64 // Number of addrConns in connecting state. 
} // RecordTransition records state change happening in subConn and based on that // it evaluates what aggregated state should be. // // - If at least one SubConn in Ready, the aggregated state is Ready; // - Else if at least one SubConn in Connecting, the aggregated state is Connecting; // - Else the aggregated state is TransientFailure. // // Idle and Shutdown are not considered. func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { // Update counters. for idx, state := range []connectivity.State{oldState, newState} { updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. switch state { case connectivity.Ready: cse.numReady += updateVal case connectivity.Connecting: cse.numConnecting += updateVal } } // Evaluate. if cse.numReady > 0 { return connectivity.Ready } if cse.numConnecting > 0 { return connectivity.Connecting } return connectivity.TransientFailure } grpc-go-1.29.1/balancer/base/000077500000000000000000000000001365033716300156325ustar00rootroot00000000000000grpc-go-1.29.1/balancer/base/balancer.go000066400000000000000000000217211365033716300177330ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package base import ( "context" "errors" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/resolver" ) type baseBuilder struct { name string pickerBuilder PickerBuilder v2PickerBuilder V2PickerBuilder config Config } func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, v2PickerBuilder: bb.v2PickerBuilder, subConns: make(map[resolver.Address]balancer.SubConn), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, } // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we // may call UpdateState with this picker. if bb.pickerBuilder != nil { bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) } else { bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable) } return bal } func (bb *baseBuilder) Name() string { return bb.name } var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer type baseBalancer struct { cc balancer.ClientConn pickerBuilder PickerBuilder v2PickerBuilder V2PickerBuilder csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State subConns map[resolver.Address]balancer.SubConn scStates map[balancer.SubConn]connectivity.State picker balancer.Picker v2Picker balancer.V2Picker config Config resolverErr error // the last error reported by the resolver; cleared on successful resolution connErr error // the last connection error; cleared upon leaving TransientFailure } func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { panic("not implemented") } func (b *baseBalancer) ResolverError(err error) { b.resolverErr = err if len(b.subConns) == 0 { b.state = connectivity.TransientFailure } if b.state != connectivity.TransientFailure { // The picker 
will not change since the balancer does not currently // report an error. return } b.regeneratePicker() if b.picker != nil { b.cc.UpdateBalancerState(b.state, b.picker) } else { b.cc.UpdateState(balancer.State{ ConnectivityState: b.state, Picker: b.v2Picker, }) } } func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // TODO: handle s.ResolverState.Err (log if not nil) once implemented. // TODO: handle s.ResolverState.ServiceConfig? if grpclog.V(2) { grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) } // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. addrsSet := make(map[resolver.Address]struct{}) for _, a := range s.ResolverState.Addresses { addrsSet[a] = struct{}{} if _, ok := b.subConns[a]; !ok { // a is a new address (not existing in b.subConns). sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } b.subConns[a] = sc b.scStates[sc] = connectivity.Idle sc.Connect() } } for a, sc := range b.subConns { // a was removed by resolver. if _, ok := addrsSet[a]; !ok { b.cc.RemoveSubConn(sc) delete(b.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in HandleSubConnStateChange. } } // If resolver state contains no addresses, return an error so ClientConn // will trigger re-resolve. Also records this as an resolver error, so when // the overall state turns transient failure, the error message will have // the zero address information. 
if len(s.ResolverState.Addresses) == 0 { b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } return nil } // mergeErrors builds an error from the last connection error and the last // resolver error. Must only be called if b.state is TransientFailure. func (b *baseBalancer) mergeErrors() error { // connErr must always be non-nil unless there are no SubConns, in which // case resolverErr must be non-nil. if b.connErr == nil { return fmt.Errorf("last resolver error: %v", b.resolverErr) } if b.resolverErr == nil { return fmt.Errorf("last connection error: %v", b.connErr) } return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) } // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is // - errPicker if the balancer is in TransientFailure, // - built by the pickerBuilder with all READY SubConns otherwise. func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { if b.pickerBuilder != nil { b.picker = NewErrPicker(balancer.ErrTransientFailure) } else { b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors())) } return } if b.pickerBuilder != nil { readySCs := make(map[resolver.Address]balancer.SubConn) // Filter out all ready SCs from full subConn map. for addr, sc := range b.subConns { if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { readySCs[addr] = sc } } b.picker = b.pickerBuilder.Build(readySCs) } else { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. 
for addr, sc := range b.subConns { if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { readySCs[sc] = SubConnInfo{Address: addr} } } b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } } func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { panic("not implemented") } func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if grpclog.V(2) { grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) } oldS, ok := b.scStates[sc] if !ok { if grpclog.V(2) { grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) } return } if oldS == connectivity.TransientFailure && s == connectivity.Connecting { // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent // CONNECTING transitions to prevent the aggregated state from being // always CONNECTING when many backends exist but are all down. return } b.scStates[sc] = s switch s { case connectivity.Idle: sc.Connect() case connectivity.Shutdown: // When an address was removed by resolver, b called RemoveSubConn but // kept the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) case connectivity.TransientFailure: // Save error to be reported via picker. 
b.connErr = state.ConnectionError } b.state = b.csEvltr.RecordTransition(oldS, s) // Regenerate picker when one of the following happens: // - this sc entered or left ready // - the aggregated state of balancer is TransientFailure // (may need to update error message) if (s == connectivity.Ready) != (oldS == connectivity.Ready) || b.state == connectivity.TransientFailure { b.regeneratePicker() } if b.picker != nil { b.cc.UpdateBalancerState(b.state, b.picker) } else { b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker}) } } // Close is a nop because base balancer doesn't have internal state to clean up, // and it doesn't need to call RemoveSubConn for the SubConns. func (b *baseBalancer) Close() { } // NewErrPicker returns a picker that always returns err on Pick(). func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} } type errPicker struct { err error // Pick() always returns this err. } func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { return nil, nil, p.err } // NewErrPickerV2 returns a V2Picker that always returns err on Pick(). func NewErrPickerV2(err error) balancer.V2Picker { return &errPickerV2{err: err} } type errPickerV2 struct { err error // Pick() always returns this err. } func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } grpc-go-1.29.1/balancer/base/base.go000066400000000000000000000062211365033716300170740ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package base defines a balancer base that can be used to build balancers with // different picking algorithms. // // The base balancer creates a new SubConn for each resolved address. The // provided picker will only be notified about READY SubConns. // // This package is the base of round_robin balancer, its purpose is to be used // to build round_robin like balancers with complex picking algorithms. // Balancers with more complicated logic should try to implement a balancer // builder from scratch. // // All APIs in this package are experimental. package base import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/resolver" ) // PickerBuilder creates balancer.Picker. type PickerBuilder interface { // Build takes a slice of ready SubConns, and returns a picker that will be // used by gRPC to pick a SubConn. Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker } // V2PickerBuilder creates balancer.V2Picker. type V2PickerBuilder interface { // Build returns a picker that will be used by gRPC to pick a SubConn. Build(info PickerBuildInfo) balancer.V2Picker } // PickerBuildInfo contains information needed by the picker builder to // construct a picker. type PickerBuildInfo struct { // ReadySCs is a map from all ready SubConns to the Addresses used to // create them. ReadySCs map[balancer.SubConn]SubConnInfo } // SubConnInfo contains information about a SubConn created by the base // balancer. 
type SubConnInfo struct { Address resolver.Address // the address used to create this SubConn } // NewBalancerBuilder returns a balancer builder. The balancers // built by this builder will use the picker builder to build pickers. func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { return NewBalancerBuilderWithConfig(name, pb, Config{}) } // Config contains the config info about the base balancer builder. type Config struct { // HealthCheck indicates whether health checking should be enabled for this specific balancer. HealthCheck bool } // NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { return &baseBuilder{ name: name, pickerBuilder: pb, config: config, } } // NewBalancerBuilderV2 returns a base balancer builder configured by the provided config. func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder { return &baseBuilder{ name: name, v2PickerBuilder: pb, config: config, } } grpc-go-1.29.1/balancer/grpclb/000077500000000000000000000000001365033716300161715ustar00rootroot00000000000000grpc-go-1.29.1/balancer/grpclb/grpc_lb_v1/000077500000000000000000000000001365033716300202075ustar00rootroot00000000000000grpc-go-1.29.1/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go000066400000000000000000000732751365033716300237220ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" duration "github.com/golang/protobuf/ptypes/duration" timestamp "github.com/golang/protobuf/ptypes/timestamp" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type LoadBalanceRequest struct { // Types that are valid to be assigned to LoadBalanceRequestType: // *LoadBalanceRequest_InitialRequest // *LoadBalanceRequest_ClientStats LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} } func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) } func (*LoadBalanceRequest) ProtoMessage() {} func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor_7cd3f6d792743fdf, []int{0} } func (m *LoadBalanceRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LoadBalanceRequest.Unmarshal(m, b) } func (m *LoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LoadBalanceRequest.Marshal(b, m, deterministic) } func (m *LoadBalanceRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_LoadBalanceRequest.Merge(m, src) } func (m *LoadBalanceRequest) XXX_Size() int { return xxx_messageInfo_LoadBalanceRequest.Size(m) } func (m *LoadBalanceRequest) XXX_DiscardUnknown() { xxx_messageInfo_LoadBalanceRequest.DiscardUnknown(m) } var xxx_messageInfo_LoadBalanceRequest proto.InternalMessageInfo type isLoadBalanceRequest_LoadBalanceRequestType interface { isLoadBalanceRequest_LoadBalanceRequestType() } type LoadBalanceRequest_InitialRequest struct { InitialRequest *InitialLoadBalanceRequest 
`protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,proto3,oneof"` } type LoadBalanceRequest_ClientStats struct { ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,proto3,oneof"` } func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { if m != nil { return m.LoadBalanceRequestType } return nil } func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { return x.InitialRequest } return nil } func (m *LoadBalanceRequest) GetClientStats() *ClientStats { if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { return x.ClientStats } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*LoadBalanceRequest) XXX_OneofWrappers() []interface{} { return []interface{}{ (*LoadBalanceRequest_InitialRequest)(nil), (*LoadBalanceRequest_ClientStats)(nil), } } type InitialLoadBalanceRequest struct { // The name of the load balanced service (e.g., service.googleapis.com). Its // length should be less than 256 bytes. // The name might include a port number. How to handle the port number is up // to the balancer. 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} } func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) } func (*InitialLoadBalanceRequest) ProtoMessage() {} func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor_7cd3f6d792743fdf, []int{1} } func (m *InitialLoadBalanceRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_InitialLoadBalanceRequest.Unmarshal(m, b) } func (m *InitialLoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_InitialLoadBalanceRequest.Marshal(b, m, deterministic) } func (m *InitialLoadBalanceRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_InitialLoadBalanceRequest.Merge(m, src) } func (m *InitialLoadBalanceRequest) XXX_Size() int { return xxx_messageInfo_InitialLoadBalanceRequest.Size(m) } func (m *InitialLoadBalanceRequest) XXX_DiscardUnknown() { xxx_messageInfo_InitialLoadBalanceRequest.DiscardUnknown(m) } var xxx_messageInfo_InitialLoadBalanceRequest proto.InternalMessageInfo func (m *InitialLoadBalanceRequest) GetName() string { if m != nil { return m.Name } return "" } // Contains the number of calls finished for a particular load balance token. type ClientStatsPerToken struct { // See Server.load_balance_token. LoadBalanceToken string `protobuf:"bytes,1,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` // The total number of RPCs that finished associated with the token. 
NumCalls int64 `protobuf:"varint,2,opt,name=num_calls,json=numCalls,proto3" json:"num_calls,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ClientStatsPerToken) Reset() { *m = ClientStatsPerToken{} } func (m *ClientStatsPerToken) String() string { return proto.CompactTextString(m) } func (*ClientStatsPerToken) ProtoMessage() {} func (*ClientStatsPerToken) Descriptor() ([]byte, []int) { return fileDescriptor_7cd3f6d792743fdf, []int{2} } func (m *ClientStatsPerToken) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ClientStatsPerToken.Unmarshal(m, b) } func (m *ClientStatsPerToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ClientStatsPerToken.Marshal(b, m, deterministic) } func (m *ClientStatsPerToken) XXX_Merge(src proto.Message) { xxx_messageInfo_ClientStatsPerToken.Merge(m, src) } func (m *ClientStatsPerToken) XXX_Size() int { return xxx_messageInfo_ClientStatsPerToken.Size(m) } func (m *ClientStatsPerToken) XXX_DiscardUnknown() { xxx_messageInfo_ClientStatsPerToken.DiscardUnknown(m) } var xxx_messageInfo_ClientStatsPerToken proto.InternalMessageInfo func (m *ClientStatsPerToken) GetLoadBalanceToken() string { if m != nil { return m.LoadBalanceToken } return "" } func (m *ClientStatsPerToken) GetNumCalls() int64 { if m != nil { return m.NumCalls } return 0 } // Contains client level statistics that are useful to load balancing. Each // count except the timestamp should be reset to zero after reporting the stats. type ClientStats struct { // The timestamp of generating the report. Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // The total number of RPCs that started. NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted,proto3" json:"num_calls_started,omitempty"` // The total number of RPCs that finished. 
NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished,proto3" json:"num_calls_finished,omitempty"` // The total number of RPCs that failed to reach a server except dropped RPCs. NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend,proto3" json:"num_calls_finished_with_client_failed_to_send,omitempty"` // The total number of RPCs that finished and are known to have been received // by a server. NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived,proto3" json:"num_calls_finished_known_received,omitempty"` // The list of dropped calls. CallsFinishedWithDrop []*ClientStatsPerToken `protobuf:"bytes,8,rep,name=calls_finished_with_drop,json=callsFinishedWithDrop,proto3" json:"calls_finished_with_drop,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ClientStats) Reset() { *m = ClientStats{} } func (m *ClientStats) String() string { return proto.CompactTextString(m) } func (*ClientStats) ProtoMessage() {} func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor_7cd3f6d792743fdf, []int{3} } func (m *ClientStats) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ClientStats.Unmarshal(m, b) } func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic) } func (m *ClientStats) XXX_Merge(src proto.Message) { xxx_messageInfo_ClientStats.Merge(m, src) } func (m *ClientStats) XXX_Size() int { return xxx_messageInfo_ClientStats.Size(m) } func (m *ClientStats) XXX_DiscardUnknown() { xxx_messageInfo_ClientStats.DiscardUnknown(m) } var xxx_messageInfo_ClientStats proto.InternalMessageInfo func (m *ClientStats) GetTimestamp() *timestamp.Timestamp { if m != nil { return 
m.Timestamp } return nil } func (m *ClientStats) GetNumCallsStarted() int64 { if m != nil { return m.NumCallsStarted } return 0 } func (m *ClientStats) GetNumCallsFinished() int64 { if m != nil { return m.NumCallsFinished } return 0 } func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { if m != nil { return m.NumCallsFinishedWithClientFailedToSend } return 0 } func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 { if m != nil { return m.NumCallsFinishedKnownReceived } return 0 } func (m *ClientStats) GetCallsFinishedWithDrop() []*ClientStatsPerToken { if m != nil { return m.CallsFinishedWithDrop } return nil } type LoadBalanceResponse struct { // Types that are valid to be assigned to LoadBalanceResponseType: // *LoadBalanceResponse_InitialResponse // *LoadBalanceResponse_ServerList // *LoadBalanceResponse_FallbackResponse LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} } func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) } func (*LoadBalanceResponse) ProtoMessage() {} func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor_7cd3f6d792743fdf, []int{4} } func (m *LoadBalanceResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LoadBalanceResponse.Unmarshal(m, b) } func (m *LoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LoadBalanceResponse.Marshal(b, m, deterministic) } func (m *LoadBalanceResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_LoadBalanceResponse.Merge(m, src) } func (m *LoadBalanceResponse) XXX_Size() int { return xxx_messageInfo_LoadBalanceResponse.Size(m) } func (m *LoadBalanceResponse) XXX_DiscardUnknown() { 
xxx_messageInfo_LoadBalanceResponse.DiscardUnknown(m) } var xxx_messageInfo_LoadBalanceResponse proto.InternalMessageInfo type isLoadBalanceResponse_LoadBalanceResponseType interface { isLoadBalanceResponse_LoadBalanceResponseType() } type LoadBalanceResponse_InitialResponse struct { InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,proto3,oneof"` } type LoadBalanceResponse_ServerList struct { ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,proto3,oneof"` } type LoadBalanceResponse_FallbackResponse struct { FallbackResponse *FallbackResponse `protobuf:"bytes,3,opt,name=fallback_response,json=fallbackResponse,proto3,oneof"` } func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} func (*LoadBalanceResponse_FallbackResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { if m != nil { return m.LoadBalanceResponseType } return nil } func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { return x.InitialResponse } return nil } func (m *LoadBalanceResponse) GetServerList() *ServerList { if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { return x.ServerList } return nil } func (m *LoadBalanceResponse) GetFallbackResponse() *FallbackResponse { if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_FallbackResponse); ok { return x.FallbackResponse } return nil } // XXX_OneofWrappers is for the internal use of the proto package. 
func (*LoadBalanceResponse) XXX_OneofWrappers() []interface{} { return []interface{}{ (*LoadBalanceResponse_InitialResponse)(nil), (*LoadBalanceResponse_ServerList)(nil), (*LoadBalanceResponse_FallbackResponse)(nil), } } type FallbackResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *FallbackResponse) Reset() { *m = FallbackResponse{} } func (m *FallbackResponse) String() string { return proto.CompactTextString(m) } func (*FallbackResponse) ProtoMessage() {} func (*FallbackResponse) Descriptor() ([]byte, []int) { return fileDescriptor_7cd3f6d792743fdf, []int{5} } func (m *FallbackResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FallbackResponse.Unmarshal(m, b) } func (m *FallbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FallbackResponse.Marshal(b, m, deterministic) } func (m *FallbackResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_FallbackResponse.Merge(m, src) } func (m *FallbackResponse) XXX_Size() int { return xxx_messageInfo_FallbackResponse.Size(m) } func (m *FallbackResponse) XXX_DiscardUnknown() { xxx_messageInfo_FallbackResponse.DiscardUnknown(m) } var xxx_messageInfo_FallbackResponse proto.InternalMessageInfo type InitialLoadBalanceResponse struct { // This interval defines how often the client should send the client stats // to the load balancer. Stats should only be reported when the duration is // positive. 
ClientStatsReportInterval *duration.Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval,proto3" json:"client_stats_report_interval,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} } func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) } func (*InitialLoadBalanceResponse) ProtoMessage() {} func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor_7cd3f6d792743fdf, []int{6} } func (m *InitialLoadBalanceResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_InitialLoadBalanceResponse.Unmarshal(m, b) } func (m *InitialLoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_InitialLoadBalanceResponse.Marshal(b, m, deterministic) } func (m *InitialLoadBalanceResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_InitialLoadBalanceResponse.Merge(m, src) } func (m *InitialLoadBalanceResponse) XXX_Size() int { return xxx_messageInfo_InitialLoadBalanceResponse.Size(m) } func (m *InitialLoadBalanceResponse) XXX_DiscardUnknown() { xxx_messageInfo_InitialLoadBalanceResponse.DiscardUnknown(m) } var xxx_messageInfo_InitialLoadBalanceResponse proto.InternalMessageInfo func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *duration.Duration { if m != nil { return m.ClientStatsReportInterval } return nil } type ServerList struct { // Contains a list of servers selected by the load balancer. The list will // be updated when server resolutions change or as needed to balance load // across more servers. The client should consume the server list in order // unless instructed otherwise via the client_config. 
Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ServerList) Reset() { *m = ServerList{} } func (m *ServerList) String() string { return proto.CompactTextString(m) } func (*ServerList) ProtoMessage() {} func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor_7cd3f6d792743fdf, []int{7} } func (m *ServerList) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServerList.Unmarshal(m, b) } func (m *ServerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServerList.Marshal(b, m, deterministic) } func (m *ServerList) XXX_Merge(src proto.Message) { xxx_messageInfo_ServerList.Merge(m, src) } func (m *ServerList) XXX_Size() int { return xxx_messageInfo_ServerList.Size(m) } func (m *ServerList) XXX_DiscardUnknown() { xxx_messageInfo_ServerList.DiscardUnknown(m) } var xxx_messageInfo_ServerList proto.InternalMessageInfo func (m *ServerList) GetServers() []*Server { if m != nil { return m.Servers } return nil } // Contains server information. When the drop field is not true, use the other // fields. type Server struct { // A resolved address for the server, serialized in network-byte-order. It may // either be an IPv4 or IPv6 address. IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // A resolved port number for the server. Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` // An opaque but printable token for load reporting. The client must include // the token of the picked server into the initial metadata when it starts a // call to that server. The token is used by the server to verify the request // and to allow the server to report load to the gRPC LB system. The token is // also used in client stats for reporting dropped calls. 
// // Its length can be variable but must be less than 50 bytes. LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken,proto3" json:"load_balance_token,omitempty"` // Indicates whether this particular request should be dropped by the client. // If the request is dropped, there will be a corresponding entry in // ClientStats.calls_finished_with_drop. Drop bool `protobuf:"varint,4,opt,name=drop,proto3" json:"drop,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Server) Reset() { *m = Server{} } func (m *Server) String() string { return proto.CompactTextString(m) } func (*Server) ProtoMessage() {} func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor_7cd3f6d792743fdf, []int{8} } func (m *Server) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Server.Unmarshal(m, b) } func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Server.Marshal(b, m, deterministic) } func (m *Server) XXX_Merge(src proto.Message) { xxx_messageInfo_Server.Merge(m, src) } func (m *Server) XXX_Size() int { return xxx_messageInfo_Server.Size(m) } func (m *Server) XXX_DiscardUnknown() { xxx_messageInfo_Server.DiscardUnknown(m) } var xxx_messageInfo_Server proto.InternalMessageInfo func (m *Server) GetIpAddress() []byte { if m != nil { return m.IpAddress } return nil } func (m *Server) GetPort() int32 { if m != nil { return m.Port } return 0 } func (m *Server) GetLoadBalanceToken() string { if m != nil { return m.LoadBalanceToken } return "" } func (m *Server) GetDrop() bool { if m != nil { return m.Drop } return false } func init() { proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest") proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest") proto.RegisterType((*ClientStatsPerToken)(nil), "grpc.lb.v1.ClientStatsPerToken") 
proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats") proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse") proto.RegisterType((*FallbackResponse)(nil), "grpc.lb.v1.FallbackResponse") proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse") proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList") proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") } func init() { proto.RegisterFile("grpc/lb/v1/load_balancer.proto", fileDescriptor_7cd3f6d792743fdf) } var fileDescriptor_7cd3f6d792743fdf = []byte{ // 769 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x55, 0xdd, 0x6e, 0xdb, 0x36, 0x14, 0x8e, 0x62, 0x25, 0x75, 0x8e, 0xb3, 0x45, 0x61, 0xb1, 0xcd, 0x71, 0xd3, 0x24, 0x13, 0xb0, 0x22, 0x18, 0x3a, 0x79, 0xc9, 0x6e, 0x36, 0x60, 0x17, 0x9b, 0x5b, 0x04, 0x69, 0xda, 0x8b, 0x80, 0x0e, 0xd0, 0xa1, 0xc0, 0xc0, 0x51, 0x12, 0xed, 0x10, 0xa1, 0x49, 0x8d, 0xa2, 0x5d, 0xec, 0x66, 0x37, 0x7b, 0x81, 0x3d, 0xca, 0x5e, 0x61, 0x6f, 0x36, 0x88, 0xa4, 0x2c, 0xd5, 0xae, 0xb1, 0x2b, 0x91, 0xe7, 0x7c, 0xfc, 0xce, 0xff, 0x11, 0x9c, 0x4c, 0x75, 0x91, 0x0d, 0x45, 0x3a, 0x5c, 0x5c, 0x0c, 0x85, 0xa2, 0x39, 0x49, 0xa9, 0xa0, 0x32, 0x63, 0x3a, 0x29, 0xb4, 0x32, 0x0a, 0x41, 0xa5, 0x4f, 0x44, 0x9a, 0x2c, 0x2e, 0x06, 0x27, 0x53, 0xa5, 0xa6, 0x82, 0x0d, 0xad, 0x26, 0x9d, 0x4f, 0x86, 0xf9, 0x5c, 0x53, 0xc3, 0x95, 0x74, 0xd8, 0xc1, 0xe9, 0xaa, 0xde, 0xf0, 0x19, 0x2b, 0x0d, 0x9d, 0x15, 0x0e, 0x10, 0xff, 0x1b, 0x00, 0x7a, 0xa3, 0x68, 0x3e, 0x72, 0x36, 0x30, 0xfb, 0x7d, 0xce, 0x4a, 0x83, 0x6e, 0xe1, 0x80, 0x4b, 0x6e, 0x38, 0x15, 0x44, 0x3b, 0x51, 0x3f, 0x38, 0x0b, 0xce, 0x7b, 0x97, 0x5f, 0x25, 0x8d, 0xf5, 0xe4, 0x95, 0x83, 0xac, 0xbf, 0xbf, 0xde, 0xc2, 0x9f, 0xfa, 0xf7, 0x35, 0xe3, 0x8f, 0xb0, 0x9f, 0x09, 0xce, 0xa4, 0x21, 0xa5, 0xa1, 0xa6, 0xec, 0x6f, 0x5b, 0xba, 0x2f, 0xda, 0x74, 0x2f, 0xac, 0x7e, 0x5c, 0xa9, 0xaf, 0xb7, 0x70, 0x2f, 0x6b, 0xae, 0xa3, 0x27, 0x70, 
0xd4, 0x4e, 0x45, 0xed, 0x14, 0x31, 0x7f, 0x14, 0x2c, 0x1e, 0xc2, 0xd1, 0x46, 0x4f, 0x10, 0x82, 0x50, 0xd2, 0x19, 0xb3, 0xee, 0xef, 0x61, 0x7b, 0x8e, 0x7f, 0x83, 0xc7, 0x2d, 0x5b, 0xb7, 0x4c, 0xdf, 0xa9, 0x07, 0x26, 0xd1, 0x73, 0x40, 0x1f, 0x18, 0x31, 0x95, 0xd4, 0x3f, 0x8c, 0x44, 0x43, 0xed, 0xd0, 0x4f, 0x60, 0x4f, 0xce, 0x67, 0x24, 0xa3, 0x42, 0xb8, 0x68, 0x3a, 0xb8, 0x2b, 0xe7, 0xb3, 0x17, 0xd5, 0x3d, 0xfe, 0xa7, 0x03, 0xbd, 0x96, 0x09, 0xf4, 0x3d, 0xec, 0x2d, 0x33, 0xef, 0x33, 0x39, 0x48, 0x5c, 0x6d, 0x92, 0xba, 0x36, 0xc9, 0x5d, 0x8d, 0xc0, 0x0d, 0x18, 0x7d, 0x0d, 0x87, 0x4b, 0x33, 0x55, 0xea, 0xb4, 0x61, 0xb9, 0x37, 0x77, 0x50, 0x9b, 0x1b, 0x3b, 0x71, 0x15, 0x40, 0x83, 0x9d, 0x70, 0xc9, 0xcb, 0x7b, 0x96, 0xf7, 0x3b, 0x16, 0x1c, 0xd5, 0xe0, 0x2b, 0x2f, 0x47, 0xbf, 0xc2, 0x37, 0xeb, 0x68, 0xf2, 0x9e, 0x9b, 0x7b, 0xe2, 0x2b, 0x35, 0xa1, 0x5c, 0xb0, 0x9c, 0x18, 0x45, 0x4a, 0x26, 0xf3, 0xfe, 0xae, 0x25, 0x7a, 0xb6, 0x4a, 0xf4, 0x96, 0x9b, 0x7b, 0x17, 0xeb, 0x95, 0xc5, 0xdf, 0xa9, 0x31, 0x93, 0x39, 0xba, 0x86, 0x2f, 0x3f, 0x42, 0xff, 0x20, 0xd5, 0x7b, 0x49, 0x34, 0xcb, 0x18, 0x5f, 0xb0, 0xbc, 0xff, 0xc8, 0x52, 0x3e, 0x5d, 0xa5, 0x7c, 0x5d, 0xa1, 0xb0, 0x07, 0xa1, 0x5f, 0xa0, 0xff, 0x31, 0x27, 0x73, 0xad, 0x8a, 0x7e, 0xf7, 0xac, 0x73, 0xde, 0xbb, 0x3c, 0xdd, 0xd0, 0x46, 0x75, 0x69, 0xf1, 0x67, 0xd9, 0xaa, 0xc7, 0x2f, 0xb5, 0x2a, 0x6e, 0xc2, 0x6e, 0x18, 0xed, 0xdc, 0x84, 0xdd, 0x9d, 0x68, 0x37, 0xfe, 0x7b, 0x1b, 0x1e, 0x7f, 0xd0, 0x3f, 0x65, 0xa1, 0x64, 0xc9, 0xd0, 0x18, 0xa2, 0x66, 0x14, 0x9c, 0xcc, 0x57, 0xf0, 0xd9, 0xff, 0xcd, 0x82, 0x43, 0x5f, 0x6f, 0xe1, 0x83, 0xe5, 0x30, 0x78, 0xd2, 0x1f, 0xa0, 0x57, 0x32, 0xbd, 0x60, 0x9a, 0x08, 0x5e, 0x1a, 0x3f, 0x0c, 0x9f, 0xb7, 0xf9, 0xc6, 0x56, 0xfd, 0x86, 0xdb, 0x61, 0x82, 0x72, 0x79, 0x43, 0xaf, 0xe1, 0x70, 0x42, 0x85, 0x48, 0x69, 0xf6, 0xd0, 0x38, 0xd4, 0xb1, 0x04, 0xc7, 0x6d, 0x82, 0x2b, 0x0f, 0x6a, 0xb9, 0x11, 0x4d, 0x56, 0x64, 0xa3, 0x63, 0x18, 0xac, 0xcc, 0x95, 0x53, 0xb8, 0xc1, 0x42, 0x10, 0xad, 0xb2, 0xc4, 0x7f, 
0xc2, 0x60, 0x73, 0xa8, 0xe8, 0x1d, 0x1c, 0xb7, 0xa7, 0x9c, 0x68, 0x56, 0x28, 0x6d, 0x08, 0x97, 0x86, 0xe9, 0x05, 0x15, 0x3e, 0xd0, 0xa3, 0xb5, 0xd6, 0x7f, 0xe9, 0xd7, 0x16, 0x3e, 0x6a, 0x4d, 0x3d, 0xb6, 0x8f, 0x5f, 0xf9, 0xb7, 0x37, 0x61, 0x37, 0x88, 0xb6, 0xe3, 0x9f, 0x00, 0x9a, 0xd4, 0xa0, 0xe7, 0xf0, 0xc8, 0xa5, 0xa6, 0xec, 0x07, 0xb6, 0x13, 0xd0, 0x7a, 0x0e, 0x71, 0x0d, 0xb9, 0x09, 0xbb, 0x9d, 0x28, 0x8c, 0xff, 0x0a, 0x60, 0xd7, 0x69, 0xd0, 0x53, 0x00, 0x5e, 0x10, 0x9a, 0xe7, 0x9a, 0x95, 0xa5, 0xad, 0xea, 0x3e, 0xde, 0xe3, 0xc5, 0xcf, 0x4e, 0x50, 0xed, 0x8e, 0xca, 0x03, 0xeb, 0xf5, 0x0e, 0xb6, 0xe7, 0x0d, 0x4b, 0xa2, 0xb3, 0x61, 0x49, 0x20, 0x08, 0x6d, 0x9b, 0x86, 0x67, 0xc1, 0x79, 0x17, 0xdb, 0xb3, 0x6b, 0xb7, 0xcb, 0x14, 0xf6, 0x5b, 0x09, 0xd4, 0x08, 0x43, 0xcf, 0x9f, 0x2b, 0x31, 0x3a, 0x69, 0xc7, 0xb1, 0xbe, 0xd6, 0x06, 0xa7, 0x1b, 0xf5, 0xae, 0x12, 0xe7, 0xc1, 0xb7, 0xc1, 0xe8, 0x2d, 0x7c, 0xc2, 0x55, 0x0b, 0x38, 0x3a, 0x6c, 0x9b, 0xbc, 0xad, 0x92, 0x7f, 0x1b, 0xbc, 0xbb, 0xf0, 0xc5, 0x98, 0x2a, 0x41, 0xe5, 0x34, 0x51, 0x7a, 0x3a, 0xb4, 0x7f, 0xa0, 0xfa, 0xb7, 0x63, 0x6f, 0x22, 0xb5, 0x1f, 0x22, 0x52, 0xb2, 0xb8, 0x48, 0x77, 0x6d, 0xe1, 0xbe, 0xfb, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x47, 0x55, 0xac, 0xab, 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // LoadBalancerClient is the client API for LoadBalancer service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type LoadBalancerClient interface { // Bidirectional rpc to get a list of servers. 
BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) } type loadBalancerClient struct { cc grpc.ClientConnInterface } func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { return &loadBalancerClient{cc} } func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { stream, err := c.cc.NewStream(ctx, &_LoadBalancer_serviceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) if err != nil { return nil, err } x := &loadBalancerBalanceLoadClient{stream} return x, nil } type LoadBalancer_BalanceLoadClient interface { Send(*LoadBalanceRequest) error Recv() (*LoadBalanceResponse, error) grpc.ClientStream } type loadBalancerBalanceLoadClient struct { grpc.ClientStream } func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { return x.ClientStream.SendMsg(m) } func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { m := new(LoadBalanceResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // LoadBalancerServer is the server API for LoadBalancer service. type LoadBalancerServer interface { // Bidirectional rpc to get a list of servers. BalanceLoad(LoadBalancer_BalanceLoadServer) error } // UnimplementedLoadBalancerServer can be embedded to have forward compatible implementations. 
type UnimplementedLoadBalancerServer struct { } func (*UnimplementedLoadBalancerServer) BalanceLoad(srv LoadBalancer_BalanceLoadServer) error { return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") } func RegisterLoadBalancerServer(s *grpc.Server, srv LoadBalancerServer) { s.RegisterService(&_LoadBalancer_serviceDesc, srv) } func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream}) } type LoadBalancer_BalanceLoadServer interface { Send(*LoadBalanceResponse) error Recv() (*LoadBalanceRequest, error) grpc.ServerStream } type loadBalancerBalanceLoadServer struct { grpc.ServerStream } func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { return x.ServerStream.SendMsg(m) } func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { m := new(LoadBalanceRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _LoadBalancer_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.lb.v1.LoadBalancer", HandlerType: (*LoadBalancerServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "BalanceLoad", Handler: _LoadBalancer_BalanceLoad_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "grpc/lb/v1/load_balancer.proto", } grpc-go-1.29.1/balancer/grpclb/grpclb.go000066400000000000000000000362031365033716300177750ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate ./regenerate.sh // Package grpclb defines a grpclb balancer. // // To install grpclb balancer, import this package as: // import _ "google.golang.org/grpc/balancer/grpclb" package grpclb import ( "context" "errors" "sync" "time" durationpb "github.com/golang/protobuf/ptypes/duration" "google.golang.org/grpc" "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/resolver/dns" "google.golang.org/grpc/resolver" ) const ( lbTokenKey = "lb-token" defaultFallbackTimeout = 10 * time.Second grpclbName = "grpclb" ) var errServerTerminatedConnection = errors.New("grpclb: failed to recv server list: server terminated connection") func convertDuration(d *durationpb.Duration) time.Duration { if d == nil { return 0 } return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond } // Client API for LoadBalancer service. // Mostly copied from generated pb.go file. // To avoid circular dependency. type loadBalancerClient struct { cc *grpc.ClientConn } func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (*balanceLoadClientStream, error) { desc := &grpc.StreamDesc{ StreamName: "BalanceLoad", ServerStreams: true, ClientStreams: true, } stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) 
if err != nil { return nil, err } x := &balanceLoadClientStream{stream} return x, nil } type balanceLoadClientStream struct { grpc.ClientStream } func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { return x.ClientStream.SendMsg(m) } func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { m := new(lbpb.LoadBalanceResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func init() { balancer.Register(newLBBuilder()) dns.EnableSRVLookups = true } // newLBBuilder creates a builder for grpclb. func newLBBuilder() balancer.Builder { return newLBBuilderWithFallbackTimeout(defaultFallbackTimeout) } // newLBBuilderWithFallbackTimeout creates a grpclb builder with the given // fallbackTimeout. If no response is received from the remote balancer within // fallbackTimeout, the backend addresses from the resolved address list will be // used. // // Only call this function when a non-default fallback timeout is needed. func newLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder { return &lbBuilder{ fallbackTimeout: fallbackTimeout, } } type lbBuilder struct { fallbackTimeout time.Duration } func (b *lbBuilder) Name() string { return grpclbName } func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { // This generates a manual resolver builder with a fixed scheme. This // scheme will be used to dial to remote LB, so we can send filtered // address updates to remote LB ClientConn using this manual resolver. 
r := &lbManualResolver{scheme: "grpclb-internal", ccb: cc} lb := &lbBalancer{ cc: newLBCacheClientConn(cc), target: opt.Target.Endpoint, opt: opt, fallbackTimeout: b.fallbackTimeout, doneCh: make(chan struct{}), manualResolver: r, subConns: make(map[resolver.Address]balancer.SubConn), scStates: make(map[balancer.SubConn]connectivity.State), picker: &errPicker{err: balancer.ErrNoSubConnAvailable}, clientStats: newRPCStats(), backoff: backoff.DefaultExponential, // TODO: make backoff configurable. } var err error if opt.CredsBundle != nil { lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) if err != nil { grpclog.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) } lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) if err != nil { grpclog.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) } } return lb } var _ balancer.V2Balancer = (*lbBalancer)(nil) // Assert that we implement V2Balancer type lbBalancer struct { cc *lbCacheClientConn target string opt balancer.BuildOptions usePickFirst bool // grpclbClientConnCreds is the creds bundle to be used to connect to grpclb // servers. If it's nil, use the TransportCredentials from BuildOptions // instead. grpclbClientConnCreds credentials.Bundle // grpclbBackendCreds is the creds bundle to be used for addresses that are // returned by grpclb server. If it's nil, don't set anything when creating // SubConns. grpclbBackendCreds credentials.Bundle fallbackTimeout time.Duration doneCh chan struct{} // manualResolver is used in the remote LB ClientConn inside grpclb. When // resolved address updates are received by grpclb, filtered updates will be // send to remote LB ClientConn through this resolver. manualResolver *lbManualResolver // The ClientConn to talk to the remote balancer. ccRemoteLB *remoteBalancerCCWrapper // backoff for calling remote balancer. 
backoff backoff.Strategy // Support client side load reporting. Each picker gets a reference to this, // and will update its content. clientStats *rpcStats mu sync.Mutex // guards everything following. // The full server list including drops, used to check if the newly received // serverList contains anything new. Each generate picker will also have // reference to this list to do the first layer pick. fullServerList []*lbpb.Server // Backend addresses. It's kept so the addresses are available when // switching between round_robin and pickfirst. backendAddrs []resolver.Address // All backends addresses, with metadata set to nil. This list contains all // backend addresses in the same order and with the same duplicates as in // serverlist. When generating picker, a SubConn slice with the same order // but with only READY SCs will be gerenated. backendAddrsWithoutMetadata []resolver.Address // Roundrobin functionalities. state connectivity.State subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn. scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. picker balancer.V2Picker // Support fallback to resolved backend addresses if there's no response // from remote balancer within fallbackTimeout. remoteBalancerConnected bool serverListReceived bool inFallback bool // resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set // when resolved address updates are received, and read in the goroutine // handling fallback. resolvedBackendAddrs []resolver.Address } // regeneratePicker takes a snapshot of the balancer, and generates a picker from // it. The picker // - always returns ErrTransientFailure if the balancer is in TransientFailure, // - does two layer roundrobin pick otherwise. // Caller must hold lb.mu. 
func (lb *lbBalancer) regeneratePicker(resetDrop bool) { if lb.state == connectivity.TransientFailure { lb.picker = &errPicker{err: balancer.ErrTransientFailure} return } if lb.state == connectivity.Connecting { lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} return } var readySCs []balancer.SubConn if lb.usePickFirst { for _, sc := range lb.subConns { readySCs = append(readySCs, sc) break } } else { for _, a := range lb.backendAddrsWithoutMetadata { if sc, ok := lb.subConns[a]; ok { if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready { readySCs = append(readySCs, sc) } } } } if len(readySCs) <= 0 { // If there's no ready SubConns, always re-pick. This is to avoid drops // unless at least one SubConn is ready. Otherwise we may drop more // often than want because of drops + re-picks(which become re-drops). // // This doesn't seem to be necessary after the connecting check above. // Kept for safety. lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} return } if lb.inFallback { lb.picker = newRRPicker(readySCs) return } if resetDrop { lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) return } prevLBPicker, ok := lb.picker.(*lbPicker) if !ok { lb.picker = newLBPicker(lb.fullServerList, readySCs, lb.clientStats) return } prevLBPicker.updateReadySCs(readySCs) } // aggregateSubConnStats calculate the aggregated state of SubConns in // lb.SubConns. These SubConns are subconns in use (when switching between // fallback and grpclb). lb.scState contains states for all SubConns, including // those in cache (SubConns are cached for 10 seconds after remove). // // The aggregated state is: // - If at least one SubConn in Ready, the aggregated state is Ready; // - Else if at least one SubConn in Connecting, the aggregated state is Connecting; // - Else the aggregated state is TransientFailure. 
func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { var numConnecting uint64 for _, sc := range lb.subConns { if state, ok := lb.scStates[sc]; ok { switch state { case connectivity.Ready: return connectivity.Ready case connectivity.Connecting: numConnecting++ } } } if numConnecting > 0 { return connectivity.Connecting } return connectivity.TransientFailure } func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { panic("not used") } func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { s := scs.ConnectivityState if grpclog.V(2) { grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) } lb.mu.Lock() defer lb.mu.Unlock() oldS, ok := lb.scStates[sc] if !ok { if grpclog.V(2) { grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) } return } lb.scStates[sc] = s switch s { case connectivity.Idle: sc.Connect() case connectivity.Shutdown: // When an address was removed by resolver, b called RemoveSubConn but // kept the sc's state in scStates. Remove state for this sc here. delete(lb.scStates, sc) } // Force regenerate picker if // - this sc became ready from not-ready // - this sc became not-ready from ready lb.updateStateAndPicker((oldS == connectivity.Ready) != (s == connectivity.Ready), false) // Enter fallback when the aggregated state is not Ready and the connection // to remote balancer is lost. if lb.state != connectivity.Ready { if !lb.inFallback && !lb.remoteBalancerConnected { // Enter fallback. lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) } } } // updateStateAndPicker re-calculate the aggregated state, and regenerate picker // if overall state is changed. // // If forceRegeneratePicker is true, picker will be regenerated. 
func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop bool) { oldAggrState := lb.state lb.state = lb.aggregateSubConnStates() // Regenerate picker when one of the following happens: // - caller wants to regenerate // - the aggregated state changed if forceRegeneratePicker || (lb.state != oldAggrState) { lb.regeneratePicker(resetDrop) } lb.cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) } // fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use // resolved backends (backends received from resolver, not from remote balancer) // if no connection to remote balancers was successful. func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) { timer := time.NewTimer(fallbackTimeout) defer timer.Stop() select { case <-timer.C: case <-lb.doneCh: return } lb.mu.Lock() if lb.inFallback || lb.serverListReceived { lb.mu.Unlock() return } // Enter fallback. lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) lb.mu.Unlock() } // HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB // clientConn. The remoteLB clientConn will handle creating/removing remoteLB // connections. func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { panic("not used") } func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { lb.mu.Lock() defer lb.mu.Unlock() newUsePickFirst := childIsPickFirst(gc) if lb.usePickFirst == newUsePickFirst { return } if grpclog.V(2) { grpclog.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) } lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) } func (lb *lbBalancer) ResolverError(error) { // Ignore resolver errors. GRPCLB is not selected unless the resolver // works at least once. 
} func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { if grpclog.V(2) { grpclog.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) } gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) lb.handleServiceConfig(gc) addrs := ccs.ResolverState.Addresses if len(addrs) == 0 { // There should be at least one address, either grpclb server or // fallback. Empty address is not valid. return balancer.ErrBadResolverState } var remoteBalancerAddrs, backendAddrs []resolver.Address for _, a := range addrs { if a.Type == resolver.GRPCLB { a.Type = resolver.Backend remoteBalancerAddrs = append(remoteBalancerAddrs, a) } else { backendAddrs = append(backendAddrs, a) } } if len(remoteBalancerAddrs) == 0 { if lb.ccRemoteLB != nil { lb.ccRemoteLB.close() lb.ccRemoteLB = nil } } else if lb.ccRemoteLB == nil { // First time receiving resolved addresses, create a cc to remote // balancers. lb.newRemoteBalancerCCWrapper() // Start the fallback goroutine. go lb.fallbackToBackendsAfter(lb.fallbackTimeout) } if lb.ccRemoteLB != nil { // cc to remote balancers uses lb.manualResolver. Send the updated remote // balancer addresses to it through manualResolver. lb.manualResolver.UpdateState(resolver.State{Addresses: remoteBalancerAddrs}) } lb.mu.Lock() lb.resolvedBackendAddrs = backendAddrs if len(remoteBalancerAddrs) == 0 || lb.inFallback { // If there's no remote balancer address in ClientConn update, grpclb // enters fallback mode immediately. // // If a new update is received while grpclb is in fallback, update the // list of backends being used to the new fallback backends. 
lb.refreshSubConns(lb.resolvedBackendAddrs, true, lb.usePickFirst) } lb.mu.Unlock() return nil } func (lb *lbBalancer) Close() { select { case <-lb.doneCh: return default: } close(lb.doneCh) if lb.ccRemoteLB != nil { lb.ccRemoteLB.close() } lb.cc.close() } grpc-go-1.29.1/balancer/grpclb/grpclb_config.go000066400000000000000000000031711365033716300213200ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpclb import ( "encoding/json" "google.golang.org/grpc" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/serviceconfig" ) const ( roundRobinName = roundrobin.Name pickFirstName = grpc.PickFirstBalancerName ) type grpclbServiceConfig struct { serviceconfig.LoadBalancingConfig ChildPolicy *[]map[string]json.RawMessage } func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { ret := &grpclbServiceConfig{} if err := json.Unmarshal(lbConfig, ret); err != nil { return nil, err } return ret, nil } func childIsPickFirst(sc *grpclbServiceConfig) bool { if sc == nil { return false } childConfigs := sc.ChildPolicy if childConfigs == nil { return false } for _, childC := range *childConfigs { // If round_robin exists before pick_first, return false if _, ok := childC[roundRobinName]; ok { return false } // If pick_first is before round_robin, return true if _, ok := childC[pickFirstName]; ok { return true } } return false } 
grpc-go-1.29.1/balancer/grpclb/grpclb_config_test.go000066400000000000000000000052011365033716300223530ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpclb import ( "encoding/json" "errors" "fmt" "reflect" "strings" "testing" "google.golang.org/grpc/serviceconfig" ) func (s) TestParse(t *testing.T) { tests := []struct { name string s string want serviceconfig.LoadBalancingConfig wantErr error }{ { name: "empty", s: "", want: nil, wantErr: errors.New("unexpected end of JSON input"), }, { name: "success1", s: `{"childPolicy":[{"pick_first":{}}]}`, want: &grpclbServiceConfig{ ChildPolicy: &[]map[string]json.RawMessage{ {"pick_first": json.RawMessage("{}")}, }, }, }, { name: "success2", s: `{"childPolicy":[{"round_robin":{}},{"pick_first":{}}]}`, want: &grpclbServiceConfig{ ChildPolicy: &[]map[string]json.RawMessage{ {"round_robin": json.RawMessage("{}")}, {"pick_first": json.RawMessage("{}")}, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got, err := (&lbBuilder{}).ParseConfig(json.RawMessage(tt.s)); !reflect.DeepEqual(got, tt.want) || !strings.Contains(fmt.Sprint(err), fmt.Sprint(tt.wantErr)) { t.Errorf("parseFullServiceConfig() = %+v, %+v, want %+v, ", got, err, tt.want, tt.wantErr) } }) } } func (s) TestChildIsPickFirst(t *testing.T) { tests := []struct { name string s string want bool }{ { name: "pickfirst_only", s: `{"childPolicy":[{"pick_first":{}}]}`, want: true, }, { 
name: "pickfirst_before_rr", s: `{"childPolicy":[{"pick_first":{}},{"round_robin":{}}]}`, want: true, }, { name: "rr_before_pickfirst", s: `{"childPolicy":[{"round_robin":{}},{"pick_first":{}}]}`, want: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { gc, err := (&lbBuilder{}).ParseConfig(json.RawMessage(tt.s)) if err != nil { t.Fatalf("Parse(%v) = _, %v; want _, nil", tt.s, err) } if got := childIsPickFirst(gc.(*grpclbServiceConfig)); got != tt.want { t.Errorf("childIsPickFirst() = %v, want %v", got, tt.want) } }) } } grpc-go-1.29.1/balancer/grpclb/grpclb_picker.go000066400000000000000000000136051365033716300213330ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpclb import ( "sync" "sync/atomic" "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/status" ) // rpcStats is same as lbpb.ClientStats, except that numCallsDropped is a map // instead of a slice. type rpcStats struct { // Only access the following fields atomically. 
numCallsStarted int64 numCallsFinished int64 numCallsFinishedWithClientFailedToSend int64 numCallsFinishedKnownReceived int64 mu sync.Mutex // map load_balance_token -> num_calls_dropped numCallsDropped map[string]int64 } func newRPCStats() *rpcStats { return &rpcStats{ numCallsDropped: make(map[string]int64), } } func isZeroStats(stats *lbpb.ClientStats) bool { return len(stats.CallsFinishedWithDrop) == 0 && stats.NumCallsStarted == 0 && stats.NumCallsFinished == 0 && stats.NumCallsFinishedWithClientFailedToSend == 0 && stats.NumCallsFinishedKnownReceived == 0 } // toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats. func (s *rpcStats) toClientStats() *lbpb.ClientStats { stats := &lbpb.ClientStats{ NumCallsStarted: atomic.SwapInt64(&s.numCallsStarted, 0), NumCallsFinished: atomic.SwapInt64(&s.numCallsFinished, 0), NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.numCallsFinishedWithClientFailedToSend, 0), NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.numCallsFinishedKnownReceived, 0), } s.mu.Lock() dropped := s.numCallsDropped s.numCallsDropped = make(map[string]int64) s.mu.Unlock() for token, count := range dropped { stats.CallsFinishedWithDrop = append(stats.CallsFinishedWithDrop, &lbpb.ClientStatsPerToken{ LoadBalanceToken: token, NumCalls: count, }) } return stats } func (s *rpcStats) drop(token string) { atomic.AddInt64(&s.numCallsStarted, 1) s.mu.Lock() s.numCallsDropped[token]++ s.mu.Unlock() atomic.AddInt64(&s.numCallsFinished, 1) } func (s *rpcStats) failedToSend() { atomic.AddInt64(&s.numCallsStarted, 1) atomic.AddInt64(&s.numCallsFinishedWithClientFailedToSend, 1) atomic.AddInt64(&s.numCallsFinished, 1) } func (s *rpcStats) knownReceived() { atomic.AddInt64(&s.numCallsStarted, 1) atomic.AddInt64(&s.numCallsFinishedKnownReceived, 1) atomic.AddInt64(&s.numCallsFinished, 1) } type errPicker struct { // Pick always returns this err. 
err error } func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } // rrPicker does roundrobin on subConns. It's typically used when there's no // response from remote balancer, and grpclb falls back to the resolved // backends. // // It guaranteed that len(subConns) > 0. type rrPicker struct { mu sync.Mutex subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. subConnsNext int } func newRRPicker(readySCs []balancer.SubConn) *rrPicker { return &rrPicker{ subConns: readySCs, subConnsNext: grpcrand.Intn(len(readySCs)), } } func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { p.mu.Lock() defer p.mu.Unlock() sc := p.subConns[p.subConnsNext] p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) return balancer.PickResult{SubConn: sc}, nil } // lbPicker does two layers of picks: // // First layer: roundrobin on all servers in serverList, including drops and backends. // - If it picks a drop, the RPC will fail as being dropped. // - If it picks a backend, do a second layer pick to pick the real backend. // // Second layer: roundrobin on all READY backends. // // It's guaranteed that len(serverList) > 0. type lbPicker struct { mu sync.Mutex serverList []*lbpb.Server serverListNext int subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. subConnsNext int stats *rpcStats } func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats *rpcStats) *lbPicker { return &lbPicker{ serverList: serverList, subConns: readySCs, subConnsNext: grpcrand.Intn(len(readySCs)), stats: stats, } } func (p *lbPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { p.mu.Lock() defer p.mu.Unlock() // Layer one roundrobin on serverList. s := p.serverList[p.serverListNext] p.serverListNext = (p.serverListNext + 1) % len(p.serverList) // If it's a drop, return an error and fail the RPC. 
if s.Drop { p.stats.drop(s.LoadBalanceToken) return balancer.PickResult{}, status.Errorf(codes.Unavailable, "request dropped by grpclb") } // If not a drop but there's no ready subConns. if len(p.subConns) <= 0 { return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } // Return the next ready subConn in the list, also collect rpc stats. sc := p.subConns[p.subConnsNext] p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) done := func(info balancer.DoneInfo) { if !info.BytesSent { p.stats.failedToSend() } else if info.BytesReceived { p.stats.knownReceived() } } return balancer.PickResult{SubConn: sc, Done: done}, nil } func (p *lbPicker) updateReadySCs(readySCs []balancer.SubConn) { p.mu.Lock() defer p.mu.Unlock() p.subConns = readySCs p.subConnsNext = p.subConnsNext % len(readySCs) } grpc-go-1.29.1/balancer/grpclb/grpclb_remote_balancer.go000066400000000000000000000271621365033716300232030ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpclb import ( "context" "fmt" "io" "net" "sync" "time" "github.com/golang/protobuf/proto" timestamppb "github.com/golang/protobuf/ptypes/timestamp" "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) // processServerList updates balancer's internal state, create/remove SubConns // and regenerates picker using the received serverList. func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { if grpclog.V(2) { grpclog.Infof("lbBalancer: processing server list: %+v", l) } lb.mu.Lock() defer lb.mu.Unlock() // Set serverListReceived to true so fallback will not take effect if it has // not hit timeout. lb.serverListReceived = true // If the new server list == old server list, do nothing. if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) { if grpclog.V(2) { grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring") } return } lb.fullServerList = l.Servers var backendAddrs []resolver.Address for i, s := range l.Servers { if s.Drop { continue } md := metadata.Pairs(lbTokenKey, s.LoadBalanceToken) ip := net.IP(s.IpAddress) ipStr := ip.String() if ip.To4() == nil { // Add square brackets to ipv6 addresses, otherwise net.Dial() and // net.SplitHostPort() will return too many colons error. ipStr = fmt.Sprintf("[%s]", ipStr) } addr := resolver.Address{ Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), Metadata: &md, } if grpclog.V(2) { grpclog.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", i, ipStr, s.Port, s.LoadBalanceToken) } backendAddrs = append(backendAddrs, addr) } // Call refreshSubConns to create/remove SubConns. 
If we are in fallback, // this is also exiting fallback. lb.refreshSubConns(backendAddrs, false, lb.usePickFirst) } // refreshSubConns creates/removes SubConns with backendAddrs, and refreshes // balancer state and picker. // // Caller must hold lb.mu. func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback bool, pickFirst bool) { opts := balancer.NewSubConnOptions{} if !fallback { opts.CredsBundle = lb.grpclbBackendCreds } lb.backendAddrs = backendAddrs lb.backendAddrsWithoutMetadata = nil fallbackModeChanged := lb.inFallback != fallback lb.inFallback = fallback if fallbackModeChanged && lb.inFallback { // Clear previous received list when entering fallback, so if the server // comes back and sends the same list again, the new addresses will be // used. lb.fullServerList = nil } balancingPolicyChanged := lb.usePickFirst != pickFirst oldUsePickFirst := lb.usePickFirst lb.usePickFirst = pickFirst if fallbackModeChanged || balancingPolicyChanged { // Remove all SubConns when switching balancing policy or switching // fallback mode. // // For fallback mode switching with pickfirst, we want to recreate the // SubConn because the creds could be different. for a, sc := range lb.subConns { if oldUsePickFirst { // If old SubConn were created for pickfirst, bypass cache and // remove directly. lb.cc.cc.RemoveSubConn(sc) } else { lb.cc.RemoveSubConn(sc) } delete(lb.subConns, a) } } if lb.usePickFirst { var sc balancer.SubConn for _, sc = range lb.subConns { break } if sc != nil { sc.UpdateAddresses(backendAddrs) sc.Connect() return } // This bypasses the cc wrapper with SubConn cache. sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts) if err != nil { grpclog.Warningf("grpclb: failed to create new SubConn: %v", err) return } sc.Connect() lb.subConns[backendAddrs[0]] = sc lb.scStates[sc] = connectivity.Idle return } // addrsSet is the set converted from backendAddrsWithoutMetadata, it's used to quick // lookup for an address. 
addrsSet := make(map[resolver.Address]struct{}) // Create new SubConns. for _, addr := range backendAddrs { addrWithoutMD := addr addrWithoutMD.Metadata = nil addrsSet[addrWithoutMD] = struct{}{} lb.backendAddrsWithoutMetadata = append(lb.backendAddrsWithoutMetadata, addrWithoutMD) if _, ok := lb.subConns[addrWithoutMD]; !ok { // Use addrWithMD to create the SubConn. sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) if err != nil { grpclog.Warningf("grpclb: failed to create new SubConn: %v", err) continue } lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map. if _, ok := lb.scStates[sc]; !ok { // Only set state of new sc to IDLE. The state could already be // READY for cached SubConns. lb.scStates[sc] = connectivity.Idle } sc.Connect() } } for a, sc := range lb.subConns { // a was removed by resolver. if _, ok := addrsSet[a]; !ok { lb.cc.RemoveSubConn(sc) delete(lb.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in HandleSubConnStateChange. } } // Regenerate and update picker after refreshing subconns because with // cache, even if SubConn was newed/removed, there might be no state // changes (the subconn will be kept in cache, not actually // newed/removed). lb.updateStateAndPicker(true, true) } type remoteBalancerCCWrapper struct { cc *grpc.ClientConn lb *lbBalancer backoff backoff.Strategy done chan struct{} // waitgroup to wait for all goroutines to exit. 
wg sync.WaitGroup } func (lb *lbBalancer) newRemoteBalancerCCWrapper() { var dopts []grpc.DialOption if creds := lb.opt.DialCreds; creds != nil { dopts = append(dopts, grpc.WithTransportCredentials(creds)) } else if bundle := lb.grpclbClientConnCreds; bundle != nil { dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) } else { dopts = append(dopts, grpc.WithInsecure()) } if lb.opt.Dialer != nil { dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer)) } // Explicitly set pickfirst as the balancer. dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) if channelz.IsOn() { dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) } // Enable Keepalive for grpclb client. dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: 20 * time.Second, Timeout: 10 * time.Second, PermitWithoutStream: true, })) // The dial target is not important. // // The grpclb server addresses will set field ServerName, and creds will // receive ServerName as authority. cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...) if err != nil { grpclog.Fatalf("failed to dial: %v", err) } ccw := &remoteBalancerCCWrapper{ cc: cc, lb: lb, backoff: lb.backoff, done: make(chan struct{}), } lb.ccRemoteLB = ccw ccw.wg.Add(1) go ccw.watchRemoteBalancer() } // close closed the ClientConn to remote balancer, and waits until all // goroutines to finish. 
func (ccw *remoteBalancerCCWrapper) close() { close(ccw.done) ccw.cc.Close() ccw.wg.Wait() } func (ccw *remoteBalancerCCWrapper) readServerList(s *balanceLoadClientStream) error { for { reply, err := s.Recv() if err != nil { if err == io.EOF { return errServerTerminatedConnection } return fmt.Errorf("grpclb: failed to recv server list: %v", err) } if serverList := reply.GetServerList(); serverList != nil { ccw.lb.processServerList(serverList) } if reply.GetFallbackResponse() != nil { // Eagerly enter fallback ccw.lb.mu.Lock() ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst) ccw.lb.mu.Unlock() } } } func (ccw *remoteBalancerCCWrapper) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) { ticker := time.NewTicker(interval) defer ticker.Stop() lastZero := false for { select { case <-ticker.C: case <-s.Context().Done(): return } stats := ccw.lb.clientStats.toClientStats() zero := isZeroStats(stats) if zero && lastZero { // Quash redundant empty load reports. continue } lastZero = zero t := time.Now() stats.Timestamp = ×tamppb.Timestamp{ Seconds: t.Unix(), Nanos: int32(t.Nanosecond()), } if err := s.Send(&lbpb.LoadBalanceRequest{ LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ ClientStats: stats, }, }); err != nil { return } } } func (ccw *remoteBalancerCCWrapper) callRemoteBalancer() (backoff bool, _ error) { lbClient := &loadBalancerClient{cc: ccw.cc} ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true)) if err != nil { return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) } ccw.lb.mu.Lock() ccw.lb.remoteBalancerConnected = true ccw.lb.mu.Unlock() // grpclb handshake on the stream. 
initReq := &lbpb.LoadBalanceRequest{ LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ InitialRequest: &lbpb.InitialLoadBalanceRequest{ Name: ccw.lb.target, }, }, } if err := stream.Send(initReq); err != nil { return true, fmt.Errorf("grpclb: failed to send init request: %v", err) } reply, err := stream.Recv() if err != nil { return true, fmt.Errorf("grpclb: failed to recv init response: %v", err) } initResp := reply.GetInitialResponse() if initResp == nil { return true, fmt.Errorf("grpclb: reply from remote balancer did not include initial response") } ccw.wg.Add(1) go func() { defer ccw.wg.Done() if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { ccw.sendLoadReport(stream, d) } }() // No backoff if init req/resp handshake was successful. return false, ccw.readServerList(stream) } func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { defer ccw.wg.Done() var retryCount int for { doBackoff, err := ccw.callRemoteBalancer() select { case <-ccw.done: return default: if err != nil { if err == errServerTerminatedConnection { grpclog.Info(err) } else { grpclog.Warning(err) } } } // Trigger a re-resolve when the stream errors. ccw.lb.cc.cc.ResolveNow(resolver.ResolveNowOptions{}) ccw.lb.mu.Lock() ccw.lb.remoteBalancerConnected = false ccw.lb.fullServerList = nil // Enter fallback when connection to remote balancer is lost, and the // aggregated state is not Ready. if !ccw.lb.inFallback && ccw.lb.state != connectivity.Ready { // Entering fallback. ccw.lb.refreshSubConns(ccw.lb.resolvedBackendAddrs, true, ccw.lb.usePickFirst) } ccw.lb.mu.Unlock() if !doBackoff { retryCount = 0 continue } timer := time.NewTimer(ccw.backoff.Backoff(retryCount)) // Copy backoff select { case <-timer.C: case <-ccw.done: timer.Stop() return } retryCount++ } } grpc-go-1.29.1/balancer/grpclb/grpclb_test.go000066400000000000000000001276031365033716300210410ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpclb import ( "context" "errors" "fmt" "io" "net" "strconv" "strings" "sync" "sync/atomic" "testing" "time" durationpb "github.com/golang/protobuf/ptypes/duration" "google.golang.org/grpc" "google.golang.org/grpc/balancer" lbgrpc "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" testpb "google.golang.org/grpc/test/grpc_testing" ) var ( lbServerName = "lb.server.com" beServerName = "backends.com" lbToken = "iamatoken" // Resolver replaces localhost with fakeName in Next(). // Dialer replaces fakeName with localhost when dialing. // This will test that custom dialer is passed from Dial to grpclb. 
fakeName = "fake.Name" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } type serverNameCheckCreds struct { mu sync.Mutex sn string } func (c *serverNameCheckCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { if _, err := io.WriteString(rawConn, c.sn); err != nil { fmt.Printf("Failed to write the server name %s to the client %v", c.sn, err) return nil, nil, err } return rawConn, nil, nil } func (c *serverNameCheckCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { c.mu.Lock() defer c.mu.Unlock() b := make([]byte, len(authority)) errCh := make(chan error, 1) go func() { _, err := rawConn.Read(b) errCh <- err }() select { case err := <-errCh: if err != nil { fmt.Printf("test-creds: failed to read expected authority name from the server: %v\n", err) return nil, nil, err } case <-ctx.Done(): return nil, nil, ctx.Err() } if authority != string(b) { fmt.Printf("test-creds: got authority from ClientConn %q, expected by server %q\n", authority, string(b)) return nil, nil, errors.New("received unexpected server name") } return rawConn, nil, nil } func (c *serverNameCheckCreds) Info() credentials.ProtocolInfo { return credentials.ProtocolInfo{} } func (c *serverNameCheckCreds) Clone() credentials.TransportCredentials { return &serverNameCheckCreds{} } func (c *serverNameCheckCreds) OverrideServerName(s string) error { return nil } // fakeNameDialer replaces fakeName with localhost when dialing. // This will test that custom dialer is passed from Dial to grpclb. func fakeNameDialer(ctx context.Context, addr string) (net.Conn, error) { addr = strings.Replace(addr, fakeName, "localhost", 1) return (&net.Dialer{}).DialContext(ctx, "tcp", addr) } // merge merges the new client stats into current stats. // // It's a test-only method. rpcStats is defined in grpclb_picker. 
func (s *rpcStats) merge(cs *lbpb.ClientStats) { atomic.AddInt64(&s.numCallsStarted, cs.NumCallsStarted) atomic.AddInt64(&s.numCallsFinished, cs.NumCallsFinished) atomic.AddInt64(&s.numCallsFinishedWithClientFailedToSend, cs.NumCallsFinishedWithClientFailedToSend) atomic.AddInt64(&s.numCallsFinishedKnownReceived, cs.NumCallsFinishedKnownReceived) s.mu.Lock() for _, perToken := range cs.CallsFinishedWithDrop { s.numCallsDropped[perToken.LoadBalanceToken] += perToken.NumCalls } s.mu.Unlock() } func mapsEqual(a, b map[string]int64) bool { if len(a) != len(b) { return false } for k, v1 := range a { if v2, ok := b[k]; !ok || v1 != v2 { return false } } return true } func atomicEqual(a, b *int64) bool { return atomic.LoadInt64(a) == atomic.LoadInt64(b) } // equal compares two rpcStats. // // It's a test-only method. rpcStats is defined in grpclb_picker. func (s *rpcStats) equal(o *rpcStats) bool { if !atomicEqual(&s.numCallsStarted, &o.numCallsStarted) { return false } if !atomicEqual(&s.numCallsFinished, &o.numCallsFinished) { return false } if !atomicEqual(&s.numCallsFinishedWithClientFailedToSend, &o.numCallsFinishedWithClientFailedToSend) { return false } if !atomicEqual(&s.numCallsFinishedKnownReceived, &o.numCallsFinishedKnownReceived) { return false } s.mu.Lock() defer s.mu.Unlock() o.mu.Lock() defer o.mu.Unlock() return mapsEqual(s.numCallsDropped, o.numCallsDropped) } func (s *rpcStats) String() string { s.mu.Lock() defer s.mu.Unlock() return fmt.Sprintf("Started: %v, Finished: %v, FinishedWithClientFailedToSend: %v, FinishedKnownReceived: %v, Dropped: %v", atomic.LoadInt64(&s.numCallsStarted), atomic.LoadInt64(&s.numCallsFinished), atomic.LoadInt64(&s.numCallsFinishedWithClientFailedToSend), atomic.LoadInt64(&s.numCallsFinishedKnownReceived), s.numCallsDropped) } type remoteBalancer struct { sls chan *lbpb.ServerList statsDura time.Duration done chan struct{} stats *rpcStats statsChan chan *lbpb.ClientStats fbChan chan struct{} } func 
newRemoteBalancer(intervals []time.Duration, statsChan chan *lbpb.ClientStats) *remoteBalancer { return &remoteBalancer{ sls: make(chan *lbpb.ServerList, 1), done: make(chan struct{}), stats: newRPCStats(), statsChan: statsChan, fbChan: make(chan struct{}), } } func (b *remoteBalancer) stop() { close(b.sls) close(b.done) } func (b *remoteBalancer) fallbackNow() { b.fbChan <- struct{}{} } func (b *remoteBalancer) BalanceLoad(stream lbgrpc.LoadBalancer_BalanceLoadServer) error { req, err := stream.Recv() if err != nil { return err } initReq := req.GetInitialRequest() if initReq.Name != beServerName { return status.Errorf(codes.InvalidArgument, "invalid service name: %v", initReq.Name) } resp := &lbpb.LoadBalanceResponse{ LoadBalanceResponseType: &lbpb.LoadBalanceResponse_InitialResponse{ InitialResponse: &lbpb.InitialLoadBalanceResponse{ ClientStatsReportInterval: &durationpb.Duration{ Seconds: int64(b.statsDura.Seconds()), Nanos: int32(b.statsDura.Nanoseconds() - int64(b.statsDura.Seconds())*1e9), }, }, }, } if err := stream.Send(resp); err != nil { return err } go func() { for { var ( req *lbpb.LoadBalanceRequest err error ) if req, err = stream.Recv(); err != nil { return } b.stats.merge(req.GetClientStats()) if b.statsChan != nil && req.GetClientStats() != nil { b.statsChan <- req.GetClientStats() } } }() for { select { case v := <-b.sls: resp = &lbpb.LoadBalanceResponse{ LoadBalanceResponseType: &lbpb.LoadBalanceResponse_ServerList{ ServerList: v, }, } case <-b.fbChan: resp = &lbpb.LoadBalanceResponse{ LoadBalanceResponseType: &lbpb.LoadBalanceResponse_FallbackResponse{ FallbackResponse: &lbpb.FallbackResponse{}, }, } case <-stream.Context().Done(): return stream.Context().Err() } if err := stream.Send(resp); err != nil { return err } } } type testServer struct { testpb.UnimplementedTestServiceServer addr string fallback bool } const testmdkey = "testmd" func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { md, ok := 
metadata.FromIncomingContext(ctx) if !ok { return nil, status.Error(codes.Internal, "failed to receive metadata") } if !s.fallback && (md == nil || md["lb-token"][0] != lbToken) { return nil, status.Errorf(codes.Internal, "received unexpected metadata: %v", md) } grpc.SetTrailer(ctx, metadata.Pairs(testmdkey, s.addr)) return &testpb.Empty{}, nil } func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { return nil } func startBackends(sn string, fallback bool, lis ...net.Listener) (servers []*grpc.Server) { for _, l := range lis { creds := &serverNameCheckCreds{ sn: sn, } s := grpc.NewServer(grpc.Creds(creds)) testpb.RegisterTestServiceServer(s, &testServer{addr: l.Addr().String(), fallback: fallback}) servers = append(servers, s) go func(s *grpc.Server, l net.Listener) { s.Serve(l) }(s, l) } return } func stopBackends(servers []*grpc.Server) { for _, s := range servers { s.Stop() } } type testServers struct { lbAddr string ls *remoteBalancer lb *grpc.Server backends []*grpc.Server beIPs []net.IP bePorts []int lbListener net.Listener beListeners []net.Listener } func newLoadBalancer(numberOfBackends int, statsChan chan *lbpb.ClientStats) (tss *testServers, cleanup func(), err error) { var ( beListeners []net.Listener ls *remoteBalancer lb *grpc.Server beIPs []net.IP bePorts []int ) for i := 0; i < numberOfBackends; i++ { // Start a backend. beLis, e := net.Listen("tcp", "localhost:0") if e != nil { err = fmt.Errorf("failed to listen %v", err) return } beIPs = append(beIPs, beLis.Addr().(*net.TCPAddr).IP) bePorts = append(bePorts, beLis.Addr().(*net.TCPAddr).Port) beListeners = append(beListeners, newRestartableListener(beLis)) } backends := startBackends(beServerName, false, beListeners...) // Start a load balancer. 
lbLis, err := net.Listen("tcp", "localhost:0") if err != nil { err = fmt.Errorf("failed to create the listener for the load balancer %v", err) return } lbLis = newRestartableListener(lbLis) lbCreds := &serverNameCheckCreds{ sn: lbServerName, } lb = grpc.NewServer(grpc.Creds(lbCreds)) ls = newRemoteBalancer(nil, statsChan) lbgrpc.RegisterLoadBalancerServer(lb, ls) go func() { lb.Serve(lbLis) }() tss = &testServers{ lbAddr: net.JoinHostPort(fakeName, strconv.Itoa(lbLis.Addr().(*net.TCPAddr).Port)), ls: ls, lb: lb, backends: backends, beIPs: beIPs, bePorts: bePorts, lbListener: lbLis, beListeners: beListeners, } cleanup = func() { defer stopBackends(backends) defer func() { ls.stop() lb.Stop() }() } return } func (s) TestGRPCLB(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() tss, cleanup, err := newLoadBalancer(1, nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() be := &lbpb.Server{ IpAddress: tss.beIPs[0], Port: int32(tss.bePorts[0]), LoadBalanceToken: lbToken, } var bes []*lbpb.Server bes = append(bes, be) sl := &lbpb.ServerList{ Servers: bes, } tss.ls.sls <- sl creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{Addresses: []resolver.Address{{ Addr: tss.lbAddr, Type: resolver.GRPCLB, ServerName: lbServerName, }}}) if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } } // The remote balancer sends response with duplicates to grpclb client. 
func (s) TestGRPCLBWeighted(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() tss, cleanup, err := newLoadBalancer(2, nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() beServers := []*lbpb.Server{{ IpAddress: tss.beIPs[0], Port: int32(tss.bePorts[0]), LoadBalanceToken: lbToken, }, { IpAddress: tss.beIPs[1], Port: int32(tss.bePorts[1]), LoadBalanceToken: lbToken, }} portsToIndex := make(map[int]int) for i := range beServers { portsToIndex[tss.bePorts[i]] = i } creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{Addresses: []resolver.Address{{ Addr: tss.lbAddr, Type: resolver.GRPCLB, ServerName: lbServerName, }}}) sequences := []string{"00101", "00011"} for _, seq := range sequences { var ( bes []*lbpb.Server p peer.Peer result string ) for _, s := range seq { bes = append(bes, beServers[s-'0']) } tss.ls.sls <- &lbpb.ServerList{Servers: bes} for i := 0; i < 1000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) } // The generated result will be in format of "0010100101". 
if !strings.Contains(result, strings.Repeat(seq, 2)) { t.Errorf("got result sequence %q, want patten %q", result, seq) } } } func (s) TestDropRequest(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() tss, cleanup, err := newLoadBalancer(2, nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() tss.ls.sls <- &lbpb.ServerList{ Servers: []*lbpb.Server{{ IpAddress: tss.beIPs[0], Port: int32(tss.bePorts[0]), LoadBalanceToken: lbToken, Drop: false, }, { IpAddress: tss.beIPs[1], Port: int32(tss.bePorts[1]), LoadBalanceToken: lbToken, Drop: false, }, { Drop: true, }}, } creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{Addresses: []resolver.Address{{ Addr: tss.lbAddr, Type: resolver.GRPCLB, ServerName: lbServerName, }}}) var ( i int p peer.Peer ) const ( // Poll to wait for something to happen. Total timeout 1 second. Sleep 1 // ms each loop, and do at most 1000 loops. sleepEachLoop = time.Millisecond loopCount = int(time.Second / sleepEachLoop) ) // Make a non-fail-fast RPC and wait for it to succeed. for i = 0; i < loopCount; i++ { if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err == nil { break } time.Sleep(sleepEachLoop) } if i >= loopCount { t.Fatalf("timeout waiting for the first connection to become ready. EmptyCall(_, _) = _, %v, want _, ", err) } // Make RPCs until the peer is different. So we know both connections are // READY. 
for i = 0; i < loopCount; i++ { var temp peer.Peer if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&temp)); err == nil { if temp.Addr.(*net.TCPAddr).Port != p.Addr.(*net.TCPAddr).Port { break } } time.Sleep(sleepEachLoop) } if i >= loopCount { t.Fatalf("timeout waiting for the second connection to become ready") } // More RPCs until drop happens. So we know the picker index, and the // expected behavior of following RPCs. for i = 0; i < loopCount; i++ { if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) == codes.Unavailable { break } time.Sleep(sleepEachLoop) } if i >= loopCount { t.Fatalf("timeout waiting for drop. EmptyCall(_, _) = _, %v, want _, ", err) } select { case <-ctx.Done(): t.Fatal("timed out", ctx.Err()) default: } for _, failfast := range []bool{true, false} { for i := 0; i < 3; i++ { // 1st RPCs pick the first item in server list. They should succeed // since they choose the non-drop-request backend according to the // round robin policy. if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(!failfast)); err != nil { t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } // 2nd RPCs pick the second item in server list. They should succeed // since they choose the non-drop-request backend according to the // round robin policy. if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(!failfast)); err != nil { t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } // 3rd RPCs should fail, because they pick last item in server list, // with Drop set to true. if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(!failfast)); status.Code(err) != codes.Unavailable { t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, %s", testC, err, codes.Unavailable) } } } // Make one more RPC to move the picker index one step further, so it's not // 0. 
The following RPCs will test that drop index is not reset. If picker // index is at 0, we cannot tell whether it's reset or not. if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } tss.backends[0].Stop() // This last pick was backend 0. Closing backend 0 doesn't reset drop index // (for level 1 picking), so the following picks will be (backend1, drop, // backend1), instead of (backend, backend, drop) if drop index was reset. time.Sleep(time.Second) for i := 0; i < 3; i++ { var p peer.Peer if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if want := tss.bePorts[1]; p.Addr.(*net.TCPAddr).Port != want { t.Errorf("got peer: %v, want peer port: %v", p.Addr, want) } if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.Unavailable { t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, %s", testC, err, codes.Unavailable) } if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if want := tss.bePorts[1]; p.Addr.(*net.TCPAddr).Port != want { t.Errorf("got peer: %v, want peer port: %v", p.Addr, want) } } } // When the balancer in use disconnects, grpclb should connect to the next address from resolved balancer address list. 
func (s) TestBalancerDisconnects(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() var ( tests []*testServers lbs []*grpc.Server ) for i := 0; i < 2; i++ { tss, cleanup, err := newLoadBalancer(1, nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() be := &lbpb.Server{ IpAddress: tss.beIPs[0], Port: int32(tss.bePorts[0]), LoadBalanceToken: lbToken, } var bes []*lbpb.Server bes = append(bes, be) sl := &lbpb.ServerList{ Servers: bes, } tss.ls.sls <- sl tests = append(tests, tss) lbs = append(lbs, tss.lb) } creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{Addresses: []resolver.Address{{ Addr: tests[0].lbAddr, Type: resolver.GRPCLB, ServerName: lbServerName, }, { Addr: tests[1].lbAddr, Type: resolver.GRPCLB, ServerName: lbServerName, }}}) var p peer.Peer if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if p.Addr.(*net.TCPAddr).Port != tests[0].bePorts[0] { t.Fatalf("got peer: %v, want peer port: %v", p.Addr, tests[0].bePorts[0]) } lbs[0].Stop() // Stop balancer[0], balancer[1] should be used by grpclb. // Check peer address to see if that happened. 
for i := 0; i < 1000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if p.Addr.(*net.TCPAddr).Port == tests[1].bePorts[0] { return } time.Sleep(time.Millisecond) } t.Fatalf("No RPC sent to second backend after 1 second") } func (s) TestFallback(t *testing.T) { balancer.Register(newLBBuilderWithFallbackTimeout(100 * time.Millisecond)) defer balancer.Register(newLBBuilder()) r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() tss, cleanup, err := newLoadBalancer(1, nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() // Start a standalone backend. beLis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen %v", err) } defer beLis.Close() standaloneBEs := startBackends(beServerName, true, beLis) defer stopBackends(standaloneBEs) be := &lbpb.Server{ IpAddress: tss.beIPs[0], Port: int32(tss.bePorts[0]), LoadBalanceToken: lbToken, } var bes []*lbpb.Server bes = append(bes, be) sl := &lbpb.ServerList{ Servers: bes, } tss.ls.sls <- sl creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{Addresses: []resolver.Address{{ Addr: "invalid.address", Type: resolver.GRPCLB, ServerName: lbServerName, }, { Addr: beLis.Addr().String(), Type: resolver.Backend, }}}) var p peer.Peer if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) } if p.Addr.String() != 
beLis.Addr().String() { t.Fatalf("got peer: %v, want peer: %v", p.Addr, beLis.Addr()) } r.UpdateState(resolver.State{Addresses: []resolver.Address{{ Addr: tss.lbAddr, Type: resolver.GRPCLB, ServerName: lbServerName, }, { Addr: beLis.Addr().String(), Type: resolver.Backend, }}}) var backendUsed bool for i := 0; i < 1000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { backendUsed = true break } time.Sleep(time.Millisecond) } if !backendUsed { t.Fatalf("No RPC sent to backend behind remote balancer after 1 second") } // Close backend and remote balancer connections, should use fallback. tss.beListeners[0].(*restartableListener).stopPreviousConns() tss.lbListener.(*restartableListener).stopPreviousConns() var fallbackUsed bool for i := 0; i < 2000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { // Because we are hard-closing the connection, above, it's possible // for the first RPC attempt to be sent on the old connection, // which will lead to an Unavailable error when it is closed. // Ignore unavailable errors. if status.Code(err) != codes.Unavailable { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } } if p.Addr.String() == beLis.Addr().String() { fallbackUsed = true break } time.Sleep(time.Millisecond) } if !fallbackUsed { t.Fatalf("No RPC sent to fallback after 2 seconds") } // Restart backend and remote balancer, should not use backends. 
tss.beListeners[0].(*restartableListener).restart() tss.lbListener.(*restartableListener).restart() tss.ls.sls <- sl var backendUsed2 bool for i := 0; i < 2000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { backendUsed2 = true break } time.Sleep(time.Millisecond) } if !backendUsed2 { t.Fatalf("No RPC sent to backend behind remote balancer after 2 seconds") } } func (s) TestExplicitFallback(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() tss, cleanup, err := newLoadBalancer(1, nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() // Start a standalone backend. beLis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen %v", err) } defer beLis.Close() standaloneBEs := startBackends(beServerName, true, beLis) defer stopBackends(standaloneBEs) be := &lbpb.Server{ IpAddress: tss.beIPs[0], Port: int32(tss.bePorts[0]), LoadBalanceToken: lbToken, } var bes []*lbpb.Server bes = append(bes, be) sl := &lbpb.ServerList{ Servers: bes, } tss.ls.sls <- sl creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{Addresses: []resolver.Address{{ Addr: tss.lbAddr, Type: resolver.GRPCLB, ServerName: lbServerName, }, { Addr: beLis.Addr().String(), Type: resolver.Backend, }}}) var p peer.Peer var backendUsed bool for i := 0; i < 2000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, 
grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { backendUsed = true break } time.Sleep(time.Millisecond) } if !backendUsed { t.Fatalf("No RPC sent to backend behind remote balancer after 2 seconds") } // Send fallback signal from remote balancer; should use fallback. tss.ls.fallbackNow() var fallbackUsed bool for i := 0; i < 2000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if p.Addr.String() == beLis.Addr().String() { fallbackUsed = true break } time.Sleep(time.Millisecond) } if !fallbackUsed { t.Fatalf("No RPC sent to fallback after 2 seconds") } // Send another server list; should use backends again. tss.ls.sls <- sl backendUsed = false for i := 0; i < 2000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { backendUsed = true break } time.Sleep(time.Millisecond) } if !backendUsed { t.Fatalf("No RPC sent to backend behind remote balancer after 2 seconds") } } func (s) TestFallBackWithNoServerAddress(t *testing.T) { resolveNowCh := make(chan struct{}, 1) r, cleanup := manual.GenerateAndRegisterManualResolver() r.ResolveNowCallback = func(resolver.ResolveNowOptions) { select { case <-resolveNowCh: default: } resolveNowCh <- struct{}{} } defer cleanup() tss, cleanup, err := newLoadBalancer(1, nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() // Start a standalone backend. 
beLis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen %v", err) } defer beLis.Close() standaloneBEs := startBackends(beServerName, true, beLis) defer stopBackends(standaloneBEs) be := &lbpb.Server{ IpAddress: tss.beIPs[0], Port: int32(tss.bePorts[0]), LoadBalanceToken: lbToken, } var bes []*lbpb.Server bes = append(bes, be) sl := &lbpb.ServerList{ Servers: bes, } creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) // Select grpclb with service config. const pfc = `{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"round_robin":{}}]}}]}` scpr := r.CC.ParseServiceConfig(pfc) if scpr.Err != nil { t.Fatalf("Error parsing config %q: %v", pfc, scpr.Err) } for i := 0; i < 2; i++ { // Send an update with only backend address. grpclb should enter fallback // and use the fallback backend. 
r.UpdateState(resolver.State{ Addresses: []resolver.Address{{ Addr: beLis.Addr().String(), Type: resolver.Backend, }}, ServiceConfig: scpr, }) select { case <-resolveNowCh: t.Errorf("unexpected resolveNow when grpclb gets no balancer address 1111, %d", i) case <-time.After(time.Second): } var p peer.Peer rpcCtx, rpcCancel := context.WithTimeout(context.Background(), time.Second) defer rpcCancel() if _, err := testC.EmptyCall(rpcCtx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) } if p.Addr.String() != beLis.Addr().String() { t.Fatalf("got peer: %v, want peer: %v", p.Addr, beLis.Addr()) } select { case <-resolveNowCh: t.Errorf("unexpected resolveNow when grpclb gets no balancer address 2222, %d", i) case <-time.After(time.Second): } tss.ls.sls <- sl // Send an update with balancer address. The backends behind grpclb should // be used. r.UpdateState(resolver.State{ Addresses: []resolver.Address{{ Addr: tss.lbAddr, Type: resolver.GRPCLB, ServerName: lbServerName, }, { Addr: beLis.Addr().String(), Type: resolver.Backend, }}, ServiceConfig: scpr, }) var backendUsed bool for i := 0; i < 1000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { backendUsed = true break } time.Sleep(time.Millisecond) } if !backendUsed { t.Fatalf("No RPC sent to backend behind remote balancer after 1 second") } } } func (s) TestGRPCLBPickFirst(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() tss, cleanup, err := newLoadBalancer(3, nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() beServers := []*lbpb.Server{{ IpAddress: tss.beIPs[0], Port: int32(tss.bePorts[0]), LoadBalanceToken: lbToken, }, { IpAddress: tss.beIPs[1], Port: 
int32(tss.bePorts[1]), LoadBalanceToken: lbToken, }, { IpAddress: tss.beIPs[2], Port: int32(tss.bePorts[2]), LoadBalanceToken: lbToken, }} portsToIndex := make(map[int]int) for i := range beServers { portsToIndex[tss.bePorts[i]] = i } creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() testC := testpb.NewTestServiceClient(cc) var ( p peer.Peer result string ) tss.ls.sls <- &lbpb.ServerList{Servers: beServers[0:3]} // Start with sub policy pick_first. const pfc = `{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}` scpr := r.CC.ParseServiceConfig(pfc) if scpr.Err != nil { t.Fatalf("Error parsing config %q: %v", pfc, scpr.Err) } r.UpdateState(resolver.State{ Addresses: []resolver.Address{{ Addr: tss.lbAddr, Type: resolver.GRPCLB, ServerName: lbServerName, }}, ServiceConfig: scpr, }) result = "" for i := 0; i < 1000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) } result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) } if seq := "00000"; !strings.Contains(result, strings.Repeat(seq, 100)) { t.Errorf("got result sequence %q, want patten %q", result, seq) } tss.ls.sls <- &lbpb.ServerList{Servers: beServers[2:]} result = "" for i := 0; i < 1000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) } result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) } if seq := "22222"; !strings.Contains(result, strings.Repeat(seq, 100)) { t.Errorf("got result sequence %q, want patten %q", 
result, seq) } tss.ls.sls <- &lbpb.ServerList{Servers: beServers[1:]} result = "" for i := 0; i < 1000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) } result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) } if seq := "22222"; !strings.Contains(result, strings.Repeat(seq, 100)) { t.Errorf("got result sequence %q, want patten %q", result, seq) } // Switch sub policy to roundrobin. grpclbServiceConfigEmpty := r.CC.ParseServiceConfig(`{}`) if grpclbServiceConfigEmpty.Err != nil { t.Fatalf("Error parsing config %q: %v", `{}`, grpclbServiceConfigEmpty.Err) } r.UpdateState(resolver.State{ Addresses: []resolver.Address{{ Addr: tss.lbAddr, Type: resolver.GRPCLB, ServerName: lbServerName, }}, ServiceConfig: grpclbServiceConfigEmpty, }) result = "" for i := 0; i < 1000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) } result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) } if seq := "121212"; !strings.Contains(result, strings.Repeat(seq, 100)) { t.Errorf("got result sequence %q, want patten %q", result, seq) } tss.ls.sls <- &lbpb.ServerList{Servers: beServers[0:3]} result = "" for i := 0; i < 1000; i++ { if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) } if seq := "012012012"; !strings.Contains(result, strings.Repeat(seq, 2)) { t.Errorf("got result sequence %q, want patten %q", result, seq) } } type failPreRPCCred struct{} func (failPreRPCCred) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { if strings.Contains(uri[0], failtosendURI) { return nil, 
fmt.Errorf("rpc should fail to send") } return nil, nil } func (failPreRPCCred) RequireTransportSecurity() bool { return false } func checkStats(stats, expected *rpcStats) error { if !stats.equal(expected) { return fmt.Errorf("stats not equal: got %+v, want %+v", stats, expected) } return nil } func runAndCheckStats(t *testing.T, drop bool, statsChan chan *lbpb.ClientStats, runRPCs func(*grpc.ClientConn), statsWant *rpcStats) error { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() tss, cleanup, err := newLoadBalancer(1, statsChan) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() servers := []*lbpb.Server{{ IpAddress: tss.beIPs[0], Port: int32(tss.bePorts[0]), LoadBalanceToken: lbToken, }} if drop { servers = append(servers, &lbpb.Server{ LoadBalanceToken: lbToken, Drop: drop, }) } tss.ls.sls <- &lbpb.ServerList{Servers: servers} tss.ls.statsDura = 100 * time.Millisecond creds := serverNameCheckCreds{} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithTransportCredentials(&creds), grpc.WithPerRPCCredentials(failPreRPCCred{}), grpc.WithContextDialer(fakeNameDialer)) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() r.UpdateState(resolver.State{Addresses: []resolver.Address{{ Addr: tss.lbAddr, Type: resolver.GRPCLB, ServerName: lbServerName, }}}) runRPCs(cc) end := time.Now().Add(time.Second) for time.Now().Before(end) { if err := checkStats(tss.ls.stats, statsWant); err == nil { time.Sleep(200 * time.Millisecond) // sleep for two intervals to make sure no new stats are reported. 
break } } return checkStats(tss.ls.stats, statsWant) } const ( countRPC = 40 failtosendURI = "failtosend" ) func (s) TestGRPCLBStatsUnarySuccess(t *testing.T) { if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) // The first non-failfast RPC succeeds, all connections are up. if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } for i := 0; i < countRPC-1; i++ { testC.EmptyCall(context.Background(), &testpb.Empty{}) } }, &rpcStats{ numCallsStarted: int64(countRPC), numCallsFinished: int64(countRPC), numCallsFinishedKnownReceived: int64(countRPC), }); err != nil { t.Fatal(err) } } func (s) TestGRPCLBStatsUnaryDrop(t *testing.T) { if err := runAndCheckStats(t, true, nil, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) // The first non-failfast RPC succeeds, all connections are up. if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } for i := 0; i < countRPC-1; i++ { testC.EmptyCall(context.Background(), &testpb.Empty{}) } }, &rpcStats{ numCallsStarted: int64(countRPC), numCallsFinished: int64(countRPC), numCallsFinishedKnownReceived: int64(countRPC) / 2, numCallsDropped: map[string]int64{lbToken: int64(countRPC) / 2}, }); err != nil { t.Fatal(err) } } func (s) TestGRPCLBStatsUnaryFailedToSend(t *testing.T) { if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) // The first non-failfast RPC succeeds, all connections are up. 
if _, err := testC.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } for i := 0; i < countRPC-1; i++ { cc.Invoke(context.Background(), failtosendURI, &testpb.Empty{}, nil) } }, &rpcStats{ numCallsStarted: int64(countRPC)*2 - 1, numCallsFinished: int64(countRPC)*2 - 1, numCallsFinishedWithClientFailedToSend: int64(countRPC-1) * 2, numCallsFinishedKnownReceived: 1, }); err != nil { t.Fatal(err) } } func (s) TestGRPCLBStatsStreamingSuccess(t *testing.T) { if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) // The first non-failfast RPC succeeds, all connections are up. stream, err := testC.FullDuplexCall(context.Background(), grpc.WaitForReady(true)) if err != nil { t.Fatalf("%v.FullDuplexCall(_, _) = _, %v, want _, ", testC, err) } for { if _, err = stream.Recv(); err == io.EOF { break } } for i := 0; i < countRPC-1; i++ { stream, err = testC.FullDuplexCall(context.Background()) if err == nil { // Wait for stream to end if err is nil. for { if _, err = stream.Recv(); err == io.EOF { break } } } } }, &rpcStats{ numCallsStarted: int64(countRPC), numCallsFinished: int64(countRPC), numCallsFinishedKnownReceived: int64(countRPC), }); err != nil { t.Fatal(err) } } func (s) TestGRPCLBStatsStreamingDrop(t *testing.T) { if err := runAndCheckStats(t, true, nil, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) // The first non-failfast RPC succeeds, all connections are up. stream, err := testC.FullDuplexCall(context.Background(), grpc.WaitForReady(true)) if err != nil { t.Fatalf("%v.FullDuplexCall(_, _) = _, %v, want _, ", testC, err) } for { if _, err = stream.Recv(); err == io.EOF { break } } for i := 0; i < countRPC-1; i++ { stream, err = testC.FullDuplexCall(context.Background()) if err == nil { // Wait for stream to end if err is nil. 
for { if _, err = stream.Recv(); err == io.EOF { break } } } } }, &rpcStats{ numCallsStarted: int64(countRPC), numCallsFinished: int64(countRPC), numCallsFinishedKnownReceived: int64(countRPC) / 2, numCallsDropped: map[string]int64{lbToken: int64(countRPC) / 2}, }); err != nil { t.Fatal(err) } } func (s) TestGRPCLBStatsStreamingFailedToSend(t *testing.T) { if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) { testC := testpb.NewTestServiceClient(cc) // The first non-failfast RPC succeeds, all connections are up. stream, err := testC.FullDuplexCall(context.Background(), grpc.WaitForReady(true)) if err != nil { t.Fatalf("%v.FullDuplexCall(_, _) = _, %v, want _, ", testC, err) } for { if _, err = stream.Recv(); err == io.EOF { break } } for i := 0; i < countRPC-1; i++ { cc.NewStream(context.Background(), &grpc.StreamDesc{}, failtosendURI) } }, &rpcStats{ numCallsStarted: int64(countRPC)*2 - 1, numCallsFinished: int64(countRPC)*2 - 1, numCallsFinishedWithClientFailedToSend: int64(countRPC-1) * 2, numCallsFinishedKnownReceived: 1, }); err != nil { t.Fatal(err) } } func (s) TestGRPCLBStatsQuashEmpty(t *testing.T) { ch := make(chan *lbpb.ClientStats) defer close(ch) if err := runAndCheckStats(t, false, ch, func(cc *grpc.ClientConn) { // Perform no RPCs; wait for load reports to start, which should be // zero, then expect no other load report within 5x the update // interval. select { case st := <-ch: if !isZeroStats(st) { t.Errorf("got stats %v; want all zero", st) } case <-time.After(5 * time.Second): t.Errorf("did not get initial stats report after 5 seconds") return } select { case st := <-ch: t.Errorf("got unexpected stats report: %v", st) case <-time.After(500 * time.Millisecond): // Success. } go func() { for range ch { // Drain statsChan until it is closed. 
} }() }, &rpcStats{ numCallsStarted: 0, numCallsFinished: 0, numCallsFinishedKnownReceived: 0, }); err != nil { t.Fatal(err) } } grpc-go-1.29.1/balancer/grpclb/grpclb_test_util_test.go000066400000000000000000000032251365033716300231260ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpclb import ( "net" "sync" ) type tempError struct{} func (*tempError) Error() string { return "grpclb test temporary error" } func (*tempError) Temporary() bool { return true } type restartableListener struct { net.Listener addr string mu sync.Mutex closed bool conns []net.Conn } func newRestartableListener(l net.Listener) *restartableListener { return &restartableListener{ Listener: l, addr: l.Addr().String(), } } func (l *restartableListener) Accept() (conn net.Conn, err error) { conn, err = l.Listener.Accept() if err == nil { l.mu.Lock() if l.closed { conn.Close() l.mu.Unlock() return nil, &tempError{} } l.conns = append(l.conns, conn) l.mu.Unlock() } return } func (l *restartableListener) Close() error { return l.Listener.Close() } func (l *restartableListener) stopPreviousConns() { l.mu.Lock() l.closed = true tmp := l.conns l.conns = nil l.mu.Unlock() for _, conn := range tmp { conn.Close() } } func (l *restartableListener) restart() { l.mu.Lock() l.closed = false l.mu.Unlock() } grpc-go-1.29.1/balancer/grpclb/grpclb_util.go000066400000000000000000000146671365033716300210440ustar00rootroot00000000000000/* * * Copyright 
2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpclb import ( "fmt" "sync" "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/resolver" ) // The parent ClientConn should re-resolve when grpclb loses connection to the // remote balancer. When the ClientConn inside grpclb gets a TransientFailure, // it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's // ResolveNow, and eventually results in re-resolve happening in parent // ClientConn's resolver (DNS for example). // // parent // ClientConn // +-----------------------------------------------------------------+ // | parent +---------------------------------+ | // | DNS ClientConn | grpclb | | // | resolver balancerWrapper | | | // | + + | grpclb grpclb | | // | | | | ManualResolver ClientConn | | // | | | | + + | | // | | | | | | Transient | | // | | | | | | Failure | | // | | | | | <--------- | | | // | | | <--------------- | ResolveNow | | | // | | <--------- | ResolveNow | | | | | // | | ResolveNow | | | | | | // | | | | | | | | // | + + | + + | | // | +---------------------------------+ | // +-----------------------------------------------------------------+ // lbManualResolver is used by the ClientConn inside grpclb. It's a manual // resolver with a special ResolveNow() function. 
// // When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, // so when grpclb client lose contact with remote balancers, the parent // ClientConn's resolver will re-resolve. type lbManualResolver struct { scheme string ccr resolver.ClientConn ccb balancer.ClientConn } func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { r.ccr = cc return r, nil } func (r *lbManualResolver) Scheme() string { return r.scheme } // ResolveNow calls resolveNow on the parent ClientConn. func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOptions) { r.ccb.ResolveNow(o) } // Close is a noop for Resolver. func (*lbManualResolver) Close() {} // UpdateState calls cc.UpdateState. func (r *lbManualResolver) UpdateState(s resolver.State) { r.ccr.UpdateState(s) } const subConnCacheTime = time.Second * 10 // lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. // SubConns will be kept in cache for subConnCacheTime before being removed. // // Its new and remove methods are updated to do cache first. type lbCacheClientConn struct { cc balancer.ClientConn timeout time.Duration mu sync.Mutex // subConnCache only keeps subConns that are being deleted. 
subConnCache map[resolver.Address]*subConnCacheEntry subConnToAddr map[balancer.SubConn]resolver.Address } type subConnCacheEntry struct { sc balancer.SubConn cancel func() abortDeleting bool } func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { return &lbCacheClientConn{ cc: cc, timeout: subConnCacheTime, subConnCache: make(map[resolver.Address]*subConnCacheEntry), subConnToAddr: make(map[balancer.SubConn]resolver.Address), } } func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { if len(addrs) != 1 { return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs)) } addrWithoutMD := addrs[0] addrWithoutMD.Metadata = nil ccc.mu.Lock() defer ccc.mu.Unlock() if entry, ok := ccc.subConnCache[addrWithoutMD]; ok { // If entry is in subConnCache, the SubConn was being deleted. // cancel function will never be nil. entry.cancel() delete(ccc.subConnCache, addrWithoutMD) return entry.sc, nil } scNew, err := ccc.cc.NewSubConn(addrs, opts) if err != nil { return nil, err } ccc.subConnToAddr[scNew] = addrWithoutMD return scNew, nil } func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { ccc.mu.Lock() defer ccc.mu.Unlock() addr, ok := ccc.subConnToAddr[sc] if !ok { return } if entry, ok := ccc.subConnCache[addr]; ok { if entry.sc != sc { // This could happen if NewSubConn was called multiple times for the // same address, and those SubConns are all removed. We remove sc // immediately here. 
delete(ccc.subConnToAddr, sc) ccc.cc.RemoveSubConn(sc) } return } entry := &subConnCacheEntry{ sc: sc, } ccc.subConnCache[addr] = entry timer := time.AfterFunc(ccc.timeout, func() { ccc.mu.Lock() defer ccc.mu.Unlock() if entry.abortDeleting { return } ccc.cc.RemoveSubConn(sc) delete(ccc.subConnToAddr, sc) delete(ccc.subConnCache, addr) }) entry.cancel = func() { if !timer.Stop() { // If stop was not successful, the timer has fired (this can only // happen in a race). But the deleting function is blocked on ccc.mu // because the mutex was held by the caller of this function. // // Set abortDeleting to true to abort the deleting function. When // the lock is released, the deleting function will acquire the // lock, check the value of abortDeleting and return. entry.abortDeleting = true } } } func (ccc *lbCacheClientConn) UpdateState(s balancer.State) { ccc.cc.UpdateState(s) } func (ccc *lbCacheClientConn) close() { ccc.mu.Lock() // Only cancel all existing timers. There's no need to remove SubConns. for _, entry := range ccc.subConnCache { entry.cancel() } ccc.mu.Unlock() } grpc-go-1.29.1/balancer/grpclb/grpclb_util_test.go000066400000000000000000000145521365033716300220740ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpclb import ( "fmt" "sync" "testing" "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/resolver" ) type mockSubConn struct { balancer.SubConn } type mockClientConn struct { balancer.ClientConn mu sync.Mutex subConns map[balancer.SubConn]resolver.Address } func newMockClientConn() *mockClientConn { return &mockClientConn{ subConns: make(map[balancer.SubConn]resolver.Address), } } func (mcc *mockClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { sc := &mockSubConn{} mcc.mu.Lock() defer mcc.mu.Unlock() mcc.subConns[sc] = addrs[0] return sc, nil } func (mcc *mockClientConn) RemoveSubConn(sc balancer.SubConn) { mcc.mu.Lock() defer mcc.mu.Unlock() delete(mcc.subConns, sc) } const testCacheTimeout = 100 * time.Millisecond func checkMockCC(mcc *mockClientConn, scLen int) error { mcc.mu.Lock() defer mcc.mu.Unlock() if len(mcc.subConns) != scLen { return fmt.Errorf("mcc = %+v, want len(mcc.subConns) = %v", mcc.subConns, scLen) } return nil } func checkCacheCC(ccc *lbCacheClientConn, sccLen, sctaLen int) error { ccc.mu.Lock() defer ccc.mu.Unlock() if len(ccc.subConnCache) != sccLen { return fmt.Errorf("ccc = %+v, want len(ccc.subConnCache) = %v", ccc.subConnCache, sccLen) } if len(ccc.subConnToAddr) != sctaLen { return fmt.Errorf("ccc = %+v, want len(ccc.subConnToAddr) = %v", ccc.subConnToAddr, sctaLen) } return nil } // Test that SubConn won't be immediately removed. func (s) TestLBCacheClientConnExpire(t *testing.T) { mcc := newMockClientConn() if err := checkMockCC(mcc, 0); err != nil { t.Fatal(err) } ccc := newLBCacheClientConn(mcc) ccc.timeout = testCacheTimeout if err := checkCacheCC(ccc, 0, 0); err != nil { t.Fatal(err) } sc, _ := ccc.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) // One subconn in MockCC. if err := checkMockCC(mcc, 1); err != nil { t.Fatal(err) } // No subconn being deleted, and one in CacheCC. 
if err := checkCacheCC(ccc, 0, 1); err != nil { t.Fatal(err) } ccc.RemoveSubConn(sc) // One subconn in MockCC before timeout. if err := checkMockCC(mcc, 1); err != nil { t.Fatal(err) } // One subconn being deleted, and one in CacheCC. if err := checkCacheCC(ccc, 1, 1); err != nil { t.Fatal(err) } // Should all become empty after timeout. var err error for i := 0; i < 2; i++ { time.Sleep(testCacheTimeout) err = checkMockCC(mcc, 0) if err != nil { continue } err = checkCacheCC(ccc, 0, 0) if err != nil { continue } } if err != nil { t.Fatal(err) } } // Test that NewSubConn with the same address of a SubConn being removed will // reuse the SubConn and cancel the removing. func (s) TestLBCacheClientConnReuse(t *testing.T) { mcc := newMockClientConn() if err := checkMockCC(mcc, 0); err != nil { t.Fatal(err) } ccc := newLBCacheClientConn(mcc) ccc.timeout = testCacheTimeout if err := checkCacheCC(ccc, 0, 0); err != nil { t.Fatal(err) } sc, _ := ccc.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) // One subconn in MockCC. if err := checkMockCC(mcc, 1); err != nil { t.Fatal(err) } // No subconn being deleted, and one in CacheCC. if err := checkCacheCC(ccc, 0, 1); err != nil { t.Fatal(err) } ccc.RemoveSubConn(sc) // One subconn in MockCC before timeout. if err := checkMockCC(mcc, 1); err != nil { t.Fatal(err) } // One subconn being deleted, and one in CacheCC. if err := checkCacheCC(ccc, 1, 1); err != nil { t.Fatal(err) } // Recreate the old subconn, this should cancel the deleting process. sc, _ = ccc.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) // One subconn in MockCC. if err := checkMockCC(mcc, 1); err != nil { t.Fatal(err) } // No subconn being deleted, and one in CacheCC. if err := checkCacheCC(ccc, 0, 1); err != nil { t.Fatal(err) } var err error // Should not become empty after 2*timeout. 
time.Sleep(2 * testCacheTimeout) err = checkMockCC(mcc, 1) if err != nil { t.Fatal(err) } err = checkCacheCC(ccc, 0, 1) if err != nil { t.Fatal(err) } // Call remove again, will delete after timeout. ccc.RemoveSubConn(sc) // One subconn in MockCC before timeout. if err := checkMockCC(mcc, 1); err != nil { t.Fatal(err) } // One subconn being deleted, and one in CacheCC. if err := checkCacheCC(ccc, 1, 1); err != nil { t.Fatal(err) } // Should all become empty after timeout. for i := 0; i < 2; i++ { time.Sleep(testCacheTimeout) err = checkMockCC(mcc, 0) if err != nil { continue } err = checkCacheCC(ccc, 0, 0) if err != nil { continue } } if err != nil { t.Fatal(err) } } // Test that if the timer to remove a SubConn fires at the same time NewSubConn // cancels the timer, it doesn't cause deadlock. func (s) TestLBCache_RemoveTimer_New_Race(t *testing.T) { mcc := newMockClientConn() if err := checkMockCC(mcc, 0); err != nil { t.Fatal(err) } ccc := newLBCacheClientConn(mcc) ccc.timeout = time.Nanosecond if err := checkCacheCC(ccc, 0, 0); err != nil { t.Fatal(err) } sc, _ := ccc.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) // One subconn in MockCC. if err := checkMockCC(mcc, 1); err != nil { t.Fatal(err) } // No subconn being deleted, and one in CacheCC. if err := checkCacheCC(ccc, 0, 1); err != nil { t.Fatal(err) } done := make(chan struct{}) go func() { for i := 0; i < 1000; i++ { // Remove starts a timer with 1 ns timeout, the NewSubConn will race // with with the timer. ccc.RemoveSubConn(sc) sc, _ = ccc.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) } close(done) }() select { case <-time.After(time.Second): t.Fatalf("Test didn't finish within 1 second. Deadlock") case <-done: } } grpc-go-1.29.1/balancer/grpclb/regenerate.sh000077500000000000000000000017411365033716300206540ustar00rootroot00000000000000#!/bin/bash # Copyright 2018 gRPC authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux -o pipefail TMP=$(mktemp -d) function finish { rm -rf "$TMP" } trap finish EXIT pushd "$TMP" mkdir -p grpc/lb/v1 curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/lb/v1/load_balancer.proto > grpc/lb/v1/load_balancer.proto protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/lb/v1/*.proto popd rm -f grpc_lb_v1/*.pb.go cp "$TMP"/grpc/lb/v1/*.pb.go grpc_lb_v1/ grpc-go-1.29.1/balancer/rls/000077500000000000000000000000001365033716300155205ustar00rootroot00000000000000grpc-go-1.29.1/balancer/rls/internal/000077500000000000000000000000001365033716300173345ustar00rootroot00000000000000grpc-go-1.29.1/balancer/rls/internal/adaptive/000077500000000000000000000000001365033716300211315ustar00rootroot00000000000000grpc-go-1.29.1/balancer/rls/internal/adaptive/adaptive.go000066400000000000000000000114451365033716300232620ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ // Package adaptive provides functionality for adaptive client-side throttling. package adaptive import ( "sync" "time" "google.golang.org/grpc/internal/grpcrand" ) // For overriding in unittests. var ( timeNowFunc = func() time.Time { return time.Now() } randFunc = func() float64 { return grpcrand.Float64() } ) const ( defaultDuration = 30 * time.Second defaultBins = 100 defaultRatioForAccepts = 2.0 defaultRequestsPadding = 8.0 ) // Throttler implements a client-side throttling recommendation system. All // methods are safe for concurrent use by multiple goroutines. // // The throttler has the following knobs for which we will use defaults for // now. If there is a need to make them configurable at a later point in time, // support for the same will be added. // * Duration: amount of recent history that will be taken into account for // making client-side throttling decisions. A default of 30 seconds is used. // * Bins: number of bins to be used for bucketing historical data. A default // of 100 is used. // * RatioForAccepts: ratio by which accepts are multiplied, typically a value // slightly larger than 1.0. This is used to make the throttler behave as if // the backend had accepted more requests than it actually has, which lets us // err on the side of sending to the backend more requests than we think it // will accept for the sake of speeding up the propagation of state. A // default of 2.0 is used. // * RequestsPadding: is used to decrease the (client-side) throttling // probability in the low QPS regime (to speed up propagation of state), as // well as to safeguard against hitting a client-side throttling probability // of 100%. The weight of this value decreases as the number of requests in // recent history grows. A default of 8 is used. 
// // The adaptive throttler attempts to estimate the probability that a request // will be throttled using recent history. Server requests (both throttled and // accepted) are registered with the throttler (via the RegisterBackendResponse // method), which then recommends client-side throttling (via the // ShouldThrottle method) with probability given by: // (requests - RatioForAccepts * accepts) / (requests + RequestsPadding) type Throttler struct { ratioForAccepts float64 requestsPadding float64 // Number of total accepts and throttles in the lookback period. mu sync.Mutex accepts *lookback throttles *lookback } // New initializes a new adaptive throttler with the default values. func New() *Throttler { return newWithArgs(defaultDuration, defaultBins, defaultRatioForAccepts, defaultRequestsPadding) } // newWithArgs initializes a new adaptive throttler with the provided values. // Used only in unittests. func newWithArgs(duration time.Duration, bins int64, ratioForAccepts, requestsPadding float64) *Throttler { return &Throttler{ ratioForAccepts: ratioForAccepts, requestsPadding: requestsPadding, accepts: newLookback(bins, duration), throttles: newLookback(bins, duration), } } // ShouldThrottle returns a probabilistic estimate of whether the server would // throttle the next request. This should be called for every request before // allowing it to hit the network. If the returned value is true, the request // should be aborted immediately (as if it had been throttled by the server). 
func (t *Throttler) ShouldThrottle() bool { randomProbability := randFunc() now := timeNowFunc() t.mu.Lock() defer t.mu.Unlock() accepts, throttles := float64(t.accepts.sum(now)), float64(t.throttles.sum(now)) requests := accepts + throttles throttleProbability := (requests - t.ratioForAccepts*accepts) / (requests + t.requestsPadding) if throttleProbability <= randomProbability { return false } t.throttles.add(now, 1) return true } // RegisterBackendResponse registers a response received from the backend for a // request allowed by ShouldThrottle. This should be called for every response // received from the backend (i.e., once for each request for which // ShouldThrottle returned false). func (t *Throttler) RegisterBackendResponse(throttled bool) { now := timeNowFunc() t.mu.Lock() if throttled { t.throttles.add(now, 1) } else { t.accepts.add(now, 1) } t.mu.Unlock() } grpc-go-1.29.1/balancer/rls/internal/adaptive/adaptive_test.go000066400000000000000000000124211365033716300243140ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package adaptive import ( "sync" "testing" "time" ) // stats returns a tuple with accepts, throttles for the current time. func (th *Throttler) stats() (int64, int64) { now := timeNowFunc() th.mu.Lock() a, t := th.accepts.sum(now), th.throttles.sum(now) th.mu.Unlock() return a, t } // Enums for responses. 
const ( E = iota // No response A // Accepted T // Throttled ) func TestRegisterBackendResponse(t *testing.T) { testcases := []struct { desc string bins int64 ticks []int64 responses []int64 wantAccepts []int64 wantThrottled []int64 }{ { "Accumulate", 3, []int64{0, 1, 2}, // Ticks []int64{A, T, E}, // Responses []int64{1, 1, 1}, // Accepts []int64{0, 1, 1}, // Throttled }, { "LightTimeTravel", 3, []int64{1, 0, 2}, // Ticks []int64{A, T, E}, // Response []int64{1, 1, 1}, // Accepts []int64{0, 1, 1}, // Throttled }, { "HeavyTimeTravel", 3, []int64{8, 0, 9}, // Ticks []int64{A, A, A}, // Response []int64{1, 1, 2}, // Accepts []int64{0, 0, 0}, // Throttled }, { "Rollover", 1, []int64{0, 1, 2}, // Ticks []int64{A, T, E}, // Responses []int64{1, 0, 0}, // Accepts []int64{0, 1, 0}, // Throttled }, } m := mockClock{} oldTimeNowFunc := timeNowFunc timeNowFunc = m.Now defer func() { timeNowFunc = oldTimeNowFunc }() for _, test := range testcases { t.Run(test.desc, func(t *testing.T) { th := newWithArgs(time.Duration(test.bins), test.bins, 2.0, 8) for i, tick := range test.ticks { m.SetNanos(tick) if test.responses[i] != E { th.RegisterBackendResponse(test.responses[i] == T) } if gotAccepts, gotThrottled := th.stats(); gotAccepts != test.wantAccepts[i] || gotThrottled != test.wantThrottled[i] { t.Errorf("th.stats() = {%d, %d} for index %d, want {%d, %d}", i, gotAccepts, gotThrottled, test.wantAccepts[i], test.wantThrottled[i]) } } }) } } func TestShouldThrottleOptions(t *testing.T) { // ShouldThrottle should return true iff // (requests - RatioForAccepts * accepts) / (requests + RequestsPadding) <= p // where p is a random number. For the purposes of this test it's fixed // to 0.5. 
responses := []int64{T, T, T, T, T, T, T, T, T, A, A, A, A, A, A, T, T, T, T} n := false y := true testcases := []struct { desc string ratioForAccepts float64 requestsPadding float64 want []bool }{ { "Baseline", 1.1, 8, []bool{n, n, n, n, n, n, n, n, y, y, y, y, y, n, n, n, y, y, y}, }, { "ChangePadding", 1.1, 7, []bool{n, n, n, n, n, n, n, y, y, y, y, y, y, y, y, y, y, y, y}, }, { "ChangeRatioForAccepts", 1.4, 8, []bool{n, n, n, n, n, n, n, n, y, y, n, n, n, n, n, n, n, n, n}, }, } m := mockClock{} oldTimeNowFunc := timeNowFunc timeNowFunc = m.Now oldRandFunc := randFunc randFunc = func() float64 { return 0.5 } defer func() { timeNowFunc = oldTimeNowFunc randFunc = oldRandFunc }() for _, test := range testcases { t.Run(test.desc, func(t *testing.T) { m.SetNanos(0) th := newWithArgs(time.Duration(time.Nanosecond), 1, test.ratioForAccepts, test.requestsPadding) for i, response := range responses { if response != E { th.RegisterBackendResponse(response == T) } if got := th.ShouldThrottle(); got != test.want[i] { t.Errorf("ShouldThrottle for index %d: got %v, want %v", i, got, test.want[i]) } } }) } } func TestParallel(t *testing.T) { // Uses all the defaults which comes with a 30 second duration. 
th := New() testDuration := 2 * time.Second numRoutines := 10 accepts := make([]int64, numRoutines) throttles := make([]int64, numRoutines) var wg sync.WaitGroup for i := 0; i < numRoutines; i++ { wg.Add(1) go func(num int) { defer wg.Done() ticker := time.NewTicker(testDuration) var accept int64 var throttle int64 for i := 0; ; i++ { select { case <-ticker.C: ticker.Stop() accepts[num] = accept throttles[num] = throttle return default: if i%2 == 0 { th.RegisterBackendResponse(true) throttle++ } else { th.RegisterBackendResponse(false) accept++ } } } }(i) } wg.Wait() var wantAccepts, wantThrottles int64 for i := 0; i < numRoutines; i++ { wantAccepts += accepts[i] wantThrottles += throttles[i] } if gotAccepts, gotThrottles := th.stats(); gotAccepts != wantAccepts || gotThrottles != wantThrottles { t.Errorf("th.stats() = {%d, %d}, want {%d, %d}", gotAccepts, gotThrottles, wantAccepts, wantThrottles) } } type mockClock struct { t time.Time } func (m *mockClock) Now() time.Time { return m.t } func (m *mockClock) SetNanos(n int64) { m.t = time.Unix(0, n) } grpc-go-1.29.1/balancer/rls/internal/adaptive/lookback.go000066400000000000000000000050311365033716300232440ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package adaptive import "time" // lookback implements a moving sum over an int64 timeline. type lookback struct { bins int64 // Number of bins to use for lookback. width time.Duration // Width of each bin. 
head int64 // Absolute bin index (time * bins / duration) of the current head bin. total int64 // Sum over all the values in buf, within the lookback window behind head. buf []int64 // Ring buffer for keeping track of the sum elements. } // newLookback creates a new lookback for the given duration with a set number // of bins. func newLookback(bins int64, duration time.Duration) *lookback { return &lookback{ bins: bins, width: duration / time.Duration(bins), buf: make([]int64, bins), } } // add is used to increment the lookback sum. func (l *lookback) add(t time.Time, v int64) { pos := l.advance(t) if (l.head - pos) >= l.bins { // Do not increment counters if pos is more than bins behind head. return } l.buf[pos%l.bins] += v l.total += v } // sum returns the sum of the lookback buffer at the given time or head, // whichever is greater. func (l *lookback) sum(t time.Time) int64 { l.advance(t) return l.total } // advance prepares the lookback buffer for calls to add() or sum() at time t. // If head is greater than t then the lookback buffer will be untouched. The // absolute bin index corresponding to t is returned. It will always be less // than or equal to head. func (l *lookback) advance(t time.Time) int64 { ch := l.head // Current head bin index. nh := t.UnixNano() / l.width.Nanoseconds() // New head bin index. if nh <= ch { // Either head unchanged or clock jitter (time has moved backwards). Do // not advance. return nh } jmax := min(l.bins, nh-ch) for j := int64(0); j < jmax; j++ { i := (ch + j + 1) % l.bins l.total -= l.buf[i] l.buf[i] = 0 } l.head = nh return nh } func min(x int64, y int64) int64 { if x < y { return x } return y } grpc-go-1.29.1/balancer/rls/internal/adaptive/lookback_test.go000066400000000000000000000037701365033716300243130ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package adaptive import ( "testing" "time" ) func TestLookback(t *testing.T) { makeTicks := func(offsets []int64) []time.Time { var ticks []time.Time now := time.Now() for _, offset := range offsets { ticks = append(ticks, now.Add(time.Duration(offset))) } return ticks } // lookback.add and lookback.sum behave correctly. testcases := []struct { desc string bins int64 ticks []time.Time values []int64 want []int64 }{ { "Accumulate", 3, makeTicks([]int64{0, 1, 2}), // Ticks []int64{1, 2, 3}, // Values []int64{1, 3, 6}, // Want }, { "LightTimeTravel", 3, makeTicks([]int64{1, 0, 2}), // Ticks []int64{1, 2, 3}, // Values []int64{1, 3, 6}, // Want }, { "HeavyTimeTravel", 3, makeTicks([]int64{8, 0, 9}), // Ticks []int64{1, 2, 3}, // Values []int64{1, 1, 4}, // Want }, { "Rollover", 1, makeTicks([]int64{0, 1, 2}), // Ticks []int64{1, 2, 3}, // Values []int64{1, 2, 3}, // Want }, } for _, test := range testcases { t.Run(test.desc, func(t *testing.T) { lb := newLookback(test.bins, time.Duration(test.bins)) for i, tick := range test.ticks { lb.add(tick, test.values[i]) if got := lb.sum(tick); got != test.want[i] { t.Errorf("sum for index %d got %d, want %d", i, got, test.want[i]) } } }) } } grpc-go-1.29.1/balancer/rls/internal/builder.go000066400000000000000000000020651365033716300213140ustar00rootroot00000000000000// +build go1.10 /* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package rls implements the RLS LB policy. package rls const rlsBalancerName = "rls" // rlsBB helps build RLS load balancers and parse the service config to be // passed to the RLS load balancer. type rlsBB struct { // TODO(easwars): Implement the Build() method and register the builder. } // Name returns the name of the RLS LB policy and helps implement the // balancer.Balancer interface. func (*rlsBB) Name() string { return rlsBalancerName } grpc-go-1.29.1/balancer/rls/internal/cache/000077500000000000000000000000001365033716300203775ustar00rootroot00000000000000grpc-go-1.29.1/balancer/rls/internal/cache/cache.go000066400000000000000000000213401365033716300217710ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package cache provides an LRU cache implementation to be used by the RLS LB // policy to cache RLS response data. 
package cache import ( "container/list" "sync" "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" ) // Key represents the cache key used to uniquely identify a cache entry. type Key struct { // Path is the full path of the incoming RPC request. Path string // KeyMap is a stringified version of the RLS request keys built using the // RLS keyBuilder. Since map is not a Type which is comparable in Go, it // cannot be part of the key for another map (the LRU cache is implemented // using a native map type). KeyMap string } // Entry wraps all the data to be stored in a cache entry. type Entry struct { // Mu synchronizes access to this particular cache entry. The LB policy // will also hold another mutex to synchronize access to the cache as a // whole. To avoid holding the top-level mutex for the whole duration for // which one particular cache entry is acted upon, we use this entry mutex. Mu sync.Mutex // ExpiryTime is the absolute time at which the data cached as part of this // entry stops being valid. When an RLS request succeeds, this is set to // the current time plus the max_age field from the LB policy config. An // entry with this field in the past is not used to process picks. ExpiryTime time.Time // BackoffExpiryTime is the absolute time at which an entry which has gone // through backoff stops being valid. When an RLS request fails, this is // set to the current time plus twice the backoff time. The cache expiry // timer will only delete entries for which both ExpiryTime and // BackoffExpiryTime are in the past. BackoffExpiryTime time.Time // StaleTime is the absolute time after which this entry will be // proactively refreshed if we receive a request for it. When an RLS // request succeeds, this is set to the current time plus the stale_age // from the LB policy config. StaleTime time.Time // BackoffTime is the absolute time at which the backoff period for this // entry ends. 
The backoff timer is setup with this value. No new RLS // requests are sent out for this entry until the backoff period ends. BackoffTime time.Time // EarliestEvictTime is the absolute time before which this entry should // not be evicted from the cache. This is set to a default value of 5 // seconds when the entry is created. This is required to make sure that a // new entry added to the cache is not evicted before the RLS response // arrives (usually when the cache is too small). EarliestEvictTime time.Time // CallStatus stores the RPC status of the previous RLS request for this // entry. Picks for entries with a non-nil value for this field are failed // with the error stored here. CallStatus error // Backoff contains all backoff related state. When an RLS request // succeeds, backoff state is reset. Backoff BackoffState // HeaderData is received in an RLS response and is to be sent in the // X-Google-RLS-Data header for matching RPCs. HeaderData string // ChildPicker is a very thin wrapper around the child policy wrapper. // The type is declared as a V2Picker interface since the users of // the cache only care about the picker provided by the child policy, and // this makes it easy for testing. ChildPicker balancer.V2Picker // size stores the size of this cache entry. Uses only a subset of the // fields. See `entrySize` for this is computed. size int64 // key contains the cache key corresponding to this entry. This is required // from methods like `removeElement` which only have a pointer to the // list.Element which contains a reference to the cache.Entry. But these // methods need the cache.Key to be able to remove the entry from the // underlying map. key Key } // BackoffState wraps all backoff related state associated with a cache entry. type BackoffState struct { // Retries keeps track of the number of RLS failures, to be able to // determine the amount of time to backoff before the next attempt. 
Retries int // Backoff is an exponential backoff implementation which returns the // amount of time to backoff, given the number of retries. Backoff backoff.Strategy // Timer fires when the backoff period ends and incoming requests after // this will trigger a new RLS request. Timer *time.Timer // Callback provided by the LB policy to be notified when the backoff timer // expires. This will trigger a new picker to be returned to the // ClientConn, to force queued up RPCs to be retried. Callback func() } // LRU is a cache with a least recently used eviction policy. It is not safe // for concurrent access. type LRU struct { maxSize int64 usedSize int64 onEvicted func(Key, *Entry) ll *list.List cache map[Key]*list.Element } // NewLRU creates a cache.LRU with a size limit of maxSize and the provided // eviction callback. // // Currently, only the cache.Key and the HeaderData field from cache.Entry // count towards the size of the cache (other overhead per cache entry is not // counted). The cache could temporarily exceed the configured maxSize because // we want the entries to spend a configured minimum amount of time in the // cache before they are LRU evicted (so that all the work performed in sending // an RLS request and caching the response is not a total waste). // // The provided onEvited callback must not attempt to re-add the entry inline // and the RLS LB policy does not have a need to do that. // // The cache package trusts the RLS policy (its only user) to supply a default // minimum non-zero maxSize, in the event that the ServiceConfig does not // provide a value for it. func NewLRU(maxSize int64, onEvicted func(Key, *Entry)) *LRU { return &LRU{ maxSize: maxSize, onEvicted: onEvicted, ll: list.New(), cache: make(map[Key]*list.Element), } } // Resize sets the size limit of the LRU to newMaxSize and removes older // entries, if required, to comply with the new limit. 
func (lru *LRU) Resize(newMaxSize int64) { lru.maxSize = newMaxSize lru.removeToFit(0) } // TODO(easwars): If required, make this function more sophisticated. func entrySize(key Key, value *Entry) int64 { return int64(len(key.Path) + len(key.KeyMap) + len(value.HeaderData)) } // removeToFit removes older entries from the cache to make room for a new // entry of size newSize. func (lru *LRU) removeToFit(newSize int64) { now := time.Now() for lru.usedSize+newSize > lru.maxSize { elem := lru.ll.Back() if elem == nil { // This is a corner case where the cache is empty, but the new entry // to be added is bigger than maxSize. grpclog.Info("rls: newly added cache entry exceeds cache maxSize") return } entry := elem.Value.(*Entry) if t := entry.EarliestEvictTime; !t.IsZero() && t.Before(now) { // When the oldest entry is too new (it hasn't even spent a default // minimum amount of time in the cache), we abort and allow the // cache to grow bigger than the configured maxSize. grpclog.Info("rls: LRU eviction finds oldest entry to be too new. Allowing cache to exceed maxSize momentarily") return } lru.removeElement(elem) } } // Add adds a new entry to the cache. func (lru *LRU) Add(key Key, value *Entry) { size := entrySize(key, value) elem, ok := lru.cache[key] if !ok { lru.removeToFit(size) lru.usedSize += size value.size = size value.key = key elem := lru.ll.PushFront(value) lru.cache[key] = elem return } existing := elem.Value.(*Entry) sizeDiff := size - existing.size lru.removeToFit(sizeDiff) value.size = size elem.Value = value lru.ll.MoveToFront(elem) lru.usedSize += sizeDiff } // Remove removes a cache entry wth key key, if one exists. 
func (lru *LRU) Remove(key Key) { if elem, ok := lru.cache[key]; ok { lru.removeElement(elem) } } func (lru *LRU) removeElement(e *list.Element) { entry := e.Value.(*Entry) lru.ll.Remove(e) delete(lru.cache, entry.key) lru.usedSize -= entry.size if lru.onEvicted != nil { lru.onEvicted(entry.key, entry) } } // Get returns a cache entry with key key. func (lru *LRU) Get(key Key) *Entry { elem, ok := lru.cache[key] if !ok { return nil } lru.ll.MoveToFront(elem) return elem.Value.(*Entry) } grpc-go-1.29.1/balancer/rls/internal/cache/cache_test.go000066400000000000000000000160231365033716300230320ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package cache import ( "sync" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" ) const ( defaultTestCacheSize = 5 defaultTestCacheMaxSize = 1000000 defaultTestTimeout = 1 * time.Second ) // TestGet verifies the Add and Get methods of cache.LRU. 
func TestGet(t *testing.T) { key1 := Key{Path: "/service1/method1", KeyMap: "k1=v1,k2=v2"} key2 := Key{Path: "/service2/method2", KeyMap: "k1=v1,k2=v2"} val1 := Entry{HeaderData: "h1=v1"} val2 := Entry{HeaderData: "h2=v2"} tests := []struct { desc string keysToAdd []Key valsToAdd []*Entry keyToGet Key wantEntry *Entry }{ { desc: "Empty cache", keyToGet: Key{}, }, { desc: "Single entry miss", keysToAdd: []Key{key1}, valsToAdd: []*Entry{&val1}, keyToGet: Key{}, }, { desc: "Single entry hit", keysToAdd: []Key{key1}, valsToAdd: []*Entry{&val1}, keyToGet: key1, wantEntry: &val1, }, { desc: "Multi entry miss", keysToAdd: []Key{key1, key2}, valsToAdd: []*Entry{&val1, &val2}, keyToGet: Key{}, }, { desc: "Multi entry hit", keysToAdd: []Key{key1, key2}, valsToAdd: []*Entry{&val1, &val2}, keyToGet: key1, wantEntry: &val1, }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { lru := NewLRU(defaultTestCacheMaxSize, nil) for i, key := range test.keysToAdd { lru.Add(key, test.valsToAdd[i]) } opts := []cmp.Option{ cmpopts.IgnoreInterfaces(struct{ sync.Locker }{}), cmpopts.IgnoreUnexported(Entry{}), } if gotEntry := lru.Get(test.keyToGet); !cmp.Equal(gotEntry, test.wantEntry, opts...) { t.Errorf("lru.Get(%+v) = %+v, want %+v", test.keyToGet, gotEntry, test.wantEntry) } }) } } // TestRemove verifies the Add and Remove methods of cache.LRU. func TestRemove(t *testing.T) { keys := []Key{ {Path: "/service1/method1", KeyMap: "k1=v1,k2=v2"}, {Path: "/service2/method2", KeyMap: "k1=v1,k2=v2"}, {Path: "/service3/method3", KeyMap: "k1=v1,k2=v2"}, } lru := NewLRU(defaultTestCacheMaxSize, nil) for _, k := range keys { lru.Add(k, &Entry{}) } for _, k := range keys { lru.Remove(k) if entry := lru.Get(k); entry != nil { t.Fatalf("lru.Get(%+v) after a call to lru.Remove succeeds, should have failed", k) } } } // TestExceedingSizeCausesEviction verifies the case where adding a new entry // to the cache leads to eviction of old entries to make space for the new one. 
func TestExceedingSizeCausesEviction(t *testing.T) { evictCh := make(chan Key, defaultTestCacheSize) onEvicted := func(k Key, _ *Entry) { t.Logf("evicted key {%+v} from cache", k) evictCh <- k } keysToFill := []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}} keysCausingEviction := []Key{{Path: "f"}, {Path: "g"}, {Path: "h"}, {Path: "i"}, {Path: "j"}} lru := NewLRU(defaultTestCacheSize, onEvicted) for _, key := range keysToFill { lru.Add(key, &Entry{}) } for i, key := range keysCausingEviction { lru.Add(key, &Entry{}) timer := time.NewTimer(defaultTestTimeout) select { case <-timer.C: t.Fatal("Test timeout waiting for eviction") case k := <-evictCh: timer.Stop() if !cmp.Equal(k, keysToFill[i]) { t.Fatalf("Evicted key %+v, wanted %+v", k, keysToFill[i]) } } } } // TestAddCausesMultipleEvictions verifies the case where adding one new entry // causes the eviction of multiple old entries to make space for the new one. func TestAddCausesMultipleEvictions(t *testing.T) { evictCh := make(chan Key, defaultTestCacheSize) onEvicted := func(k Key, _ *Entry) { evictCh <- k } keysToFill := []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}} keyCausingEviction := Key{Path: "abcde"} lru := NewLRU(defaultTestCacheSize, onEvicted) for _, key := range keysToFill { lru.Add(key, &Entry{}) } lru.Add(keyCausingEviction, &Entry{}) for i := range keysToFill { timer := time.NewTimer(defaultTestTimeout) select { case <-timer.C: t.Fatal("Test timeout waiting for eviction") case k := <-evictCh: timer.Stop() if !cmp.Equal(k, keysToFill[i]) { t.Fatalf("Evicted key %+v, wanted %+v", k, keysToFill[i]) } } } } // TestModifyCausesMultipleEvictions verifies the case where mofiying an // existing entry to increase its size leads to the eviction of older entries // to make space for the new one. 
func TestModifyCausesMultipleEvictions(t *testing.T) { evictCh := make(chan Key, defaultTestCacheSize) onEvicted := func(k Key, _ *Entry) { evictCh <- k } keysToFill := []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}} lru := NewLRU(defaultTestCacheSize, onEvicted) for _, key := range keysToFill { lru.Add(key, &Entry{}) } lru.Add(keysToFill[len(keysToFill)-1], &Entry{HeaderData: "xxxx"}) for i := range keysToFill[:len(keysToFill)-1] { timer := time.NewTimer(defaultTestTimeout) select { case <-timer.C: t.Fatal("Test timeout waiting for eviction") case k := <-evictCh: timer.Stop() if !cmp.Equal(k, keysToFill[i]) { t.Fatalf("Evicted key %+v, wanted %+v", k, keysToFill[i]) } } } } func TestLRUResize(t *testing.T) { tests := []struct { desc string maxSize int64 keysToFill []Key newMaxSize int64 wantEvictedKeys []Key }{ { desc: "resize causes multiple evictions", maxSize: 5, keysToFill: []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}}, newMaxSize: 3, wantEvictedKeys: []Key{{Path: "a"}, {Path: "b"}}, }, { desc: "resize causes no evictions", maxSize: 50, keysToFill: []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}}, newMaxSize: 10, wantEvictedKeys: []Key{}, }, { desc: "resize to higher value", maxSize: 5, keysToFill: []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}}, newMaxSize: 10, wantEvictedKeys: []Key{}, }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { var evictedKeys []Key onEvicted := func(k Key, _ *Entry) { evictedKeys = append(evictedKeys, k) } lru := NewLRU(test.maxSize, onEvicted) for _, key := range test.keysToFill { lru.Add(key, &Entry{}) } lru.Resize(test.newMaxSize) if !cmp.Equal(evictedKeys, test.wantEvictedKeys, cmpopts.EquateEmpty()) { t.Fatalf("lru.Resize evicted keys {%v}, should have evicted {%v}", evictedKeys, test.wantEvictedKeys) } }) } } 
grpc-go-1.29.1/balancer/rls/internal/client.go000066400000000000000000000053071365033716300211460ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package rls import ( "context" "time" "google.golang.org/grpc" rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" ) // For gRPC services using RLS, the value of target_type in the // RouteLookupServiceRequest will be set to this. const grpcTargetType = "grpc" // rlsClient is a simple wrapper around a RouteLookupService client which // provides non-blocking semantics on top of a blocking unary RPC call. // // The RLS LB policy creates a new rlsClient object with the following values: // * a grpc.ClientConn to the RLS server using appropriate credentials from the // parent channel // * dialTarget corresponding to the original user dial target, e.g. // "firestore.googleapis.com". // // The RLS LB policy uses an adaptive throttler to perform client side // throttling and asks this client to make an RPC call only after checking with // the throttler. type rlsClient struct { cc *grpc.ClientConn stub rlspb.RouteLookupServiceClient // origDialTarget is the original dial target of the user and sent in each // RouteLookup RPC made to the RLS server. origDialTarget string // rpcTimeout specifies the timeout for the RouteLookup RPC call. The LB // policy receives this value in its service config. 
rpcTimeout time.Duration } func newRLSClient(cc *grpc.ClientConn, dialTarget string, rpcTimeout time.Duration) *rlsClient { return &rlsClient{ cc: cc, stub: rlspb.NewRouteLookupServiceClient(cc), origDialTarget: dialTarget, rpcTimeout: rpcTimeout, } } type lookupCallback func(target, headerData string, err error) // lookup starts a RouteLookup RPC in a separate goroutine and returns the // results (and error, if any) in the provided callback. func (c *rlsClient) lookup(path string, keyMap map[string]string, cb lookupCallback) { go func() { ctx, cancel := context.WithTimeout(context.Background(), c.rpcTimeout) resp, err := c.stub.RouteLookup(ctx, &rlspb.RouteLookupRequest{ Server: c.origDialTarget, Path: path, TargetType: grpcTargetType, KeyMap: keyMap, }) cb(resp.GetTarget(), resp.GetHeaderData(), err) cancel() }() } grpc-go-1.29.1/balancer/rls/internal/client_test.go000066400000000000000000000122521365033716300222020ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package rls import ( "errors" "fmt" "testing" "time" "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "google.golang.org/grpc" rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/balancer/rls/internal/testutils/fakeserver" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) const ( defaultDialTarget = "dummy" defaultRPCTimeout = 5 * time.Second defaultTestTimeout = 1 * time.Second ) func setup(t *testing.T) (*fakeserver.Server, *grpc.ClientConn, func()) { t.Helper() server, sCleanup, err := fakeserver.Start() if err != nil { t.Fatalf("Failed to start fake RLS server: %v", err) } cc, cCleanup, err := server.ClientConn() if err != nil { t.Fatalf("Failed to get a ClientConn to the RLS server: %v", err) } return server, cc, func() { sCleanup() cCleanup() } } // TestLookupFailure verifies the case where the RLS server returns an error. func TestLookupFailure(t *testing.T) { server, cc, cleanup := setup(t) defer cleanup() // We setup the fake server to return an error. server.ResponseChan <- fakeserver.Response{Err: errors.New("rls failure")} rlsClient := newRLSClient(cc, defaultDialTarget, defaultRPCTimeout) errCh := make(chan error) rlsClient.lookup("", nil, func(target, headerData string, err error) { if err == nil { errCh <- errors.New("rlsClient.lookup() succeeded, should have failed") return } if target != "" || headerData != "" { errCh <- fmt.Errorf("rlsClient.lookup() = (%s, %s), should be empty strings", target, headerData) return } errCh <- nil }) timer := time.NewTimer(defaultTestTimeout) select { case <-timer.C: t.Fatal("Timeout when expecting a routeLookup callback") case err := <-errCh: timer.Stop() if err != nil { t.Fatal(err) } } } // TestLookupDeadlineExceeded tests the case where the RPC deadline associated // with the lookup expires. 
func TestLookupDeadlineExceeded(t *testing.T) { _, cc, cleanup := setup(t) defer cleanup() // Give the Lookup RPC a small deadline, but don't setup the fake server to // return anything. So the Lookup call will block and eventuall expire. rlsClient := newRLSClient(cc, defaultDialTarget, 100*time.Millisecond) errCh := make(chan error) rlsClient.lookup("", nil, func(target, headerData string, err error) { if st, ok := status.FromError(err); !ok || st.Code() != codes.DeadlineExceeded { errCh <- fmt.Errorf("rlsClient.lookup() returned error: %v, want %v", err, codes.DeadlineExceeded) return } errCh <- nil }) timer := time.NewTimer(defaultTestTimeout) select { case <-timer.C: t.Fatal("Timeout when expecting a routeLookup callback") case err := <-errCh: timer.Stop() if err != nil { t.Fatal(err) } } } // TestLookupSuccess verifies the successful Lookup API case. func TestLookupSuccess(t *testing.T) { server, cc, cleanup := setup(t) defer cleanup() const ( rlsReqPath = "/service/method" rlsRespTarget = "us_east_1.firestore.googleapis.com" rlsHeaderData = "headerData" ) rlsReqKeyMap := map[string]string{ "k1": "v1", "k2": "v2", } wantLookupRequest := &rlspb.RouteLookupRequest{ Server: defaultDialTarget, Path: rlsReqPath, TargetType: "grpc", KeyMap: rlsReqKeyMap, } rlsClient := newRLSClient(cc, defaultDialTarget, defaultRPCTimeout) errCh := make(chan error) rlsClient.lookup(rlsReqPath, rlsReqKeyMap, func(t, hd string, err error) { if err != nil { errCh <- fmt.Errorf("rlsClient.Lookup() failed: %v", err) return } if t != rlsRespTarget || hd != rlsHeaderData { errCh <- fmt.Errorf("rlsClient.lookup() = (%s, %s), want (%s, %s)", t, hd, rlsRespTarget, rlsHeaderData) return } errCh <- nil }) // Make sure that the fake server received the expected RouteLookupRequest // proto. 
timer := time.NewTimer(defaultTestTimeout) select { case gotLookupRequest := <-server.RequestChan: if !timer.Stop() { <-timer.C } if diff := cmp.Diff(wantLookupRequest, gotLookupRequest, cmp.Comparer(proto.Equal)); diff != "" { t.Fatalf("RouteLookupRequest diff (-want, +got):\n%s", diff) } case <-timer.C: t.Fatalf("Timed out wile waiting for a RouteLookupRequest") } // We setup the fake server to return this response when it receives a // request. server.ResponseChan <- fakeserver.Response{ Resp: &rlspb.RouteLookupResponse{ Target: rlsRespTarget, HeaderData: rlsHeaderData, }, } timer = time.NewTimer(defaultTestTimeout) select { case <-timer.C: t.Fatal("Timeout when expecting a routeLookup callback") case err := <-errCh: timer.Stop() if err != nil { t.Fatal(err) } } } grpc-go-1.29.1/balancer/rls/internal/config.go000066400000000000000000000303051365033716300211310ustar00rootroot00000000000000// +build go1.10 /* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package rls import ( "bytes" "encoding/json" "fmt" "time" "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/ptypes" durationpb "github.com/golang/protobuf/ptypes/duration" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/rls/internal/keys" rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) const ( // This is max duration that we are willing to cache RLS responses. If the // service config doesn't specify a value for max_age or if it specified a // value greater that this, we will use this value instead. maxMaxAge = 5 * time.Minute // If lookup_service_timeout is not specified in the service config, we use // a default of 10 seconds. defaultLookupServiceTimeout = 10 * time.Second // This is set to the targetNameField in the child policy config during // service config validation. dummyChildPolicyTarget = "target_name_to_be_filled_in_later" ) // lbConfig contains the parsed and validated contents of the // loadBalancingConfig section of the service config. The RLS LB policy will // use this to directly access config data instead of ploughing through proto // fields. type lbConfig struct { serviceconfig.LoadBalancingConfig kbMap keys.BuilderMap lookupService string lookupServiceTimeout time.Duration maxAge time.Duration staleAge time.Duration cacheSizeBytes int64 rpStrategy rlspb.RouteLookupConfig_RequestProcessingStrategy defaultTarget string cpName string cpTargetField string cpConfig map[string]json.RawMessage } // This struct resembles the JSON respresentation of the loadBalancing config // and makes it easier to unmarshal. type lbConfigJSON struct { RouteLookupConfig json.RawMessage ChildPolicy []*loadBalancingConfig ChildPolicyConfigTargetFieldName string } // loadBalancingConfig represents a single load balancing config, // stored in JSON format. 
// // TODO(easwars): This code seems to be repeated in a few places // (service_config.go and in the xds code as well). Refactor and re-use. type loadBalancingConfig struct { Name string Config json.RawMessage } // MarshalJSON returns a JSON encoding of l. func (l *loadBalancingConfig) MarshalJSON() ([]byte, error) { return nil, fmt.Errorf("rls: loadBalancingConfig.MarshalJSON() is unimplemented") } // UnmarshalJSON parses the JSON-encoded byte slice in data and stores it in l. func (l *loadBalancingConfig) UnmarshalJSON(data []byte) error { var cfg map[string]json.RawMessage if err := json.Unmarshal(data, &cfg); err != nil { return err } for name, config := range cfg { l.Name = name l.Config = config } return nil } // ParseConfig parses and validates the JSON representation of the service // config and returns the loadBalancingConfig to be used by the RLS LB policy. // // Helps implement the balancer.ConfigParser interface. // // The following validation checks are performed: // * routeLookupConfig: // ** grpc_keybuilders field: // - must have at least one entry // - must not have two entries with the same Name // - must not have any entry with a Name with the service field unset or // empty // - must not have any entries without a Name // - must not have a headers entry that has required_match set // - must not have two headers entries with the same key within one entry // ** lookup_service field: // - must be set and non-empty and must parse as a target URI // ** max_age field: // - if not specified or is greater than maxMaxAge, it will be reset to // maxMaxAge // ** stale_age field: // - if the value is greater than or equal to max_age, it is ignored // - if set, then max_age must also be set // ** valid_targets field: // - will be ignored // ** cache_size_bytes field: // - must be greater than zero // - TODO(easwars): Define a minimum value for this field, to be used when // left unspecified // ** request_processing_strategy field: // - must have a value other 
than STRATEGY_UNSPECIFIED // - if set to SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR or // ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, the default_target field must be // set to a non-empty value // * childPolicy field: // - must find a valid child policy with a valid config (the child policy must // be able to parse the provided config successfully when we pass it a dummy // target name in the target_field provided by the // childPolicyConfigTargetFieldName field) // * childPolicyConfigTargetFieldName field: // - must be set and non-empty func (*rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { cfgJSON := &lbConfigJSON{} if err := json.Unmarshal(c, cfgJSON); err != nil { return nil, fmt.Errorf("rls: json unmarshal failed for service config {%+v}: %v", string(c), err) } m := jsonpb.Unmarshaler{AllowUnknownFields: true} rlsProto := &rlspb.RouteLookupConfig{} if err := m.Unmarshal(bytes.NewReader(cfgJSON.RouteLookupConfig), rlsProto); err != nil { return nil, fmt.Errorf("rls: bad RouteLookupConfig proto {%+v}: %v", string(cfgJSON.RouteLookupConfig), err) } var childPolicy *loadBalancingConfig for _, lbcfg := range cfgJSON.ChildPolicy { if balancer.Get(lbcfg.Name) != nil { childPolicy = lbcfg break } } kbMap, err := keys.MakeBuilderMap(rlsProto) if err != nil { return nil, err } lookupService := rlsProto.GetLookupService() if lookupService == "" { return nil, fmt.Errorf("rls: empty lookup_service in service config {%+v}", string(c)) } parsedTarget := grpcutil.ParseTarget(lookupService) if parsedTarget.Scheme == "" { parsedTarget.Scheme = resolver.GetDefaultScheme() } if resolver.Get(parsedTarget.Scheme) == nil { return nil, fmt.Errorf("rls: invalid target URI in lookup_service {%s}", lookupService) } lookupServiceTimeout, err := convertDuration(rlsProto.GetLookupServiceTimeout()) if err != nil { return nil, fmt.Errorf("rls: failed to parse lookup_service_timeout in service config {%+v}: %v", string(c), err) } if lookupServiceTimeout == 0 { 
lookupServiceTimeout = defaultLookupServiceTimeout } maxAge, err := convertDuration(rlsProto.GetMaxAge()) if err != nil { return nil, fmt.Errorf("rls: failed to parse max_age in service config {%+v}: %v", string(c), err) } staleAge, err := convertDuration(rlsProto.GetStaleAge()) if err != nil { return nil, fmt.Errorf("rls: failed to parse staleAge in service config {%+v}: %v", string(c), err) } if staleAge != 0 && maxAge == 0 { return nil, fmt.Errorf("rls: stale_age is set, but max_age is not in service config {%+v}", string(c)) } if staleAge >= maxAge { grpclog.Info("rls: stale_age {%v} is greater than max_age {%v}, ignoring it", staleAge, maxAge) staleAge = 0 } if maxAge == 0 || maxAge > maxMaxAge { grpclog.Infof("rls: max_age in service config is %v, using %v", maxAge, maxMaxAge) maxAge = maxMaxAge } cacheSizeBytes := rlsProto.GetCacheSizeBytes() if cacheSizeBytes <= 0 { return nil, fmt.Errorf("rls: cache_size_bytes must be greater than 0 in service config {%+v}", string(c)) } rpStrategy := rlsProto.GetRequestProcessingStrategy() if rpStrategy == rlspb.RouteLookupConfig_STRATEGY_UNSPECIFIED { return nil, fmt.Errorf("rls: request_processing_strategy cannot be left unspecified in service config {%+v}", string(c)) } defaultTarget := rlsProto.GetDefaultTarget() if (rpStrategy == rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR || rpStrategy == rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS) && defaultTarget == "" { return nil, fmt.Errorf("rls: request_processing_strategy is %s, but default_target is not set", rpStrategy.String()) } if childPolicy == nil { return nil, fmt.Errorf("rls: childPolicy is invalid in service config {%+v}", string(c)) } if cfgJSON.ChildPolicyConfigTargetFieldName == "" { return nil, fmt.Errorf("rls: childPolicyConfigTargetFieldName field is not set in service config {%+v}", string(c)) } // TODO(easwars): When we start instantiating the child policy from the // parent RLS LB policy, we could make this function a method 
on the // lbConfig object and share the code. We would be parsing the child policy // config again during that time. The only difference betweeen now and then // would be that we would be using real targetField name instead of the // dummy. So, we could make the targetName field a parameter to this // function during the refactor. cpCfg, err := validateChildPolicyConfig(childPolicy, cfgJSON.ChildPolicyConfigTargetFieldName) if err != nil { return nil, err } return &lbConfig{ kbMap: kbMap, lookupService: lookupService, lookupServiceTimeout: lookupServiceTimeout, maxAge: maxAge, staleAge: staleAge, cacheSizeBytes: cacheSizeBytes, rpStrategy: rpStrategy, defaultTarget: defaultTarget, // TODO(easwars): Once we refactor validateChildPolicyConfig and make // it a method on the lbConfig object, we could directly store the // balancer.Builder and/or balancer.ConfigParser here instead of the // Name. That would mean that we would have to create the lbConfig // object here first before validating the childPolicy config, but // that's a minor detail. cpName: childPolicy.Name, cpTargetField: cfgJSON.ChildPolicyConfigTargetFieldName, cpConfig: cpCfg, }, nil } // validateChildPolicyConfig validates the child policy config received in the // service config. This makes it possible for us to reject service configs // which contain invalid child policy configs which we know will fail for sure. // // It does the following: // * Unmarshals the provided child policy config into a map of string to // json.RawMessage. This allows us to add an entry to the map corresponding // to the targetFieldName that we received in the service config. // * Marshals the map back into JSON, finds the config parser associated with // the child policy and asks it to validate the config. // * If the validation succeeded, removes the dummy entry from the map and // returns it. If any of the above steps failed, it returns an error. 
func validateChildPolicyConfig(cp *loadBalancingConfig, cpTargetField string) (map[string]json.RawMessage, error) { var childConfig map[string]json.RawMessage if err := json.Unmarshal(cp.Config, &childConfig); err != nil { return nil, fmt.Errorf("rls: json unmarshal failed for child policy config {%+v}: %v", cp.Config, err) } childConfig[cpTargetField], _ = json.Marshal(dummyChildPolicyTarget) jsonCfg, err := json.Marshal(childConfig) if err != nil { return nil, fmt.Errorf("rls: json marshal failed for child policy config {%+v}: %v", childConfig, err) } builder := balancer.Get(cp.Name) if builder == nil { // This should never happen since we already made sure that the child // policy name mentioned in the service config is a valid one. return nil, fmt.Errorf("rls: balancer builder not found for child_policy %v", cp.Name) } parser, ok := builder.(balancer.ConfigParser) if !ok { return nil, fmt.Errorf("rls: balancer builder for child_policy does not implement balancer.ConfigParser: %v", cp.Name) } _, err = parser.ParseConfig(jsonCfg) if err != nil { return nil, fmt.Errorf("rls: childPolicy config validation failed: %v", err) } delete(childConfig, cpTargetField) return childConfig, nil } func convertDuration(d *durationpb.Duration) (time.Duration, error) { if d == nil { return 0, nil } return ptypes.Duration(d) } grpc-go-1.29.1/balancer/rls/internal/config_test.go000066400000000000000000000357501365033716300222010ustar00rootroot00000000000000// +build go1.10 /* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package rls import ( "encoding/json" "fmt" "strings" "testing" "time" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" _ "google.golang.org/grpc/balancer/grpclb" // grpclb for config parsing. rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" _ "google.golang.org/grpc/internal/resolver/passthrough" // passthrough resolver. ) const balancerWithoutConfigParserName = "dummy_balancer" type dummyBB struct { balancer.Builder } func (*dummyBB) Name() string { return balancerWithoutConfigParserName } func init() { balancer.Register(&dummyBB{}) } func (lbCfg *lbConfig) Equal(other *lbConfig) bool { // This only ignores the keyBuilderMap field because its internals are not // exported, and hence not possible to specify in the want section of the // test. return lbCfg.lookupService == other.lookupService && lbCfg.lookupServiceTimeout == other.lookupServiceTimeout && lbCfg.maxAge == other.maxAge && lbCfg.staleAge == other.staleAge && lbCfg.cacheSizeBytes == other.cacheSizeBytes && lbCfg.rpStrategy == other.rpStrategy && lbCfg.defaultTarget == other.defaultTarget && lbCfg.cpName == other.cpName && lbCfg.cpTargetField == other.cpTargetField && cmp.Equal(lbCfg.cpConfig, other.cpConfig) } func TestParseConfig(t *testing.T) { tests := []struct { desc string input []byte wantCfg *lbConfig }{ // This input validates a few cases: // - A top-level unknown field should not fail. // - An unknown field in routeLookupConfig proto should not fail. // - lookupServiceTimeout is set to its default value, since it is not specified in the input. // - maxAge is set to maxMaxAge since the value is too large in the input. // - staleAge is ignore because it is higher than maxAge in the input. 
{ desc: "with transformations", input: []byte(`{ "top-level-unknown-field": "unknown-value", "routeLookupConfig": { "unknown-field": "unknown-value", "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], "lookupService": "passthrough:///target", "maxAge" : "500s", "staleAge": "600s", "cacheSizeBytes": 1000, "request_processing_strategy": "ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS", "defaultTarget": "passthrough:///default" }, "childPolicy": [ {"cds_experimental": {"Cluster": "my-fav-cluster"}}, {"unknown-policy": {"unknown-field": "unknown-value"}}, {"grpclb": {"childPolicy": [{"pickfirst": {}}]}} ], "childPolicyConfigTargetFieldName": "service_name" }`), wantCfg: &lbConfig{ lookupService: "passthrough:///target", lookupServiceTimeout: 10 * time.Second, // This is the default value. maxAge: 5 * time.Minute, // This is max maxAge. staleAge: time.Duration(0), // StaleAge is ignore because it was higher than maxAge. cacheSizeBytes: 1000, rpStrategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, defaultTarget: "passthrough:///default", cpName: "grpclb", cpTargetField: "service_name", cpConfig: map[string]json.RawMessage{"childPolicy": json.RawMessage(`[{"pickfirst": {}}]`)}, }, }, { desc: "without transformations", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], "lookupService": "passthrough:///target", "lookupServiceTimeout" : "100s", "maxAge": "60s", "staleAge" : "50s", "cacheSizeBytes": 1000, "request_processing_strategy": "ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS", "defaultTarget": "passthrough:///default" }, "childPolicy": [{"grpclb": {"childPolicy": [{"pickfirst": {}}]}}], "childPolicyConfigTargetFieldName": "service_name" }`), wantCfg: &lbConfig{ lookupService: "passthrough:///target", lookupServiceTimeout: 100 * time.Second, maxAge: 60 * time.Second, staleAge: 50 * 
time.Second, cacheSizeBytes: 1000, rpStrategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, defaultTarget: "passthrough:///default", cpName: "grpclb", cpTargetField: "service_name", cpConfig: map[string]json.RawMessage{"childPolicy": json.RawMessage(`[{"pickfirst": {}}]`)}, }, }, } builder := &rlsBB{} for _, test := range tests { t.Run(test.desc, func(t *testing.T) { lbCfg, err := builder.ParseConfig(test.input) if err != nil || !cmp.Equal(lbCfg, test.wantCfg) { t.Errorf("ParseConfig(%s) = {%+v, %v}, want {%+v, nil}", string(test.input), lbCfg, err, test.wantCfg) } }) } } func TestParseConfigErrors(t *testing.T) { tests := []struct { desc string input []byte wantErr string }{ { desc: "empty input", input: nil, wantErr: "rls: json unmarshal failed for service config", }, { desc: "bad json", input: []byte(`bad bad json`), wantErr: "rls: json unmarshal failed for service config", }, { desc: "bad grpcKeyBuilder", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "requiredMatch": true, "names": ["v1"]}] }] } }`), wantErr: "rls: GrpcKeyBuilder in RouteLookupConfig has required_match field set", }, { desc: "empty lookup service", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }] } }`), wantErr: "rls: empty lookup_service in service config", }, { desc: "invalid lookup service URI", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], "lookupService": "badScheme:///target" } }`), wantErr: "rls: invalid target URI in lookup_service", }, { desc: "invalid lookup service timeout", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], 
"lookupService": "passthrough:///target", "lookupServiceTimeout" : "315576000001s" } }`), wantErr: "bad Duration: time: invalid duration", }, { desc: "invalid max age", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], "lookupService": "passthrough:///target", "lookupServiceTimeout" : "10s", "maxAge" : "315576000001s" } }`), wantErr: "bad Duration: time: invalid duration", }, { desc: "invalid stale age", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], "lookupService": "passthrough:///target", "lookupServiceTimeout" : "10s", "maxAge" : "10s", "staleAge" : "315576000001s" } }`), wantErr: "bad Duration: time: invalid duration", }, { desc: "invalid max age stale age combo", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], "lookupService": "passthrough:///target", "lookupServiceTimeout" : "10s", "staleAge" : "10s" } }`), wantErr: "rls: stale_age is set, but max_age is not in service config", }, { desc: "invalid cache size", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], "lookupService": "passthrough:///target", "lookupServiceTimeout" : "10s", "maxAge": "30s", "staleAge" : "25s" } }`), wantErr: "rls: cache_size_bytes must be greater than 0 in service config", }, { desc: "invalid request processing strategy", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], "lookupService": "passthrough:///target", "lookupServiceTimeout" : "10s", "maxAge": "30s", "staleAge" : "25s", "cacheSizeBytes": 1000 } }`), 
wantErr: "rls: request_processing_strategy cannot be left unspecified in service config", }, { desc: "request processing strategy without default target", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], "lookupService": "passthrough:///target", "lookupServiceTimeout" : "10s", "maxAge": "30s", "staleAge" : "25s", "cacheSizeBytes": 1000, "request_processing_strategy": "ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS" } }`), wantErr: "default_target is not set", }, { desc: "no child policy", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], "lookupService": "passthrough:///target", "lookupServiceTimeout" : "10s", "maxAge": "30s", "staleAge" : "25s", "cacheSizeBytes": 1000, "request_processing_strategy": "ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS", "defaultTarget": "passthrough:///default" } }`), wantErr: "rls: childPolicy is invalid in service config", }, { desc: "no known child policy", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], "lookupService": "passthrough:///target", "lookupServiceTimeout" : "10s", "maxAge": "30s", "staleAge" : "25s", "cacheSizeBytes": 1000, "request_processing_strategy": "ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS", "defaultTarget": "passthrough:///default" }, "childPolicy": [ {"cds_experimental": {"Cluster": "my-fav-cluster"}}, {"unknown-policy": {"unknown-field": "unknown-value"}} ] }`), wantErr: "rls: childPolicy is invalid in service config", }, { desc: "no childPolicyConfigTargetFieldName", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], "lookupService": "passthrough:///target", "lookupServiceTimeout" : 
"10s", "maxAge": "30s", "staleAge" : "25s", "cacheSizeBytes": 1000, "request_processing_strategy": "ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS", "defaultTarget": "passthrough:///default" }, "childPolicy": [ {"cds_experimental": {"Cluster": "my-fav-cluster"}}, {"unknown-policy": {"unknown-field": "unknown-value"}}, {"grpclb": {}} ] }`), wantErr: "rls: childPolicyConfigTargetFieldName field is not set in service config", }, { desc: "child policy config validation failure", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], "lookupService": "passthrough:///target", "lookupServiceTimeout" : "10s", "maxAge": "30s", "staleAge" : "25s", "cacheSizeBytes": 1000, "request_processing_strategy": "ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS", "defaultTarget": "passthrough:///default" }, "childPolicy": [ {"cds_experimental": {"Cluster": "my-fav-cluster"}}, {"unknown-policy": {"unknown-field": "unknown-value"}}, {"grpclb": {"childPolicy": "not-an-array"}} ], "childPolicyConfigTargetFieldName": "service_name" }`), wantErr: "rls: childPolicy config validation failed", }, } builder := &rlsBB{} for _, test := range tests { t.Run(test.desc, func(t *testing.T) { lbCfg, err := builder.ParseConfig(test.input) if lbCfg != nil || !strings.Contains(fmt.Sprint(err), test.wantErr) { t.Errorf("ParseConfig(%s) = {%+v, %v}, want {nil, %s}", string(test.input), lbCfg, err, test.wantErr) } }) } } func TestValidateChildPolicyConfig(t *testing.T) { jsonCfg := json.RawMessage(`[{"round_robin" : {}}, {"pick_first" : {}}]`) wantChildConfig := map[string]json.RawMessage{"childPolicy": jsonCfg} cp := &loadBalancingConfig{ Name: "grpclb", Config: []byte(`{"childPolicy": [{"round_robin" : {}}, {"pick_first" : {}}]}`), } cpTargetField := "serviceName" gotChildConfig, err := validateChildPolicyConfig(cp, cpTargetField) if err != nil || !cmp.Equal(gotChildConfig, wantChildConfig) { 
t.Errorf("validateChildPolicyConfig(%v, %v) = {%v, %v}, want {%v, nil}", cp, cpTargetField, gotChildConfig, err, wantChildConfig) } } func TestValidateChildPolicyConfigErrors(t *testing.T) { tests := []struct { desc string cp *loadBalancingConfig wantErrPrefix string }{ { desc: "unknown child policy", cp: &loadBalancingConfig{ Name: "unknown", Config: []byte(`{}`), }, wantErrPrefix: "rls: balancer builder not found for child_policy", }, { desc: "balancer builder does not implement ConfigParser", cp: &loadBalancingConfig{ Name: balancerWithoutConfigParserName, Config: []byte(`{}`), }, wantErrPrefix: "rls: balancer builder for child_policy does not implement balancer.ConfigParser", }, { desc: "child policy config parsing failure", cp: &loadBalancingConfig{ Name: "grpclb", Config: []byte(`{"childPolicy": "not-an-array"}`), }, wantErrPrefix: "rls: childPolicy config validation failed", }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { gotChildConfig, gotErr := validateChildPolicyConfig(test.cp, "") if gotChildConfig != nil || !strings.HasPrefix(fmt.Sprint(gotErr), test.wantErrPrefix) { t.Errorf("validateChildPolicyConfig(%v) = {%v, %v}, want {nil, %v}", test.cp, gotChildConfig, gotErr, test.wantErrPrefix) } }) } } grpc-go-1.29.1/balancer/rls/internal/keys/000077500000000000000000000000001365033716300203075ustar00rootroot00000000000000grpc-go-1.29.1/balancer/rls/internal/keys/builder.go000066400000000000000000000163241365033716300222720ustar00rootroot00000000000000// +build go1.10 /* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ // Package keys provides functionality required to build RLS request keys. package keys import ( "errors" "fmt" "sort" "strings" rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/metadata" ) // BuilderMap provides a mapping from a request path to the key builder to be // used for that path. // The BuilderMap is constructed by parsing the RouteLookupConfig received by // the RLS balancer as part of its ServiceConfig, and is used by the picker in // the data path to build the RLS keys to be used for a given request. type BuilderMap map[string]builder // MakeBuilderMap parses the provided RouteLookupConfig proto and returns a map // from paths to key builders. // // The following conditions are validated, and an error is returned if any of // them is not met: // grpc_keybuilders field // * must have at least one entry // * must not have two entries with the same Name // * must not have any entry with a Name with the service field unset or empty // * must not have any entries without a Name // * must not have a headers entry that has required_match set // * must not have two headers entries with the same key within one entry func MakeBuilderMap(cfg *rlspb.RouteLookupConfig) (BuilderMap, error) { kbs := cfg.GetGrpcKeybuilders() if len(kbs) == 0 { return nil, errors.New("rls: RouteLookupConfig does not contain any GrpcKeyBuilder") } bm := make(map[string]builder) for _, kb := range kbs { var matchers []matcher seenKeys := make(map[string]bool) for _, h := range kb.GetHeaders() { if h.GetRequiredMatch() { return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig has required_match field set {%+v}", kbs) } key := h.GetKey() if seenKeys[key] { return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated Key field in headers {%+v}", kbs) } seenKeys[key] = true matchers = append(matchers, 
matcher{key: h.GetKey(), names: h.GetNames()}) } b := builder{matchers: matchers} names := kb.GetNames() if len(names) == 0 { return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig does not contain any Name {%+v}", kbs) } for _, name := range names { if name.GetService() == "" { return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains a Name field with no Service {%+v}", kbs) } if strings.Contains(name.GetMethod(), `/`) { return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains a method with a slash {%+v}", kbs) } path := "/" + name.GetService() + "/" + name.GetMethod() if _, ok := bm[path]; ok { return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated Name field {%+v}", kbs) } bm[path] = b } } return bm, nil } // KeyMap represents the RLS keys to be used for a request. type KeyMap struct { // Map is the representation of an RLS key as a Go map. This is used when // an actual RLS request is to be sent out on the wire, since the // RouteLookupRequest proto expects a Go map. Map map[string]string // Str is the representation of an RLS key as a string, sorted by keys. // Since the RLS keys are part of the cache key in the request cache // maintained by the RLS balancer, and Go maps cannot be used as keys for // Go maps (the cache is implemented as a map), we need a stringified // version of it. Str string } // RLSKey builds the RLS keys to be used for the given request, identified by // the request path and the request headers stored in metadata. func (bm BuilderMap) RLSKey(md metadata.MD, path string) KeyMap { b, ok := bm[path] if !ok { i := strings.LastIndex(path, "/") b, ok = bm[path[:i+1]] if !ok { return KeyMap{} } } return b.keys(md) } // Equal reports whether bm and am represent equivalent BuilderMaps. 
func (bm BuilderMap) Equal(am BuilderMap) bool { if (bm == nil) != (am == nil) { return false } if len(bm) != len(am) { return false } for key, bBuilder := range bm { aBuilder, ok := am[key] if !ok { return false } if !bBuilder.Equal(aBuilder) { return false } } return true } // builder provides the actual functionality of building RLS keys. These are // stored in the BuilderMap. // While processing a pick, the picker looks in the BuilderMap for the // appropriate builder to be used for the given RPC. For each of the matchers // in the found builder, we iterate over the list of request headers (available // as metadata in the context). Once a header matches one of the names in the // matcher, we set the value of the header in the keyMap (with the key being // the one found in the matcher) and move on to the next matcher. If no // KeyBuilder was found in the map, or no header match was found, an empty // keyMap is returned. type builder struct { matchers []matcher } // Equal reports whether b and a represent equivalent key builders. func (b builder) Equal(a builder) bool { if (b.matchers == nil) != (a.matchers == nil) { return false } if len(b.matchers) != len(a.matchers) { return false } // Protobuf serialization maintains the order of repeated fields. Matchers // are specified as a repeated field inside the KeyBuilder proto. If the // order changes, it means that the order in the protobuf changed. We report // this case as not being equal even though the builders could possible be // functionally equal. for i, bMatcher := range b.matchers { aMatcher := a.matchers[i] if !bMatcher.Equal(aMatcher) { return false } } return true } // matcher helps extract a key from request headers based on a given name. type matcher struct { // The key used in the keyMap sent as part of the RLS request. key string // List of header names which can supply the value for this key. names []string } // Equal reports if m and are are equivalent matchers. 
func (m matcher) Equal(a matcher) bool { if m.key != a.key { return false } if (m.names == nil) != (a.names == nil) { return false } if len(m.names) != len(a.names) { return false } for i := 0; i < len(m.names); i++ { if m.names[i] != a.names[i] { return false } } return true } func (b builder) keys(md metadata.MD) KeyMap { kvMap := make(map[string]string) for _, m := range b.matchers { for _, name := range m.names { if vals := md.Get(name); vals != nil { kvMap[m.key] = strings.Join(vals, ",") break } } } return KeyMap{Map: kvMap, Str: mapToString(kvMap)} } func mapToString(kv map[string]string) string { var keys []string for k := range kv { keys = append(keys, k) } sort.Strings(keys) var sb strings.Builder for i, k := range keys { if i != 0 { fmt.Fprint(&sb, ",") } fmt.Fprintf(&sb, "%s=%s", k, kv[k]) } return sb.String() } grpc-go-1.29.1/balancer/rls/internal/keys/builder_test.go000066400000000000000000000364561365033716300233410ustar00rootroot00000000000000// +build go1.10 /* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package keys import ( "fmt" "strings" "testing" "github.com/google/go-cmp/cmp" rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/metadata" ) var ( goodKeyBuilder1 = &rlspb.GrpcKeyBuilder{ Names: []*rlspb.GrpcKeyBuilder_Name{ {Service: "gFoo"}, }, Headers: []*rlspb.NameMatcher{ {Key: "k1", Names: []string{"n1"}}, {Key: "k2", Names: []string{"n1"}}, }, } goodKeyBuilder2 = &rlspb.GrpcKeyBuilder{ Names: []*rlspb.GrpcKeyBuilder_Name{ {Service: "gBar", Method: "method1"}, {Service: "gFoobar"}, }, Headers: []*rlspb.NameMatcher{ {Key: "k1", Names: []string{"n1", "n2"}}, }, } ) func TestMakeBuilderMap(t *testing.T) { wantBuilderMap1 := map[string]builder{ "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}, {key: "k2", names: []string{"n1"}}}}, } wantBuilderMap2 := map[string]builder{ "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}, {key: "k2", names: []string{"n1"}}}}, "/gBar/method1": {matchers: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, "/gFoobar/": {matchers: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, } tests := []struct { desc string cfg *rlspb.RouteLookupConfig wantBuilderMap BuilderMap }{ { desc: "One good GrpcKeyBuilder", cfg: &rlspb.RouteLookupConfig{ GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{goodKeyBuilder1}, }, wantBuilderMap: wantBuilderMap1, }, { desc: "Two good GrpcKeyBuilders", cfg: &rlspb.RouteLookupConfig{ GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{goodKeyBuilder1, goodKeyBuilder2}, }, wantBuilderMap: wantBuilderMap2, }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { builderMap, err := MakeBuilderMap(test.cfg) if err != nil || !builderMap.Equal(test.wantBuilderMap) { t.Errorf("MakeBuilderMap(%+v) returned {%v, %v}, want: {%v, nil}", test.cfg, builderMap, err, test.wantBuilderMap) } }) } } func TestMakeBuilderMapErrors(t *testing.T) { emptyServiceKeyBuilder := &rlspb.GrpcKeyBuilder{ Names: []*rlspb.GrpcKeyBuilder_Name{ {Service: 
"bFoo", Method: "method1"}, {Service: "bBar"}, {Method: "method1"}, }, Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, } requiredMatchKeyBuilder := &rlspb.GrpcKeyBuilder{ Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "bFoo", Method: "method1"}}, Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}, RequiredMatch: true}}, } repeatedHeadersKeyBuilder := &rlspb.GrpcKeyBuilder{ Names: []*rlspb.GrpcKeyBuilder_Name{ {Service: "gBar", Method: "method1"}, {Service: "gFoobar"}, }, Headers: []*rlspb.NameMatcher{ {Key: "k1", Names: []string{"n1", "n2"}}, {Key: "k1", Names: []string{"n1", "n2"}}, }, } methodNameWithSlashKeyBuilder := &rlspb.GrpcKeyBuilder{ Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "gBar", Method: "method1/foo"}}, Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, } tests := []struct { desc string cfg *rlspb.RouteLookupConfig wantErrPrefix string }{ { desc: "No GrpcKeyBuilder", cfg: &rlspb.RouteLookupConfig{}, wantErrPrefix: "rls: RouteLookupConfig does not contain any GrpcKeyBuilder", }, { desc: "Two GrpcKeyBuilders with same Name", cfg: &rlspb.RouteLookupConfig{ GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{goodKeyBuilder1, goodKeyBuilder1}, }, wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains repeated Name field", }, { desc: "GrpcKeyBuilder with empty Service field", cfg: &rlspb.RouteLookupConfig{ GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{emptyServiceKeyBuilder, goodKeyBuilder1}, }, wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains a Name field with no Service", }, { desc: "GrpcKeyBuilder with no Name", cfg: &rlspb.RouteLookupConfig{ GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{{}, goodKeyBuilder1}, }, wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig does not contain any Name", }, { desc: "GrpcKeyBuilder with requiredMatch field set", cfg: &rlspb.RouteLookupConfig{ GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{requiredMatchKeyBuilder, goodKeyBuilder1}, }, 
wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig has required_match field set", }, { desc: "GrpcKeyBuilder two headers with same key", cfg: &rlspb.RouteLookupConfig{ GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{repeatedHeadersKeyBuilder, goodKeyBuilder1}, }, wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains repeated Key field in headers", }, { desc: "GrpcKeyBuilder with slash in method name", cfg: &rlspb.RouteLookupConfig{ GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{methodNameWithSlashKeyBuilder}, }, wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains a method with a slash", }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { builderMap, err := MakeBuilderMap(test.cfg) if builderMap != nil || !strings.HasPrefix(fmt.Sprint(err), test.wantErrPrefix) { t.Errorf("MakeBuilderMap(%+v) returned {%v, %v}, want: {nil, %v}", test.cfg, builderMap, err, test.wantErrPrefix) } }) } } func TestRLSKey(t *testing.T) { bm, err := MakeBuilderMap(&rlspb.RouteLookupConfig{ GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{goodKeyBuilder1, goodKeyBuilder2}, }) if err != nil { t.Fatalf("MakeBuilderMap() failed: %v", err) } tests := []struct { desc string path string md metadata.MD wantKM KeyMap }{ { // No keyBuilder is found for the provided service. desc: "service not found in key builder map", path: "/notFoundService/method", md: metadata.Pairs("n1", "v1", "n2", "v2"), wantKM: KeyMap{}, }, { // No keyBuilder is found for the provided method. desc: "method not found in key builder map", path: "/gBar/notFoundMethod", md: metadata.Pairs("n1", "v1", "n2", "v2"), wantKM: KeyMap{}, }, { // A keyBuilder is found, but none of the headers match. desc: "directPathMatch-NoMatchingKey", path: "/gBar/method1", md: metadata.Pairs("notMatchingKey", "v1"), wantKM: KeyMap{Map: map[string]string{}, Str: ""}, }, { // A keyBuilder is found, and a single headers matches. 
desc: "directPathMatch-SingleKey", path: "/gBar/method1", md: metadata.Pairs("n1", "v1"), wantKM: KeyMap{Map: map[string]string{"k1": "v1"}, Str: "k1=v1"}, }, { // A keyBuilder is found, and multiple headers match, but the first // match is chosen. desc: "directPathMatch-FirstMatchingKey", path: "/gBar/method1", md: metadata.Pairs("n2", "v2", "n1", "v1"), wantKM: KeyMap{Map: map[string]string{"k1": "v1"}, Str: "k1=v1"}, }, { // A keyBuilder is found as a wildcard match, but none of the // headers match. desc: "wildcardPathMatch-NoMatchingKey", path: "/gFoobar/method1", md: metadata.Pairs("notMatchingKey", "v1"), wantKM: KeyMap{Map: map[string]string{}, Str: ""}, }, { // A keyBuilder is found as a wildcard match, and a single headers // matches. desc: "wildcardPathMatch-SingleKey", path: "/gFoobar/method1", md: metadata.Pairs("n1", "v1"), wantKM: KeyMap{Map: map[string]string{"k1": "v1"}, Str: "k1=v1"}, }, { // A keyBuilder is found as a wildcard match, and multiple headers // match, but the first match is chosen. desc: "wildcardPathMatch-FirstMatchingKey", path: "/gFoobar/method1", md: metadata.Pairs("n2", "v2", "n1", "v1"), wantKM: KeyMap{Map: map[string]string{"k1": "v1"}, Str: "k1=v1"}, }, { // Multiple matchers find hits in the provided request headers. desc: "multipleMatchers", path: "/gFoo/method1", md: metadata.Pairs("n2", "v2", "n1", "v1"), wantKM: KeyMap{Map: map[string]string{"k1": "v1", "k2": "v1"}, Str: "k1=v1,k2=v1"}, }, { // A match is found for a header which is specified multiple times. // So, the values are joined with commas separating them. 
desc: "commaSeparated", path: "/gBar/method1", md: metadata.Pairs("n1", "v1", "n1", "v2", "n1", "v3"), wantKM: KeyMap{Map: map[string]string{"k1": "v1,v2,v3"}, Str: "k1=v1,v2,v3"}, }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { if gotKM := bm.RLSKey(test.md, test.path); !cmp.Equal(gotKM, test.wantKM) { t.Errorf("RLSKey(%+v, %s) = %+v, want %+v", test.md, test.path, gotKM, test.wantKM) } }) } } func TestMapToString(t *testing.T) { tests := []struct { desc string input map[string]string wantStr string }{ { desc: "empty map", input: nil, wantStr: "", }, { desc: "one key", input: map[string]string{ "k1": "v1", }, wantStr: "k1=v1", }, { desc: "sorted keys", input: map[string]string{ "k1": "v1", "k2": "v2", "k3": "v3", }, wantStr: "k1=v1,k2=v2,k3=v3", }, { desc: "unsorted keys", input: map[string]string{ "k3": "v3", "k1": "v1", "k2": "v2", }, wantStr: "k1=v1,k2=v2,k3=v3", }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { if gotStr := mapToString(test.input); gotStr != test.wantStr { t.Errorf("mapToString(%v) = %s, want %s", test.input, gotStr, test.wantStr) } }) } } func TestBuilderMapEqual(t *testing.T) { tests := []struct { desc string a BuilderMap b BuilderMap wantEqual bool }{ { desc: "nil builder maps", a: nil, b: nil, wantEqual: true, }, { desc: "empty builder maps", a: make(map[string]builder), b: make(map[string]builder), wantEqual: true, }, { desc: "nil and non-nil builder maps", a: nil, b: map[string]builder{"/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}}, wantEqual: false, }, { desc: "empty and non-empty builder maps", a: make(map[string]builder), b: map[string]builder{"/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}}, wantEqual: false, }, { desc: "different number of map keys", a: map[string]builder{ "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, }, b: map[string]builder{ "/gFoo/": 
{matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, }, wantEqual: false, }, { desc: "different map keys", a: map[string]builder{ "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, }, b: map[string]builder{ "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, }, wantEqual: false, }, { desc: "equal keys different values", a: map[string]builder{ "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, }, b: map[string]builder{ "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, }, wantEqual: false, }, { desc: "good match", a: map[string]builder{ "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, }, b: map[string]builder{ "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, }, wantEqual: true, }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { if gotEqual := test.a.Equal(test.b); gotEqual != test.wantEqual { t.Errorf("BuilderMap.Equal(%v, %v) = %v, want %v", test.a, test.b, gotEqual, test.wantEqual) } }) } } func TestBuilderEqual(t *testing.T) { tests := []struct { desc string a builder b builder wantEqual bool }{ { desc: "nil builders", a: builder{matchers: nil}, b: builder{matchers: nil}, wantEqual: true, }, { desc: "empty builders", a: builder{matchers: []matcher{}}, b: builder{matchers: []matcher{}}, wantEqual: true, }, { desc: "nil and non-nil builders", a: builder{matchers: nil}, b: builder{matchers: []matcher{}}, wantEqual: false, }, { desc: "empty and non-empty builders", a: builder{matchers: []matcher{}}, b: builder{matchers: []matcher{{key: "foo"}}}, wantEqual: false, }, { desc: "different number of matchers", a: builder{matchers: []matcher{{key: "foo"}, {key: 
"bar"}}}, b: builder{matchers: []matcher{{key: "foo"}}}, wantEqual: false, }, { desc: "equal number but differing matchers", a: builder{matchers: []matcher{{key: "bar"}}}, b: builder{matchers: []matcher{{key: "foo"}}}, wantEqual: false, }, { desc: "good match", a: builder{matchers: []matcher{{key: "foo"}}}, b: builder{matchers: []matcher{{key: "foo"}}}, wantEqual: true, }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { t.Run(test.desc, func(t *testing.T) { if gotEqual := test.a.Equal(test.b); gotEqual != test.wantEqual { t.Errorf("builder.Equal(%v, %v) = %v, want %v", test.a, test.b, gotEqual, test.wantEqual) } }) }) } } // matcher helps extract a key from request headers based on a given name. func TestMatcherEqual(t *testing.T) { tests := []struct { desc string a matcher b matcher wantEqual bool }{ { desc: "different keys", a: matcher{key: "foo"}, b: matcher{key: "bar"}, wantEqual: false, }, { desc: "different number of names", a: matcher{key: "foo", names: []string{"v1", "v2"}}, b: matcher{key: "foo", names: []string{"v1"}}, wantEqual: false, }, { desc: "equal number but differing names", a: matcher{key: "foo", names: []string{"v1", "v2"}}, b: matcher{key: "foo", names: []string{"v1", "v22"}}, wantEqual: false, }, { desc: "same names in different order", a: matcher{key: "foo", names: []string{"v2", "v1"}}, b: matcher{key: "foo", names: []string{"v1", "v3"}}, wantEqual: false, }, { desc: "good match", a: matcher{key: "foo", names: []string{"v1", "v2"}}, b: matcher{key: "foo", names: []string{"v1", "v2"}}, wantEqual: true, }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { if gotEqual := test.a.Equal(test.b); gotEqual != test.wantEqual { t.Errorf("matcher.Equal(%v, %v) = %v, want %v", test.a, test.b, gotEqual, test.wantEqual) } }) } } grpc-go-1.29.1/balancer/rls/internal/picker.go000066400000000000000000000162731365033716300211510ustar00rootroot00000000000000// +build go1.10 /* * * Copyright 2020 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package rls import ( "errors" "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/rls/internal/cache" "google.golang.org/grpc/balancer/rls/internal/keys" rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/metadata" ) var errRLSThrottled = balancer.TransientFailureError(errors.New("RLS call throttled at client side")) // Compile time assert to ensure we implement V2Picker. var _ balancer.V2Picker = (*rlsPicker)(nil) // RLS rlsPicker selects the subConn to be used for a particular RPC. It does // not manage subConns directly and usually deletegates to pickers provided by // child policies. // // The RLS LB policy creates a new rlsPicker object whenever its ServiceConfig // is updated and provides a bunch of hooks for the rlsPicker to get the latest // state that it can used to make its decision. type rlsPicker struct { // The keyBuilder map used to generate RLS keys for the RPC. This is built // by the LB policy based on the received ServiceConfig. kbm keys.BuilderMap // This is the request processing strategy as indicated by the LB policy's // ServiceConfig. This controls how to process a RPC when the data required // to make the pick decision is not in the cache. strategy rlspb.RouteLookupConfig_RequestProcessingStrategy // The following hooks are setup by the LB policy to enable the rlsPicker to // access state stored in the policy. 
This approach has the following // advantages: // 1. The rlsPicker is loosely coupled with the LB policy in the sense that // updates happening on the LB policy like the receipt of an RLS // response, or an update to the default rlsPicker etc are not explicitly // pushed to the rlsPicker, but are readily available to the rlsPicker // when it invokes these hooks. And the LB policy takes care of // synchronizing access to these shared state. // 2. It makes unit testing the rlsPicker easy since any number of these // hooks could be overridden. // readCache is used to read from the data cache and the pending request // map in an atomic fashion. The first return parameter is the entry in the // data cache, and the second indicates whether an entry for the same key // is present in the pending cache. readCache func(cache.Key) (*cache.Entry, bool) // shouldThrottle decides if the current RPC should be throttled at the // client side. It uses an adaptive throttling algorithm. shouldThrottle func() bool // startRLS kicks off an RLS request in the background for the provided RPC // path and keyMap. An entry in the pending request map is created before // sending out the request and an entry in the data cache is created or // updated upon receipt of a response. See implementation in the LB policy // for details. startRLS func(string, keys.KeyMap) // defaultPick enables the rlsPicker to delegate the pick decision to the // rlsPicker returned by the child LB policy pointing to the default target // specified in the service config. defaultPick func(balancer.PickInfo) (balancer.PickResult, error) } // This helper function decides if the pick should delegate to the default // rlsPicker based on the request processing strategy. This is used when the // data cache does not have a valid entry for the current RPC and the RLS // request is throttled, or if the current data cache entry is in backoff. 
func (p *rlsPicker) shouldDelegateToDefault() bool { return p.strategy == rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR || p.strategy == rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS } // Pick makes the routing decision for every outbound RPC. func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // For every incoming request, we first build the RLS keys using the // keyBuilder we received from the LB policy. If no metadata is present in // the context, we end up using an empty key. km := keys.KeyMap{} md, ok := metadata.FromOutgoingContext(info.Ctx) if ok { km = p.kbm.RLSKey(md, info.FullMethodName) } // We use the LB policy hook to read the data cache and the pending request // map (whether or not an entry exists) for the RPC path and the generated // RLS keys. We will end up kicking off an RLS request only if there is no // pending request for the current RPC path and keys, and either we didn't // find an entry in the data cache or the entry was stale and it wasn't in // backoff. startRequest := false now := time.Now() entry, pending := p.readCache(cache.Key{Path: info.FullMethodName, KeyMap: km.Str}) if entry == nil { startRequest = true } else { entry.Mu.Lock() defer entry.Mu.Unlock() if entry.StaleTime.Before(now) && entry.BackoffTime.Before(now) { // This is the proactive cache refresh. startRequest = true } } if startRequest && !pending { if p.shouldThrottle() { // The entry doesn't exist or has expired and the new RLS request // has been throttled. Treat it as an error and delegate to default // pick or fail the pick, based on the request processing strategy. if entry == nil || entry.ExpiryTime.Before(now) { if p.shouldDelegateToDefault() { return p.defaultPick(info) } return balancer.PickResult{}, errRLSThrottled } // The proactive refresh has been throttled. Nothing to worry, just // keep using the existing entry. 
} else { p.startRLS(info.FullMethodName, km) } } if entry != nil { if entry.ExpiryTime.After(now) { // This is the jolly good case where we have found a valid entry in // the data cache. We delegate to the LB policy associated with // this cache entry. return entry.ChildPicker.Pick(info) } else if entry.BackoffTime.After(now) { // The entry has expired, but is in backoff. We either delegate to // the default rlsPicker or return the error from the last failed // RLS request for this entry. if p.shouldDelegateToDefault() { return p.defaultPick(info) } return balancer.PickResult{}, entry.CallStatus } } // Either we didn't find an entry or found an entry which had expired and // was not in backoff (which is also essentially equivalent to not finding // an entry), and we started an RLS request in the background. We either // queue the pick or delegate to the default pick. In the former case, upon // receipt of an RLS response, the LB policy will send a new rlsPicker to // the channel, and the pick will be retried. if p.strategy == rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR || p.strategy == rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR { return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } return p.defaultPick(info) } grpc-go-1.29.1/balancer/rls/internal/picker_test.go000066400000000000000000000477421365033716300222150ustar00rootroot00000000000000// +build go1.10 /* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package rls import ( "context" "errors" "fmt" "math" "testing" "time" "google.golang.org/grpc/internal/grpcrand" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/rls/internal/cache" "google.golang.org/grpc/balancer/rls/internal/keys" rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/metadata" ) const defaultTestMaxAge = 5 * time.Second func initKeyBuilderMap() (keys.BuilderMap, error) { kb1 := &rlspb.GrpcKeyBuilder{ Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "gFoo"}}, Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1"}}}, } kb2 := &rlspb.GrpcKeyBuilder{ Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "gBar", Method: "method1"}}, Headers: []*rlspb.NameMatcher{{Key: "k2", Names: []string{"n21", "n22"}}}, } kb3 := &rlspb.GrpcKeyBuilder{ Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "gFoobar"}}, Headers: []*rlspb.NameMatcher{{Key: "k3", Names: []string{"n3"}}}, } return keys.MakeBuilderMap(&rlspb.RouteLookupConfig{ GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{kb1, kb2, kb3}, }) } // fakeSubConn embeds the balancer.SubConn interface and contains an id which // helps verify that the expected subConn was returned by the rlsPicker. type fakeSubConn struct { balancer.SubConn id int } // fakeChildPicker sends a PickResult with a fakeSubConn with the configured id. type fakeChildPicker struct { id int } func (p *fakeChildPicker) Pick(_ balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{SubConn: &fakeSubConn{id: p.id}}, nil } // TestPickKeyBuilder verifies the different possible scenarios for forming an // RLS key for an incoming RPC. 
func TestPickKeyBuilder(t *testing.T) { kbm, err := initKeyBuilderMap() if err != nil { t.Fatalf("Failed to create keyBuilderMap: %v", err) } tests := []struct { desc string rpcPath string md metadata.MD wantKey cache.Key }{ { desc: "non existent service in keyBuilder map", rpcPath: "/gNonExistentService/method", md: metadata.New(map[string]string{"n1": "v1", "n3": "v3"}), wantKey: cache.Key{Path: "/gNonExistentService/method", KeyMap: ""}, }, { desc: "no metadata in incoming context", rpcPath: "/gFoo/method", md: metadata.MD{}, wantKey: cache.Key{Path: "/gFoo/method", KeyMap: ""}, }, { desc: "keyBuilderMatch", rpcPath: "/gFoo/method", md: metadata.New(map[string]string{"n1": "v1", "n3": "v3"}), wantKey: cache.Key{Path: "/gFoo/method", KeyMap: "k1=v1"}, }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { randID := grpcrand.Intn(math.MaxInt32) p := rlsPicker{ kbm: kbm, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, readCache: func(key cache.Key) (*cache.Entry, bool) { if !cmp.Equal(key, test.wantKey) { t.Fatalf("rlsPicker using cacheKey %v, want %v", key, test.wantKey) } now := time.Now() return &cache.Entry{ ExpiryTime: now.Add(defaultTestMaxAge), StaleTime: now.Add(defaultTestMaxAge), // Cache entry is configured with a child policy whose // rlsPicker always returns an empty PickResult and nil // error. ChildPicker: &fakeChildPicker{id: randID}, }, false }, // The other hooks are not set here because they are not expected to be // invoked for these cases and if they get invoked, they will panic. 
} gotResult, err := p.Pick(balancer.PickInfo{ FullMethodName: test.rpcPath, Ctx: metadata.NewOutgoingContext(context.Background(), test.md), }) if err != nil { t.Fatalf("Pick() failed with error: %v", err) } sc, ok := gotResult.SubConn.(*fakeSubConn) if !ok { t.Fatalf("Pick() returned a SubConn of type %T, want %T", gotResult.SubConn, &fakeSubConn{}) } if sc.id != randID { t.Fatalf("Pick() returned SubConn %d, want %d", sc.id, randID) } }) } } func TestPick(t *testing.T) { const ( rpcPath = "/gFoo/method" wantKeyMapStr = "k1=v1" ) kbm, err := initKeyBuilderMap() if err != nil { t.Fatalf("Failed to create keyBuilderMap: %v", err) } md := metadata.New(map[string]string{"n1": "v1", "n3": "v3"}) wantKey := cache.Key{Path: rpcPath, KeyMap: wantKeyMapStr} rlsLastErr := errors.New("last RLS request failed") tests := []struct { desc string // The cache entry, as returned by the overridden readCache hook. cacheEntry *cache.Entry // Whether or not a pending entry exists, as returned by the overridden // readCache hook. pending bool // Whether or not the RLS request should be throttled. throttle bool // Whether or not the test is expected to make a new RLS request. newRLSRequest bool // Whether or not the test ends up delegating to the default pick. useDefaultPick bool // Whether or not the test ends up delegating to the child policy in // the cache entry. useChildPick bool // Request processing strategy as used by the rlsPicker. strategy rlspb.RouteLookupConfig_RequestProcessingStrategy // Expected error returned by the rlsPicker under test. 
wantErr error }{ { desc: "cacheMiss_pending_defaultTargetOnError", pending: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, wantErr: balancer.ErrNoSubConnAvailable, }, { desc: "cacheMiss_pending_clientSeesError", pending: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR, wantErr: balancer.ErrNoSubConnAvailable, }, { desc: "cacheMiss_pending_defaultTargetOnMiss", pending: true, useDefaultPick: true, strategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, wantErr: nil, }, { desc: "cacheMiss_noPending_notThrottled_defaultTargetOnError", newRLSRequest: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, wantErr: balancer.ErrNoSubConnAvailable, }, { desc: "cacheMiss_noPending_notThrottled_clientSeesError", newRLSRequest: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR, wantErr: balancer.ErrNoSubConnAvailable, }, { desc: "cacheMiss_noPending_notThrottled_defaultTargetOnMiss", newRLSRequest: true, useDefaultPick: true, strategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, wantErr: nil, }, { desc: "cacheMiss_noPending_throttled_defaultTargetOnError", throttle: true, useDefaultPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, wantErr: nil, }, { desc: "cacheMiss_noPending_throttled_clientSeesError", throttle: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR, wantErr: errRLSThrottled, }, { desc: "cacheMiss_noPending_throttled_defaultTargetOnMiss", strategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, throttle: true, useDefaultPick: true, wantErr: nil, }, { desc: "cacheHit_noPending_boExpired_dataExpired_throttled_defaultTargetOnError", cacheEntry: &cache.Entry{}, // Everything is expired in this entry throttle: true, useDefaultPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, wantErr: nil, }, { desc: 
"cacheHit_noPending_boExpired_dataExpired_throttled_clientSeesError", cacheEntry: &cache.Entry{}, // Everything is expired in this entry throttle: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR, wantErr: errRLSThrottled, }, { desc: "cacheHit_noPending_boExpired_dataExpired_throttled_defaultTargetOnMiss", cacheEntry: &cache.Entry{}, // Everything is expired in this entry throttle: true, useDefaultPick: true, strategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, wantErr: nil, }, { desc: "cacheHit_noPending_stale_boExpired_dataNotExpired_throttled_defaultTargetOnMiss", cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, throttle: true, // Proactive refresh is throttled. useChildPick: true, strategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, wantErr: nil, }, { desc: "cacheHit_noPending_stale_boExpired_dataNotExpired_throttled_clientSeesError", cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, throttle: true, // Proactive refresh is throttled. useChildPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR, wantErr: nil, }, { desc: "cacheHit_noPending_stale_boExpired_dataNotExpired_throttled_defaultTargetOnError", cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, throttle: true, // Proactive refresh is throttled. 
useChildPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, wantErr: nil, }, { desc: "cacheHit_noPending_boExpired_dataExpired_notThrottled_defaultTargetOnError", cacheEntry: &cache.Entry{}, // Everything is expired in this entry newRLSRequest: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, wantErr: balancer.ErrNoSubConnAvailable, }, { desc: "cacheHit_noPending_boExpired_dataExpired_notThrottled_clientSeesError", cacheEntry: &cache.Entry{}, // Everything is expired in this entry newRLSRequest: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR, wantErr: balancer.ErrNoSubConnAvailable, }, { desc: "cacheHit_noPending_boExpired_dataExpired_notThrottled_defaultTargetOnMiss", cacheEntry: &cache.Entry{}, // Everything is expired in this entry newRLSRequest: true, useDefaultPick: true, strategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, wantErr: nil, }, { desc: "cacheHit_noPending_stale_boExpired_dataNotExpired_notThrottled_defaultTargetOnMiss", cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, newRLSRequest: true, // Proactive refresh. useChildPick: true, strategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, wantErr: nil, }, { desc: "cacheHit_noPending_stale_boExpired_dataNotExpired_notThrottled_clientSeesError", cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, newRLSRequest: true, // Proactive refresh. useChildPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR, wantErr: nil, }, { desc: "cacheHit_noPending_stale_boExpired_dataNotExpired_notThrottled_defaultTargetOnError", cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, newRLSRequest: true, // Proactive refresh. 
useChildPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, wantErr: nil, }, { desc: "cacheHit_noPending_stale_boNotExpired_dataExpired_defaultTargetOnError", cacheEntry: &cache.Entry{BackoffTime: time.Now().Add(defaultTestMaxAge)}, useDefaultPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, wantErr: nil, }, { desc: "cacheHit_noPending_stale_boNotExpired_dataExpired_defaultTargetOnMiss", cacheEntry: &cache.Entry{BackoffTime: time.Now().Add(defaultTestMaxAge)}, useDefaultPick: true, strategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, wantErr: nil, }, { desc: "cacheHit_noPending_stale_boNotExpired_dataExpired_clientSeesError", cacheEntry: &cache.Entry{ BackoffTime: time.Now().Add(defaultTestMaxAge), CallStatus: rlsLastErr, }, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR, wantErr: rlsLastErr, }, { desc: "cacheHit_noPending_stale_boNotExpired_dataNotExpired_defaultTargetOnError", cacheEntry: &cache.Entry{ ExpiryTime: time.Now().Add(defaultTestMaxAge), BackoffTime: time.Now().Add(defaultTestMaxAge), CallStatus: rlsLastErr, }, useChildPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, wantErr: nil, }, { desc: "cacheHit_noPending_stale_boNotExpired_dataNotExpired_defaultTargetOnMiss", cacheEntry: &cache.Entry{ ExpiryTime: time.Now().Add(defaultTestMaxAge), BackoffTime: time.Now().Add(defaultTestMaxAge), CallStatus: rlsLastErr, }, useChildPick: true, strategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, wantErr: nil, }, { desc: "cacheHit_noPending_stale_boNotExpired_dataNotExpired_clientSeesError", cacheEntry: &cache.Entry{ ExpiryTime: time.Now().Add(defaultTestMaxAge), BackoffTime: time.Now().Add(defaultTestMaxAge), CallStatus: rlsLastErr, }, useChildPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR, wantErr: nil, }, { desc: "cacheHit_noPending_notStale_dataNotExpired_defaultTargetOnError", 
cacheEntry: &cache.Entry{ ExpiryTime: time.Now().Add(defaultTestMaxAge), StaleTime: time.Now().Add(defaultTestMaxAge), }, useChildPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, wantErr: nil, }, { desc: "cacheHit_noPending_notStale_dataNotExpired_defaultTargetOnMiss", cacheEntry: &cache.Entry{ ExpiryTime: time.Now().Add(defaultTestMaxAge), StaleTime: time.Now().Add(defaultTestMaxAge), }, useChildPick: true, strategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, wantErr: nil, }, { desc: "cacheHit_noPending_notStale_dataNotExpired_clientSeesError", cacheEntry: &cache.Entry{ ExpiryTime: time.Now().Add(defaultTestMaxAge), StaleTime: time.Now().Add(defaultTestMaxAge), }, useChildPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR, wantErr: nil, }, { desc: "cacheHit_pending_dataExpired_boExpired_defaultTargetOnError", cacheEntry: &cache.Entry{}, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, wantErr: balancer.ErrNoSubConnAvailable, }, { desc: "cacheHit_pending_dataExpired_boExpired_defaultTargetOnMiss", cacheEntry: &cache.Entry{}, pending: true, useDefaultPick: true, strategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, wantErr: nil, }, { desc: "cacheHit_pending_dataExpired_boExpired_clientSeesError", cacheEntry: &cache.Entry{}, pending: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR, wantErr: balancer.ErrNoSubConnAvailable, }, { desc: "cacheHit_pending_dataExpired_boNotExpired_defaultTargetOnError", cacheEntry: &cache.Entry{ BackoffTime: time.Now().Add(defaultTestMaxAge), CallStatus: rlsLastErr, }, useDefaultPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, wantErr: nil, }, { desc: "cacheHit_pending_dataExpired_boNotExpired_defaultTargetOnMiss", cacheEntry: &cache.Entry{ BackoffTime: time.Now().Add(defaultTestMaxAge), CallStatus: rlsLastErr, }, pending: true, useDefaultPick: true, strategy: 
rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, wantErr: nil, }, { desc: "cacheHit_pending_dataExpired_boNotExpired_clientSeesError", cacheEntry: &cache.Entry{ BackoffTime: time.Now().Add(defaultTestMaxAge), CallStatus: rlsLastErr, }, pending: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR, wantErr: rlsLastErr, }, { desc: "cacheHit_pending_dataNotExpired_defaultTargetOnError", cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, pending: true, useChildPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR, wantErr: nil, }, { desc: "cacheHit_pending_dataNotExpired_defaultTargetOnMiss", cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, pending: true, useChildPick: true, strategy: rlspb.RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS, wantErr: nil, }, { desc: "cacheHit_pending_dataNotExpired_clientSeesError", cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, pending: true, useChildPick: true, strategy: rlspb.RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR, wantErr: nil, }, } for _, test := range tests { t.Run(test.desc, func(t *testing.T) { rlsCh := make(chan error, 1) randID := grpcrand.Intn(math.MaxInt32) // We instantiate a fakeChildPicker which will return a fakeSubConn // with configured id. Either the childPicker or the defaultPicker // is configured to use this fakePicker based on whether // useChidlPick or useDefaultPick is set in the test. 
childPicker := &fakeChildPicker{id: randID} p := rlsPicker{ kbm: kbm, strategy: test.strategy, readCache: func(key cache.Key) (*cache.Entry, bool) { if !cmp.Equal(key, wantKey) { t.Fatalf("cache lookup using cacheKey %v, want %v", key, wantKey) } if test.useChildPick { test.cacheEntry.ChildPicker = childPicker } return test.cacheEntry, test.pending }, shouldThrottle: func() bool { return test.throttle }, startRLS: func(path string, km keys.KeyMap) { if !test.newRLSRequest { rlsCh <- errors.New("RLS request attempted when none was expected") return } if path != rpcPath { rlsCh <- fmt.Errorf("RLS request initiated for rpcPath %s, want %s", path, rpcPath) return } if km.Str != wantKeyMapStr { rlsCh <- fmt.Errorf("RLS request initiated with keys %v, want %v", km.Str, wantKeyMapStr) return } rlsCh <- nil }, defaultPick: func(info balancer.PickInfo) (balancer.PickResult, error) { if !test.useDefaultPick { return balancer.PickResult{}, errors.New("Using default pick when the test doesn't want to use default pick") } return childPicker.Pick(info) }, } gotResult, err := p.Pick(balancer.PickInfo{ FullMethodName: rpcPath, Ctx: metadata.NewOutgoingContext(context.Background(), md), }) if err != test.wantErr { t.Fatalf("Pick() returned error {%v}, want {%v}", err, test.wantErr) } if test.useChildPick || test.useDefaultPick { // For cases where the pick is not queued, but is delegated to // either the child rlsPicker or the default rlsPicker, we // verify that the expected fakeSubConn is returned. sc, ok := gotResult.SubConn.(*fakeSubConn) if !ok { t.Fatalf("Pick() returned a SubConn of type %T, want %T", gotResult.SubConn, &fakeSubConn{}) } if sc.id != randID { t.Fatalf("Pick() returned SubConn %d, want %d", sc.id, randID) } } // If the test specified that a new RLS request should be made, // verify it. 
if test.newRLSRequest { timer := time.NewTimer(defaultTestTimeout) select { case err := <-rlsCh: timer.Stop() if err != nil { t.Fatal(err) } case <-timer.C: t.Fatal("Timeout waiting for RLS request to be sent out") } } }) } } grpc-go-1.29.1/balancer/rls/internal/proto/000077500000000000000000000000001365033716300204775ustar00rootroot00000000000000grpc-go-1.29.1/balancer/rls/internal/proto/grpc_lookup_v1/000077500000000000000000000000001365033716300234315ustar00rootroot00000000000000grpc-go-1.29.1/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go000066400000000000000000000251021365033716300251600ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc/rls/grpc_lookup_v1/rls.proto package grpc_lookup_v1 import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type RouteLookupRequest struct { // Full host name of the target server, e.g. firestore.googleapis.com. // Only set for gRPC requests; HTTP requests must use key_map explicitly. Server string `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"` // Full path of the request, i.e. "/service/method". // Only set for gRPC requests; HTTP requests must use key_map explicitly. 
Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` // Target type allows the client to specify what kind of target format it // would like from RLS to allow it to find the regional server, e.g. "grpc". TargetType string `protobuf:"bytes,3,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` // Map of key values extracted via key builders for the gRPC or HTTP request. KeyMap map[string]string `protobuf:"bytes,4,rep,name=key_map,json=keyMap,proto3" json:"key_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RouteLookupRequest) Reset() { *m = RouteLookupRequest{} } func (m *RouteLookupRequest) String() string { return proto.CompactTextString(m) } func (*RouteLookupRequest) ProtoMessage() {} func (*RouteLookupRequest) Descriptor() ([]byte, []int) { return fileDescriptor_5fe9649e373b9d12, []int{0} } func (m *RouteLookupRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RouteLookupRequest.Unmarshal(m, b) } func (m *RouteLookupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RouteLookupRequest.Marshal(b, m, deterministic) } func (m *RouteLookupRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RouteLookupRequest.Merge(m, src) } func (m *RouteLookupRequest) XXX_Size() int { return xxx_messageInfo_RouteLookupRequest.Size(m) } func (m *RouteLookupRequest) XXX_DiscardUnknown() { xxx_messageInfo_RouteLookupRequest.DiscardUnknown(m) } var xxx_messageInfo_RouteLookupRequest proto.InternalMessageInfo func (m *RouteLookupRequest) GetServer() string { if m != nil { return m.Server } return "" } func (m *RouteLookupRequest) GetPath() string { if m != nil { return m.Path } return "" } func (m *RouteLookupRequest) GetTargetType() string { if m != nil { return m.TargetType } return "" } func (m 
*RouteLookupRequest) GetKeyMap() map[string]string { if m != nil { return m.KeyMap } return nil } type RouteLookupResponse struct { // Actual addressable entity to use for routing decision, using syntax // requested by the request target_type. Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` // Optional header value to pass along to AFE in the X-Google-RLS-Data header. // Cached with "target" and sent with all requests that match the request key. // Allows the RLS to pass its work product to the eventual target. HeaderData string `protobuf:"bytes,2,opt,name=header_data,json=headerData,proto3" json:"header_data,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RouteLookupResponse) Reset() { *m = RouteLookupResponse{} } func (m *RouteLookupResponse) String() string { return proto.CompactTextString(m) } func (*RouteLookupResponse) ProtoMessage() {} func (*RouteLookupResponse) Descriptor() ([]byte, []int) { return fileDescriptor_5fe9649e373b9d12, []int{1} } func (m *RouteLookupResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RouteLookupResponse.Unmarshal(m, b) } func (m *RouteLookupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RouteLookupResponse.Marshal(b, m, deterministic) } func (m *RouteLookupResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RouteLookupResponse.Merge(m, src) } func (m *RouteLookupResponse) XXX_Size() int { return xxx_messageInfo_RouteLookupResponse.Size(m) } func (m *RouteLookupResponse) XXX_DiscardUnknown() { xxx_messageInfo_RouteLookupResponse.DiscardUnknown(m) } var xxx_messageInfo_RouteLookupResponse proto.InternalMessageInfo func (m *RouteLookupResponse) GetTarget() string { if m != nil { return m.Target } return "" } func (m *RouteLookupResponse) GetHeaderData() string { if m != nil { return m.HeaderData } return "" } func init() { 
proto.RegisterType((*RouteLookupRequest)(nil), "grpc.lookup.v1.RouteLookupRequest") proto.RegisterMapType((map[string]string)(nil), "grpc.lookup.v1.RouteLookupRequest.KeyMapEntry") proto.RegisterType((*RouteLookupResponse)(nil), "grpc.lookup.v1.RouteLookupResponse") } func init() { proto.RegisterFile("grpc/rls/grpc_lookup_v1/rls.proto", fileDescriptor_5fe9649e373b9d12) } var fileDescriptor_5fe9649e373b9d12 = []byte{ // 325 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xc1, 0x4b, 0xc3, 0x30, 0x14, 0xc6, 0xed, 0x36, 0xa7, 0xbe, 0x82, 0x68, 0x14, 0x29, 0xbb, 0x38, 0xeb, 0x65, 0x07, 0xc9, 0xd8, 0xbc, 0xa8, 0xc7, 0xa1, 0x78, 0xd0, 0xc9, 0xa8, 0x1e, 0xc4, 0x4b, 0x89, 0xdb, 0x23, 0x1b, 0xad, 0x4d, 0x4c, 0xd3, 0x42, 0xff, 0x60, 0xff, 0x0f, 0x49, 0x52, 0x61, 0x9d, 0xa0, 0xb7, 0xf7, 0xfd, 0xde, 0x23, 0xf9, 0xbe, 0xe4, 0xc1, 0x19, 0x57, 0x72, 0x3e, 0x54, 0x69, 0x3e, 0x34, 0x45, 0x9c, 0x0a, 0x91, 0x14, 0x32, 0x2e, 0x47, 0x06, 0x51, 0xa9, 0x84, 0x16, 0x64, 0xdf, 0x74, 0xa8, 0xeb, 0xd0, 0x72, 0x14, 0x7e, 0x79, 0x40, 0x22, 0x51, 0x68, 0x7c, 0xb4, 0x28, 0xc2, 0xcf, 0x02, 0x73, 0x4d, 0x4e, 0xa0, 0x9b, 0xa3, 0x2a, 0x51, 0x05, 0x5e, 0xdf, 0x1b, 0xec, 0x45, 0xb5, 0x22, 0x04, 0x3a, 0x92, 0xe9, 0x65, 0xd0, 0xb2, 0xd4, 0xd6, 0xe4, 0x14, 0x7c, 0xcd, 0x14, 0x47, 0x1d, 0xeb, 0x4a, 0x62, 0xd0, 0xb6, 0x2d, 0x70, 0xe8, 0xa5, 0x92, 0x48, 0xee, 0x61, 0x27, 0xc1, 0x2a, 0xfe, 0x60, 0x32, 0xe8, 0xf4, 0xdb, 0x03, 0x7f, 0x4c, 0x69, 0xd3, 0x05, 0xfd, 0xed, 0x80, 0x3e, 0x60, 0x35, 0x65, 0xf2, 0x2e, 0xd3, 0xaa, 0x8a, 0xba, 0x89, 0x15, 0xbd, 0x6b, 0xf0, 0xd7, 0x30, 0x39, 0x80, 0x76, 0x82, 0x55, 0xed, 0xd0, 0x94, 0xe4, 0x18, 0xb6, 0x4b, 0x96, 0x16, 0x58, 0xfb, 0x73, 0xe2, 0xa6, 0x75, 0xe5, 0x85, 0x4f, 0x70, 0xd4, 0xb8, 0x24, 0x97, 0x22, 0xcb, 0xd1, 0xe4, 0x74, 0x46, 0x7f, 0x72, 0x3a, 0x65, 0x32, 0x2d, 0x91, 0x2d, 0x50, 0xc5, 0x0b, 0xa6, 0x59, 0x7d, 0x1c, 0x38, 0x74, 0xcb, 0x34, 0x1b, 0x67, 0x8d, 0x67, 0x7b, 0x46, 0x55, 0xae, 0xe6, 0x48, 
0x5e, 0xc1, 0x5f, 0xa3, 0x24, 0xfc, 0x3f, 0x67, 0xef, 0xfc, 0xcf, 0x19, 0x67, 0x33, 0xdc, 0x9a, 0x4c, 0xe1, 0x70, 0x25, 0x36, 0x46, 0x27, 0xbb, 0x51, 0x9a, 0xcf, 0xcc, 0xb7, 0xce, 0xbc, 0xb7, 0x0b, 0x2e, 0x04, 0x4f, 0x91, 0x72, 0x91, 0xb2, 0x8c, 0x53, 0xa1, 0xb8, 0x5d, 0x82, 0xa1, 0x9b, 0xde, 0x58, 0x88, 0xf7, 0xae, 0xdd, 0x86, 0xcb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xba, 0x10, 0x2d, 0xb5, 0x32, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // RouteLookupServiceClient is the client API for RouteLookupService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type RouteLookupServiceClient interface { // Lookup returns a target for a single key. RouteLookup(ctx context.Context, in *RouteLookupRequest, opts ...grpc.CallOption) (*RouteLookupResponse, error) } type routeLookupServiceClient struct { cc *grpc.ClientConn } func NewRouteLookupServiceClient(cc *grpc.ClientConn) RouteLookupServiceClient { return &routeLookupServiceClient{cc} } func (c *routeLookupServiceClient) RouteLookup(ctx context.Context, in *RouteLookupRequest, opts ...grpc.CallOption) (*RouteLookupResponse, error) { out := new(RouteLookupResponse) err := c.cc.Invoke(ctx, "/grpc.lookup.v1.RouteLookupService/RouteLookup", in, out, opts...) if err != nil { return nil, err } return out, nil } // RouteLookupServiceServer is the server API for RouteLookupService service. type RouteLookupServiceServer interface { // Lookup returns a target for a single key. RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) } // UnimplementedRouteLookupServiceServer can be embedded to have forward compatible implementations. 
type UnimplementedRouteLookupServiceServer struct { } func (*UnimplementedRouteLookupServiceServer) RouteLookup(ctx context.Context, req *RouteLookupRequest) (*RouteLookupResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RouteLookup not implemented") } func RegisterRouteLookupServiceServer(s *grpc.Server, srv RouteLookupServiceServer) { s.RegisterService(&_RouteLookupService_serviceDesc, srv) } func _RouteLookupService_RouteLookup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RouteLookupRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RouteLookupServiceServer).RouteLookup(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.lookup.v1.RouteLookupService/RouteLookup", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RouteLookupServiceServer).RouteLookup(ctx, req.(*RouteLookupRequest)) } return interceptor(ctx, in, info, handler) } var _RouteLookupService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.lookup.v1.RouteLookupService", HandlerType: (*RouteLookupServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "RouteLookup", Handler: _RouteLookupService_RouteLookup_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "grpc/rls/grpc_lookup_v1/rls.proto", } grpc-go-1.29.1/balancer/rls/internal/proto/grpc_lookup_v1/rls_config.pb.go000066400000000000000000000640371365033716300265170ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc/rls/grpc_lookup_v1/rls_config.proto package grpc_lookup_v1 import ( fmt "fmt" proto "github.com/golang/protobuf/proto" duration "github.com/golang/protobuf/ptypes/duration" math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // Specify how to process a request when not already in the cache. type RouteLookupConfig_RequestProcessingStrategy int32 const ( RouteLookupConfig_STRATEGY_UNSPECIFIED RouteLookupConfig_RequestProcessingStrategy = 0 // Query the RLS and process the request using target returned by the // lookup. The target will then be cached and used for processing // subsequent requests for the same key. Any errors during lookup service // processing will fall back to default target for request processing. RouteLookupConfig_SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR RouteLookupConfig_RequestProcessingStrategy = 1 // Query the RLS and process the request using target returned by the // lookup. The target will then be cached and used for processing // subsequent requests for the same key. Any errors during lookup service // processing will return an error back to the client. Services with // strict regional routing requirements should use this strategy. RouteLookupConfig_SYNC_LOOKUP_CLIENT_SEES_ERROR RouteLookupConfig_RequestProcessingStrategy = 2 // Query the RLS asynchronously but respond with the default target. The // target in the lookup response will then be cached and used for // subsequent requests. Services with strict latency requirements (but not // strict regional routing requirements) should use this strategy. 
RouteLookupConfig_ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS RouteLookupConfig_RequestProcessingStrategy = 3 ) var RouteLookupConfig_RequestProcessingStrategy_name = map[int32]string{ 0: "STRATEGY_UNSPECIFIED", 1: "SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR", 2: "SYNC_LOOKUP_CLIENT_SEES_ERROR", 3: "ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS", } var RouteLookupConfig_RequestProcessingStrategy_value = map[string]int32{ "STRATEGY_UNSPECIFIED": 0, "SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR": 1, "SYNC_LOOKUP_CLIENT_SEES_ERROR": 2, "ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS": 3, } func (x RouteLookupConfig_RequestProcessingStrategy) String() string { return proto.EnumName(RouteLookupConfig_RequestProcessingStrategy_name, int32(x)) } func (RouteLookupConfig_RequestProcessingStrategy) EnumDescriptor() ([]byte, []int) { return fileDescriptor_f013e3228551a7a8, []int{3, 0} } // Extract a key based on a given name (e.g. header name or query parameter // name). The name must match one of the names listed in the "name" field. If // the "required_match" field is true, one of the specified names must be // present for the keybuilder to match. type NameMatcher struct { // The name that will be used in the RLS key_map to refer to this value. Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // Ordered list of names (headers or query parameter names) that can supply // this value; the first one with a non-empty value is used. Names []string `protobuf:"bytes,2,rep,name=names,proto3" json:"names,omitempty"` // If true, make this extraction required; the key builder will not match // if no value is found. 
RequiredMatch bool `protobuf:"varint,3,opt,name=required_match,json=requiredMatch,proto3" json:"required_match,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *NameMatcher) Reset() { *m = NameMatcher{} } func (m *NameMatcher) String() string { return proto.CompactTextString(m) } func (*NameMatcher) ProtoMessage() {} func (*NameMatcher) Descriptor() ([]byte, []int) { return fileDescriptor_f013e3228551a7a8, []int{0} } func (m *NameMatcher) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NameMatcher.Unmarshal(m, b) } func (m *NameMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NameMatcher.Marshal(b, m, deterministic) } func (m *NameMatcher) XXX_Merge(src proto.Message) { xxx_messageInfo_NameMatcher.Merge(m, src) } func (m *NameMatcher) XXX_Size() int { return xxx_messageInfo_NameMatcher.Size(m) } func (m *NameMatcher) XXX_DiscardUnknown() { xxx_messageInfo_NameMatcher.DiscardUnknown(m) } var xxx_messageInfo_NameMatcher proto.InternalMessageInfo func (m *NameMatcher) GetKey() string { if m != nil { return m.Key } return "" } func (m *NameMatcher) GetNames() []string { if m != nil { return m.Names } return nil } func (m *NameMatcher) GetRequiredMatch() bool { if m != nil { return m.RequiredMatch } return false } // A GrpcKeyBuilder applies to a given gRPC service, name, and headers. type GrpcKeyBuilder struct { Names []*GrpcKeyBuilder_Name `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` // Extract keys from all listed headers. // For gRPC, it is an error to specify "required_match" on the NameMatcher // protos, and we ignore it if set. 
Headers []*NameMatcher `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GrpcKeyBuilder) Reset() { *m = GrpcKeyBuilder{} } func (m *GrpcKeyBuilder) String() string { return proto.CompactTextString(m) } func (*GrpcKeyBuilder) ProtoMessage() {} func (*GrpcKeyBuilder) Descriptor() ([]byte, []int) { return fileDescriptor_f013e3228551a7a8, []int{1} } func (m *GrpcKeyBuilder) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GrpcKeyBuilder.Unmarshal(m, b) } func (m *GrpcKeyBuilder) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GrpcKeyBuilder.Marshal(b, m, deterministic) } func (m *GrpcKeyBuilder) XXX_Merge(src proto.Message) { xxx_messageInfo_GrpcKeyBuilder.Merge(m, src) } func (m *GrpcKeyBuilder) XXX_Size() int { return xxx_messageInfo_GrpcKeyBuilder.Size(m) } func (m *GrpcKeyBuilder) XXX_DiscardUnknown() { xxx_messageInfo_GrpcKeyBuilder.DiscardUnknown(m) } var xxx_messageInfo_GrpcKeyBuilder proto.InternalMessageInfo func (m *GrpcKeyBuilder) GetNames() []*GrpcKeyBuilder_Name { if m != nil { return m.Names } return nil } func (m *GrpcKeyBuilder) GetHeaders() []*NameMatcher { if m != nil { return m.Headers } return nil } // To match, one of the given Name fields must match; the service and method // fields are specified as fixed strings. The service name is required and // includes the proto package name. The method name may be omitted, in // which case any method on the given service is matched. 
type GrpcKeyBuilder_Name struct { Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GrpcKeyBuilder_Name) Reset() { *m = GrpcKeyBuilder_Name{} } func (m *GrpcKeyBuilder_Name) String() string { return proto.CompactTextString(m) } func (*GrpcKeyBuilder_Name) ProtoMessage() {} func (*GrpcKeyBuilder_Name) Descriptor() ([]byte, []int) { return fileDescriptor_f013e3228551a7a8, []int{1, 0} } func (m *GrpcKeyBuilder_Name) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GrpcKeyBuilder_Name.Unmarshal(m, b) } func (m *GrpcKeyBuilder_Name) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GrpcKeyBuilder_Name.Marshal(b, m, deterministic) } func (m *GrpcKeyBuilder_Name) XXX_Merge(src proto.Message) { xxx_messageInfo_GrpcKeyBuilder_Name.Merge(m, src) } func (m *GrpcKeyBuilder_Name) XXX_Size() int { return xxx_messageInfo_GrpcKeyBuilder_Name.Size(m) } func (m *GrpcKeyBuilder_Name) XXX_DiscardUnknown() { xxx_messageInfo_GrpcKeyBuilder_Name.DiscardUnknown(m) } var xxx_messageInfo_GrpcKeyBuilder_Name proto.InternalMessageInfo func (m *GrpcKeyBuilder_Name) GetService() string { if m != nil { return m.Service } return "" } func (m *GrpcKeyBuilder_Name) GetMethod() string { if m != nil { return m.Method } return "" } // An HttpKeyBuilder applies to a given HTTP URL and headers. // // Path and host patterns use the matching syntax from gRPC transcoding to // extract named key/value pairs from the path and host components of the URL: // https://github.com/googleapis/googleapis/blob/master/google/api/http.proto // // It is invalid to specify the same key name in multiple places in a pattern. 
// // For a service where the project id can be expressed either as a subdomain or // in the path, separate HttpKeyBuilders must be used: // host_pattern: 'example.com' path_pattern: '/{id}/{object}/**' // host_pattern: '{id}.example.com' path_pattern: '/{object}/**' // If the host is exactly 'example.com', the first path segment will be used as // the id and the second segment as the object. If the host has a subdomain, the // subdomain will be used as the id and the first segment as the object. If // neither pattern matches, no keys will be extracted. type HttpKeyBuilder struct { // host_pattern is an ordered list of host template patterns for the desired // value. If any host_pattern values are specified, then at least one must // match, and the last one wins and sets any specified variables. A host // consists of labels separated by dots. Each label is matched against the // label in the pattern as follows: // - "*": Matches any single label. // - "**": Matches zero or more labels (first or last part of host only). // - "{=...}": One or more label capture, where "..." can be any // template that does not include a capture. // - "{}": A single label capture. Identical to {=*}. // // Examples: // - "example.com": Only applies to the exact host example.com. // - "*.example.com": Matches subdomains of example.com. // - "**.example.com": matches example.com, and all levels of subdomains. // - "{project}.example.com": Extracts the third level subdomain. // - "{project=**}.example.com": Extracts the third level+ subdomains. // - "{project=**}": Extracts the entire host. HostPatterns []string `protobuf:"bytes,1,rep,name=host_patterns,json=hostPatterns,proto3" json:"host_patterns,omitempty"` // path_pattern is an ordered list of path template patterns for the desired // value. If any path_pattern values are specified, then at least one must // match, and the last one wins and sets any specified variables. A path // consists of segments separated by slashes. 
Each segment is matched against // the segment in the pattern as follows: // - "*": Matches any single segment. // - "**": Matches zero or more segments (first or last part of path only). // - "{=...}": One or more segment capture, where "..." can be any // template that does not include a capture. // - "{}": A single segment capture. Identical to {=*}. // A custom method may also be specified by appending ":" and the custom // method name or "*" to indicate any custom method (including no custom // method). For example, "/*/projects/{project_id}/**:*" extracts // `{project_id}` for any version, resource and custom method that includes // it. By default, any custom method will be matched. // // Examples: // - "/v1/{name=messages/*}": extracts a name like "messages/12345". // - "/v1/messages/{message_id}": extracts a message_id like "12345". // - "/v1/users/{user_id}/messages/{message_id}": extracts two key values. PathPatterns []string `protobuf:"bytes,2,rep,name=path_patterns,json=pathPatterns,proto3" json:"path_patterns,omitempty"` // List of query parameter names to try to match. // For example: ["parent", "name", "resource.name"] // We extract all the specified query_parameters (case-sensitively). If any // are marked as "required_match" and are not present, this keybuilder fails // to match. If a given parameter appears multiple times (?foo=a&foo=b) we // will report it as a comma-separated string (foo=a,b). QueryParameters []*NameMatcher `protobuf:"bytes,3,rep,name=query_parameters,json=queryParameters,proto3" json:"query_parameters,omitempty"` // List of headers to try to match. // We extract all the specified header values (case-insensitively). If any // are marked as "required_match" and are not present, this keybuilder fails // to match. If a given header appears multiple times in the request we will // report it as a comma-separated string, in standard HTTP fashion. 
Headers []*NameMatcher `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HttpKeyBuilder) Reset() { *m = HttpKeyBuilder{} } func (m *HttpKeyBuilder) String() string { return proto.CompactTextString(m) } func (*HttpKeyBuilder) ProtoMessage() {} func (*HttpKeyBuilder) Descriptor() ([]byte, []int) { return fileDescriptor_f013e3228551a7a8, []int{2} } func (m *HttpKeyBuilder) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HttpKeyBuilder.Unmarshal(m, b) } func (m *HttpKeyBuilder) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HttpKeyBuilder.Marshal(b, m, deterministic) } func (m *HttpKeyBuilder) XXX_Merge(src proto.Message) { xxx_messageInfo_HttpKeyBuilder.Merge(m, src) } func (m *HttpKeyBuilder) XXX_Size() int { return xxx_messageInfo_HttpKeyBuilder.Size(m) } func (m *HttpKeyBuilder) XXX_DiscardUnknown() { xxx_messageInfo_HttpKeyBuilder.DiscardUnknown(m) } var xxx_messageInfo_HttpKeyBuilder proto.InternalMessageInfo func (m *HttpKeyBuilder) GetHostPatterns() []string { if m != nil { return m.HostPatterns } return nil } func (m *HttpKeyBuilder) GetPathPatterns() []string { if m != nil { return m.PathPatterns } return nil } func (m *HttpKeyBuilder) GetQueryParameters() []*NameMatcher { if m != nil { return m.QueryParameters } return nil } func (m *HttpKeyBuilder) GetHeaders() []*NameMatcher { if m != nil { return m.Headers } return nil } type RouteLookupConfig struct { // Ordered specifications for constructing keys for HTTP requests. Last // match wins. If no HttpKeyBuilder matches, an empty key_map will be sent to // the lookup service; it should likely reply with a global default route // and raise an alert. 
HttpKeybuilders []*HttpKeyBuilder `protobuf:"bytes,1,rep,name=http_keybuilders,json=httpKeybuilders,proto3" json:"http_keybuilders,omitempty"` // Unordered specifications for constructing keys for gRPC requests. All // GrpcKeyBuilders on this list must have unique "name" fields so that the // client is free to prebuild a hash map keyed by name. If no GrpcKeyBuilder // matches, an empty key_map will be sent to the lookup service; it should // likely reply with a global default route and raise an alert. GrpcKeybuilders []*GrpcKeyBuilder `protobuf:"bytes,2,rep,name=grpc_keybuilders,json=grpcKeybuilders,proto3" json:"grpc_keybuilders,omitempty"` // The name of the lookup service as a gRPC URI. Typically, this will be // a subdomain of the target, such as "lookup.datastore.googleapis.com". LookupService string `protobuf:"bytes,3,opt,name=lookup_service,json=lookupService,proto3" json:"lookup_service,omitempty"` // Configure a timeout value for lookup service requests. // Defaults to 10 seconds if not specified. LookupServiceTimeout *duration.Duration `protobuf:"bytes,4,opt,name=lookup_service_timeout,json=lookupServiceTimeout,proto3" json:"lookup_service_timeout,omitempty"` // How long are responses valid for (like HTTP Cache-Control). // If omitted (i.e. 0), responses are considered not to be cacheable. // This value is clamped to 5 minutes to avoid unflushable bad responses. MaxAge *duration.Duration `protobuf:"bytes,5,opt,name=max_age,json=maxAge,proto3" json:"max_age,omitempty"` // After a response has been in the client cache for this amount of time // and is re-requested, start an asynchronous RPC to re-validate it. // This value should be less than max_age by at least the length of a // typical RTT to the Route Lookup Service to fully mask the RTT latency. // If omitted, keys are only re-requested after they have expired. 
StaleAge *duration.Duration `protobuf:"bytes,6,opt,name=stale_age,json=staleAge,proto3" json:"stale_age,omitempty"` // Rough indicator of amount of memory to use for the client cache. Some of // the data structure overhead is not accounted for, so actual memory consumed // will be somewhat greater than this value. If this field is omitted or set // to zero, a client default will be used. The value may be capped to a lower // amount based on client configuration. CacheSizeBytes int64 `protobuf:"varint,7,opt,name=cache_size_bytes,json=cacheSizeBytes,proto3" json:"cache_size_bytes,omitempty"` // This is a list of all the possible targets that can be returned by the // lookup service. If a target not on this list is returned, it will be // treated the same as an RPC error from the RLS. ValidTargets []string `protobuf:"bytes,8,rep,name=valid_targets,json=validTargets,proto3" json:"valid_targets,omitempty"` // This value provides a default target to use if needed. It will be used for // request processing strategy SYNC_LOOKUP_DEFAULT_TARGET_ON_ERROR if RLS // returns an error, or strategy ASYNC_LOOKUP_DEFAULT_TARGET_ON_MISS if RLS // returns an error or there is a cache miss in the client. It will also be // used if there are no healthy backends for an RLS target. Note that // requests can be routed only to a subdomain of the original target, // e.g. "us_east_1.cloudbigtable.googleapis.com". 
DefaultTarget string `protobuf:"bytes,9,opt,name=default_target,json=defaultTarget,proto3" json:"default_target,omitempty"` RequestProcessingStrategy RouteLookupConfig_RequestProcessingStrategy `protobuf:"varint,10,opt,name=request_processing_strategy,json=requestProcessingStrategy,proto3,enum=grpc.lookup.v1.RouteLookupConfig_RequestProcessingStrategy" json:"request_processing_strategy,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RouteLookupConfig) Reset() { *m = RouteLookupConfig{} } func (m *RouteLookupConfig) String() string { return proto.CompactTextString(m) } func (*RouteLookupConfig) ProtoMessage() {} func (*RouteLookupConfig) Descriptor() ([]byte, []int) { return fileDescriptor_f013e3228551a7a8, []int{3} } func (m *RouteLookupConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RouteLookupConfig.Unmarshal(m, b) } func (m *RouteLookupConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RouteLookupConfig.Marshal(b, m, deterministic) } func (m *RouteLookupConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_RouteLookupConfig.Merge(m, src) } func (m *RouteLookupConfig) XXX_Size() int { return xxx_messageInfo_RouteLookupConfig.Size(m) } func (m *RouteLookupConfig) XXX_DiscardUnknown() { xxx_messageInfo_RouteLookupConfig.DiscardUnknown(m) } var xxx_messageInfo_RouteLookupConfig proto.InternalMessageInfo func (m *RouteLookupConfig) GetHttpKeybuilders() []*HttpKeyBuilder { if m != nil { return m.HttpKeybuilders } return nil } func (m *RouteLookupConfig) GetGrpcKeybuilders() []*GrpcKeyBuilder { if m != nil { return m.GrpcKeybuilders } return nil } func (m *RouteLookupConfig) GetLookupService() string { if m != nil { return m.LookupService } return "" } func (m *RouteLookupConfig) GetLookupServiceTimeout() *duration.Duration { if m != nil { return m.LookupServiceTimeout } return nil } func (m *RouteLookupConfig) GetMaxAge() 
*duration.Duration { if m != nil { return m.MaxAge } return nil } func (m *RouteLookupConfig) GetStaleAge() *duration.Duration { if m != nil { return m.StaleAge } return nil } func (m *RouteLookupConfig) GetCacheSizeBytes() int64 { if m != nil { return m.CacheSizeBytes } return 0 } func (m *RouteLookupConfig) GetValidTargets() []string { if m != nil { return m.ValidTargets } return nil } func (m *RouteLookupConfig) GetDefaultTarget() string { if m != nil { return m.DefaultTarget } return "" } func (m *RouteLookupConfig) GetRequestProcessingStrategy() RouteLookupConfig_RequestProcessingStrategy { if m != nil { return m.RequestProcessingStrategy } return RouteLookupConfig_STRATEGY_UNSPECIFIED } func init() { proto.RegisterEnum("grpc.lookup.v1.RouteLookupConfig_RequestProcessingStrategy", RouteLookupConfig_RequestProcessingStrategy_name, RouteLookupConfig_RequestProcessingStrategy_value) proto.RegisterType((*NameMatcher)(nil), "grpc.lookup.v1.NameMatcher") proto.RegisterType((*GrpcKeyBuilder)(nil), "grpc.lookup.v1.GrpcKeyBuilder") proto.RegisterType((*GrpcKeyBuilder_Name)(nil), "grpc.lookup.v1.GrpcKeyBuilder.Name") proto.RegisterType((*HttpKeyBuilder)(nil), "grpc.lookup.v1.HttpKeyBuilder") proto.RegisterType((*RouteLookupConfig)(nil), "grpc.lookup.v1.RouteLookupConfig") } func init() { proto.RegisterFile("grpc/rls/grpc_lookup_v1/rls_config.proto", fileDescriptor_f013e3228551a7a8) } var fileDescriptor_f013e3228551a7a8 = []byte{ // 742 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xdd, 0x6e, 0xdb, 0x36, 0x18, 0x9d, 0xa2, 0xd4, 0x89, 0x99, 0x46, 0x71, 0x85, 0xa0, 0x50, 0x5a, 0xac, 0xf0, 0x1c, 0x14, 0xd3, 0xc5, 0x20, 0xa3, 0x1e, 0x36, 0x6c, 0xd8, 0x95, 0xed, 0x28, 0x99, 0x51, 0xd7, 0x36, 0x28, 0xe5, 0xa2, 0xc3, 0x00, 0x82, 0x96, 0xbf, 0x48, 0x42, 0x24, 0x53, 0xa5, 0x28, 0xa3, 0xee, 0xde, 0x68, 0xc0, 0xde, 0x60, 0x2f, 0xb2, 0xb7, 0x19, 0x28, 0x4a, 0x9e, 0xed, 0x2d, 0x4b, 0xef, 0xf4, 0x1d, 0x9e, 0x73, 0xc4, 
0xef, 0x8f, 0xc8, 0x0e, 0x79, 0x16, 0x74, 0x79, 0x92, 0x77, 0xe5, 0x07, 0x49, 0x18, 0xbb, 0x2f, 0x32, 0xb2, 0x7a, 0x23, 0x21, 0x12, 0xb0, 0xe5, 0x5d, 0x1c, 0x3a, 0x19, 0x67, 0x82, 0x99, 0x86, 0x24, 0x38, 0x8a, 0xe0, 0xac, 0xde, 0xbc, 0x78, 0x15, 0x32, 0x16, 0x26, 0xd0, 0x2d, 0x4f, 0xe7, 0xc5, 0x5d, 0x77, 0x51, 0x70, 0x2a, 0x62, 0xb6, 0x54, 0xfc, 0xce, 0xaf, 0xe8, 0x64, 0x42, 0x53, 0x78, 0x47, 0x45, 0x10, 0x01, 0x37, 0x5b, 0x48, 0xbf, 0x87, 0xb5, 0xa5, 0xb5, 0x35, 0xbb, 0x89, 0xe5, 0xa7, 0x79, 0x8e, 0x9e, 0x2c, 0x69, 0x0a, 0xb9, 0x75, 0xd0, 0xd6, 0xed, 0x26, 0x56, 0x81, 0xf9, 0x1a, 0x19, 0x1c, 0x3e, 0x14, 0x31, 0x87, 0x05, 0x49, 0xa5, 0xd6, 0xd2, 0xdb, 0x9a, 0x7d, 0x8c, 0x4f, 0x6b, 0xb4, 0x34, 0xec, 0xfc, 0xa9, 0x21, 0xe3, 0x86, 0x67, 0xc1, 0x5b, 0x58, 0x0f, 0x8a, 0x38, 0x59, 0x00, 0x37, 0x7f, 0xac, 0xfd, 0xb4, 0xb6, 0x6e, 0x9f, 0xf4, 0x2e, 0x9d, 0xdd, 0x0b, 0x3b, 0xbb, 0x74, 0x47, 0x5e, 0xae, 0xfe, 0xe9, 0x77, 0xe8, 0x28, 0x02, 0xba, 0x00, 0xae, 0x2e, 0x73, 0xd2, 0x7b, 0xb9, 0x2f, 0xde, 0x4a, 0x05, 0xd7, 0xdc, 0x17, 0x3f, 0xa0, 0x43, 0x89, 0x9b, 0x16, 0x3a, 0xca, 0x81, 0xaf, 0xe2, 0x00, 0xaa, 0xfc, 0xea, 0xd0, 0x7c, 0x8e, 0x1a, 0x29, 0x88, 0x88, 0x2d, 0xac, 0x83, 0xf2, 0xa0, 0x8a, 0x3a, 0x7f, 0x69, 0xc8, 0xf8, 0x59, 0x88, 0x6c, 0xeb, 0xfa, 0x97, 0xe8, 0x34, 0x62, 0xb9, 0x20, 0x19, 0x15, 0x02, 0xf8, 0x52, 0xa5, 0xd1, 0xc4, 0x4f, 0x25, 0x38, 0xab, 0x30, 0x49, 0xca, 0xa8, 0x88, 0xfe, 0x21, 0xa9, 0xda, 0x3d, 0x95, 0xe0, 0x86, 0x74, 0x8d, 0x5a, 0x1f, 0x0a, 0xe0, 0x6b, 0x92, 0x51, 0x4e, 0x53, 0x10, 0x32, 0x2d, 0xfd, 0xf1, 0xb4, 0xce, 0x4a, 0xd1, 0x6c, 0xa3, 0xd9, 0xae, 0xca, 0xe1, 0xe7, 0x57, 0xa5, 0xf3, 0x47, 0x03, 0x3d, 0xc3, 0xac, 0x10, 0x30, 0x2e, 0x79, 0xc3, 0x72, 0x88, 0xcc, 0x11, 0x6a, 0x45, 0x42, 0x64, 0xe4, 0x1e, 0xd6, 0x73, 0x95, 0x71, 0xdd, 0xa8, 0x57, 0xfb, 0xae, 0xbb, 0x85, 0xc1, 0x67, 0x91, 0x8a, 0x6b, 0x99, 0xb4, 0x2a, 0x87, 0x75, 0xdb, 0xea, 0xe0, 0xbf, 0xad, 0x76, 0x7b, 0x8e, 0xcf, 0x42, 0x15, 0x6f, 0xac, 0x5e, 0x23, 0xa3, 0x1a, 0xf9, 0xba, 0x81, 0x7a, 
0xd9, 0xa7, 0x53, 0x85, 0x7a, 0x55, 0x1b, 0xa7, 0xe8, 0xf9, 0x2e, 0x8d, 0x88, 0x38, 0x05, 0x56, 0x08, 0xeb, 0xb0, 0xad, 0xd9, 0x27, 0xbd, 0x0b, 0x47, 0x2d, 0x83, 0x53, 0x2f, 0x83, 0x73, 0x55, 0x2d, 0x03, 0x3e, 0xdf, 0x71, 0xf2, 0x95, 0xcc, 0xec, 0xa1, 0xa3, 0x94, 0x7e, 0x24, 0x34, 0x04, 0xeb, 0xc9, 0x63, 0x0e, 0x8d, 0x94, 0x7e, 0xec, 0x87, 0x60, 0x7e, 0x8f, 0x9a, 0xb9, 0xa0, 0x09, 0x94, 0xaa, 0xc6, 0x63, 0xaa, 0xe3, 0x92, 0x2b, 0x75, 0x36, 0x6a, 0x05, 0x34, 0x88, 0x80, 0xe4, 0xf1, 0x27, 0x20, 0xf3, 0xb5, 0x80, 0xdc, 0x3a, 0x6a, 0x6b, 0xb6, 0x8e, 0x8d, 0x12, 0xf7, 0xe2, 0x4f, 0x30, 0x90, 0xa8, 0x9c, 0xae, 0x15, 0x4d, 0xe2, 0x05, 0x11, 0x94, 0x87, 0x20, 0x72, 0xeb, 0x58, 0x4d, 0x57, 0x09, 0xfa, 0x0a, 0x93, 0x25, 0x5b, 0xc0, 0x1d, 0x2d, 0x12, 0x51, 0xd1, 0xac, 0xa6, 0x2a, 0x59, 0x85, 0x2a, 0x9e, 0xf9, 0x1b, 0x7a, 0x29, 0x37, 0x16, 0xe4, 0x44, 0x73, 0x16, 0x40, 0x9e, 0xc7, 0xcb, 0x90, 0xe4, 0x82, 0x53, 0x01, 0xe1, 0xda, 0x42, 0x6d, 0xcd, 0x36, 0x7a, 0x3f, 0xed, 0xf7, 0xeb, 0x5f, 0x73, 0xe3, 0x60, 0x65, 0x32, 0xdb, 0x78, 0x78, 0x95, 0x05, 0xbe, 0xe0, 0x0f, 0x1d, 0x75, 0x7e, 0xd7, 0xd0, 0xc5, 0x83, 0x42, 0xd3, 0x42, 0xe7, 0x9e, 0x8f, 0xfb, 0xbe, 0x7b, 0xf3, 0x9e, 0xdc, 0x4e, 0xbc, 0x99, 0x3b, 0x1c, 0x5d, 0x8f, 0xdc, 0xab, 0xd6, 0x17, 0xe6, 0xd7, 0xe8, 0xd2, 0x7b, 0x3f, 0x19, 0x92, 0xf1, 0x74, 0xfa, 0xf6, 0x76, 0x46, 0xae, 0xdc, 0xeb, 0xfe, 0xed, 0xd8, 0x27, 0x7e, 0x1f, 0xdf, 0xb8, 0x3e, 0x99, 0x4e, 0x88, 0x8b, 0xf1, 0x14, 0xb7, 0x34, 0xf3, 0x2b, 0xf4, 0xe5, 0x36, 0x71, 0x38, 0x1e, 0xb9, 0x13, 0x9f, 0x78, 0xae, 0xeb, 0x55, 0x94, 0x03, 0xe9, 0xd5, 0xff, 0x7f, 0xb3, 0x77, 0x23, 0xcf, 0x6b, 0xe9, 0x03, 0x0f, 0x3d, 0x8b, 0xd9, 0x5e, 0x21, 0x06, 0x06, 0x4e, 0x72, 0x55, 0x81, 0x99, 0x6c, 0xed, 0x4c, 0xfb, 0xe5, 0x9b, 0xaa, 0xd5, 0x21, 0x4b, 0xe8, 0x32, 0x74, 0x18, 0x0f, 0xcb, 0x27, 0xbb, 0xab, 0x34, 0x7b, 0xcf, 0xf7, 0xbc, 0x51, 0x4e, 0xc4, 0xb7, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x47, 0xa7, 0x94, 0xbe, 0xe0, 0x05, 0x00, 0x00, } 
grpc-go-1.29.1/balancer/rls/internal/proto/regenerate.sh000077500000000000000000000022631365033716300231620ustar00rootroot00000000000000#!/bin/bash # Copyright 2020 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux -o pipefail TMP=$(mktemp -d) function finish { rm -rf "$TMP" } trap finish EXIT pushd "$TMP" mkdir -p grpc/rls/grpc_lookup_v1 curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/lookup/v1/rls.proto > grpc/rls/grpc_lookup_v1/rls.proto curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/lookup/v1/rls_config.proto > grpc/rls/grpc_lookup_v1/rls_config.proto protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/rls/grpc_lookup_v1/*.proto popd rm -f ./grpc_lookup_v1/*.pb.go cp "$TMP"/grpc/rls/grpc_lookup_v1/*.pb.go ../../../rls/internal/proto/grpc_lookup_v1/ grpc-go-1.29.1/balancer/rls/internal/testutils/000077500000000000000000000000001365033716300213745ustar00rootroot00000000000000grpc-go-1.29.1/balancer/rls/internal/testutils/fakeserver/000077500000000000000000000000001365033716300235315ustar00rootroot00000000000000grpc-go-1.29.1/balancer/rls/internal/testutils/fakeserver/fakeserver.go000066400000000000000000000055631365033716300262260ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package fakeserver provides a fake implementation of the RouteLookupService, // to be used in unit tests. package fakeserver import ( "context" "fmt" "net" "time" "google.golang.org/grpc" rlsgrpc "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" ) const defaultDialTimeout = 5 * time.Second // Response wraps the response protobuf (xds/LRS) and error that the Server // should send out to the client through a call to stream.Send() type Response struct { Resp *rlspb.RouteLookupResponse Err error } // Server is a fake implementation of RLS. It exposes channels to send/receive // RLS requests and responses. type Server struct { RequestChan chan *rlspb.RouteLookupRequest ResponseChan chan Response Address string } // Start makes a new Server and gets it to start listening on a local port for // gRPC requests. The returned cancel function should be invoked by the caller // upon completion of the test. func Start() (*Server, func(), error) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { return nil, func() {}, fmt.Errorf("net.Listen() failed: %v", err) } s := &Server{ // Give the channels a buffer size of 1 so that we can setup // expectations for one lookup call, without blocking. 
RequestChan: make(chan *rlspb.RouteLookupRequest, 1), ResponseChan: make(chan Response, 1), Address: lis.Addr().String(), } server := grpc.NewServer() rlsgrpc.RegisterRouteLookupServiceServer(server, s) go server.Serve(lis) return s, func() { server.Stop() }, nil } // RouteLookup implements the RouteLookupService. func (s *Server) RouteLookup(ctx context.Context, req *rlspb.RouteLookupRequest) (*rlspb.RouteLookupResponse, error) { s.RequestChan <- req resp := <-s.ResponseChan return resp.Resp, resp.Err } // ClientConn returns a grpc.ClientConn connected to the fakeServer. func (s *Server) ClientConn() (*grpc.ClientConn, func(), error) { ctx, cancel := context.WithTimeout(context.Background(), defaultDialTimeout) defer cancel() cc, err := grpc.DialContext(ctx, s.Address, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { return nil, nil, fmt.Errorf("grpc.DialContext(%s) failed: %v", s.Address, err) } return cc, func() { cc.Close() }, nil } grpc-go-1.29.1/balancer/roundrobin/000077500000000000000000000000001365033716300171015ustar00rootroot00000000000000grpc-go-1.29.1/balancer/roundrobin/roundrobin.go000066400000000000000000000045671365033716300216250ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package roundrobin defines a roundrobin balancer. Roundrobin balancer is // installed as one of the default balancers in gRPC, users don't need to // explicitly install this balancer. 
package roundrobin import ( "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/grpcrand" ) // Name is the name of round_robin balancer. const Name = "round_robin" // newBuilder creates a new roundrobin balancer builder. func newBuilder() balancer.Builder { return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) } func init() { balancer.Register(newBuilder()) } type rrPickerBuilder struct{} func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker { grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info) if len(info.ReadySCs) == 0 { return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable) } var scs []balancer.SubConn for sc := range info.ReadySCs { scs = append(scs, sc) } return &rrPicker{ subConns: scs, // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. next: grpcrand.Intn(len(scs)), } } type rrPicker struct { // subConns is the snapshot of the roundrobin balancer when this picker was // created. The slice is immutable. Each Get() will do a round robin // selection from it and return the selected SubConn. subConns []balancer.SubConn mu sync.Mutex next int } func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { p.mu.Lock() sc := p.subConns[p.next] p.next = (p.next + 1) % len(p.subConns) p.mu.Unlock() return balancer.PickResult{SubConn: sc}, nil } grpc-go-1.29.1/balancer/roundrobin/roundrobin_test.go000066400000000000000000000357111365033716300226570ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package roundrobin_test import ( "context" "fmt" "net" "strings" "sync" "testing" "time" "google.golang.org/grpc" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" testpb "google.golang.org/grpc/test/grpc_testing" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } type testServer struct { testpb.UnimplementedTestServiceServer } func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil } func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { return nil } type test struct { servers []*grpc.Server addresses []string } func (t *test) cleanup() { for _, s := range t.servers { s.Stop() } } func startTestServers(count int) (_ *test, err error) { t := &test{} defer func() { if err != nil { t.cleanup() } }() for i := 0; i < count; i++ { lis, err := net.Listen("tcp", "localhost:0") if err != nil { return nil, fmt.Errorf("failed to listen %v", err) } s := grpc.NewServer() testpb.RegisterTestServiceServer(s, &testServer{}) t.servers = append(t.servers, s) t.addresses = append(t.addresses, lis.Addr().String()) go func(s *grpc.Server, l net.Listener) { s.Serve(l) }(s, lis) } return t, nil } func (s) TestOneBackend(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer 
cleanup() test, err := startTestServers(1) if err != nil { t.Fatalf("failed to start servers: %v", err) } defer test.cleanup() cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() testc := testpb.NewTestServiceClient(cc) // The first RPC should fail because there's no address. ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) // The second RPC should succeed. if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("EmptyCall() = _, %v, want _, ", err) } } func (s) TestBackendsRoundRobin(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() backendCount := 5 test, err := startTestServers(backendCount) if err != nil { t.Fatalf("failed to start servers: %v", err) } defer test.cleanup() cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() testc := testpb.NewTestServiceClient(cc) // The first RPC should fail because there's no address. 
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } var resolvedAddrs []resolver.Address for i := 0; i < backendCount; i++ { resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) } r.UpdateState(resolver.State{Addresses: resolvedAddrs}) var p peer.Peer // Make sure connections to all servers are up. for si := 0; si < backendCount; si++ { var connected bool for i := 0; i < 1000; i++ { if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { t.Fatalf("EmptyCall() = _, %v, want _, ", err) } if p.Addr.String() == test.addresses[si] { connected = true break } time.Sleep(time.Millisecond) } if !connected { t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) } } for i := 0; i < 3*backendCount; i++ { if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { t.Fatalf("EmptyCall() = _, %v, want _, ", err) } if p.Addr.String() != test.addresses[i%backendCount] { t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) } } } func (s) TestAddressesRemoved(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() test, err := startTestServers(1) if err != nil { t.Fatalf("failed to start servers: %v", err) } defer test.cleanup() cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() testc := testpb.NewTestServiceClient(cc) // The first RPC should fail because there's no address. 
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) // The second RPC should succeed. if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("EmptyCall() = _, %v, want _, ", err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{}}) ctx2, cancel2 := context.WithTimeout(context.Background(), 500*time.Millisecond) defer cancel2() // Wait for state to change to transient failure. for src := cc.GetState(); src != connectivity.TransientFailure; src = cc.GetState() { if !cc.WaitForStateChange(ctx2, src) { t.Fatalf("timed out waiting for state change. got %v; want %v", src, connectivity.TransientFailure) } } const msgWant = "produced zero addresses" if _, err := testc.EmptyCall(ctx2, &testpb.Empty{}); err == nil || !strings.Contains(status.Convert(err).Message(), msgWant) { t.Fatalf("EmptyCall() = _, %v, want _, Contains(Message(), %q)", err, msgWant) } } func (s) TestCloseWithPendingRPC(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() test, err := startTestServers(1) if err != nil { t.Fatalf("failed to start servers: %v", err) } defer test.cleanup() cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } testc := testpb.NewTestServiceClient(cc) var wg sync.WaitGroup for i := 0; i < 3; i++ { wg.Add(1) go func() { defer wg.Done() // This RPC blocks until cc is closed. 
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) == codes.DeadlineExceeded { t.Errorf("RPC failed because of deadline after cc is closed; want error the client connection is closing") } cancel() }() } cc.Close() wg.Wait() } func (s) TestNewAddressWhileBlocking(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() test, err := startTestServers(1) if err != nil { t.Fatalf("failed to start servers: %v", err) } defer test.cleanup() cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() testc := testpb.NewTestServiceClient(cc) // The first RPC should fail because there's no address. ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) // The second RPC should succeed. ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) defer cancel() if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("EmptyCall() = _, %v, want _, nil", err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{}}) var wg sync.WaitGroup for i := 0; i < 3; i++ { wg.Add(1) go func() { defer wg.Done() // This RPC blocks until NewAddress is called. 
testc.EmptyCall(context.Background(), &testpb.Empty{}) }() } time.Sleep(50 * time.Millisecond) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) wg.Wait() } func (s) TestOneServerDown(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() backendCount := 3 test, err := startTestServers(backendCount) if err != nil { t.Fatalf("failed to start servers: %v", err) } defer test.cleanup() cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() testc := testpb.NewTestServiceClient(cc) // The first RPC should fail because there's no address. ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } var resolvedAddrs []resolver.Address for i := 0; i < backendCount; i++ { resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) } r.UpdateState(resolver.State{Addresses: resolvedAddrs}) var p peer.Peer // Make sure connections to all servers are up. 
for si := 0; si < backendCount; si++ { var connected bool for i := 0; i < 1000; i++ { if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { t.Fatalf("EmptyCall() = _, %v, want _, ", err) } if p.Addr.String() == test.addresses[si] { connected = true break } time.Sleep(time.Millisecond) } if !connected { t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) } } for i := 0; i < 3*backendCount; i++ { if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { t.Fatalf("EmptyCall() = _, %v, want _, ", err) } if p.Addr.String() != test.addresses[i%backendCount] { t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) } } // Stop one server, RPCs should roundrobin among the remaining servers. backendCount-- test.servers[backendCount].Stop() // Loop until see server[backendCount-1] twice without seeing server[backendCount]. var targetSeen int for i := 0; i < 1000; i++ { if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { targetSeen = 0 t.Logf("EmptyCall() = _, %v, want _, ", err) // Due to a race, this RPC could possibly get the connection that // was closing, and this RPC may fail. Keep trying when this // happens. continue } switch p.Addr.String() { case test.addresses[backendCount-1]: targetSeen++ case test.addresses[backendCount]: // Reset targetSeen if peer is server[backendCount]. targetSeen = 0 } // Break to make sure the last picked address is server[-1], so the following for loop won't be flaky. 
if targetSeen >= 2 { break } } if targetSeen != 2 { t.Fatal("Failed to see server[backendCount-1] twice without seeing server[backendCount]") } for i := 0; i < 3*backendCount; i++ { if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { t.Fatalf("EmptyCall() = _, %v, want _, ", err) } if p.Addr.String() != test.addresses[i%backendCount] { t.Errorf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) } } } func (s) TestAllServersDown(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() backendCount := 3 test, err := startTestServers(backendCount) if err != nil { t.Fatalf("failed to start servers: %v", err) } defer test.cleanup() cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() testc := testpb.NewTestServiceClient(cc) // The first RPC should fail because there's no address. ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } var resolvedAddrs []resolver.Address for i := 0; i < backendCount; i++ { resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) } r.UpdateState(resolver.State{Addresses: resolvedAddrs}) var p peer.Peer // Make sure connections to all servers are up. 
for si := 0; si < backendCount; si++ { var connected bool for i := 0; i < 1000; i++ { if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { t.Fatalf("EmptyCall() = _, %v, want _, ", err) } if p.Addr.String() == test.addresses[si] { connected = true break } time.Sleep(time.Millisecond) } if !connected { t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) } } for i := 0; i < 3*backendCount; i++ { if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { t.Fatalf("EmptyCall() = _, %v, want _, ", err) } if p.Addr.String() != test.addresses[i%backendCount] { t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) } } // All servers are stopped, failfast RPC should fail with unavailable. for i := 0; i < backendCount; i++ { test.servers[i].Stop() } time.Sleep(100 * time.Millisecond) for i := 0; i < 1000; i++ { if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) == codes.Unavailable { return } time.Sleep(time.Millisecond) } t.Fatalf("Failfast RPCs didn't fail with Unavailable after all servers are stopped") } grpc-go-1.29.1/balancer/weightedroundrobin/000077500000000000000000000000001365033716300206225ustar00rootroot00000000000000grpc-go-1.29.1/balancer/weightedroundrobin/weightedroundrobin.go000066400000000000000000000016451365033716300250610ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ // Package weightedroundrobin defines a weighted roundrobin balancer. package weightedroundrobin // Name is the name of weighted_round_robin balancer. const Name = "weighted_round_robin" // AddrInfo will be stored inside Address metadata in order to use weighted roundrobin // balancer. type AddrInfo struct { Weight uint32 } grpc-go-1.29.1/balancer_conn_wrappers.go000066400000000000000000000167541365033716300202240ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "fmt" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) // scStateUpdate contains the subConn and the new state it changed to. type scStateUpdate struct { sc balancer.SubConn state connectivity.State err error } // ccBalancerWrapper is a wrapper on top of cc for balancers. // It implements balancer.ClientConn interface. 
type ccBalancerWrapper struct { cc *ClientConn balancerMu sync.Mutex // synchronizes calls to the balancer balancer balancer.Balancer scBuffer *buffer.Unbounded done *grpcsync.Event mu sync.Mutex subConns map[*acBalancerWrapper]struct{} } func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, scBuffer: buffer.NewUnbounded(), done: grpcsync.NewEvent(), subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() ccb.balancer = b.Build(ccb, bopts) return ccb } // watcher balancer functions sequentially, so the balancer can be implemented // lock-free. func (ccb *ccBalancerWrapper) watcher() { for { select { case t := <-ccb.scBuffer.Get(): ccb.scBuffer.Load() if ccb.done.HasFired() { break } ccb.balancerMu.Lock() su := t.(*scStateUpdate) if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) } else { ccb.balancer.HandleSubConnStateChange(su.sc, su.state) } ccb.balancerMu.Unlock() case <-ccb.done.Done(): } if ccb.done.HasFired() { ccb.balancer.Close() ccb.mu.Lock() scs := ccb.subConns ccb.subConns = nil ccb.mu.Unlock() for acbw := range scs { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) return } } } func (ccb *ccBalancerWrapper) close() { ccb.done.Fire() } func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be // created. tearDown() generates a state change with Shutdown state, we // don't want the balancer to receive this state change. 
So before // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and // this function will be called with (nil, Shutdown). We don't need to call // balancer method in this case. if sc == nil { return } ccb.scBuffer.Put(&scStateUpdate{ sc: sc, state: s, err: err, }) } func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { ccb.balancerMu.Lock() defer ccb.balancerMu.Unlock() if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { return ub.UpdateClientConnState(*ccs) } ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil) return nil } func (ccb *ccBalancerWrapper) resolverError(err error) { if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { ccb.balancerMu.Lock() ub.ResolverError(err) ccb.balancerMu.Unlock() } } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { if len(addrs) <= 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } ccb.mu.Lock() defer ccb.mu.Unlock() if ccb.subConns == nil { return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { return nil, err } acbw := &acBalancerWrapper{ac: ac} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { acbw, ok := sc.(*acBalancerWrapper) if !ok { return } ccb.mu.Lock() defer ccb.mu.Unlock() if ccb.subConns == nil { return } delete(ccb.subConns, acbw) ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { ccb.mu.Lock() defer ccb.mu.Unlock() if ccb.subConns == nil { return } // Update picker before updating state. 
Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is // updated later, we could call the "connecting" picker when the state is // updated, and then call the "ready" picker after the picker gets updated. ccb.cc.blockingpicker.updatePicker(p) ccb.cc.csMgr.updateState(s) } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.mu.Lock() defer ccb.mu.Unlock() if ccb.subConns == nil { return } // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is // updated later, we could call the "connecting" picker when the state is // updated, and then call the "ready" picker after the picker gets updated. ccb.cc.blockingpicker.updatePickerV2(s.Picker) ccb.cc.csMgr.updateState(s.ConnectivityState) } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { ccb.cc.resolveNow(o) } func (ccb *ccBalancerWrapper) Target() string { return ccb.cc.target } // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { mu sync.Mutex ac *addrConn } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { acbw.mu.Lock() defer acbw.mu.Unlock() if len(addrs) <= 0 { acbw.ac.tearDown(errConnDrain) return } if !acbw.ac.tryUpdateAddrs(addrs) { cc := acbw.ac.cc opts := acbw.ac.scopts acbw.ac.mu.Lock() // Set old ac.acbw to nil so the Shutdown state update will be ignored // by balancer. // // TODO(bar) the state transition could be wrong when tearDown() old ac // and creating new ac, fix the transition. 
acbw.ac.acbw = nil acbw.ac.mu.Unlock() acState := acbw.ac.getState() acbw.ac.tearDown(errConnDrain) if acState == connectivity.Shutdown { return } ac, err := cc.newAddrConn(addrs, opts) if err != nil { channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return } acbw.ac = ac ac.mu.Lock() ac.acbw = acbw ac.mu.Unlock() if acState != connectivity.Idle { ac.connect() } } } func (acbw *acBalancerWrapper) Connect() { acbw.mu.Lock() defer acbw.mu.Unlock() acbw.ac.connect() } func (acbw *acBalancerWrapper) getAddrConn() *addrConn { acbw.mu.Lock() defer acbw.mu.Unlock() return acbw.ac } grpc-go-1.29.1/balancer_conn_wrappers_test.go000066400000000000000000000102021365033716300212410ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpc import ( "fmt" "net" "testing" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" ) var _ balancer.V2Balancer = &funcBalancer{} type funcBalancer struct { updateClientConnState func(s balancer.ClientConnState) error } func (*funcBalancer) HandleSubConnStateChange(balancer.SubConn, connectivity.State) { panic("unimplemented") // v1 API } func (*funcBalancer) HandleResolvedAddrs([]resolver.Address, error) { panic("unimplemented") // v1 API } func (b *funcBalancer) UpdateClientConnState(s balancer.ClientConnState) error { return b.updateClientConnState(s) } func (*funcBalancer) ResolverError(error) {} func (*funcBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) { panic("unimplemented") // we never have sub-conns } func (*funcBalancer) Close() {} type funcBalancerBuilder struct { name string instance *funcBalancer } func (b *funcBalancerBuilder) Build(balancer.ClientConn, balancer.BuildOptions) balancer.Balancer { return b.instance } func (b *funcBalancerBuilder) Name() string { return b.name } // TestBalancerErrorResolverPolling injects balancer errors and verifies // ResolveNow is called on the resolver with the appropriate backoff strategy // being consulted between ResolveNow calls. func (s) TestBalancerErrorResolverPolling(t *testing.T) { // The test balancer will return ErrBadResolverState iff the // ClientConnState contains no addresses. fb := &funcBalancer{ updateClientConnState: func(s balancer.ClientConnState) error { if len(s.ResolverState.Addresses) == 0 { return balancer.ErrBadResolverState } return nil }, } const balName = "BalancerErrorResolverPolling" balancer.Register(&funcBalancerBuilder{name: balName, instance: fb}) testResolverErrorPolling(t, func(r *manual.Resolver) { // No addresses so the balancer will fail. 
r.CC.UpdateState(resolver.State{}) }, func(r *manual.Resolver) { // UpdateState will block if ResolveNow is being called (which blocks on // rn), so call it in a goroutine. Include some address so the balancer // will be happy. go r.CC.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "x"}}}) }, WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, balName))) } // TestRoundRobinZeroAddressesResolverPolling reports no addresses to the round // robin balancer and verifies ResolveNow is called on the resolver with the // appropriate backoff strategy being consulted between ResolveNow calls. func (s) TestRoundRobinZeroAddressesResolverPolling(t *testing.T) { // We need to start a real server or else the connecting loop will call // ResolveNow after every iteration, even after a valid resolver result is // returned. lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening. Err: %v", err) } defer lis.Close() s := NewServer() defer s.Stop() go s.Serve(lis) testResolverErrorPolling(t, func(r *manual.Resolver) { // No addresses so the balancer will fail. r.CC.UpdateState(resolver.State{}) }, func(r *manual.Resolver) { // UpdateState will block if ResolveNow is being called (which // blocks on rn), so call it in a goroutine. Include a valid // address so the balancer will be happy. go r.CC.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) }, WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, roundrobin.Name))) } grpc-go-1.29.1/balancer_switching_test.go000066400000000000000000000401571365033716300203740ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "context" "fmt" "math" "testing" "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" ) var _ balancer.Builder = &magicalLB{} var _ balancer.Balancer = &magicalLB{} // magicalLB is a ringer for grpclb. It is used to avoid circular dependencies on the grpclb package type magicalLB struct{} func (b *magicalLB) Name() string { return "grpclb" } func (b *magicalLB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { return b } func (b *magicalLB) HandleSubConnStateChange(balancer.SubConn, connectivity.State) {} func (b *magicalLB) HandleResolvedAddrs([]resolver.Address, error) {} func (b *magicalLB) Close() {} func init() { balancer.Register(&magicalLB{}) } func checkPickFirst(cc *ClientConn, servers []*server) error { var ( req = "port" reply string err error ) connected := false for i := 0; i < 5000; i++ { if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); errorDesc(err) == servers[0].port { if connected { // connected is set to false if peer is not server[0]. So if // connected is true here, this is the second time we saw // server[0] in a row. Break because pickfirst is in effect. 
break } connected = true } else { connected = false } time.Sleep(time.Millisecond) } if !connected { return fmt.Errorf("pickfirst is not in effect after 5 second, EmptyCall() = _, %v, want _, %v", err, servers[0].port) } // The following RPCs should all succeed with the first server. for i := 0; i < 3; i++ { err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply) if errorDesc(err) != servers[0].port { return fmt.Errorf("index %d: want peer %v, got peer %v", i, servers[0].port, err) } } return nil } func checkRoundRobin(cc *ClientConn, servers []*server) error { var ( req = "port" reply string err error ) // Make sure connections to all servers are up. for i := 0; i < 2; i++ { // Do this check twice, otherwise the first RPC's transport may still be // picked by the closing pickfirst balancer, and the test becomes flaky. for _, s := range servers { var up bool for i := 0; i < 5000; i++ { if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); errorDesc(err) == s.port { up = true break } time.Sleep(time.Millisecond) } if !up { return fmt.Errorf("server %v is not up within 5 second", s.port) } } } serverCount := len(servers) for i := 0; i < 3*serverCount; i++ { err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply) if errorDesc(err) != servers[i%serverCount].port { return fmt.Errorf("index %d: want peer %v, got peer %v", i, servers[i%serverCount].port, err) } } return nil } func (s) TestSwitchBalancer(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() const numServers = 2 servers, _, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() addrs := []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}} r.UpdateState(resolver.State{Addresses: addrs}) // The default balancer is pickfirst. 
if err := checkPickFirst(cc, servers); err != nil { t.Fatalf("check pickfirst returned non-nil error: %v", err) } // Switch to roundrobin. cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`), Addresses: addrs}, nil) if err := checkRoundRobin(cc, servers); err != nil { t.Fatalf("check roundrobin returned non-nil error: %v", err) } // Switch to pickfirst. cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "pick_first"}`), Addresses: addrs}, nil) if err := checkPickFirst(cc, servers); err != nil { t.Fatalf("check pickfirst returned non-nil error: %v", err) } } // Test that balancer specified by dial option will not be overridden. func (s) TestBalancerDialOption(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() const numServers = 2 servers, _, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{}), WithBalancerName(roundrobin.Name)) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() addrs := []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}} r.UpdateState(resolver.State{Addresses: addrs}) // The init balancer is roundrobin. if err := checkRoundRobin(cc, servers); err != nil { t.Fatalf("check roundrobin returned non-nil error: %v", err) } // Switch to pickfirst. cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "pick_first"}`), Addresses: addrs}, nil) // Balancer is still roundrobin. if err := checkRoundRobin(cc, servers); err != nil { t.Fatalf("check roundrobin returned non-nil error: %v", err) } } // First addr update contains grpclb. 
func (s) TestSwitchBalancerGRPCLBFirst(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() // ClientConn will switch balancer to grpclb when receives an address of // type GRPCLB. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}, {Addr: "grpclb", Type: resolver.GRPCLB}}}) var isGRPCLB bool for i := 0; i < 5000; i++ { cc.mu.Lock() isGRPCLB = cc.curBalancerName == "grpclb" cc.mu.Unlock() if isGRPCLB { break } time.Sleep(time.Millisecond) } if !isGRPCLB { t.Fatalf("after 5 second, cc.balancer is of type %v, not grpclb", cc.curBalancerName) } // New update containing new backend and new grpclb. Should not switch // balancer. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend2"}, {Addr: "grpclb2", Type: resolver.GRPCLB}}}) for i := 0; i < 200; i++ { cc.mu.Lock() isGRPCLB = cc.curBalancerName == "grpclb" cc.mu.Unlock() if !isGRPCLB { break } time.Sleep(time.Millisecond) } if !isGRPCLB { t.Fatalf("within 200 ms, cc.balancer switched to !grpclb, want grpclb") } var isPickFirst bool // Switch balancer to pickfirst. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}}) for i := 0; i < 5000; i++ { cc.mu.Lock() isPickFirst = cc.curBalancerName == PickFirstBalancerName cc.mu.Unlock() if isPickFirst { break } time.Sleep(time.Millisecond) } if !isPickFirst { t.Fatalf("after 5 second, cc.balancer is of type %v, not pick_first", cc.curBalancerName) } } // First addr update does not contain grpclb. 
func (s) TestSwitchBalancerGRPCLBSecond(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}}) var isPickFirst bool for i := 0; i < 5000; i++ { cc.mu.Lock() isPickFirst = cc.curBalancerName == PickFirstBalancerName cc.mu.Unlock() if isPickFirst { break } time.Sleep(time.Millisecond) } if !isPickFirst { t.Fatalf("after 5 second, cc.balancer is of type %v, not pick_first", cc.curBalancerName) } // ClientConn will switch balancer to grpclb when receives an address of // type GRPCLB. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}, {Addr: "grpclb", Type: resolver.GRPCLB}}}) var isGRPCLB bool for i := 0; i < 5000; i++ { cc.mu.Lock() isGRPCLB = cc.curBalancerName == "grpclb" cc.mu.Unlock() if isGRPCLB { break } time.Sleep(time.Millisecond) } if !isGRPCLB { t.Fatalf("after 5 second, cc.balancer is of type %v, not grpclb", cc.curBalancerName) } // New update containing new backend and new grpclb. Should not switch // balancer. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend2"}, {Addr: "grpclb2", Type: resolver.GRPCLB}}}) for i := 0; i < 200; i++ { cc.mu.Lock() isGRPCLB = cc.curBalancerName == "grpclb" cc.mu.Unlock() if !isGRPCLB { break } time.Sleep(time.Millisecond) } if !isGRPCLB { t.Fatalf("within 200 ms, cc.balancer switched to !grpclb, want grpclb") } // Switch balancer back. 
r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}}) for i := 0; i < 5000; i++ { cc.mu.Lock() isPickFirst = cc.curBalancerName == PickFirstBalancerName cc.mu.Unlock() if isPickFirst { break } time.Sleep(time.Millisecond) } if !isPickFirst { t.Fatalf("after 5 second, cc.balancer is of type %v, not pick_first", cc.curBalancerName) } } // Test that if the current balancer is roundrobin, after switching to grpclb, // when the resolved address doesn't contain grpclb addresses, balancer will be // switched back to roundrobin. func (s) TestSwitchBalancerGRPCLBRoundRobin(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() sc := parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}, ServiceConfig: sc}) var isRoundRobin bool for i := 0; i < 5000; i++ { cc.mu.Lock() isRoundRobin = cc.curBalancerName == "round_robin" cc.mu.Unlock() if isRoundRobin { break } time.Sleep(time.Millisecond) } if !isRoundRobin { t.Fatalf("after 5 second, cc.balancer is of type %v, not round_robin", cc.curBalancerName) } // ClientConn will switch balancer to grpclb when receives an address of // type GRPCLB. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "grpclb", Type: resolver.GRPCLB}}, ServiceConfig: sc}) var isGRPCLB bool for i := 0; i < 5000; i++ { cc.mu.Lock() isGRPCLB = cc.curBalancerName == "grpclb" cc.mu.Unlock() if isGRPCLB { break } time.Sleep(time.Millisecond) } if !isGRPCLB { t.Fatalf("after 5 second, cc.balancer is of type %v, not grpclb", cc.curBalancerName) } // Switch balancer back. 
r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}, ServiceConfig: sc}) for i := 0; i < 5000; i++ { cc.mu.Lock() isRoundRobin = cc.curBalancerName == "round_robin" cc.mu.Unlock() if isRoundRobin { break } time.Sleep(time.Millisecond) } if !isRoundRobin { t.Fatalf("after 5 second, cc.balancer is of type %v, not round_robin", cc.curBalancerName) } } // Test that if resolved address list contains grpclb, the balancer option in // service config won't take effect. But when there's no grpclb address in a new // resolved address list, balancer will be switched to the new one. func (s) TestSwitchBalancerGRPCLBServiceConfig(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}}) var isPickFirst bool for i := 0; i < 5000; i++ { cc.mu.Lock() isPickFirst = cc.curBalancerName == PickFirstBalancerName cc.mu.Unlock() if isPickFirst { break } time.Sleep(time.Millisecond) } if !isPickFirst { t.Fatalf("after 5 second, cc.balancer is of type %v, not pick_first", cc.curBalancerName) } // ClientConn will switch balancer to grpclb when receives an address of // type GRPCLB. 
addrs := []resolver.Address{{Addr: "grpclb", Type: resolver.GRPCLB}} r.UpdateState(resolver.State{Addresses: addrs}) var isGRPCLB bool for i := 0; i < 5000; i++ { cc.mu.Lock() isGRPCLB = cc.curBalancerName == "grpclb" cc.mu.Unlock() if isGRPCLB { break } time.Sleep(time.Millisecond) } if !isGRPCLB { t.Fatalf("after 5 second, cc.balancer is of type %v, not grpclb", cc.curBalancerName) } sc := parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`) r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: sc}) var isRoundRobin bool for i := 0; i < 200; i++ { cc.mu.Lock() isRoundRobin = cc.curBalancerName == "round_robin" cc.mu.Unlock() if isRoundRobin { break } time.Sleep(time.Millisecond) } // Balancer should NOT switch to round_robin because resolved list contains // grpclb. if isRoundRobin { t.Fatalf("within 200 ms, cc.balancer switched to round_robin, want grpclb") } // Switch balancer back. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}, ServiceConfig: sc}) for i := 0; i < 5000; i++ { cc.mu.Lock() isRoundRobin = cc.curBalancerName == "round_robin" cc.mu.Unlock() if isRoundRobin { break } time.Sleep(time.Millisecond) } if !isRoundRobin { t.Fatalf("after 5 second, cc.balancer is of type %v, not round_robin", cc.curBalancerName) } } // Test that when switching to grpclb fails because grpclb is not registered, // the fallback balancer will only get backend addresses, not the grpclb server // address. // // The tests sends 3 server addresses (all backends) as resolved addresses, but // claim the first one is grpclb server. The all RPCs should all be send to the // other addresses, not the first one. 
func (s) TestSwitchBalancerGRPCLBWithGRPCLBNotRegistered(t *testing.T) { internal.BalancerUnregister("grpclb") defer balancer.Register(&magicalLB{}) r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() const numServers = 3 servers, _, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}}}) // The default balancer is pickfirst. if err := checkPickFirst(cc, servers[1:]); err != nil { t.Fatalf("check pickfirst returned non-nil error: %v", err) } // Try switching to grpclb by sending servers[0] as grpclb address. It's // expected that servers[0] will be filtered out, so it will not be used by // the balancer. // // If the filtering failed, servers[0] will be used for RPCs and the RPCs // will succeed. The following checks will catch this and fail. addrs := []resolver.Address{ {Addr: servers[0].addr, Type: resolver.GRPCLB}, {Addr: servers[1].addr}, {Addr: servers[2].addr}} r.UpdateState(resolver.State{Addresses: addrs}) // Still check for pickfirst, but only with server[1] and server[2]. if err := checkPickFirst(cc, servers[1:]); err != nil { t.Fatalf("check pickfirst returned non-nil error: %v", err) } // Switch to roundrobin, and check against server[1] and server[2]. 
cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`), Addresses: addrs}, nil) if err := checkRoundRobin(cc, servers[1:]); err != nil { t.Fatalf("check roundrobin returned non-nil error: %v", err) } } func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult { scpr := r.CC.ParseServiceConfig(s) if scpr.Err != nil { panic(fmt.Sprintf("Error parsing config %q: %v", s, scpr.Err)) } return scpr } grpc-go-1.29.1/balancer_test.go000066400000000000000000000577241365033716300163250ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpc import ( "context" "fmt" "math" "strconv" "sync" "testing" "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/naming" "google.golang.org/grpc/status" ) func pickFirstBalancerV1(r naming.Resolver) Balancer { return &pickFirst{&roundRobin{r: r}} } type testWatcher struct { // the channel to receives name resolution updates update chan *naming.Update // the side channel to get to know how many updates in a batch side chan int // the channel to notify update injector that the update reading is done readDone chan int } func (w *testWatcher) Next() (updates []*naming.Update, err error) { n := <-w.side if n == 0 { return nil, fmt.Errorf("w.side is closed") } for i := 0; i < n; i++ { u := <-w.update if u != nil { updates = append(updates, u) } } w.readDone <- 0 return } func (w *testWatcher) Close() { close(w.side) } // Inject naming resolution updates to the testWatcher. func (w *testWatcher) inject(updates []*naming.Update) { w.side <- len(updates) for _, u := range updates { w.update <- u } <-w.readDone } type testNameResolver struct { w *testWatcher addr string } func (r *testNameResolver) Resolve(target string) (naming.Watcher, error) { r.w = &testWatcher{ update: make(chan *naming.Update, 1), side: make(chan int, 1), readDone: make(chan int), } r.w.side <- 1 r.w.update <- &naming.Update{ Op: naming.Add, Addr: r.addr, } go func() { <-r.w.readDone }() return r.w, nil } func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, *testNameResolver, func()) { var servers []*server for i := 0; i < numServers; i++ { s := newTestServer() servers = append(servers, s) go s.start(t, 0, maxStreams) s.wait(t, 2*time.Second) } // Point to server[0] addr := "localhost:" + servers[0].port return servers, &testNameResolver{ addr: addr, }, func() { for i := 0; i < numServers; i++ { servers[i].stop() } } } func (s) TestNameDiscovery(t *testing.T) { // Start 2 servers on 2 ports. 
numServers := 2 servers, r, cleanup := startServers(t, numServers, math.MaxUint32) defer cleanup() cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } defer cc.Close() req := "port" var reply string if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port { t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port) } // Inject the name resolution change to remove servers[0] and add servers[1]. var updates []*naming.Update updates = append(updates, &naming.Update{ Op: naming.Delete, Addr: "localhost:" + servers[0].port, }) updates = append(updates, &naming.Update{ Op: naming.Add, Addr: "localhost:" + servers[1].port, }) r.w.inject(updates) // Loop until the rpcs in flight talks to servers[1]. for { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port { break } time.Sleep(10 * time.Millisecond) } } func (s) TestEmptyAddrs(t *testing.T) { servers, r, cleanup := startServers(t, 1, math.MaxUint32) defer cleanup() cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } defer cc.Close() var reply string if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse { t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, reply = %q, want %q, ", err, reply, expectedResponse) } // Inject name resolution change to remove the server so that there is no address // available after that. u := &naming.Update{ Op: naming.Delete, Addr: "localhost:" + servers[0].port, } r.w.inject([]*naming.Update{u}) // Loop until the above updates apply. 
for { time.Sleep(10 * time.Millisecond) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) if err := cc.Invoke(ctx, "/foo/bar", &expectedRequest, &reply); err != nil { cancel() break } cancel() } } func (s) TestRoundRobin(t *testing.T) { // Start 3 servers on 3 ports. numServers := 3 servers, r, cleanup := startServers(t, numServers, math.MaxUint32) defer cleanup() cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } defer cc.Close() // Add servers[1] to the service discovery. u := &naming.Update{ Op: naming.Add, Addr: "localhost:" + servers[1].port, } r.w.inject([]*naming.Update{u}) req := "port" var reply string // Loop until servers[1] is up for { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port { break } time.Sleep(10 * time.Millisecond) } // Add server2[2] to the service discovery. u = &naming.Update{ Op: naming.Add, Addr: "localhost:" + servers[2].port, } r.w.inject([]*naming.Update{u}) // Loop until both servers[2] are up. for { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[2].port { break } time.Sleep(10 * time.Millisecond) } // Check the incoming RPCs served in a round-robin manner. 
for i := 0; i < 10; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[i%numServers].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", i, err, servers[i%numServers].port) } } } func (s) TestCloseWithPendingRPC(t *testing.T) { servers, r, cleanup := startServers(t, 1, math.MaxUint32) defer cleanup() cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } defer cc.Close() var reply string if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err != nil { t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port) } // Remove the server. updates := []*naming.Update{{ Op: naming.Delete, Addr: "localhost:" + servers[0].port, }} r.w.inject(updates) // Loop until the above update applies. for { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) if err := cc.Invoke(ctx, "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); status.Code(err) == codes.DeadlineExceeded { cancel() break } time.Sleep(10 * time.Millisecond) cancel() } // Issue 2 RPCs which should be completed with error status once cc is closed. 
var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() var reply string if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err == nil { t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) } }() go func() { defer wg.Done() var reply string time.Sleep(5 * time.Millisecond) if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err == nil { t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) } }() time.Sleep(5 * time.Millisecond) cc.Close() wg.Wait() } func (s) TestGetOnWaitChannel(t *testing.T) { servers, r, cleanup := startServers(t, 1, math.MaxUint32) defer cleanup() cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } defer cc.Close() // Remove all servers so that all upcoming RPCs will block on waitCh. updates := []*naming.Update{{ Op: naming.Delete, Addr: "localhost:" + servers[0].port, }} r.w.inject(updates) for { var reply string ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) if err := cc.Invoke(ctx, "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); status.Code(err) == codes.DeadlineExceeded { cancel() break } cancel() time.Sleep(10 * time.Millisecond) } var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() var reply string if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err != nil { t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) } }() // Add a connected server to get the above RPC through. updates = []*naming.Update{{ Op: naming.Add, Addr: "localhost:" + servers[0].port, }} r.w.inject(updates) // Wait until the above RPC succeeds. wg.Wait() } func (s) TestOneServerDown(t *testing.T) { // Start 2 servers. 
numServers := 2 servers, r, cleanup := startServers(t, numServers, math.MaxUint32) defer cleanup() cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } defer cc.Close() // Add servers[1] to the service discovery. var updates []*naming.Update updates = append(updates, &naming.Update{ Op: naming.Add, Addr: "localhost:" + servers[1].port, }) r.w.inject(updates) req := "port" var reply string // Loop until servers[1] is up for { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port { break } time.Sleep(10 * time.Millisecond) } var wg sync.WaitGroup numRPC := 100 sleepDuration := 10 * time.Millisecond wg.Add(1) go func() { time.Sleep(sleepDuration) // After sleepDuration, kill server[0]. servers[0].stop() wg.Done() }() // All non-failfast RPCs should not block because there's at least one connection available. for i := 0; i < numRPC; i++ { wg.Add(1) go func() { time.Sleep(sleepDuration) // After sleepDuration, invoke RPC. // server[0] is killed around the same time to make it racy between balancer and gRPC internals. cc.Invoke(context.Background(), "/foo/bar", &req, &reply, WaitForReady(true)) wg.Done() }() } wg.Wait() } func (s) TestOneAddressRemoval(t *testing.T) { // Start 2 servers. numServers := 2 servers, r, cleanup := startServers(t, numServers, math.MaxUint32) defer cleanup() cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } defer cc.Close() // Add servers[1] to the service discovery. 
var updates []*naming.Update updates = append(updates, &naming.Update{ Op: naming.Add, Addr: "localhost:" + servers[1].port, }) r.w.inject(updates) req := "port" var reply string // Loop until servers[1] is up for { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port { break } time.Sleep(10 * time.Millisecond) } var wg sync.WaitGroup numRPC := 100 sleepDuration := 10 * time.Millisecond wg.Add(1) go func() { time.Sleep(sleepDuration) // After sleepDuration, delete server[0]. var updates []*naming.Update updates = append(updates, &naming.Update{ Op: naming.Delete, Addr: "localhost:" + servers[0].port, }) r.w.inject(updates) wg.Done() }() // All non-failfast RPCs should not fail because there's at least one connection available. for i := 0; i < numRPC; i++ { wg.Add(1) go func() { var reply string time.Sleep(sleepDuration) // After sleepDuration, invoke RPC. // server[0] is removed around the same time to make it racy between balancer and gRPC internals. 
if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err != nil { t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want nil", err) } wg.Done() }() } wg.Wait() } func checkServerUp(t *testing.T, currentServer *server) { req := "port" port := currentServer.port cc, err := Dial("passthrough:///localhost:"+port, WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } defer cc.Close() var reply string for { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == port { break } time.Sleep(10 * time.Millisecond) } } func (s) TestPickFirstEmptyAddrs(t *testing.T) { servers, r, cleanup := startServers(t, 1, math.MaxUint32) defer cleanup() cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } defer cc.Close() var reply string if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse { t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, reply = %q, want %q, ", err, reply, expectedResponse) } // Inject name resolution change to remove the server so that there is no address // available after that. u := &naming.Update{ Op: naming.Delete, Addr: "localhost:" + servers[0].port, } r.w.inject([]*naming.Update{u}) // Loop until the above updates apply. 
for { time.Sleep(10 * time.Millisecond) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) if err := cc.Invoke(ctx, "/foo/bar", &expectedRequest, &reply); err != nil { cancel() break } cancel() } } func (s) TestPickFirstCloseWithPendingRPC(t *testing.T) { servers, r, cleanup := startServers(t, 1, math.MaxUint32) defer cleanup() cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } defer cc.Close() var reply string if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err != nil { t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want %s", err, servers[0].port) } // Remove the server. updates := []*naming.Update{{ Op: naming.Delete, Addr: "localhost:" + servers[0].port, }} r.w.inject(updates) // Loop until the above update applies. for { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) if err := cc.Invoke(ctx, "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); status.Code(err) == codes.DeadlineExceeded { cancel() break } time.Sleep(10 * time.Millisecond) cancel() } // Issue 2 RPCs which should be completed with error status once cc is closed. var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() var reply string if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err == nil { t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) } }() go func() { defer wg.Done() var reply string time.Sleep(5 * time.Millisecond) if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err == nil { t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want not nil", err) } }() time.Sleep(5 * time.Millisecond) cc.Close() wg.Wait() } func (s) TestPickFirstOrderAllServerUp(t *testing.T) { // Start 3 servers on 3 ports. 
numServers := 3 servers, r, cleanup := startServers(t, numServers, math.MaxUint32) defer cleanup() cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } defer cc.Close() // Add servers[1] and [2] to the service discovery. u := &naming.Update{ Op: naming.Add, Addr: "localhost:" + servers[1].port, } r.w.inject([]*naming.Update{u}) u = &naming.Update{ Op: naming.Add, Addr: "localhost:" + servers[2].port, } r.w.inject([]*naming.Update{u}) // Loop until all 3 servers are up checkServerUp(t, servers[0]) checkServerUp(t, servers[1]) checkServerUp(t, servers[2]) // Check the incoming RPCs served in server[0] req := "port" var reply string for i := 0; i < 20; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) } time.Sleep(10 * time.Millisecond) } // Delete server[0] in the balancer, the incoming RPCs served in server[1] // For test addrconn, close server[0] instead u = &naming.Update{ Op: naming.Delete, Addr: "localhost:" + servers[0].port, } r.w.inject([]*naming.Update{u}) // Loop until it changes to server[1] for { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port { break } time.Sleep(10 * time.Millisecond) } for i := 0; i < 20; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) } time.Sleep(10 * time.Millisecond) } // Add server[0] back to the balancer, the incoming RPCs served in server[1] // Add is append operation, the order of Notify now is {server[1].port server[2].port server[0].port} u = &naming.Update{ Op: naming.Add, Addr: "localhost:" + 
servers[0].port, } r.w.inject([]*naming.Update{u}) for i := 0; i < 20; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) } time.Sleep(10 * time.Millisecond) } // Delete server[1] in the balancer, the incoming RPCs served in server[2] u = &naming.Update{ Op: naming.Delete, Addr: "localhost:" + servers[1].port, } r.w.inject([]*naming.Update{u}) for { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[2].port { break } time.Sleep(1 * time.Second) } for i := 0; i < 20; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[2].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 2, err, servers[2].port) } time.Sleep(10 * time.Millisecond) } // Delete server[2] in the balancer, the incoming RPCs served in server[0] u = &naming.Update{ Op: naming.Delete, Addr: "localhost:" + servers[2].port, } r.w.inject([]*naming.Update{u}) for { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { break } time.Sleep(1 * time.Second) } for i := 0; i < 20; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) } time.Sleep(10 * time.Millisecond) } } func (s) TestPickFirstOrderOneServerDown(t *testing.T) { // Start 3 servers on 3 ports. 
numServers := 3 servers, r, cleanup := startServers(t, numServers, math.MaxUint32) defer cleanup() cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } defer cc.Close() // Add servers[1] and [2] to the service discovery. u := &naming.Update{ Op: naming.Add, Addr: "localhost:" + servers[1].port, } r.w.inject([]*naming.Update{u}) u = &naming.Update{ Op: naming.Add, Addr: "localhost:" + servers[2].port, } r.w.inject([]*naming.Update{u}) // Loop until all 3 servers are up checkServerUp(t, servers[0]) checkServerUp(t, servers[1]) checkServerUp(t, servers[2]) // Check the incoming RPCs served in server[0] req := "port" var reply string for i := 0; i < 20; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) } time.Sleep(10 * time.Millisecond) } // server[0] down, incoming RPCs served in server[1], but the order of Notify still remains // {server[0] server[1] server[2]} servers[0].stop() // Loop until it changes to server[1] for { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port { break } time.Sleep(10 * time.Millisecond) } for i := 0; i < 20; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) } time.Sleep(10 * time.Millisecond) } // up the server[0] back, the incoming RPCs served in server[1] p, _ := strconv.Atoi(servers[0].port) servers[0] = newTestServer() go servers[0].start(t, p, math.MaxUint32) defer servers[0].stop() servers[0].wait(t, 2*time.Second) checkServerUp(t, servers[0]) for i := 0; i < 20; i++ { if err := 
cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) } time.Sleep(10 * time.Millisecond) } // Delete server[1] in the balancer, the incoming RPCs served in server[0] u = &naming.Update{ Op: naming.Delete, Addr: "localhost:" + servers[1].port, } r.w.inject([]*naming.Update{u}) for { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { break } time.Sleep(1 * time.Second) } for i := 0; i < 20; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) } time.Sleep(10 * time.Millisecond) } } func (s) TestPickFirstOneAddressRemoval(t *testing.T) { // Start 2 servers. numServers := 2 servers, r, cleanup := startServers(t, numServers, math.MaxUint32) defer cleanup() cc, err := Dial("passthrough:///localhost:"+servers[0].port, WithBalancer(pickFirstBalancerV1(r)), WithBlock(), WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } defer cc.Close() // Add servers[1] to the service discovery. var updates []*naming.Update updates = append(updates, &naming.Update{ Op: naming.Add, Addr: "localhost:" + servers[1].port, }) r.w.inject(updates) // Create a new cc to Loop until servers[1] is up checkServerUp(t, servers[0]) checkServerUp(t, servers[1]) var wg sync.WaitGroup numRPC := 100 sleepDuration := 10 * time.Millisecond wg.Add(1) go func() { time.Sleep(sleepDuration) // After sleepDuration, delete server[0]. var updates []*naming.Update updates = append(updates, &naming.Update{ Op: naming.Delete, Addr: "localhost:" + servers[0].port, }) r.w.inject(updates) wg.Done() }() // All non-failfast RPCs should not fail because there's at least one connection available. 
for i := 0; i < numRPC; i++ { wg.Add(1) go func() { var reply string time.Sleep(sleepDuration) // After sleepDuration, invoke RPC. // server[0] is removed around the same time to make it racy between balancer and gRPC internals. if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply, WaitForReady(true)); err != nil { t.Errorf("grpc.Invoke(_, _, _, _, _) = %v, want nil", err) } wg.Done() }() } wg.Wait() } grpc-go-1.29.1/balancer_v1_wrapper.go000066400000000000000000000217551365033716300174270ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/resolver" ) type balancerWrapperBuilder struct { b Balancer // The v1 balancer. 
} func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { bwb.b.Start(opts.Target.Endpoint, BalancerConfig{ DialCreds: opts.DialCreds, Dialer: opts.Dialer, }) _, pickfirst := bwb.b.(*pickFirst) bw := &balancerWrapper{ balancer: bwb.b, pickfirst: pickfirst, cc: cc, targetAddr: opts.Target.Endpoint, startCh: make(chan struct{}), conns: make(map[resolver.Address]balancer.SubConn), connSt: make(map[balancer.SubConn]*scState), csEvltr: &balancer.ConnectivityStateEvaluator{}, state: connectivity.Idle, } cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw}) go bw.lbWatcher() return bw } func (bwb *balancerWrapperBuilder) Name() string { return "wrapper" } type scState struct { addr Address // The v1 address type. s connectivity.State down func(error) } type balancerWrapper struct { balancer Balancer // The v1 balancer. pickfirst bool cc balancer.ClientConn targetAddr string // Target without the scheme. mu sync.Mutex conns map[resolver.Address]balancer.SubConn connSt map[balancer.SubConn]*scState // This channel is closed when handling the first resolver result. // lbWatcher blocks until this is closed, to avoid race between // - NewSubConn is created, cc wants to notify balancer of state changes; // - Build hasn't return, cc doesn't have access to balancer. startCh chan struct{} // To aggregate the connectivity state. csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State } // lbWatcher watches the Notify channel of the balancer and manages // connections accordingly. func (bw *balancerWrapper) lbWatcher() { <-bw.startCh notifyCh := bw.balancer.Notify() if notifyCh == nil { // There's no resolver in the balancer. Connect directly. a := resolver.Address{ Addr: bw.targetAddr, Type: resolver.Backend, } sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) if err != nil { grpclog.Warningf("Error creating connection to %v. 
Err: %v", a, err) } else { bw.mu.Lock() bw.conns[a] = sc bw.connSt[sc] = &scState{ addr: Address{Addr: bw.targetAddr}, s: connectivity.Idle, } bw.mu.Unlock() sc.Connect() } return } for addrs := range notifyCh { grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs) if bw.pickfirst { var ( oldA resolver.Address oldSC balancer.SubConn ) bw.mu.Lock() for oldA, oldSC = range bw.conns { break } bw.mu.Unlock() if len(addrs) <= 0 { if oldSC != nil { // Teardown old sc. bw.mu.Lock() delete(bw.conns, oldA) delete(bw.connSt, oldSC) bw.mu.Unlock() bw.cc.RemoveSubConn(oldSC) } continue } var newAddrs []resolver.Address for _, a := range addrs { newAddr := resolver.Address{ Addr: a.Addr, Type: resolver.Backend, // All addresses from balancer are all backends. ServerName: "", Metadata: a.Metadata, } newAddrs = append(newAddrs, newAddr) } if oldSC == nil { // Create new sc. sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) if err != nil { grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) } else { bw.mu.Lock() // For pickfirst, there should be only one SubConn, so the // address doesn't matter. All states updating (up and down) // and picking should all happen on that only SubConn. bw.conns[resolver.Address{}] = sc bw.connSt[sc] = &scState{ addr: addrs[0], // Use the first address. s: connectivity.Idle, } bw.mu.Unlock() sc.Connect() } } else { bw.mu.Lock() bw.connSt[oldSC].addr = addrs[0] bw.mu.Unlock() oldSC.UpdateAddresses(newAddrs) } } else { var ( add []resolver.Address // Addresses need to setup connections. del []balancer.SubConn // Connections need to tear down. ) resAddrs := make(map[resolver.Address]Address) for _, a := range addrs { resAddrs[resolver.Address{ Addr: a.Addr, Type: resolver.Backend, // All addresses from balancer are all backends. 
ServerName: "", Metadata: a.Metadata, }] = a } bw.mu.Lock() for a := range resAddrs { if _, ok := bw.conns[a]; !ok { add = append(add, a) } } for a, c := range bw.conns { if _, ok := resAddrs[a]; !ok { del = append(del, c) delete(bw.conns, a) // Keep the state of this sc in bw.connSt until its state becomes Shutdown. } } bw.mu.Unlock() for _, a := range add { sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) if err != nil { grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) } else { bw.mu.Lock() bw.conns[a] = sc bw.connSt[sc] = &scState{ addr: resAddrs[a], s: connectivity.Idle, } bw.mu.Unlock() sc.Connect() } } for _, c := range del { bw.cc.RemoveSubConn(c) } } } } func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { bw.mu.Lock() defer bw.mu.Unlock() scSt, ok := bw.connSt[sc] if !ok { return } if s == connectivity.Idle { sc.Connect() } oldS := scSt.s scSt.s = s if oldS != connectivity.Ready && s == connectivity.Ready { scSt.down = bw.balancer.Up(scSt.addr) } else if oldS == connectivity.Ready && s != connectivity.Ready { if scSt.down != nil { scSt.down(errConnClosing) } } sa := bw.csEvltr.RecordTransition(oldS, s) if bw.state != sa { bw.state = sa } bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw}) if s == connectivity.Shutdown { // Remove state for this sc. delete(bw.connSt, sc) } } func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { bw.mu.Lock() defer bw.mu.Unlock() select { case <-bw.startCh: default: close(bw.startCh) } // There should be a resolver inside the balancer. // All updates here, if any, are ignored. } func (bw *balancerWrapper) Close() { bw.mu.Lock() defer bw.mu.Unlock() select { case <-bw.startCh: default: close(bw.startCh) } bw.balancer.Close() } // The picker is the balancerWrapper itself. // It either blocks or returns error, consistent with v1 balancer Get(). 
func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) { failfast := true // Default failfast is true. if ss, ok := rpcInfoFromContext(info.Ctx); ok { failfast = ss.failfast } a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast}) if err != nil { return balancer.PickResult{}, toRPCErr(err) } if p != nil { result.Done = func(balancer.DoneInfo) { p() } defer func() { if err != nil { p() } }() } bw.mu.Lock() defer bw.mu.Unlock() if bw.pickfirst { // Get the first sc in conns. for _, result.SubConn = range bw.conns { return result, nil } return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } var ok1 bool result.SubConn, ok1 = bw.conns[resolver.Address{ Addr: a.Addr, Type: resolver.Backend, ServerName: "", Metadata: a.Metadata, }] s, ok2 := bw.connSt[result.SubConn] if !ok1 || !ok2 { // This can only happen due to a race where Get() returned an address // that was subsequently removed by Notify. In this case we should // retry always. return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } switch s.s { case connectivity.Ready, connectivity.Idle: return result, nil case connectivity.Shutdown, connectivity.TransientFailure: // If the returned sc has been shut down or is in transient failure, // return error, and this RPC will fail or wait for another picker (if // non-failfast). return balancer.PickResult{}, balancer.ErrTransientFailure default: // For other states (connecting or unknown), the v1 balancer would // traditionally wait until ready and then issue the RPC. Returning // ErrNoSubConnAvailable will be a slight improvement in that it will // allow the balancer to choose another address in case others are // connected. 
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } } grpc-go-1.29.1/benchmark/000077500000000000000000000000001365033716300151035ustar00rootroot00000000000000grpc-go-1.29.1/benchmark/benchmain/000077500000000000000000000000001365033716300170275ustar00rootroot00000000000000grpc-go-1.29.1/benchmark/benchmain/main.go000066400000000000000000000712271365033716300203130ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /* Package main provides benchmark with setting flags. An example to run some benchmarks with profiling enabled: go run benchmark/benchmain/main.go -benchtime=10s -workloads=all \ -compression=gzip -maxConcurrentCalls=1 -trace=off \ -reqSizeBytes=1,1048576 -respSizeBytes=1,1048576 -networkMode=Local \ -cpuProfile=cpuProf -memProfile=memProf -memProfileRate=10000 -resultFile=result As a suggestion, when creating a branch, you can run this benchmark and save the result file "-resultFile=basePerf", and later when you at the middle of the work or finish the work, you can get the benchmark result and compare it with the base anytime. Assume there are two result files names as "basePerf" and "curPerf" created by adding -resultFile=basePerf and -resultFile=curPerf. 
To format the curPerf, run: go run benchmark/benchresult/main.go curPerf To observe how the performance changes based on a base result, run: go run benchmark/benchresult/main.go basePerf curPerf */ package main import ( "context" "encoding/gob" "flag" "fmt" "io" "io/ioutil" "log" "net" "os" "reflect" "runtime" "runtime/pprof" "strings" "sync" "sync/atomic" "time" "google.golang.org/grpc" bm "google.golang.org/grpc/benchmark" "google.golang.org/grpc/benchmark/flags" testpb "google.golang.org/grpc/benchmark/grpc_testing" "google.golang.org/grpc/benchmark/latency" "google.golang.org/grpc/benchmark/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/test/bufconn" ) var ( workloads = flags.StringWithAllowedValues("workloads", workloadsAll, fmt.Sprintf("Workloads to execute - One of: %v", strings.Join(allWorkloads, ", ")), allWorkloads) traceMode = flags.StringWithAllowedValues("trace", toggleModeOff, fmt.Sprintf("Trace mode - One of: %v", strings.Join(allToggleModes, ", ")), allToggleModes) preloaderMode = flags.StringWithAllowedValues("preloader", toggleModeOff, fmt.Sprintf("Preloader mode - One of: %v", strings.Join(allToggleModes, ", ")), allToggleModes) channelzOn = flags.StringWithAllowedValues("channelz", toggleModeOff, fmt.Sprintf("Channelz mode - One of: %v", strings.Join(allToggleModes, ", ")), allToggleModes) compressorMode = flags.StringWithAllowedValues("compression", compModeOff, fmt.Sprintf("Compression mode - One of: %v", strings.Join(allCompModes, ", ")), allCompModes) networkMode = flags.StringWithAllowedValues("networkMode", networkModeNone, "Network mode includes LAN, WAN, Local and Longhaul", allNetworkModes) readLatency = flags.DurationSlice("latency", defaultReadLatency, "Simulated one-way network latency - may be a comma-separated list") readKbps = flags.IntSlice("kbps", defaultReadKbps, "Simulated network throughput (in kbps) - may be a comma-separated 
list") readMTU = flags.IntSlice("mtu", defaultReadMTU, "Simulated network MTU (Maximum Transmission Unit) - may be a comma-separated list") maxConcurrentCalls = flags.IntSlice("maxConcurrentCalls", defaultMaxConcurrentCalls, "Number of concurrent RPCs during benchmarks") readReqSizeBytes = flags.IntSlice("reqSizeBytes", nil, "Request size in bytes - may be a comma-separated list") readRespSizeBytes = flags.IntSlice("respSizeBytes", nil, "Response size in bytes - may be a comma-separated list") reqPayloadCurveFiles = flags.StringSlice("reqPayloadCurveFiles", nil, "comma-separated list of CSV files describing the shape a random distribution of request payload sizes") respPayloadCurveFiles = flags.StringSlice("respPayloadCurveFiles", nil, "comma-separated list of CSV files describing the shape a random distribution of response payload sizes") benchTime = flag.Duration("benchtime", time.Second, "Configures the amount of time to run each benchmark") memProfile = flag.String("memProfile", "", "Enables memory profiling output to the filename provided.") memProfileRate = flag.Int("memProfileRate", 512*1024, "Configures the memory profiling rate. \n"+ "memProfile should be set before setting profile rate. To include every allocated block in the profile, "+ "set MemProfileRate to 1. To turn off profiling entirely, set MemProfileRate to 0. 512 * 1024 by default.") cpuProfile = flag.String("cpuProfile", "", "Enables CPU profiling output to the filename provided") benchmarkResultFile = flag.String("resultFile", "", "Save the benchmark result into a binary file") useBufconn = flag.Bool("bufconn", false, "Use in-memory connection instead of system network I/O") enableKeepalive = flag.Bool("enable_keepalive", false, "Enable client keepalive. 
\n"+ "Keepalive.Time is set to 10s, Keepalive.Timeout is set to 1s, Keepalive.PermitWithoutStream is set to true.") ) const ( workloadsUnary = "unary" workloadsStreaming = "streaming" workloadsUnconstrained = "unconstrained" workloadsAll = "all" // Compression modes. compModeOff = "off" compModeGzip = "gzip" compModeNop = "nop" compModeAll = "all" // Toggle modes. toggleModeOff = "off" toggleModeOn = "on" toggleModeBoth = "both" // Network modes. networkModeNone = "none" networkModeLocal = "Local" networkModeLAN = "LAN" networkModeWAN = "WAN" networkLongHaul = "Longhaul" numStatsBuckets = 10 warmupCallCount = 10 warmuptime = time.Second ) var ( allWorkloads = []string{workloadsUnary, workloadsStreaming, workloadsUnconstrained, workloadsAll} allCompModes = []string{compModeOff, compModeGzip, compModeNop, compModeAll} allToggleModes = []string{toggleModeOff, toggleModeOn, toggleModeBoth} allNetworkModes = []string{networkModeNone, networkModeLocal, networkModeLAN, networkModeWAN, networkLongHaul} defaultReadLatency = []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay. defaultReadKbps = []int{0, 10240} // if non-positive, infinite defaultReadMTU = []int{0} // if non-positive, infinite defaultMaxConcurrentCalls = []int{1, 8, 64, 512} defaultReqSizeBytes = []int{1, 1024, 1024 * 1024} defaultRespSizeBytes = []int{1, 1024, 1024 * 1024} networks = map[string]latency.Network{ networkModeLocal: latency.Local, networkModeLAN: latency.LAN, networkModeWAN: latency.WAN, networkLongHaul: latency.Longhaul, } keepaliveTime = 10 * time.Second keepaliveTimeout = 1 * time.Second // This is 0.8*keepaliveTime to prevent connection issues because of server // keepalive enforcement. keepaliveMinTime = 8 * time.Second ) // runModes indicates the workloads to run. This is initialized with a call to // `runModesFromWorkloads`, passing the workloads flag set by the user. 
type runModes struct { unary, streaming, unconstrained bool } // runModesFromWorkloads determines the runModes based on the value of // workloads flag set by the user. func runModesFromWorkloads(workload string) runModes { r := runModes{} switch workload { case workloadsUnary: r.unary = true case workloadsStreaming: r.streaming = true case workloadsUnconstrained: r.unconstrained = true case workloadsAll: r.unary = true r.streaming = true r.unconstrained = true default: log.Fatalf("Unknown workloads setting: %v (want one of: %v)", workloads, strings.Join(allWorkloads, ", ")) } return r } type startFunc func(mode string, bf stats.Features) type stopFunc func(count uint64) type ucStopFunc func(req uint64, resp uint64) type rpcCallFunc func(pos int) type rpcSendFunc func(pos int) type rpcRecvFunc func(pos int) type rpcCleanupFunc func() func unaryBenchmark(start startFunc, stop stopFunc, bf stats.Features, s *stats.Stats) { caller, cleanup := makeFuncUnary(bf) defer cleanup() runBenchmark(caller, start, stop, bf, s, workloadsUnary) } func streamBenchmark(start startFunc, stop stopFunc, bf stats.Features, s *stats.Stats) { caller, cleanup := makeFuncStream(bf) defer cleanup() runBenchmark(caller, start, stop, bf, s, workloadsStreaming) } func unconstrainedStreamBenchmark(start startFunc, stop ucStopFunc, bf stats.Features, s *stats.Stats) { var sender rpcSendFunc var recver rpcRecvFunc var cleanup rpcCleanupFunc if bf.EnablePreloader { sender, recver, cleanup = makeFuncUnconstrainedStreamPreloaded(bf) } else { sender, recver, cleanup = makeFuncUnconstrainedStream(bf) } defer cleanup() var req, resp uint64 go func() { // Resets the counters once warmed up <-time.NewTimer(warmuptime).C atomic.StoreUint64(&req, 0) atomic.StoreUint64(&resp, 0) start(workloadsUnconstrained, bf) }() bmEnd := time.Now().Add(bf.BenchTime + warmuptime) var wg sync.WaitGroup wg.Add(2 * bf.MaxConcurrentCalls) for i := 0; i < bf.MaxConcurrentCalls; i++ { go func(pos int) { defer wg.Done() for { t 
:= time.Now() if t.After(bmEnd) { return } sender(pos) atomic.AddUint64(&req, 1) } }(i) go func(pos int) { defer wg.Done() for { t := time.Now() if t.After(bmEnd) { return } recver(pos) atomic.AddUint64(&resp, 1) } }(i) } wg.Wait() stop(req, resp) } // makeClient returns a gRPC client for the grpc.testing.BenchmarkService // service. The client is configured using the different options in the passed // 'bf'. Also returns a cleanup function to close the client and release // resources. func makeClient(bf stats.Features) (testpb.BenchmarkServiceClient, func()) { nw := &latency.Network{Kbps: bf.Kbps, Latency: bf.Latency, MTU: bf.MTU} opts := []grpc.DialOption{} sopts := []grpc.ServerOption{} if bf.ModeCompressor == compModeNop { sopts = append(sopts, grpc.RPCCompressor(nopCompressor{}), grpc.RPCDecompressor(nopDecompressor{}), ) opts = append(opts, grpc.WithCompressor(nopCompressor{}), grpc.WithDecompressor(nopDecompressor{}), ) } if bf.ModeCompressor == compModeGzip { sopts = append(sopts, grpc.RPCCompressor(grpc.NewGZIPCompressor()), grpc.RPCDecompressor(grpc.NewGZIPDecompressor()), ) opts = append(opts, grpc.WithCompressor(grpc.NewGZIPCompressor()), grpc.WithDecompressor(grpc.NewGZIPDecompressor()), ) } if bf.EnableKeepalive { sopts = append(sopts, grpc.KeepaliveParams(keepalive.ServerParameters{ Time: keepaliveTime, Timeout: keepaliveTimeout, }), grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ MinTime: keepaliveMinTime, PermitWithoutStream: true, }), ) opts = append(opts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: keepaliveTime, Timeout: keepaliveTimeout, PermitWithoutStream: true, }), ) } sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(bf.MaxConcurrentCalls+1))) opts = append(opts, grpc.WithInsecure()) var lis net.Listener if bf.UseBufConn { bcLis := bufconn.Listen(256 * 1024) lis = bcLis opts = append(opts, grpc.WithContextDialer(func(ctx context.Context, address string) (net.Conn, error) { return 
nw.ContextDialer(func(context.Context, string, string) (net.Conn, error) { return bcLis.Dial() })(ctx, "", "") })) } else { var err error lis, err = net.Listen("tcp", "localhost:0") if err != nil { grpclog.Fatalf("Failed to listen: %v", err) } opts = append(opts, grpc.WithContextDialer(func(ctx context.Context, address string) (net.Conn, error) { return nw.ContextDialer((&net.Dialer{}).DialContext)(ctx, "tcp", lis.Addr().String()) })) } lis = nw.Listener(lis) stopper := bm.StartServer(bm.ServerInfo{Type: "protobuf", Listener: lis}, sopts...) conn := bm.NewClientConn("" /* target not used */, opts...) return testpb.NewBenchmarkServiceClient(conn), func() { conn.Close() stopper() } } func makeFuncUnary(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) { tc, cleanup := makeClient(bf) return func(int) { reqSizeBytes := bf.ReqSizeBytes respSizeBytes := bf.RespSizeBytes if bf.ReqPayloadCurve != nil { reqSizeBytes = bf.ReqPayloadCurve.ChooseRandom() } if bf.RespPayloadCurve != nil { respSizeBytes = bf.RespPayloadCurve.ChooseRandom() } unaryCaller(tc, reqSizeBytes, respSizeBytes) }, cleanup } func makeFuncStream(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) { tc, cleanup := makeClient(bf) streams := make([]testpb.BenchmarkService_StreamingCallClient, bf.MaxConcurrentCalls) for i := 0; i < bf.MaxConcurrentCalls; i++ { stream, err := tc.StreamingCall(context.Background()) if err != nil { grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) } streams[i] = stream } return func(pos int) { reqSizeBytes := bf.ReqSizeBytes respSizeBytes := bf.RespSizeBytes if bf.ReqPayloadCurve != nil { reqSizeBytes = bf.ReqPayloadCurve.ChooseRandom() } if bf.RespPayloadCurve != nil { respSizeBytes = bf.RespPayloadCurve.ChooseRandom() } streamCaller(streams[pos], reqSizeBytes, respSizeBytes) }, cleanup } func makeFuncUnconstrainedStreamPreloaded(bf stats.Features) (rpcSendFunc, rpcRecvFunc, rpcCleanupFunc) { streams, req, cleanup := setupUnconstrainedStream(bf) preparedMsg := 
make([]*grpc.PreparedMsg, len(streams)) for i, stream := range streams { preparedMsg[i] = &grpc.PreparedMsg{} err := preparedMsg[i].Encode(stream, req) if err != nil { grpclog.Fatalf("%v.Encode(%v, %v) = %v", preparedMsg[i], req, stream, err) } } return func(pos int) { streams[pos].SendMsg(preparedMsg[pos]) }, func(pos int) { streams[pos].Recv() }, cleanup } func makeFuncUnconstrainedStream(bf stats.Features) (rpcSendFunc, rpcRecvFunc, rpcCleanupFunc) { streams, req, cleanup := setupUnconstrainedStream(bf) return func(pos int) { streams[pos].Send(req) }, func(pos int) { streams[pos].Recv() }, cleanup } func setupUnconstrainedStream(bf stats.Features) ([]testpb.BenchmarkService_StreamingCallClient, *testpb.SimpleRequest, rpcCleanupFunc) { tc, cleanup := makeClient(bf) streams := make([]testpb.BenchmarkService_StreamingCallClient, bf.MaxConcurrentCalls) for i := 0; i < bf.MaxConcurrentCalls; i++ { stream, err := tc.UnconstrainedStreamingCall(context.Background()) if err != nil { grpclog.Fatalf("%v.UnconstrainedStreamingCall(_) = _, %v", tc, err) } streams[i] = stream } pl := bm.NewPayload(testpb.PayloadType_COMPRESSABLE, bf.ReqSizeBytes) req := &testpb.SimpleRequest{ ResponseType: pl.Type, ResponseSize: int32(bf.RespSizeBytes), Payload: pl, } return streams, req, cleanup } // Makes a UnaryCall gRPC request using the given BenchmarkServiceClient and // request and response sizes. func unaryCaller(client testpb.BenchmarkServiceClient, reqSize, respSize int) { if err := bm.DoUnaryCall(client, reqSize, respSize); err != nil { grpclog.Fatalf("DoUnaryCall failed: %v", err) } } func streamCaller(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) { if err := bm.DoStreamingRoundTrip(stream, reqSize, respSize); err != nil { grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err) } } func runBenchmark(caller rpcCallFunc, start startFunc, stop stopFunc, bf stats.Features, s *stats.Stats, mode string) { // Warm up connection. 
for i := 0; i < warmupCallCount; i++ { caller(0) } // Run benchmark. start(mode, bf) var wg sync.WaitGroup wg.Add(bf.MaxConcurrentCalls) bmEnd := time.Now().Add(bf.BenchTime) var count uint64 for i := 0; i < bf.MaxConcurrentCalls; i++ { go func(pos int) { defer wg.Done() for { t := time.Now() if t.After(bmEnd) { return } start := time.Now() caller(pos) elapse := time.Since(start) atomic.AddUint64(&count, 1) s.AddDuration(elapse) } }(i) } wg.Wait() stop(count) } // benchOpts represents all configurable options available while running this // benchmark. This is built from the values passed as flags. type benchOpts struct { rModes runModes benchTime time.Duration memProfileRate int memProfile string cpuProfile string networkMode string benchmarkResultFile string useBufconn bool enableKeepalive bool features *featureOpts } // featureOpts represents options which can have multiple values. The user // usually provides a comma-separated list of options for each of these // features through command line flags. We generate all possible combinations // for the provided values and run the benchmarks for each combination. type featureOpts struct { enableTrace []bool readLatencies []time.Duration readKbps []int readMTU []int maxConcurrentCalls []int reqSizeBytes []int respSizeBytes []int reqPayloadCurves []*stats.PayloadCurve respPayloadCurves []*stats.PayloadCurve compModes []string enableChannelz []bool enablePreloader []bool } // makeFeaturesNum returns a slice of ints of size 'maxFeatureIndex' where each // element of the slice (indexed by 'featuresIndex' enum) contains the number // of features to be exercised by the benchmark code. // For example: Index 0 of the returned slice contains the number of values for // enableTrace feature, while index 1 contains the number of value of // readLatencies feature and so on. 
func makeFeaturesNum(b *benchOpts) []int { featuresNum := make([]int, stats.MaxFeatureIndex) for i := 0; i < len(featuresNum); i++ { switch stats.FeatureIndex(i) { case stats.EnableTraceIndex: featuresNum[i] = len(b.features.enableTrace) case stats.ReadLatenciesIndex: featuresNum[i] = len(b.features.readLatencies) case stats.ReadKbpsIndex: featuresNum[i] = len(b.features.readKbps) case stats.ReadMTUIndex: featuresNum[i] = len(b.features.readMTU) case stats.MaxConcurrentCallsIndex: featuresNum[i] = len(b.features.maxConcurrentCalls) case stats.ReqSizeBytesIndex: featuresNum[i] = len(b.features.reqSizeBytes) case stats.RespSizeBytesIndex: featuresNum[i] = len(b.features.respSizeBytes) case stats.ReqPayloadCurveIndex: featuresNum[i] = len(b.features.reqPayloadCurves) case stats.RespPayloadCurveIndex: featuresNum[i] = len(b.features.respPayloadCurves) case stats.CompModesIndex: featuresNum[i] = len(b.features.compModes) case stats.EnableChannelzIndex: featuresNum[i] = len(b.features.enableChannelz) case stats.EnablePreloaderIndex: featuresNum[i] = len(b.features.enablePreloader) default: log.Fatalf("Unknown feature index %v in generateFeatures. maxFeatureIndex is %v", i, stats.MaxFeatureIndex) } } return featuresNum } // sharedFeatures returns a bool slice which acts as a bitmask. Each item in // the slice represents a feature, indexed by 'featureIndex' enum. The bit is // set to 1 if the corresponding feature does not have multiple value, so is // shared amongst all benchmarks. func sharedFeatures(featuresNum []int) []bool { result := make([]bool, len(featuresNum)) for i, num := range featuresNum { if num <= 1 { result[i] = true } } return result } // generateFeatures generates all combinations of the provided feature options. // While all the feature options are stored in the benchOpts struct, the input // parameter 'featuresNum' is a slice indexed by 'featureIndex' enum containing // the number of values for each feature. 
// For example, let's say the user sets -workloads=all and // -maxConcurrentCalls=1,100, this would end up with the following // combinations: // [workloads: unary, maxConcurrentCalls=1] // [workloads: unary, maxConcurrentCalls=1] // [workloads: streaming, maxConcurrentCalls=100] // [workloads: streaming, maxConcurrentCalls=100] // [workloads: unconstrained, maxConcurrentCalls=1] // [workloads: unconstrained, maxConcurrentCalls=100] func (b *benchOpts) generateFeatures(featuresNum []int) []stats.Features { // curPos and initialPos are two slices where each value acts as an index // into the appropriate feature slice maintained in benchOpts.features. This // loop generates all possible combinations of features by changing one value // at a time, and once curPos becomes equal to initialPos, we have explored // all options. var result []stats.Features var curPos []int initialPos := make([]int, stats.MaxFeatureIndex) for !reflect.DeepEqual(initialPos, curPos) { if curPos == nil { curPos = make([]int, stats.MaxFeatureIndex) } f := stats.Features{ // These features stay the same for each iteration. NetworkMode: b.networkMode, UseBufConn: b.useBufconn, EnableKeepalive: b.enableKeepalive, BenchTime: b.benchTime, // These features can potentially change for each iteration. 
EnableTrace: b.features.enableTrace[curPos[stats.EnableTraceIndex]], Latency: b.features.readLatencies[curPos[stats.ReadLatenciesIndex]], Kbps: b.features.readKbps[curPos[stats.ReadKbpsIndex]], MTU: b.features.readMTU[curPos[stats.ReadMTUIndex]], MaxConcurrentCalls: b.features.maxConcurrentCalls[curPos[stats.MaxConcurrentCallsIndex]], ModeCompressor: b.features.compModes[curPos[stats.CompModesIndex]], EnableChannelz: b.features.enableChannelz[curPos[stats.EnableChannelzIndex]], EnablePreloader: b.features.enablePreloader[curPos[stats.EnablePreloaderIndex]], } if len(b.features.reqPayloadCurves) == 0 { f.ReqSizeBytes = b.features.reqSizeBytes[curPos[stats.ReqSizeBytesIndex]] } else { f.ReqPayloadCurve = b.features.reqPayloadCurves[curPos[stats.ReqPayloadCurveIndex]] } if len(b.features.respPayloadCurves) == 0 { f.RespSizeBytes = b.features.respSizeBytes[curPos[stats.RespSizeBytesIndex]] } else { f.RespPayloadCurve = b.features.respPayloadCurves[curPos[stats.RespPayloadCurveIndex]] } result = append(result, f) addOne(curPos, featuresNum) } return result } // addOne mutates the input slice 'features' by changing one feature, thus // arriving at the next combination of feature values. 'featuresMaxPosition' // provides the numbers of allowed values for each feature, indexed by // 'featureIndex' enum. func addOne(features []int, featuresMaxPosition []int) { for i := len(features) - 1; i >= 0; i-- { if featuresMaxPosition[i] == 0 { continue } features[i] = (features[i] + 1) if features[i]/featuresMaxPosition[i] == 0 { break } features[i] = features[i] % featuresMaxPosition[i] } } // processFlags reads the command line flags and builds benchOpts. Specifying // invalid values for certain flags will cause flag.Parse() to fail, and the // program to terminate. // This *SHOULD* be the only place where the flags are accessed. All other // parts of the benchmark code should rely on the returned benchOpts. 
func processFlags() *benchOpts { flag.Parse() if flag.NArg() != 0 { log.Fatal("Error: unparsed arguments: ", flag.Args()) } opts := &benchOpts{ rModes: runModesFromWorkloads(*workloads), benchTime: *benchTime, memProfileRate: *memProfileRate, memProfile: *memProfile, cpuProfile: *cpuProfile, networkMode: *networkMode, benchmarkResultFile: *benchmarkResultFile, useBufconn: *useBufconn, enableKeepalive: *enableKeepalive, features: &featureOpts{ enableTrace: setToggleMode(*traceMode), readLatencies: append([]time.Duration(nil), *readLatency...), readKbps: append([]int(nil), *readKbps...), readMTU: append([]int(nil), *readMTU...), maxConcurrentCalls: append([]int(nil), *maxConcurrentCalls...), reqSizeBytes: append([]int(nil), *readReqSizeBytes...), respSizeBytes: append([]int(nil), *readRespSizeBytes...), compModes: setCompressorMode(*compressorMode), enableChannelz: setToggleMode(*channelzOn), enablePreloader: setToggleMode(*preloaderMode), }, } if len(*reqPayloadCurveFiles) == 0 { if len(opts.features.reqSizeBytes) == 0 { opts.features.reqSizeBytes = defaultReqSizeBytes } } else { if len(opts.features.reqSizeBytes) != 0 { log.Fatalf("you may not specify -reqPayloadCurveFiles and -reqSizeBytes at the same time") } for _, file := range *reqPayloadCurveFiles { pc, err := stats.NewPayloadCurve(file) if err != nil { log.Fatalf("cannot load payload curve file %s: %v", file, err) } opts.features.reqPayloadCurves = append(opts.features.reqPayloadCurves, pc) } opts.features.reqSizeBytes = nil } if len(*respPayloadCurveFiles) == 0 { if len(opts.features.respSizeBytes) == 0 { opts.features.respSizeBytes = defaultRespSizeBytes } } else { if len(opts.features.respSizeBytes) != 0 { log.Fatalf("you may not specify -respPayloadCurveFiles and -respSizeBytes at the same time") } for _, file := range *respPayloadCurveFiles { pc, err := stats.NewPayloadCurve(file) if err != nil { log.Fatalf("cannot load payload curve file %s: %v", file, err) } opts.features.respPayloadCurves = 
append(opts.features.respPayloadCurves, pc) } opts.features.respSizeBytes = nil } // Re-write latency, kpbs and mtu if network mode is set. if network, ok := networks[opts.networkMode]; ok { opts.features.readLatencies = []time.Duration{network.Latency} opts.features.readKbps = []int{network.Kbps} opts.features.readMTU = []int{network.MTU} } return opts } func setToggleMode(val string) []bool { switch val { case toggleModeOn: return []bool{true} case toggleModeOff: return []bool{false} case toggleModeBoth: return []bool{false, true} default: // This should never happen because a wrong value passed to this flag would // be caught during flag.Parse(). return []bool{} } } func setCompressorMode(val string) []string { switch val { case compModeNop, compModeGzip, compModeOff: return []string{val} case compModeAll: return []string{compModeNop, compModeGzip, compModeOff} default: // This should never happen because a wrong value passed to this flag would // be caught during flag.Parse(). return []string{} } } func main() { opts := processFlags() before(opts) s := stats.NewStats(numStatsBuckets) featuresNum := makeFeaturesNum(opts) sf := sharedFeatures(featuresNum) var ( start = func(mode string, bf stats.Features) { s.StartRun(mode, bf, sf) } stop = func(count uint64) { s.EndRun(count) } ucStop = func(req uint64, resp uint64) { s.EndUnconstrainedRun(req, resp) } ) for _, bf := range opts.generateFeatures(featuresNum) { grpc.EnableTracing = bf.EnableTrace if bf.EnableChannelz { channelz.TurnOn() } if opts.rModes.unary { unaryBenchmark(start, stop, bf, s) } if opts.rModes.streaming { streamBenchmark(start, stop, bf, s) } if opts.rModes.unconstrained { unconstrainedStreamBenchmark(start, ucStop, bf, s) } } after(opts, s.GetResults()) } func before(opts *benchOpts) { if opts.memProfile != "" { runtime.MemProfileRate = opts.memProfileRate } if opts.cpuProfile != "" { f, err := os.Create(opts.cpuProfile) if err != nil { fmt.Fprintf(os.Stderr, "testing: %s\n", err) return } if 
err := pprof.StartCPUProfile(f); err != nil { fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s\n", err) f.Close() return } } } func after(opts *benchOpts, data []stats.BenchResults) { if opts.cpuProfile != "" { pprof.StopCPUProfile() // flushes profile to disk } if opts.memProfile != "" { f, err := os.Create(opts.memProfile) if err != nil { fmt.Fprintf(os.Stderr, "testing: %s\n", err) os.Exit(2) } runtime.GC() // materialize all statistics if err = pprof.WriteHeapProfile(f); err != nil { fmt.Fprintf(os.Stderr, "testing: can't write heap profile %s: %s\n", opts.memProfile, err) os.Exit(2) } f.Close() } if opts.benchmarkResultFile != "" { f, err := os.Create(opts.benchmarkResultFile) if err != nil { log.Fatalf("testing: can't write benchmark result %s: %s\n", opts.benchmarkResultFile, err) } dataEncoder := gob.NewEncoder(f) dataEncoder.Encode(data) f.Close() } } // nopCompressor is a compressor that just copies data. type nopCompressor struct{} func (nopCompressor) Do(w io.Writer, p []byte) error { n, err := w.Write(p) if err != nil { return err } if n != len(p) { return fmt.Errorf("nopCompressor.Write: wrote %v bytes; want %v", n, len(p)) } return nil } func (nopCompressor) Type() string { return compModeNop } // nopDecompressor is a decompressor that just copies data. type nopDecompressor struct{} func (nopDecompressor) Do(r io.Reader) ([]byte, error) { return ioutil.ReadAll(r) } func (nopDecompressor) Type() string { return compModeNop } grpc-go-1.29.1/benchmark/benchmark.go000066400000000000000000000211571365033716300173720ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate protoc -I grpc_testing --go_out=plugins=grpc:grpc_testing grpc_testing/control.proto grpc_testing/messages.proto grpc_testing/payloads.proto grpc_testing/services.proto grpc_testing/stats.proto /* Package benchmark implements the building blocks to setup end-to-end gRPC benchmarks. */ package benchmark import ( "context" "fmt" "io" "log" "net" "google.golang.org/grpc" testpb "google.golang.org/grpc/benchmark/grpc_testing" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) // Allows reuse of the same testpb.Payload object. func setPayload(p *testpb.Payload, t testpb.PayloadType, size int) { if size < 0 { grpclog.Fatalf("Requested a response with invalid length %d", size) } body := make([]byte, size) switch t { case testpb.PayloadType_COMPRESSABLE: case testpb.PayloadType_UNCOMPRESSABLE: grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported") default: grpclog.Fatalf("Unsupported payload type: %d", t) } p.Type = t p.Body = body } // NewPayload creates a payload with the given type and size. 
func NewPayload(t testpb.PayloadType, size int) *testpb.Payload { p := new(testpb.Payload) setPayload(p, t, size) return p } type testServer struct { } func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{ Payload: NewPayload(in.ResponseType, int(in.ResponseSize)), }, nil } func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { response := &testpb.SimpleResponse{ Payload: new(testpb.Payload), } in := new(testpb.SimpleRequest) for { // use ServerStream directly to reuse the same testpb.SimpleRequest object err := stream.(grpc.ServerStream).RecvMsg(in) if err == io.EOF { // read done. return nil } if err != nil { return err } setPayload(response.Payload, in.ResponseType, int(in.ResponseSize)) if err := stream.Send(response); err != nil { return err } } } func (s *testServer) UnconstrainedStreamingCall(stream testpb.BenchmarkService_UnconstrainedStreamingCallServer) error { in := new(testpb.SimpleRequest) // Receive a message to learn response type and size. err := stream.RecvMsg(in) if err == io.EOF { // read done. return nil } if err != nil { return err } response := &testpb.SimpleResponse{ Payload: new(testpb.Payload), } setPayload(response.Payload, in.ResponseType, int(in.ResponseSize)) go func() { for { // Using RecvMsg rather than Recv to prevent reallocation of SimpleRequest. err := stream.RecvMsg(in) switch status.Code(err) { case codes.Canceled: case codes.OK: default: log.Fatalf("server recv error: %v", err) } } }() go func() { for { err := stream.Send(response) switch status.Code(err) { case codes.Unavailable: case codes.OK: default: log.Fatalf("server send error: %v", err) } } }() <-stream.Context().Done() return stream.Context().Err() } // byteBufServer is a gRPC server that sends and receives byte buffer. // The purpose is to benchmark the gRPC performance without protobuf serialization/deserialization overhead. 
type byteBufServer struct { respSize int32 } // UnaryCall is an empty function and is not used for benchmark. // If bytebuf UnaryCall benchmark is needed later, the function body needs to be updated. func (s *byteBufServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{}, nil } func (s *byteBufServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { for { var in []byte err := stream.(grpc.ServerStream).RecvMsg(&in) if err == io.EOF { return nil } if err != nil { return err } out := make([]byte, s.respSize) if err := stream.(grpc.ServerStream).SendMsg(&out); err != nil { return err } } } func (s *byteBufServer) UnconstrainedStreamingCall(stream testpb.BenchmarkService_UnconstrainedStreamingCallServer) error { for { var in []byte err := stream.(grpc.ServerStream).RecvMsg(&in) if err == io.EOF { return nil } if err != nil { return err } out := make([]byte, s.respSize) if err := stream.(grpc.ServerStream).SendMsg(&out); err != nil { return err } } } // ServerInfo contains the information to create a gRPC benchmark server. type ServerInfo struct { // Type is the type of the server. // It should be "protobuf" or "bytebuf". Type string // Metadata is an optional configuration. // For "protobuf", it's ignored. // For "bytebuf", it should be an int representing response size. Metadata interface{} // Listener is the network listener for the server to use Listener net.Listener } // StartServer starts a gRPC server serving a benchmark service according to info. // It returns a function to stop the server. func StartServer(info ServerInfo, opts ...grpc.ServerOption) func() { opts = append(opts, grpc.WriteBufferSize(128*1024)) opts = append(opts, grpc.ReadBufferSize(128*1024)) s := grpc.NewServer(opts...) 
switch info.Type { case "protobuf": testpb.RegisterBenchmarkServiceServer(s, &testServer{}) case "bytebuf": respSize, ok := info.Metadata.(int32) if !ok { grpclog.Fatalf("failed to StartServer, invalid metadata: %v, for Type: %v", info.Metadata, info.Type) } testpb.RegisterBenchmarkServiceServer(s, &byteBufServer{respSize: respSize}) default: grpclog.Fatalf("failed to StartServer, unknown Type: %v", info.Type) } go s.Serve(info.Listener) return func() { s.Stop() } } // DoUnaryCall performs an unary RPC with given stub and request and response sizes. func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) error { pl := NewPayload(testpb.PayloadType_COMPRESSABLE, reqSize) req := &testpb.SimpleRequest{ ResponseType: pl.Type, ResponseSize: int32(respSize), Payload: pl, } if _, err := tc.UnaryCall(context.Background(), req); err != nil { return fmt.Errorf("/BenchmarkService/UnaryCall(_, _) = _, %v, want _, ", err) } return nil } // DoStreamingRoundTrip performs a round trip for a single streaming rpc. func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { pl := NewPayload(testpb.PayloadType_COMPRESSABLE, reqSize) req := &testpb.SimpleRequest{ ResponseType: pl.Type, ResponseSize: int32(respSize), Payload: pl, } if err := stream.Send(req); err != nil { return fmt.Errorf("/BenchmarkService/StreamingCall.Send(_) = %v, want ", err) } if _, err := stream.Recv(); err != nil { // EOF is a valid error here. if err == io.EOF { return nil } return fmt.Errorf("/BenchmarkService/StreamingCall.Recv(_) = %v, want ", err) } return nil } // DoByteBufStreamingRoundTrip performs a round trip for a single streaming rpc, using a custom codec for byte buffer. 
func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { out := make([]byte, reqSize) if err := stream.(grpc.ClientStream).SendMsg(&out); err != nil { return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).SendMsg(_) = %v, want ", err) } var in []byte if err := stream.(grpc.ClientStream).RecvMsg(&in); err != nil { // EOF is a valid error here. if err == io.EOF { return nil } return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).RecvMsg(_) = %v, want ", err) } return nil } // NewClientConn creates a gRPC client connection to addr. func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn { return NewClientConnWithContext(context.Background(), addr, opts...) } // NewClientConnWithContext creates a gRPC client connection to addr using ctx. func NewClientConnWithContext(ctx context.Context, addr string, opts ...grpc.DialOption) *grpc.ClientConn { opts = append(opts, grpc.WithWriteBufferSize(128*1024)) opts = append(opts, grpc.WithReadBufferSize(128*1024)) conn, err := grpc.DialContext(ctx, addr, opts...) if err != nil { grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) } return conn } grpc-go-1.29.1/benchmark/benchresult/000077500000000000000000000000001365033716300174215ustar00rootroot00000000000000grpc-go-1.29.1/benchmark/benchresult/main.go000066400000000000000000000115221365033716300206750ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ /* To format the benchmark result: go run benchmark/benchresult/main.go resultfile To see the performance change based on a old result: go run benchmark/benchresult/main.go resultfile_old resultfile It will print the comparison result of intersection benchmarks between two files. */ package main import ( "encoding/gob" "fmt" "log" "os" "strings" "time" "google.golang.org/grpc/benchmark/stats" ) func createMap(fileName string) map[string]stats.BenchResults { f, err := os.Open(fileName) if err != nil { log.Fatalf("Read file %s error: %s\n", fileName, err) } defer f.Close() var data []stats.BenchResults decoder := gob.NewDecoder(f) if err = decoder.Decode(&data); err != nil { log.Fatalf("Decode file %s error: %s\n", fileName, err) } m := make(map[string]stats.BenchResults) for _, d := range data { m[d.RunMode+"-"+d.Features.String()] = d } return m } func intChange(title string, val1, val2 uint64) string { return fmt.Sprintf("%20s %12d %12d %8.2f%%\n", title, val1, val2, float64(int64(val2)-int64(val1))*100/float64(val1)) } func floatChange(title string, val1, val2 float64) string { return fmt.Sprintf("%20s %12.2f %12.2f %8.2f%%\n", title, val1, val2, float64(int64(val2)-int64(val1))*100/float64(val1)) } func timeChange(title string, val1, val2 time.Duration) string { return fmt.Sprintf("%20s %12s %12s %8.2f%%\n", title, val1.String(), val2.String(), float64(val2-val1)*100/float64(val1)) } func strDiff(title, val1, val2 string) string { return fmt.Sprintf("%20s %12s %12s\n", title, val1, val2) } func compareTwoMap(m1, m2 map[string]stats.BenchResults) { for k2, v2 := range m2 { if v1, ok := m1[k2]; ok { changes := k2 + "\n" changes += fmt.Sprintf("%20s %12s %12s %8s\n", "Title", "Before", "After", "Percentage") changes += intChange("TotalOps", v1.Data.TotalOps, v2.Data.TotalOps) changes += intChange("SendOps", v1.Data.SendOps, v2.Data.SendOps) changes += 
intChange("RecvOps", v1.Data.RecvOps, v2.Data.RecvOps) changes += floatChange("Bytes/op", v1.Data.AllocedBytes, v2.Data.AllocedBytes) changes += floatChange("Allocs/op", v1.Data.Allocs, v2.Data.Allocs) changes += floatChange("ReqT/op", v1.Data.ReqT, v2.Data.ReqT) changes += floatChange("RespT/op", v1.Data.RespT, v2.Data.RespT) changes += timeChange("50th-Lat", v1.Data.Fiftieth, v2.Data.Fiftieth) changes += timeChange("90th-Lat", v1.Data.Ninetieth, v2.Data.Ninetieth) changes += timeChange("99th-Lat", v1.Data.NinetyNinth, v2.Data.NinetyNinth) changes += timeChange("Avg-Lat", v1.Data.Average, v2.Data.Average) changes += strDiff("GoVersion", v1.GoVersion, v2.GoVersion) changes += strDiff("GrpcVersion", v1.GrpcVersion, v2.GrpcVersion) fmt.Printf("%s\n", changes) } } } func compareBenchmark(file1, file2 string) { compareTwoMap(createMap(file1), createMap(file2)) } func printHeader() { fmt.Printf("%-80s%12s%12s%12s%18s%18s%18s%18s%12s%12s%12s%12s\n", "Name", "TotalOps", "SendOps", "RecvOps", "Bytes/op (B)", "Allocs/op (#)", "RequestT", "ResponseT", "L-50", "L-90", "L-99", "L-Avg") } func printline(benchName string, d stats.RunData) { fmt.Printf("%-80s%12d%12d%12d%18.2f%18.2f%18.2f%18.2f%12v%12v%12v%12v\n", benchName, d.TotalOps, d.SendOps, d.RecvOps, d.AllocedBytes, d.Allocs, d.ReqT, d.RespT, d.Fiftieth, d.Ninetieth, d.NinetyNinth, d.Average) } func formatBenchmark(fileName string) { f, err := os.Open(fileName) if err != nil { log.Fatalf("Read file %s error: %s\n", fileName, err) } defer f.Close() var results []stats.BenchResults decoder := gob.NewDecoder(f) if err = decoder.Decode(&results); err != nil { log.Fatalf("Decode file %s error: %s\n", fileName, err) } if len(results) == 0 { log.Fatalf("No benchmark results in file %s\n", fileName) } fmt.Println("\nShared features:\n" + strings.Repeat("-", 20)) fmt.Print(results[0].Features.SharedFeatures(results[0].SharedFeatures)) fmt.Println(strings.Repeat("-", 35)) wantFeatures := results[0].SharedFeatures for i := 0; i < 
len(results[0].SharedFeatures); i++ { wantFeatures[i] = !wantFeatures[i] } printHeader() for _, r := range results { printline(r.RunMode+r.Features.PrintableName(wantFeatures), r.Data) } } func main() { if len(os.Args) == 2 { formatBenchmark(os.Args[1]) } else { compareBenchmark(os.Args[1], os.Args[2]) } } grpc-go-1.29.1/benchmark/client/000077500000000000000000000000001365033716300163615ustar00rootroot00000000000000grpc-go-1.29.1/benchmark/client/main.go000066400000000000000000000140321365033716300176340ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /* Package main provides a client used for benchmarking. Before running the client, the user would need to launch the grpc server. To start the server before running the client, you can run look for the command under the following file: benchmark/server/main.go After starting the server, the client can be run. An example of how to run this command is: go run benchmark/client/main.go -test_name=grpc_test If the server is running on a different port than 50051, then use the port flag for the client to hit the server on the correct port. 
An example for how to run this command on a different port can be found here: go run benchmark/client/main.go -test_name=grpc_test -port=8080 */ package main import ( "context" "flag" "fmt" "os" "runtime" "runtime/pprof" "sync" "time" "google.golang.org/grpc" "google.golang.org/grpc/benchmark" testpb "google.golang.org/grpc/benchmark/grpc_testing" "google.golang.org/grpc/benchmark/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/syscall" ) var ( port = flag.String("port", "50051", "Localhost port to connect to.") numRPC = flag.Int("r", 1, "The number of concurrent RPCs on each connection.") numConn = flag.Int("c", 1, "The number of parallel connections.") warmupDur = flag.Int("w", 10, "Warm-up duration in seconds") duration = flag.Int("d", 60, "Benchmark duration in seconds") rqSize = flag.Int("req", 1, "Request message size in bytes.") rspSize = flag.Int("resp", 1, "Response message size in bytes.") rpcType = flag.String("rpc_type", "unary", `Configure different client rpc type. 
Valid options are: unary; streaming.`) testName = flag.String("test_name", "", "Name of the test used for creating profiles.") wg sync.WaitGroup hopts = stats.HistogramOptions{ NumBuckets: 2495, GrowthFactor: .01, } mu sync.Mutex hists []*stats.Histogram ) func main() { flag.Parse() if *testName == "" { grpclog.Fatalf("test_name not set") } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(*rspSize), Payload: &testpb.Payload{ Type: testpb.PayloadType_COMPRESSABLE, Body: make([]byte, *rqSize), }, } connectCtx, connectCancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second)) defer connectCancel() ccs := buildConnections(connectCtx) warmDeadline := time.Now().Add(time.Duration(*warmupDur) * time.Second) endDeadline := warmDeadline.Add(time.Duration(*duration) * time.Second) cf, err := os.Create("/tmp/" + *testName + ".cpu") if err != nil { grpclog.Fatalf("Error creating file: %v", err) } defer cf.Close() pprof.StartCPUProfile(cf) cpuBeg := syscall.GetCPUTime() for _, cc := range ccs { runWithConn(cc, req, warmDeadline, endDeadline) } wg.Wait() cpu := time.Duration(syscall.GetCPUTime() - cpuBeg) pprof.StopCPUProfile() mf, err := os.Create("/tmp/" + *testName + ".mem") if err != nil { grpclog.Fatalf("Error creating file: %v", err) } defer mf.Close() runtime.GC() // materialize all statistics if err := pprof.WriteHeapProfile(mf); err != nil { grpclog.Fatalf("Error writing memory profile: %v", err) } hist := stats.NewHistogram(hopts) for _, h := range hists { hist.Merge(h) } parseHist(hist) fmt.Println("Client CPU utilization:", cpu) fmt.Println("Client CPU profile:", cf.Name()) fmt.Println("Client Mem Profile:", mf.Name()) } func buildConnections(ctx context.Context) []*grpc.ClientConn { ccs := make([]*grpc.ClientConn, *numConn) for i := range ccs { ccs[i] = benchmark.NewClientConnWithContext(ctx, "localhost:"+*port, grpc.WithInsecure(), grpc.WithBlock()) } return ccs } func runWithConn(cc 
*grpc.ClientConn, req *testpb.SimpleRequest, warmDeadline, endDeadline time.Time) { for i := 0; i < *numRPC; i++ { wg.Add(1) go func() { defer wg.Done() caller := makeCaller(cc, req) hist := stats.NewHistogram(hopts) for { start := time.Now() if start.After(endDeadline) { mu.Lock() hists = append(hists, hist) mu.Unlock() return } caller() elapsed := time.Since(start) if start.After(warmDeadline) { hist.Add(elapsed.Nanoseconds()) } } }() } } func makeCaller(cc *grpc.ClientConn, req *testpb.SimpleRequest) func() { client := testpb.NewBenchmarkServiceClient(cc) if *rpcType == "unary" { return func() { if _, err := client.UnaryCall(context.Background(), req); err != nil { grpclog.Fatalf("RPC failed: %v", err) } } } stream, err := client.StreamingCall(context.Background()) if err != nil { grpclog.Fatalf("RPC failed: %v", err) } return func() { if err := stream.Send(req); err != nil { grpclog.Fatalf("Streaming RPC failed to send: %v", err) } if _, err := stream.Recv(); err != nil { grpclog.Fatalf("Streaming RPC failed to read: %v", err) } } } func parseHist(hist *stats.Histogram) { fmt.Println("qps:", float64(hist.Count)/float64(*duration)) fmt.Printf("Latency: (50/90/99 %%ile): %v/%v/%v\n", time.Duration(median(.5, hist)), time.Duration(median(.9, hist)), time.Duration(median(.99, hist))) } func median(percentile float64, h *stats.Histogram) int64 { need := int64(float64(h.Count) * percentile) have := int64(0) for _, bucket := range h.Buckets { count := bucket.Count if have+count >= need { percent := float64(need-have) / float64(count) return int64((1.0-percent)*bucket.LowBound + percent*bucket.LowBound*(1.0+hopts.GrowthFactor)) } have += bucket.Count } panic("should have found a bound") } grpc-go-1.29.1/benchmark/flags/000077500000000000000000000000001365033716300161775ustar00rootroot00000000000000grpc-go-1.29.1/benchmark/flags/flags.go000066400000000000000000000106071365033716300176260ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /* Package flags provide convenience types and routines to accept specific types of flag values on the command line. */ package flags import ( "bytes" "encoding/csv" "flag" "fmt" "strconv" "strings" "time" ) // stringFlagWithAllowedValues represents a string flag which can only take a // predefined set of values. type stringFlagWithAllowedValues struct { val string allowed []string } // StringWithAllowedValues returns a flag variable of type // stringFlagWithAllowedValues configured with the provided parameters. // 'allowed` is the set of values that this flag can be set to. func StringWithAllowedValues(name, defaultVal, usage string, allowed []string) *string { as := &stringFlagWithAllowedValues{defaultVal, allowed} flag.CommandLine.Var(as, name, usage) return &as.val } // String implements the flag.Value interface. func (as *stringFlagWithAllowedValues) String() string { return as.val } // Set implements the flag.Value interface. func (as *stringFlagWithAllowedValues) Set(val string) error { for _, a := range as.allowed { if a == val { as.val = val return nil } } return fmt.Errorf("want one of: %v", strings.Join(as.allowed, ", ")) } type durationSliceValue []time.Duration // DurationSlice returns a flag representing a slice of time.Duration objects. 
func DurationSlice(name string, defaultVal []time.Duration, usage string) *[]time.Duration { ds := make([]time.Duration, len(defaultVal)) copy(ds, defaultVal) dsv := (*durationSliceValue)(&ds) flag.CommandLine.Var(dsv, name, usage) return &ds } // Set implements the flag.Value interface. func (dsv *durationSliceValue) Set(s string) error { ds := strings.Split(s, ",") var dd []time.Duration for _, n := range ds { d, err := time.ParseDuration(n) if err != nil { return err } dd = append(dd, d) } *dsv = durationSliceValue(dd) return nil } // String implements the flag.Value interface. func (dsv *durationSliceValue) String() string { var b bytes.Buffer for i, d := range *dsv { if i > 0 { b.WriteRune(',') } b.WriteString(d.String()) } return b.String() } type intSliceValue []int // IntSlice returns a flag representing a slice of ints. func IntSlice(name string, defaultVal []int, usage string) *[]int { is := make([]int, len(defaultVal)) copy(is, defaultVal) isv := (*intSliceValue)(&is) flag.CommandLine.Var(isv, name, usage) return &is } // Set implements the flag.Value interface. func (isv *intSliceValue) Set(s string) error { is := strings.Split(s, ",") var ret []int for _, n := range is { i, err := strconv.Atoi(n) if err != nil { return err } ret = append(ret, i) } *isv = intSliceValue(ret) return nil } // String implements the flag.Value interface. func (isv *intSliceValue) String() string { var b bytes.Buffer for i, n := range *isv { if i > 0 { b.WriteRune(',') } b.WriteString(strconv.Itoa(n)) } return b.String() } type stringSliceValue []string // StringSlice returns a flag representing a slice of strings. func StringSlice(name string, defaultVal []string, usage string) *[]string { ss := make([]string, len(defaultVal)) copy(ss, defaultVal) ssv := (*stringSliceValue)(&ss) flag.CommandLine.Var(ssv, name, usage) return &ss } // escapedCommaSplit splits a comma-separated list of strings in the same way // CSV files work (escaping a comma requires double-quotes). 
func escapedCommaSplit(str string) ([]string, error) { r := csv.NewReader(strings.NewReader(str)) ret, err := r.Read() if err != nil { return nil, err } return ret, nil } // Set implements the flag.Value interface. func (ss *stringSliceValue) Set(str string) error { var err error *ss, err = escapedCommaSplit(str) if err != nil { return err } return nil } // String implements the flag.Value interface. func (ss *stringSliceValue) String() string { return strings.Join(*ss, ",") } grpc-go-1.29.1/benchmark/flags/flags_test.go000066400000000000000000000104751365033716300206700ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package flags import ( "flag" "reflect" "testing" "time" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func (s) TestStringWithAllowedValues(t *testing.T) { const defaultVal = "default" tests := []struct { args string allowed []string wantVal string wantErr bool }{ {"-workloads=all", []string{"unary", "streaming", "all"}, "all", false}, {"-workloads=disallowed", []string{"unary", "streaming", "all"}, defaultVal, true}, } for _, test := range tests { flag.CommandLine = flag.NewFlagSet("test", flag.ContinueOnError) var w = StringWithAllowedValues("workloads", defaultVal, "usage", test.allowed) err := flag.CommandLine.Parse([]string{test.args}) switch { case !test.wantErr && err != nil: t.Errorf("failed to parse command line args {%v}: %v", test.args, err) case test.wantErr && err == nil: t.Errorf("flag.Parse(%v) = nil, want non-nil error", test.args) default: if *w != test.wantVal { t.Errorf("flag value is %v, want %v", *w, test.wantVal) } } } } func (s) TestDurationSlice(t *testing.T) { defaultVal := []time.Duration{time.Second, time.Nanosecond} tests := []struct { args string wantVal []time.Duration wantErr bool }{ {"-latencies=1s", []time.Duration{time.Second}, false}, {"-latencies=1s,2s,3s", []time.Duration{time.Second, 2 * time.Second, 3 * time.Second}, false}, {"-latencies=bad", defaultVal, true}, } for _, test := range tests { flag.CommandLine = flag.NewFlagSet("test", flag.ContinueOnError) var w = DurationSlice("latencies", defaultVal, "usage") err := flag.CommandLine.Parse([]string{test.args}) switch { case !test.wantErr && err != nil: t.Errorf("failed to parse command line args {%v}: %v", test.args, err) case test.wantErr && err == nil: t.Errorf("flag.Parse(%v) = nil, want non-nil error", test.args) default: if !reflect.DeepEqual(*w, test.wantVal) { t.Errorf("flag value is %v, want %v", *w, test.wantVal) } } } } func (s) TestIntSlice(t *testing.T) { defaultVal := 
[]int{1, 1024} tests := []struct { args string wantVal []int wantErr bool }{ {"-kbps=1", []int{1}, false}, {"-kbps=1,2,3", []int{1, 2, 3}, false}, {"-kbps=20e4", defaultVal, true}, } for _, test := range tests { flag.CommandLine = flag.NewFlagSet("test", flag.ContinueOnError) var w = IntSlice("kbps", defaultVal, "usage") err := flag.CommandLine.Parse([]string{test.args}) switch { case !test.wantErr && err != nil: t.Errorf("failed to parse command line args {%v}: %v", test.args, err) case test.wantErr && err == nil: t.Errorf("flag.Parse(%v) = nil, want non-nil error", test.args) default: if !reflect.DeepEqual(*w, test.wantVal) { t.Errorf("flag value is %v, want %v", *w, test.wantVal) } } } } func (s) TestStringSlice(t *testing.T) { defaultVal := []string{"bar", "baz"} tests := []struct { args string wantVal []string wantErr bool }{ {"-name=foobar", []string{"foobar"}, false}, {"-name=foo,bar", []string{"foo", "bar"}, false}, {`-name="foo,bar",baz`, []string{"foo,bar", "baz"}, false}, {`-name="foo,bar""",baz`, []string{`foo,bar"`, "baz"}, false}, } for _, test := range tests { flag.CommandLine = flag.NewFlagSet("test", flag.ContinueOnError) var w = StringSlice("name", defaultVal, "usage") err := flag.CommandLine.Parse([]string{test.args}) switch { case !test.wantErr && err != nil: t.Errorf("failed to parse command line args {%v}: %v", test.args, err) case test.wantErr && err == nil: t.Errorf("flag.Parse(%v) = nil, want non-nil error", test.args) default: if !reflect.DeepEqual(*w, test.wantVal) { t.Errorf("flag value is %v, want %v", *w, test.wantVal) } } } } grpc-go-1.29.1/benchmark/grpc_testing/000077500000000000000000000000001365033716300175735ustar00rootroot00000000000000grpc-go-1.29.1/benchmark/grpc_testing/control.pb.go000066400000000000000000001334671365033716300222200ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. 
// source: control.proto package grpc_testing import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type ClientType int32 const ( ClientType_SYNC_CLIENT ClientType = 0 ClientType_ASYNC_CLIENT ClientType = 1 ) var ClientType_name = map[int32]string{ 0: "SYNC_CLIENT", 1: "ASYNC_CLIENT", } var ClientType_value = map[string]int32{ "SYNC_CLIENT": 0, "ASYNC_CLIENT": 1, } func (x ClientType) String() string { return proto.EnumName(ClientType_name, int32(x)) } func (ClientType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0c5120591600887d, []int{0} } type ServerType int32 const ( ServerType_SYNC_SERVER ServerType = 0 ServerType_ASYNC_SERVER ServerType = 1 ServerType_ASYNC_GENERIC_SERVER ServerType = 2 ) var ServerType_name = map[int32]string{ 0: "SYNC_SERVER", 1: "ASYNC_SERVER", 2: "ASYNC_GENERIC_SERVER", } var ServerType_value = map[string]int32{ "SYNC_SERVER": 0, "ASYNC_SERVER": 1, "ASYNC_GENERIC_SERVER": 2, } func (x ServerType) String() string { return proto.EnumName(ServerType_name, int32(x)) } func (ServerType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0c5120591600887d, []int{1} } type RpcType int32 const ( RpcType_UNARY RpcType = 0 RpcType_STREAMING RpcType = 1 ) var RpcType_name = map[int32]string{ 0: "UNARY", 1: "STREAMING", } var RpcType_value = map[string]int32{ "UNARY": 0, "STREAMING": 1, } func (x RpcType) String() string { return proto.EnumName(RpcType_name, int32(x)) } func (RpcType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_0c5120591600887d, []int{2} } // 
Parameters of poisson process distribution, which is a good representation // of activity coming in from independent identical stationary sources. type PoissonParams struct { // The rate of arrivals (a.k.a. lambda parameter of the exp distribution). OfferedLoad float64 `protobuf:"fixed64,1,opt,name=offered_load,json=offeredLoad,proto3" json:"offered_load,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *PoissonParams) Reset() { *m = PoissonParams{} } func (m *PoissonParams) String() string { return proto.CompactTextString(m) } func (*PoissonParams) ProtoMessage() {} func (*PoissonParams) Descriptor() ([]byte, []int) { return fileDescriptor_0c5120591600887d, []int{0} } func (m *PoissonParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PoissonParams.Unmarshal(m, b) } func (m *PoissonParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PoissonParams.Marshal(b, m, deterministic) } func (m *PoissonParams) XXX_Merge(src proto.Message) { xxx_messageInfo_PoissonParams.Merge(m, src) } func (m *PoissonParams) XXX_Size() int { return xxx_messageInfo_PoissonParams.Size(m) } func (m *PoissonParams) XXX_DiscardUnknown() { xxx_messageInfo_PoissonParams.DiscardUnknown(m) } var xxx_messageInfo_PoissonParams proto.InternalMessageInfo func (m *PoissonParams) GetOfferedLoad() float64 { if m != nil { return m.OfferedLoad } return 0 } type UniformParams struct { InterarrivalLo float64 `protobuf:"fixed64,1,opt,name=interarrival_lo,json=interarrivalLo,proto3" json:"interarrival_lo,omitempty"` InterarrivalHi float64 `protobuf:"fixed64,2,opt,name=interarrival_hi,json=interarrivalHi,proto3" json:"interarrival_hi,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *UniformParams) Reset() { *m = UniformParams{} } func (m *UniformParams) String() string { return proto.CompactTextString(m) } 
func (*UniformParams) ProtoMessage() {} func (*UniformParams) Descriptor() ([]byte, []int) { return fileDescriptor_0c5120591600887d, []int{1} } func (m *UniformParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UniformParams.Unmarshal(m, b) } func (m *UniformParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_UniformParams.Marshal(b, m, deterministic) } func (m *UniformParams) XXX_Merge(src proto.Message) { xxx_messageInfo_UniformParams.Merge(m, src) } func (m *UniformParams) XXX_Size() int { return xxx_messageInfo_UniformParams.Size(m) } func (m *UniformParams) XXX_DiscardUnknown() { xxx_messageInfo_UniformParams.DiscardUnknown(m) } var xxx_messageInfo_UniformParams proto.InternalMessageInfo func (m *UniformParams) GetInterarrivalLo() float64 { if m != nil { return m.InterarrivalLo } return 0 } func (m *UniformParams) GetInterarrivalHi() float64 { if m != nil { return m.InterarrivalHi } return 0 } type DeterministicParams struct { OfferedLoad float64 `protobuf:"fixed64,1,opt,name=offered_load,json=offeredLoad,proto3" json:"offered_load,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *DeterministicParams) Reset() { *m = DeterministicParams{} } func (m *DeterministicParams) String() string { return proto.CompactTextString(m) } func (*DeterministicParams) ProtoMessage() {} func (*DeterministicParams) Descriptor() ([]byte, []int) { return fileDescriptor_0c5120591600887d, []int{2} } func (m *DeterministicParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DeterministicParams.Unmarshal(m, b) } func (m *DeterministicParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DeterministicParams.Marshal(b, m, deterministic) } func (m *DeterministicParams) XXX_Merge(src proto.Message) { xxx_messageInfo_DeterministicParams.Merge(m, src) } func (m *DeterministicParams) XXX_Size() int { return 
xxx_messageInfo_DeterministicParams.Size(m) } func (m *DeterministicParams) XXX_DiscardUnknown() { xxx_messageInfo_DeterministicParams.DiscardUnknown(m) } var xxx_messageInfo_DeterministicParams proto.InternalMessageInfo func (m *DeterministicParams) GetOfferedLoad() float64 { if m != nil { return m.OfferedLoad } return 0 } type ParetoParams struct { InterarrivalBase float64 `protobuf:"fixed64,1,opt,name=interarrival_base,json=interarrivalBase,proto3" json:"interarrival_base,omitempty"` Alpha float64 `protobuf:"fixed64,2,opt,name=alpha,proto3" json:"alpha,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ParetoParams) Reset() { *m = ParetoParams{} } func (m *ParetoParams) String() string { return proto.CompactTextString(m) } func (*ParetoParams) ProtoMessage() {} func (*ParetoParams) Descriptor() ([]byte, []int) { return fileDescriptor_0c5120591600887d, []int{3} } func (m *ParetoParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ParetoParams.Unmarshal(m, b) } func (m *ParetoParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ParetoParams.Marshal(b, m, deterministic) } func (m *ParetoParams) XXX_Merge(src proto.Message) { xxx_messageInfo_ParetoParams.Merge(m, src) } func (m *ParetoParams) XXX_Size() int { return xxx_messageInfo_ParetoParams.Size(m) } func (m *ParetoParams) XXX_DiscardUnknown() { xxx_messageInfo_ParetoParams.DiscardUnknown(m) } var xxx_messageInfo_ParetoParams proto.InternalMessageInfo func (m *ParetoParams) GetInterarrivalBase() float64 { if m != nil { return m.InterarrivalBase } return 0 } func (m *ParetoParams) GetAlpha() float64 { if m != nil { return m.Alpha } return 0 } // Once an RPC finishes, immediately start a new one. // No configuration parameters needed. 
// ClosedLoopParams is an empty message (proto message index 4); it serves
// only as a case marker in the LoadParams.Load oneof below.
type ClosedLoopParams struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ClosedLoopParams) Reset()         { *m = ClosedLoopParams{} }
func (m *ClosedLoopParams) String() string { return proto.CompactTextString(m) }
func (*ClosedLoopParams) ProtoMessage()    {}
func (*ClosedLoopParams) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{4}
}
func (m *ClosedLoopParams) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClosedLoopParams.Unmarshal(m, b)
}
func (m *ClosedLoopParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClosedLoopParams.Marshal(b, m, deterministic)
}
func (m *ClosedLoopParams) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClosedLoopParams.Merge(m, src)
}
func (m *ClosedLoopParams) XXX_Size() int {
	return xxx_messageInfo_ClosedLoopParams.Size(m)
}
func (m *ClosedLoopParams) XXX_DiscardUnknown() {
	xxx_messageInfo_ClosedLoopParams.DiscardUnknown(m)
}

var xxx_messageInfo_ClosedLoopParams proto.InternalMessageInfo

// LoadParams selects exactly one load-generation strategy through the Load
// oneof (proto message index 5).
type LoadParams struct {
	// Types that are valid to be assigned to Load:
	//	*LoadParams_ClosedLoop
	//	*LoadParams_Poisson
	//	*LoadParams_Uniform
	//	*LoadParams_Determ
	//	*LoadParams_Pareto
	Load                 isLoadParams_Load `protobuf_oneof:"load"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

func (m *LoadParams) Reset()         { *m = LoadParams{} }
func (m *LoadParams) String() string { return proto.CompactTextString(m) }
func (*LoadParams) ProtoMessage()    {}
func (*LoadParams) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{5}
}
func (m *LoadParams) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LoadParams.Unmarshal(m, b)
}
func (m *LoadParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LoadParams.Marshal(b, m, deterministic)
}
func (m *LoadParams) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LoadParams.Merge(m, src)
}
func (m *LoadParams) XXX_Size() int {
	return xxx_messageInfo_LoadParams.Size(m)
}
func (m *LoadParams) XXX_DiscardUnknown() {
	xxx_messageInfo_LoadParams.DiscardUnknown(m)
}

var xxx_messageInfo_LoadParams proto.InternalMessageInfo

// isLoadParams_Load is the sealed interface implemented by each oneof
// wrapper type below; only those wrappers may be assigned to Load.
type isLoadParams_Load interface {
	isLoadParams_Load()
}

type LoadParams_ClosedLoop struct {
	ClosedLoop *ClosedLoopParams `protobuf:"bytes,1,opt,name=closed_loop,json=closedLoop,proto3,oneof"`
}

type LoadParams_Poisson struct {
	Poisson *PoissonParams `protobuf:"bytes,2,opt,name=poisson,proto3,oneof"`
}

type LoadParams_Uniform struct {
	Uniform *UniformParams `protobuf:"bytes,3,opt,name=uniform,proto3,oneof"`
}

type LoadParams_Determ struct {
	Determ *DeterministicParams `protobuf:"bytes,4,opt,name=determ,proto3,oneof"`
}

type LoadParams_Pareto struct {
	Pareto *ParetoParams `protobuf:"bytes,5,opt,name=pareto,proto3,oneof"`
}

func (*LoadParams_ClosedLoop) isLoadParams_Load() {}

func (*LoadParams_Poisson) isLoadParams_Load() {}

func (*LoadParams_Uniform) isLoadParams_Load() {}

func (*LoadParams_Determ) isLoadParams_Load() {}

func (*LoadParams_Pareto) isLoadParams_Load() {}

func (m *LoadParams) GetLoad() isLoadParams_Load {
	if m != nil {
		return m.Load
	}
	return nil
}

// Each per-case getter returns nil unless that case is the one currently
// stored in the oneof.
func (m *LoadParams) GetClosedLoop() *ClosedLoopParams {
	if x, ok := m.GetLoad().(*LoadParams_ClosedLoop); ok {
		return x.ClosedLoop
	}
	return nil
}

func (m *LoadParams) GetPoisson() *PoissonParams {
	if x, ok := m.GetLoad().(*LoadParams_Poisson); ok {
		return x.Poisson
	}
	return nil
}

func (m *LoadParams) GetUniform() *UniformParams {
	if x, ok := m.GetLoad().(*LoadParams_Uniform); ok {
		return x.Uniform
	}
	return nil
}

func (m *LoadParams) GetDeterm() *DeterministicParams {
	if x, ok := m.GetLoad().(*LoadParams_Determ); ok {
		return x.Determ
	}
	return nil
}

func (m *LoadParams) GetPareto() *ParetoParams {
	if x, ok := m.GetLoad().(*LoadParams_Pareto); ok {
		return x.Pareto
	}
	return nil
}

// XXX_OneofWrappers is for the internal use of the proto package.
func (*LoadParams) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*LoadParams_ClosedLoop)(nil),
		(*LoadParams_Poisson)(nil),
		(*LoadParams_Uniform)(nil),
		(*LoadParams_Determ)(nil),
		(*LoadParams_Pareto)(nil),
	}
}

// presence of SecurityParams implies use of TLS
type SecurityParams struct {
	UseTestCa            bool     `protobuf:"varint,1,opt,name=use_test_ca,json=useTestCa,proto3" json:"use_test_ca,omitempty"`
	ServerHostOverride   string   `protobuf:"bytes,2,opt,name=server_host_override,json=serverHostOverride,proto3" json:"server_host_override,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *SecurityParams) Reset()         { *m = SecurityParams{} }
func (m *SecurityParams) String() string { return proto.CompactTextString(m) }
func (*SecurityParams) ProtoMessage()    {}
func (*SecurityParams) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{6}
}
func (m *SecurityParams) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SecurityParams.Unmarshal(m, b)
}
func (m *SecurityParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SecurityParams.Marshal(b, m, deterministic)
}
func (m *SecurityParams) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SecurityParams.Merge(m, src)
}
func (m *SecurityParams) XXX_Size() int {
	return xxx_messageInfo_SecurityParams.Size(m)
}
func (m *SecurityParams) XXX_DiscardUnknown() {
	xxx_messageInfo_SecurityParams.DiscardUnknown(m)
}

var xxx_messageInfo_SecurityParams proto.InternalMessageInfo

func (m *SecurityParams) GetUseTestCa() bool {
	if m != nil {
		return m.UseTestCa
	}
	return false
}

func (m *SecurityParams) GetServerHostOverride() string {
	if m != nil {
		return m.ServerHostOverride
	}
	return ""
}

// ClientConfig is the benchmark client's configuration message
// (proto message index 7).
type ClientConfig struct {
	// List of targets to connect to. At least one target needs to be specified.
	ServerTargets  []string        `protobuf:"bytes,1,rep,name=server_targets,json=serverTargets,proto3" json:"server_targets,omitempty"`
	ClientType     ClientType      `protobuf:"varint,2,opt,name=client_type,json=clientType,proto3,enum=grpc.testing.ClientType" json:"client_type,omitempty"`
	SecurityParams *SecurityParams `protobuf:"bytes,3,opt,name=security_params,json=securityParams,proto3" json:"security_params,omitempty"`
	// How many concurrent RPCs to start for each channel.
	// For synchronous client, use a separate thread for each outstanding RPC.
	OutstandingRpcsPerChannel int32 `protobuf:"varint,4,opt,name=outstanding_rpcs_per_channel,json=outstandingRpcsPerChannel,proto3" json:"outstanding_rpcs_per_channel,omitempty"`
	// Number of independent client channels to create.
	// i-th channel will connect to server_target[i % server_targets.size()]
	ClientChannels int32 `protobuf:"varint,5,opt,name=client_channels,json=clientChannels,proto3" json:"client_channels,omitempty"`
	// Only for async client. Number of threads to use to start/manage RPCs.
	AsyncClientThreads int32   `protobuf:"varint,7,opt,name=async_client_threads,json=asyncClientThreads,proto3" json:"async_client_threads,omitempty"`
	RpcType            RpcType `protobuf:"varint,8,opt,name=rpc_type,json=rpcType,proto3,enum=grpc.testing.RpcType" json:"rpc_type,omitempty"`
	// The requested load for the entire client (aggregated over all the threads).
	LoadParams      *LoadParams      `protobuf:"bytes,10,opt,name=load_params,json=loadParams,proto3" json:"load_params,omitempty"`
	PayloadConfig   *PayloadConfig   `protobuf:"bytes,11,opt,name=payload_config,json=payloadConfig,proto3" json:"payload_config,omitempty"`
	HistogramParams *HistogramParams `protobuf:"bytes,12,opt,name=histogram_params,json=histogramParams,proto3" json:"histogram_params,omitempty"`
	// Specify the cores we should run the client on, if desired
	CoreList             []int32  `protobuf:"varint,13,rep,packed,name=core_list,json=coreList,proto3" json:"core_list,omitempty"`
	CoreLimit            int32    `protobuf:"varint,14,opt,name=core_limit,json=coreLimit,proto3" json:"core_limit,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ClientConfig) Reset()         { *m = ClientConfig{} }
func (m *ClientConfig) String() string { return proto.CompactTextString(m) }
func (*ClientConfig) ProtoMessage()    {}
func (*ClientConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{7}
}
func (m *ClientConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClientConfig.Unmarshal(m, b)
}
func (m *ClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClientConfig.Marshal(b, m, deterministic)
}
func (m *ClientConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClientConfig.Merge(m, src)
}
func (m *ClientConfig) XXX_Size() int {
	return xxx_messageInfo_ClientConfig.Size(m)
}
func (m *ClientConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_ClientConfig.DiscardUnknown(m)
}

var xxx_messageInfo_ClientConfig proto.InternalMessageInfo

func (m *ClientConfig) GetServerTargets() []string {
	if m != nil {
		return m.ServerTargets
	}
	return nil
}

// GetClientType falls back to the enum's zero value SYNC_CLIENT on a nil
// receiver.
func (m *ClientConfig) GetClientType() ClientType {
	if m != nil {
		return m.ClientType
	}
	return ClientType_SYNC_CLIENT
}

func (m *ClientConfig) GetSecurityParams() *SecurityParams {
	if m != nil {
		return m.SecurityParams
	}
	return nil
}
// Remaining generated nil-safe getters for ClientConfig.
func (m *ClientConfig) GetOutstandingRpcsPerChannel() int32 {
	if m != nil {
		return m.OutstandingRpcsPerChannel
	}
	return 0
}

func (m *ClientConfig) GetClientChannels() int32 {
	if m != nil {
		return m.ClientChannels
	}
	return 0
}

func (m *ClientConfig) GetAsyncClientThreads() int32 {
	if m != nil {
		return m.AsyncClientThreads
	}
	return 0
}

func (m *ClientConfig) GetRpcType() RpcType {
	if m != nil {
		return m.RpcType
	}
	return RpcType_UNARY
}

func (m *ClientConfig) GetLoadParams() *LoadParams {
	if m != nil {
		return m.LoadParams
	}
	return nil
}

func (m *ClientConfig) GetPayloadConfig() *PayloadConfig {
	if m != nil {
		return m.PayloadConfig
	}
	return nil
}

func (m *ClientConfig) GetHistogramParams() *HistogramParams {
	if m != nil {
		return m.HistogramParams
	}
	return nil
}

func (m *ClientConfig) GetCoreList() []int32 {
	if m != nil {
		return m.CoreList
	}
	return nil
}

func (m *ClientConfig) GetCoreLimit() int32 {
	if m != nil {
		return m.CoreLimit
	}
	return 0
}

// ClientStatus wraps a ClientStats snapshot (proto message index 8).
type ClientStatus struct {
	Stats                *ClientStats `protobuf:"bytes,1,opt,name=stats,proto3" json:"stats,omitempty"`
	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
	XXX_unrecognized     []byte       `json:"-"`
	XXX_sizecache        int32        `json:"-"`
}

func (m *ClientStatus) Reset()         { *m = ClientStatus{} }
func (m *ClientStatus) String() string { return proto.CompactTextString(m) }
func (*ClientStatus) ProtoMessage()    {}
func (*ClientStatus) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{8}
}
func (m *ClientStatus) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClientStatus.Unmarshal(m, b)
}
func (m *ClientStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClientStatus.Marshal(b, m, deterministic)
}
func (m *ClientStatus) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClientStatus.Merge(m, src)
}
func (m *ClientStatus) XXX_Size() int {
	return xxx_messageInfo_ClientStatus.Size(m)
}
func (m *ClientStatus) XXX_DiscardUnknown() {
	xxx_messageInfo_ClientStatus.DiscardUnknown(m)
}

var xxx_messageInfo_ClientStatus proto.InternalMessageInfo

func (m *ClientStatus) GetStats() *ClientStats {
	if m != nil {
		return m.Stats
	}
	return nil
}

// Request current stats
type Mark struct {
	// if true, the stats will be reset after taking their snapshot.
	// Field is named Reset_ (trailing underscore) because the generator must
	// avoid colliding with the generated Reset() method.
	Reset_               bool     `protobuf:"varint,1,opt,name=reset,proto3" json:"reset,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Mark) Reset()         { *m = Mark{} }
func (m *Mark) String() string { return proto.CompactTextString(m) }
func (*Mark) ProtoMessage()    {}
func (*Mark) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{9}
}
func (m *Mark) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Mark.Unmarshal(m, b)
}
func (m *Mark) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Mark.Marshal(b, m, deterministic)
}
func (m *Mark) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Mark.Merge(m, src)
}
func (m *Mark) XXX_Size() int {
	return xxx_messageInfo_Mark.Size(m)
}
func (m *Mark) XXX_DiscardUnknown() {
	xxx_messageInfo_Mark.DiscardUnknown(m)
}

var xxx_messageInfo_Mark proto.InternalMessageInfo

func (m *Mark) GetReset_() bool {
	if m != nil {
		return m.Reset_
	}
	return false
}

// ClientArgs is the streamed client-worker request: either an initial Setup
// or a Mark stats request, carried in the Argtype oneof (message index 10).
type ClientArgs struct {
	// Types that are valid to be assigned to Argtype:
	//	*ClientArgs_Setup
	//	*ClientArgs_Mark
	Argtype              isClientArgs_Argtype `protobuf_oneof:"argtype"`
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

func (m *ClientArgs) Reset()         { *m = ClientArgs{} }
func (m *ClientArgs) String() string { return proto.CompactTextString(m) }
func (*ClientArgs) ProtoMessage()    {}
func (*ClientArgs) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{10}
}
func (m *ClientArgs) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClientArgs.Unmarshal(m, b)
}
func (m *ClientArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClientArgs.Marshal(b, m, deterministic)
}
func (m *ClientArgs) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClientArgs.Merge(m, src)
}
func (m *ClientArgs) XXX_Size() int {
	return xxx_messageInfo_ClientArgs.Size(m)
}
func (m *ClientArgs) XXX_DiscardUnknown() {
	xxx_messageInfo_ClientArgs.DiscardUnknown(m)
}

var xxx_messageInfo_ClientArgs proto.InternalMessageInfo

type isClientArgs_Argtype interface {
	isClientArgs_Argtype()
}

type ClientArgs_Setup struct {
	Setup *ClientConfig `protobuf:"bytes,1,opt,name=setup,proto3,oneof"`
}

type ClientArgs_Mark struct {
	Mark *Mark `protobuf:"bytes,2,opt,name=mark,proto3,oneof"`
}

func (*ClientArgs_Setup) isClientArgs_Argtype() {}

func (*ClientArgs_Mark) isClientArgs_Argtype() {}

func (m *ClientArgs) GetArgtype() isClientArgs_Argtype {
	if m != nil {
		return m.Argtype
	}
	return nil
}

func (m *ClientArgs) GetSetup() *ClientConfig {
	if x, ok := m.GetArgtype().(*ClientArgs_Setup); ok {
		return x.Setup
	}
	return nil
}

func (m *ClientArgs) GetMark() *Mark {
	if x, ok := m.GetArgtype().(*ClientArgs_Mark); ok {
		return x.Mark
	}
	return nil
}

// XXX_OneofWrappers is for the internal use of the proto package.
func (*ClientArgs) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*ClientArgs_Setup)(nil),
		(*ClientArgs_Mark)(nil),
	}
}

// ServerConfig is the benchmark server's configuration message
// (proto message index 11).
type ServerConfig struct {
	ServerType     ServerType      `protobuf:"varint,1,opt,name=server_type,json=serverType,proto3,enum=grpc.testing.ServerType" json:"server_type,omitempty"`
	SecurityParams *SecurityParams `protobuf:"bytes,2,opt,name=security_params,json=securityParams,proto3" json:"security_params,omitempty"`
	// Port on which to listen. Zero means pick unused port.
	Port int32 `protobuf:"varint,4,opt,name=port,proto3" json:"port,omitempty"`
	// Only for async server. Number of threads used to serve the requests.
	AsyncServerThreads int32 `protobuf:"varint,7,opt,name=async_server_threads,json=asyncServerThreads,proto3" json:"async_server_threads,omitempty"`
	// Specify the number of cores to limit server to, if desired
	CoreLimit int32 `protobuf:"varint,8,opt,name=core_limit,json=coreLimit,proto3" json:"core_limit,omitempty"`
	// payload config, used in generic server
	PayloadConfig *PayloadConfig `protobuf:"bytes,9,opt,name=payload_config,json=payloadConfig,proto3" json:"payload_config,omitempty"`
	// Specify the cores we should run the server on, if desired
	CoreList             []int32  `protobuf:"varint,10,rep,packed,name=core_list,json=coreList,proto3" json:"core_list,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ServerConfig) Reset()         { *m = ServerConfig{} }
func (m *ServerConfig) String() string { return proto.CompactTextString(m) }
func (*ServerConfig) ProtoMessage()    {}
func (*ServerConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{11}
}
func (m *ServerConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ServerConfig.Unmarshal(m, b)
}
func (m *ServerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ServerConfig.Marshal(b, m, deterministic)
}
func (m *ServerConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ServerConfig.Merge(m, src)
}
func (m *ServerConfig) XXX_Size() int {
	return xxx_messageInfo_ServerConfig.Size(m)
}
func (m *ServerConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_ServerConfig.DiscardUnknown(m)
}

var xxx_messageInfo_ServerConfig proto.InternalMessageInfo

func (m *ServerConfig) GetServerType() ServerType {
	if m != nil {
		return m.ServerType
	}
	return ServerType_SYNC_SERVER
}

func (m *ServerConfig) GetSecurityParams() *SecurityParams {
	if m != nil {
		return m.SecurityParams
	}
	return nil
}

func (m *ServerConfig) GetPort() int32 {
	if m != nil {
		return m.Port
	}
	return 0
}

func (m *ServerConfig) GetAsyncServerThreads() int32 {
	if m != nil {
		return m.AsyncServerThreads
	}
	return 0
}

func (m *ServerConfig) GetCoreLimit() int32 {
	if m != nil {
		return m.CoreLimit
	}
	return 0
}

func (m *ServerConfig) GetPayloadConfig() *PayloadConfig {
	if m != nil {
		return m.PayloadConfig
	}
	return nil
}

func (m *ServerConfig) GetCoreList() []int32 {
	if m != nil {
		return m.CoreList
	}
	return nil
}

// ServerArgs mirrors ClientArgs for the server worker: Setup or Mark in the
// Argtype oneof (proto message index 12).
type ServerArgs struct {
	// Types that are valid to be assigned to Argtype:
	//	*ServerArgs_Setup
	//	*ServerArgs_Mark
	Argtype              isServerArgs_Argtype `protobuf_oneof:"argtype"`
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

func (m *ServerArgs) Reset()         { *m = ServerArgs{} }
func (m *ServerArgs) String() string { return proto.CompactTextString(m) }
func (*ServerArgs) ProtoMessage()    {}
func (*ServerArgs) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{12}
}
func (m *ServerArgs) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ServerArgs.Unmarshal(m, b)
}
func (m *ServerArgs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ServerArgs.Marshal(b, m, deterministic)
}
func (m *ServerArgs) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ServerArgs.Merge(m, src)
}
func (m *ServerArgs) XXX_Size() int {
	return xxx_messageInfo_ServerArgs.Size(m)
}
func (m *ServerArgs) XXX_DiscardUnknown() {
	xxx_messageInfo_ServerArgs.DiscardUnknown(m)
}

var xxx_messageInfo_ServerArgs proto.InternalMessageInfo

type isServerArgs_Argtype interface {
	isServerArgs_Argtype()
}

type ServerArgs_Setup struct {
	Setup *ServerConfig `protobuf:"bytes,1,opt,name=setup,proto3,oneof"`
}

type ServerArgs_Mark struct {
	Mark *Mark `protobuf:"bytes,2,opt,name=mark,proto3,oneof"`
}

func (*ServerArgs_Setup) isServerArgs_Argtype() {}

func (*ServerArgs_Mark) isServerArgs_Argtype() {}

func (m *ServerArgs) GetArgtype() isServerArgs_Argtype {
	if m != nil {
		return m.Argtype
	}
	return nil
}

func (m *ServerArgs) GetSetup() *ServerConfig {
	if x, ok := m.GetArgtype().(*ServerArgs_Setup); ok {
		return x.Setup
	}
	return nil
}

func (m *ServerArgs) GetMark() *Mark {
	if x, ok := m.GetArgtype().(*ServerArgs_Mark); ok {
		return x.Mark
	}
	return nil
}

// XXX_OneofWrappers is for the internal use of the proto package.
func (*ServerArgs) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*ServerArgs_Setup)(nil),
		(*ServerArgs_Mark)(nil),
	}
}

// ServerStatus reports server stats plus bound port and core count
// (proto message index 13).
type ServerStatus struct {
	Stats *ServerStats `protobuf:"bytes,1,opt,name=stats,proto3" json:"stats,omitempty"`
	// the port bound by the server
	Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
	// Number of cores available to the server
	Cores                int32    `protobuf:"varint,3,opt,name=cores,proto3" json:"cores,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ServerStatus) Reset()         { *m = ServerStatus{} }
func (m *ServerStatus) String() string { return proto.CompactTextString(m) }
func (*ServerStatus) ProtoMessage()    {}
func (*ServerStatus) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{13}
}
func (m *ServerStatus) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ServerStatus.Unmarshal(m, b)
}
func (m *ServerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ServerStatus.Marshal(b, m, deterministic)
}
func (m *ServerStatus) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ServerStatus.Merge(m, src)
}
func (m *ServerStatus) XXX_Size() int {
	return xxx_messageInfo_ServerStatus.Size(m)
}
func (m *ServerStatus) XXX_DiscardUnknown() {
	xxx_messageInfo_ServerStatus.DiscardUnknown(m)
}

var xxx_messageInfo_ServerStatus proto.InternalMessageInfo

func (m *ServerStatus) GetStats() *ServerStats {
	if m != nil {
		return m.Stats
	}
	return nil
}

func (m *ServerStatus) GetPort() int32 {
	if m != nil {
		return m.Port
	}
	return 0
}

func (m *ServerStatus) GetCores() int32 {
	if m != nil {
		return m.Cores
	}
	return 0
}

// CoreRequest is an empty request message (proto message index 14).
type CoreRequest struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *CoreRequest) Reset()         { *m = CoreRequest{} }
func (m *CoreRequest) String() string { return proto.CompactTextString(m) }
func (*CoreRequest) ProtoMessage()    {}
func (*CoreRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{14}
}
func (m *CoreRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CoreRequest.Unmarshal(m, b)
}
func (m *CoreRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CoreRequest.Marshal(b, m, deterministic)
}
func (m *CoreRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CoreRequest.Merge(m, src)
}
func (m *CoreRequest) XXX_Size() int {
	return xxx_messageInfo_CoreRequest.Size(m)
}
func (m *CoreRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CoreRequest.DiscardUnknown(m)
}

var xxx_messageInfo_CoreRequest proto.InternalMessageInfo

type CoreResponse struct {
	// Number of cores available on the server
	Cores                int32    `protobuf:"varint,1,opt,name=cores,proto3" json:"cores,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *CoreResponse) Reset()         { *m = CoreResponse{} }
func (m *CoreResponse) String() string { return proto.CompactTextString(m) }
func (*CoreResponse) ProtoMessage()    {}
func (*CoreResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{15}
}
func (m *CoreResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CoreResponse.Unmarshal(m, b)
}
func (m *CoreResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CoreResponse.Marshal(b, m, deterministic)
}
func (m *CoreResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CoreResponse.Merge(m, src)
}
func (m *CoreResponse) XXX_Size() int {
	return xxx_messageInfo_CoreResponse.Size(m)
}
func (m *CoreResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_CoreResponse.DiscardUnknown(m)
}

var xxx_messageInfo_CoreResponse proto.InternalMessageInfo

func (m *CoreResponse) GetCores() int32 {
	if m != nil {
		return m.Cores
	}
	return 0
}

// Void is the empty message used where an RPC needs no payload
// (proto message index 16).
type Void struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *Void) Reset()         { *m = Void{} }
func (m *Void) String() string { return proto.CompactTextString(m) }
func (*Void) ProtoMessage()    {}
func (*Void) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{16}
}
func (m *Void) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Void.Unmarshal(m, b)
}
func (m *Void) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Void.Marshal(b, m, deterministic)
}
func (m *Void) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Void.Merge(m, src)
}
func (m *Void) XXX_Size() int {
	return xxx_messageInfo_Void.Size(m)
}
func (m *Void) XXX_DiscardUnknown() {
	xxx_messageInfo_Void.DiscardUnknown(m)
}

var xxx_messageInfo_Void proto.InternalMessageInfo

// A single performance scenario: input to qps_json_driver
type Scenario struct {
	// Human readable name for this scenario
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Client configuration
	ClientConfig *ClientConfig `protobuf:"bytes,2,opt,name=client_config,json=clientConfig,proto3" json:"client_config,omitempty"`
	// Number of clients to start for the test
	NumClients int32 `protobuf:"varint,3,opt,name=num_clients,json=numClients,proto3" json:"num_clients,omitempty"`
	// Server configuration
	ServerConfig *ServerConfig `protobuf:"bytes,4,opt,name=server_config,json=serverConfig,proto3" json:"server_config,omitempty"`
	// Number of servers to start for the test
	NumServers int32 `protobuf:"varint,5,opt,name=num_servers,json=numServers,proto3" json:"num_servers,omitempty"`
	// Warmup period, in seconds
	WarmupSeconds int32 `protobuf:"varint,6,opt,name=warmup_seconds,json=warmupSeconds,proto3" json:"warmup_seconds,omitempty"`
	// Benchmark time, in seconds
	BenchmarkSeconds int32 `protobuf:"varint,7,opt,name=benchmark_seconds,json=benchmarkSeconds,proto3" json:"benchmark_seconds,omitempty"`
	// Number of workers to spawn locally (usually zero)
	SpawnLocalWorkerCount int32    `protobuf:"varint,8,opt,name=spawn_local_worker_count,json=spawnLocalWorkerCount,proto3" json:"spawn_local_worker_count,omitempty"`
	XXX_NoUnkeyedLiteral  struct{} `json:"-"`
	XXX_unrecognized      []byte   `json:"-"`
	XXX_sizecache         int32    `json:"-"`
}

func (m *Scenario) Reset()         { *m = Scenario{} }
func (m *Scenario) String() string { return proto.CompactTextString(m) }
func (*Scenario) ProtoMessage()    {}
func (*Scenario) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{17}
}
func (m *Scenario) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Scenario.Unmarshal(m, b)
}
func (m *Scenario) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Scenario.Marshal(b, m, deterministic)
}
func (m *Scenario) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Scenario.Merge(m, src)
}
func (m *Scenario) XXX_Size() int {
	return xxx_messageInfo_Scenario.Size(m)
}
func (m *Scenario) XXX_DiscardUnknown() {
	xxx_messageInfo_Scenario.DiscardUnknown(m)
}

var xxx_messageInfo_Scenario proto.InternalMessageInfo

func (m *Scenario) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

func (m *Scenario) GetClientConfig() *ClientConfig {
	if m != nil {
		return m.ClientConfig
	}
	return nil
}

func (m *Scenario) GetNumClients() int32 {
	if m != nil {
		return m.NumClients
	}
	return 0
}

func (m *Scenario) GetServerConfig() *ServerConfig {
	if m != nil {
		return m.ServerConfig
	}
	return nil
}

func (m *Scenario) GetNumServers() int32 {
	if m != nil {
		return m.NumServers
	}
	return 0
}

func (m *Scenario) GetWarmupSeconds() int32 {
	if m != nil {
		return m.WarmupSeconds
	}
	return 0
}

func (m *Scenario) GetBenchmarkSeconds() int32 {
	if m != nil {
		return m.BenchmarkSeconds
	}
	return 0
}

func (m *Scenario) GetSpawnLocalWorkerCount() int32 {
	if m != nil {
		return m.SpawnLocalWorkerCount
	}
	return 0
}

// A set of scenarios to be run with qps_json_driver
type Scenarios struct {
	Scenarios            []*Scenario `protobuf:"bytes,1,rep,name=scenarios,proto3" json:"scenarios,omitempty"`
	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
	XXX_unrecognized     []byte      `json:"-"`
	XXX_sizecache        int32       `json:"-"`
}

func (m *Scenarios) Reset()         { *m = Scenarios{} }
func (m *Scenarios) String() string { return proto.CompactTextString(m) }
func (*Scenarios) ProtoMessage()    {}
func (*Scenarios) Descriptor() ([]byte, []int) {
	return fileDescriptor_0c5120591600887d, []int{18}
}
func (m *Scenarios) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Scenarios.Unmarshal(m, b)
}
func (m *Scenarios) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Scenarios.Marshal(b, m, deterministic)
}
func (m *Scenarios) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Scenarios.Merge(m, src)
}
func (m *Scenarios) XXX_Size() int {
	return xxx_messageInfo_Scenarios.Size(m)
}
func (m *Scenarios) XXX_DiscardUnknown() {
	xxx_messageInfo_Scenarios.DiscardUnknown(m)
}

var xxx_messageInfo_Scenarios proto.InternalMessageInfo

func (m *Scenarios) GetScenarios() []*Scenario {
	if m != nil {
		return m.Scenarios
	}
	return nil
}

// init registers this file's enums and message types with the proto runtime
// under their fully-qualified grpc.testing names.
// NOTE(review): the final RegisterType call is continued on the next source
// line; the registration list and the fileDescriptor var follow it.
func init() {
	proto.RegisterEnum("grpc.testing.ClientType", ClientType_name, ClientType_value)
	proto.RegisterEnum("grpc.testing.ServerType", ServerType_name, ServerType_value)
	proto.RegisterEnum("grpc.testing.RpcType", RpcType_name, RpcType_value)
	proto.RegisterType((*PoissonParams)(nil), "grpc.testing.PoissonParams")
	proto.RegisterType((*UniformParams)(nil), "grpc.testing.UniformParams")
	proto.RegisterType((*DeterministicParams)(nil), "grpc.testing.DeterministicParams")
	proto.RegisterType((*ParetoParams)(nil), "grpc.testing.ParetoParams")
	proto.RegisterType((*ClosedLoopParams)(nil),
"grpc.testing.ClosedLoopParams") proto.RegisterType((*LoadParams)(nil), "grpc.testing.LoadParams") proto.RegisterType((*SecurityParams)(nil), "grpc.testing.SecurityParams") proto.RegisterType((*ClientConfig)(nil), "grpc.testing.ClientConfig") proto.RegisterType((*ClientStatus)(nil), "grpc.testing.ClientStatus") proto.RegisterType((*Mark)(nil), "grpc.testing.Mark") proto.RegisterType((*ClientArgs)(nil), "grpc.testing.ClientArgs") proto.RegisterType((*ServerConfig)(nil), "grpc.testing.ServerConfig") proto.RegisterType((*ServerArgs)(nil), "grpc.testing.ServerArgs") proto.RegisterType((*ServerStatus)(nil), "grpc.testing.ServerStatus") proto.RegisterType((*CoreRequest)(nil), "grpc.testing.CoreRequest") proto.RegisterType((*CoreResponse)(nil), "grpc.testing.CoreResponse") proto.RegisterType((*Void)(nil), "grpc.testing.Void") proto.RegisterType((*Scenario)(nil), "grpc.testing.Scenario") proto.RegisterType((*Scenarios)(nil), "grpc.testing.Scenarios") } func init() { proto.RegisterFile("control.proto", fileDescriptor_0c5120591600887d) } var fileDescriptor_0c5120591600887d = []byte{ // 1179 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x6f, 0x6f, 0xdb, 0xb6, 0x13, 0xb6, 0x1d, 0xdb, 0xb1, 0x4e, 0xb6, 0xe3, 0x1f, 0x7f, 0xe9, 0xa0, 0xa6, 0x69, 0x97, 0x6a, 0x1b, 0x16, 0x64, 0x40, 0x5a, 0x78, 0x05, 0xba, 0x62, 0x2f, 0x02, 0xc7, 0x33, 0xea, 0x00, 0x69, 0x96, 0xd1, 0x69, 0x87, 0xbe, 0x12, 0x18, 0x99, 0xb1, 0x85, 0xc8, 0xa2, 0x46, 0x52, 0x09, 0xf2, 0x15, 0xf6, 0x99, 0xf6, 0x39, 0xf6, 0x35, 0xf6, 0x15, 0x06, 0xfe, 0x91, 0x23, 0xb9, 0x06, 0x9a, 0x6d, 0xef, 0xc4, 0xbb, 0xe7, 0xe1, 0x91, 0xf7, 0xdc, 0x1d, 0x05, 0x9d, 0x90, 0x25, 0x92, 0xb3, 0xf8, 0x30, 0xe5, 0x4c, 0x32, 0xd4, 0x9e, 0xf1, 0x34, 0x3c, 0x94, 0x54, 0xc8, 0x28, 0x99, 0xed, 0x74, 0x53, 0x72, 0x17, 0x33, 0x32, 0x15, 0xc6, 0xbb, 0xe3, 0x0a, 0x49, 0xa4, 0x5d, 0xf8, 0x7d, 0xe8, 0x9c, 0xb3, 0x48, 0x08, 0x96, 0x9c, 0x13, 0x4e, 0x16, 0x02, 0x3d, 0x87, 0x36, 0xbb, 
0xba, 0xa2, 0x9c, 0x4e, 0x03, 0x45, 0xf2, 0xaa, 0x7b, 0xd5, 0xfd, 0x2a, 0x76, 0xad, 0xed, 0x94, 0x91, 0xa9, 0x4f, 0xa0, 0xf3, 0x3e, 0x89, 0xae, 0x18, 0x5f, 0x58, 0xce, 0xb7, 0xb0, 0x15, 0x25, 0x92, 0x72, 0xc2, 0x79, 0x74, 0x43, 0xe2, 0x20, 0x66, 0x96, 0xd6, 0x2d, 0x9a, 0x4f, 0xd9, 0x27, 0xc0, 0x79, 0xe4, 0xd5, 0x3e, 0x05, 0x8e, 0x23, 0xff, 0x07, 0xf8, 0xff, 0x4f, 0x54, 0x52, 0xbe, 0x88, 0x92, 0x48, 0xc8, 0x28, 0x7c, 0xf8, 0xe1, 0x7e, 0x81, 0xf6, 0x39, 0xe1, 0x54, 0x32, 0x4b, 0xf9, 0x0e, 0xfe, 0x57, 0x0a, 0x79, 0x49, 0x04, 0xb5, 0xbc, 0x5e, 0xd1, 0x71, 0x4c, 0x04, 0x45, 0xdb, 0xd0, 0x20, 0x71, 0x3a, 0x27, 0xf6, 0x54, 0x66, 0xe1, 0x23, 0xe8, 0x0d, 0x63, 0x26, 0x54, 0x00, 0x96, 0x9a, 0x6d, 0xfd, 0x3f, 0x6a, 0x00, 0x2a, 0x9e, 0x8d, 0x32, 0x00, 0x37, 0xd4, 0x90, 0x20, 0x66, 0x2c, 0xd5, 0xfb, 0xbb, 0xfd, 0x67, 0x87, 0x45, 0x1d, 0x0e, 0x57, 0xf7, 0x18, 0x57, 0x30, 0x84, 0x4b, 0x1b, 0x7a, 0x0d, 0x9b, 0xa9, 0x51, 0x42, 0x47, 0x77, 0xfb, 0x4f, 0xca, 0xf4, 0x92, 0x4c, 0xe3, 0x0a, 0xce, 0xd1, 0x8a, 0x98, 0x19, 0x39, 0xbc, 0x8d, 0x75, 0xc4, 0x92, 0x56, 0x8a, 0x68, 0xd1, 0xe8, 0x47, 0x68, 0x4e, 0x75, 0x92, 0xbd, 0xba, 0xe6, 0x3d, 0x2f, 0xf3, 0xd6, 0x08, 0x30, 0xae, 0x60, 0x4b, 0x41, 0xaf, 0xa0, 0x99, 0xea, 0x3c, 0x7b, 0x0d, 0x4d, 0xde, 0x59, 0x39, 0x6d, 0x41, 0x03, 0xc5, 0x32, 0xd8, 0xe3, 0x26, 0xd4, 0x95, 0x70, 0xfe, 0x25, 0x74, 0x27, 0x34, 0xcc, 0x78, 0x24, 0xef, 0x6c, 0x06, 0x9f, 0x81, 0x9b, 0x09, 0x1a, 0x28, 0x7e, 0x10, 0x12, 0x9d, 0xc1, 0x16, 0x76, 0x32, 0x41, 0x2f, 0xa8, 0x90, 0x43, 0x82, 0x5e, 0xc2, 0xb6, 0xa0, 0xfc, 0x86, 0xf2, 0x60, 0xce, 0x84, 0x0c, 0xd8, 0x0d, 0xe5, 0x3c, 0x9a, 0x52, 0x9d, 0x2b, 0x07, 0x23, 0xe3, 0x1b, 0x33, 0x21, 0x7f, 0xb6, 0x1e, 0xff, 0xf7, 0x06, 0xb4, 0x87, 0x71, 0x44, 0x13, 0x39, 0x64, 0xc9, 0x55, 0x34, 0x43, 0xdf, 0x40, 0xd7, 0x6e, 0x21, 0x09, 0x9f, 0x51, 0x29, 0xbc, 0xea, 0xde, 0xc6, 0xbe, 0x83, 0x3b, 0xc6, 0x7a, 0x61, 0x8c, 0xe8, 0x8d, 0xd2, 0x52, 0xd1, 0x02, 0x79, 0x97, 0x9a, 0x00, 0xdd, 0xbe, 0xb7, 0xaa, 0xa5, 0x02, 0x5c, 0xdc, 0xa5, 0x54, 
0x69, 0x98, 0x7f, 0xa3, 0x11, 0x6c, 0x09, 0x7b, 0xad, 0x20, 0xd5, 0xf7, 0xb2, 0x92, 0xec, 0x96, 0xe9, 0xe5, 0xbb, 0xe3, 0xae, 0x28, 0xe7, 0xe2, 0x08, 0x76, 0x59, 0x26, 0x85, 0x24, 0xc9, 0x34, 0x4a, 0x66, 0x01, 0x4f, 0x43, 0x11, 0xa4, 0x94, 0x07, 0xe1, 0x9c, 0x24, 0x09, 0x8d, 0xb5, 0x5c, 0x0d, 0xfc, 0xb8, 0x80, 0xc1, 0x69, 0x28, 0xce, 0x29, 0x1f, 0x1a, 0x80, 0xea, 0x33, 0x7b, 0x05, 0x4b, 0x11, 0x5a, 0xa5, 0x06, 0xee, 0x1a, 0xb3, 0xc5, 0x09, 0x95, 0x55, 0x22, 0xee, 0x92, 0x30, 0xc8, 0x6f, 0x3c, 0xe7, 0x94, 0x4c, 0x85, 0xb7, 0xa9, 0xd1, 0x48, 0xfb, 0xec, 0x5d, 0x8d, 0x07, 0xbd, 0x84, 0x16, 0x4f, 0x43, 0x93, 0x9a, 0x96, 0x4e, 0xcd, 0xa3, 0xf2, 0xdd, 0x70, 0x1a, 0xea, 0xbc, 0x6c, 0x72, 0xf3, 0xa1, 0xf2, 0xa9, 0x34, 0xcf, 0x13, 0x02, 0x3a, 0x21, 0x2b, 0xf9, 0xbc, 0x6f, 0x25, 0x0c, 0xf1, 0x7d, 0x5b, 0x1d, 0x43, 0x3e, 0xbc, 0x82, 0x50, 0x6b, 0xe8, 0xb9, 0x6b, 0x5b, 0xc3, 0x60, 0x8c, 0xcc, 0xb8, 0x93, 0x16, 0x97, 0x68, 0x0c, 0xbd, 0x79, 0x24, 0x24, 0x9b, 0x71, 0xb2, 0xc8, 0xcf, 0xd0, 0xd6, 0xbb, 0x3c, 0x2d, 0xef, 0x32, 0xce, 0x51, 0xf6, 0x20, 0x5b, 0xf3, 0xb2, 0x01, 0x3d, 0x01, 0x27, 0x64, 0x9c, 0x06, 0x71, 0x24, 0xa4, 0xd7, 0xd9, 0xdb, 0xd8, 0x6f, 0xe0, 0x96, 0x32, 0x9c, 0x46, 0x42, 0xa2, 0xa7, 0x00, 0xd6, 0xb9, 0x88, 0xa4, 0xd7, 0xd5, 0xf9, 0x73, 0x8c, 0x77, 0x11, 0x49, 0xff, 0x28, 0xaf, 0xc5, 0x89, 0x24, 0x32, 0x13, 0xe8, 0x05, 0x34, 0xf4, 0x18, 0xb6, 0xa3, 0xe2, 0xf1, 0xba, 0xf2, 0x52, 0x50, 0x81, 0x0d, 0xce, 0xdf, 0x85, 0xfa, 0x3b, 0xc2, 0xaf, 0xd5, 0x88, 0xe2, 0x54, 0x50, 0x69, 0x3b, 0xc4, 0x2c, 0xfc, 0x0c, 0xc0, 0x70, 0x06, 0x7c, 0x26, 0x50, 0x1f, 0x1a, 0x82, 0xca, 0x2c, 0x9f, 0x43, 0x3b, 0xeb, 0x36, 0x37, 0xd9, 0x19, 0x57, 0xb0, 0x81, 0xa2, 0x7d, 0xa8, 0x2f, 0x08, 0xbf, 0xb6, 0xb3, 0x07, 0x95, 0x29, 0x2a, 0xf2, 0xb8, 0x82, 0x35, 0xe2, 0xd8, 0x81, 0x4d, 0xc2, 0x67, 0xaa, 0x00, 0xfc, 0x3f, 0x6b, 0xd0, 0x9e, 0xe8, 0xe6, 0xb1, 0xc9, 0x7e, 0x03, 0x6e, 0xde, 0x62, 0xaa, 0x40, 0xaa, 0xeb, 0x7a, 0xc7, 0x10, 0x4c, 0xef, 0x88, 0xe5, 0xf7, 0xba, 0xde, 0xa9, 0xfd, 0x8b, 0xde, 
0x41, 0x50, 0x4f, 0x19, 0x97, 0xb6, 0x47, 0xf4, 0xf7, 0x7d, 0x95, 0xe7, 0x67, 0x5b, 0x53, 0xe5, 0xf6, 0x54, 0xb6, 0xca, 0xcb, 0x6a, 0xb6, 0x56, 0xd4, 0x5c, 0x53, 0x97, 0xce, 0x3f, 0xae, 0xcb, 0x52, 0x35, 0x41, 0xb9, 0x9a, 0x94, 0x9e, 0xe6, 0x40, 0x0f, 0xd0, 0xb3, 0x28, 0xc0, 0x7f, 0xd4, 0x33, 0xca, 0xe5, 0x7c, 0x50, 0x95, 0xde, 0x43, 0xf3, 0x2a, 0x5d, 0x66, 0xbf, 0x56, 0xc8, 0xfe, 0x36, 0x34, 0xd4, 0xbd, 0xcc, 0x28, 0x6c, 0x60, 0xb3, 0xf0, 0x3b, 0xe0, 0x0e, 0x19, 0xa7, 0x98, 0xfe, 0x96, 0x51, 0x21, 0xfd, 0xaf, 0xa1, 0x6d, 0x96, 0x22, 0x65, 0x89, 0x79, 0x89, 0x0d, 0xa9, 0x5a, 0x24, 0x35, 0xa1, 0xfe, 0x81, 0x45, 0x53, 0xff, 0xaf, 0x1a, 0xb4, 0x26, 0x21, 0x4d, 0x08, 0x8f, 0x98, 0x8a, 0x99, 0x90, 0x85, 0x29, 0x36, 0x07, 0xeb, 0x6f, 0x74, 0x04, 0x9d, 0x7c, 0x00, 0x1a, 0x7d, 0x6a, 0x9f, 0xeb, 0x04, 0xdc, 0x0e, 0x8b, 0x6f, 0xc5, 0x97, 0xe0, 0x26, 0xd9, 0xc2, 0x8e, 0xc5, 0xfc, 0xe8, 0x90, 0x64, 0x0b, 0xc3, 0x51, 0x33, 0xda, 0x3e, 0x1b, 0x79, 0x84, 0xfa, 0xe7, 0xb4, 0xc1, 0x6d, 0x51, 0x6c, 0x15, 0x1b, 0xc1, 0xd8, 0xf2, 0xf9, 0xac, 0x22, 0x18, 0x8e, 0x50, 0xcf, 0xd5, 0x2d, 0xe1, 0x8b, 0x2c, 0x0d, 0x04, 0x0d, 0x59, 0x32, 0x15, 0x5e, 0x53, 0x63, 0x3a, 0xc6, 0x3a, 0x31, 0x46, 0xf5, 0x83, 0x73, 0x49, 0x93, 0x70, 0xae, 0xb4, 0x5c, 0x22, 0x4d, 0x65, 0xf7, 0x96, 0x8e, 0x1c, 0xfc, 0x1a, 0x3c, 0x91, 0x92, 0xdb, 0x24, 0x88, 0x59, 0x48, 0xe2, 0xe0, 0x96, 0xf1, 0x6b, 0x7d, 0x83, 0x2c, 0xc9, 0xab, 0xfc, 0x91, 0xf6, 0x9f, 0x2a, 0xf7, 0xaf, 0xda, 0x3b, 0x54, 0x4e, 0x7f, 0x00, 0x4e, 0x9e, 0x70, 0x81, 0x5e, 0x81, 0x23, 0xf2, 0x85, 0x7e, 0x43, 0xdd, 0xfe, 0x17, 0x2b, 0xf7, 0xb6, 0x6e, 0x7c, 0x0f, 0x3c, 0x78, 0x91, 0xcf, 0x28, 0xdd, 0xee, 0x5b, 0xe0, 0x4e, 0x3e, 0x9e, 0x0d, 0x83, 0xe1, 0xe9, 0xc9, 0xe8, 0xec, 0xa2, 0x57, 0x41, 0x3d, 0x68, 0x0f, 0x8a, 0x96, 0xea, 0xc1, 0x49, 0xde, 0x04, 0x25, 0xc2, 0x64, 0x84, 0x3f, 0x8c, 0x70, 0x91, 0x60, 0x2d, 0x55, 0xe4, 0xc1, 0xb6, 0xb1, 0xbc, 0x1d, 0x9d, 0x8d, 0xf0, 0xc9, 0xd2, 0x53, 0x3b, 0xf8, 0x0a, 0x36, 0xed, 0xbb, 0x84, 0x1c, 0x68, 0xbc, 0x3f, 0x1b, 
0xe0, 0x8f, 0xbd, 0x0a, 0xea, 0x80, 0x33, 0xb9, 0xc0, 0xa3, 0xc1, 0xbb, 0x93, 0xb3, 0xb7, 0xbd, 0xea, 0x65, 0x53, 0xff, 0x12, 0x7f, 0xff, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x75, 0x59, 0xf4, 0x03, 0x4e, 0x0b, 0x00, 0x00, } grpc-go-1.29.1/benchmark/grpc_testing/control.proto000066400000000000000000000112531365033716300223420ustar00rootroot00000000000000// Copyright 2016 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; import "payloads.proto"; import "stats.proto"; package grpc.testing; enum ClientType { SYNC_CLIENT = 0; ASYNC_CLIENT = 1; } enum ServerType { SYNC_SERVER = 0; ASYNC_SERVER = 1; ASYNC_GENERIC_SERVER = 2; } enum RpcType { UNARY = 0; STREAMING = 1; } // Parameters of poisson process distribution, which is a good representation // of activity coming in from independent identical stationary sources. message PoissonParams { // The rate of arrivals (a.k.a. lambda parameter of the exp distribution). double offered_load = 1; } message UniformParams { double interarrival_lo = 1; double interarrival_hi = 2; } message DeterministicParams { double offered_load = 1; } message ParetoParams { double interarrival_base = 1; double alpha = 2; } // Once an RPC finishes, immediately start a new one. // No configuration parameters needed. 
message ClosedLoopParams { } message LoadParams { oneof load { ClosedLoopParams closed_loop = 1; PoissonParams poisson = 2; UniformParams uniform = 3; DeterministicParams determ = 4; ParetoParams pareto = 5; }; } // presence of SecurityParams implies use of TLS message SecurityParams { bool use_test_ca = 1; string server_host_override = 2; } message ClientConfig { // List of targets to connect to. At least one target needs to be specified. repeated string server_targets = 1; ClientType client_type = 2; SecurityParams security_params = 3; // How many concurrent RPCs to start for each channel. // For synchronous client, use a separate thread for each outstanding RPC. int32 outstanding_rpcs_per_channel = 4; // Number of independent client channels to create. // i-th channel will connect to server_target[i % server_targets.size()] int32 client_channels = 5; // Only for async client. Number of threads to use to start/manage RPCs. int32 async_client_threads = 7; RpcType rpc_type = 8; // The requested load for the entire client (aggregated over all the threads). LoadParams load_params = 10; PayloadConfig payload_config = 11; HistogramParams histogram_params = 12; // Specify the cores we should run the client on, if desired repeated int32 core_list = 13; int32 core_limit = 14; } message ClientStatus { ClientStats stats = 1; } // Request current stats message Mark { // if true, the stats will be reset after taking their snapshot. bool reset = 1; } message ClientArgs { oneof argtype { ClientConfig setup = 1; Mark mark = 2; } } message ServerConfig { ServerType server_type = 1; SecurityParams security_params = 2; // Port on which to listen. Zero means pick unused port. int32 port = 4; // Only for async server. Number of threads used to serve the requests. 
int32 async_server_threads = 7; // Specify the number of cores to limit server to, if desired int32 core_limit = 8; // payload config, used in generic server PayloadConfig payload_config = 9; // Specify the cores we should run the server on, if desired repeated int32 core_list = 10; } message ServerArgs { oneof argtype { ServerConfig setup = 1; Mark mark = 2; } } message ServerStatus { ServerStats stats = 1; // the port bound by the server int32 port = 2; // Number of cores available to the server int32 cores = 3; } message CoreRequest { } message CoreResponse { // Number of cores available on the server int32 cores = 1; } message Void { } // A single performance scenario: input to qps_json_driver message Scenario { // Human readable name for this scenario string name = 1; // Client configuration ClientConfig client_config = 2; // Number of clients to start for the test int32 num_clients = 3; // Server configuration ServerConfig server_config = 4; // Number of servers to start for the test int32 num_servers = 5; // Warmup period, in seconds int32 warmup_seconds = 6; // Benchmark time, in seconds int32 benchmark_seconds = 7; // Number of workers to spawn locally (usually zero) int32 spawn_local_worker_count = 8; } // A set of scenarios to be run with qps_json_driver message Scenarios { repeated Scenario scenarios = 1; } grpc-go-1.29.1/benchmark/grpc_testing/messages.pb.go000066400000000000000000000677241365033716300223510ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: messages.proto package grpc_testing import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. 
// A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // The type of payload that should be returned. type PayloadType int32 const ( // Compressable text format. PayloadType_COMPRESSABLE PayloadType = 0 // Uncompressable binary format. PayloadType_UNCOMPRESSABLE PayloadType = 1 // Randomly chosen from all other formats defined in this enum. PayloadType_RANDOM PayloadType = 2 ) var PayloadType_name = map[int32]string{ 0: "COMPRESSABLE", 1: "UNCOMPRESSABLE", 2: "RANDOM", } var PayloadType_value = map[string]int32{ "COMPRESSABLE": 0, "UNCOMPRESSABLE": 1, "RANDOM": 2, } func (x PayloadType) String() string { return proto.EnumName(PayloadType_name, int32(x)) } func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_4dc296cbfe5ffcd5, []int{0} } // Compression algorithms type CompressionType int32 const ( // No compression CompressionType_NONE CompressionType = 0 CompressionType_GZIP CompressionType = 1 CompressionType_DEFLATE CompressionType = 2 ) var CompressionType_name = map[int32]string{ 0: "NONE", 1: "GZIP", 2: "DEFLATE", } var CompressionType_value = map[string]int32{ "NONE": 0, "GZIP": 1, "DEFLATE": 2, } func (x CompressionType) String() string { return proto.EnumName(CompressionType_name, int32(x)) } func (CompressionType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_4dc296cbfe5ffcd5, []int{1} } // A block of data, to simply increase gRPC message size. type Payload struct { // The type of data in body. Type PayloadType `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.testing.PayloadType" json:"type,omitempty"` // Primary contents of payload. 
Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Payload) Reset() { *m = Payload{} } func (m *Payload) String() string { return proto.CompactTextString(m) } func (*Payload) ProtoMessage() {} func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor_4dc296cbfe5ffcd5, []int{0} } func (m *Payload) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Payload.Unmarshal(m, b) } func (m *Payload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Payload.Marshal(b, m, deterministic) } func (m *Payload) XXX_Merge(src proto.Message) { xxx_messageInfo_Payload.Merge(m, src) } func (m *Payload) XXX_Size() int { return xxx_messageInfo_Payload.Size(m) } func (m *Payload) XXX_DiscardUnknown() { xxx_messageInfo_Payload.DiscardUnknown(m) } var xxx_messageInfo_Payload proto.InternalMessageInfo func (m *Payload) GetType() PayloadType { if m != nil { return m.Type } return PayloadType_COMPRESSABLE } func (m *Payload) GetBody() []byte { if m != nil { return m.Body } return nil } // A protobuf representation for grpc status. This is used by test // clients to specify a status that the server should attempt to return. 
type EchoStatus struct { Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EchoStatus) Reset() { *m = EchoStatus{} } func (m *EchoStatus) String() string { return proto.CompactTextString(m) } func (*EchoStatus) ProtoMessage() {} func (*EchoStatus) Descriptor() ([]byte, []int) { return fileDescriptor_4dc296cbfe5ffcd5, []int{1} } func (m *EchoStatus) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EchoStatus.Unmarshal(m, b) } func (m *EchoStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EchoStatus.Marshal(b, m, deterministic) } func (m *EchoStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_EchoStatus.Merge(m, src) } func (m *EchoStatus) XXX_Size() int { return xxx_messageInfo_EchoStatus.Size(m) } func (m *EchoStatus) XXX_DiscardUnknown() { xxx_messageInfo_EchoStatus.DiscardUnknown(m) } var xxx_messageInfo_EchoStatus proto.InternalMessageInfo func (m *EchoStatus) GetCode() int32 { if m != nil { return m.Code } return 0 } func (m *EchoStatus) GetMessage() string { if m != nil { return m.Message } return "" } // Unary request. type SimpleRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. ResponseSize int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize,proto3" json:"response_size,omitempty"` // Optional input payload sent along with the request. 
Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` // Whether SimpleResponse should include username. FillUsername bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername,proto3" json:"fill_username,omitempty"` // Whether SimpleResponse should include OAuth scope. FillOauthScope bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope,proto3" json:"fill_oauth_scope,omitempty"` // Compression algorithm to be used by the server for the response (stream) ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,proto3,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"` // Whether server should return a given status ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus,proto3" json:"response_status,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } func (*SimpleRequest) ProtoMessage() {} func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor_4dc296cbfe5ffcd5, []int{2} } func (m *SimpleRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SimpleRequest.Unmarshal(m, b) } func (m *SimpleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SimpleRequest.Marshal(b, m, deterministic) } func (m *SimpleRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SimpleRequest.Merge(m, src) } func (m *SimpleRequest) XXX_Size() int { return xxx_messageInfo_SimpleRequest.Size(m) } func (m *SimpleRequest) XXX_DiscardUnknown() { xxx_messageInfo_SimpleRequest.DiscardUnknown(m) } var xxx_messageInfo_SimpleRequest proto.InternalMessageInfo func (m *SimpleRequest) GetResponseType() PayloadType { if m != nil { return m.ResponseType } return 
PayloadType_COMPRESSABLE } func (m *SimpleRequest) GetResponseSize() int32 { if m != nil { return m.ResponseSize } return 0 } func (m *SimpleRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func (m *SimpleRequest) GetFillUsername() bool { if m != nil { return m.FillUsername } return false } func (m *SimpleRequest) GetFillOauthScope() bool { if m != nil { return m.FillOauthScope } return false } func (m *SimpleRequest) GetResponseCompression() CompressionType { if m != nil { return m.ResponseCompression } return CompressionType_NONE } func (m *SimpleRequest) GetResponseStatus() *EchoStatus { if m != nil { return m.ResponseStatus } return nil } // Unary response, as configured by the request. type SimpleResponse struct { // Payload to increase message size. Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` // The user the request came from, for verifying authentication was // successful when the client expected it. Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` // OAuth scope. 
OauthScope string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope,proto3" json:"oauth_scope,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } func (*SimpleResponse) ProtoMessage() {} func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor_4dc296cbfe5ffcd5, []int{3} } func (m *SimpleResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SimpleResponse.Unmarshal(m, b) } func (m *SimpleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SimpleResponse.Marshal(b, m, deterministic) } func (m *SimpleResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_SimpleResponse.Merge(m, src) } func (m *SimpleResponse) XXX_Size() int { return xxx_messageInfo_SimpleResponse.Size(m) } func (m *SimpleResponse) XXX_DiscardUnknown() { xxx_messageInfo_SimpleResponse.DiscardUnknown(m) } var xxx_messageInfo_SimpleResponse proto.InternalMessageInfo func (m *SimpleResponse) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func (m *SimpleResponse) GetUsername() string { if m != nil { return m.Username } return "" } func (m *SimpleResponse) GetOauthScope() string { if m != nil { return m.OauthScope } return "" } // Client-streaming request. type StreamingInputCallRequest struct { // Optional input payload sent along with the request. 
Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } func (*StreamingInputCallRequest) ProtoMessage() {} func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor_4dc296cbfe5ffcd5, []int{4} } func (m *StreamingInputCallRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamingInputCallRequest.Unmarshal(m, b) } func (m *StreamingInputCallRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamingInputCallRequest.Marshal(b, m, deterministic) } func (m *StreamingInputCallRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamingInputCallRequest.Merge(m, src) } func (m *StreamingInputCallRequest) XXX_Size() int { return xxx_messageInfo_StreamingInputCallRequest.Size(m) } func (m *StreamingInputCallRequest) XXX_DiscardUnknown() { xxx_messageInfo_StreamingInputCallRequest.DiscardUnknown(m) } var xxx_messageInfo_StreamingInputCallRequest proto.InternalMessageInfo func (m *StreamingInputCallRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } // Client-streaming response. type StreamingInputCallResponse struct { // Aggregated size of payloads received from the client. 
AggregatedPayloadSize int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize,proto3" json:"aggregated_payload_size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } func (*StreamingInputCallResponse) ProtoMessage() {} func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor_4dc296cbfe5ffcd5, []int{5} } func (m *StreamingInputCallResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamingInputCallResponse.Unmarshal(m, b) } func (m *StreamingInputCallResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamingInputCallResponse.Marshal(b, m, deterministic) } func (m *StreamingInputCallResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamingInputCallResponse.Merge(m, src) } func (m *StreamingInputCallResponse) XXX_Size() int { return xxx_messageInfo_StreamingInputCallResponse.Size(m) } func (m *StreamingInputCallResponse) XXX_DiscardUnknown() { xxx_messageInfo_StreamingInputCallResponse.DiscardUnknown(m) } var xxx_messageInfo_StreamingInputCallResponse proto.InternalMessageInfo func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { if m != nil { return m.AggregatedPayloadSize } return 0 } // Configuration for a particular response. type ResponseParameters struct { // Desired payload sizes in responses from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. Size int32 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` // Desired interval between consecutive responses in the response stream in // microseconds. 
IntervalUs int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs,proto3" json:"interval_us,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } func (*ResponseParameters) ProtoMessage() {} func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor_4dc296cbfe5ffcd5, []int{6} } func (m *ResponseParameters) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ResponseParameters.Unmarshal(m, b) } func (m *ResponseParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ResponseParameters.Marshal(b, m, deterministic) } func (m *ResponseParameters) XXX_Merge(src proto.Message) { xxx_messageInfo_ResponseParameters.Merge(m, src) } func (m *ResponseParameters) XXX_Size() int { return xxx_messageInfo_ResponseParameters.Size(m) } func (m *ResponseParameters) XXX_DiscardUnknown() { xxx_messageInfo_ResponseParameters.DiscardUnknown(m) } var xxx_messageInfo_ResponseParameters proto.InternalMessageInfo func (m *ResponseParameters) GetSize() int32 { if m != nil { return m.Size } return 0 } func (m *ResponseParameters) GetIntervalUs() int32 { if m != nil { return m.IntervalUs } return 0 } // Server-streaming request. type StreamingOutputCallRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, the payload from each response in the stream // might be of different types. This is to simulate a mixed type of payload // stream. ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Configuration for each expected response message. 
ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters,proto3" json:"response_parameters,omitempty"` // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` // Compression algorithm to be used by the server for the response (stream) ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,proto3,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"` // Whether server should return a given status ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus,proto3" json:"response_status,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } func (*StreamingOutputCallRequest) ProtoMessage() {} func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor_4dc296cbfe5ffcd5, []int{7} } func (m *StreamingOutputCallRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamingOutputCallRequest.Unmarshal(m, b) } func (m *StreamingOutputCallRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamingOutputCallRequest.Marshal(b, m, deterministic) } func (m *StreamingOutputCallRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamingOutputCallRequest.Merge(m, src) } func (m *StreamingOutputCallRequest) XXX_Size() int { return xxx_messageInfo_StreamingOutputCallRequest.Size(m) } func (m *StreamingOutputCallRequest) XXX_DiscardUnknown() { xxx_messageInfo_StreamingOutputCallRequest.DiscardUnknown(m) } var xxx_messageInfo_StreamingOutputCallRequest proto.InternalMessageInfo func (m *StreamingOutputCallRequest) 
GetResponseType() PayloadType { if m != nil { return m.ResponseType } return PayloadType_COMPRESSABLE } func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { if m != nil { return m.ResponseParameters } return nil } func (m *StreamingOutputCallRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func (m *StreamingOutputCallRequest) GetResponseCompression() CompressionType { if m != nil { return m.ResponseCompression } return CompressionType_NONE } func (m *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus { if m != nil { return m.ResponseStatus } return nil } // Server-streaming response, as configured by the request and parameters. type StreamingOutputCallResponse struct { // Payload to increase response size. Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } func (*StreamingOutputCallResponse) ProtoMessage() {} func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor_4dc296cbfe5ffcd5, []int{8} } func (m *StreamingOutputCallResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamingOutputCallResponse.Unmarshal(m, b) } func (m *StreamingOutputCallResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamingOutputCallResponse.Marshal(b, m, deterministic) } func (m *StreamingOutputCallResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamingOutputCallResponse.Merge(m, src) } func (m *StreamingOutputCallResponse) XXX_Size() int { return xxx_messageInfo_StreamingOutputCallResponse.Size(m) } func (m *StreamingOutputCallResponse) XXX_DiscardUnknown() { 
xxx_messageInfo_StreamingOutputCallResponse.DiscardUnknown(m) } var xxx_messageInfo_StreamingOutputCallResponse proto.InternalMessageInfo func (m *StreamingOutputCallResponse) GetPayload() *Payload { if m != nil { return m.Payload } return nil } // For reconnect interop test only. // Client tells server what reconnection parameters it used. type ReconnectParams struct { MaxReconnectBackoffMs int32 `protobuf:"varint,1,opt,name=max_reconnect_backoff_ms,json=maxReconnectBackoffMs,proto3" json:"max_reconnect_backoff_ms,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ReconnectParams) Reset() { *m = ReconnectParams{} } func (m *ReconnectParams) String() string { return proto.CompactTextString(m) } func (*ReconnectParams) ProtoMessage() {} func (*ReconnectParams) Descriptor() ([]byte, []int) { return fileDescriptor_4dc296cbfe5ffcd5, []int{9} } func (m *ReconnectParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReconnectParams.Unmarshal(m, b) } func (m *ReconnectParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ReconnectParams.Marshal(b, m, deterministic) } func (m *ReconnectParams) XXX_Merge(src proto.Message) { xxx_messageInfo_ReconnectParams.Merge(m, src) } func (m *ReconnectParams) XXX_Size() int { return xxx_messageInfo_ReconnectParams.Size(m) } func (m *ReconnectParams) XXX_DiscardUnknown() { xxx_messageInfo_ReconnectParams.DiscardUnknown(m) } var xxx_messageInfo_ReconnectParams proto.InternalMessageInfo func (m *ReconnectParams) GetMaxReconnectBackoffMs() int32 { if m != nil { return m.MaxReconnectBackoffMs } return 0 } // For reconnect interop test only. // Server tells client whether its reconnects are following the spec and the // reconnect backoffs it saw. 
type ReconnectInfo struct { Passed bool `protobuf:"varint,1,opt,name=passed,proto3" json:"passed,omitempty"` BackoffMs []int32 `protobuf:"varint,2,rep,packed,name=backoff_ms,json=backoffMs,proto3" json:"backoff_ms,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ReconnectInfo) Reset() { *m = ReconnectInfo{} } func (m *ReconnectInfo) String() string { return proto.CompactTextString(m) } func (*ReconnectInfo) ProtoMessage() {} func (*ReconnectInfo) Descriptor() ([]byte, []int) { return fileDescriptor_4dc296cbfe5ffcd5, []int{10} } func (m *ReconnectInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReconnectInfo.Unmarshal(m, b) } func (m *ReconnectInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ReconnectInfo.Marshal(b, m, deterministic) } func (m *ReconnectInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_ReconnectInfo.Merge(m, src) } func (m *ReconnectInfo) XXX_Size() int { return xxx_messageInfo_ReconnectInfo.Size(m) } func (m *ReconnectInfo) XXX_DiscardUnknown() { xxx_messageInfo_ReconnectInfo.DiscardUnknown(m) } var xxx_messageInfo_ReconnectInfo proto.InternalMessageInfo func (m *ReconnectInfo) GetPassed() bool { if m != nil { return m.Passed } return false } func (m *ReconnectInfo) GetBackoffMs() []int32 { if m != nil { return m.BackoffMs } return nil } func init() { proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) proto.RegisterEnum("grpc.testing.CompressionType", CompressionType_name, CompressionType_value) proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") proto.RegisterType((*EchoStatus)(nil), "grpc.testing.EchoStatus") proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest") 
proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse") proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters") proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest") proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse") proto.RegisterType((*ReconnectParams)(nil), "grpc.testing.ReconnectParams") proto.RegisterType((*ReconnectInfo)(nil), "grpc.testing.ReconnectInfo") } func init() { proto.RegisterFile("messages.proto", fileDescriptor_4dc296cbfe5ffcd5) } var fileDescriptor_4dc296cbfe5ffcd5 = []byte{ // 652 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x55, 0x4d, 0x6f, 0xd3, 0x40, 0x10, 0xc5, 0xf9, 0xee, 0x24, 0x4d, 0xa3, 0x85, 0x82, 0x5b, 0x54, 0x11, 0x99, 0x4b, 0x54, 0x89, 0x20, 0x05, 0x09, 0x24, 0x0e, 0xa0, 0xb4, 0x4d, 0x51, 0x50, 0x9a, 0x84, 0x75, 0x7b, 0xe1, 0x62, 0x6d, 0x9c, 0x8d, 0x6b, 0x11, 0x7b, 0x8d, 0x77, 0x8d, 0x9a, 0x1e, 0xb8, 0xf3, 0x83, 0xb9, 0xa3, 0x5d, 0x7f, 0xc4, 0x69, 0x7b, 0x68, 0xe1, 0xc2, 0x6d, 0xf7, 0xed, 0x9b, 0x97, 0x79, 0x33, 0xcf, 0x0a, 0x34, 0x3d, 0xca, 0x39, 0x71, 0x28, 0xef, 0x06, 0x21, 0x13, 0x0c, 0x35, 0x9c, 0x30, 0xb0, 0xbb, 0x82, 0x72, 0xe1, 0xfa, 0x8e, 0x31, 0x82, 0xea, 0x94, 0xac, 0x96, 0x8c, 0xcc, 0xd1, 0x2b, 0x28, 0x89, 0x55, 0x40, 0x75, 0xad, 0xad, 0x75, 0x9a, 0xbd, 0xbd, 0x6e, 0x9e, 0xd7, 0x4d, 0x48, 0xe7, 0xab, 0x80, 0x62, 0x45, 0x43, 0x08, 0x4a, 0x33, 0x36, 0x5f, 0xe9, 0x85, 0xb6, 0xd6, 0x69, 0x60, 0x75, 0x36, 0xde, 0x03, 0x0c, 0xec, 0x4b, 0x66, 0x0a, 0x22, 0x22, 0x2e, 0x19, 0x36, 0x9b, 0xc7, 0x82, 0x65, 0xac, 0xce, 0x48, 0x87, 0x6a, 0xd2, 0x8f, 0x2a, 0xdc, 0xc2, 0xe9, 0xd5, 0xf8, 0x55, 0x84, 0x6d, 0xd3, 0xf5, 0x82, 0x25, 0xc5, 0xf4, 0x7b, 0x44, 0xb9, 0x40, 0x1f, 0x60, 0x3b, 0xa4, 0x3c, 0x60, 0x3e, 0xa7, 0xd6, 0xfd, 0x3a, 0x6b, 0xa4, 0x7c, 0x79, 0x43, 0x2f, 0x73, 0xf5, 0xdc, 0xbd, 0x8e, 0x7f, 0xb1, 0xbc, 0x26, 
0x99, 0xee, 0x35, 0x45, 0xaf, 0xa1, 0x1a, 0xc4, 0x0a, 0x7a, 0xb1, 0xad, 0x75, 0xea, 0xbd, 0xdd, 0x3b, 0xe5, 0x71, 0xca, 0x92, 0xaa, 0x0b, 0x77, 0xb9, 0xb4, 0x22, 0x4e, 0x43, 0x9f, 0x78, 0x54, 0x2f, 0xb5, 0xb5, 0x4e, 0x0d, 0x37, 0x24, 0x78, 0x91, 0x60, 0xa8, 0x03, 0x2d, 0x45, 0x62, 0x24, 0x12, 0x97, 0x16, 0xb7, 0x59, 0x40, 0xf5, 0xb2, 0xe2, 0x35, 0x25, 0x3e, 0x91, 0xb0, 0x29, 0x51, 0x34, 0x85, 0x27, 0x59, 0x93, 0x36, 0xf3, 0x82, 0x90, 0x72, 0xee, 0x32, 0x5f, 0xaf, 0x28, 0xaf, 0x07, 0x9b, 0xcd, 0x1c, 0xaf, 0x09, 0xca, 0xef, 0xe3, 0xb4, 0x34, 0xf7, 0x80, 0xfa, 0xb0, 0xb3, 0xb6, 0xad, 0x36, 0xa1, 0x57, 0x95, 0x33, 0x7d, 0x53, 0x6c, 0xbd, 0x29, 0xdc, 0xcc, 0x46, 0xa2, 0xee, 0xc6, 0x4f, 0x68, 0xa6, 0xab, 0x88, 0xf1, 0xfc, 0x98, 0xb4, 0x7b, 0x8d, 0x69, 0x1f, 0x6a, 0xd9, 0x84, 0xe2, 0x4d, 0x67, 0x77, 0xf4, 0x02, 0xea, 0xf9, 0xc1, 0x14, 0xd5, 0x33, 0xb0, 0x6c, 0x28, 0xc6, 0x08, 0xf6, 0x4c, 0x11, 0x52, 0xe2, 0xb9, 0xbe, 0x33, 0xf4, 0x83, 0x48, 0x1c, 0x93, 0xe5, 0x32, 0x8d, 0xc5, 0x43, 0x5b, 0x31, 0xce, 0x61, 0xff, 0x2e, 0xb5, 0xc4, 0xd9, 0x5b, 0x78, 0x46, 0x1c, 0x27, 0xa4, 0x0e, 0x11, 0x74, 0x6e, 0x25, 0x35, 0x71, 0x5e, 0xe2, 0xe0, 0xee, 0xae, 0x9f, 0x13, 0x69, 0x19, 0x1c, 0x63, 0x08, 0x28, 0xd5, 0x98, 0x92, 0x90, 0x78, 0x54, 0xd0, 0x50, 0x65, 0x3e, 0x57, 0xaa, 0xce, 0xd2, 0xae, 0xeb, 0x0b, 0x1a, 0xfe, 0x20, 0x32, 0x35, 0x49, 0x0a, 0x21, 0x85, 0x2e, 0xb8, 0xf1, 0xbb, 0x90, 0xeb, 0x70, 0x12, 0x89, 0x1b, 0x86, 0xff, 0xf5, 0x3b, 0xf8, 0x02, 0x59, 0x4e, 0xac, 0x20, 0x6b, 0x55, 0x2f, 0xb4, 0x8b, 0x9d, 0x7a, 0xaf, 0xbd, 0xa9, 0x72, 0xdb, 0x12, 0x46, 0xe1, 0x6d, 0x9b, 0x0f, 0xfe, 0x6a, 0xfe, 0xcb, 0x98, 0x8f, 0xe1, 0xf9, 0x9d, 0x63, 0xff, 0xcb, 0xcc, 0x1b, 0x9f, 0x61, 0x07, 0x53, 0x9b, 0xf9, 0x3e, 0xb5, 0x85, 0x1a, 0x16, 0x47, 0xef, 0x40, 0xf7, 0xc8, 0x95, 0x15, 0xa6, 0xb0, 0x35, 0x23, 0xf6, 0x37, 0xb6, 0x58, 0x58, 0x1e, 0x4f, 0xe3, 0xe5, 0x91, 0xab, 0xac, 0xea, 0x28, 0x7e, 0x3d, 0xe3, 0xc6, 0x29, 0x6c, 0x67, 0xe8, 0xd0, 0x5f, 0x30, 0xf4, 0x14, 0x2a, 0x01, 0xe1, 0x9c, 0xc6, 0xcd, 
0xd4, 0x70, 0x72, 0x43, 0x07, 0x00, 0x39, 0x4d, 0xb9, 0xd4, 0x32, 0xde, 0x9a, 0xa5, 0x3a, 0x87, 0x1f, 0xa1, 0x9e, 0x4b, 0x06, 0x6a, 0x41, 0xe3, 0x78, 0x72, 0x36, 0xc5, 0x03, 0xd3, 0xec, 0x1f, 0x8d, 0x06, 0xad, 0x47, 0x08, 0x41, 0xf3, 0x62, 0xbc, 0x81, 0x69, 0x08, 0xa0, 0x82, 0xfb, 0xe3, 0x93, 0xc9, 0x59, 0xab, 0x70, 0xd8, 0x83, 0x9d, 0x1b, 0xfb, 0x40, 0x35, 0x28, 0x8d, 0x27, 0x63, 0x59, 0x5c, 0x83, 0xd2, 0xa7, 0xaf, 0xc3, 0x69, 0x4b, 0x43, 0x75, 0xa8, 0x9e, 0x0c, 0x4e, 0x47, 0xfd, 0xf3, 0x41, 0xab, 0x30, 0xab, 0xa8, 0xbf, 0x9a, 0x37, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc2, 0x6a, 0xce, 0x1e, 0x7c, 0x06, 0x00, 0x00, } grpc-go-1.29.1/benchmark/grpc_testing/messages.proto000066400000000000000000000110451365033716300224700ustar00rootroot00000000000000// Copyright 2016 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Message definitions to be used by integration test service definitions. syntax = "proto3"; package grpc.testing; // The type of payload that should be returned. enum PayloadType { // Compressable text format. COMPRESSABLE = 0; // Uncompressable binary format. UNCOMPRESSABLE = 1; // Randomly chosen from all other formats defined in this enum. RANDOM = 2; } // Compression algorithms enum CompressionType { // No compression NONE = 0; GZIP = 1; DEFLATE = 2; } // A block of data, to simply increase gRPC message size. message Payload { // The type of data in body. PayloadType type = 1; // Primary contents of payload. 
bytes body = 2; } // A protobuf representation for grpc status. This is used by test // clients to specify a status that the server should attempt to return. message EchoStatus { int32 code = 1; string message = 2; } // Unary request. message SimpleRequest { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. PayloadType response_type = 1; // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. int32 response_size = 2; // Optional input payload sent along with the request. Payload payload = 3; // Whether SimpleResponse should include username. bool fill_username = 4; // Whether SimpleResponse should include OAuth scope. bool fill_oauth_scope = 5; // Compression algorithm to be used by the server for the response (stream) CompressionType response_compression = 6; // Whether server should return a given status EchoStatus response_status = 7; } // Unary response, as configured by the request. message SimpleResponse { // Payload to increase message size. Payload payload = 1; // The user the request came from, for verifying authentication was // successful when the client expected it. string username = 2; // OAuth scope. string oauth_scope = 3; } // Client-streaming request. message StreamingInputCallRequest { // Optional input payload sent along with the request. Payload payload = 1; // Not expecting any payload from the response. } // Client-streaming response. message StreamingInputCallResponse { // Aggregated size of payloads received from the client. int32 aggregated_payload_size = 1; } // Configuration for a particular response. message ResponseParameters { // Desired payload sizes in responses from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. int32 size = 1; // Desired interval between consecutive responses in the response stream in // microseconds. 
int32 interval_us = 2; } // Server-streaming request. message StreamingOutputCallRequest { // Desired payload type in the response from the server. // If response_type is RANDOM, the payload from each response in the stream // might be of different types. This is to simulate a mixed type of payload // stream. PayloadType response_type = 1; // Configuration for each expected response message. repeated ResponseParameters response_parameters = 2; // Optional input payload sent along with the request. Payload payload = 3; // Compression algorithm to be used by the server for the response (stream) CompressionType response_compression = 6; // Whether server should return a given status EchoStatus response_status = 7; } // Server-streaming response, as configured by the request and parameters. message StreamingOutputCallResponse { // Payload to increase response size. Payload payload = 1; } // For reconnect interop test only. // Client tells server what reconnection parameters it used. message ReconnectParams { int32 max_reconnect_backoff_ms = 1; } // For reconnect interop test only. // Server tells client whether its reconnects are following the spec and the // reconnect backoffs it saw. message ReconnectInfo { bool passed = 1; repeated int32 backoff_ms = 2; } grpc-go-1.29.1/benchmark/grpc_testing/payloads.pb.go000066400000000000000000000232751365033716300223470ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: payloads.proto package grpc_testing import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type ByteBufferParams struct { ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize,proto3" json:"req_size,omitempty"` RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize,proto3" json:"resp_size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ByteBufferParams) Reset() { *m = ByteBufferParams{} } func (m *ByteBufferParams) String() string { return proto.CompactTextString(m) } func (*ByteBufferParams) ProtoMessage() {} func (*ByteBufferParams) Descriptor() ([]byte, []int) { return fileDescriptor_3a075f58f70088c8, []int{0} } func (m *ByteBufferParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ByteBufferParams.Unmarshal(m, b) } func (m *ByteBufferParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ByteBufferParams.Marshal(b, m, deterministic) } func (m *ByteBufferParams) XXX_Merge(src proto.Message) { xxx_messageInfo_ByteBufferParams.Merge(m, src) } func (m *ByteBufferParams) XXX_Size() int { return xxx_messageInfo_ByteBufferParams.Size(m) } func (m *ByteBufferParams) XXX_DiscardUnknown() { xxx_messageInfo_ByteBufferParams.DiscardUnknown(m) } var xxx_messageInfo_ByteBufferParams proto.InternalMessageInfo func (m *ByteBufferParams) GetReqSize() int32 { if m != nil { return m.ReqSize } return 0 } func (m *ByteBufferParams) GetRespSize() int32 { if m != nil { return m.RespSize } return 0 } type SimpleProtoParams struct { ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize,proto3" json:"req_size,omitempty"` RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize,proto3" json:"resp_size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SimpleProtoParams) Reset() { *m = SimpleProtoParams{} } func (m *SimpleProtoParams) String() string { 
return proto.CompactTextString(m) } func (*SimpleProtoParams) ProtoMessage() {} func (*SimpleProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor_3a075f58f70088c8, []int{1} } func (m *SimpleProtoParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SimpleProtoParams.Unmarshal(m, b) } func (m *SimpleProtoParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SimpleProtoParams.Marshal(b, m, deterministic) } func (m *SimpleProtoParams) XXX_Merge(src proto.Message) { xxx_messageInfo_SimpleProtoParams.Merge(m, src) } func (m *SimpleProtoParams) XXX_Size() int { return xxx_messageInfo_SimpleProtoParams.Size(m) } func (m *SimpleProtoParams) XXX_DiscardUnknown() { xxx_messageInfo_SimpleProtoParams.DiscardUnknown(m) } var xxx_messageInfo_SimpleProtoParams proto.InternalMessageInfo func (m *SimpleProtoParams) GetReqSize() int32 { if m != nil { return m.ReqSize } return 0 } func (m *SimpleProtoParams) GetRespSize() int32 { if m != nil { return m.RespSize } return 0 } type ComplexProtoParams struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ComplexProtoParams) Reset() { *m = ComplexProtoParams{} } func (m *ComplexProtoParams) String() string { return proto.CompactTextString(m) } func (*ComplexProtoParams) ProtoMessage() {} func (*ComplexProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor_3a075f58f70088c8, []int{2} } func (m *ComplexProtoParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ComplexProtoParams.Unmarshal(m, b) } func (m *ComplexProtoParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ComplexProtoParams.Marshal(b, m, deterministic) } func (m *ComplexProtoParams) XXX_Merge(src proto.Message) { xxx_messageInfo_ComplexProtoParams.Merge(m, src) } func (m *ComplexProtoParams) XXX_Size() int { return xxx_messageInfo_ComplexProtoParams.Size(m) } func (m *ComplexProtoParams) 
XXX_DiscardUnknown() { xxx_messageInfo_ComplexProtoParams.DiscardUnknown(m) } var xxx_messageInfo_ComplexProtoParams proto.InternalMessageInfo type PayloadConfig struct { // Types that are valid to be assigned to Payload: // *PayloadConfig_BytebufParams // *PayloadConfig_SimpleParams // *PayloadConfig_ComplexParams Payload isPayloadConfig_Payload `protobuf_oneof:"payload"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *PayloadConfig) Reset() { *m = PayloadConfig{} } func (m *PayloadConfig) String() string { return proto.CompactTextString(m) } func (*PayloadConfig) ProtoMessage() {} func (*PayloadConfig) Descriptor() ([]byte, []int) { return fileDescriptor_3a075f58f70088c8, []int{3} } func (m *PayloadConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PayloadConfig.Unmarshal(m, b) } func (m *PayloadConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PayloadConfig.Marshal(b, m, deterministic) } func (m *PayloadConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_PayloadConfig.Merge(m, src) } func (m *PayloadConfig) XXX_Size() int { return xxx_messageInfo_PayloadConfig.Size(m) } func (m *PayloadConfig) XXX_DiscardUnknown() { xxx_messageInfo_PayloadConfig.DiscardUnknown(m) } var xxx_messageInfo_PayloadConfig proto.InternalMessageInfo type isPayloadConfig_Payload interface { isPayloadConfig_Payload() } type PayloadConfig_BytebufParams struct { BytebufParams *ByteBufferParams `protobuf:"bytes,1,opt,name=bytebuf_params,json=bytebufParams,proto3,oneof"` } type PayloadConfig_SimpleParams struct { SimpleParams *SimpleProtoParams `protobuf:"bytes,2,opt,name=simple_params,json=simpleParams,proto3,oneof"` } type PayloadConfig_ComplexParams struct { ComplexParams *ComplexProtoParams `protobuf:"bytes,3,opt,name=complex_params,json=complexParams,proto3,oneof"` } func (*PayloadConfig_BytebufParams) isPayloadConfig_Payload() {} func (*PayloadConfig_SimpleParams) 
isPayloadConfig_Payload() {} func (*PayloadConfig_ComplexParams) isPayloadConfig_Payload() {} func (m *PayloadConfig) GetPayload() isPayloadConfig_Payload { if m != nil { return m.Payload } return nil } func (m *PayloadConfig) GetBytebufParams() *ByteBufferParams { if x, ok := m.GetPayload().(*PayloadConfig_BytebufParams); ok { return x.BytebufParams } return nil } func (m *PayloadConfig) GetSimpleParams() *SimpleProtoParams { if x, ok := m.GetPayload().(*PayloadConfig_SimpleParams); ok { return x.SimpleParams } return nil } func (m *PayloadConfig) GetComplexParams() *ComplexProtoParams { if x, ok := m.GetPayload().(*PayloadConfig_ComplexParams); ok { return x.ComplexParams } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*PayloadConfig) XXX_OneofWrappers() []interface{} { return []interface{}{ (*PayloadConfig_BytebufParams)(nil), (*PayloadConfig_SimpleParams)(nil), (*PayloadConfig_ComplexParams)(nil), } } func init() { proto.RegisterType((*ByteBufferParams)(nil), "grpc.testing.ByteBufferParams") proto.RegisterType((*SimpleProtoParams)(nil), "grpc.testing.SimpleProtoParams") proto.RegisterType((*ComplexProtoParams)(nil), "grpc.testing.ComplexProtoParams") proto.RegisterType((*PayloadConfig)(nil), "grpc.testing.PayloadConfig") } func init() { proto.RegisterFile("payloads.proto", fileDescriptor_3a075f58f70088c8) } var fileDescriptor_3a075f58f70088c8 = []byte{ // 254 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x48, 0xac, 0xcc, 0xc9, 0x4f, 0x4c, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48, 0xd6, 0x2b, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0x57, 0xf2, 0xe2, 0x12, 0x70, 0xaa, 0x2c, 0x49, 0x75, 0x2a, 0x4d, 0x4b, 0x4b, 0x2d, 0x0a, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0x16, 0x92, 0xe4, 0xe2, 0x28, 0x4a, 0x2d, 0x8c, 0x2f, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0d, 0x62, 0x2f, 0x4a, 0x2d, 0x0c, 0xce, 0xac, 0x4a, 0x15, 0x92, 
0xe6, 0xe2, 0x2c, 0x4a, 0x2d, 0x2e, 0x80, 0xc8, 0x31, 0x81, 0xe5, 0x38, 0x40, 0x02, 0x20, 0x49, 0x25, 0x6f, 0x2e, 0xc1, 0xe0, 0xcc, 0xdc, 0x82, 0x9c, 0xd4, 0x00, 0x90, 0x45, 0x14, 0x1a, 0x26, 0xc2, 0x25, 0xe4, 0x9c, 0x0f, 0x32, 0xac, 0x02, 0xc9, 0x34, 0xa5, 0x6f, 0x8c, 0x5c, 0xbc, 0x01, 0x10, 0xff, 0x38, 0xe7, 0xe7, 0xa5, 0x65, 0xa6, 0x0b, 0xb9, 0x73, 0xf1, 0x25, 0x55, 0x96, 0xa4, 0x26, 0x95, 0xa6, 0xc5, 0x17, 0x80, 0xd5, 0x80, 0x6d, 0xe1, 0x36, 0x92, 0xd3, 0x43, 0xf6, 0xa7, 0x1e, 0xba, 0x27, 0x3d, 0x18, 0x82, 0x78, 0xa1, 0xfa, 0xa0, 0x0e, 0x75, 0xe3, 0xe2, 0x2d, 0x06, 0xbb, 0x1e, 0x66, 0x0e, 0x13, 0xd8, 0x1c, 0x79, 0x54, 0x73, 0x30, 0x3c, 0xe8, 0xc1, 0x10, 0xc4, 0x03, 0xd1, 0x07, 0x35, 0xc7, 0x93, 0x8b, 0x2f, 0x19, 0xe2, 0x70, 0x98, 0x41, 0xcc, 0x60, 0x83, 0x14, 0x50, 0x0d, 0xc2, 0xf4, 0x1c, 0xc8, 0x49, 0x50, 0x9d, 0x10, 0x01, 0x27, 0x4e, 0x2e, 0x76, 0x68, 0xe4, 0x25, 0xb1, 0x81, 0x23, 0xcf, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x8c, 0x18, 0x4e, 0xce, 0x01, 0x00, 0x00, } grpc-go-1.29.1/benchmark/grpc_testing/payloads.proto000066400000000000000000000021161365033716300224740ustar00rootroot00000000000000// Copyright 2016 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
syntax = "proto3"; package grpc.testing; message ByteBufferParams { int32 req_size = 1; int32 resp_size = 2; } message SimpleProtoParams { int32 req_size = 1; int32 resp_size = 2; } message ComplexProtoParams { // TODO (vpai): Fill this in once the details of complex, representative // protos are decided } message PayloadConfig { oneof payload { ByteBufferParams bytebuf_params = 1; SimpleProtoParams simple_params = 2; ComplexProtoParams complex_params = 3; } } grpc-go-1.29.1/benchmark/grpc_testing/services.pb.go000066400000000000000000000464411365033716300223560ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: services.proto package grpc_testing import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("services.proto", fileDescriptor_8e16ccb8c5307b32) } var fileDescriptor_8e16ccb8c5307b32 = []byte{ // 271 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0xc1, 0x4a, 0xc3, 0x40, 0x10, 0x86, 0x69, 0x0f, 0x42, 0x16, 0x53, 0x64, 0x4f, 0xba, 0xfa, 0x00, 0x9e, 0x82, 0x54, 0x5f, 0xc0, 0x06, 0x3d, 0x0a, 0x36, 0x54, 0x0f, 0x9e, 0xd6, 0x74, 0x88, 0x4b, 0x93, 0x99, 0x38, 0x33, 0x11, 0x7c, 0x02, 0x1f, 0xc1, 0xd7, 0x15, 0xb3, 0x56, 0x6a, 0xc8, 0xcd, 0x1e, 0xe7, 0xff, 0x86, 0x8f, 0xfd, 0x77, 0xd7, 0xcc, 0x04, 0xf8, 0x2d, 0x94, 0x20, 0x59, 0xcb, 0xa4, 0x64, 0x0f, 0x2b, 0x6e, 0xcb, 0x4c, 0x41, 0x34, 0x60, 0xe5, 0x66, 0x0d, 0x88, 0xf8, 0x6a, 0x4b, 0x5d, 0x5a, 0x12, 0x2a, 0x53, 0x1d, 0xc7, 0xf9, 0xc7, 0xd4, 0x1c, 0x2d, 0x00, 0xcb, 0x97, 0xc6, 0xf3, 0xa6, 0x88, 0x22, 0x7b, 0x6b, 0x92, 0x15, 0x7a, 0x7e, 0xcf, 0x7d, 0x5d, 0xdb, 0xd3, 0x6c, 0xd7, 0x97, 0x15, 0xa1, 0x69, 0x6b, 0x58, 0xc2, 0x6b, 0x07, 0xa2, 0xee, 0x6c, 0x1c, 0x4a, 0x4b, 0x28, 0x60, 0xef, 0x4c, 0x5a, 0x28, 0x83, 0x6f, 0x02, 0x56, 0xff, 0x74, 0x9d, 0x4f, 0x2e, 0x26, 0xf6, 0xc9, 0xb8, 0x15, 0x96, 0x84, 0xa2, 0xec, 0x03, 0xc2, 0x7a, 0x9f, 0xf2, 0xf9, 0xe7, 0xd4, 0xa4, 0x8f, 0xc4, 0x1b, 0xe0, 0xed, 0x35, 0xdc, 0x98, 0x64, 0xd9, 0xe1, 0xf7, 0x04, 0x6c, 0x8f, 0x07, 0x82, 0x3e, 0xbd, 0xe6, 0x4a, 0x9c, 0x1b, 0x23, 0x85, 0x7a, 0xed, 0xa4, 0x3f, 0x75, 0xd4, 0xe4, 0x75, 0x00, 0xd4, 0xa1, 0x26, 0xa6, 0x63, 0x9a, 0x48, 0x76, 0x34, 0x0b, 0x93, 0xe4, 0xc4, 0x90, 0x53, 0x87, 0x6a, 0x4f, 0x06, 0xcb, 0xc4, 0xbf, 0x4d, 0xdd, 0x18, 0xfa, 0x79, 0x90, 0x2b, 0x63, 0xee, 0xbb, 0xa0, 0xb1, 0xa6, 0xb5, 0x7f, 0x37, 0x1f, 0x28, 0xac, 0xdd, 0x48, 0xf6, 0x7c, 0xd0, 0x7f, 0x95, 0xcb, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9a, 0xb4, 0x19, 0x36, 0x69, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // BenchmarkServiceClient is the client API for BenchmarkService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type BenchmarkServiceClient interface { // One request followed by one response. // The server returns the client payload as-is. UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) // One request followed by one response. // The server returns the client payload as-is. StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) // Unconstrainted streaming. // Both server and client keep sending & receiving simultaneously. UnconstrainedStreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_UnconstrainedStreamingCallClient, error) } type benchmarkServiceClient struct { cc grpc.ClientConnInterface } func NewBenchmarkServiceClient(cc grpc.ClientConnInterface) BenchmarkServiceClient { return &benchmarkServiceClient{cc} } func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) err := c.cc.Invoke(ctx, "/grpc.testing.BenchmarkService/UnaryCall", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *benchmarkServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) { stream, err := c.cc.NewStream(ctx, &_BenchmarkService_serviceDesc.Streams[0], "/grpc.testing.BenchmarkService/StreamingCall", opts...) 
if err != nil { return nil, err } x := &benchmarkServiceStreamingCallClient{stream} return x, nil } type BenchmarkService_StreamingCallClient interface { Send(*SimpleRequest) error Recv() (*SimpleResponse, error) grpc.ClientStream } type benchmarkServiceStreamingCallClient struct { grpc.ClientStream } func (x *benchmarkServiceStreamingCallClient) Send(m *SimpleRequest) error { return x.ClientStream.SendMsg(m) } func (x *benchmarkServiceStreamingCallClient) Recv() (*SimpleResponse, error) { m := new(SimpleResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *benchmarkServiceClient) UnconstrainedStreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_UnconstrainedStreamingCallClient, error) { stream, err := c.cc.NewStream(ctx, &_BenchmarkService_serviceDesc.Streams[1], "/grpc.testing.BenchmarkService/UnconstrainedStreamingCall", opts...) if err != nil { return nil, err } x := &benchmarkServiceUnconstrainedStreamingCallClient{stream} return x, nil } type BenchmarkService_UnconstrainedStreamingCallClient interface { Send(*SimpleRequest) error Recv() (*SimpleResponse, error) grpc.ClientStream } type benchmarkServiceUnconstrainedStreamingCallClient struct { grpc.ClientStream } func (x *benchmarkServiceUnconstrainedStreamingCallClient) Send(m *SimpleRequest) error { return x.ClientStream.SendMsg(m) } func (x *benchmarkServiceUnconstrainedStreamingCallClient) Recv() (*SimpleResponse, error) { m := new(SimpleResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // BenchmarkServiceServer is the server API for BenchmarkService service. type BenchmarkServiceServer interface { // One request followed by one response. // The server returns the client payload as-is. UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) // One request followed by one response. // The server returns the client payload as-is. 
StreamingCall(BenchmarkService_StreamingCallServer) error // Unconstrainted streaming. // Both server and client keep sending & receiving simultaneously. UnconstrainedStreamingCall(BenchmarkService_UnconstrainedStreamingCallServer) error } // UnimplementedBenchmarkServiceServer can be embedded to have forward compatible implementations. type UnimplementedBenchmarkServiceServer struct { } func (*UnimplementedBenchmarkServiceServer) UnaryCall(ctx context.Context, req *SimpleRequest) (*SimpleResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UnaryCall not implemented") } func (*UnimplementedBenchmarkServiceServer) StreamingCall(srv BenchmarkService_StreamingCallServer) error { return status.Errorf(codes.Unimplemented, "method StreamingCall not implemented") } func (*UnimplementedBenchmarkServiceServer) UnconstrainedStreamingCall(srv BenchmarkService_UnconstrainedStreamingCallServer) error { return status.Errorf(codes.Unimplemented, "method UnconstrainedStreamingCall not implemented") } func RegisterBenchmarkServiceServer(s *grpc.Server, srv BenchmarkServiceServer) { s.RegisterService(&_BenchmarkService_serviceDesc, srv) } func _BenchmarkService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SimpleRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(BenchmarkServiceServer).UnaryCall(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.testing.BenchmarkService/UnaryCall", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BenchmarkServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) } return interceptor(ctx, in, info, handler) } func _BenchmarkService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(BenchmarkServiceServer).StreamingCall(&benchmarkServiceStreamingCallServer{stream}) } type 
BenchmarkService_StreamingCallServer interface { Send(*SimpleResponse) error Recv() (*SimpleRequest, error) grpc.ServerStream } type benchmarkServiceStreamingCallServer struct { grpc.ServerStream } func (x *benchmarkServiceStreamingCallServer) Send(m *SimpleResponse) error { return x.ServerStream.SendMsg(m) } func (x *benchmarkServiceStreamingCallServer) Recv() (*SimpleRequest, error) { m := new(SimpleRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _BenchmarkService_UnconstrainedStreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(BenchmarkServiceServer).UnconstrainedStreamingCall(&benchmarkServiceUnconstrainedStreamingCallServer{stream}) } type BenchmarkService_UnconstrainedStreamingCallServer interface { Send(*SimpleResponse) error Recv() (*SimpleRequest, error) grpc.ServerStream } type benchmarkServiceUnconstrainedStreamingCallServer struct { grpc.ServerStream } func (x *benchmarkServiceUnconstrainedStreamingCallServer) Send(m *SimpleResponse) error { return x.ServerStream.SendMsg(m) } func (x *benchmarkServiceUnconstrainedStreamingCallServer) Recv() (*SimpleRequest, error) { m := new(SimpleRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _BenchmarkService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.BenchmarkService", HandlerType: (*BenchmarkServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "UnaryCall", Handler: _BenchmarkService_UnaryCall_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "StreamingCall", Handler: _BenchmarkService_StreamingCall_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "UnconstrainedStreamingCall", Handler: _BenchmarkService_UnconstrainedStreamingCall_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "services.proto", } // WorkerServiceClient is the client API for WorkerService service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type WorkerServiceClient interface { // Start server with specified workload. // First request sent specifies the ServerConfig followed by ServerStatus // response. After that, a "Mark" can be sent anytime to request the latest // stats. Closing the stream will initiate shutdown of the test server // and once the shutdown has finished, the OK status is sent to terminate // this RPC. RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) // Start client with specified workload. // First request sent specifies the ClientConfig followed by ClientStatus // response. After that, a "Mark" can be sent anytime to request the latest // stats. Closing the stream will initiate shutdown of the test client // and once the shutdown has finished, the OK status is sent to terminate // this RPC. RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) // Just return the core count - unary call CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) // Quit this worker QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) } type workerServiceClient struct { cc grpc.ClientConnInterface } func NewWorkerServiceClient(cc grpc.ClientConnInterface) WorkerServiceClient { return &workerServiceClient{cc} } func (c *workerServiceClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) { stream, err := c.cc.NewStream(ctx, &_WorkerService_serviceDesc.Streams[0], "/grpc.testing.WorkerService/RunServer", opts...) 
if err != nil { return nil, err } x := &workerServiceRunServerClient{stream} return x, nil } type WorkerService_RunServerClient interface { Send(*ServerArgs) error Recv() (*ServerStatus, error) grpc.ClientStream } type workerServiceRunServerClient struct { grpc.ClientStream } func (x *workerServiceRunServerClient) Send(m *ServerArgs) error { return x.ClientStream.SendMsg(m) } func (x *workerServiceRunServerClient) Recv() (*ServerStatus, error) { m := new(ServerStatus) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *workerServiceClient) RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) { stream, err := c.cc.NewStream(ctx, &_WorkerService_serviceDesc.Streams[1], "/grpc.testing.WorkerService/RunClient", opts...) if err != nil { return nil, err } x := &workerServiceRunClientClient{stream} return x, nil } type WorkerService_RunClientClient interface { Send(*ClientArgs) error Recv() (*ClientStatus, error) grpc.ClientStream } type workerServiceRunClientClient struct { grpc.ClientStream } func (x *workerServiceRunClientClient) Send(m *ClientArgs) error { return x.ClientStream.SendMsg(m) } func (x *workerServiceRunClientClient) Recv() (*ClientStatus, error) { m := new(ClientStatus) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) { out := new(CoreResponse) err := c.cc.Invoke(ctx, "/grpc.testing.WorkerService/CoreCount", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *workerServiceClient) QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) { out := new(Void) err := c.cc.Invoke(ctx, "/grpc.testing.WorkerService/QuitWorker", in, out, opts...) if err != nil { return nil, err } return out, nil } // WorkerServiceServer is the server API for WorkerService service. 
type WorkerServiceServer interface { // Start server with specified workload. // First request sent specifies the ServerConfig followed by ServerStatus // response. After that, a "Mark" can be sent anytime to request the latest // stats. Closing the stream will initiate shutdown of the test server // and once the shutdown has finished, the OK status is sent to terminate // this RPC. RunServer(WorkerService_RunServerServer) error // Start client with specified workload. // First request sent specifies the ClientConfig followed by ClientStatus // response. After that, a "Mark" can be sent anytime to request the latest // stats. Closing the stream will initiate shutdown of the test client // and once the shutdown has finished, the OK status is sent to terminate // this RPC. RunClient(WorkerService_RunClientServer) error // Just return the core count - unary call CoreCount(context.Context, *CoreRequest) (*CoreResponse, error) // Quit this worker QuitWorker(context.Context, *Void) (*Void, error) } // UnimplementedWorkerServiceServer can be embedded to have forward compatible implementations. 
type UnimplementedWorkerServiceServer struct { } func (*UnimplementedWorkerServiceServer) RunServer(srv WorkerService_RunServerServer) error { return status.Errorf(codes.Unimplemented, "method RunServer not implemented") } func (*UnimplementedWorkerServiceServer) RunClient(srv WorkerService_RunClientServer) error { return status.Errorf(codes.Unimplemented, "method RunClient not implemented") } func (*UnimplementedWorkerServiceServer) CoreCount(ctx context.Context, req *CoreRequest) (*CoreResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CoreCount not implemented") } func (*UnimplementedWorkerServiceServer) QuitWorker(ctx context.Context, req *Void) (*Void, error) { return nil, status.Errorf(codes.Unimplemented, "method QuitWorker not implemented") } func RegisterWorkerServiceServer(s *grpc.Server, srv WorkerServiceServer) { s.RegisterService(&_WorkerService_serviceDesc, srv) } func _WorkerService_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(WorkerServiceServer).RunServer(&workerServiceRunServerServer{stream}) } type WorkerService_RunServerServer interface { Send(*ServerStatus) error Recv() (*ServerArgs, error) grpc.ServerStream } type workerServiceRunServerServer struct { grpc.ServerStream } func (x *workerServiceRunServerServer) Send(m *ServerStatus) error { return x.ServerStream.SendMsg(m) } func (x *workerServiceRunServerServer) Recv() (*ServerArgs, error) { m := new(ServerArgs) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _WorkerService_RunClient_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(WorkerServiceServer).RunClient(&workerServiceRunClientServer{stream}) } type WorkerService_RunClientServer interface { Send(*ClientStatus) error Recv() (*ClientArgs, error) grpc.ServerStream } type workerServiceRunClientServer struct { grpc.ServerStream } func (x *workerServiceRunClientServer) Send(m *ClientStatus) error { return 
x.ServerStream.SendMsg(m) } func (x *workerServiceRunClientServer) Recv() (*ClientArgs, error) { m := new(ClientArgs) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _WorkerService_CoreCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CoreRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(WorkerServiceServer).CoreCount(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.testing.WorkerService/CoreCount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServiceServer).CoreCount(ctx, req.(*CoreRequest)) } return interceptor(ctx, in, info, handler) } func _WorkerService_QuitWorker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Void) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(WorkerServiceServer).QuitWorker(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.testing.WorkerService/QuitWorker", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServiceServer).QuitWorker(ctx, req.(*Void)) } return interceptor(ctx, in, info, handler) } var _WorkerService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.WorkerService", HandlerType: (*WorkerServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "CoreCount", Handler: _WorkerService_CoreCount_Handler, }, { MethodName: "QuitWorker", Handler: _WorkerService_QuitWorker_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "RunServer", Handler: _WorkerService_RunServer_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "RunClient", Handler: _WorkerService_RunClient_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: 
"services.proto", } grpc-go-1.29.1/benchmark/grpc_testing/services.proto000066400000000000000000000045221365033716300225060ustar00rootroot00000000000000// Copyright 2016 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // An integration test service that covers all the method signature permutations // of unary/streaming requests/responses. syntax = "proto3"; import "messages.proto"; import "control.proto"; package grpc.testing; service BenchmarkService { // One request followed by one response. // The server returns the client payload as-is. rpc UnaryCall(SimpleRequest) returns (SimpleResponse); // One request followed by one response. // The server returns the client payload as-is. rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse); // Unconstrainted streaming. // Both server and client keep sending & receiving simultaneously. rpc UnconstrainedStreamingCall(stream SimpleRequest) returns (stream SimpleResponse); } service WorkerService { // Start server with specified workload. // First request sent specifies the ServerConfig followed by ServerStatus // response. After that, a "Mark" can be sent anytime to request the latest // stats. Closing the stream will initiate shutdown of the test server // and once the shutdown has finished, the OK status is sent to terminate // this RPC. rpc RunServer(stream ServerArgs) returns (stream ServerStatus); // Start client with specified workload. 
// First request sent specifies the ClientConfig followed by ClientStatus // response. After that, a "Mark" can be sent anytime to request the latest // stats. Closing the stream will initiate shutdown of the test client // and once the shutdown has finished, the OK status is sent to terminate // this RPC. rpc RunClient(stream ClientArgs) returns (stream ClientStatus); // Just return the core count - unary call rpc CoreCount(CoreRequest) returns (CoreResponse); // Quit this worker rpc QuitWorker(Void) returns (Void); } grpc-go-1.29.1/benchmark/grpc_testing/stats.pb.go000066400000000000000000000255031365033716300216650ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: stats.proto package grpc_testing import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type ServerStats struct { // wall clock time change in seconds since last reset TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed,json=timeElapsed,proto3" json:"time_elapsed,omitempty"` // change in user time (in seconds) used by the server since last reset TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user,json=timeUser,proto3" json:"time_user,omitempty"` // change in server time (in seconds) used by the server process and all // threads since last reset TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system,json=timeSystem,proto3" json:"time_system,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ServerStats) Reset() { *m = ServerStats{} } func (m *ServerStats) String() string { return proto.CompactTextString(m) } func (*ServerStats) ProtoMessage() {} func (*ServerStats) Descriptor() ([]byte, []int) { return fileDescriptor_b4756a0aec8b9d44, []int{0} } func (m *ServerStats) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServerStats.Unmarshal(m, b) } func (m *ServerStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServerStats.Marshal(b, m, deterministic) } func (m *ServerStats) XXX_Merge(src proto.Message) { xxx_messageInfo_ServerStats.Merge(m, src) } func (m *ServerStats) XXX_Size() int { return xxx_messageInfo_ServerStats.Size(m) } func (m *ServerStats) XXX_DiscardUnknown() { xxx_messageInfo_ServerStats.DiscardUnknown(m) } var xxx_messageInfo_ServerStats proto.InternalMessageInfo func (m *ServerStats) GetTimeElapsed() float64 { if m != nil { return m.TimeElapsed } return 0 } func (m *ServerStats) GetTimeUser() float64 { if m != nil { return m.TimeUser } return 0 } func (m *ServerStats) GetTimeSystem() float64 { if m != nil { return m.TimeSystem } return 0 } // Histogram params based on grpc/support/histogram.c type HistogramParams struct 
{ Resolution float64 `protobuf:"fixed64,1,opt,name=resolution,proto3" json:"resolution,omitempty"` MaxPossible float64 `protobuf:"fixed64,2,opt,name=max_possible,json=maxPossible,proto3" json:"max_possible,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HistogramParams) Reset() { *m = HistogramParams{} } func (m *HistogramParams) String() string { return proto.CompactTextString(m) } func (*HistogramParams) ProtoMessage() {} func (*HistogramParams) Descriptor() ([]byte, []int) { return fileDescriptor_b4756a0aec8b9d44, []int{1} } func (m *HistogramParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HistogramParams.Unmarshal(m, b) } func (m *HistogramParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HistogramParams.Marshal(b, m, deterministic) } func (m *HistogramParams) XXX_Merge(src proto.Message) { xxx_messageInfo_HistogramParams.Merge(m, src) } func (m *HistogramParams) XXX_Size() int { return xxx_messageInfo_HistogramParams.Size(m) } func (m *HistogramParams) XXX_DiscardUnknown() { xxx_messageInfo_HistogramParams.DiscardUnknown(m) } var xxx_messageInfo_HistogramParams proto.InternalMessageInfo func (m *HistogramParams) GetResolution() float64 { if m != nil { return m.Resolution } return 0 } func (m *HistogramParams) GetMaxPossible() float64 { if m != nil { return m.MaxPossible } return 0 } // Histogram data based on grpc/support/histogram.c type HistogramData struct { Bucket []uint32 `protobuf:"varint,1,rep,packed,name=bucket,proto3" json:"bucket,omitempty"` MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen,json=minSeen,proto3" json:"min_seen,omitempty"` MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen,json=maxSeen,proto3" json:"max_seen,omitempty"` Sum float64 `protobuf:"fixed64,4,opt,name=sum,proto3" json:"sum,omitempty"` SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares,json=sumOfSquares,proto3" 
json:"sum_of_squares,omitempty"` Count float64 `protobuf:"fixed64,6,opt,name=count,proto3" json:"count,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HistogramData) Reset() { *m = HistogramData{} } func (m *HistogramData) String() string { return proto.CompactTextString(m) } func (*HistogramData) ProtoMessage() {} func (*HistogramData) Descriptor() ([]byte, []int) { return fileDescriptor_b4756a0aec8b9d44, []int{2} } func (m *HistogramData) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HistogramData.Unmarshal(m, b) } func (m *HistogramData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HistogramData.Marshal(b, m, deterministic) } func (m *HistogramData) XXX_Merge(src proto.Message) { xxx_messageInfo_HistogramData.Merge(m, src) } func (m *HistogramData) XXX_Size() int { return xxx_messageInfo_HistogramData.Size(m) } func (m *HistogramData) XXX_DiscardUnknown() { xxx_messageInfo_HistogramData.DiscardUnknown(m) } var xxx_messageInfo_HistogramData proto.InternalMessageInfo func (m *HistogramData) GetBucket() []uint32 { if m != nil { return m.Bucket } return nil } func (m *HistogramData) GetMinSeen() float64 { if m != nil { return m.MinSeen } return 0 } func (m *HistogramData) GetMaxSeen() float64 { if m != nil { return m.MaxSeen } return 0 } func (m *HistogramData) GetSum() float64 { if m != nil { return m.Sum } return 0 } func (m *HistogramData) GetSumOfSquares() float64 { if m != nil { return m.SumOfSquares } return 0 } func (m *HistogramData) GetCount() float64 { if m != nil { return m.Count } return 0 } type ClientStats struct { // Latency histogram. Data points are in nanoseconds. Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies,proto3" json:"latencies,omitempty"` // See ServerStats for details. 
TimeElapsed float64 `protobuf:"fixed64,2,opt,name=time_elapsed,json=timeElapsed,proto3" json:"time_elapsed,omitempty"` TimeUser float64 `protobuf:"fixed64,3,opt,name=time_user,json=timeUser,proto3" json:"time_user,omitempty"` TimeSystem float64 `protobuf:"fixed64,4,opt,name=time_system,json=timeSystem,proto3" json:"time_system,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ClientStats) Reset() { *m = ClientStats{} } func (m *ClientStats) String() string { return proto.CompactTextString(m) } func (*ClientStats) ProtoMessage() {} func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor_b4756a0aec8b9d44, []int{3} } func (m *ClientStats) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ClientStats.Unmarshal(m, b) } func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic) } func (m *ClientStats) XXX_Merge(src proto.Message) { xxx_messageInfo_ClientStats.Merge(m, src) } func (m *ClientStats) XXX_Size() int { return xxx_messageInfo_ClientStats.Size(m) } func (m *ClientStats) XXX_DiscardUnknown() { xxx_messageInfo_ClientStats.DiscardUnknown(m) } var xxx_messageInfo_ClientStats proto.InternalMessageInfo func (m *ClientStats) GetLatencies() *HistogramData { if m != nil { return m.Latencies } return nil } func (m *ClientStats) GetTimeElapsed() float64 { if m != nil { return m.TimeElapsed } return 0 } func (m *ClientStats) GetTimeUser() float64 { if m != nil { return m.TimeUser } return 0 } func (m *ClientStats) GetTimeSystem() float64 { if m != nil { return m.TimeSystem } return 0 } func init() { proto.RegisterType((*ServerStats)(nil), "grpc.testing.ServerStats") proto.RegisterType((*HistogramParams)(nil), "grpc.testing.HistogramParams") proto.RegisterType((*HistogramData)(nil), "grpc.testing.HistogramData") proto.RegisterType((*ClientStats)(nil), "grpc.testing.ClientStats") } func 
init() { proto.RegisterFile("stats.proto", fileDescriptor_b4756a0aec8b9d44) } var fileDescriptor_b4756a0aec8b9d44 = []byte{ // 341 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xc1, 0x4a, 0xeb, 0x40, 0x14, 0x86, 0x49, 0xd3, 0xf6, 0xb6, 0x27, 0xed, 0xbd, 0x97, 0x41, 0x24, 0x52, 0xd0, 0x1a, 0x5c, 0x74, 0x95, 0x85, 0xae, 0x5c, 0xab, 0xe0, 0xce, 0xd2, 0xe8, 0x3a, 0x4c, 0xe3, 0x69, 0x19, 0xcc, 0xcc, 0xc4, 0x39, 0x33, 0x12, 0x1f, 0x49, 0x7c, 0x49, 0xc9, 0x24, 0x68, 0x55, 0xd0, 0x5d, 0xe6, 0xfb, 0x7e, 0xe6, 0xe4, 0xe4, 0x0f, 0x44, 0x64, 0xb9, 0xa5, 0xb4, 0x32, 0xda, 0x6a, 0x36, 0xd9, 0x9a, 0xaa, 0x48, 0x2d, 0x92, 0x15, 0x6a, 0x9b, 0x28, 0x88, 0x32, 0x34, 0x4f, 0x68, 0xb2, 0x26, 0xc2, 0x8e, 0x61, 0x62, 0x85, 0xc4, 0x1c, 0x4b, 0x5e, 0x11, 0xde, 0xc7, 0xc1, 0x3c, 0x58, 0x04, 0xab, 0xa8, 0x61, 0x57, 0x2d, 0x62, 0x33, 0x18, 0xfb, 0x88, 0x23, 0x34, 0x71, 0xcf, 0xfb, 0x51, 0x03, 0xee, 0x08, 0x0d, 0x3b, 0x02, 0x9f, 0xcd, 0xe9, 0x99, 0x2c, 0xca, 0x38, 0xf4, 0x1a, 0x1a, 0x94, 0x79, 0x92, 0xdc, 0xc2, 0xbf, 0x6b, 0x41, 0x56, 0x6f, 0x0d, 0x97, 0x4b, 0x6e, 0xb8, 0x24, 0x76, 0x08, 0x60, 0x90, 0x74, 0xe9, 0xac, 0xd0, 0xaa, 0x9b, 0xb8, 0x43, 0x9a, 0x77, 0x92, 0xbc, 0xce, 0x2b, 0x4d, 0x24, 0xd6, 0x25, 0x76, 0x33, 0x23, 0xc9, 0xeb, 0x65, 0x87, 0x92, 0xd7, 0x00, 0xa6, 0xef, 0xd7, 0x5e, 0x72, 0xcb, 0xd9, 0x3e, 0x0c, 0xd7, 0xae, 0x78, 0x40, 0x1b, 0x07, 0xf3, 0x70, 0x31, 0x5d, 0x75, 0x27, 0x76, 0x00, 0x23, 0x29, 0x54, 0x4e, 0x88, 0xaa, 0xbb, 0xe8, 0x8f, 0x14, 0x2a, 0x43, 0x54, 0x5e, 0xf1, 0xba, 0x55, 0x61, 0xa7, 0x78, 0xed, 0xd5, 0x7f, 0x08, 0xc9, 0xc9, 0xb8, 0xef, 0x69, 0xf3, 0xc8, 0x4e, 0xe0, 0x2f, 0x39, 0x99, 0xeb, 0x4d, 0x4e, 0x8f, 0x8e, 0x1b, 0xa4, 0x78, 0xe0, 0xe5, 0x84, 0x9c, 0xbc, 0xd9, 0x64, 0x2d, 0x63, 0x7b, 0x30, 0x28, 0xb4, 0x53, 0x36, 0x1e, 0x7a, 0xd9, 0x1e, 0x92, 0x97, 0x00, 0xa2, 0x8b, 0x52, 0xa0, 0xb2, 0xed, 0x47, 0x3f, 0x87, 0x71, 0xc9, 0x2d, 0xaa, 0x42, 0x20, 0xf9, 0xfd, 0xa3, 0xd3, 0x59, 0xba, 0xdb, 0x52, 0xfa, 
0x69, 0xb7, 0xd5, 0x47, 0xfa, 0x5b, 0x5f, 0xbd, 0x5f, 0xfa, 0x0a, 0x7f, 0xee, 0xab, 0xff, 0xb5, 0xaf, 0xf5, 0xd0, 0xff, 0x34, 0x67, 0x6f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xea, 0x75, 0x34, 0x90, 0x43, 0x02, 0x00, 0x00, } grpc-go-1.29.1/benchmark/grpc_testing/stats.proto000066400000000000000000000031461365033716300220220ustar00rootroot00000000000000// Copyright 2016 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package grpc.testing; message ServerStats { // wall clock time change in seconds since last reset double time_elapsed = 1; // change in user time (in seconds) used by the server since last reset double time_user = 2; // change in server time (in seconds) used by the server process and all // threads since last reset double time_system = 3; } // Histogram params based on grpc/support/histogram.c message HistogramParams { double resolution = 1; // first bucket is [0, 1 + resolution) double max_possible = 2; // use enough buckets to allow this value } // Histogram data based on grpc/support/histogram.c message HistogramData { repeated uint32 bucket = 1; double min_seen = 2; double max_seen = 3; double sum = 4; double sum_of_squares = 5; double count = 6; } message ClientStats { // Latency histogram. Data points are in nanoseconds. HistogramData latencies = 1; // See ServerStats for details. 
double time_elapsed = 2; double time_user = 3; double time_system = 4; } grpc-go-1.29.1/benchmark/latency/000077500000000000000000000000001365033716300165425ustar00rootroot00000000000000grpc-go-1.29.1/benchmark/latency/latency.go000066400000000000000000000224771365033716300205440ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package latency provides wrappers for net.Conn, net.Listener, and // net.Dialers, designed to interoperate to inject real-world latency into // network connections. package latency import ( "bytes" "context" "encoding/binary" "fmt" "io" "net" "time" ) // Dialer is a function matching the signature of net.Dial. type Dialer func(network, address string) (net.Conn, error) // TimeoutDialer is a function matching the signature of net.DialTimeout. type TimeoutDialer func(network, address string, timeout time.Duration) (net.Conn, error) // ContextDialer is a function matching the signature of // net.Dialer.DialContext. type ContextDialer func(ctx context.Context, network, address string) (net.Conn, error) // Network represents a network with the given bandwidth, latency, and MTU // (Maximum Transmission Unit) configuration, and can produce wrappers of // net.Listeners, net.Conn, and various forms of dialing functions. The // Listeners and Dialers/Conns on both sides of connections must come from this // package, but need not be created from the same Network. 
Latency is computed // when sending (in Write), and is injected when receiving (in Read). This // allows senders' Write calls to be non-blocking, as in real-world // applications. // // Note: Latency is injected by the sender specifying the absolute time data // should be available, and the reader delaying until that time arrives to // provide the data. This package attempts to counter-act the effects of clock // drift and existing network latency by measuring the delay between the // sender's transmission time and the receiver's reception time during startup. // No attempt is made to measure the existing bandwidth of the connection. type Network struct { Kbps int // Kilobits per second; if non-positive, infinite Latency time.Duration // One-way latency (sending); if non-positive, no delay MTU int // Bytes per packet; if non-positive, infinite } var ( //Local simulates local network. Local = Network{0, 0, 0} //LAN simulates local area network network. LAN = Network{100 * 1024, 2 * time.Millisecond, 1500} //WAN simulates wide area network. WAN = Network{20 * 1024, 30 * time.Millisecond, 1500} //Longhaul simulates bad network. Longhaul = Network{1000 * 1024, 200 * time.Millisecond, 9000} ) // Conn returns a net.Conn that wraps c and injects n's latency into that // connection. This function also imposes latency for connection creation. // If n's Latency is lower than the measured latency in c, an error is // returned. func (n *Network) Conn(c net.Conn) (net.Conn, error) { start := now() nc := &conn{Conn: c, network: n, readBuf: new(bytes.Buffer)} if err := nc.sync(); err != nil { return nil, err } sleep(start.Add(nc.delay).Sub(now())) return nc, nil } type conn struct { net.Conn network *Network readBuf *bytes.Buffer // one packet worth of data received lastSendEnd time.Time // time the previous Write should be fully on the wire delay time.Duration // desired latency - measured latency } // header is sent before all data transmitted by the application. 
type header struct { ReadTime int64 // Time the reader is allowed to read this packet (UnixNano) Sz int32 // Size of the data in the packet } func (c *conn) Write(p []byte) (n int, err error) { tNow := now() if c.lastSendEnd.Before(tNow) { c.lastSendEnd = tNow } for len(p) > 0 { pkt := p if c.network.MTU > 0 && len(pkt) > c.network.MTU { pkt = pkt[:c.network.MTU] p = p[c.network.MTU:] } else { p = nil } if c.network.Kbps > 0 { if congestion := c.lastSendEnd.Sub(tNow) - c.delay; congestion > 0 { // The network is full; sleep until this packet can be sent. sleep(congestion) tNow = tNow.Add(congestion) } } c.lastSendEnd = c.lastSendEnd.Add(c.network.pktTime(len(pkt))) hdr := header{ReadTime: c.lastSendEnd.Add(c.delay).UnixNano(), Sz: int32(len(pkt))} if err := binary.Write(c.Conn, binary.BigEndian, hdr); err != nil { return n, err } x, err := c.Conn.Write(pkt) n += x if err != nil { return n, err } } return n, nil } func (c *conn) Read(p []byte) (n int, err error) { if c.readBuf.Len() == 0 { var hdr header if err := binary.Read(c.Conn, binary.BigEndian, &hdr); err != nil { return 0, err } defer func() { sleep(time.Unix(0, hdr.ReadTime).Sub(now())) }() if _, err := io.CopyN(c.readBuf, c.Conn, int64(hdr.Sz)); err != nil { return 0, err } } // Read from readBuf. return c.readBuf.Read(p) } // sync does a handshake and then measures the latency on the network in // coordination with the other side. func (c *conn) sync() error { const ( pingMsg = "syncPing" warmup = 10 // minimum number of iterations to measure latency giveUp = 50 // maximum number of iterations to measure latency accuracy = time.Millisecond // req'd accuracy to stop early goodRun = 3 // stop early if latency within accuracy this many times ) type syncMsg struct { SendT int64 // Time sent. If zero, stop. RecvT int64 // Time received. If zero, fill in and respond. 
} // A trivial handshake if err := binary.Write(c.Conn, binary.BigEndian, []byte(pingMsg)); err != nil { return err } var ping [8]byte if err := binary.Read(c.Conn, binary.BigEndian, &ping); err != nil { return err } else if string(ping[:]) != pingMsg { return fmt.Errorf("malformed handshake message: %v (want %q)", ping, pingMsg) } // Both sides are alive and syncing. Calculate network delay / clock skew. att := 0 good := 0 var latency time.Duration localDone, remoteDone := false, false send := true for !localDone || !remoteDone { if send { if err := binary.Write(c.Conn, binary.BigEndian, syncMsg{SendT: now().UnixNano()}); err != nil { return err } att++ send = false } // Block until we get a syncMsg m := syncMsg{} if err := binary.Read(c.Conn, binary.BigEndian, &m); err != nil { return err } if m.RecvT == 0 { // Message initiated from other side. if m.SendT == 0 { remoteDone = true continue } // Send response. m.RecvT = now().UnixNano() if err := binary.Write(c.Conn, binary.BigEndian, m); err != nil { return err } continue } lag := time.Duration(m.RecvT - m.SendT) latency += lag avgLatency := latency / time.Duration(att) if e := lag - avgLatency; e > -accuracy && e < accuracy { good++ } else { good = 0 } if att < giveUp && (att < warmup || good < goodRun) { send = true continue } localDone = true latency = avgLatency // Tell the other side we're done. if err := binary.Write(c.Conn, binary.BigEndian, syncMsg{}); err != nil { return err } } if c.network.Latency <= 0 { return nil } c.delay = c.network.Latency - latency if c.delay < 0 { return fmt.Errorf("measured network latency (%v) higher than desired latency (%v)", latency, c.network.Latency) } return nil } // Listener returns a net.Listener that wraps l and injects n's latency in its // connections. 
func (n *Network) Listener(l net.Listener) net.Listener { return &listener{Listener: l, network: n} } type listener struct { net.Listener network *Network } func (l *listener) Accept() (net.Conn, error) { c, err := l.Listener.Accept() if err != nil { return nil, err } return l.network.Conn(c) } // Dialer returns a Dialer that wraps d and injects n's latency in its // connections. n's Latency is also injected to the connection's creation. func (n *Network) Dialer(d Dialer) Dialer { return func(network, address string) (net.Conn, error) { conn, err := d(network, address) if err != nil { return nil, err } return n.Conn(conn) } } // TimeoutDialer returns a TimeoutDialer that wraps d and injects n's latency // in its connections. n's Latency is also injected to the connection's // creation. func (n *Network) TimeoutDialer(d TimeoutDialer) TimeoutDialer { return func(network, address string, timeout time.Duration) (net.Conn, error) { conn, err := d(network, address, timeout) if err != nil { return nil, err } return n.Conn(conn) } } // ContextDialer returns a ContextDialer that wraps d and injects n's latency // in its connections. n's Latency is also injected to the connection's // creation. func (n *Network) ContextDialer(d ContextDialer) ContextDialer { return func(ctx context.Context, network, address string) (net.Conn, error) { conn, err := d(ctx, network, address) if err != nil { return nil, err } return n.Conn(conn) } } // pktTime returns the time it takes to transmit one packet of data of size b // in bytes. func (n *Network) pktTime(b int) time.Duration { if n.Kbps <= 0 { return time.Duration(0) } return time.Duration(b) * time.Second / time.Duration(n.Kbps*(1024/8)) } // Wrappers for testing var now = time.Now var sleep = time.Sleep grpc-go-1.29.1/benchmark/latency/latency_test.go000066400000000000000000000247761365033716300216070ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package latency import ( "bytes" "fmt" "net" "reflect" "sync" "testing" "time" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } // bufConn is a net.Conn implemented by a bytes.Buffer (which is a ReadWriter). type bufConn struct { *bytes.Buffer } func (bufConn) Close() error { panic("unimplemented") } func (bufConn) LocalAddr() net.Addr { panic("unimplemented") } func (bufConn) RemoteAddr() net.Addr { panic("unimplemented") } func (bufConn) SetDeadline(t time.Time) error { panic("unimplemneted") } func (bufConn) SetReadDeadline(t time.Time) error { panic("unimplemneted") } func (bufConn) SetWriteDeadline(t time.Time) error { panic("unimplemneted") } func restoreHooks() func() { s := sleep n := now return func() { sleep = s now = n } } func (s) TestConn(t *testing.T) { defer restoreHooks()() // Constant time. now = func() time.Time { return time.Unix(123, 456) } // Capture sleep times for checking later. var sleepTimes []time.Duration sleep = func(t time.Duration) { sleepTimes = append(sleepTimes, t) } wantSleeps := func(want ...time.Duration) { if !reflect.DeepEqual(want, sleepTimes) { t.Fatalf("sleepTimes = %v; want %v", sleepTimes, want) } sleepTimes = nil } // Use a fairly high latency to cause a large BDP and avoid sleeps while // writing due to simulation of full buffers. 
latency := 1 * time.Second c, err := (&Network{Kbps: 1, Latency: latency, MTU: 5}).Conn(bufConn{&bytes.Buffer{}}) if err != nil { t.Fatalf("Unexpected error creating connection: %v", err) } wantSleeps(latency) // Connection creation delay. // 1 kbps = 128 Bps. Divides evenly by 1 second using nanos. byteLatency := time.Duration(time.Second / 128) write := func(b []byte) { n, err := c.Write(b) if n != len(b) || err != nil { t.Fatalf("c.Write(%v) = %v, %v; want %v, nil", b, n, err, len(b)) } } write([]byte{1, 2, 3, 4, 5}) // One full packet pkt1Time := latency + byteLatency*5 write([]byte{6}) // One partial packet pkt2Time := pkt1Time + byteLatency write([]byte{7, 8, 9, 10, 11, 12, 13}) // Two packets pkt3Time := pkt2Time + byteLatency*5 pkt4Time := pkt3Time + byteLatency*2 // No reads, so no sleeps yet. wantSleeps() read := func(n int, want []byte) { b := make([]byte, n) if rd, err := c.Read(b); err != nil || rd != len(want) { t.Fatalf("c.Read(<%v bytes>) = %v, %v; want %v, nil", n, rd, err, len(want)) } if !reflect.DeepEqual(b[:len(want)], want) { t.Fatalf("read %v; want %v", b, want) } } read(1, []byte{1}) wantSleeps(pkt1Time) read(1, []byte{2}) wantSleeps() read(3, []byte{3, 4, 5}) wantSleeps() read(2, []byte{6}) wantSleeps(pkt2Time) read(2, []byte{7, 8}) wantSleeps(pkt3Time) read(10, []byte{9, 10, 11}) wantSleeps() read(10, []byte{12, 13}) wantSleeps(pkt4Time) } func (s) TestSync(t *testing.T) { defer restoreHooks()() // Infinitely fast CPU: time doesn't pass unless sleep is called. tn := time.Unix(123, 0) now = func() time.Time { return tn } sleep = func(d time.Duration) { tn = tn.Add(d) } // Simulate a 20ms latency network, then run sync across that and expect to // measure 20ms latency, or 10ms additional delay for a 30ms network. 
slowConn, err := (&Network{Kbps: 0, Latency: 20 * time.Millisecond, MTU: 5}).Conn(bufConn{&bytes.Buffer{}}) if err != nil { t.Fatalf("Unexpected error creating connection: %v", err) } c, err := (&Network{Latency: 30 * time.Millisecond}).Conn(slowConn) if err != nil { t.Fatalf("Unexpected error creating connection: %v", err) } if c.(*conn).delay != 10*time.Millisecond { t.Fatalf("c.delay = %v; want 10ms", c.(*conn).delay) } } func (s) TestSyncTooSlow(t *testing.T) { defer restoreHooks()() // Infinitely fast CPU: time doesn't pass unless sleep is called. tn := time.Unix(123, 0) now = func() time.Time { return tn } sleep = func(d time.Duration) { tn = tn.Add(d) } // Simulate a 10ms latency network, then attempt to simulate a 5ms latency // network and expect an error. slowConn, err := (&Network{Kbps: 0, Latency: 10 * time.Millisecond, MTU: 5}).Conn(bufConn{&bytes.Buffer{}}) if err != nil { t.Fatalf("Unexpected error creating connection: %v", err) } errWant := "measured network latency (10ms) higher than desired latency (5ms)" if _, err := (&Network{Latency: 5 * time.Millisecond}).Conn(slowConn); err == nil || err.Error() != errWant { t.Fatalf("Conn() = _, %q; want _, %q", err, errWant) } } func (s) TestListenerAndDialer(t *testing.T) { defer restoreHooks()() tn := time.Unix(123, 0) startTime := tn mu := &sync.Mutex{} now = func() time.Time { mu.Lock() defer mu.Unlock() return tn } // Use a fairly high latency to cause a large BDP and avoid sleeps while // writing due to simulation of full buffers. n := &Network{Kbps: 2, Latency: 1 * time.Second, MTU: 10} // 2 kbps = .25 kBps = 256 Bps byteLatency := func(n int) time.Duration { return time.Duration(n) * time.Second / 256 } // Create a real listener and wrap it. 
l, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Unexpected error creating listener: %v", err) } defer l.Close() l = n.Listener(l) var serverConn net.Conn var scErr error scDone := make(chan struct{}) go func() { serverConn, scErr = l.Accept() close(scDone) }() // Create a dialer and use it. clientConn, err := n.TimeoutDialer(net.DialTimeout)("tcp", l.Addr().String(), 2*time.Second) if err != nil { t.Fatalf("Unexpected error dialing: %v", err) } defer clientConn.Close() // Block until server's Conn is available. <-scDone if scErr != nil { t.Fatalf("Unexpected error listening: %v", scErr) } defer serverConn.Close() // sleep (only) advances tn. Done after connections established so sync detects zero delay. sleep = func(d time.Duration) { mu.Lock() defer mu.Unlock() if d > 0 { tn = tn.Add(d) } } seq := func(a, b int) []byte { buf := make([]byte, b-a) for i := 0; i < b-a; i++ { buf[i] = byte(i + a) } return buf } pkt1 := seq(0, 10) pkt2 := seq(10, 30) pkt3 := seq(30, 35) write := func(c net.Conn, b []byte) { n, err := c.Write(b) if n != len(b) || err != nil { t.Fatalf("c.Write(%v) = %v, %v; want %v, nil", b, n, err, len(b)) } } write(serverConn, pkt1) write(serverConn, pkt2) write(serverConn, pkt3) write(clientConn, pkt3) write(clientConn, pkt1) write(clientConn, pkt2) if tn != startTime { t.Fatalf("unexpected sleep in write; tn = %v; want %v", tn, startTime) } read := func(c net.Conn, n int, want []byte, timeWant time.Time) { b := make([]byte, n) if rd, err := c.Read(b); err != nil || rd != len(want) { t.Fatalf("c.Read(<%v bytes>) = %v, %v; want %v, nil (read: %v)", n, rd, err, len(want), b[:rd]) } if !reflect.DeepEqual(b[:len(want)], want) { t.Fatalf("read %v; want %v", b, want) } if !tn.Equal(timeWant) { t.Errorf("tn after read(%v) = %v; want %v", want, tn, timeWant) } } read(clientConn, len(pkt1)+1, pkt1, startTime.Add(n.Latency+byteLatency(len(pkt1)))) read(serverConn, len(pkt3)+1, pkt3, tn) // tn was advanced by the above read; pkt3 is shorter 
than pkt1 read(clientConn, len(pkt2), pkt2[:10], startTime.Add(n.Latency+byteLatency(len(pkt1)+10))) read(clientConn, len(pkt2), pkt2[10:], startTime.Add(n.Latency+byteLatency(len(pkt1)+len(pkt2)))) read(clientConn, len(pkt3), pkt3, startTime.Add(n.Latency+byteLatency(len(pkt1)+len(pkt2)+len(pkt3)))) read(serverConn, len(pkt1), pkt1, tn) // tn already past the arrival time due to prior reads read(serverConn, len(pkt2), pkt2[:10], tn) read(serverConn, len(pkt2), pkt2[10:], tn) // Sleep awhile and make sure the read happens disregarding previous writes // (lastSendEnd handling). sleep(10 * time.Second) write(clientConn, pkt1) read(serverConn, len(pkt1), pkt1, tn.Add(n.Latency+byteLatency(len(pkt1)))) // Send, sleep longer than the network delay, then make sure the read happens // instantly. write(serverConn, pkt1) sleep(10 * time.Second) read(clientConn, len(pkt1), pkt1, tn) } func (s) TestBufferBloat(t *testing.T) { defer restoreHooks()() // Infinitely fast CPU: time doesn't pass unless sleep is called. tn := time.Unix(123, 0) now = func() time.Time { return tn } // Capture sleep times for checking later. var sleepTimes []time.Duration sleep = func(d time.Duration) { sleepTimes = append(sleepTimes, d) tn = tn.Add(d) } wantSleeps := func(want ...time.Duration) error { if !reflect.DeepEqual(want, sleepTimes) { return fmt.Errorf("sleepTimes = %v; want %v", sleepTimes, want) } sleepTimes = nil return nil } n := &Network{Kbps: 8 /* 1KBps */, Latency: time.Second, MTU: 8} bdpBytes := (n.Kbps * 1024 / 8) * int(n.Latency/time.Second) // 1024 c, err := n.Conn(bufConn{&bytes.Buffer{}}) if err != nil { t.Fatalf("Unexpected error creating connection: %v", err) } wantSleeps(n.Latency) // Connection creation delay. 
write := func(n int, sleeps ...time.Duration) { if wt, err := c.Write(make([]byte, n)); err != nil || wt != n { t.Fatalf("c.Write(<%v bytes>) = %v, %v; want %v, nil", n, wt, err, n) } if err := wantSleeps(sleeps...); err != nil { t.Fatalf("After writing %v bytes: %v", n, err) } } read := func(n int, sleeps ...time.Duration) { if rd, err := c.Read(make([]byte, n)); err != nil || rd != n { t.Fatalf("c.Read(_) = %v, %v; want %v, nil", rd, err, n) } if err := wantSleeps(sleeps...); err != nil { t.Fatalf("After reading %v bytes: %v", n, err) } } write(8) // No reads and buffer not full, so no sleeps yet. read(8, time.Second+n.pktTime(8)) write(bdpBytes) // Fill the buffer. write(1) // We can send one extra packet even when the buffer is full. write(n.MTU, n.pktTime(1)) // Make sure we sleep to clear the previous write. write(1, n.pktTime(n.MTU)) write(n.MTU+1, n.pktTime(1), n.pktTime(n.MTU)) tn = tn.Add(10 * time.Second) // Wait long enough for the buffer to clear. write(bdpBytes) // No sleeps required. } grpc-go-1.29.1/benchmark/primitives/000077500000000000000000000000001365033716300172765ustar00rootroot00000000000000grpc-go-1.29.1/benchmark/primitives/code_string_test.go000066400000000000000000000064361365033716300231750ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package primitives_test import ( "strconv" "testing" "google.golang.org/grpc/codes" ) type codeBench uint32 const ( OK codeBench = iota Canceled Unknown InvalidArgument DeadlineExceeded NotFound AlreadyExists PermissionDenied ResourceExhausted FailedPrecondition Aborted OutOfRange Unimplemented Internal Unavailable DataLoss Unauthenticated ) // The following String() function was generated by stringer. const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated" var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} func (i codeBench) String() string { if i >= codeBench(len(_Code_index)-1) { return "Code(" + strconv.FormatInt(int64(i), 10) + ")" } return _Code_name[_Code_index[i]:_Code_index[i+1]] } var nameMap = map[codeBench]string{ OK: "OK", Canceled: "Canceled", Unknown: "Unknown", InvalidArgument: "InvalidArgument", DeadlineExceeded: "DeadlineExceeded", NotFound: "NotFound", AlreadyExists: "AlreadyExists", PermissionDenied: "PermissionDenied", ResourceExhausted: "ResourceExhausted", FailedPrecondition: "FailedPrecondition", Aborted: "Aborted", OutOfRange: "OutOfRange", Unimplemented: "Unimplemented", Internal: "Internal", Unavailable: "Unavailable", DataLoss: "DataLoss", Unauthenticated: "Unauthenticated", } func (i codeBench) StringUsingMap() string { if s, ok := nameMap[i]; ok { return s } return "Code(" + strconv.FormatInt(int64(i), 10) + ")" } func BenchmarkCodeStringStringer(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { c := codeBench(uint32(i % 17)) _ = c.String() } b.StopTimer() } func BenchmarkCodeStringMap(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { c := codeBench(uint32(i % 17)) _ = c.StringUsingMap() } b.StopTimer() } // codes.Code.String() does a switch. 
func BenchmarkCodeStringSwitch(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { c := codes.Code(uint32(i % 17)) _ = c.String() } b.StopTimer() } // Testing all codes (0<=c<=16) and also one overflow (17). func BenchmarkCodeStringStringerWithOverflow(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { c := codeBench(uint32(i % 18)) _ = c.String() } b.StopTimer() } // Testing all codes (0<=c<=16) and also one overflow (17). func BenchmarkCodeStringSwitchWithOverflow(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { c := codes.Code(uint32(i % 18)) _ = c.String() } b.StopTimer() } grpc-go-1.29.1/benchmark/primitives/context_test.go000066400000000000000000000064651365033716300223630ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package primitives_test import ( "context" "testing" "time" ) func BenchmarkCancelContextErrNoErr(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) for i := 0; i < b.N; i++ { if err := ctx.Err(); err != nil { b.Fatal("error") } } cancel() } func BenchmarkCancelContextErrGotErr(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) cancel() for i := 0; i < b.N; i++ { if err := ctx.Err(); err == nil { b.Fatal("error") } } } func BenchmarkCancelContextChannelNoErr(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) for i := 0; i < b.N; i++ { select { case <-ctx.Done(): b.Fatal("error: ctx.Done():", ctx.Err()) default: } } cancel() } func BenchmarkCancelContextChannelGotErr(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) cancel() for i := 0; i < b.N; i++ { select { case <-ctx.Done(): if err := ctx.Err(); err == nil { b.Fatal("error") } default: b.Fatal("error: !ctx.Done()") } } } func BenchmarkTimerContextErrNoErr(b *testing.B) { ctx, cancel := context.WithTimeout(context.Background(), 24*time.Hour) for i := 0; i < b.N; i++ { if err := ctx.Err(); err != nil { b.Fatal("error") } } cancel() } func BenchmarkTimerContextErrGotErr(b *testing.B) { ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond) cancel() for i := 0; i < b.N; i++ { if err := ctx.Err(); err == nil { b.Fatal("error") } } } func BenchmarkTimerContextChannelNoErr(b *testing.B) { ctx, cancel := context.WithTimeout(context.Background(), 24*time.Hour) for i := 0; i < b.N; i++ { select { case <-ctx.Done(): b.Fatal("error: ctx.Done():", ctx.Err()) default: } } cancel() } func BenchmarkTimerContextChannelGotErr(b *testing.B) { ctx, cancel := context.WithTimeout(context.Background(), time.Microsecond) cancel() for i := 0; i < b.N; i++ { select { case <-ctx.Done(): if err := ctx.Err(); err == nil { b.Fatal("error") } default: b.Fatal("error: !ctx.Done()") } } } type ctxKey struct{} func 
newContextWithLocalKey(parent context.Context) context.Context { return context.WithValue(parent, ctxKey{}, nil) } var ck = ctxKey{} func newContextWithGlobalKey(parent context.Context) context.Context { return context.WithValue(parent, ck, nil) } func BenchmarkContextWithValue(b *testing.B) { benches := []struct { name string f func(context.Context) context.Context }{ {"newContextWithLocalKey", newContextWithLocalKey}, {"newContextWithGlobalKey", newContextWithGlobalKey}, } pCtx := context.Background() for _, bench := range benches { b.Run(bench.name, func(b *testing.B) { for j := 0; j < b.N; j++ { bench.f(pCtx) } }) } } grpc-go-1.29.1/benchmark/primitives/primitives_test.go000066400000000000000000000150471365033716300230660ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package primitives_test contains benchmarks for various synchronization primitives // available in Go. 
package primitives_test import ( "fmt" "sync" "sync/atomic" "testing" "time" "unsafe" ) func BenchmarkSelectClosed(b *testing.B) { c := make(chan struct{}) close(c) x := 0 b.ResetTimer() for i := 0; i < b.N; i++ { select { case <-c: x++ default: } } b.StopTimer() if x != b.N { b.Fatal("error") } } func BenchmarkSelectOpen(b *testing.B) { c := make(chan struct{}) x := 0 b.ResetTimer() for i := 0; i < b.N; i++ { select { case <-c: default: x++ } } b.StopTimer() if x != b.N { b.Fatal("error") } } func BenchmarkAtomicBool(b *testing.B) { c := int32(0) x := 0 b.ResetTimer() for i := 0; i < b.N; i++ { if atomic.LoadInt32(&c) == 0 { x++ } } b.StopTimer() if x != b.N { b.Fatal("error") } } func BenchmarkAtomicValueLoad(b *testing.B) { c := atomic.Value{} c.Store(0) x := 0 b.ResetTimer() for i := 0; i < b.N; i++ { if c.Load().(int) == 0 { x++ } } b.StopTimer() if x != b.N { b.Fatal("error") } } func BenchmarkAtomicValueStore(b *testing.B) { c := atomic.Value{} v := 123 b.ResetTimer() for i := 0; i < b.N; i++ { c.Store(v) } b.StopTimer() } func BenchmarkMutex(b *testing.B) { c := sync.Mutex{} x := 0 b.ResetTimer() for i := 0; i < b.N; i++ { c.Lock() x++ c.Unlock() } b.StopTimer() if x != b.N { b.Fatal("error") } } func BenchmarkRWMutex(b *testing.B) { c := sync.RWMutex{} x := 0 b.ResetTimer() for i := 0; i < b.N; i++ { c.RLock() x++ c.RUnlock() } b.StopTimer() if x != b.N { b.Fatal("error") } } func BenchmarkRWMutexW(b *testing.B) { c := sync.RWMutex{} x := 0 b.ResetTimer() for i := 0; i < b.N; i++ { c.Lock() x++ c.Unlock() } b.StopTimer() if x != b.N { b.Fatal("error") } } func BenchmarkMutexWithDefer(b *testing.B) { c := sync.Mutex{} x := 0 b.ResetTimer() for i := 0; i < b.N; i++ { func() { c.Lock() defer c.Unlock() x++ }() } b.StopTimer() if x != b.N { b.Fatal("error") } } func BenchmarkMutexWithClosureDefer(b *testing.B) { c := sync.Mutex{} x := 0 b.ResetTimer() for i := 0; i < b.N; i++ { func() { c.Lock() defer func() { c.Unlock() }() x++ }() } b.StopTimer() if x != b.N 
{ b.Fatal("error") } } func BenchmarkMutexWithoutDefer(b *testing.B) { c := sync.Mutex{} x := 0 b.ResetTimer() for i := 0; i < b.N; i++ { func() { c.Lock() x++ c.Unlock() }() } b.StopTimer() if x != b.N { b.Fatal("error") } } func BenchmarkAtomicAddInt64(b *testing.B) { var c int64 b.ResetTimer() for i := 0; i < b.N; i++ { atomic.AddInt64(&c, 1) } b.StopTimer() if c != int64(b.N) { b.Fatal("error") } } func BenchmarkAtomicTimeValueStore(b *testing.B) { var c atomic.Value t := time.Now() b.ResetTimer() for i := 0; i < b.N; i++ { c.Store(t) } b.StopTimer() } func BenchmarkAtomic16BValueStore(b *testing.B) { var c atomic.Value t := struct { a int64 b int64 }{ 123, 123, } b.ResetTimer() for i := 0; i < b.N; i++ { c.Store(t) } b.StopTimer() } func BenchmarkAtomic32BValueStore(b *testing.B) { var c atomic.Value t := struct { a int64 b int64 c int64 d int64 }{ 123, 123, 123, 123, } b.ResetTimer() for i := 0; i < b.N; i++ { c.Store(t) } b.StopTimer() } func BenchmarkAtomicPointerStore(b *testing.B) { t := 123 var up unsafe.Pointer b.ResetTimer() for i := 0; i < b.N; i++ { atomic.StorePointer(&up, unsafe.Pointer(&t)) } b.StopTimer() } func BenchmarkAtomicTimePointerStore(b *testing.B) { t := time.Now() var up unsafe.Pointer b.ResetTimer() for i := 0; i < b.N; i++ { atomic.StorePointer(&up, unsafe.Pointer(&t)) } b.StopTimer() } func BenchmarkStoreContentionWithAtomic(b *testing.B) { t := 123 var c unsafe.Pointer b.RunParallel(func(pb *testing.PB) { for pb.Next() { atomic.StorePointer(&c, unsafe.Pointer(&t)) } }) } func BenchmarkStoreContentionWithMutex(b *testing.B) { t := 123 var mu sync.Mutex var c int b.RunParallel(func(pb *testing.PB) { for pb.Next() { mu.Lock() c = t mu.Unlock() } }) _ = c } type dummyStruct struct { a int64 b time.Time } func BenchmarkStructStoreContention(b *testing.B) { d := dummyStruct{} dp := unsafe.Pointer(&d) t := time.Now() for _, j := range []int{100000000, 10000, 0} { for _, i := range []int{100000, 10} { b.Run(fmt.Sprintf("CAS/%v/%v", j, i), 
func(b *testing.B) { b.SetParallelism(i) b.RunParallel(func(pb *testing.PB) { n := &dummyStruct{ b: t, } for pb.Next() { for y := 0; y < j; y++ { } for { v := (*dummyStruct)(atomic.LoadPointer(&dp)) n.a = v.a + 1 if atomic.CompareAndSwapPointer(&dp, unsafe.Pointer(v), unsafe.Pointer(n)) { n = v break } } } }) }) } } var mu sync.Mutex for _, j := range []int{100000000, 10000, 0} { for _, i := range []int{100000, 10} { b.Run(fmt.Sprintf("Mutex/%v/%v", j, i), func(b *testing.B) { b.SetParallelism(i) b.RunParallel(func(pb *testing.PB) { for pb.Next() { for y := 0; y < j; y++ { } mu.Lock() d.a++ d.b = t mu.Unlock() } }) }) } } } type myFooer struct{} func (myFooer) Foo() {} type fooer interface { Foo() } func BenchmarkInterfaceTypeAssertion(b *testing.B) { // Call a separate function to avoid compiler optimizations. runInterfaceTypeAssertion(b, myFooer{}) } func runInterfaceTypeAssertion(b *testing.B, fer interface{}) { x := 0 b.ResetTimer() for i := 0; i < b.N; i++ { if _, ok := fer.(fooer); ok { x++ } } b.StopTimer() if x != b.N { b.Fatal("error") } } func BenchmarkStructTypeAssertion(b *testing.B) { // Call a separate function to avoid compiler optimizations. runStructTypeAssertion(b, myFooer{}) } func runStructTypeAssertion(b *testing.B, fer interface{}) { x := 0 b.ResetTimer() for i := 0; i < b.N; i++ { if _, ok := fer.(myFooer); ok { x++ } } b.StopTimer() if x != b.N { b.Fatal("error") } } grpc-go-1.29.1/benchmark/primitives/syncmap_test.go000066400000000000000000000064071365033716300223450ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package primitives_test import ( "sync" "sync/atomic" "testing" ) type incrementUint64Map interface { increment(string) result(string) uint64 } type mapWithLock struct { mu sync.Mutex m map[string]uint64 } func newMapWithLock() incrementUint64Map { return &mapWithLock{ m: make(map[string]uint64), } } func (mwl *mapWithLock) increment(c string) { mwl.mu.Lock() mwl.m[c]++ mwl.mu.Unlock() } func (mwl *mapWithLock) result(c string) uint64 { return mwl.m[c] } type mapWithAtomicFastpath struct { mu sync.RWMutex m map[string]*uint64 } func newMapWithAtomicFastpath() incrementUint64Map { return &mapWithAtomicFastpath{ m: make(map[string]*uint64), } } func (mwaf *mapWithAtomicFastpath) increment(c string) { mwaf.mu.RLock() if p, ok := mwaf.m[c]; ok { atomic.AddUint64(p, 1) mwaf.mu.RUnlock() return } mwaf.mu.RUnlock() mwaf.mu.Lock() if p, ok := mwaf.m[c]; ok { atomic.AddUint64(p, 1) mwaf.mu.Unlock() return } var temp uint64 = 1 mwaf.m[c] = &temp mwaf.mu.Unlock() } func (mwaf *mapWithAtomicFastpath) result(c string) uint64 { return atomic.LoadUint64(mwaf.m[c]) } type mapWithSyncMap struct { m sync.Map } func newMapWithSyncMap() incrementUint64Map { return &mapWithSyncMap{} } func (mwsm *mapWithSyncMap) increment(c string) { p, ok := mwsm.m.Load(c) if !ok { tp := new(uint64) p, _ = mwsm.m.LoadOrStore(c, tp) } atomic.AddUint64(p.(*uint64), 1) } func (mwsm *mapWithSyncMap) result(c string) uint64 { p, _ := mwsm.m.Load(c) return atomic.LoadUint64(p.(*uint64)) } func benchmarkIncrementUint64Map(b *testing.B, f func() incrementUint64Map) { const cat = "cat" benches := []struct { 
name string goroutineCount int }{ { name: " 1", goroutineCount: 1, }, { name: " 10", goroutineCount: 10, }, { name: " 100", goroutineCount: 100, }, { name: "1000", goroutineCount: 1000, }, } for _, bb := range benches { b.Run(bb.name, func(b *testing.B) { m := f() var wg sync.WaitGroup wg.Add(bb.goroutineCount) b.ResetTimer() for i := 0; i < bb.goroutineCount; i++ { go func() { for j := 0; j < b.N; j++ { m.increment(cat) } wg.Done() }() } wg.Wait() b.StopTimer() if m.result(cat) != uint64(bb.goroutineCount*b.N) { b.Fatalf("result is %d, want %d", m.result(cat), b.N) } }) } } func BenchmarkMapWithSyncMutexContetion(b *testing.B) { benchmarkIncrementUint64Map(b, newMapWithLock) } func BenchmarkMapWithAtomicFastpath(b *testing.B) { benchmarkIncrementUint64Map(b, newMapWithAtomicFastpath) } func BenchmarkMapWithSyncMap(b *testing.B) { benchmarkIncrementUint64Map(b, newMapWithSyncMap) } grpc-go-1.29.1/benchmark/run_bench.sh000077500000000000000000000077571365033716300174250ustar00rootroot00000000000000#!/bin/bash rpcs=(1) conns=(1) warmup=10 dur=10 reqs=(1) resps=(1) rpc_types=(unary) # idx[0] = idx value for rpcs # idx[1] = idx value for conns # idx[2] = idx value for reqs # idx[3] = idx value for resps # idx[4] = idx value for rpc_types idx=(0 0 0 0 0) idx_max=(1 1 1 1 1) inc() { for i in $(seq $((${#idx[@]}-1)) -1 0); do idx[${i}]=$((${idx[${i}]}+1)) if [ ${idx[${i}]} == ${idx_max[${i}]} ]; then idx[${i}]=0 else break fi done local fin fin=1 # Check to see if we have looped back to the beginning. 
for v in ${idx[@]}; do if [ ${v} != 0 ]; then fin=0 break fi done if [ ${fin} == 1 ]; then rm -Rf ${out_dir} clean_and_die 0 fi } clean_and_die() { rm -Rf ${out_dir} exit $1 } run(){ local nr nr=${rpcs[${idx[0]}]} local nc nc=${conns[${idx[1]}]} req_sz=${reqs[${idx[2]}]} resp_sz=${resps[${idx[3]}]} r_type=${rpc_types[${idx[4]}]} # Following runs one benchmark base_port=50051 delta=0 test_name="r_"${nr}"_c_"${nc}"_req_"${req_sz}"_resp_"${resp_sz}"_"${r_type}"_"$(date +%s) echo "================================================================================" echo ${test_name} while : do port=$((${base_port}+${delta})) # Launch the server in background ${out_dir}/server --port=${port} --test_name="Server_"${test_name}& server_pid=$(echo $!) # Launch the client ${out_dir}/client --port=${port} --d=${dur} --w=${warmup} --r=${nr} --c=${nc} --req=${req_sz} --resp=${resp_sz} --rpc_type=${r_type} --test_name="client_"${test_name} client_status=$(echo $?) kill -INT ${server_pid} wait ${server_pid} if [ ${client_status} == 0 ]; then break fi delta=$((${delta}+1)) if [ ${delta} == 10 ]; then echo "Continuous 10 failed runs. Exiting now." 
rm -Rf ${out_dir} clean_and_die 1 fi done } set_param(){ local argname=$1 shift local idx=$1 shift if [ $# -eq 0 ]; then echo "${argname} not specified" exit 1 fi PARAM=($(echo $1 | sed 's/,/ /g')) if [ ${idx} -lt 0 ]; then return fi idx_max[${idx}]=${#PARAM[@]} } while [ $# -gt 0 ]; do case "$1" in -r) shift set_param "number of rpcs" 0 $1 rpcs=(${PARAM[@]}) shift ;; -c) shift set_param "number of connections" 1 $1 conns=(${PARAM[@]}) shift ;; -w) shift set_param "warm-up period" -1 $1 warmup=${PARAM} shift ;; -d) shift set_param "duration" -1 $1 dur=${PARAM} shift ;; -req) shift set_param "request size" 2 $1 reqs=(${PARAM[@]}) shift ;; -resp) shift set_param "response size" 3 $1 resps=(${PARAM[@]}) shift ;; -rpc_type) shift set_param "rpc type" 4 $1 rpc_types=(${PARAM[@]}) shift ;; -h|--help) echo "Following are valid options:" echo echo "-h, --help show brief help" echo "-w warm-up duration in seconds, default value is 10" echo "-d benchmark duration in seconds, default value is 60" echo "" echo "Each of the following can have multiple comma separated values." echo "" echo "-r number of RPCs, default value is 1" echo "-c number of Connections, default value is 1" echo "-req req size in bytes, default value is 1" echo "-resp resp size in bytes, default value is 1" echo "-rpc_type valid values are unary|streaming, default is unary" exit 0 ;; *) echo "Incorrect option $1" exit 1 ;; esac done # Build server and client out_dir=$(mktemp -d oss_benchXXX) go build -o ${out_dir}/server $GOPATH/src/google.golang.org/grpc/benchmark/server/main.go && go build -o ${out_dir}/client $GOPATH/src/google.golang.org/grpc/benchmark/client/main.go if [ $? != 0 ]; then clean_and_die 1 fi while : do run inc done grpc-go-1.29.1/benchmark/server/000077500000000000000000000000001365033716300164115ustar00rootroot00000000000000grpc-go-1.29.1/benchmark/server/main.go000066400000000000000000000047071365033716300176740ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /* Package main provides a server used for benchmarking. It launches a server which is listening on port 50051. An example to start the server can be found at: go run benchmark/server/main.go -test_name=grpc_test After starting the server, the client can be run separately and used to test qps and latency. */ package main import ( "flag" "fmt" "net" _ "net/http/pprof" "os" "os/signal" "runtime" "runtime/pprof" "time" "google.golang.org/grpc/benchmark" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/syscall" ) var ( port = flag.String("port", "50051", "Localhost port to listen on.") testName = flag.String("test_name", "", "Name of the test used for creating profiles.") ) func main() { flag.Parse() if *testName == "" { grpclog.Fatalf("test name not set") } lis, err := net.Listen("tcp", ":"+*port) if err != nil { grpclog.Fatalf("Failed to listen: %v", err) } defer lis.Close() cf, err := os.Create("/tmp/" + *testName + ".cpu") if err != nil { grpclog.Fatalf("Failed to create file: %v", err) } defer cf.Close() pprof.StartCPUProfile(cf) cpuBeg := syscall.GetCPUTime() // Launch server in a separate goroutine. stop := benchmark.StartServer(benchmark.ServerInfo{Type: "protobuf", Listener: lis}) // Wait on OS terminate signal. 
ch := make(chan os.Signal, 1) signal.Notify(ch, os.Interrupt) <-ch cpu := time.Duration(syscall.GetCPUTime() - cpuBeg) stop() pprof.StopCPUProfile() mf, err := os.Create("/tmp/" + *testName + ".mem") if err != nil { grpclog.Fatalf("Failed to create file: %v", err) } defer mf.Close() runtime.GC() // materialize all statistics if err := pprof.WriteHeapProfile(mf); err != nil { grpclog.Fatalf("Failed to write memory profile: %v", err) } fmt.Println("Server CPU utilization:", cpu) fmt.Println("Server CPU profile:", cf.Name()) fmt.Println("Server Mem Profile:", mf.Name()) } grpc-go-1.29.1/benchmark/stats/000077500000000000000000000000001365033716300162415ustar00rootroot00000000000000grpc-go-1.29.1/benchmark/stats/curve.go000066400000000000000000000112031365033716300177110ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package stats import ( "crypto/sha256" "encoding/csv" "encoding/hex" "fmt" "io/ioutil" "math" "math/rand" "os" "sort" "strconv" ) // payloadCurveRange represents a line within a payload curve CSV file. type payloadCurveRange struct { from, to int32 weight float64 } // newPayloadCurveRange receives a line from a payload curve CSV file and // returns a *payloadCurveRange if the values are acceptable. 
func newPayloadCurveRange(line []string) (*payloadCurveRange, error) { if len(line) != 3 { return nil, fmt.Errorf("invalid number of entries in line %v (expected 3)", line) } var from, to int64 var weight float64 var err error if from, err = strconv.ParseInt(line[0], 10, 32); err != nil { return nil, err } if from <= 0 { return nil, fmt.Errorf("line %v: field (%d) must be in (0, %d]", line, from, math.MaxInt32) } if to, err = strconv.ParseInt(line[1], 10, 32); err != nil { return nil, err } if to <= 0 { return nil, fmt.Errorf("line %v: field %d must be in (0, %d]", line, to, math.MaxInt32) } if from > to { return nil, fmt.Errorf("line %v: from (%d) > to (%d)", line, from, to) } if weight, err = strconv.ParseFloat(line[2], 64); err != nil { return nil, err } return &payloadCurveRange{from: int32(from), to: int32(to), weight: weight}, nil } // chooseRandom picks a payload size (in bytes) for a particular range. This is // done with a uniform distribution. func (pcr *payloadCurveRange) chooseRandom() int { if pcr.from == pcr.to { // fast path return int(pcr.from) } return int(rand.Int31n(pcr.to-pcr.from+1) + pcr.from) } // sha256file is a helper function that returns a hex string matching the // SHA-256 sum of the input file. func sha256file(file string) (string, error) { data, err := ioutil.ReadFile(file) if err != nil { return "", err } sum := sha256.Sum256(data) return hex.EncodeToString(sum[:]), nil } // PayloadCurve is an internal representation of a weighted random distribution // CSV file. Once a *PayloadCurve is created with NewPayloadCurve, the // ChooseRandom function should be called to generate random payload sizes. type PayloadCurve struct { pcrs []*payloadCurveRange // Sha256 must be a public field so that the gob encoder can write it to // disk. This will be needed at decode-time by the Hash function. Sha256 string } // NewPayloadCurve parses a .csv file and returns a *PayloadCurve if no errors // were encountered in parsing and initialization. 
func NewPayloadCurve(file string) (*PayloadCurve, error) { f, err := os.Open(file) if err != nil { return nil, err } defer f.Close() r := csv.NewReader(f) lines, err := r.ReadAll() if err != nil { return nil, err } ret := &PayloadCurve{} var total float64 for _, line := range lines { pcr, err := newPayloadCurveRange(line) if err != nil { return nil, err } ret.pcrs = append(ret.pcrs, pcr) total += pcr.weight } ret.Sha256, err = sha256file(file) if err != nil { return nil, err } for _, pcr := range ret.pcrs { pcr.weight /= total } sort.Slice(ret.pcrs, func(i, j int) bool { if ret.pcrs[i].from == ret.pcrs[j].from { return ret.pcrs[i].to < ret.pcrs[j].to } return ret.pcrs[i].from < ret.pcrs[j].from }) var lastTo int32 for _, pcr := range ret.pcrs { if lastTo >= pcr.from { return nil, fmt.Errorf("[%d, %d] overlaps with a different line", pcr.from, pcr.to) } lastTo = pcr.to } return ret, nil } // ChooseRandom picks a random payload size (in bytes) that follows the // underlying weighted random distribution. func (pc *PayloadCurve) ChooseRandom() int { target := rand.Float64() var seen float64 for _, pcr := range pc.pcrs { seen += pcr.weight if seen >= target { return pcr.chooseRandom() } } // This should never happen, but if it does, return a sane default. return 1 } // Hash returns a string uniquely identifying a payload curve file for feature // matching purposes. func (pc *PayloadCurve) Hash() string { return pc.Sha256 } // ShortHash returns a shortened version of Hash for display purposes. func (pc *PayloadCurve) ShortHash() string { return pc.Sha256[:8] } grpc-go-1.29.1/benchmark/stats/histogram.go000066400000000000000000000145221365033716300205710ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package stats import ( "bytes" "fmt" "io" "log" "math" "strconv" "strings" ) // Histogram accumulates values in the form of a histogram with // exponentially increased bucket sizes. type Histogram struct { // Count is the total number of values added to the histogram. Count int64 // Sum is the sum of all the values added to the histogram. Sum int64 // SumOfSquares is the sum of squares of all values. SumOfSquares int64 // Min is the minimum of all the values added to the histogram. Min int64 // Max is the maximum of all the values added to the histogram. Max int64 // Buckets contains all the buckets of the histogram. Buckets []HistogramBucket opts HistogramOptions logBaseBucketSize float64 oneOverLogOnePlusGrowthFactor float64 } // HistogramOptions contains the parameters that define the histogram's buckets. // The first bucket of the created histogram (with index 0) contains [min, min+n) // where n = BaseBucketSize, min = MinValue. // Bucket i (i>=1) contains [min + n * m^(i-1), min + n * m^i), where m = 1+GrowthFactor. // The type of the values is int64. type HistogramOptions struct { // NumBuckets is the number of buckets. NumBuckets int // GrowthFactor is the growth factor of the buckets. A value of 0.1 // indicates that bucket N+1 will be 10% larger than bucket N. GrowthFactor float64 // BaseBucketSize is the size of the first bucket. BaseBucketSize float64 // MinValue is the lower bound of the first bucket. MinValue int64 } // HistogramBucket represents one histogram bucket. type HistogramBucket struct { // LowBound is the lower bound of the bucket. 
LowBound float64 // Count is the number of values in the bucket. Count int64 } // NewHistogram returns a pointer to a new Histogram object that was created // with the provided options. func NewHistogram(opts HistogramOptions) *Histogram { if opts.NumBuckets == 0 { opts.NumBuckets = 32 } if opts.BaseBucketSize == 0.0 { opts.BaseBucketSize = 1.0 } h := Histogram{ Buckets: make([]HistogramBucket, opts.NumBuckets), Min: math.MaxInt64, Max: math.MinInt64, opts: opts, logBaseBucketSize: math.Log(opts.BaseBucketSize), oneOverLogOnePlusGrowthFactor: 1 / math.Log(1+opts.GrowthFactor), } m := 1.0 + opts.GrowthFactor delta := opts.BaseBucketSize h.Buckets[0].LowBound = float64(opts.MinValue) for i := 1; i < opts.NumBuckets; i++ { h.Buckets[i].LowBound = float64(opts.MinValue) + delta delta = delta * m } return &h } // Print writes textual output of the histogram values. func (h *Histogram) Print(w io.Writer) { h.PrintWithUnit(w, 1) } // PrintWithUnit writes textual output of the histogram values . // Data in histogram is divided by a Unit before print. func (h *Histogram) PrintWithUnit(w io.Writer, unit float64) { avg := float64(h.Sum) / float64(h.Count) fmt.Fprintf(w, "Count: %d Min: %5.1f Max: %5.1f Avg: %.2f\n", h.Count, float64(h.Min)/unit, float64(h.Max)/unit, avg/unit) fmt.Fprintf(w, "%s\n", strings.Repeat("-", 60)) if h.Count <= 0 { return } maxBucketDigitLen := len(strconv.FormatFloat(h.Buckets[len(h.Buckets)-1].LowBound, 'f', 6, 64)) if maxBucketDigitLen < 3 { // For "inf". 
maxBucketDigitLen = 3 } maxCountDigitLen := len(strconv.FormatInt(h.Count, 10)) percentMulti := 100 / float64(h.Count) accCount := int64(0) for i, b := range h.Buckets { fmt.Fprintf(w, "[%*f, ", maxBucketDigitLen, b.LowBound/unit) if i+1 < len(h.Buckets) { fmt.Fprintf(w, "%*f)", maxBucketDigitLen, h.Buckets[i+1].LowBound/unit) } else { fmt.Fprintf(w, "%*s)", maxBucketDigitLen, "inf") } accCount += b.Count fmt.Fprintf(w, " %*d %5.1f%% %5.1f%%", maxCountDigitLen, b.Count, float64(b.Count)*percentMulti, float64(accCount)*percentMulti) const barScale = 0.1 barLength := int(float64(b.Count)*percentMulti*barScale + 0.5) fmt.Fprintf(w, " %s\n", strings.Repeat("#", barLength)) } } // String returns the textual output of the histogram values as string. func (h *Histogram) String() string { var b bytes.Buffer h.Print(&b) return b.String() } // Clear resets all the content of histogram. func (h *Histogram) Clear() { h.Count = 0 h.Sum = 0 h.SumOfSquares = 0 h.Min = math.MaxInt64 h.Max = math.MinInt64 for i := range h.Buckets { h.Buckets[i].Count = 0 } } // Opts returns a copy of the options used to create the Histogram. func (h *Histogram) Opts() HistogramOptions { return h.opts } // Add adds a value to the histogram. 
func (h *Histogram) Add(value int64) error { bucket, err := h.findBucket(value) if err != nil { return err } h.Buckets[bucket].Count++ h.Count++ h.Sum += value h.SumOfSquares += value * value if value < h.Min { h.Min = value } if value > h.Max { h.Max = value } return nil } func (h *Histogram) findBucket(value int64) (int, error) { delta := float64(value - h.opts.MinValue) var b int if delta >= h.opts.BaseBucketSize { // b = log_{1+growthFactor} (delta / baseBucketSize) + 1 // = log(delta / baseBucketSize) / log(1+growthFactor) + 1 // = (log(delta) - log(baseBucketSize)) * (1 / log(1+growthFactor)) + 1 b = int((math.Log(delta)-h.logBaseBucketSize)*h.oneOverLogOnePlusGrowthFactor + 1) } if b >= len(h.Buckets) { return 0, fmt.Errorf("no bucket for value: %d", value) } return b, nil } // Merge takes another histogram h2, and merges its content into h. // The two histograms must be created by equivalent HistogramOptions. func (h *Histogram) Merge(h2 *Histogram) { if h.opts != h2.opts { log.Fatalf("failed to merge histograms, created by inequivalent options") } h.Count += h2.Count h.Sum += h2.Sum h.SumOfSquares += h2.SumOfSquares if h2.Min < h.Min { h.Min = h2.Min } if h2.Max > h.Max { h.Max = h2.Max } for i, b := range h2.Buckets { h.Buckets[i].Count += b.Count } } grpc-go-1.29.1/benchmark/stats/stats.go000066400000000000000000000402441365033716300177320ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ // Package stats tracks the statistics associated with benchmark runs. package stats import ( "bytes" "fmt" "log" "math" "runtime" "sort" "strconv" "sync" "time" "google.golang.org/grpc" ) // FeatureIndex is an enum for features that usually differ across individual // benchmark runs in a single execution. These are usually configured by the // user through command line flags. type FeatureIndex int // FeatureIndex enum values corresponding to individually settable features. const ( EnableTraceIndex FeatureIndex = iota ReadLatenciesIndex ReadKbpsIndex ReadMTUIndex MaxConcurrentCallsIndex ReqSizeBytesIndex RespSizeBytesIndex ReqPayloadCurveIndex RespPayloadCurveIndex CompModesIndex EnableChannelzIndex EnablePreloaderIndex // MaxFeatureIndex is a place holder to indicate the total number of feature // indices we have. Any new feature indices should be added above this. MaxFeatureIndex ) // Features represent configured options for a specific benchmark run. This is // usually constructed from command line arguments passed by the caller. See // benchmark/benchmain/main.go for defined command line flags. This is also // part of the BenchResults struct which is serialized and written to a file. type Features struct { // Network mode used for this benchmark run. Could be one of Local, LAN, WAN // or Longhaul. NetworkMode string // UseBufCon indicates whether an in-memory connection was used for this // benchmark run instead of system network I/O. UseBufConn bool // EnableKeepalive indicates if keepalives were enabled on the connections // used in this benchmark run. EnableKeepalive bool // BenchTime indicates the duration of the benchmark run. BenchTime time.Duration // Features defined above are usually the same for all benchmark runs in a // particular invocation, while the features defined below could vary from // run to run based on the configured command line. These features have a // corresponding featureIndex value which is used for a variety of reasons. 
// EnableTrace indicates if tracing was enabled. EnableTrace bool // Latency is the simulated one-way network latency used. Latency time.Duration // Kbps is the simulated network throughput used. Kbps int // MTU is the simulated network MTU used. MTU int // MaxConcurrentCalls is the number of concurrent RPCs made during this // benchmark run. MaxConcurrentCalls int // ReqSizeBytes is the request size in bytes used in this benchmark run. // Unused if ReqPayloadCurve is non-nil. ReqSizeBytes int // RespSizeBytes is the response size in bytes used in this benchmark run. // Unused if RespPayloadCurve is non-nil. RespSizeBytes int // ReqPayloadCurve is a histogram representing the shape a random // distribution request payloads should take. ReqPayloadCurve *PayloadCurve // RespPayloadCurve is a histogram representing the shape a random // distribution request payloads should take. RespPayloadCurve *PayloadCurve // ModeCompressor represents the compressor mode used. ModeCompressor string // EnableChannelz indicates if channelz was turned on. EnableChannelz bool // EnablePreloader indicates if preloading was turned on. EnablePreloader bool } // String returns all the feature values as a string. 
func (f Features) String() string { var reqPayloadString, respPayloadString string if f.ReqPayloadCurve != nil { reqPayloadString = fmt.Sprintf("reqPayloadCurve_%s", f.ReqPayloadCurve.ShortHash()) } else { reqPayloadString = fmt.Sprintf("reqSize_%vB", f.ReqSizeBytes) } if f.RespPayloadCurve != nil { respPayloadString = fmt.Sprintf("respPayloadCurve_%s", f.RespPayloadCurve.ShortHash()) } else { respPayloadString = fmt.Sprintf("respSize_%vB", f.RespSizeBytes) } return fmt.Sprintf("networkMode_%v-bufConn_%v-keepalive_%v-benchTime_%v-"+ "trace_%v-latency_%v-kbps_%v-MTU_%v-maxConcurrentCalls_%v-%s-%s-"+ "compressor_%v-channelz_%v-preloader_%v", f.NetworkMode, f.UseBufConn, f.EnableKeepalive, f.BenchTime, f.EnableTrace, f.Latency, f.Kbps, f.MTU, f.MaxConcurrentCalls, reqPayloadString, respPayloadString, f.ModeCompressor, f.EnableChannelz, f.EnablePreloader) } // SharedFeatures returns the shared features as a pretty printable string. // 'wantFeatures' is a bitmask of wanted features, indexed by FeaturesIndex. func (f Features) SharedFeatures(wantFeatures []bool) string { var b bytes.Buffer if f.NetworkMode != "" { b.WriteString(fmt.Sprintf("Network: %v\n", f.NetworkMode)) } if f.UseBufConn { b.WriteString(fmt.Sprintf("UseBufConn: %v\n", f.UseBufConn)) } if f.EnableKeepalive { b.WriteString(fmt.Sprintf("EnableKeepalive: %v\n", f.EnableKeepalive)) } b.WriteString(fmt.Sprintf("BenchTime: %v\n", f.BenchTime)) f.partialString(&b, wantFeatures, ": ", "\n") return b.String() } // PrintableName returns a one line name which includes the features specified // by 'wantFeatures' which is a bitmask of wanted features, indexed by // FeaturesIndex. func (f Features) PrintableName(wantFeatures []bool) string { var b bytes.Buffer f.partialString(&b, wantFeatures, "_", "-") return b.String() } // partialString writes features specified by 'wantFeatures' to the provided // bytes.Buffer. 
func (f Features) partialString(b *bytes.Buffer, wantFeatures []bool, sep, delim string) {
	for i, sf := range wantFeatures {
		if sf {
			switch FeatureIndex(i) {
			case EnableTraceIndex:
				b.WriteString(fmt.Sprintf("Trace%v%v%v", sep, f.EnableTrace, delim))
			case ReadLatenciesIndex:
				b.WriteString(fmt.Sprintf("Latency%v%v%v", sep, f.Latency, delim))
			case ReadKbpsIndex:
				b.WriteString(fmt.Sprintf("Kbps%v%v%v", sep, f.Kbps, delim))
			case ReadMTUIndex:
				b.WriteString(fmt.Sprintf("MTU%v%v%v", sep, f.MTU, delim))
			case MaxConcurrentCallsIndex:
				b.WriteString(fmt.Sprintf("Callers%v%v%v", sep, f.MaxConcurrentCalls, delim))
			case ReqSizeBytesIndex:
				b.WriteString(fmt.Sprintf("ReqSize%v%vB%v", sep, f.ReqSizeBytes, delim))
			case RespSizeBytesIndex:
				b.WriteString(fmt.Sprintf("RespSize%v%vB%v", sep, f.RespSizeBytes, delim))
			case ReqPayloadCurveIndex:
				b.WriteString(fmt.Sprintf("ReqPayloadCurve%vSHA-256:%v%v", sep, f.ReqPayloadCurve.Hash(), delim))
			case RespPayloadCurveIndex:
				b.WriteString(fmt.Sprintf("RespPayloadCurve%vSHA-256:%v%v", sep, f.RespPayloadCurve.Hash(), delim))
			case CompModesIndex:
				b.WriteString(fmt.Sprintf("Compressor%v%v%v", sep, f.ModeCompressor, delim))
			case EnableChannelzIndex:
				b.WriteString(fmt.Sprintf("Channelz%v%v%v", sep, f.EnableChannelz, delim))
			case EnablePreloaderIndex:
				b.WriteString(fmt.Sprintf("Preloader%v%v%v", sep, f.EnablePreloader, delim))
			default:
				log.Fatalf("Unknown feature index %v. maxFeatureIndex is %v", i, MaxFeatureIndex)
			}
		}
	}
}

// BenchResults records features and results of a benchmark run. A collection
// of these structs is usually serialized and written to a file after a
// benchmark execution, and could later be read for pretty-printing or
// comparison with other benchmark results.
type BenchResults struct {
	// GoVersion is the version of the compiler the benchmark was compiled with.
	GoVersion string
	// GrpcVersion is the gRPC version being benchmarked.
	GrpcVersion string
	// RunMode is the workload mode for this benchmark run. This could be unary,
	// stream or unconstrained.
	RunMode string
	// Features represents the configured feature options for this run.
	Features Features
	// SharedFeatures represents the features which were shared across all
	// benchmark runs during one execution. It is a slice indexed by
	// 'FeatureIndex' and a value of true indicates that the associated
	// feature is shared across all runs.
	SharedFeatures []bool
	// Data contains the statistical data of interest from the benchmark run.
	Data RunData
}

// RunData contains statistical data of interest from a benchmark run.
type RunData struct {
	// TotalOps is the number of operations executed during this benchmark run.
	// Only makes sense for unary and streaming workloads.
	TotalOps uint64
	// SendOps is the number of send operations executed during this benchmark
	// run. Only makes sense for unconstrained workloads.
	SendOps uint64
	// RecvOps is the number of receive operations executed during this benchmark
	// run. Only makes sense for unconstrained workloads.
	RecvOps uint64
	// AllocedBytes is the average memory allocation in bytes per operation.
	AllocedBytes float64
	// Allocs is the average number of memory allocations per operation.
	Allocs float64
	// ReqT is the average request throughput associated with this run.
	ReqT float64
	// RespT is the average response throughput associated with this run.
	RespT float64

	// We store different latencies associated with each run. These latencies are
	// only computed for unary and stream workloads as they are not very useful
	// for unconstrained workloads.

	// Fiftieth is the 50th percentile latency.
	Fiftieth time.Duration
	// Ninetieth is the 90th percentile latency.
	Ninetieth time.Duration
	// NinetyNinth is the 99th percentile latency.
	NinetyNinth time.Duration
	// Average is the average latency.
	Average time.Duration
}

// durationSlice implements sort.Interface so latency percentiles can be read
// from a sorted slice of durations.
type durationSlice []time.Duration

func (a durationSlice) Len() int           { return len(a) }
func (a durationSlice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a durationSlice) Less(i, j int) bool { return a[i] < a[j] }

// Stats is a helper for gathering statistics about individual benchmark runs.
type Stats struct {
	mu         sync.Mutex
	numBuckets int
	hw         *histWrapper
	results    []BenchResults
	startMS    runtime.MemStats
	stopMS     runtime.MemStats
}

// histWrapper bundles the per-run latency histogram with its raw durations
// and the display unit chosen for it.
type histWrapper struct {
	unit      time.Duration
	histogram *Histogram
	durations durationSlice
}

// NewStats creates a new Stats instance. If numBuckets is not positive, the
// default value (16) will be used.
func NewStats(numBuckets int) *Stats {
	if numBuckets <= 0 {
		numBuckets = 16
	}
	// Use one more bucket for the last unbounded bucket.
	s := &Stats{numBuckets: numBuckets + 1}
	s.hw = &histWrapper{}
	return s
}

// StartRun is to be invoked to indicate the start of a new benchmark run.
func (s *Stats) StartRun(mode string, f Features, sf []bool) {
	s.mu.Lock()
	defer s.mu.Unlock()

	runtime.ReadMemStats(&s.startMS)
	s.results = append(s.results, BenchResults{
		GoVersion:      runtime.Version(),
		GrpcVersion:    grpc.Version,
		RunMode:        mode,
		Features:       f,
		SharedFeatures: sf,
	})
}

// EndRun is to be invoked to indicate the end of the ongoing benchmark run. It
// computes a bunch of stats and dumps them to stdout.
func (s *Stats) EndRun(count uint64) { s.mu.Lock() defer s.mu.Unlock() runtime.ReadMemStats(&s.stopMS) r := &s.results[len(s.results)-1] r.Data = RunData{ TotalOps: count, AllocedBytes: float64(s.stopMS.TotalAlloc-s.startMS.TotalAlloc) / float64(count), Allocs: float64(s.stopMS.Mallocs-s.startMS.Mallocs) / float64(count), ReqT: float64(count) * float64(r.Features.ReqSizeBytes) * 8 / r.Features.BenchTime.Seconds(), RespT: float64(count) * float64(r.Features.RespSizeBytes) * 8 / r.Features.BenchTime.Seconds(), } s.computeLatencies(r) s.dump(r) s.hw = &histWrapper{} } // EndUnconstrainedRun is similar to EndRun, but is to be used for // unconstrained workloads. func (s *Stats) EndUnconstrainedRun(req uint64, resp uint64) { s.mu.Lock() defer s.mu.Unlock() runtime.ReadMemStats(&s.stopMS) r := &s.results[len(s.results)-1] r.Data = RunData{ SendOps: req, RecvOps: resp, AllocedBytes: float64(s.stopMS.TotalAlloc-s.startMS.TotalAlloc) / float64((req+resp)/2), Allocs: float64(s.stopMS.Mallocs-s.startMS.Mallocs) / float64((req+resp)/2), ReqT: float64(req) * float64(r.Features.ReqSizeBytes) * 8 / r.Features.BenchTime.Seconds(), RespT: float64(resp) * float64(r.Features.RespSizeBytes) * 8 / r.Features.BenchTime.Seconds(), } s.computeLatencies(r) s.dump(r) s.hw = &histWrapper{} } // AddDuration adds an elapsed duration per operation to the stats. This is // used by unary and stream modes where request and response stats are equal. func (s *Stats) AddDuration(d time.Duration) { s.mu.Lock() defer s.mu.Unlock() s.hw.durations = append(s.hw.durations, d) } // GetResults returns the results from all benchmark runs. func (s *Stats) GetResults() []BenchResults { s.mu.Lock() defer s.mu.Unlock() return s.results } // computeLatencies computes percentile latencies based on durations stored in // the stats object and updates the corresponding fields in the result object. 
func (s *Stats) computeLatencies(result *BenchResults) {
	if len(s.hw.durations) == 0 {
		return
	}
	sort.Sort(s.hw.durations)
	minDuration := int64(s.hw.durations[0])
	maxDuration := int64(s.hw.durations[len(s.hw.durations)-1])

	// Use the largest unit that can represent the minimum time duration.
	s.hw.unit = time.Nanosecond
	for _, u := range []time.Duration{time.Microsecond, time.Millisecond, time.Second} {
		if minDuration <= int64(u) {
			break
		}
		s.hw.unit = u
	}

	numBuckets := s.numBuckets
	if n := int(maxDuration - minDuration + 1); n < numBuckets {
		numBuckets = n
	}
	// NOTE(review): if numBuckets ends up <= 2 here (e.g. all durations are
	// within 1-2ns of each other), 1/float64(numBuckets-2) divides by zero or
	// goes negative, producing an Inf/NaN growth factor — confirm callers
	// guarantee enough spread, or guard this case.
	s.hw.histogram = NewHistogram(HistogramOptions{
		NumBuckets: numBuckets,
		// max-min(lower bound of last bucket) = (1 + growthFactor)^(numBuckets-2) * baseBucketSize.
		GrowthFactor:   math.Pow(float64(maxDuration-minDuration), 1/float64(numBuckets-2)) - 1,
		BaseBucketSize: 1.0,
		MinValue:       minDuration,
	})
	for _, d := range s.hw.durations {
		s.hw.histogram.Add(int64(d))
	}
	// Percentiles are read straight from the sorted durations; max(..., 0)
	// guards the index for very small sample counts.
	result.Data.Fiftieth = s.hw.durations[max(s.hw.histogram.Count*int64(50)/100-1, 0)]
	result.Data.Ninetieth = s.hw.durations[max(s.hw.histogram.Count*int64(90)/100-1, 0)]
	result.Data.NinetyNinth = s.hw.durations[max(s.hw.histogram.Count*int64(99)/100-1, 0)]
	result.Data.Average = time.Duration(float64(s.hw.histogram.Sum) / float64(s.hw.histogram.Count))
}

// dump returns a printable version.
func (s *Stats) dump(result *BenchResults) {
	var b bytes.Buffer

	// Go and gRPC version information.
	b.WriteString(fmt.Sprintf("%s/grpc%s\n", result.GoVersion, result.GrpcVersion))

	// This prints the run mode and all features of the bench on a line.
	b.WriteString(fmt.Sprintf("%s-%s:\n", result.RunMode, result.Features.String()))

	unit := s.hw.unit
	tUnit := fmt.Sprintf("%v", unit)[1:] // stores one of s, ms, μs, ns

	if l := result.Data.Fiftieth; l != 0 {
		b.WriteString(fmt.Sprintf("50_Latency: %s%s\t", strconv.FormatFloat(float64(l)/float64(unit), 'f', 4, 64), tUnit))
	}
	if l := result.Data.Ninetieth; l != 0 {
		b.WriteString(fmt.Sprintf("90_Latency: %s%s\t", strconv.FormatFloat(float64(l)/float64(unit), 'f', 4, 64), tUnit))
	}
	if l := result.Data.NinetyNinth; l != 0 {
		b.WriteString(fmt.Sprintf("99_Latency: %s%s\t", strconv.FormatFloat(float64(l)/float64(unit), 'f', 4, 64), tUnit))
	}
	if l := result.Data.Average; l != 0 {
		b.WriteString(fmt.Sprintf("Avg_Latency: %s%s\t", strconv.FormatFloat(float64(l)/float64(unit), 'f', 4, 64), tUnit))
	}
	b.WriteString(fmt.Sprintf("Bytes/op: %v\t", result.Data.AllocedBytes))
	b.WriteString(fmt.Sprintf("Allocs/op: %v\t\n", result.Data.Allocs))

	// This prints the histogram stats for the latency.
	if s.hw.histogram == nil {
		b.WriteString("Histogram (empty)\n")
	} else {
		b.WriteString(fmt.Sprintf("Histogram (unit: %s)\n", tUnit))
		s.hw.histogram.PrintWithUnit(&b, float64(unit))
	}

	// Print throughput data. Unary/stream runs populate TotalOps instead of
	// SendOps/RecvOps, so fall back to it when the latter are zero.
	req := result.Data.SendOps
	if req == 0 {
		req = result.Data.TotalOps
	}
	resp := result.Data.RecvOps
	if resp == 0 {
		resp = result.Data.TotalOps
	}
	b.WriteString(fmt.Sprintf("Number of requests: %v\tRequest throughput: %v bit/s\n", req, result.Data.ReqT))
	b.WriteString(fmt.Sprintf("Number of responses: %v\tResponse throughput: %v bit/s\n", resp, result.Data.RespT))
	fmt.Println(b.String())
}

// max returns the larger of two int64 values.
func max(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}
grpc-go-1.29.1/benchmark/worker/000077500000000000000000000000001365033716300164145ustar00rootroot00000000000000grpc-go-1.29.1/benchmark/worker/benchmark_client.go000066400000000000000000000300531365033716300222340ustar00rootroot00000000000000/*
 *
 * Copyright 2016 gRPC authors.
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package main import ( "context" "flag" "math" "runtime" "sync" "time" "google.golang.org/grpc" "google.golang.org/grpc/benchmark" testpb "google.golang.org/grpc/benchmark/grpc_testing" "google.golang.org/grpc/benchmark/stats" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/status" "google.golang.org/grpc/testdata" ) var caFile = flag.String("ca_file", "", "The file containing the CA root cert file") type lockingHistogram struct { mu sync.Mutex histogram *stats.Histogram } func (h *lockingHistogram) add(value int64) { h.mu.Lock() defer h.mu.Unlock() h.histogram.Add(value) } // swap sets h.histogram to o and returns its old value. func (h *lockingHistogram) swap(o *stats.Histogram) *stats.Histogram { h.mu.Lock() defer h.mu.Unlock() old := h.histogram h.histogram = o return old } func (h *lockingHistogram) mergeInto(merged *stats.Histogram) { h.mu.Lock() defer h.mu.Unlock() merged.Merge(h.histogram) } type benchmarkClient struct { closeConns func() stop chan bool lastResetTime time.Time histogramOptions stats.HistogramOptions lockingHistograms []lockingHistogram rusageLastReset *syscall.Rusage } func printClientConfig(config *testpb.ClientConfig) { // Some config options are ignored: // - client type: // will always create sync client // - async client threads. 
// - core list grpclog.Infof(" * client type: %v (ignored, always creates sync client)", config.ClientType) grpclog.Infof(" * async client threads: %v (ignored)", config.AsyncClientThreads) // TODO: use cores specified by CoreList when setting list of cores is supported in go. grpclog.Infof(" * core list: %v (ignored)", config.CoreList) grpclog.Infof(" - security params: %v", config.SecurityParams) grpclog.Infof(" - core limit: %v", config.CoreLimit) grpclog.Infof(" - payload config: %v", config.PayloadConfig) grpclog.Infof(" - rpcs per chann: %v", config.OutstandingRpcsPerChannel) grpclog.Infof(" - channel number: %v", config.ClientChannels) grpclog.Infof(" - load params: %v", config.LoadParams) grpclog.Infof(" - rpc type: %v", config.RpcType) grpclog.Infof(" - histogram params: %v", config.HistogramParams) grpclog.Infof(" - server targets: %v", config.ServerTargets) } func setupClientEnv(config *testpb.ClientConfig) { // Use all cpu cores available on machine by default. // TODO: Revisit this for the optimal default setup. if config.CoreLimit > 0 { runtime.GOMAXPROCS(int(config.CoreLimit)) } else { runtime.GOMAXPROCS(runtime.NumCPU()) } } // createConns creates connections according to given config. // It returns the connections and corresponding function to close them. // It returns non-nil error if there is anything wrong. func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error) { var opts []grpc.DialOption // Sanity check for client type. switch config.ClientType { case testpb.ClientType_SYNC_CLIENT: case testpb.ClientType_ASYNC_CLIENT: default: return nil, nil, status.Errorf(codes.InvalidArgument, "unknown client type: %v", config.ClientType) } // Check and set security options. 
if config.SecurityParams != nil { if *caFile == "" { *caFile = testdata.Path("ca.pem") } creds, err := credentials.NewClientTLSFromFile(*caFile, config.SecurityParams.ServerHostOverride) if err != nil { return nil, nil, status.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err) } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { opts = append(opts, grpc.WithInsecure()) } // Use byteBufCodec if it is required. if config.PayloadConfig != nil { switch config.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: opts = append(opts, grpc.WithDefaultCallOptions(grpc.CallCustomCodec(byteBufCodec{}))) case *testpb.PayloadConfig_SimpleParams: default: return nil, nil, status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig) } } // Create connections. connCount := int(config.ClientChannels) conns := make([]*grpc.ClientConn, connCount) for connIndex := 0; connIndex < connCount; connIndex++ { conns[connIndex] = benchmark.NewClientConn(config.ServerTargets[connIndex%len(config.ServerTargets)], opts...) } return conns, func() { for _, conn := range conns { conn.Close() } }, nil } func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benchmarkClient) error { // Read payload size and type from config. var ( payloadReqSize, payloadRespSize int payloadType string ) if config.PayloadConfig != nil { switch c := config.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: payloadReqSize = int(c.BytebufParams.ReqSize) payloadRespSize = int(c.BytebufParams.RespSize) payloadType = "bytebuf" case *testpb.PayloadConfig_SimpleParams: payloadReqSize = int(c.SimpleParams.ReqSize) payloadRespSize = int(c.SimpleParams.RespSize) payloadType = "protobuf" default: return status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig) } } // TODO add open loop distribution. 
switch config.LoadParams.Load.(type) { case *testpb.LoadParams_ClosedLoop: case *testpb.LoadParams_Poisson: return status.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) default: return status.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams) } rpcCountPerConn := int(config.OutstandingRpcsPerChannel) switch config.RpcType { case testpb.RpcType_UNARY: bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize) // TODO open loop. case testpb.RpcType_STREAMING: bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType) // TODO open loop. default: return status.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType) } return nil } func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) { printClientConfig(config) // Set running environment like how many cores to use. setupClientEnv(config) conns, closeConns, err := createConns(config) if err != nil { return nil, err } rpcCountPerConn := int(config.OutstandingRpcsPerChannel) bc := &benchmarkClient{ histogramOptions: stats.HistogramOptions{ NumBuckets: int(math.Log(config.HistogramParams.MaxPossible)/math.Log(1+config.HistogramParams.Resolution)) + 1, GrowthFactor: config.HistogramParams.Resolution, BaseBucketSize: (1 + config.HistogramParams.Resolution), MinValue: 0, }, lockingHistograms: make([]lockingHistogram, rpcCountPerConn*len(conns)), stop: make(chan bool), lastResetTime: time.Now(), closeConns: closeConns, rusageLastReset: syscall.GetRusage(), } if err = performRPCs(config, conns, bc); err != nil { // Close all connections if performRPCs failed. closeConns() return nil, err } return bc, nil } func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) { for ic, conn := range conns { client := testpb.NewBenchmarkServiceClient(conn) // For each connection, create rpcCountPerConn goroutines to do rpc. 
for j := 0; j < rpcCountPerConn; j++ { // Create histogram for each goroutine. idx := ic*rpcCountPerConn + j bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions) // Start goroutine on the created mutex and histogram. go func(idx int) { // TODO: do warm up if necessary. // Now relying on worker client to reserve time to do warm up. // The worker client needs to wait for some time after client is created, // before starting benchmark. done := make(chan bool) for { go func() { start := time.Now() if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil { select { case <-bc.stop: case done <- false: } return } elapse := time.Since(start) bc.lockingHistograms[idx].add(int64(elapse)) select { case <-bc.stop: case done <- true: } }() select { case <-bc.stop: return case <-done: } } }(idx) } } } func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) { var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) error if payloadType == "bytebuf" { doRPC = benchmark.DoByteBufStreamingRoundTrip } else { doRPC = benchmark.DoStreamingRoundTrip } for ic, conn := range conns { // For each connection, create rpcCountPerConn goroutines to do rpc. for j := 0; j < rpcCountPerConn; j++ { c := testpb.NewBenchmarkServiceClient(conn) stream, err := c.StreamingCall(context.Background()) if err != nil { grpclog.Fatalf("%v.StreamingCall(_) = _, %v", c, err) } // Create histogram for each goroutine. idx := ic*rpcCountPerConn + j bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions) // Start goroutine on the created mutex and histogram. go func(idx int) { // TODO: do warm up if necessary. // Now relying on worker client to reserve time to do warm up. // The worker client needs to wait for some time after client is created, // before starting benchmark. 
for { start := time.Now() if err := doRPC(stream, reqSize, respSize); err != nil { return } elapse := time.Since(start) bc.lockingHistograms[idx].add(int64(elapse)) select { case <-bc.stop: return default: } } }(idx) } } } // getStats returns the stats for benchmark client. // It resets lastResetTime and all histograms if argument reset is true. func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats { var wallTimeElapsed, uTimeElapsed, sTimeElapsed float64 mergedHistogram := stats.NewHistogram(bc.histogramOptions) if reset { // Merging histogram may take some time. // Put all histograms aside and merge later. toMerge := make([]*stats.Histogram, len(bc.lockingHistograms)) for i := range bc.lockingHistograms { toMerge[i] = bc.lockingHistograms[i].swap(stats.NewHistogram(bc.histogramOptions)) } for i := 0; i < len(toMerge); i++ { mergedHistogram.Merge(toMerge[i]) } wallTimeElapsed = time.Since(bc.lastResetTime).Seconds() latestRusage := syscall.GetRusage() uTimeElapsed, sTimeElapsed = syscall.CPUTimeDiff(bc.rusageLastReset, latestRusage) bc.rusageLastReset = latestRusage bc.lastResetTime = time.Now() } else { // Merge only, not reset. 
for i := range bc.lockingHistograms { bc.lockingHistograms[i].mergeInto(mergedHistogram) } wallTimeElapsed = time.Since(bc.lastResetTime).Seconds() uTimeElapsed, sTimeElapsed = syscall.CPUTimeDiff(bc.rusageLastReset, syscall.GetRusage()) } b := make([]uint32, len(mergedHistogram.Buckets)) for i, v := range mergedHistogram.Buckets { b[i] = uint32(v.Count) } return &testpb.ClientStats{ Latencies: &testpb.HistogramData{ Bucket: b, MinSeen: float64(mergedHistogram.Min), MaxSeen: float64(mergedHistogram.Max), Sum: float64(mergedHistogram.Sum), SumOfSquares: float64(mergedHistogram.SumOfSquares), Count: float64(mergedHistogram.Count), }, TimeElapsed: wallTimeElapsed, TimeUser: uTimeElapsed, TimeSystem: sTimeElapsed, } } func (bc *benchmarkClient) shutdown() { close(bc.stop) bc.closeConns() } grpc-go-1.29.1/benchmark/worker/benchmark_server.go000066400000000000000000000127021365033716300222650ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package main import ( "flag" "fmt" "net" "runtime" "strconv" "strings" "sync" "time" "google.golang.org/grpc" "google.golang.org/grpc/benchmark" testpb "google.golang.org/grpc/benchmark/grpc_testing" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/status" "google.golang.org/grpc/testdata" ) var ( certFile = flag.String("tls_cert_file", "", "The TLS cert file") keyFile = flag.String("tls_key_file", "", "The TLS key file") ) type benchmarkServer struct { port int cores int closeFunc func() mu sync.RWMutex lastResetTime time.Time rusageLastReset *syscall.Rusage } func printServerConfig(config *testpb.ServerConfig) { // Some config options are ignored: // - server type: // will always start sync server // - async server threads // - core list grpclog.Infof(" * server type: %v (ignored, always starts sync server)", config.ServerType) grpclog.Infof(" * async server threads: %v (ignored)", config.AsyncServerThreads) // TODO: use cores specified by CoreList when setting list of cores is supported in go. grpclog.Infof(" * core list: %v (ignored)", config.CoreList) grpclog.Infof(" - security params: %v", config.SecurityParams) grpclog.Infof(" - core limit: %v", config.CoreLimit) grpclog.Infof(" - port: %v", config.Port) grpclog.Infof(" - payload config: %v", config.PayloadConfig) } func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) { printServerConfig(config) // Use all cpu cores available on machine by default. // TODO: Revisit this for the optimal default setup. numOfCores := runtime.NumCPU() if config.CoreLimit > 0 { numOfCores = int(config.CoreLimit) } runtime.GOMAXPROCS(numOfCores) var opts []grpc.ServerOption // Sanity check for server type. 
switch config.ServerType { case testpb.ServerType_SYNC_SERVER: case testpb.ServerType_ASYNC_SERVER: case testpb.ServerType_ASYNC_GENERIC_SERVER: default: return nil, status.Errorf(codes.InvalidArgument, "unknown server type: %v", config.ServerType) } // Set security options. if config.SecurityParams != nil { if *certFile == "" { *certFile = testdata.Path("server1.pem") } if *keyFile == "" { *keyFile = testdata.Path("server1.key") } creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { grpclog.Fatalf("failed to generate credentials %v", err) } opts = append(opts, grpc.Creds(creds)) } // Priority: config.Port > serverPort > default (0). port := int(config.Port) if port == 0 { port = serverPort } lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) if err != nil { grpclog.Fatalf("Failed to listen: %v", err) } addr := lis.Addr().String() // Create different benchmark server according to config. var closeFunc func() if config.PayloadConfig != nil { switch payload := config.PayloadConfig.Payload.(type) { case *testpb.PayloadConfig_BytebufParams: opts = append(opts, grpc.CustomCodec(byteBufCodec{})) closeFunc = benchmark.StartServer(benchmark.ServerInfo{ Type: "bytebuf", Metadata: payload.BytebufParams.RespSize, Listener: lis, }, opts...) case *testpb.PayloadConfig_SimpleParams: closeFunc = benchmark.StartServer(benchmark.ServerInfo{ Type: "protobuf", Listener: lis, }, opts...) case *testpb.PayloadConfig_ComplexParams: return nil, status.Errorf(codes.Unimplemented, "unsupported payload config: %v", config.PayloadConfig) default: return nil, status.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig) } } else { // Start protobuf server if payload config is nil. closeFunc = benchmark.StartServer(benchmark.ServerInfo{ Type: "protobuf", Listener: lis, }, opts...) 
} grpclog.Infof("benchmark server listening at %v", addr) addrSplitted := strings.Split(addr, ":") p, err := strconv.Atoi(addrSplitted[len(addrSplitted)-1]) if err != nil { grpclog.Fatalf("failed to get port number from server address: %v", err) } return &benchmarkServer{ port: p, cores: numOfCores, closeFunc: closeFunc, lastResetTime: time.Now(), rusageLastReset: syscall.GetRusage(), }, nil } // getStats returns the stats for benchmark server. // It resets lastResetTime if argument reset is true. func (bs *benchmarkServer) getStats(reset bool) *testpb.ServerStats { bs.mu.RLock() defer bs.mu.RUnlock() wallTimeElapsed := time.Since(bs.lastResetTime).Seconds() rusageLatest := syscall.GetRusage() uTimeElapsed, sTimeElapsed := syscall.CPUTimeDiff(bs.rusageLastReset, rusageLatest) if reset { bs.lastResetTime = time.Now() bs.rusageLastReset = rusageLatest } return &testpb.ServerStats{ TimeElapsed: wallTimeElapsed, TimeUser: uTimeElapsed, TimeSystem: sTimeElapsed, } } grpc-go-1.29.1/benchmark/worker/main.go000066400000000000000000000133361365033716300176750ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package main import ( "context" "flag" "fmt" "io" "net" "net/http" _ "net/http/pprof" "runtime" "strconv" "time" "google.golang.org/grpc" testpb "google.golang.org/grpc/benchmark/grpc_testing" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) var ( driverPort = flag.Int("driver_port", 10000, "port for communication with driver") serverPort = flag.Int("server_port", 0, "port for benchmark server if not specified by server config message") pprofPort = flag.Int("pprof_port", -1, "Port for pprof debug server to listen on. Pprof server doesn't start if unset") blockProfRate = flag.Int("block_prof_rate", 0, "fraction of goroutine blocking events to report in blocking profile") ) type byteBufCodec struct { } func (byteBufCodec) Marshal(v interface{}) ([]byte, error) { b, ok := v.(*[]byte) if !ok { return nil, fmt.Errorf("failed to marshal: %v is not type of *[]byte", v) } return *b, nil } func (byteBufCodec) Unmarshal(data []byte, v interface{}) error { b, ok := v.(*[]byte) if !ok { return fmt.Errorf("failed to marshal: %v is not type of *[]byte", v) } *b = data return nil } func (byteBufCodec) String() string { return "bytebuffer" } // workerServer implements WorkerService rpc handlers. // It can create benchmarkServer or benchmarkClient on demand. type workerServer struct { stop chan<- bool serverPort int } func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error { var bs *benchmarkServer defer func() { // Close benchmark server when stream ends. 
grpclog.Infof("closing benchmark server") if bs != nil { bs.closeFunc() } }() for { in, err := stream.Recv() if err == io.EOF { return nil } if err != nil { return err } var out *testpb.ServerStatus switch argtype := in.Argtype.(type) { case *testpb.ServerArgs_Setup: grpclog.Infof("server setup received:") if bs != nil { grpclog.Infof("server setup received when server already exists, closing the existing server") bs.closeFunc() } bs, err = startBenchmarkServer(argtype.Setup, s.serverPort) if err != nil { return err } out = &testpb.ServerStatus{ Stats: bs.getStats(false), Port: int32(bs.port), Cores: int32(bs.cores), } case *testpb.ServerArgs_Mark: grpclog.Infof("server mark received:") grpclog.Infof(" - %v", argtype) if bs == nil { return status.Error(codes.InvalidArgument, "server does not exist when mark received") } out = &testpb.ServerStatus{ Stats: bs.getStats(argtype.Mark.Reset_), Port: int32(bs.port), Cores: int32(bs.cores), } } if err := stream.Send(out); err != nil { return err } } } func (s *workerServer) RunClient(stream testpb.WorkerService_RunClientServer) error { var bc *benchmarkClient defer func() { // Shut down benchmark client when stream ends. 
grpclog.Infof("shuting down benchmark client") if bc != nil { bc.shutdown() } }() for { in, err := stream.Recv() if err == io.EOF { return nil } if err != nil { return err } var out *testpb.ClientStatus switch t := in.Argtype.(type) { case *testpb.ClientArgs_Setup: grpclog.Infof("client setup received:") if bc != nil { grpclog.Infof("client setup received when client already exists, shuting down the existing client") bc.shutdown() } bc, err = startBenchmarkClient(t.Setup) if err != nil { return err } out = &testpb.ClientStatus{ Stats: bc.getStats(false), } case *testpb.ClientArgs_Mark: grpclog.Infof("client mark received:") grpclog.Infof(" - %v", t) if bc == nil { return status.Error(codes.InvalidArgument, "client does not exist when mark received") } out = &testpb.ClientStatus{ Stats: bc.getStats(t.Mark.Reset_), } } if err := stream.Send(out); err != nil { return err } } } func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (*testpb.CoreResponse, error) { grpclog.Infof("core count: %v", runtime.NumCPU()) return &testpb.CoreResponse{Cores: int32(runtime.NumCPU())}, nil } func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) { grpclog.Infof("quitting worker") s.stop <- true return &testpb.Void{}, nil } func main() { grpc.EnableTracing = false flag.Parse() lis, err := net.Listen("tcp", ":"+strconv.Itoa(*driverPort)) if err != nil { grpclog.Fatalf("failed to listen: %v", err) } grpclog.Infof("worker listening at port %v", *driverPort) s := grpc.NewServer() stop := make(chan bool) testpb.RegisterWorkerServiceServer(s, &workerServer{ stop: stop, serverPort: *serverPort, }) go func() { <-stop // Wait for 1 second before stopping the server to make sure the return value of QuitWorker is sent to client. // TODO revise this once server graceful stop is supported in gRPC. 
time.Sleep(time.Second) s.Stop() }() runtime.SetBlockProfileRate(*blockProfRate) if *pprofPort >= 0 { go func() { grpclog.Infoln("Starting pprof server on port " + strconv.Itoa(*pprofPort)) grpclog.Infoln(http.ListenAndServe("localhost:"+strconv.Itoa(*pprofPort), nil)) }() } s.Serve(lis) } grpc-go-1.29.1/binarylog/000077500000000000000000000000001365033716300151375ustar00rootroot00000000000000grpc-go-1.29.1/binarylog/grpc_binarylog_v1/000077500000000000000000000000001365033716300205465ustar00rootroot00000000000000grpc-go-1.29.1/binarylog/grpc_binarylog_v1/binarylog.pb.go000066400000000000000000001010111365033716300234550ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import duration "github.com/golang/protobuf/ptypes/duration" import timestamp "github.com/golang/protobuf/ptypes/timestamp" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. 
type GrpcLogEntry_EventType int32 const ( GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 // Header sent from client to server GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 // Header sent from server to client GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 // Message sent from client to server GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 // Message sent from server to client GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 // A signal that client is done sending GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 // Trailer indicates the end of the RPC. // On client side, this event means a trailer was either received // from the network or the gRPC library locally generated a status // to inform the application about a failure. // On server side, this event means the server application requested // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after // this due to races on server side. GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 // A signal that the RPC is cancelled. On client side, this // indicates the client application requests a cancellation. // On server side, this indicates that cancellation was detected. // Note: This marks the end of the RPC. Events may arrive after // this due to races. For example, on client side a trailer // may arrive even though the application requested to cancel the RPC. 
GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 ) var GrpcLogEntry_EventType_name = map[int32]string{ 0: "EVENT_TYPE_UNKNOWN", 1: "EVENT_TYPE_CLIENT_HEADER", 2: "EVENT_TYPE_SERVER_HEADER", 3: "EVENT_TYPE_CLIENT_MESSAGE", 4: "EVENT_TYPE_SERVER_MESSAGE", 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", 6: "EVENT_TYPE_SERVER_TRAILER", 7: "EVENT_TYPE_CANCEL", } var GrpcLogEntry_EventType_value = map[string]int32{ "EVENT_TYPE_UNKNOWN": 0, "EVENT_TYPE_CLIENT_HEADER": 1, "EVENT_TYPE_SERVER_HEADER": 2, "EVENT_TYPE_CLIENT_MESSAGE": 3, "EVENT_TYPE_SERVER_MESSAGE": 4, "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, "EVENT_TYPE_SERVER_TRAILER": 6, "EVENT_TYPE_CANCEL": 7, } func (x GrpcLogEntry_EventType) String() string { return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) } func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} } // Enumerates the entity that generates the log entry type GrpcLogEntry_Logger int32 const ( GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 ) var GrpcLogEntry_Logger_name = map[int32]string{ 0: "LOGGER_UNKNOWN", 1: "LOGGER_CLIENT", 2: "LOGGER_SERVER", } var GrpcLogEntry_Logger_value = map[string]int32{ "LOGGER_UNKNOWN": 0, "LOGGER_CLIENT": 1, "LOGGER_SERVER": 2, } func (x GrpcLogEntry_Logger) String() string { return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) } func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} } type Address_Type int32 const ( Address_TYPE_UNKNOWN Address_Type = 0 // address is in 1.2.3.4 form Address_TYPE_IPV4 Address_Type = 1 // address is in IPv6 canonical form (RFC5952 section 4) // The scope is NOT included in the address string. 
Address_TYPE_IPV6 Address_Type = 2 // address is UDS string Address_TYPE_UNIX Address_Type = 3 ) var Address_Type_name = map[int32]string{ 0: "TYPE_UNKNOWN", 1: "TYPE_IPV4", 2: "TYPE_IPV6", 3: "TYPE_UNIX", } var Address_Type_value = map[string]int32{ "TYPE_UNKNOWN": 0, "TYPE_IPV4": 1, "TYPE_IPV6": 2, "TYPE_UNIX": 3, } func (x Address_Type) String() string { return proto.EnumName(Address_Type_name, int32(x)) } func (Address_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} } // Log entry we store in binary logs type GrpcLogEntry struct { // The timestamp of the binary log message Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Uniquely identifies a call. The value must not be 0 in order to disambiguate // from an unset value. // Each call may have several log entries, they will all have the same call_id. // Nothing is guaranteed about their value other than they are unique across // different RPCs in the same gRPC process. CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` // The entry sequence id for this call. The first GrpcLogEntry has a // value of 1, to disambiguate from an unset value. The purpose of // this field is to detect missing entries in environments where // durability or ordering is not guaranteed. SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // The logger uses one of the following fields to record the payload, // according to the type of the log entry. 
// // Types that are valid to be assigned to Payload: // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message // *GrpcLogEntry_Trailer Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` // true if payload does not represent the full message or metadata. PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` // Peer address information, will only be recorded on the first // incoming event. On client side, peer is logged on // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in // the case of trailers-only. On server side, peer is always // logged on EVENT_TYPE_CLIENT_HEADER. Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } func (*GrpcLogEntry) ProtoMessage() {} func (*GrpcLogEntry) Descriptor() ([]byte, []int) { return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} } func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) } func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) } func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { xxx_messageInfo_GrpcLogEntry.Merge(dst, src) } func (m *GrpcLogEntry) XXX_Size() int { return xxx_messageInfo_GrpcLogEntry.Size(m) } func (m *GrpcLogEntry) XXX_DiscardUnknown() { xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m) } var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { if m != nil { return m.Timestamp } return nil } func (m *GrpcLogEntry) GetCallId() uint64 { if m != nil { return m.CallId } return 0 } func (m 
*GrpcLogEntry) GetSequenceIdWithinCall() uint64 { if m != nil { return m.SequenceIdWithinCall } return 0 } func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType { if m != nil { return m.Type } return GrpcLogEntry_EVENT_TYPE_UNKNOWN } func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { if m != nil { return m.Logger } return GrpcLogEntry_LOGGER_UNKNOWN } type isGrpcLogEntry_Payload interface { isGrpcLogEntry_Payload() } type GrpcLogEntry_ClientHeader struct { ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` } type GrpcLogEntry_ServerHeader struct { ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` } type GrpcLogEntry_Message struct { Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` } type GrpcLogEntry_Trailer struct { Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` } func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { if m != nil { return m.Payload } return nil } func (m *GrpcLogEntry) GetClientHeader() *ClientHeader { if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok { return x.ClientHeader } return nil } func (m *GrpcLogEntry) GetServerHeader() *ServerHeader { if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok { return x.ServerHeader } return nil } func (m *GrpcLogEntry) GetMessage() *Message { if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok { return x.Message } return nil } func (m *GrpcLogEntry) GetTrailer() *Trailer { if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok { return x.Trailer } return nil } func (m *GrpcLogEntry) GetPayloadTruncated() bool { if m != nil { return m.PayloadTruncated } return false } func (m *GrpcLogEntry) GetPeer() 
*Address { if m != nil { return m.Peer } return nil } // XXX_OneofFuncs is for the internal use of the proto package. func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), (*GrpcLogEntry_Trailer)(nil), } } func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { m := msg.(*GrpcLogEntry) // payload switch x := m.Payload.(type) { case *GrpcLogEntry_ClientHeader: b.EncodeVarint(6<<3 | proto.WireBytes) if err := b.EncodeMessage(x.ClientHeader); err != nil { return err } case *GrpcLogEntry_ServerHeader: b.EncodeVarint(7<<3 | proto.WireBytes) if err := b.EncodeMessage(x.ServerHeader); err != nil { return err } case *GrpcLogEntry_Message: b.EncodeVarint(8<<3 | proto.WireBytes) if err := b.EncodeMessage(x.Message); err != nil { return err } case *GrpcLogEntry_Trailer: b.EncodeVarint(9<<3 | proto.WireBytes) if err := b.EncodeMessage(x.Trailer); err != nil { return err } case nil: default: return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x) } return nil } func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { m := msg.(*GrpcLogEntry) switch tag { case 6: // payload.client_header if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } msg := new(ClientHeader) err := b.DecodeMessage(msg) m.Payload = &GrpcLogEntry_ClientHeader{msg} return true, err case 7: // payload.server_header if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } msg := new(ServerHeader) err := b.DecodeMessage(msg) m.Payload = &GrpcLogEntry_ServerHeader{msg} return true, err case 8: // payload.message if wire != proto.WireBytes { 
return true, proto.ErrInternalBadWireType } msg := new(Message) err := b.DecodeMessage(msg) m.Payload = &GrpcLogEntry_Message{msg} return true, err case 9: // payload.trailer if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } msg := new(Trailer) err := b.DecodeMessage(msg) m.Payload = &GrpcLogEntry_Trailer{msg} return true, err default: return false, nil } } func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) { m := msg.(*GrpcLogEntry) // payload switch x := m.Payload.(type) { case *GrpcLogEntry_ClientHeader: s := proto.Size(x.ClientHeader) n += 1 // tag and wire n += proto.SizeVarint(uint64(s)) n += s case *GrpcLogEntry_ServerHeader: s := proto.Size(x.ServerHeader) n += 1 // tag and wire n += proto.SizeVarint(uint64(s)) n += s case *GrpcLogEntry_Message: s := proto.Size(x.Message) n += 1 // tag and wire n += proto.SizeVarint(uint64(s)) n += s case *GrpcLogEntry_Trailer: s := proto.Size(x.Trailer) n += 1 // tag and wire n += proto.SizeVarint(uint64(s)) n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } return n } type ClientHeader struct { // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The name of the RPC method, which looks something like: // // // Note the leading "/" character. MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` // A single process may be used to run multiple virtual // servers with different identities. // The authority is the name of such a server identitiy. // It is typically a portion of the URI in the form of // or : . 
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` // the RPC timeout Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ClientHeader) Reset() { *m = ClientHeader{} } func (m *ClientHeader) String() string { return proto.CompactTextString(m) } func (*ClientHeader) ProtoMessage() {} func (*ClientHeader) Descriptor() ([]byte, []int) { return fileDescriptor_binarylog_264c8c9c551ce911, []int{1} } func (m *ClientHeader) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ClientHeader.Unmarshal(m, b) } func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic) } func (dst *ClientHeader) XXX_Merge(src proto.Message) { xxx_messageInfo_ClientHeader.Merge(dst, src) } func (m *ClientHeader) XXX_Size() int { return xxx_messageInfo_ClientHeader.Size(m) } func (m *ClientHeader) XXX_DiscardUnknown() { xxx_messageInfo_ClientHeader.DiscardUnknown(m) } var xxx_messageInfo_ClientHeader proto.InternalMessageInfo func (m *ClientHeader) GetMetadata() *Metadata { if m != nil { return m.Metadata } return nil } func (m *ClientHeader) GetMethodName() string { if m != nil { return m.MethodName } return "" } func (m *ClientHeader) GetAuthority() string { if m != nil { return m.Authority } return "" } func (m *ClientHeader) GetTimeout() *duration.Duration { if m != nil { return m.Timeout } return nil } type ServerHeader struct { // This contains only the metadata from the application. 
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ServerHeader) Reset() { *m = ServerHeader{} } func (m *ServerHeader) String() string { return proto.CompactTextString(m) } func (*ServerHeader) ProtoMessage() {} func (*ServerHeader) Descriptor() ([]byte, []int) { return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} } func (m *ServerHeader) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServerHeader.Unmarshal(m, b) } func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) } func (dst *ServerHeader) XXX_Merge(src proto.Message) { xxx_messageInfo_ServerHeader.Merge(dst, src) } func (m *ServerHeader) XXX_Size() int { return xxx_messageInfo_ServerHeader.Size(m) } func (m *ServerHeader) XXX_DiscardUnknown() { xxx_messageInfo_ServerHeader.DiscardUnknown(m) } var xxx_messageInfo_ServerHeader proto.InternalMessageInfo func (m *ServerHeader) GetMetadata() *Metadata { if m != nil { return m.Metadata } return nil } type Trailer struct { // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The gRPC status code. StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` // An original status message before any transport specific // encoding. StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` // The value of the 'grpc-status-details-bin' metadata key. If // present, this is always an encoded 'google.rpc.Status' message. 
StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Trailer) Reset() { *m = Trailer{} } func (m *Trailer) String() string { return proto.CompactTextString(m) } func (*Trailer) ProtoMessage() {} func (*Trailer) Descriptor() ([]byte, []int) { return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} } func (m *Trailer) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Trailer.Unmarshal(m, b) } func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) } func (dst *Trailer) XXX_Merge(src proto.Message) { xxx_messageInfo_Trailer.Merge(dst, src) } func (m *Trailer) XXX_Size() int { return xxx_messageInfo_Trailer.Size(m) } func (m *Trailer) XXX_DiscardUnknown() { xxx_messageInfo_Trailer.DiscardUnknown(m) } var xxx_messageInfo_Trailer proto.InternalMessageInfo func (m *Trailer) GetMetadata() *Metadata { if m != nil { return m.Metadata } return nil } func (m *Trailer) GetStatusCode() uint32 { if m != nil { return m.StatusCode } return 0 } func (m *Trailer) GetStatusMessage() string { if m != nil { return m.StatusMessage } return "" } func (m *Trailer) GetStatusDetails() []byte { if m != nil { return m.StatusDetails } return nil } // Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE type Message struct { // Length of the message. It may not be the same as the length of the // data field, as the logging payload can be truncated or omitted. Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` // May be truncated or omitted. 
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} } func (m *Message) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Message.Unmarshal(m, b) } func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Message.Marshal(b, m, deterministic) } func (dst *Message) XXX_Merge(src proto.Message) { xxx_messageInfo_Message.Merge(dst, src) } func (m *Message) XXX_Size() int { return xxx_messageInfo_Message.Size(m) } func (m *Message) XXX_DiscardUnknown() { xxx_messageInfo_Message.DiscardUnknown(m) } var xxx_messageInfo_Message proto.InternalMessageInfo func (m *Message) GetLength() uint32 { if m != nil { return m.Length } return 0 } func (m *Message) GetData() []byte { if m != nil { return m.Data } return nil } // A list of metadata pairs, used in the payload of client header, // server header, and server trailer. // Implementations may omit some entries to honor the header limits // of GRPC_BINARY_LOG_CONFIG. // // Header keys added by gRPC are omitted. To be more specific, // implementations will not log the following entries, and this is // not to be treated as a truncation: // - entries handled by grpc that are not user visible, such as those // that begin with 'grpc-' (with exception of grpc-trace-bin) // or keys like 'lb-token' // - transport specific entries, including but not limited to: // ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc // - entries added for call credentials // // Implementations must always log grpc-trace-bin if it is present. 
// Practically speaking it will only be visible on server side because // grpc-trace-bin is managed by low level client side mechanisms // inaccessible from the application level. On server side, the // header is just a normal metadata key. // The pair will not count towards the size limit. type Metadata struct { Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Metadata) Reset() { *m = Metadata{} } func (m *Metadata) String() string { return proto.CompactTextString(m) } func (*Metadata) ProtoMessage() {} func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} } func (m *Metadata) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Metadata.Unmarshal(m, b) } func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) } func (dst *Metadata) XXX_Merge(src proto.Message) { xxx_messageInfo_Metadata.Merge(dst, src) } func (m *Metadata) XXX_Size() int { return xxx_messageInfo_Metadata.Size(m) } func (m *Metadata) XXX_DiscardUnknown() { xxx_messageInfo_Metadata.DiscardUnknown(m) } var xxx_messageInfo_Metadata proto.InternalMessageInfo func (m *Metadata) GetEntry() []*MetadataEntry { if m != nil { return m.Entry } return nil } // A metadata key value pair type MetadataEntry struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } func (*MetadataEntry) ProtoMessage() {} func (*MetadataEntry) Descriptor() ([]byte, []int) { return 
fileDescriptor_binarylog_264c8c9c551ce911, []int{6} } func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) } func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) } func (dst *MetadataEntry) XXX_Merge(src proto.Message) { xxx_messageInfo_MetadataEntry.Merge(dst, src) } func (m *MetadataEntry) XXX_Size() int { return xxx_messageInfo_MetadataEntry.Size(m) } func (m *MetadataEntry) XXX_DiscardUnknown() { xxx_messageInfo_MetadataEntry.DiscardUnknown(m) } var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo func (m *MetadataEntry) GetKey() string { if m != nil { return m.Key } return "" } func (m *MetadataEntry) GetValue() []byte { if m != nil { return m.Value } return nil } // Address information type Address struct { Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` // only for TYPE_IPV4 and TYPE_IPV6 IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Address) Reset() { *m = Address{} } func (m *Address) String() string { return proto.CompactTextString(m) } func (*Address) ProtoMessage() {} func (*Address) Descriptor() ([]byte, []int) { return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} } func (m *Address) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Address.Unmarshal(m, b) } func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Address.Marshal(b, m, deterministic) } func (dst *Address) XXX_Merge(src proto.Message) { xxx_messageInfo_Address.Merge(dst, src) } func (m *Address) XXX_Size() int { return xxx_messageInfo_Address.Size(m) } 
func (m *Address) XXX_DiscardUnknown() { xxx_messageInfo_Address.DiscardUnknown(m) } var xxx_messageInfo_Address proto.InternalMessageInfo func (m *Address) GetType() Address_Type { if m != nil { return m.Type } return Address_TYPE_UNKNOWN } func (m *Address) GetAddress() string { if m != nil { return m.Address } return "" } func (m *Address) GetIpPort() uint32 { if m != nil { return m.IpPort } return 0 } func init() { proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer") proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message") proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) } func init() { proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911) } var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ // 900 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 
0x0f, 0x09, 0xde, 0x3c, 0xdf, 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84, 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1, 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 
0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 
0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f, 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, 0xd4, 0x07, 0x00, 0x00, } grpc-go-1.29.1/call.go000066400000000000000000000046671365033716300144300ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "context" ) // Invoke sends the RPC request on the wire and returns after response is // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. 
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) if cc.dopts.unaryInt != nil { return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) } return invoke(ctx, method, args, reply, cc, opts...) } func combine(o1 []CallOption, o2 []CallOption) []CallOption { // we don't use append because o1 could have extra capacity whose // elements would be overwritten, which could cause inadvertent // sharing (and race conditions) between concurrent calls if len(o1) == 0 { return o2 } else if len(o2) == 0 { return o1 } ret := make([]CallOption, len(o1)+len(o2)) copy(ret, o1) copy(ret[len(o1):], o2) return ret } // Invoke sends the RPC request on the wire and returns after response is // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) } var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) if err != nil { return err } if err := cs.SendMsg(req); err != nil { return err } return cs.RecvMsg(reply) } grpc-go-1.29.1/call_test.go000066400000000000000000000345271365033716300154650ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "context" "fmt" "io" "math" "net" "strconv" "strings" "sync" "testing" "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) var ( expectedRequest = "ping" expectedResponse = "pong" weirdError = "format verbs: %v%s" sizeLargeErr = 1024 * 1024 canceled = 0 ) type testCodec struct { } func (testCodec) Marshal(v interface{}) ([]byte, error) { return []byte(*(v.(*string))), nil } func (testCodec) Unmarshal(data []byte, v interface{}) error { *(v.(*string)) = string(data) return nil } func (testCodec) String() string { return "test" } type testStreamHandler struct { port string t transport.ServerTransport } func (h *testStreamHandler) handleStream(t *testing.T, s *transport.Stream) { p := &parser{r: s} for { pf, req, err := p.recvMsg(math.MaxInt32) if err == io.EOF { break } if err != nil { return } if pf != compressionNone { t.Errorf("Received the mistaken message format %d, want %d", pf, compressionNone) return } var v string codec := testCodec{} if err := codec.Unmarshal(req, &v); err != nil { t.Errorf("Failed to unmarshal the received message: %v", err) return } if v == "weird error" { h.t.WriteStatus(s, status.New(codes.Internal, weirdError)) return } if v == "canceled" { canceled++ h.t.WriteStatus(s, status.New(codes.Internal, "")) return } if v == "port" { h.t.WriteStatus(s, status.New(codes.Internal, h.port)) return } if v != expectedRequest { h.t.WriteStatus(s, status.New(codes.Internal, strings.Repeat("A", sizeLargeErr))) return } } // send a response back to end the 
stream. data, err := encode(testCodec{}, &expectedResponse) if err != nil { t.Errorf("Failed to encode the response: %v", err) return } hdr, payload := msgHeader(data, nil) h.t.Write(s, hdr, payload, &transport.Options{}) h.t.WriteStatus(s, status.New(codes.OK, "")) } type server struct { lis net.Listener port string addr string startedErr chan error // sent nil or an error after server starts mu sync.Mutex conns map[transport.ServerTransport]bool } type ctxKey string func newTestServer() *server { return &server{startedErr: make(chan error, 1)} } // start starts server. Other goroutines should block on s.startedErr for further operations. func (s *server) start(t *testing.T, port int, maxStreams uint32) { var err error if port == 0 { s.lis, err = net.Listen("tcp", "localhost:0") } else { s.lis, err = net.Listen("tcp", "localhost:"+strconv.Itoa(port)) } if err != nil { s.startedErr <- fmt.Errorf("failed to listen: %v", err) return } s.addr = s.lis.Addr().String() _, p, err := net.SplitHostPort(s.addr) if err != nil { s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err) return } s.port = p s.conns = make(map[transport.ServerTransport]bool) s.startedErr <- nil for { conn, err := s.lis.Accept() if err != nil { return } config := &transport.ServerConfig{ MaxStreams: maxStreams, } st, err := transport.NewServerTransport("http2", conn, config) if err != nil { continue } s.mu.Lock() if s.conns == nil { s.mu.Unlock() st.Close() return } s.conns[st] = true s.mu.Unlock() h := &testStreamHandler{ port: s.port, t: st, } go st.HandleStreams(func(s *transport.Stream) { go h.handleStream(t, s) }, func(ctx context.Context, method string) context.Context { return ctx }) } } func (s *server) wait(t *testing.T, timeout time.Duration) { select { case err := <-s.startedErr: if err != nil { t.Fatal(err) } case <-time.After(timeout): t.Fatalf("Timed out after %v waiting for server to be ready", timeout) } } func (s *server) stop() { s.lis.Close() s.mu.Lock() for c := 
range s.conns { c.Close() } s.conns = nil s.mu.Unlock() } func setUp(t *testing.T, port int, maxStreams uint32) (*server, *ClientConn) { return setUpWithOptions(t, port, maxStreams) } func setUpWithOptions(t *testing.T, port int, maxStreams uint32, dopts ...DialOption) (*server, *ClientConn) { server := newTestServer() go server.start(t, port, maxStreams) server.wait(t, 2*time.Second) addr := "localhost:" + server.port dopts = append(dopts, WithBlock(), WithInsecure(), WithCodec(testCodec{})) cc, err := Dial(addr, dopts...) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } return server, cc } func (s) TestUnaryClientInterceptor(t *testing.T) { parentKey := ctxKey("parentKey") interceptor := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { if ctx.Value(parentKey) == nil { t.Fatalf("interceptor should have %v in context", parentKey) } return invoker(ctx, method, req, reply, cc, opts...) } server, cc := setUpWithOptions(t, 0, math.MaxUint32, WithUnaryInterceptor(interceptor)) defer func() { cc.Close() server.stop() }() var reply string ctx := context.Background() parentCtx := context.WithValue(ctx, ctxKey("parentKey"), 0) if err := cc.Invoke(parentCtx, "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse { t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) } } func (s) TestChainUnaryClientInterceptor(t *testing.T) { var ( parentKey = ctxKey("parentKey") firstIntKey = ctxKey("firstIntKey") secondIntKey = ctxKey("secondIntKey") ) firstInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { if ctx.Value(parentKey) == nil { t.Fatalf("first interceptor should have %v in context", parentKey) } if ctx.Value(firstIntKey) != nil { t.Fatalf("first interceptor should not have %v in context", firstIntKey) } if ctx.Value(secondIntKey) != nil { t.Fatalf("first interceptor 
should not have %v in context", secondIntKey) } firstCtx := context.WithValue(ctx, firstIntKey, 1) err := invoker(firstCtx, method, req, reply, cc, opts...) *(reply.(*string)) += "1" return err } secondInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { if ctx.Value(parentKey) == nil { t.Fatalf("second interceptor should have %v in context", parentKey) } if ctx.Value(firstIntKey) == nil { t.Fatalf("second interceptor should have %v in context", firstIntKey) } if ctx.Value(secondIntKey) != nil { t.Fatalf("second interceptor should not have %v in context", secondIntKey) } secondCtx := context.WithValue(ctx, secondIntKey, 2) err := invoker(secondCtx, method, req, reply, cc, opts...) *(reply.(*string)) += "2" return err } lastInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { if ctx.Value(parentKey) == nil { t.Fatalf("last interceptor should have %v in context", parentKey) } if ctx.Value(firstIntKey) == nil { t.Fatalf("last interceptor should have %v in context", firstIntKey) } if ctx.Value(secondIntKey) == nil { t.Fatalf("last interceptor should have %v in context", secondIntKey) } err := invoker(ctx, method, req, reply, cc, opts...) 
*(reply.(*string)) += "3" return err } server, cc := setUpWithOptions(t, 0, math.MaxUint32, WithChainUnaryInterceptor(firstInt, secondInt, lastInt)) defer func() { cc.Close() server.stop() }() var reply string ctx := context.Background() parentCtx := context.WithValue(ctx, ctxKey("parentKey"), 0) if err := cc.Invoke(parentCtx, "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse+"321" { t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) } } func (s) TestChainOnBaseUnaryClientInterceptor(t *testing.T) { var ( parentKey = ctxKey("parentKey") baseIntKey = ctxKey("baseIntKey") ) baseInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { if ctx.Value(parentKey) == nil { t.Fatalf("base interceptor should have %v in context", parentKey) } if ctx.Value(baseIntKey) != nil { t.Fatalf("base interceptor should not have %v in context", baseIntKey) } baseCtx := context.WithValue(ctx, baseIntKey, 1) return invoker(baseCtx, method, req, reply, cc, opts...) } chainInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { if ctx.Value(parentKey) == nil { t.Fatalf("chain interceptor should have %v in context", parentKey) } if ctx.Value(baseIntKey) == nil { t.Fatalf("chain interceptor should have %v in context", baseIntKey) } return invoker(ctx, method, req, reply, cc, opts...) 
} server, cc := setUpWithOptions(t, 0, math.MaxUint32, WithUnaryInterceptor(baseInt), WithChainUnaryInterceptor(chainInt)) defer func() { cc.Close() server.stop() }() var reply string ctx := context.Background() parentCtx := context.WithValue(ctx, ctxKey("parentKey"), 0) if err := cc.Invoke(parentCtx, "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse { t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) } } func (s) TestChainStreamClientInterceptor(t *testing.T) { var ( parentKey = ctxKey("parentKey") firstIntKey = ctxKey("firstIntKey") secondIntKey = ctxKey("secondIntKey") ) firstInt := func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { if ctx.Value(parentKey) == nil { t.Fatalf("first interceptor should have %v in context", parentKey) } if ctx.Value(firstIntKey) != nil { t.Fatalf("first interceptor should not have %v in context", firstIntKey) } if ctx.Value(secondIntKey) != nil { t.Fatalf("first interceptor should not have %v in context", secondIntKey) } firstCtx := context.WithValue(ctx, firstIntKey, 1) return streamer(firstCtx, desc, cc, method, opts...) } secondInt := func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { if ctx.Value(parentKey) == nil { t.Fatalf("second interceptor should have %v in context", parentKey) } if ctx.Value(firstIntKey) == nil { t.Fatalf("second interceptor should have %v in context", firstIntKey) } if ctx.Value(secondIntKey) != nil { t.Fatalf("second interceptor should not have %v in context", secondIntKey) } secondCtx := context.WithValue(ctx, secondIntKey, 2) return streamer(secondCtx, desc, cc, method, opts...) 
} lastInt := func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { if ctx.Value(parentKey) == nil { t.Fatalf("last interceptor should have %v in context", parentKey) } if ctx.Value(firstIntKey) == nil { t.Fatalf("last interceptor should have %v in context", firstIntKey) } if ctx.Value(secondIntKey) == nil { t.Fatalf("last interceptor should have %v in context", secondIntKey) } return streamer(ctx, desc, cc, method, opts...) } server, cc := setUpWithOptions(t, 0, math.MaxUint32, WithChainStreamInterceptor(firstInt, secondInt, lastInt)) defer func() { cc.Close() server.stop() }() ctx := context.Background() parentCtx := context.WithValue(ctx, ctxKey("parentKey"), 0) _, err := cc.NewStream(parentCtx, &StreamDesc{}, "/foo/bar") if err != nil { t.Fatalf("grpc.NewStream(_, _, _) = %v, want ", err) } } func (s) TestInvoke(t *testing.T) { server, cc := setUp(t, 0, math.MaxUint32) var reply string if err := cc.Invoke(context.Background(), "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse { t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) } cc.Close() server.stop() } func (s) TestInvokeLargeErr(t *testing.T) { server, cc := setUp(t, 0, math.MaxUint32) var reply string req := "hello" err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply) if _, ok := status.FromError(err); !ok { t.Fatalf("grpc.Invoke(_, _, _, _, _) receives non rpc error.") } if status.Code(err) != codes.Internal || len(errorDesc(err)) != sizeLargeErr { t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want an error of code %d and desc size %d", err, codes.Internal, sizeLargeErr) } cc.Close() server.stop() } // TestInvokeErrorSpecialChars checks that error messages don't get mangled. 
func (s) TestInvokeErrorSpecialChars(t *testing.T) { server, cc := setUp(t, 0, math.MaxUint32) var reply string req := "weird error" err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply) if _, ok := status.FromError(err); !ok { t.Fatalf("grpc.Invoke(_, _, _, _, _) receives non rpc error.") } if got, want := errorDesc(err), weirdError; got != want { t.Fatalf("grpc.Invoke(_, _, _, _, _) error = %q, want %q", got, want) } cc.Close() server.stop() } // TestInvokeCancel checks that an Invoke with a canceled context is not sent. func (s) TestInvokeCancel(t *testing.T) { server, cc := setUp(t, 0, math.MaxUint32) var reply string req := "canceled" for i := 0; i < 100; i++ { ctx, cancel := context.WithCancel(context.Background()) cancel() cc.Invoke(ctx, "/foo/bar", &req, &reply) } if canceled != 0 { t.Fatalf("received %d of 100 canceled requests", canceled) } cc.Close() server.stop() } // TestInvokeCancelClosedNonFail checks that a canceled non-failfast RPC // on a closed client will terminate. func (s) TestInvokeCancelClosedNonFailFast(t *testing.T) { server, cc := setUp(t, 0, math.MaxUint32) var reply string cc.Close() req := "hello" ctx, cancel := context.WithCancel(context.Background()) cancel() if err := cc.Invoke(ctx, "/foo/bar", &req, &reply, WaitForReady(true)); err == nil { t.Fatalf("canceled invoke on closed connection should fail") } server.stop() } grpc-go-1.29.1/channelz/000077500000000000000000000000001365033716300147535ustar00rootroot00000000000000grpc-go-1.29.1/channelz/grpc_channelz_v1/000077500000000000000000000000001365033716300201765ustar00rootroot00000000000000grpc-go-1.29.1/channelz/grpc_channelz_v1/channelz.pb.go000066400000000000000000003607061365033716300227430ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. 
// source: grpc/channelz/v1/channelz.proto package grpc_channelz_v1 import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" duration "github.com/golang/protobuf/ptypes/duration" timestamp "github.com/golang/protobuf/ptypes/timestamp" wrappers "github.com/golang/protobuf/ptypes/wrappers" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type ChannelConnectivityState_State int32 const ( ChannelConnectivityState_UNKNOWN ChannelConnectivityState_State = 0 ChannelConnectivityState_IDLE ChannelConnectivityState_State = 1 ChannelConnectivityState_CONNECTING ChannelConnectivityState_State = 2 ChannelConnectivityState_READY ChannelConnectivityState_State = 3 ChannelConnectivityState_TRANSIENT_FAILURE ChannelConnectivityState_State = 4 ChannelConnectivityState_SHUTDOWN ChannelConnectivityState_State = 5 ) var ChannelConnectivityState_State_name = map[int32]string{ 0: "UNKNOWN", 1: "IDLE", 2: "CONNECTING", 3: "READY", 4: "TRANSIENT_FAILURE", 5: "SHUTDOWN", } var ChannelConnectivityState_State_value = map[string]int32{ "UNKNOWN": 0, "IDLE": 1, "CONNECTING": 2, "READY": 3, "TRANSIENT_FAILURE": 4, "SHUTDOWN": 5, } func (x ChannelConnectivityState_State) String() string { return proto.EnumName(ChannelConnectivityState_State_name, int32(x)) } func (ChannelConnectivityState_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{2, 0} } // The supported severity levels of 
trace events. type ChannelTraceEvent_Severity int32 const ( ChannelTraceEvent_CT_UNKNOWN ChannelTraceEvent_Severity = 0 ChannelTraceEvent_CT_INFO ChannelTraceEvent_Severity = 1 ChannelTraceEvent_CT_WARNING ChannelTraceEvent_Severity = 2 ChannelTraceEvent_CT_ERROR ChannelTraceEvent_Severity = 3 ) var ChannelTraceEvent_Severity_name = map[int32]string{ 0: "CT_UNKNOWN", 1: "CT_INFO", 2: "CT_WARNING", 3: "CT_ERROR", } var ChannelTraceEvent_Severity_value = map[string]int32{ "CT_UNKNOWN": 0, "CT_INFO": 1, "CT_WARNING": 2, "CT_ERROR": 3, } func (x ChannelTraceEvent_Severity) String() string { return proto.EnumName(ChannelTraceEvent_Severity_name, int32(x)) } func (ChannelTraceEvent_Severity) EnumDescriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{4, 0} } // Channel is a logical grouping of channels, subchannels, and sockets. type Channel struct { // The identifier for this channel. This should bet set. Ref *ChannelRef `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` // Data specific to this channel. Data *ChannelData `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // There are no ordering guarantees on the order of channel refs. // There may not be cycles in the ref graph. // A channel ref may be present in more than one channel or subchannel. ChannelRef []*ChannelRef `protobuf:"bytes,3,rep,name=channel_ref,json=channelRef,proto3" json:"channel_ref,omitempty"` // At most one of 'channel_ref+subchannel_ref' and 'socket' is set. // There are no ordering guarantees on the order of subchannel refs. // There may not be cycles in the ref graph. // A sub channel ref may be present in more than one channel or subchannel. SubchannelRef []*SubchannelRef `protobuf:"bytes,4,rep,name=subchannel_ref,json=subchannelRef,proto3" json:"subchannel_ref,omitempty"` // There are no ordering guarantees on the order of sockets. 
SocketRef []*SocketRef `protobuf:"bytes,5,rep,name=socket_ref,json=socketRef,proto3" json:"socket_ref,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Channel) Reset() { *m = Channel{} } func (m *Channel) String() string { return proto.CompactTextString(m) } func (*Channel) ProtoMessage() {} func (*Channel) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{0} } func (m *Channel) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Channel.Unmarshal(m, b) } func (m *Channel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Channel.Marshal(b, m, deterministic) } func (m *Channel) XXX_Merge(src proto.Message) { xxx_messageInfo_Channel.Merge(m, src) } func (m *Channel) XXX_Size() int { return xxx_messageInfo_Channel.Size(m) } func (m *Channel) XXX_DiscardUnknown() { xxx_messageInfo_Channel.DiscardUnknown(m) } var xxx_messageInfo_Channel proto.InternalMessageInfo func (m *Channel) GetRef() *ChannelRef { if m != nil { return m.Ref } return nil } func (m *Channel) GetData() *ChannelData { if m != nil { return m.Data } return nil } func (m *Channel) GetChannelRef() []*ChannelRef { if m != nil { return m.ChannelRef } return nil } func (m *Channel) GetSubchannelRef() []*SubchannelRef { if m != nil { return m.SubchannelRef } return nil } func (m *Channel) GetSocketRef() []*SocketRef { if m != nil { return m.SocketRef } return nil } // Subchannel is a logical grouping of channels, subchannels, and sockets. // A subchannel is load balanced over by it's ancestor type Subchannel struct { // The identifier for this channel. Ref *SubchannelRef `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` // Data specific to this channel. Data *ChannelData `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // There are no ordering guarantees on the order of channel refs. // There may not be cycles in the ref graph. 
// A channel ref may be present in more than one channel or subchannel. ChannelRef []*ChannelRef `protobuf:"bytes,3,rep,name=channel_ref,json=channelRef,proto3" json:"channel_ref,omitempty"` // At most one of 'channel_ref+subchannel_ref' and 'socket' is set. // There are no ordering guarantees on the order of subchannel refs. // There may not be cycles in the ref graph. // A sub channel ref may be present in more than one channel or subchannel. SubchannelRef []*SubchannelRef `protobuf:"bytes,4,rep,name=subchannel_ref,json=subchannelRef,proto3" json:"subchannel_ref,omitempty"` // There are no ordering guarantees on the order of sockets. SocketRef []*SocketRef `protobuf:"bytes,5,rep,name=socket_ref,json=socketRef,proto3" json:"socket_ref,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Subchannel) Reset() { *m = Subchannel{} } func (m *Subchannel) String() string { return proto.CompactTextString(m) } func (*Subchannel) ProtoMessage() {} func (*Subchannel) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{1} } func (m *Subchannel) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Subchannel.Unmarshal(m, b) } func (m *Subchannel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Subchannel.Marshal(b, m, deterministic) } func (m *Subchannel) XXX_Merge(src proto.Message) { xxx_messageInfo_Subchannel.Merge(m, src) } func (m *Subchannel) XXX_Size() int { return xxx_messageInfo_Subchannel.Size(m) } func (m *Subchannel) XXX_DiscardUnknown() { xxx_messageInfo_Subchannel.DiscardUnknown(m) } var xxx_messageInfo_Subchannel proto.InternalMessageInfo func (m *Subchannel) GetRef() *SubchannelRef { if m != nil { return m.Ref } return nil } func (m *Subchannel) GetData() *ChannelData { if m != nil { return m.Data } return nil } func (m *Subchannel) GetChannelRef() []*ChannelRef { if m != nil { return m.ChannelRef } return nil } func (m 
*Subchannel) GetSubchannelRef() []*SubchannelRef { if m != nil { return m.SubchannelRef } return nil } func (m *Subchannel) GetSocketRef() []*SocketRef { if m != nil { return m.SocketRef } return nil } // These come from the specified states in this document: // https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md type ChannelConnectivityState struct { State ChannelConnectivityState_State `protobuf:"varint,1,opt,name=state,proto3,enum=grpc.channelz.v1.ChannelConnectivityState_State" json:"state,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ChannelConnectivityState) Reset() { *m = ChannelConnectivityState{} } func (m *ChannelConnectivityState) String() string { return proto.CompactTextString(m) } func (*ChannelConnectivityState) ProtoMessage() {} func (*ChannelConnectivityState) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{2} } func (m *ChannelConnectivityState) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ChannelConnectivityState.Unmarshal(m, b) } func (m *ChannelConnectivityState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ChannelConnectivityState.Marshal(b, m, deterministic) } func (m *ChannelConnectivityState) XXX_Merge(src proto.Message) { xxx_messageInfo_ChannelConnectivityState.Merge(m, src) } func (m *ChannelConnectivityState) XXX_Size() int { return xxx_messageInfo_ChannelConnectivityState.Size(m) } func (m *ChannelConnectivityState) XXX_DiscardUnknown() { xxx_messageInfo_ChannelConnectivityState.DiscardUnknown(m) } var xxx_messageInfo_ChannelConnectivityState proto.InternalMessageInfo func (m *ChannelConnectivityState) GetState() ChannelConnectivityState_State { if m != nil { return m.State } return ChannelConnectivityState_UNKNOWN } // Channel data is data related to a specific Channel or Subchannel. 
type ChannelData struct { // The connectivity state of the channel or subchannel. Implementations // should always set this. State *ChannelConnectivityState `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` // The target this channel originally tried to connect to. May be absent Target string `protobuf:"bytes,2,opt,name=target,proto3" json:"target,omitempty"` // A trace of recent events on the channel. May be absent. Trace *ChannelTrace `protobuf:"bytes,3,opt,name=trace,proto3" json:"trace,omitempty"` // The number of calls started on the channel CallsStarted int64 `protobuf:"varint,4,opt,name=calls_started,json=callsStarted,proto3" json:"calls_started,omitempty"` // The number of calls that have completed with an OK status CallsSucceeded int64 `protobuf:"varint,5,opt,name=calls_succeeded,json=callsSucceeded,proto3" json:"calls_succeeded,omitempty"` // The number of calls that have completed with a non-OK status CallsFailed int64 `protobuf:"varint,6,opt,name=calls_failed,json=callsFailed,proto3" json:"calls_failed,omitempty"` // The last time a call was started on the channel. 
LastCallStartedTimestamp *timestamp.Timestamp `protobuf:"bytes,7,opt,name=last_call_started_timestamp,json=lastCallStartedTimestamp,proto3" json:"last_call_started_timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ChannelData) Reset() { *m = ChannelData{} } func (m *ChannelData) String() string { return proto.CompactTextString(m) } func (*ChannelData) ProtoMessage() {} func (*ChannelData) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{3} } func (m *ChannelData) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ChannelData.Unmarshal(m, b) } func (m *ChannelData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ChannelData.Marshal(b, m, deterministic) } func (m *ChannelData) XXX_Merge(src proto.Message) { xxx_messageInfo_ChannelData.Merge(m, src) } func (m *ChannelData) XXX_Size() int { return xxx_messageInfo_ChannelData.Size(m) } func (m *ChannelData) XXX_DiscardUnknown() { xxx_messageInfo_ChannelData.DiscardUnknown(m) } var xxx_messageInfo_ChannelData proto.InternalMessageInfo func (m *ChannelData) GetState() *ChannelConnectivityState { if m != nil { return m.State } return nil } func (m *ChannelData) GetTarget() string { if m != nil { return m.Target } return "" } func (m *ChannelData) GetTrace() *ChannelTrace { if m != nil { return m.Trace } return nil } func (m *ChannelData) GetCallsStarted() int64 { if m != nil { return m.CallsStarted } return 0 } func (m *ChannelData) GetCallsSucceeded() int64 { if m != nil { return m.CallsSucceeded } return 0 } func (m *ChannelData) GetCallsFailed() int64 { if m != nil { return m.CallsFailed } return 0 } func (m *ChannelData) GetLastCallStartedTimestamp() *timestamp.Timestamp { if m != nil { return m.LastCallStartedTimestamp } return nil } // A trace event is an interesting thing that happened to a channel or // subchannel, such as creation, address resolution, 
subchannel creation, etc. type ChannelTraceEvent struct { // High level description of the event. Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` // the severity of the trace event Severity ChannelTraceEvent_Severity `protobuf:"varint,2,opt,name=severity,proto3,enum=grpc.channelz.v1.ChannelTraceEvent_Severity" json:"severity,omitempty"` // When this event occurred. Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // ref of referenced channel or subchannel. // Optional, only present if this event refers to a child object. For example, // this field would be filled if this trace event was for a subchannel being // created. // // Types that are valid to be assigned to ChildRef: // *ChannelTraceEvent_ChannelRef // *ChannelTraceEvent_SubchannelRef ChildRef isChannelTraceEvent_ChildRef `protobuf_oneof:"child_ref"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ChannelTraceEvent) Reset() { *m = ChannelTraceEvent{} } func (m *ChannelTraceEvent) String() string { return proto.CompactTextString(m) } func (*ChannelTraceEvent) ProtoMessage() {} func (*ChannelTraceEvent) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{4} } func (m *ChannelTraceEvent) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ChannelTraceEvent.Unmarshal(m, b) } func (m *ChannelTraceEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ChannelTraceEvent.Marshal(b, m, deterministic) } func (m *ChannelTraceEvent) XXX_Merge(src proto.Message) { xxx_messageInfo_ChannelTraceEvent.Merge(m, src) } func (m *ChannelTraceEvent) XXX_Size() int { return xxx_messageInfo_ChannelTraceEvent.Size(m) } func (m *ChannelTraceEvent) XXX_DiscardUnknown() { xxx_messageInfo_ChannelTraceEvent.DiscardUnknown(m) } var xxx_messageInfo_ChannelTraceEvent proto.InternalMessageInfo func (m 
*ChannelTraceEvent) GetDescription() string { if m != nil { return m.Description } return "" } func (m *ChannelTraceEvent) GetSeverity() ChannelTraceEvent_Severity { if m != nil { return m.Severity } return ChannelTraceEvent_CT_UNKNOWN } func (m *ChannelTraceEvent) GetTimestamp() *timestamp.Timestamp { if m != nil { return m.Timestamp } return nil } type isChannelTraceEvent_ChildRef interface { isChannelTraceEvent_ChildRef() } type ChannelTraceEvent_ChannelRef struct { ChannelRef *ChannelRef `protobuf:"bytes,4,opt,name=channel_ref,json=channelRef,proto3,oneof"` } type ChannelTraceEvent_SubchannelRef struct { SubchannelRef *SubchannelRef `protobuf:"bytes,5,opt,name=subchannel_ref,json=subchannelRef,proto3,oneof"` } func (*ChannelTraceEvent_ChannelRef) isChannelTraceEvent_ChildRef() {} func (*ChannelTraceEvent_SubchannelRef) isChannelTraceEvent_ChildRef() {} func (m *ChannelTraceEvent) GetChildRef() isChannelTraceEvent_ChildRef { if m != nil { return m.ChildRef } return nil } func (m *ChannelTraceEvent) GetChannelRef() *ChannelRef { if x, ok := m.GetChildRef().(*ChannelTraceEvent_ChannelRef); ok { return x.ChannelRef } return nil } func (m *ChannelTraceEvent) GetSubchannelRef() *SubchannelRef { if x, ok := m.GetChildRef().(*ChannelTraceEvent_SubchannelRef); ok { return x.SubchannelRef } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*ChannelTraceEvent) XXX_OneofWrappers() []interface{} { return []interface{}{ (*ChannelTraceEvent_ChannelRef)(nil), (*ChannelTraceEvent_SubchannelRef)(nil), } } // ChannelTrace represents the recent events that have occurred on the channel. type ChannelTrace struct { // Number of events ever logged in this tracing object. This can differ from // events.size() because events can be overwritten or garbage collected by // implementations. 
NumEventsLogged int64 `protobuf:"varint,1,opt,name=num_events_logged,json=numEventsLogged,proto3" json:"num_events_logged,omitempty"` // Time that this channel was created. CreationTimestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=creation_timestamp,json=creationTimestamp,proto3" json:"creation_timestamp,omitempty"` // List of events that have occurred on this channel. Events []*ChannelTraceEvent `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ChannelTrace) Reset() { *m = ChannelTrace{} } func (m *ChannelTrace) String() string { return proto.CompactTextString(m) } func (*ChannelTrace) ProtoMessage() {} func (*ChannelTrace) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{5} } func (m *ChannelTrace) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ChannelTrace.Unmarshal(m, b) } func (m *ChannelTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ChannelTrace.Marshal(b, m, deterministic) } func (m *ChannelTrace) XXX_Merge(src proto.Message) { xxx_messageInfo_ChannelTrace.Merge(m, src) } func (m *ChannelTrace) XXX_Size() int { return xxx_messageInfo_ChannelTrace.Size(m) } func (m *ChannelTrace) XXX_DiscardUnknown() { xxx_messageInfo_ChannelTrace.DiscardUnknown(m) } var xxx_messageInfo_ChannelTrace proto.InternalMessageInfo func (m *ChannelTrace) GetNumEventsLogged() int64 { if m != nil { return m.NumEventsLogged } return 0 } func (m *ChannelTrace) GetCreationTimestamp() *timestamp.Timestamp { if m != nil { return m.CreationTimestamp } return nil } func (m *ChannelTrace) GetEvents() []*ChannelTraceEvent { if m != nil { return m.Events } return nil } // ChannelRef is a reference to a Channel. type ChannelRef struct { // The globally unique id for this channel. Must be a positive number. 
ChannelId int64 `protobuf:"varint,1,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` // An optional name associated with the channel. Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ChannelRef) Reset() { *m = ChannelRef{} } func (m *ChannelRef) String() string { return proto.CompactTextString(m) } func (*ChannelRef) ProtoMessage() {} func (*ChannelRef) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{6} } func (m *ChannelRef) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ChannelRef.Unmarshal(m, b) } func (m *ChannelRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ChannelRef.Marshal(b, m, deterministic) } func (m *ChannelRef) XXX_Merge(src proto.Message) { xxx_messageInfo_ChannelRef.Merge(m, src) } func (m *ChannelRef) XXX_Size() int { return xxx_messageInfo_ChannelRef.Size(m) } func (m *ChannelRef) XXX_DiscardUnknown() { xxx_messageInfo_ChannelRef.DiscardUnknown(m) } var xxx_messageInfo_ChannelRef proto.InternalMessageInfo func (m *ChannelRef) GetChannelId() int64 { if m != nil { return m.ChannelId } return 0 } func (m *ChannelRef) GetName() string { if m != nil { return m.Name } return "" } // SubchannelRef is a reference to a Subchannel. type SubchannelRef struct { // The globally unique id for this subchannel. Must be a positive number. SubchannelId int64 `protobuf:"varint,7,opt,name=subchannel_id,json=subchannelId,proto3" json:"subchannel_id,omitempty"` // An optional name associated with the subchannel. 
Name string `protobuf:"bytes,8,opt,name=name,proto3" json:"name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SubchannelRef) Reset() { *m = SubchannelRef{} } func (m *SubchannelRef) String() string { return proto.CompactTextString(m) } func (*SubchannelRef) ProtoMessage() {} func (*SubchannelRef) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{7} } func (m *SubchannelRef) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SubchannelRef.Unmarshal(m, b) } func (m *SubchannelRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SubchannelRef.Marshal(b, m, deterministic) } func (m *SubchannelRef) XXX_Merge(src proto.Message) { xxx_messageInfo_SubchannelRef.Merge(m, src) } func (m *SubchannelRef) XXX_Size() int { return xxx_messageInfo_SubchannelRef.Size(m) } func (m *SubchannelRef) XXX_DiscardUnknown() { xxx_messageInfo_SubchannelRef.DiscardUnknown(m) } var xxx_messageInfo_SubchannelRef proto.InternalMessageInfo func (m *SubchannelRef) GetSubchannelId() int64 { if m != nil { return m.SubchannelId } return 0 } func (m *SubchannelRef) GetName() string { if m != nil { return m.Name } return "" } // SocketRef is a reference to a Socket. type SocketRef struct { // The globally unique id for this socket. Must be a positive number. SocketId int64 `protobuf:"varint,3,opt,name=socket_id,json=socketId,proto3" json:"socket_id,omitempty"` // An optional name associated with the socket. 
Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SocketRef) Reset() { *m = SocketRef{} } func (m *SocketRef) String() string { return proto.CompactTextString(m) } func (*SocketRef) ProtoMessage() {} func (*SocketRef) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{8} } func (m *SocketRef) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SocketRef.Unmarshal(m, b) } func (m *SocketRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SocketRef.Marshal(b, m, deterministic) } func (m *SocketRef) XXX_Merge(src proto.Message) { xxx_messageInfo_SocketRef.Merge(m, src) } func (m *SocketRef) XXX_Size() int { return xxx_messageInfo_SocketRef.Size(m) } func (m *SocketRef) XXX_DiscardUnknown() { xxx_messageInfo_SocketRef.DiscardUnknown(m) } var xxx_messageInfo_SocketRef proto.InternalMessageInfo func (m *SocketRef) GetSocketId() int64 { if m != nil { return m.SocketId } return 0 } func (m *SocketRef) GetName() string { if m != nil { return m.Name } return "" } // ServerRef is a reference to a Server. type ServerRef struct { // A globally unique identifier for this server. Must be a positive number. ServerId int64 `protobuf:"varint,5,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"` // An optional name associated with the server. 
Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ServerRef) Reset() { *m = ServerRef{} } func (m *ServerRef) String() string { return proto.CompactTextString(m) } func (*ServerRef) ProtoMessage() {} func (*ServerRef) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{9} } func (m *ServerRef) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServerRef.Unmarshal(m, b) } func (m *ServerRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServerRef.Marshal(b, m, deterministic) } func (m *ServerRef) XXX_Merge(src proto.Message) { xxx_messageInfo_ServerRef.Merge(m, src) } func (m *ServerRef) XXX_Size() int { return xxx_messageInfo_ServerRef.Size(m) } func (m *ServerRef) XXX_DiscardUnknown() { xxx_messageInfo_ServerRef.DiscardUnknown(m) } var xxx_messageInfo_ServerRef proto.InternalMessageInfo func (m *ServerRef) GetServerId() int64 { if m != nil { return m.ServerId } return 0 } func (m *ServerRef) GetName() string { if m != nil { return m.Name } return "" } // Server represents a single server. There may be multiple servers in a single // program. type Server struct { // The identifier for a Server. This should be set. Ref *ServerRef `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` // The associated data of the Server. Data *ServerData `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // The sockets that the server is listening on. There are no ordering // guarantees. This may be absent. 
ListenSocket []*SocketRef `protobuf:"bytes,3,rep,name=listen_socket,json=listenSocket,proto3" json:"listen_socket,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Server) Reset() { *m = Server{} } func (m *Server) String() string { return proto.CompactTextString(m) } func (*Server) ProtoMessage() {} func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{10} } func (m *Server) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Server.Unmarshal(m, b) } func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Server.Marshal(b, m, deterministic) } func (m *Server) XXX_Merge(src proto.Message) { xxx_messageInfo_Server.Merge(m, src) } func (m *Server) XXX_Size() int { return xxx_messageInfo_Server.Size(m) } func (m *Server) XXX_DiscardUnknown() { xxx_messageInfo_Server.DiscardUnknown(m) } var xxx_messageInfo_Server proto.InternalMessageInfo func (m *Server) GetRef() *ServerRef { if m != nil { return m.Ref } return nil } func (m *Server) GetData() *ServerData { if m != nil { return m.Data } return nil } func (m *Server) GetListenSocket() []*SocketRef { if m != nil { return m.ListenSocket } return nil } // ServerData is data for a specific Server. type ServerData struct { // A trace of recent events on the server. May be absent. 
Trace *ChannelTrace `protobuf:"bytes,1,opt,name=trace,proto3" json:"trace,omitempty"` // The number of incoming calls started on the server CallsStarted int64 `protobuf:"varint,2,opt,name=calls_started,json=callsStarted,proto3" json:"calls_started,omitempty"` // The number of incoming calls that have completed with an OK status CallsSucceeded int64 `protobuf:"varint,3,opt,name=calls_succeeded,json=callsSucceeded,proto3" json:"calls_succeeded,omitempty"` // The number of incoming calls that have a completed with a non-OK status CallsFailed int64 `protobuf:"varint,4,opt,name=calls_failed,json=callsFailed,proto3" json:"calls_failed,omitempty"` // The last time a call was started on the server. LastCallStartedTimestamp *timestamp.Timestamp `protobuf:"bytes,5,opt,name=last_call_started_timestamp,json=lastCallStartedTimestamp,proto3" json:"last_call_started_timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ServerData) Reset() { *m = ServerData{} } func (m *ServerData) String() string { return proto.CompactTextString(m) } func (*ServerData) ProtoMessage() {} func (*ServerData) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{11} } func (m *ServerData) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServerData.Unmarshal(m, b) } func (m *ServerData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServerData.Marshal(b, m, deterministic) } func (m *ServerData) XXX_Merge(src proto.Message) { xxx_messageInfo_ServerData.Merge(m, src) } func (m *ServerData) XXX_Size() int { return xxx_messageInfo_ServerData.Size(m) } func (m *ServerData) XXX_DiscardUnknown() { xxx_messageInfo_ServerData.DiscardUnknown(m) } var xxx_messageInfo_ServerData proto.InternalMessageInfo func (m *ServerData) GetTrace() *ChannelTrace { if m != nil { return m.Trace } return nil } func (m *ServerData) GetCallsStarted() int64 { if m != nil { return 
m.CallsStarted } return 0 } func (m *ServerData) GetCallsSucceeded() int64 { if m != nil { return m.CallsSucceeded } return 0 } func (m *ServerData) GetCallsFailed() int64 { if m != nil { return m.CallsFailed } return 0 } func (m *ServerData) GetLastCallStartedTimestamp() *timestamp.Timestamp { if m != nil { return m.LastCallStartedTimestamp } return nil } // Information about an actual connection. Pronounced "sock-ay". type Socket struct { // The identifier for the Socket. Ref *SocketRef `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` // Data specific to this Socket. Data *SocketData `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // The locally bound address. Local *Address `protobuf:"bytes,3,opt,name=local,proto3" json:"local,omitempty"` // The remote bound address. May be absent. Remote *Address `protobuf:"bytes,4,opt,name=remote,proto3" json:"remote,omitempty"` // Security details for this socket. May be absent if not available, or // there is no security on the socket. Security *Security `protobuf:"bytes,5,opt,name=security,proto3" json:"security,omitempty"` // Optional, represents the name of the remote endpoint, if different than // the original target name. 
RemoteName string `protobuf:"bytes,6,opt,name=remote_name,json=remoteName,proto3" json:"remote_name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Socket) Reset() { *m = Socket{} } func (m *Socket) String() string { return proto.CompactTextString(m) } func (*Socket) ProtoMessage() {} func (*Socket) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{12} } func (m *Socket) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Socket.Unmarshal(m, b) } func (m *Socket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Socket.Marshal(b, m, deterministic) } func (m *Socket) XXX_Merge(src proto.Message) { xxx_messageInfo_Socket.Merge(m, src) } func (m *Socket) XXX_Size() int { return xxx_messageInfo_Socket.Size(m) } func (m *Socket) XXX_DiscardUnknown() { xxx_messageInfo_Socket.DiscardUnknown(m) } var xxx_messageInfo_Socket proto.InternalMessageInfo func (m *Socket) GetRef() *SocketRef { if m != nil { return m.Ref } return nil } func (m *Socket) GetData() *SocketData { if m != nil { return m.Data } return nil } func (m *Socket) GetLocal() *Address { if m != nil { return m.Local } return nil } func (m *Socket) GetRemote() *Address { if m != nil { return m.Remote } return nil } func (m *Socket) GetSecurity() *Security { if m != nil { return m.Security } return nil } func (m *Socket) GetRemoteName() string { if m != nil { return m.RemoteName } return "" } // SocketData is data associated for a specific Socket. The fields present // are specific to the implementation, so there may be minor differences in // the semantics. (e.g. flow control windows) type SocketData struct { // The number of streams that have been started. 
StreamsStarted int64 `protobuf:"varint,1,opt,name=streams_started,json=streamsStarted,proto3" json:"streams_started,omitempty"` // The number of streams that have ended successfully: // On client side, received frame with eos bit set; // On server side, sent frame with eos bit set. StreamsSucceeded int64 `protobuf:"varint,2,opt,name=streams_succeeded,json=streamsSucceeded,proto3" json:"streams_succeeded,omitempty"` // The number of streams that have ended unsuccessfully: // On client side, ended without receiving frame with eos bit set; // On server side, ended without sending frame with eos bit set. StreamsFailed int64 `protobuf:"varint,3,opt,name=streams_failed,json=streamsFailed,proto3" json:"streams_failed,omitempty"` // The number of grpc messages successfully sent on this socket. MessagesSent int64 `protobuf:"varint,4,opt,name=messages_sent,json=messagesSent,proto3" json:"messages_sent,omitempty"` // The number of grpc messages received on this socket. MessagesReceived int64 `protobuf:"varint,5,opt,name=messages_received,json=messagesReceived,proto3" json:"messages_received,omitempty"` // The number of keep alives sent. This is typically implemented with HTTP/2 // ping messages. KeepAlivesSent int64 `protobuf:"varint,6,opt,name=keep_alives_sent,json=keepAlivesSent,proto3" json:"keep_alives_sent,omitempty"` // The last time a stream was created by this endpoint. Usually unset for // servers. LastLocalStreamCreatedTimestamp *timestamp.Timestamp `protobuf:"bytes,7,opt,name=last_local_stream_created_timestamp,json=lastLocalStreamCreatedTimestamp,proto3" json:"last_local_stream_created_timestamp,omitempty"` // The last time a stream was created by the remote endpoint. Usually unset // for clients. 
LastRemoteStreamCreatedTimestamp *timestamp.Timestamp `protobuf:"bytes,8,opt,name=last_remote_stream_created_timestamp,json=lastRemoteStreamCreatedTimestamp,proto3" json:"last_remote_stream_created_timestamp,omitempty"` // The last time a message was sent by this endpoint. LastMessageSentTimestamp *timestamp.Timestamp `protobuf:"bytes,9,opt,name=last_message_sent_timestamp,json=lastMessageSentTimestamp,proto3" json:"last_message_sent_timestamp,omitempty"` // The last time a message was received by this endpoint. LastMessageReceivedTimestamp *timestamp.Timestamp `protobuf:"bytes,10,opt,name=last_message_received_timestamp,json=lastMessageReceivedTimestamp,proto3" json:"last_message_received_timestamp,omitempty"` // The amount of window, granted to the local endpoint by the remote endpoint. // This may be slightly out of date due to network latency. This does NOT // include stream level or TCP level flow control info. LocalFlowControlWindow *wrappers.Int64Value `protobuf:"bytes,11,opt,name=local_flow_control_window,json=localFlowControlWindow,proto3" json:"local_flow_control_window,omitempty"` // The amount of window, granted to the remote endpoint by the local endpoint. // This may be slightly out of date due to network latency. This does NOT // include stream level or TCP level flow control info. RemoteFlowControlWindow *wrappers.Int64Value `protobuf:"bytes,12,opt,name=remote_flow_control_window,json=remoteFlowControlWindow,proto3" json:"remote_flow_control_window,omitempty"` // Socket options set on this socket. May be absent if 'summary' is set // on GetSocketRequest. 
Option []*SocketOption `protobuf:"bytes,13,rep,name=option,proto3" json:"option,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SocketData) Reset() { *m = SocketData{} } func (m *SocketData) String() string { return proto.CompactTextString(m) } func (*SocketData) ProtoMessage() {} func (*SocketData) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{13} } func (m *SocketData) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SocketData.Unmarshal(m, b) } func (m *SocketData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SocketData.Marshal(b, m, deterministic) } func (m *SocketData) XXX_Merge(src proto.Message) { xxx_messageInfo_SocketData.Merge(m, src) } func (m *SocketData) XXX_Size() int { return xxx_messageInfo_SocketData.Size(m) } func (m *SocketData) XXX_DiscardUnknown() { xxx_messageInfo_SocketData.DiscardUnknown(m) } var xxx_messageInfo_SocketData proto.InternalMessageInfo func (m *SocketData) GetStreamsStarted() int64 { if m != nil { return m.StreamsStarted } return 0 } func (m *SocketData) GetStreamsSucceeded() int64 { if m != nil { return m.StreamsSucceeded } return 0 } func (m *SocketData) GetStreamsFailed() int64 { if m != nil { return m.StreamsFailed } return 0 } func (m *SocketData) GetMessagesSent() int64 { if m != nil { return m.MessagesSent } return 0 } func (m *SocketData) GetMessagesReceived() int64 { if m != nil { return m.MessagesReceived } return 0 } func (m *SocketData) GetKeepAlivesSent() int64 { if m != nil { return m.KeepAlivesSent } return 0 } func (m *SocketData) GetLastLocalStreamCreatedTimestamp() *timestamp.Timestamp { if m != nil { return m.LastLocalStreamCreatedTimestamp } return nil } func (m *SocketData) GetLastRemoteStreamCreatedTimestamp() *timestamp.Timestamp { if m != nil { return m.LastRemoteStreamCreatedTimestamp } return nil } func (m *SocketData) GetLastMessageSentTimestamp() 
*timestamp.Timestamp { if m != nil { return m.LastMessageSentTimestamp } return nil } func (m *SocketData) GetLastMessageReceivedTimestamp() *timestamp.Timestamp { if m != nil { return m.LastMessageReceivedTimestamp } return nil } func (m *SocketData) GetLocalFlowControlWindow() *wrappers.Int64Value { if m != nil { return m.LocalFlowControlWindow } return nil } func (m *SocketData) GetRemoteFlowControlWindow() *wrappers.Int64Value { if m != nil { return m.RemoteFlowControlWindow } return nil } func (m *SocketData) GetOption() []*SocketOption { if m != nil { return m.Option } return nil } // Address represents the address used to create the socket. type Address struct { // Types that are valid to be assigned to Address: // *Address_TcpipAddress // *Address_UdsAddress_ // *Address_OtherAddress_ Address isAddress_Address `protobuf_oneof:"address"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Address) Reset() { *m = Address{} } func (m *Address) String() string { return proto.CompactTextString(m) } func (*Address) ProtoMessage() {} func (*Address) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{14} } func (m *Address) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Address.Unmarshal(m, b) } func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Address.Marshal(b, m, deterministic) } func (m *Address) XXX_Merge(src proto.Message) { xxx_messageInfo_Address.Merge(m, src) } func (m *Address) XXX_Size() int { return xxx_messageInfo_Address.Size(m) } func (m *Address) XXX_DiscardUnknown() { xxx_messageInfo_Address.DiscardUnknown(m) } var xxx_messageInfo_Address proto.InternalMessageInfo type isAddress_Address interface { isAddress_Address() } type Address_TcpipAddress struct { TcpipAddress *Address_TcpIpAddress `protobuf:"bytes,1,opt,name=tcpip_address,json=tcpipAddress,proto3,oneof"` } type Address_UdsAddress_ struct { 
UdsAddress *Address_UdsAddress `protobuf:"bytes,2,opt,name=uds_address,json=udsAddress,proto3,oneof"` } type Address_OtherAddress_ struct { OtherAddress *Address_OtherAddress `protobuf:"bytes,3,opt,name=other_address,json=otherAddress,proto3,oneof"` } func (*Address_TcpipAddress) isAddress_Address() {} func (*Address_UdsAddress_) isAddress_Address() {} func (*Address_OtherAddress_) isAddress_Address() {} func (m *Address) GetAddress() isAddress_Address { if m != nil { return m.Address } return nil } func (m *Address) GetTcpipAddress() *Address_TcpIpAddress { if x, ok := m.GetAddress().(*Address_TcpipAddress); ok { return x.TcpipAddress } return nil } func (m *Address) GetUdsAddress() *Address_UdsAddress { if x, ok := m.GetAddress().(*Address_UdsAddress_); ok { return x.UdsAddress } return nil } func (m *Address) GetOtherAddress() *Address_OtherAddress { if x, ok := m.GetAddress().(*Address_OtherAddress_); ok { return x.OtherAddress } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*Address) XXX_OneofWrappers() []interface{} { return []interface{}{ (*Address_TcpipAddress)(nil), (*Address_UdsAddress_)(nil), (*Address_OtherAddress_)(nil), } } type Address_TcpIpAddress struct { // Either the IPv4 or IPv6 address in bytes. Will be either 4 bytes or 16 // bytes in length. IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // 0-64k, or -1 if not appropriate. 
Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Address_TcpIpAddress) Reset() { *m = Address_TcpIpAddress{} } func (m *Address_TcpIpAddress) String() string { return proto.CompactTextString(m) } func (*Address_TcpIpAddress) ProtoMessage() {} func (*Address_TcpIpAddress) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{14, 0} } func (m *Address_TcpIpAddress) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Address_TcpIpAddress.Unmarshal(m, b) } func (m *Address_TcpIpAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Address_TcpIpAddress.Marshal(b, m, deterministic) } func (m *Address_TcpIpAddress) XXX_Merge(src proto.Message) { xxx_messageInfo_Address_TcpIpAddress.Merge(m, src) } func (m *Address_TcpIpAddress) XXX_Size() int { return xxx_messageInfo_Address_TcpIpAddress.Size(m) } func (m *Address_TcpIpAddress) XXX_DiscardUnknown() { xxx_messageInfo_Address_TcpIpAddress.DiscardUnknown(m) } var xxx_messageInfo_Address_TcpIpAddress proto.InternalMessageInfo func (m *Address_TcpIpAddress) GetIpAddress() []byte { if m != nil { return m.IpAddress } return nil } func (m *Address_TcpIpAddress) GetPort() int32 { if m != nil { return m.Port } return 0 } // A Unix Domain Socket address. 
type Address_UdsAddress struct { Filename string `protobuf:"bytes,1,opt,name=filename,proto3" json:"filename,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Address_UdsAddress) Reset() { *m = Address_UdsAddress{} } func (m *Address_UdsAddress) String() string { return proto.CompactTextString(m) } func (*Address_UdsAddress) ProtoMessage() {} func (*Address_UdsAddress) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{14, 1} } func (m *Address_UdsAddress) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Address_UdsAddress.Unmarshal(m, b) } func (m *Address_UdsAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Address_UdsAddress.Marshal(b, m, deterministic) } func (m *Address_UdsAddress) XXX_Merge(src proto.Message) { xxx_messageInfo_Address_UdsAddress.Merge(m, src) } func (m *Address_UdsAddress) XXX_Size() int { return xxx_messageInfo_Address_UdsAddress.Size(m) } func (m *Address_UdsAddress) XXX_DiscardUnknown() { xxx_messageInfo_Address_UdsAddress.DiscardUnknown(m) } var xxx_messageInfo_Address_UdsAddress proto.InternalMessageInfo func (m *Address_UdsAddress) GetFilename() string { if m != nil { return m.Filename } return "" } // An address type not included above. type Address_OtherAddress struct { // The human readable version of the value. This value should be set. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The actual address message. 
Value *any.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Address_OtherAddress) Reset() { *m = Address_OtherAddress{} } func (m *Address_OtherAddress) String() string { return proto.CompactTextString(m) } func (*Address_OtherAddress) ProtoMessage() {} func (*Address_OtherAddress) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{14, 2} } func (m *Address_OtherAddress) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Address_OtherAddress.Unmarshal(m, b) } func (m *Address_OtherAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Address_OtherAddress.Marshal(b, m, deterministic) } func (m *Address_OtherAddress) XXX_Merge(src proto.Message) { xxx_messageInfo_Address_OtherAddress.Merge(m, src) } func (m *Address_OtherAddress) XXX_Size() int { return xxx_messageInfo_Address_OtherAddress.Size(m) } func (m *Address_OtherAddress) XXX_DiscardUnknown() { xxx_messageInfo_Address_OtherAddress.DiscardUnknown(m) } var xxx_messageInfo_Address_OtherAddress proto.InternalMessageInfo func (m *Address_OtherAddress) GetName() string { if m != nil { return m.Name } return "" } func (m *Address_OtherAddress) GetValue() *any.Any { if m != nil { return m.Value } return nil } // Security represents details about how secure the socket is. 
type Security struct { // Types that are valid to be assigned to Model: // *Security_Tls_ // *Security_Other Model isSecurity_Model `protobuf_oneof:"model"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Security) Reset() { *m = Security{} } func (m *Security) String() string { return proto.CompactTextString(m) } func (*Security) ProtoMessage() {} func (*Security) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{15} } func (m *Security) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Security.Unmarshal(m, b) } func (m *Security) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Security.Marshal(b, m, deterministic) } func (m *Security) XXX_Merge(src proto.Message) { xxx_messageInfo_Security.Merge(m, src) } func (m *Security) XXX_Size() int { return xxx_messageInfo_Security.Size(m) } func (m *Security) XXX_DiscardUnknown() { xxx_messageInfo_Security.DiscardUnknown(m) } var xxx_messageInfo_Security proto.InternalMessageInfo type isSecurity_Model interface { isSecurity_Model() } type Security_Tls_ struct { Tls *Security_Tls `protobuf:"bytes,1,opt,name=tls,proto3,oneof"` } type Security_Other struct { Other *Security_OtherSecurity `protobuf:"bytes,2,opt,name=other,proto3,oneof"` } func (*Security_Tls_) isSecurity_Model() {} func (*Security_Other) isSecurity_Model() {} func (m *Security) GetModel() isSecurity_Model { if m != nil { return m.Model } return nil } func (m *Security) GetTls() *Security_Tls { if x, ok := m.GetModel().(*Security_Tls_); ok { return x.Tls } return nil } func (m *Security) GetOther() *Security_OtherSecurity { if x, ok := m.GetModel().(*Security_Other); ok { return x.Other } return nil } // XXX_OneofWrappers is for the internal use of the proto package. 
func (*Security) XXX_OneofWrappers() []interface{} { return []interface{}{ (*Security_Tls_)(nil), (*Security_Other)(nil), } } type Security_Tls struct { // Types that are valid to be assigned to CipherSuite: // *Security_Tls_StandardName // *Security_Tls_OtherName CipherSuite isSecurity_Tls_CipherSuite `protobuf_oneof:"cipher_suite"` // the certificate used by this endpoint. LocalCertificate []byte `protobuf:"bytes,3,opt,name=local_certificate,json=localCertificate,proto3" json:"local_certificate,omitempty"` // the certificate used by the remote endpoint. RemoteCertificate []byte `protobuf:"bytes,4,opt,name=remote_certificate,json=remoteCertificate,proto3" json:"remote_certificate,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Security_Tls) Reset() { *m = Security_Tls{} } func (m *Security_Tls) String() string { return proto.CompactTextString(m) } func (*Security_Tls) ProtoMessage() {} func (*Security_Tls) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{15, 0} } func (m *Security_Tls) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Security_Tls.Unmarshal(m, b) } func (m *Security_Tls) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Security_Tls.Marshal(b, m, deterministic) } func (m *Security_Tls) XXX_Merge(src proto.Message) { xxx_messageInfo_Security_Tls.Merge(m, src) } func (m *Security_Tls) XXX_Size() int { return xxx_messageInfo_Security_Tls.Size(m) } func (m *Security_Tls) XXX_DiscardUnknown() { xxx_messageInfo_Security_Tls.DiscardUnknown(m) } var xxx_messageInfo_Security_Tls proto.InternalMessageInfo type isSecurity_Tls_CipherSuite interface { isSecurity_Tls_CipherSuite() } type Security_Tls_StandardName struct { StandardName string `protobuf:"bytes,1,opt,name=standard_name,json=standardName,proto3,oneof"` } type Security_Tls_OtherName struct { OtherName string 
`protobuf:"bytes,2,opt,name=other_name,json=otherName,proto3,oneof"` } func (*Security_Tls_StandardName) isSecurity_Tls_CipherSuite() {} func (*Security_Tls_OtherName) isSecurity_Tls_CipherSuite() {} func (m *Security_Tls) GetCipherSuite() isSecurity_Tls_CipherSuite { if m != nil { return m.CipherSuite } return nil } func (m *Security_Tls) GetStandardName() string { if x, ok := m.GetCipherSuite().(*Security_Tls_StandardName); ok { return x.StandardName } return "" } func (m *Security_Tls) GetOtherName() string { if x, ok := m.GetCipherSuite().(*Security_Tls_OtherName); ok { return x.OtherName } return "" } func (m *Security_Tls) GetLocalCertificate() []byte { if m != nil { return m.LocalCertificate } return nil } func (m *Security_Tls) GetRemoteCertificate() []byte { if m != nil { return m.RemoteCertificate } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*Security_Tls) XXX_OneofWrappers() []interface{} { return []interface{}{ (*Security_Tls_StandardName)(nil), (*Security_Tls_OtherName)(nil), } } type Security_OtherSecurity struct { // The human readable version of the value. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The actual security details message. 
Value *any.Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Security_OtherSecurity) Reset() { *m = Security_OtherSecurity{} } func (m *Security_OtherSecurity) String() string { return proto.CompactTextString(m) } func (*Security_OtherSecurity) ProtoMessage() {} func (*Security_OtherSecurity) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{15, 1} } func (m *Security_OtherSecurity) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Security_OtherSecurity.Unmarshal(m, b) } func (m *Security_OtherSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Security_OtherSecurity.Marshal(b, m, deterministic) } func (m *Security_OtherSecurity) XXX_Merge(src proto.Message) { xxx_messageInfo_Security_OtherSecurity.Merge(m, src) } func (m *Security_OtherSecurity) XXX_Size() int { return xxx_messageInfo_Security_OtherSecurity.Size(m) } func (m *Security_OtherSecurity) XXX_DiscardUnknown() { xxx_messageInfo_Security_OtherSecurity.DiscardUnknown(m) } var xxx_messageInfo_Security_OtherSecurity proto.InternalMessageInfo func (m *Security_OtherSecurity) GetName() string { if m != nil { return m.Name } return "" } func (m *Security_OtherSecurity) GetValue() *any.Any { if m != nil { return m.Value } return nil } // SocketOption represents socket options for a socket. Specifically, these // are the options returned by getsockopt(). type SocketOption struct { // The full name of the socket option. Typically this will be the upper case // name, such as "SO_REUSEPORT". Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The human readable value of this socket option. At least one of value or // additional will be set. Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` // Additional data associated with the socket option. 
At least one of value // or additional will be set. Additional *any.Any `protobuf:"bytes,3,opt,name=additional,proto3" json:"additional,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SocketOption) Reset() { *m = SocketOption{} } func (m *SocketOption) String() string { return proto.CompactTextString(m) } func (*SocketOption) ProtoMessage() {} func (*SocketOption) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{16} } func (m *SocketOption) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SocketOption.Unmarshal(m, b) } func (m *SocketOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SocketOption.Marshal(b, m, deterministic) } func (m *SocketOption) XXX_Merge(src proto.Message) { xxx_messageInfo_SocketOption.Merge(m, src) } func (m *SocketOption) XXX_Size() int { return xxx_messageInfo_SocketOption.Size(m) } func (m *SocketOption) XXX_DiscardUnknown() { xxx_messageInfo_SocketOption.DiscardUnknown(m) } var xxx_messageInfo_SocketOption proto.InternalMessageInfo func (m *SocketOption) GetName() string { if m != nil { return m.Name } return "" } func (m *SocketOption) GetValue() string { if m != nil { return m.Value } return "" } func (m *SocketOption) GetAdditional() *any.Any { if m != nil { return m.Additional } return nil } // For use with SocketOption's additional field. 
This is primarily used for // SO_RCVTIMEO and SO_SNDTIMEO type SocketOptionTimeout struct { Duration *duration.Duration `protobuf:"bytes,1,opt,name=duration,proto3" json:"duration,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SocketOptionTimeout) Reset() { *m = SocketOptionTimeout{} } func (m *SocketOptionTimeout) String() string { return proto.CompactTextString(m) } func (*SocketOptionTimeout) ProtoMessage() {} func (*SocketOptionTimeout) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{17} } func (m *SocketOptionTimeout) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SocketOptionTimeout.Unmarshal(m, b) } func (m *SocketOptionTimeout) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SocketOptionTimeout.Marshal(b, m, deterministic) } func (m *SocketOptionTimeout) XXX_Merge(src proto.Message) { xxx_messageInfo_SocketOptionTimeout.Merge(m, src) } func (m *SocketOptionTimeout) XXX_Size() int { return xxx_messageInfo_SocketOptionTimeout.Size(m) } func (m *SocketOptionTimeout) XXX_DiscardUnknown() { xxx_messageInfo_SocketOptionTimeout.DiscardUnknown(m) } var xxx_messageInfo_SocketOptionTimeout proto.InternalMessageInfo func (m *SocketOptionTimeout) GetDuration() *duration.Duration { if m != nil { return m.Duration } return nil } // For use with SocketOption's additional field. This is primarily used for // SO_LINGER. 
type SocketOptionLinger struct { // active maps to `struct linger.l_onoff` Active bool `protobuf:"varint,1,opt,name=active,proto3" json:"active,omitempty"` // duration maps to `struct linger.l_linger` Duration *duration.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SocketOptionLinger) Reset() { *m = SocketOptionLinger{} } func (m *SocketOptionLinger) String() string { return proto.CompactTextString(m) } func (*SocketOptionLinger) ProtoMessage() {} func (*SocketOptionLinger) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{18} } func (m *SocketOptionLinger) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SocketOptionLinger.Unmarshal(m, b) } func (m *SocketOptionLinger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SocketOptionLinger.Marshal(b, m, deterministic) } func (m *SocketOptionLinger) XXX_Merge(src proto.Message) { xxx_messageInfo_SocketOptionLinger.Merge(m, src) } func (m *SocketOptionLinger) XXX_Size() int { return xxx_messageInfo_SocketOptionLinger.Size(m) } func (m *SocketOptionLinger) XXX_DiscardUnknown() { xxx_messageInfo_SocketOptionLinger.DiscardUnknown(m) } var xxx_messageInfo_SocketOptionLinger proto.InternalMessageInfo func (m *SocketOptionLinger) GetActive() bool { if m != nil { return m.Active } return false } func (m *SocketOptionLinger) GetDuration() *duration.Duration { if m != nil { return m.Duration } return nil } // For use with SocketOption's additional field. Tcp info for // SOL_TCP and TCP_INFO. 
type SocketOptionTcpInfo struct { TcpiState uint32 `protobuf:"varint,1,opt,name=tcpi_state,json=tcpiState,proto3" json:"tcpi_state,omitempty"` TcpiCaState uint32 `protobuf:"varint,2,opt,name=tcpi_ca_state,json=tcpiCaState,proto3" json:"tcpi_ca_state,omitempty"` TcpiRetransmits uint32 `protobuf:"varint,3,opt,name=tcpi_retransmits,json=tcpiRetransmits,proto3" json:"tcpi_retransmits,omitempty"` TcpiProbes uint32 `protobuf:"varint,4,opt,name=tcpi_probes,json=tcpiProbes,proto3" json:"tcpi_probes,omitempty"` TcpiBackoff uint32 `protobuf:"varint,5,opt,name=tcpi_backoff,json=tcpiBackoff,proto3" json:"tcpi_backoff,omitempty"` TcpiOptions uint32 `protobuf:"varint,6,opt,name=tcpi_options,json=tcpiOptions,proto3" json:"tcpi_options,omitempty"` TcpiSndWscale uint32 `protobuf:"varint,7,opt,name=tcpi_snd_wscale,json=tcpiSndWscale,proto3" json:"tcpi_snd_wscale,omitempty"` TcpiRcvWscale uint32 `protobuf:"varint,8,opt,name=tcpi_rcv_wscale,json=tcpiRcvWscale,proto3" json:"tcpi_rcv_wscale,omitempty"` TcpiRto uint32 `protobuf:"varint,9,opt,name=tcpi_rto,json=tcpiRto,proto3" json:"tcpi_rto,omitempty"` TcpiAto uint32 `protobuf:"varint,10,opt,name=tcpi_ato,json=tcpiAto,proto3" json:"tcpi_ato,omitempty"` TcpiSndMss uint32 `protobuf:"varint,11,opt,name=tcpi_snd_mss,json=tcpiSndMss,proto3" json:"tcpi_snd_mss,omitempty"` TcpiRcvMss uint32 `protobuf:"varint,12,opt,name=tcpi_rcv_mss,json=tcpiRcvMss,proto3" json:"tcpi_rcv_mss,omitempty"` TcpiUnacked uint32 `protobuf:"varint,13,opt,name=tcpi_unacked,json=tcpiUnacked,proto3" json:"tcpi_unacked,omitempty"` TcpiSacked uint32 `protobuf:"varint,14,opt,name=tcpi_sacked,json=tcpiSacked,proto3" json:"tcpi_sacked,omitempty"` TcpiLost uint32 `protobuf:"varint,15,opt,name=tcpi_lost,json=tcpiLost,proto3" json:"tcpi_lost,omitempty"` TcpiRetrans uint32 `protobuf:"varint,16,opt,name=tcpi_retrans,json=tcpiRetrans,proto3" json:"tcpi_retrans,omitempty"` TcpiFackets uint32 `protobuf:"varint,17,opt,name=tcpi_fackets,json=tcpiFackets,proto3" 
json:"tcpi_fackets,omitempty"` TcpiLastDataSent uint32 `protobuf:"varint,18,opt,name=tcpi_last_data_sent,json=tcpiLastDataSent,proto3" json:"tcpi_last_data_sent,omitempty"` TcpiLastAckSent uint32 `protobuf:"varint,19,opt,name=tcpi_last_ack_sent,json=tcpiLastAckSent,proto3" json:"tcpi_last_ack_sent,omitempty"` TcpiLastDataRecv uint32 `protobuf:"varint,20,opt,name=tcpi_last_data_recv,json=tcpiLastDataRecv,proto3" json:"tcpi_last_data_recv,omitempty"` TcpiLastAckRecv uint32 `protobuf:"varint,21,opt,name=tcpi_last_ack_recv,json=tcpiLastAckRecv,proto3" json:"tcpi_last_ack_recv,omitempty"` TcpiPmtu uint32 `protobuf:"varint,22,opt,name=tcpi_pmtu,json=tcpiPmtu,proto3" json:"tcpi_pmtu,omitempty"` TcpiRcvSsthresh uint32 `protobuf:"varint,23,opt,name=tcpi_rcv_ssthresh,json=tcpiRcvSsthresh,proto3" json:"tcpi_rcv_ssthresh,omitempty"` TcpiRtt uint32 `protobuf:"varint,24,opt,name=tcpi_rtt,json=tcpiRtt,proto3" json:"tcpi_rtt,omitempty"` TcpiRttvar uint32 `protobuf:"varint,25,opt,name=tcpi_rttvar,json=tcpiRttvar,proto3" json:"tcpi_rttvar,omitempty"` TcpiSndSsthresh uint32 `protobuf:"varint,26,opt,name=tcpi_snd_ssthresh,json=tcpiSndSsthresh,proto3" json:"tcpi_snd_ssthresh,omitempty"` TcpiSndCwnd uint32 `protobuf:"varint,27,opt,name=tcpi_snd_cwnd,json=tcpiSndCwnd,proto3" json:"tcpi_snd_cwnd,omitempty"` TcpiAdvmss uint32 `protobuf:"varint,28,opt,name=tcpi_advmss,json=tcpiAdvmss,proto3" json:"tcpi_advmss,omitempty"` TcpiReordering uint32 `protobuf:"varint,29,opt,name=tcpi_reordering,json=tcpiReordering,proto3" json:"tcpi_reordering,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SocketOptionTcpInfo) Reset() { *m = SocketOptionTcpInfo{} } func (m *SocketOptionTcpInfo) String() string { return proto.CompactTextString(m) } func (*SocketOptionTcpInfo) ProtoMessage() {} func (*SocketOptionTcpInfo) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{19} } func (m *SocketOptionTcpInfo) 
XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SocketOptionTcpInfo.Unmarshal(m, b) } func (m *SocketOptionTcpInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SocketOptionTcpInfo.Marshal(b, m, deterministic) } func (m *SocketOptionTcpInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_SocketOptionTcpInfo.Merge(m, src) } func (m *SocketOptionTcpInfo) XXX_Size() int { return xxx_messageInfo_SocketOptionTcpInfo.Size(m) } func (m *SocketOptionTcpInfo) XXX_DiscardUnknown() { xxx_messageInfo_SocketOptionTcpInfo.DiscardUnknown(m) } var xxx_messageInfo_SocketOptionTcpInfo proto.InternalMessageInfo func (m *SocketOptionTcpInfo) GetTcpiState() uint32 { if m != nil { return m.TcpiState } return 0 } func (m *SocketOptionTcpInfo) GetTcpiCaState() uint32 { if m != nil { return m.TcpiCaState } return 0 } func (m *SocketOptionTcpInfo) GetTcpiRetransmits() uint32 { if m != nil { return m.TcpiRetransmits } return 0 } func (m *SocketOptionTcpInfo) GetTcpiProbes() uint32 { if m != nil { return m.TcpiProbes } return 0 } func (m *SocketOptionTcpInfo) GetTcpiBackoff() uint32 { if m != nil { return m.TcpiBackoff } return 0 } func (m *SocketOptionTcpInfo) GetTcpiOptions() uint32 { if m != nil { return m.TcpiOptions } return 0 } func (m *SocketOptionTcpInfo) GetTcpiSndWscale() uint32 { if m != nil { return m.TcpiSndWscale } return 0 } func (m *SocketOptionTcpInfo) GetTcpiRcvWscale() uint32 { if m != nil { return m.TcpiRcvWscale } return 0 } func (m *SocketOptionTcpInfo) GetTcpiRto() uint32 { if m != nil { return m.TcpiRto } return 0 } func (m *SocketOptionTcpInfo) GetTcpiAto() uint32 { if m != nil { return m.TcpiAto } return 0 } func (m *SocketOptionTcpInfo) GetTcpiSndMss() uint32 { if m != nil { return m.TcpiSndMss } return 0 } func (m *SocketOptionTcpInfo) GetTcpiRcvMss() uint32 { if m != nil { return m.TcpiRcvMss } return 0 } func (m *SocketOptionTcpInfo) GetTcpiUnacked() uint32 { if m != nil { return m.TcpiUnacked } return 0 } func (m 
*SocketOptionTcpInfo) GetTcpiSacked() uint32 { if m != nil { return m.TcpiSacked } return 0 } func (m *SocketOptionTcpInfo) GetTcpiLost() uint32 { if m != nil { return m.TcpiLost } return 0 } func (m *SocketOptionTcpInfo) GetTcpiRetrans() uint32 { if m != nil { return m.TcpiRetrans } return 0 } func (m *SocketOptionTcpInfo) GetTcpiFackets() uint32 { if m != nil { return m.TcpiFackets } return 0 } func (m *SocketOptionTcpInfo) GetTcpiLastDataSent() uint32 { if m != nil { return m.TcpiLastDataSent } return 0 } func (m *SocketOptionTcpInfo) GetTcpiLastAckSent() uint32 { if m != nil { return m.TcpiLastAckSent } return 0 } func (m *SocketOptionTcpInfo) GetTcpiLastDataRecv() uint32 { if m != nil { return m.TcpiLastDataRecv } return 0 } func (m *SocketOptionTcpInfo) GetTcpiLastAckRecv() uint32 { if m != nil { return m.TcpiLastAckRecv } return 0 } func (m *SocketOptionTcpInfo) GetTcpiPmtu() uint32 { if m != nil { return m.TcpiPmtu } return 0 } func (m *SocketOptionTcpInfo) GetTcpiRcvSsthresh() uint32 { if m != nil { return m.TcpiRcvSsthresh } return 0 } func (m *SocketOptionTcpInfo) GetTcpiRtt() uint32 { if m != nil { return m.TcpiRtt } return 0 } func (m *SocketOptionTcpInfo) GetTcpiRttvar() uint32 { if m != nil { return m.TcpiRttvar } return 0 } func (m *SocketOptionTcpInfo) GetTcpiSndSsthresh() uint32 { if m != nil { return m.TcpiSndSsthresh } return 0 } func (m *SocketOptionTcpInfo) GetTcpiSndCwnd() uint32 { if m != nil { return m.TcpiSndCwnd } return 0 } func (m *SocketOptionTcpInfo) GetTcpiAdvmss() uint32 { if m != nil { return m.TcpiAdvmss } return 0 } func (m *SocketOptionTcpInfo) GetTcpiReordering() uint32 { if m != nil { return m.TcpiReordering } return 0 } type GetTopChannelsRequest struct { // start_channel_id indicates that only channels at or above this id should be // included in the results. // To request the first page, this should be set to 0. To request // subsequent pages, the client generates this value by adding 1 to // the highest seen result ID. 
StartChannelId int64 `protobuf:"varint,1,opt,name=start_channel_id,json=startChannelId,proto3" json:"start_channel_id,omitempty"` // If non-zero, the server will return a page of results containing // at most this many items. If zero, the server will choose a // reasonable page size. Must never be negative. MaxResults int64 `protobuf:"varint,2,opt,name=max_results,json=maxResults,proto3" json:"max_results,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetTopChannelsRequest) Reset() { *m = GetTopChannelsRequest{} } func (m *GetTopChannelsRequest) String() string { return proto.CompactTextString(m) } func (*GetTopChannelsRequest) ProtoMessage() {} func (*GetTopChannelsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{20} } func (m *GetTopChannelsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetTopChannelsRequest.Unmarshal(m, b) } func (m *GetTopChannelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetTopChannelsRequest.Marshal(b, m, deterministic) } func (m *GetTopChannelsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetTopChannelsRequest.Merge(m, src) } func (m *GetTopChannelsRequest) XXX_Size() int { return xxx_messageInfo_GetTopChannelsRequest.Size(m) } func (m *GetTopChannelsRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetTopChannelsRequest.DiscardUnknown(m) } var xxx_messageInfo_GetTopChannelsRequest proto.InternalMessageInfo func (m *GetTopChannelsRequest) GetStartChannelId() int64 { if m != nil { return m.StartChannelId } return 0 } func (m *GetTopChannelsRequest) GetMaxResults() int64 { if m != nil { return m.MaxResults } return 0 } type GetTopChannelsResponse struct { // list of channels that the connection detail service knows about. Sorted in // ascending channel_id order. // Must contain at least 1 result, otherwise 'end' must be true. 
Channel []*Channel `protobuf:"bytes,1,rep,name=channel,proto3" json:"channel,omitempty"` // If set, indicates that the list of channels is the final list. Requesting // more channels can only return more if they are created after this RPC // completes. End bool `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetTopChannelsResponse) Reset() { *m = GetTopChannelsResponse{} } func (m *GetTopChannelsResponse) String() string { return proto.CompactTextString(m) } func (*GetTopChannelsResponse) ProtoMessage() {} func (*GetTopChannelsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{21} } func (m *GetTopChannelsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetTopChannelsResponse.Unmarshal(m, b) } func (m *GetTopChannelsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetTopChannelsResponse.Marshal(b, m, deterministic) } func (m *GetTopChannelsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetTopChannelsResponse.Merge(m, src) } func (m *GetTopChannelsResponse) XXX_Size() int { return xxx_messageInfo_GetTopChannelsResponse.Size(m) } func (m *GetTopChannelsResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetTopChannelsResponse.DiscardUnknown(m) } var xxx_messageInfo_GetTopChannelsResponse proto.InternalMessageInfo func (m *GetTopChannelsResponse) GetChannel() []*Channel { if m != nil { return m.Channel } return nil } func (m *GetTopChannelsResponse) GetEnd() bool { if m != nil { return m.End } return false } type GetServersRequest struct { // start_server_id indicates that only servers at or above this id should be // included in the results. // To request the first page, this must be set to 0. To request // subsequent pages, the client generates this value by adding 1 to // the highest seen result ID. 
StartServerId int64 `protobuf:"varint,1,opt,name=start_server_id,json=startServerId,proto3" json:"start_server_id,omitempty"` // If non-zero, the server will return a page of results containing // at most this many items. If zero, the server will choose a // reasonable page size. Must never be negative. MaxResults int64 `protobuf:"varint,2,opt,name=max_results,json=maxResults,proto3" json:"max_results,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetServersRequest) Reset() { *m = GetServersRequest{} } func (m *GetServersRequest) String() string { return proto.CompactTextString(m) } func (*GetServersRequest) ProtoMessage() {} func (*GetServersRequest) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{22} } func (m *GetServersRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetServersRequest.Unmarshal(m, b) } func (m *GetServersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetServersRequest.Marshal(b, m, deterministic) } func (m *GetServersRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetServersRequest.Merge(m, src) } func (m *GetServersRequest) XXX_Size() int { return xxx_messageInfo_GetServersRequest.Size(m) } func (m *GetServersRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetServersRequest.DiscardUnknown(m) } var xxx_messageInfo_GetServersRequest proto.InternalMessageInfo func (m *GetServersRequest) GetStartServerId() int64 { if m != nil { return m.StartServerId } return 0 } func (m *GetServersRequest) GetMaxResults() int64 { if m != nil { return m.MaxResults } return 0 } type GetServersResponse struct { // list of servers that the connection detail service knows about. Sorted in // ascending server_id order. // Must contain at least 1 result, otherwise 'end' must be true. 
Server []*Server `protobuf:"bytes,1,rep,name=server,proto3" json:"server,omitempty"` // If set, indicates that the list of servers is the final list. Requesting // more servers will only return more if they are created after this RPC // completes. End bool `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetServersResponse) Reset() { *m = GetServersResponse{} } func (m *GetServersResponse) String() string { return proto.CompactTextString(m) } func (*GetServersResponse) ProtoMessage() {} func (*GetServersResponse) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{23} } func (m *GetServersResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetServersResponse.Unmarshal(m, b) } func (m *GetServersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetServersResponse.Marshal(b, m, deterministic) } func (m *GetServersResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetServersResponse.Merge(m, src) } func (m *GetServersResponse) XXX_Size() int { return xxx_messageInfo_GetServersResponse.Size(m) } func (m *GetServersResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetServersResponse.DiscardUnknown(m) } var xxx_messageInfo_GetServersResponse proto.InternalMessageInfo func (m *GetServersResponse) GetServer() []*Server { if m != nil { return m.Server } return nil } func (m *GetServersResponse) GetEnd() bool { if m != nil { return m.End } return false } type GetServerRequest struct { // server_id is the identifier of the specific server to get. 
ServerId int64 `protobuf:"varint,1,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetServerRequest) Reset() { *m = GetServerRequest{} } func (m *GetServerRequest) String() string { return proto.CompactTextString(m) } func (*GetServerRequest) ProtoMessage() {} func (*GetServerRequest) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{24} } func (m *GetServerRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetServerRequest.Unmarshal(m, b) } func (m *GetServerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetServerRequest.Marshal(b, m, deterministic) } func (m *GetServerRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetServerRequest.Merge(m, src) } func (m *GetServerRequest) XXX_Size() int { return xxx_messageInfo_GetServerRequest.Size(m) } func (m *GetServerRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetServerRequest.DiscardUnknown(m) } var xxx_messageInfo_GetServerRequest proto.InternalMessageInfo func (m *GetServerRequest) GetServerId() int64 { if m != nil { return m.ServerId } return 0 } type GetServerResponse struct { // The Server that corresponds to the requested server_id. This field // should be set. 
Server *Server `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetServerResponse) Reset() { *m = GetServerResponse{} } func (m *GetServerResponse) String() string { return proto.CompactTextString(m) } func (*GetServerResponse) ProtoMessage() {} func (*GetServerResponse) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{25} } func (m *GetServerResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetServerResponse.Unmarshal(m, b) } func (m *GetServerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetServerResponse.Marshal(b, m, deterministic) } func (m *GetServerResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetServerResponse.Merge(m, src) } func (m *GetServerResponse) XXX_Size() int { return xxx_messageInfo_GetServerResponse.Size(m) } func (m *GetServerResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetServerResponse.DiscardUnknown(m) } var xxx_messageInfo_GetServerResponse proto.InternalMessageInfo func (m *GetServerResponse) GetServer() *Server { if m != nil { return m.Server } return nil } type GetServerSocketsRequest struct { ServerId int64 `protobuf:"varint,1,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"` // start_socket_id indicates that only sockets at or above this id should be // included in the results. // To request the first page, this must be set to 0. To request // subsequent pages, the client generates this value by adding 1 to // the highest seen result ID. StartSocketId int64 `protobuf:"varint,2,opt,name=start_socket_id,json=startSocketId,proto3" json:"start_socket_id,omitempty"` // If non-zero, the server will return a page of results containing // at most this many items. If zero, the server will choose a // reasonable page size. Must never be negative. 
MaxResults int64 `protobuf:"varint,3,opt,name=max_results,json=maxResults,proto3" json:"max_results,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetServerSocketsRequest) Reset() { *m = GetServerSocketsRequest{} } func (m *GetServerSocketsRequest) String() string { return proto.CompactTextString(m) } func (*GetServerSocketsRequest) ProtoMessage() {} func (*GetServerSocketsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{26} } func (m *GetServerSocketsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetServerSocketsRequest.Unmarshal(m, b) } func (m *GetServerSocketsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetServerSocketsRequest.Marshal(b, m, deterministic) } func (m *GetServerSocketsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetServerSocketsRequest.Merge(m, src) } func (m *GetServerSocketsRequest) XXX_Size() int { return xxx_messageInfo_GetServerSocketsRequest.Size(m) } func (m *GetServerSocketsRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetServerSocketsRequest.DiscardUnknown(m) } var xxx_messageInfo_GetServerSocketsRequest proto.InternalMessageInfo func (m *GetServerSocketsRequest) GetServerId() int64 { if m != nil { return m.ServerId } return 0 } func (m *GetServerSocketsRequest) GetStartSocketId() int64 { if m != nil { return m.StartSocketId } return 0 } func (m *GetServerSocketsRequest) GetMaxResults() int64 { if m != nil { return m.MaxResults } return 0 } type GetServerSocketsResponse struct { // list of socket refs that the connection detail service knows about. Sorted in // ascending socket_id order. // Must contain at least 1 result, otherwise 'end' must be true. SocketRef []*SocketRef `protobuf:"bytes,1,rep,name=socket_ref,json=socketRef,proto3" json:"socket_ref,omitempty"` // If set, indicates that the list of sockets is the final list. 
Requesting // more sockets will only return more if they are created after this RPC // completes. End bool `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetServerSocketsResponse) Reset() { *m = GetServerSocketsResponse{} } func (m *GetServerSocketsResponse) String() string { return proto.CompactTextString(m) } func (*GetServerSocketsResponse) ProtoMessage() {} func (*GetServerSocketsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{27} } func (m *GetServerSocketsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetServerSocketsResponse.Unmarshal(m, b) } func (m *GetServerSocketsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetServerSocketsResponse.Marshal(b, m, deterministic) } func (m *GetServerSocketsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetServerSocketsResponse.Merge(m, src) } func (m *GetServerSocketsResponse) XXX_Size() int { return xxx_messageInfo_GetServerSocketsResponse.Size(m) } func (m *GetServerSocketsResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetServerSocketsResponse.DiscardUnknown(m) } var xxx_messageInfo_GetServerSocketsResponse proto.InternalMessageInfo func (m *GetServerSocketsResponse) GetSocketRef() []*SocketRef { if m != nil { return m.SocketRef } return nil } func (m *GetServerSocketsResponse) GetEnd() bool { if m != nil { return m.End } return false } type GetChannelRequest struct { // channel_id is the identifier of the specific channel to get. 
ChannelId int64 `protobuf:"varint,1,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetChannelRequest) Reset() { *m = GetChannelRequest{} } func (m *GetChannelRequest) String() string { return proto.CompactTextString(m) } func (*GetChannelRequest) ProtoMessage() {} func (*GetChannelRequest) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{28} } func (m *GetChannelRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetChannelRequest.Unmarshal(m, b) } func (m *GetChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetChannelRequest.Marshal(b, m, deterministic) } func (m *GetChannelRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetChannelRequest.Merge(m, src) } func (m *GetChannelRequest) XXX_Size() int { return xxx_messageInfo_GetChannelRequest.Size(m) } func (m *GetChannelRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetChannelRequest.DiscardUnknown(m) } var xxx_messageInfo_GetChannelRequest proto.InternalMessageInfo func (m *GetChannelRequest) GetChannelId() int64 { if m != nil { return m.ChannelId } return 0 } type GetChannelResponse struct { // The Channel that corresponds to the requested channel_id. This field // should be set. 
Channel *Channel `protobuf:"bytes,1,opt,name=channel,proto3" json:"channel,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetChannelResponse) Reset() { *m = GetChannelResponse{} } func (m *GetChannelResponse) String() string { return proto.CompactTextString(m) } func (*GetChannelResponse) ProtoMessage() {} func (*GetChannelResponse) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{29} } func (m *GetChannelResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetChannelResponse.Unmarshal(m, b) } func (m *GetChannelResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetChannelResponse.Marshal(b, m, deterministic) } func (m *GetChannelResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetChannelResponse.Merge(m, src) } func (m *GetChannelResponse) XXX_Size() int { return xxx_messageInfo_GetChannelResponse.Size(m) } func (m *GetChannelResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetChannelResponse.DiscardUnknown(m) } var xxx_messageInfo_GetChannelResponse proto.InternalMessageInfo func (m *GetChannelResponse) GetChannel() *Channel { if m != nil { return m.Channel } return nil } type GetSubchannelRequest struct { // subchannel_id is the identifier of the specific subchannel to get. 
SubchannelId int64 `protobuf:"varint,1,opt,name=subchannel_id,json=subchannelId,proto3" json:"subchannel_id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetSubchannelRequest) Reset() { *m = GetSubchannelRequest{} } func (m *GetSubchannelRequest) String() string { return proto.CompactTextString(m) } func (*GetSubchannelRequest) ProtoMessage() {} func (*GetSubchannelRequest) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{30} } func (m *GetSubchannelRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetSubchannelRequest.Unmarshal(m, b) } func (m *GetSubchannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetSubchannelRequest.Marshal(b, m, deterministic) } func (m *GetSubchannelRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetSubchannelRequest.Merge(m, src) } func (m *GetSubchannelRequest) XXX_Size() int { return xxx_messageInfo_GetSubchannelRequest.Size(m) } func (m *GetSubchannelRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetSubchannelRequest.DiscardUnknown(m) } var xxx_messageInfo_GetSubchannelRequest proto.InternalMessageInfo func (m *GetSubchannelRequest) GetSubchannelId() int64 { if m != nil { return m.SubchannelId } return 0 } type GetSubchannelResponse struct { // The Subchannel that corresponds to the requested subchannel_id. This // field should be set. 
Subchannel *Subchannel `protobuf:"bytes,1,opt,name=subchannel,proto3" json:"subchannel,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetSubchannelResponse) Reset() { *m = GetSubchannelResponse{} } func (m *GetSubchannelResponse) String() string { return proto.CompactTextString(m) } func (*GetSubchannelResponse) ProtoMessage() {} func (*GetSubchannelResponse) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{31} } func (m *GetSubchannelResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetSubchannelResponse.Unmarshal(m, b) } func (m *GetSubchannelResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetSubchannelResponse.Marshal(b, m, deterministic) } func (m *GetSubchannelResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetSubchannelResponse.Merge(m, src) } func (m *GetSubchannelResponse) XXX_Size() int { return xxx_messageInfo_GetSubchannelResponse.Size(m) } func (m *GetSubchannelResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetSubchannelResponse.DiscardUnknown(m) } var xxx_messageInfo_GetSubchannelResponse proto.InternalMessageInfo func (m *GetSubchannelResponse) GetSubchannel() *Subchannel { if m != nil { return m.Subchannel } return nil } type GetSocketRequest struct { // socket_id is the identifier of the specific socket to get. SocketId int64 `protobuf:"varint,1,opt,name=socket_id,json=socketId,proto3" json:"socket_id,omitempty"` // If true, the response will contain only high level information // that is inexpensive to obtain. Fields thay may be omitted are // documented. 
Summary bool `protobuf:"varint,2,opt,name=summary,proto3" json:"summary,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetSocketRequest) Reset() { *m = GetSocketRequest{} } func (m *GetSocketRequest) String() string { return proto.CompactTextString(m) } func (*GetSocketRequest) ProtoMessage() {} func (*GetSocketRequest) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{32} } func (m *GetSocketRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetSocketRequest.Unmarshal(m, b) } func (m *GetSocketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetSocketRequest.Marshal(b, m, deterministic) } func (m *GetSocketRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetSocketRequest.Merge(m, src) } func (m *GetSocketRequest) XXX_Size() int { return xxx_messageInfo_GetSocketRequest.Size(m) } func (m *GetSocketRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetSocketRequest.DiscardUnknown(m) } var xxx_messageInfo_GetSocketRequest proto.InternalMessageInfo func (m *GetSocketRequest) GetSocketId() int64 { if m != nil { return m.SocketId } return 0 } func (m *GetSocketRequest) GetSummary() bool { if m != nil { return m.Summary } return false } type GetSocketResponse struct { // The Socket that corresponds to the requested socket_id. This field // should be set. 
Socket *Socket `protobuf:"bytes,1,opt,name=socket,proto3" json:"socket,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetSocketResponse) Reset() { *m = GetSocketResponse{} } func (m *GetSocketResponse) String() string { return proto.CompactTextString(m) } func (*GetSocketResponse) ProtoMessage() {} func (*GetSocketResponse) Descriptor() ([]byte, []int) { return fileDescriptor_6ee37dfd35a8ab00, []int{33} } func (m *GetSocketResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetSocketResponse.Unmarshal(m, b) } func (m *GetSocketResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetSocketResponse.Marshal(b, m, deterministic) } func (m *GetSocketResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetSocketResponse.Merge(m, src) } func (m *GetSocketResponse) XXX_Size() int { return xxx_messageInfo_GetSocketResponse.Size(m) } func (m *GetSocketResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetSocketResponse.DiscardUnknown(m) } var xxx_messageInfo_GetSocketResponse proto.InternalMessageInfo func (m *GetSocketResponse) GetSocket() *Socket { if m != nil { return m.Socket } return nil } func init() { proto.RegisterEnum("grpc.channelz.v1.ChannelConnectivityState_State", ChannelConnectivityState_State_name, ChannelConnectivityState_State_value) proto.RegisterEnum("grpc.channelz.v1.ChannelTraceEvent_Severity", ChannelTraceEvent_Severity_name, ChannelTraceEvent_Severity_value) proto.RegisterType((*Channel)(nil), "grpc.channelz.v1.Channel") proto.RegisterType((*Subchannel)(nil), "grpc.channelz.v1.Subchannel") proto.RegisterType((*ChannelConnectivityState)(nil), "grpc.channelz.v1.ChannelConnectivityState") proto.RegisterType((*ChannelData)(nil), "grpc.channelz.v1.ChannelData") proto.RegisterType((*ChannelTraceEvent)(nil), "grpc.channelz.v1.ChannelTraceEvent") proto.RegisterType((*ChannelTrace)(nil), "grpc.channelz.v1.ChannelTrace") 
proto.RegisterType((*ChannelRef)(nil), "grpc.channelz.v1.ChannelRef") proto.RegisterType((*SubchannelRef)(nil), "grpc.channelz.v1.SubchannelRef") proto.RegisterType((*SocketRef)(nil), "grpc.channelz.v1.SocketRef") proto.RegisterType((*ServerRef)(nil), "grpc.channelz.v1.ServerRef") proto.RegisterType((*Server)(nil), "grpc.channelz.v1.Server") proto.RegisterType((*ServerData)(nil), "grpc.channelz.v1.ServerData") proto.RegisterType((*Socket)(nil), "grpc.channelz.v1.Socket") proto.RegisterType((*SocketData)(nil), "grpc.channelz.v1.SocketData") proto.RegisterType((*Address)(nil), "grpc.channelz.v1.Address") proto.RegisterType((*Address_TcpIpAddress)(nil), "grpc.channelz.v1.Address.TcpIpAddress") proto.RegisterType((*Address_UdsAddress)(nil), "grpc.channelz.v1.Address.UdsAddress") proto.RegisterType((*Address_OtherAddress)(nil), "grpc.channelz.v1.Address.OtherAddress") proto.RegisterType((*Security)(nil), "grpc.channelz.v1.Security") proto.RegisterType((*Security_Tls)(nil), "grpc.channelz.v1.Security.Tls") proto.RegisterType((*Security_OtherSecurity)(nil), "grpc.channelz.v1.Security.OtherSecurity") proto.RegisterType((*SocketOption)(nil), "grpc.channelz.v1.SocketOption") proto.RegisterType((*SocketOptionTimeout)(nil), "grpc.channelz.v1.SocketOptionTimeout") proto.RegisterType((*SocketOptionLinger)(nil), "grpc.channelz.v1.SocketOptionLinger") proto.RegisterType((*SocketOptionTcpInfo)(nil), "grpc.channelz.v1.SocketOptionTcpInfo") proto.RegisterType((*GetTopChannelsRequest)(nil), "grpc.channelz.v1.GetTopChannelsRequest") proto.RegisterType((*GetTopChannelsResponse)(nil), "grpc.channelz.v1.GetTopChannelsResponse") proto.RegisterType((*GetServersRequest)(nil), "grpc.channelz.v1.GetServersRequest") proto.RegisterType((*GetServersResponse)(nil), "grpc.channelz.v1.GetServersResponse") proto.RegisterType((*GetServerRequest)(nil), "grpc.channelz.v1.GetServerRequest") proto.RegisterType((*GetServerResponse)(nil), "grpc.channelz.v1.GetServerResponse") 
proto.RegisterType((*GetServerSocketsRequest)(nil), "grpc.channelz.v1.GetServerSocketsRequest") proto.RegisterType((*GetServerSocketsResponse)(nil), "grpc.channelz.v1.GetServerSocketsResponse") proto.RegisterType((*GetChannelRequest)(nil), "grpc.channelz.v1.GetChannelRequest") proto.RegisterType((*GetChannelResponse)(nil), "grpc.channelz.v1.GetChannelResponse") proto.RegisterType((*GetSubchannelRequest)(nil), "grpc.channelz.v1.GetSubchannelRequest") proto.RegisterType((*GetSubchannelResponse)(nil), "grpc.channelz.v1.GetSubchannelResponse") proto.RegisterType((*GetSocketRequest)(nil), "grpc.channelz.v1.GetSocketRequest") proto.RegisterType((*GetSocketResponse)(nil), "grpc.channelz.v1.GetSocketResponse") } func init() { proto.RegisterFile("grpc/channelz/v1/channelz.proto", fileDescriptor_6ee37dfd35a8ab00) } var fileDescriptor_6ee37dfd35a8ab00 = []byte{ // 2584 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x59, 0x4b, 0x6f, 0xdb, 0xd8, 0xf5, 0xb7, 0xde, 0xd4, 0xd1, 0x23, 0xf2, 0x4d, 0x26, 0x43, 0x2b, 0x99, 0xb1, 0xff, 0xf4, 0x4c, 0xc6, 0x93, 0xfc, 0x23, 0xc7, 0x9e, 0x34, 0x28, 0x3a, 0x2d, 0x3a, 0xb6, 0x62, 0xc7, 0x72, 0x1d, 0x39, 0xa0, 0xe4, 0x49, 0xa6, 0x28, 0xca, 0xa1, 0xc9, 0x6b, 0x99, 0x35, 0x45, 0xaa, 0xbc, 0x57, 0xf2, 0x24, 0x9b, 0x2e, 0xba, 0xef, 0xb2, 0x28, 0xfa, 0x01, 0xba, 0xe9, 0xa2, 0x40, 0x81, 0x02, 0xed, 0xb6, 0xdf, 0xa6, 0xdf, 0xa2, 0xb8, 0x0f, 0x3e, 0xf4, 0xb2, 0x14, 0x64, 0xd9, 0x8d, 0x21, 0x1e, 0xfe, 0xce, 0xef, 0x9c, 0x7b, 0x5e, 0xf7, 0xf2, 0x1a, 0xd6, 0x7b, 0xc1, 0xc0, 0xda, 0xb6, 0x2e, 0x4d, 0xcf, 0xc3, 0xee, 0xbb, 0xed, 0xd1, 0x4e, 0xf4, 0xbb, 0x31, 0x08, 0x7c, 0xea, 0xa3, 0x1a, 0x03, 0x34, 0x22, 0xe1, 0x68, 0xa7, 0xbe, 0xd6, 0xf3, 0xfd, 0x9e, 0x8b, 0xb7, 0xf9, 0xfb, 0xf3, 0xe1, 0xc5, 0xb6, 0xe9, 0xbd, 0x15, 0xe0, 0xfa, 0xa7, 0x93, 0xaf, 0xec, 0x61, 0x60, 0x52, 0xc7, 0xf7, 0xe4, 0xfb, 0xf5, 0xc9, 0xf7, 0xd4, 0xe9, 0x63, 0x42, 0xcd, 0xfe, 0x60, 0x1e, 0xc1, 0x75, 0x60, 0x0e, 0x06, 0x38, 
0x20, 0xe2, 0xbd, 0xf6, 0xb7, 0x34, 0x14, 0x9a, 0xc2, 0x17, 0xd4, 0x80, 0x4c, 0x80, 0x2f, 0xd4, 0xd4, 0x46, 0x6a, 0xab, 0xb4, 0x7b, 0xbf, 0x31, 0xe9, 0x67, 0x43, 0xe2, 0x74, 0x7c, 0xa1, 0x33, 0x20, 0xda, 0x81, 0xac, 0x6d, 0x52, 0x53, 0x4d, 0x73, 0x85, 0x4f, 0xe6, 0x2a, 0x3c, 0x37, 0xa9, 0xa9, 0x73, 0x28, 0xfa, 0x19, 0x94, 0x24, 0xc0, 0x60, 0xa6, 0x32, 0x1b, 0x99, 0x85, 0xa6, 0xc0, 0x8a, 0x7e, 0xa3, 0x43, 0xa8, 0x92, 0xe1, 0x79, 0x92, 0x21, 0xcb, 0x19, 0xd6, 0xa7, 0x19, 0x3a, 0x11, 0x8e, 0x91, 0x54, 0x48, 0xf2, 0x11, 0xfd, 0x04, 0x80, 0xf8, 0xd6, 0x15, 0xa6, 0x9c, 0x23, 0xc7, 0x39, 0xee, 0xcd, 0xe0, 0xe0, 0x18, 0xa6, 0x5f, 0x24, 0xe1, 0x4f, 0xed, 0x1f, 0x69, 0x80, 0x98, 0x1c, 0xed, 0x24, 0x83, 0xb6, 0xd0, 0x8f, 0xff, 0xe1, 0xb8, 0xfd, 0x3b, 0x05, 0xaa, 0x74, 0xaf, 0xe9, 0x7b, 0x1e, 0xb6, 0xa8, 0x33, 0x72, 0xe8, 0xdb, 0x0e, 0x35, 0x29, 0x46, 0x87, 0x90, 0x23, 0xec, 0x07, 0x8f, 0x63, 0x75, 0xf7, 0xc9, 0xdc, 0x95, 0x4d, 0xa9, 0x36, 0xf8, 0x5f, 0x5d, 0xa8, 0x6b, 0xbf, 0x86, 0x9c, 0x20, 0x2c, 0x41, 0xe1, 0xac, 0xfd, 0x8b, 0xf6, 0xe9, 0xeb, 0x76, 0x6d, 0x05, 0x29, 0x90, 0x6d, 0x3d, 0x3f, 0x39, 0xa8, 0xa5, 0x50, 0x15, 0xa0, 0x79, 0xda, 0x6e, 0x1f, 0x34, 0xbb, 0xad, 0xf6, 0x8b, 0x5a, 0x1a, 0x15, 0x21, 0xa7, 0x1f, 0xec, 0x3d, 0xff, 0xae, 0x96, 0x41, 0x1f, 0xc1, 0x6a, 0x57, 0xdf, 0x6b, 0x77, 0x5a, 0x07, 0xed, 0xae, 0x71, 0xb8, 0xd7, 0x3a, 0x39, 0xd3, 0x0f, 0x6a, 0x59, 0x54, 0x06, 0xa5, 0x73, 0x74, 0xd6, 0x7d, 0xce, 0x98, 0x72, 0xda, 0x7f, 0xd2, 0x50, 0x4a, 0x64, 0x07, 0x7d, 0x93, 0xf4, 0xbb, 0xb4, 0xfb, 0x70, 0x79, 0xbf, 0xa5, 0xc7, 0xe8, 0x2e, 0xe4, 0xa9, 0x19, 0xf4, 0x30, 0xe5, 0xe5, 0x50, 0xd4, 0xe5, 0x13, 0x7a, 0x0a, 0x39, 0x1a, 0x98, 0x16, 0x56, 0x33, 0x9c, 0xf9, 0xd3, 0xb9, 0xcc, 0x5d, 0x86, 0xd2, 0x05, 0x18, 0x6d, 0x42, 0xc5, 0x32, 0x5d, 0x97, 0x18, 0x84, 0x9a, 0x01, 0xc5, 0xb6, 0x9a, 0xdd, 0x48, 0x6d, 0x65, 0xf4, 0x32, 0x17, 0x76, 0x84, 0x0c, 0x7d, 0x01, 0xb7, 0x24, 0x68, 0x68, 0x59, 0x18, 0xdb, 0xd8, 0x56, 0x73, 0x1c, 0x56, 0x15, 0xb0, 0x50, 0x8a, 0xfe, 0x0f, 0x84, 
0xa2, 0x71, 0x61, 0x3a, 0x2e, 0xb6, 0xd5, 0x3c, 0x47, 0x95, 0xb8, 0xec, 0x90, 0x8b, 0xd0, 0x77, 0x70, 0xcf, 0x35, 0x09, 0x35, 0x98, 0x2c, 0x34, 0x6a, 0x44, 0x43, 0x48, 0x2d, 0x70, 0xe7, 0xeb, 0x0d, 0x31, 0x85, 0x1a, 0xe1, 0x14, 0x6a, 0x74, 0x43, 0x84, 0xae, 0x32, 0xf5, 0xa6, 0xe9, 0xba, 0xd2, 0xbb, 0xe8, 0x8d, 0xf6, 0xa7, 0x0c, 0xac, 0x26, 0xd7, 0x78, 0x30, 0xc2, 0x1e, 0x45, 0x1b, 0x50, 0xb2, 0x31, 0xb1, 0x02, 0x67, 0xc0, 0xc6, 0x20, 0x8f, 0x7b, 0x51, 0x4f, 0x8a, 0xd0, 0x11, 0x28, 0x04, 0x8f, 0x70, 0xe0, 0xd0, 0xb7, 0x3c, 0xa6, 0xd5, 0xdd, 0xff, 0xbf, 0x39, 0x78, 0x9c, 0xb8, 0xd1, 0x91, 0x3a, 0x7a, 0xa4, 0x8d, 0x7e, 0x0c, 0xc5, 0x78, 0x29, 0x99, 0x85, 0x4b, 0x89, 0xc1, 0xe8, 0xe7, 0xe3, 0xfd, 0x9a, 0x5d, 0x3c, 0x52, 0x8f, 0x56, 0xc6, 0x3a, 0xf6, 0x68, 0xaa, 0x63, 0x73, 0x4b, 0x4d, 0x98, 0xa3, 0x95, 0x89, 0x9e, 0xd5, 0x0e, 0x40, 0x09, 0x97, 0xc6, 0xcb, 0xbf, 0x6b, 0xc4, 0x8d, 0x51, 0x82, 0x42, 0xb3, 0x6b, 0xb4, 0xda, 0x87, 0xa7, 0xb2, 0x37, 0xba, 0xc6, 0xeb, 0x3d, 0xbd, 0x2d, 0x7a, 0xa3, 0x0c, 0x4a, 0xb3, 0x6b, 0x1c, 0xe8, 0xfa, 0xa9, 0x5e, 0xcb, 0xec, 0x97, 0xa0, 0x68, 0x5d, 0x3a, 0xae, 0xcd, 0x7c, 0x61, 0xbd, 0x5c, 0x4e, 0x46, 0x10, 0x3d, 0x84, 0x55, 0x6f, 0xd8, 0x37, 0x30, 0x8b, 0x24, 0x31, 0x5c, 0xbf, 0xd7, 0xc3, 0x36, 0xcf, 0x4d, 0x46, 0xbf, 0xe5, 0x0d, 0xfb, 0x3c, 0xc2, 0xe4, 0x84, 0x8b, 0x51, 0x0b, 0x90, 0x15, 0x60, 0xbe, 0x8b, 0x25, 0x2a, 0x25, 0xbd, 0x30, 0xbc, 0xab, 0xa1, 0x56, 0x24, 0x42, 0x5f, 0x43, 0x5e, 0x98, 0x94, 0x13, 0x71, 0x73, 0x89, 0x44, 0xeb, 0x52, 0x45, 0xb3, 0x00, 0xe2, 0xf0, 0xa3, 0x4f, 0x20, 0x0c, 0xbf, 0xe1, 0x84, 0xae, 0x17, 0xa5, 0xa4, 0x65, 0x23, 0x04, 0x59, 0xcf, 0xec, 0x63, 0xd9, 0xa4, 0xfc, 0xf7, 0x71, 0x56, 0xc9, 0xd4, 0xb2, 0xc7, 0x59, 0x25, 0x5b, 0xcb, 0x1d, 0x67, 0x95, 0x5c, 0x2d, 0x7f, 0x9c, 0x55, 0xf2, 0xb5, 0xc2, 0x71, 0x56, 0x29, 0xd4, 0x94, 0xe3, 0xac, 0xa2, 0xd4, 0x8a, 0x9a, 0x0b, 0x95, 0xb1, 0xfc, 0xb0, 0x0e, 0x4d, 0x24, 0xd6, 0xb1, 0x79, 0x8b, 0x64, 0xf4, 0x72, 0x2c, 0x4c, 0x58, 0x53, 0xc6, 0xac, 0xa5, 0x6a, 0xe9, 0xe3, 
0xac, 0x92, 0xae, 0x65, 0xe6, 0x59, 0xd6, 0xbe, 0x87, 0x62, 0x34, 0x7b, 0xd1, 0x3d, 0x90, 0xd3, 0x97, 0x59, 0xc9, 0x70, 0x2b, 0x8a, 0x10, 0x24, 0x2c, 0x64, 0xe7, 0x5a, 0x98, 0xbd, 0x1e, 0x66, 0x01, 0x07, 0x23, 0x1c, 0x84, 0x16, 0xf8, 0x03, 0xb3, 0x90, 0x93, 0x16, 0xb8, 0x20, 0x61, 0x21, 0xbf, 0xd4, 0x1a, 0x62, 0x0b, 0x7f, 0x4d, 0x41, 0x5e, 0x98, 0x40, 0x8f, 0x93, 0x7b, 0xeb, 0xac, 0x7d, 0x26, 0xf4, 0x44, 0xec, 0xab, 0x4f, 0xc6, 0xf6, 0xd5, 0xfb, 0xf3, 0xf0, 0x89, 0x6d, 0xf5, 0x1b, 0xa8, 0xb8, 0x0e, 0xa1, 0xd8, 0x33, 0x44, 0x60, 0x64, 0x19, 0xdd, 0xb8, 0xa5, 0x95, 0x85, 0x86, 0x10, 0x68, 0x7f, 0x60, 0xa7, 0x81, 0x88, 0x36, 0x9e, 0xda, 0xa9, 0x0f, 0x9a, 0xda, 0xe9, 0xe5, 0xa6, 0x76, 0x66, 0xa9, 0xa9, 0x9d, 0x7d, 0xef, 0xa9, 0x9d, 0xfb, 0x80, 0xa9, 0xfd, 0x97, 0x34, 0xe4, 0x45, 0x6c, 0x16, 0xa7, 0x2f, 0x8a, 0xe9, 0x92, 0xe9, 0xe3, 0xf8, 0x44, 0xfa, 0xb6, 0x21, 0xe7, 0xfa, 0x96, 0xe9, 0xca, 0xd9, 0xbc, 0x36, 0xad, 0xb2, 0x67, 0xdb, 0x01, 0x26, 0x44, 0x17, 0x38, 0xb4, 0x03, 0xf9, 0x00, 0xf7, 0x7d, 0x8a, 0xe5, 0x44, 0xbe, 0x41, 0x43, 0x02, 0xd1, 0x33, 0xb6, 0x9b, 0x58, 0x43, 0xbe, 0x9b, 0x44, 0x71, 0x99, 0x2e, 0x2c, 0x81, 0xd0, 0x23, 0x2c, 0x5a, 0x87, 0x92, 0x60, 0x30, 0x12, 0x5d, 0x00, 0x42, 0xd4, 0x36, 0xfb, 0x58, 0xfb, 0x7d, 0x01, 0x20, 0x5e, 0x11, 0x4b, 0x2f, 0xa1, 0x01, 0x36, 0xfb, 0x71, 0x15, 0x88, 0x21, 0x54, 0x95, 0xe2, 0xb0, 0x0e, 0x1e, 0xc1, 0x6a, 0x04, 0x8c, 0x2a, 0x41, 0x14, 0x4c, 0x2d, 0x84, 0x46, 0xb5, 0xf0, 0x39, 0x84, 0xea, 0x61, 0x35, 0x88, 0x9a, 0xa9, 0x48, 0xa9, 0xac, 0x87, 0x4d, 0xa8, 0xf4, 0x31, 0x21, 0x66, 0x0f, 0x13, 0x83, 0x60, 0x8f, 0x86, 0xc7, 0x86, 0x50, 0xd8, 0x61, 0x3b, 0xef, 0x23, 0x58, 0x8d, 0x40, 0x01, 0xb6, 0xb0, 0x33, 0x8a, 0x0e, 0x0e, 0xb5, 0xf0, 0x85, 0x2e, 0xe5, 0x68, 0x0b, 0x6a, 0x57, 0x18, 0x0f, 0x0c, 0xd3, 0x75, 0x46, 0x21, 0xa9, 0x38, 0x3e, 0x54, 0x99, 0x7c, 0x8f, 0x8b, 0x39, 0xed, 0x25, 0x6c, 0xf2, 0x5a, 0xe4, 0x19, 0x32, 0x84, 0x5f, 0x06, 0x1f, 0xf5, 0xef, 0x79, 0x92, 0x58, 0x67, 0x34, 0x27, 0x8c, 0xa5, 0xc3, 0x49, 0x9a, 0x82, 
0x23, 0xde, 0x2d, 0x7e, 0x03, 0x9f, 0x71, 0x4b, 0x32, 0x2f, 0x73, 0x4d, 0x29, 0x0b, 0x4d, 0x6d, 0x30, 0x1e, 0x9d, 0xd3, 0xcc, 0xb1, 0x15, 0x76, 0x98, 0x0c, 0x0c, 0x0f, 0x40, 0xc2, 0x44, 0x71, 0xb9, 0x0e, 0x7b, 0x29, 0xb4, 0x59, 0x9c, 0x62, 0x6a, 0x13, 0xd6, 0xc7, 0xa8, 0xc3, 0x5c, 0x24, 0xe8, 0x61, 0x21, 0xfd, 0xfd, 0x04, 0x7d, 0x98, 0xb4, 0xd8, 0xc4, 0xb7, 0xb0, 0x26, 0xd2, 0x71, 0xe1, 0xfa, 0xd7, 0x86, 0xe5, 0x7b, 0x34, 0xf0, 0x5d, 0xe3, 0xda, 0xf1, 0x6c, 0xff, 0x5a, 0x2d, 0x85, 0xfd, 0x3c, 0x41, 0xde, 0xf2, 0xe8, 0xb3, 0xa7, 0xdf, 0x9a, 0xee, 0x10, 0xeb, 0x77, 0xb9, 0xf6, 0xa1, 0xeb, 0x5f, 0x37, 0x85, 0xee, 0x6b, 0xae, 0x8a, 0xde, 0x40, 0x5d, 0x06, 0x7f, 0x16, 0x71, 0x79, 0x31, 0xf1, 0xc7, 0x42, 0x7d, 0x9a, 0xf9, 0x19, 0xe4, 0x7d, 0x71, 0x22, 0xac, 0xf0, 0x11, 0xfe, 0xe9, 0xbc, 0xf1, 0x71, 0xca, 0x51, 0xba, 0x44, 0x6b, 0xff, 0xcc, 0x40, 0x41, 0xb6, 0x3c, 0x7a, 0x09, 0x15, 0x6a, 0x0d, 0x9c, 0x81, 0x61, 0x0a, 0x81, 0x9c, 0x5c, 0x0f, 0xe6, 0x0e, 0x89, 0x46, 0xd7, 0x1a, 0xb4, 0x06, 0xf2, 0xe1, 0x68, 0x45, 0x2f, 0x73, 0xf5, 0x90, 0xee, 0x05, 0x94, 0x86, 0x36, 0x89, 0xc8, 0xc4, 0x58, 0xfb, 0x6c, 0x3e, 0xd9, 0x99, 0x4d, 0x62, 0x2a, 0x18, 0x46, 0x4f, 0xcc, 0x2f, 0x9f, 0x5e, 0xe2, 0x20, 0xa2, 0xca, 0x2c, 0xf2, 0xeb, 0x94, 0xc1, 0x13, 0x7e, 0xf9, 0x89, 0xe7, 0xfa, 0x1e, 0x94, 0x93, 0x7e, 0xb3, 0x93, 0xcf, 0xc4, 0x9a, 0xcb, 0x7a, 0x31, 0x5e, 0x06, 0x82, 0xec, 0xc0, 0x0f, 0xc4, 0xe7, 0x49, 0x4e, 0xe7, 0xbf, 0xeb, 0x5b, 0x00, 0xb1, 0xb7, 0xa8, 0x0e, 0xca, 0x85, 0xe3, 0x62, 0x3e, 0xe7, 0xc4, 0x79, 0x3c, 0x7a, 0xae, 0xb7, 0xa1, 0x9c, 0x74, 0x26, 0x3a, 0x15, 0xa4, 0xe2, 0x53, 0x01, 0x7a, 0x08, 0xb9, 0x11, 0xcb, 0xae, 0x0c, 0xd1, 0x9d, 0xa9, 0x02, 0xd8, 0xf3, 0xde, 0xea, 0x02, 0xb2, 0x5f, 0x84, 0x82, 0xf4, 0x54, 0xfb, 0x63, 0x86, 0x9d, 0x6c, 0xe5, 0xb8, 0xdd, 0x85, 0x0c, 0x75, 0xc9, 0xfc, 0x6d, 0x37, 0x04, 0x36, 0xba, 0x2e, 0x8b, 0x08, 0x03, 0xb3, 0x8f, 0x37, 0x1e, 0x18, 0x69, 0x77, 0xeb, 0x06, 0x2d, 0xbe, 0x86, 0xf0, 0xe9, 0x68, 0x45, 0x17, 0x8a, 0xf5, 0x7f, 0xa5, 0x20, 0xd3, 
0x75, 0x09, 0xfa, 0x1c, 0x2a, 0x84, 0x9a, 0x9e, 0x6d, 0x06, 0xb6, 0x11, 0x2f, 0x8f, 0x45, 0x3e, 0x14, 0xb3, 0x91, 0x8f, 0xd6, 0x01, 0x44, 0x22, 0xe3, 0xa3, 0xe4, 0xd1, 0x8a, 0x5e, 0xe4, 0x32, 0x0e, 0x78, 0x04, 0xab, 0xa2, 0xef, 0x2c, 0x1c, 0x50, 0xe7, 0xc2, 0xb1, 0xd8, 0xa7, 0x65, 0x86, 0x67, 0xa4, 0xc6, 0x5f, 0x34, 0x63, 0x39, 0x7a, 0x0c, 0x48, 0x36, 0x53, 0x12, 0x9d, 0xe5, 0xe8, 0x55, 0xf1, 0x26, 0x01, 0xdf, 0xaf, 0x42, 0xd9, 0x72, 0x06, 0xcc, 0x3a, 0x19, 0x3a, 0x14, 0xd7, 0x4f, 0xa1, 0x32, 0xb6, 0xaa, 0x0f, 0x4e, 0x4d, 0x01, 0x72, 0x7d, 0xdf, 0xc6, 0xae, 0xe6, 0x41, 0x39, 0xd9, 0x6b, 0x33, 0x89, 0xef, 0x24, 0x89, 0x8b, 0x92, 0x02, 0x3d, 0x05, 0x30, 0x6d, 0xdb, 0x61, 0x5a, 0xd1, 0xae, 0x3e, 0xdb, 0x66, 0x02, 0xa7, 0x9d, 0xc0, 0xed, 0xa4, 0x3d, 0x36, 0xc6, 0xfc, 0x21, 0x45, 0x3f, 0x02, 0x25, 0xbc, 0x2d, 0x93, 0x75, 0xb1, 0x36, 0x45, 0xf5, 0x5c, 0x02, 0xf4, 0x08, 0xaa, 0x59, 0x80, 0x92, 0x6c, 0x27, 0x8e, 0xd7, 0xc3, 0x01, 0xfb, 0x4c, 0x37, 0xd9, 0xe7, 0xbb, 0x58, 0x85, 0xa2, 0xcb, 0xa7, 0x31, 0x23, 0xe9, 0xe5, 0x8d, 0xfc, 0x5d, 0x99, 0xf0, 0xd9, 0x1a, 0xb4, 0xbc, 0x0b, 0x9f, 0xf5, 0x22, 0x9b, 0x21, 0x46, 0x7c, 0xa9, 0x50, 0xd1, 0x8b, 0x4c, 0x22, 0x6e, 0x35, 0x34, 0x31, 0xa1, 0x0c, 0xcb, 0x94, 0x88, 0x34, 0x47, 0x94, 0x98, 0xb0, 0x69, 0x0a, 0xcc, 0x97, 0x50, 0xe3, 0x98, 0x00, 0xd3, 0xc0, 0xf4, 0x48, 0xdf, 0xa1, 0x62, 0x60, 0x54, 0xf4, 0x5b, 0x4c, 0xae, 0xc7, 0x62, 0x76, 0x46, 0xe1, 0xd0, 0x41, 0xe0, 0x9f, 0x63, 0xc2, 0x4b, 0xa7, 0xa2, 0x73, 0x07, 0x5e, 0x71, 0x09, 0x3b, 0x4a, 0x72, 0xc0, 0xb9, 0x69, 0x5d, 0xf9, 0x17, 0xe2, 0x1b, 0x54, 0x9a, 0xdb, 0x17, 0xa2, 0x08, 0x22, 0xe6, 0x29, 0xe1, 0x9b, 0xbc, 0x84, 0x88, 0xa5, 0x11, 0xf4, 0x00, 0x6e, 0x89, 0x45, 0x79, 0xb6, 0x71, 0x4d, 0x2c, 0xd3, 0xc5, 0x7c, 0x37, 0xaf, 0xe8, 0x7c, 0x31, 0x1d, 0xcf, 0x7e, 0xcd, 0x85, 0x11, 0x2e, 0xb0, 0x46, 0x21, 0x4e, 0x89, 0x71, 0xba, 0x35, 0x92, 0xb8, 0x35, 0x50, 0x04, 0x8e, 0xfa, 0x7c, 0x23, 0xad, 0xe8, 0x05, 0x0e, 0xa0, 0x7e, 0xf4, 0xca, 0xa4, 0x3e, 0xdf, 0x04, 0xe5, 0xab, 0x3d, 0xea, 
0xa3, 0x0d, 0xe9, 0x28, 0xf3, 0xa2, 0x4f, 0x08, 0xdf, 0xc6, 0xe4, 0x6a, 0x3b, 0x9e, 0xfd, 0x92, 0x90, 0x08, 0xc1, 0xec, 0x33, 0x44, 0x39, 0x46, 0xe8, 0xd6, 0x88, 0x21, 0xc2, 0xc5, 0x0e, 0x3d, 0xd3, 0xba, 0xc2, 0xb6, 0x5a, 0x89, 0x17, 0x7b, 0x26, 0x44, 0x51, 0x4c, 0x89, 0x40, 0x54, 0x13, 0x56, 0x04, 0xe0, 0x1e, 0xf0, 0x84, 0x1a, 0xae, 0x4f, 0xa8, 0x7a, 0x8b, 0xbf, 0xe6, 0x3e, 0x9f, 0xf8, 0x84, 0x46, 0x06, 0x64, 0xf2, 0xd4, 0x5a, 0x6c, 0x40, 0x26, 0x2e, 0x82, 0x5c, 0x30, 0x3a, 0x4a, 0xd4, 0xd5, 0x18, 0x72, 0x28, 0x44, 0xe8, 0x31, 0xdc, 0x16, 0x26, 0xd8, 0x31, 0x81, 0x9d, 0x94, 0xc5, 0xf9, 0x0b, 0x71, 0x24, 0xaf, 0x8e, 0x13, 0x93, 0xf0, 0x63, 0xa7, 0x3c, 0xd8, 0xa1, 0x18, 0x6e, 0x5a, 0x57, 0x02, 0x7d, 0x3b, 0xae, 0x19, 0x86, 0xde, 0xb3, 0xae, 0x38, 0x78, 0x9a, 0x3b, 0xc0, 0xd6, 0x48, 0xbd, 0x33, 0xcd, 0xad, 0x63, 0x6b, 0x34, 0xcd, 0xcd, 0xd1, 0x1f, 0x4d, 0x71, 0x73, 0x70, 0x18, 0x9a, 0x41, 0x9f, 0x0e, 0xd5, 0xbb, 0x71, 0x68, 0x5e, 0xf5, 0xe9, 0x10, 0x3d, 0x84, 0xd5, 0x28, 0x3b, 0x84, 0xd0, 0xcb, 0x00, 0x93, 0x4b, 0xf5, 0xe3, 0x44, 0x61, 0x5b, 0xa3, 0x8e, 0x14, 0x27, 0x2a, 0x84, 0xaa, 0x6a, 0xb2, 0x42, 0x68, 0x94, 0x9f, 0x80, 0xd2, 0x91, 0x19, 0xa8, 0x6b, 0x89, 0x1c, 0x73, 0x49, 0x64, 0x87, 0xd5, 0x49, 0x64, 0xa7, 0x1e, 0xdb, 0xe9, 0x78, 0x76, 0x64, 0x27, 0xec, 0x47, 0x86, 0xb5, 0xae, 0x3d, 0x5b, 0xbd, 0x17, 0x27, 0xa3, 0xe3, 0xd9, 0xcd, 0x6b, 0x2f, 0x2e, 0x08, 0xd3, 0x1e, 0xb1, 0xa2, 0xba, 0x1f, 0x1b, 0xdc, 0xe3, 0x12, 0x76, 0xf2, 0x97, 0x39, 0xf7, 0x03, 0x1b, 0x07, 0x8e, 0xd7, 0x53, 0x3f, 0xe1, 0xa0, 0xaa, 0x48, 0x7b, 0x28, 0xd5, 0xce, 0xe1, 0xa3, 0x17, 0x98, 0x76, 0xfd, 0x81, 0xfc, 0x86, 0x24, 0x3a, 0xfe, 0xed, 0x10, 0x13, 0xca, 0x0e, 0xdb, 0xfc, 0x9b, 0xc1, 0x98, 0xba, 0xc1, 0xa8, 0x72, 0x79, 0x33, 0xba, 0x58, 0x58, 0x87, 0x52, 0xdf, 0xfc, 0xc1, 0x08, 0x30, 0x19, 0xba, 0x94, 0xc8, 0xcf, 0x06, 0xe8, 0x9b, 0x3f, 0xe8, 0x42, 0xa2, 0x19, 0x70, 0x77, 0xd2, 0x06, 0x19, 0xf8, 0x1e, 0xc1, 0xe8, 0x2b, 0x28, 0x48, 0x7a, 0x35, 0xc5, 0x8f, 0x58, 0x6b, 0xf3, 0xaf, 0xb3, 0x42, 
0x24, 0xaa, 0x41, 0x06, 0x7b, 0xe2, 0xf3, 0x44, 0xd1, 0xd9, 0x4f, 0xed, 0x57, 0xb0, 0xfa, 0x02, 0x53, 0xf1, 0xc9, 0x1c, 0x2d, 0xe0, 0x01, 0xfb, 0xf8, 0x61, 0x0b, 0x88, 0xaf, 0x13, 0x52, 0xe1, 0x77, 0x8a, 0x19, 0x48, 0xf4, 0x32, 0xee, 0xbf, 0x01, 0x94, 0x64, 0x97, 0xae, 0x3f, 0x81, 0xbc, 0x20, 0x96, 0x9e, 0xab, 0x73, 0xaf, 0x12, 0x24, 0x6e, 0x86, 0xdf, 0xdb, 0x50, 0x8b, 0x98, 0x43, 0xb7, 0xc7, 0xee, 0x3f, 0x52, 0xe3, 0xf7, 0x1f, 0xda, 0x41, 0x62, 0xa1, 0x33, 0x3d, 0x49, 0x2d, 0xe3, 0x89, 0xf6, 0x3b, 0xf8, 0x38, 0xa2, 0x11, 0x3b, 0x06, 0x59, 0xc6, 0x7c, 0x22, 0xa4, 0xd1, 0x1d, 0x50, 0x3a, 0x19, 0xd2, 0xf0, 0x22, 0x68, 0x22, 0xa4, 0x99, 0xa9, 0x90, 0x5e, 0x82, 0x3a, 0xed, 0x80, 0x5c, 0xce, 0xf8, 0xff, 0x03, 0x52, 0xef, 0xf3, 0xff, 0x80, 0x19, 0x21, 0xde, 0xe5, 0x11, 0x8b, 0xee, 0xe4, 0xc4, 0x22, 0x6f, 0xbe, 0x97, 0xd3, 0x5a, 0x3c, 0xe1, 0x91, 0xce, 0xac, 0x5a, 0x4d, 0x2d, 0x57, 0xab, 0xda, 0xd7, 0x70, 0x87, 0x2d, 0x34, 0x71, 0x5b, 0x27, 0x3c, 0x98, 0xba, 0xb1, 0x4b, 0x4d, 0xdf, 0xd8, 0x69, 0x67, 0xbc, 0x37, 0x93, 0xca, 0xd2, 0x95, 0x9f, 0x02, 0xc4, 0xc0, 0xf9, 0xff, 0x5b, 0x4b, 0x68, 0x26, 0xf0, 0x5a, 0x4b, 0x54, 0x9d, 0x0c, 0x5a, 0x9c, 0xf6, 0x28, 0xa7, 0xa9, 0x89, 0x7b, 0x3d, 0x15, 0x0a, 0x64, 0xd8, 0xef, 0x9b, 0xc1, 0x5b, 0x19, 0xd9, 0xf0, 0x31, 0xac, 0x47, 0x49, 0x95, 0xa8, 0x47, 0x71, 0xf3, 0x35, 0xbf, 0x1e, 0x85, 0x86, 0xc4, 0xed, 0xfe, 0x39, 0x07, 0x8a, 0x0c, 0xdd, 0x3b, 0x64, 0x41, 0x75, 0x7c, 0x5a, 0xa0, 0x2f, 0xa6, 0x09, 0x66, 0xce, 0xac, 0xfa, 0xd6, 0x62, 0xa0, 0xf4, 0xf1, 0x35, 0x40, 0xdc, 0xd3, 0x68, 0x73, 0xa6, 0xde, 0xf8, 0x3c, 0xa9, 0x7f, 0x76, 0x33, 0x48, 0x12, 0x77, 0xa1, 0x18, 0x49, 0x91, 0x76, 0x83, 0x4a, 0x48, 0xbb, 0x79, 0x23, 0x46, 0xb2, 0x3a, 0x89, 0x41, 0x21, 0xfb, 0x05, 0x7d, 0x79, 0x83, 0xe2, 0x78, 0x53, 0xd7, 0x1f, 0x2e, 0x03, 0x1d, 0x8b, 0x4c, 0xf8, 0xef, 0xdb, 0xd9, 0xde, 0x8d, 0xb7, 0xd3, 0x9c, 0xc8, 0x4c, 0xf6, 0xcf, 0xf7, 0x50, 0x19, 0xab, 0x66, 0xf4, 0x60, 0xb6, 0x57, 0x93, 0xbd, 0x52, 0xff, 0x62, 0x21, 0x6e, 0x3c, 0xf6, 0xe2, 0xa2, 
0x70, 0x4e, 0xec, 0x93, 0x55, 0x3f, 0x2f, 0xf6, 0x63, 0xe5, 0xbc, 0xff, 0x06, 0x6e, 0x3b, 0xfe, 0x14, 0x70, 0xbf, 0x12, 0x16, 0xec, 0x2b, 0x76, 0x24, 0x7f, 0x95, 0xfa, 0xe5, 0x13, 0x79, 0x44, 0xef, 0xf9, 0xae, 0xe9, 0xf5, 0x1a, 0x7e, 0xd0, 0xdb, 0x1e, 0xff, 0xb7, 0x3d, 0x7b, 0x0a, 0x77, 0xd3, 0x77, 0xc6, 0x68, 0xe7, 0x3c, 0xcf, 0x4f, 0xf3, 0x5f, 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0x54, 0xae, 0x0b, 0x93, 0xdf, 0x1f, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // ChannelzClient is the client API for Channelz service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type ChannelzClient interface { // Gets all root channels (i.e. channels the application has directly // created). This does not include subchannels nor non-top level channels. GetTopChannels(ctx context.Context, in *GetTopChannelsRequest, opts ...grpc.CallOption) (*GetTopChannelsResponse, error) // Gets all servers that exist in the process. GetServers(ctx context.Context, in *GetServersRequest, opts ...grpc.CallOption) (*GetServersResponse, error) // Returns a single Server, or else a NOT_FOUND code. GetServer(ctx context.Context, in *GetServerRequest, opts ...grpc.CallOption) (*GetServerResponse, error) // Gets all server sockets that exist in the process. GetServerSockets(ctx context.Context, in *GetServerSocketsRequest, opts ...grpc.CallOption) (*GetServerSocketsResponse, error) // Returns a single Channel, or else a NOT_FOUND code. GetChannel(ctx context.Context, in *GetChannelRequest, opts ...grpc.CallOption) (*GetChannelResponse, error) // Returns a single Subchannel, or else a NOT_FOUND code. 
GetSubchannel(ctx context.Context, in *GetSubchannelRequest, opts ...grpc.CallOption) (*GetSubchannelResponse, error) // Returns a single Socket or else a NOT_FOUND code. GetSocket(ctx context.Context, in *GetSocketRequest, opts ...grpc.CallOption) (*GetSocketResponse, error) } type channelzClient struct { cc grpc.ClientConnInterface } func NewChannelzClient(cc grpc.ClientConnInterface) ChannelzClient { return &channelzClient{cc} } func (c *channelzClient) GetTopChannels(ctx context.Context, in *GetTopChannelsRequest, opts ...grpc.CallOption) (*GetTopChannelsResponse, error) { out := new(GetTopChannelsResponse) err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetTopChannels", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *channelzClient) GetServers(ctx context.Context, in *GetServersRequest, opts ...grpc.CallOption) (*GetServersResponse, error) { out := new(GetServersResponse) err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetServers", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *channelzClient) GetServer(ctx context.Context, in *GetServerRequest, opts ...grpc.CallOption) (*GetServerResponse, error) { out := new(GetServerResponse) err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetServer", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *channelzClient) GetServerSockets(ctx context.Context, in *GetServerSocketsRequest, opts ...grpc.CallOption) (*GetServerSocketsResponse, error) { out := new(GetServerSocketsResponse) err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetServerSockets", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *channelzClient) GetChannel(ctx context.Context, in *GetChannelRequest, opts ...grpc.CallOption) (*GetChannelResponse, error) { out := new(GetChannelResponse) err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetChannel", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *channelzClient) GetSubchannel(ctx context.Context, in *GetSubchannelRequest, opts ...grpc.CallOption) (*GetSubchannelResponse, error) { out := new(GetSubchannelResponse) err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetSubchannel", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *channelzClient) GetSocket(ctx context.Context, in *GetSocketRequest, opts ...grpc.CallOption) (*GetSocketResponse, error) { out := new(GetSocketResponse) err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetSocket", in, out, opts...) if err != nil { return nil, err } return out, nil } // ChannelzServer is the server API for Channelz service. type ChannelzServer interface { // Gets all root channels (i.e. channels the application has directly // created). This does not include subchannels nor non-top level channels. GetTopChannels(context.Context, *GetTopChannelsRequest) (*GetTopChannelsResponse, error) // Gets all servers that exist in the process. GetServers(context.Context, *GetServersRequest) (*GetServersResponse, error) // Returns a single Server, or else a NOT_FOUND code. GetServer(context.Context, *GetServerRequest) (*GetServerResponse, error) // Gets all server sockets that exist in the process. GetServerSockets(context.Context, *GetServerSocketsRequest) (*GetServerSocketsResponse, error) // Returns a single Channel, or else a NOT_FOUND code. GetChannel(context.Context, *GetChannelRequest) (*GetChannelResponse, error) // Returns a single Subchannel, or else a NOT_FOUND code. GetSubchannel(context.Context, *GetSubchannelRequest) (*GetSubchannelResponse, error) // Returns a single Socket or else a NOT_FOUND code. GetSocket(context.Context, *GetSocketRequest) (*GetSocketResponse, error) } // UnimplementedChannelzServer can be embedded to have forward compatible implementations. 
type UnimplementedChannelzServer struct { } func (*UnimplementedChannelzServer) GetTopChannels(ctx context.Context, req *GetTopChannelsRequest) (*GetTopChannelsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetTopChannels not implemented") } func (*UnimplementedChannelzServer) GetServers(ctx context.Context, req *GetServersRequest) (*GetServersResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetServers not implemented") } func (*UnimplementedChannelzServer) GetServer(ctx context.Context, req *GetServerRequest) (*GetServerResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetServer not implemented") } func (*UnimplementedChannelzServer) GetServerSockets(ctx context.Context, req *GetServerSocketsRequest) (*GetServerSocketsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetServerSockets not implemented") } func (*UnimplementedChannelzServer) GetChannel(ctx context.Context, req *GetChannelRequest) (*GetChannelResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetChannel not implemented") } func (*UnimplementedChannelzServer) GetSubchannel(ctx context.Context, req *GetSubchannelRequest) (*GetSubchannelResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSubchannel not implemented") } func (*UnimplementedChannelzServer) GetSocket(ctx context.Context, req *GetSocketRequest) (*GetSocketResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSocket not implemented") } func RegisterChannelzServer(s *grpc.Server, srv ChannelzServer) { s.RegisterService(&_Channelz_serviceDesc, srv) } func _Channelz_GetTopChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetTopChannelsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ChannelzServer).GetTopChannels(ctx, in) } 
info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.channelz.v1.Channelz/GetTopChannels", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetTopChannels(ctx, req.(*GetTopChannelsRequest)) } return interceptor(ctx, in, info, handler) } func _Channelz_GetServers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetServersRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ChannelzServer).GetServers(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.channelz.v1.Channelz/GetServers", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetServers(ctx, req.(*GetServersRequest)) } return interceptor(ctx, in, info, handler) } func _Channelz_GetServer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetServerRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ChannelzServer).GetServer(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.channelz.v1.Channelz/GetServer", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetServer(ctx, req.(*GetServerRequest)) } return interceptor(ctx, in, info, handler) } func _Channelz_GetServerSockets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetServerSocketsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ChannelzServer).GetServerSockets(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.channelz.v1.Channelz/GetServerSockets", } handler := func(ctx context.Context, 
req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetServerSockets(ctx, req.(*GetServerSocketsRequest)) } return interceptor(ctx, in, info, handler) } func _Channelz_GetChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetChannelRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ChannelzServer).GetChannel(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.channelz.v1.Channelz/GetChannel", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetChannel(ctx, req.(*GetChannelRequest)) } return interceptor(ctx, in, info, handler) } func _Channelz_GetSubchannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetSubchannelRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ChannelzServer).GetSubchannel(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.channelz.v1.Channelz/GetSubchannel", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetSubchannel(ctx, req.(*GetSubchannelRequest)) } return interceptor(ctx, in, info, handler) } func _Channelz_GetSocket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetSocketRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ChannelzServer).GetSocket(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.channelz.v1.Channelz/GetSocket", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetSocket(ctx, req.(*GetSocketRequest)) } return interceptor(ctx, in, 
info, handler) } var _Channelz_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.channelz.v1.Channelz", HandlerType: (*ChannelzServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "GetTopChannels", Handler: _Channelz_GetTopChannels_Handler, }, { MethodName: "GetServers", Handler: _Channelz_GetServers_Handler, }, { MethodName: "GetServer", Handler: _Channelz_GetServer_Handler, }, { MethodName: "GetServerSockets", Handler: _Channelz_GetServerSockets_Handler, }, { MethodName: "GetChannel", Handler: _Channelz_GetChannel_Handler, }, { MethodName: "GetSubchannel", Handler: _Channelz_GetSubchannel_Handler, }, { MethodName: "GetSocket", Handler: _Channelz_GetSocket_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "grpc/channelz/v1/channelz.proto", } grpc-go-1.29.1/channelz/service/000077500000000000000000000000001365033716300164135ustar00rootroot00000000000000grpc-go-1.29.1/channelz/service/func_linux.go000066400000000000000000000073631365033716300211250ustar00rootroot00000000000000// +build !appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package service import ( "time" "github.com/golang/protobuf/ptypes" durpb "github.com/golang/protobuf/ptypes/duration" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/internal/channelz" ) func convertToPtypesDuration(sec int64, usec int64) *durpb.Duration { return ptypes.DurationProto(time.Duration(sec*1e9 + usec*1e3)) } func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOption { var opts []*channelzpb.SocketOption if skopts.Linger != nil { additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionLinger{ Active: skopts.Linger.Onoff != 0, Duration: convertToPtypesDuration(int64(skopts.Linger.Linger), 0), }) if err == nil { opts = append(opts, &channelzpb.SocketOption{ Name: "SO_LINGER", Additional: additional, }) } } if skopts.RecvTimeout != nil { additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTimeout{ Duration: convertToPtypesDuration(int64(skopts.RecvTimeout.Sec), int64(skopts.RecvTimeout.Usec)), }) if err == nil { opts = append(opts, &channelzpb.SocketOption{ Name: "SO_RCVTIMEO", Additional: additional, }) } } if skopts.SendTimeout != nil { additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTimeout{ Duration: convertToPtypesDuration(int64(skopts.SendTimeout.Sec), int64(skopts.SendTimeout.Usec)), }) if err == nil { opts = append(opts, &channelzpb.SocketOption{ Name: "SO_SNDTIMEO", Additional: additional, }) } } if skopts.TCPInfo != nil { additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTcpInfo{ TcpiState: uint32(skopts.TCPInfo.State), TcpiCaState: uint32(skopts.TCPInfo.Ca_state), TcpiRetransmits: uint32(skopts.TCPInfo.Retransmits), TcpiProbes: uint32(skopts.TCPInfo.Probes), TcpiBackoff: uint32(skopts.TCPInfo.Backoff), TcpiOptions: uint32(skopts.TCPInfo.Options), // https://golang.org/pkg/syscall/#TCPInfo // TCPInfo struct does not contain info about TcpiSndWscale and TcpiRcvWscale. 
TcpiRto: skopts.TCPInfo.Rto, TcpiAto: skopts.TCPInfo.Ato, TcpiSndMss: skopts.TCPInfo.Snd_mss, TcpiRcvMss: skopts.TCPInfo.Rcv_mss, TcpiUnacked: skopts.TCPInfo.Unacked, TcpiSacked: skopts.TCPInfo.Sacked, TcpiLost: skopts.TCPInfo.Lost, TcpiRetrans: skopts.TCPInfo.Retrans, TcpiFackets: skopts.TCPInfo.Fackets, TcpiLastDataSent: skopts.TCPInfo.Last_data_sent, TcpiLastAckSent: skopts.TCPInfo.Last_ack_sent, TcpiLastDataRecv: skopts.TCPInfo.Last_data_recv, TcpiLastAckRecv: skopts.TCPInfo.Last_ack_recv, TcpiPmtu: skopts.TCPInfo.Pmtu, TcpiRcvSsthresh: skopts.TCPInfo.Rcv_ssthresh, TcpiRtt: skopts.TCPInfo.Rtt, TcpiRttvar: skopts.TCPInfo.Rttvar, TcpiSndSsthresh: skopts.TCPInfo.Snd_ssthresh, TcpiSndCwnd: skopts.TCPInfo.Snd_cwnd, TcpiAdvmss: skopts.TCPInfo.Advmss, TcpiReordering: skopts.TCPInfo.Reordering, }) if err == nil { opts = append(opts, &channelzpb.SocketOption{ Name: "TCP_INFO", Additional: additional, }) } } return opts } grpc-go-1.29.1/channelz/service/func_nonlinux.go000066400000000000000000000015421365033716300216310ustar00rootroot00000000000000// +build !linux appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package service import ( channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/internal/channelz" ) func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOption { return nil } grpc-go-1.29.1/channelz/service/regenerate.sh000077500000000000000000000020071365033716300210720ustar00rootroot00000000000000#!/bin/bash # Copyright 2018 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux -o pipefail TMP=$(mktemp -d) function finish { rm -rf "$TMP" } trap finish EXIT pushd "$TMP" mkdir -p grpc/channelz/v1 curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/channelz/v1/channelz.proto > grpc/channelz/v1/channelz.proto protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/channelz/v1/*.proto popd rm -f ../grpc_channelz_v1/*.pb.go cp "$TMP"/grpc/channelz/v1/*.pb.go ../grpc_channelz_v1/ grpc-go-1.29.1/channelz/service/service.go000066400000000000000000000317361365033716300204140ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate ./regenerate.sh // Package service provides an implementation for channelz service server. package service import ( "context" "net" "github.com/golang/protobuf/ptypes" wrpb "github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/grpc" channelzgrpc "google.golang.org/grpc/channelz/grpc_channelz_v1" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/status" ) func init() { channelz.TurnOn() } // RegisterChannelzServiceToServer registers the channelz service to the given server. func RegisterChannelzServiceToServer(s *grpc.Server) { channelzgrpc.RegisterChannelzServer(s, newCZServer()) } func newCZServer() channelzgrpc.ChannelzServer { return &serverImpl{} } type serverImpl struct{} func connectivityStateToProto(s connectivity.State) *channelzpb.ChannelConnectivityState { switch s { case connectivity.Idle: return &channelzpb.ChannelConnectivityState{State: channelzpb.ChannelConnectivityState_IDLE} case connectivity.Connecting: return &channelzpb.ChannelConnectivityState{State: channelzpb.ChannelConnectivityState_CONNECTING} case connectivity.Ready: return &channelzpb.ChannelConnectivityState{State: channelzpb.ChannelConnectivityState_READY} case connectivity.TransientFailure: return &channelzpb.ChannelConnectivityState{State: channelzpb.ChannelConnectivityState_TRANSIENT_FAILURE} case connectivity.Shutdown: return &channelzpb.ChannelConnectivityState{State: channelzpb.ChannelConnectivityState_SHUTDOWN} default: return &channelzpb.ChannelConnectivityState{State: channelzpb.ChannelConnectivityState_UNKNOWN} } } func channelTraceToProto(ct *channelz.ChannelTrace) *channelzpb.ChannelTrace { pbt := &channelzpb.ChannelTrace{} pbt.NumEventsLogged = ct.EventNum if ts, 
err := ptypes.TimestampProto(ct.CreationTime); err == nil { pbt.CreationTimestamp = ts } var events []*channelzpb.ChannelTraceEvent for _, e := range ct.Events { cte := &channelzpb.ChannelTraceEvent{ Description: e.Desc, Severity: channelzpb.ChannelTraceEvent_Severity(e.Severity), } if ts, err := ptypes.TimestampProto(e.Timestamp); err == nil { cte.Timestamp = ts } if e.RefID != 0 { switch e.RefType { case channelz.RefChannel: cte.ChildRef = &channelzpb.ChannelTraceEvent_ChannelRef{ChannelRef: &channelzpb.ChannelRef{ChannelId: e.RefID, Name: e.RefName}} case channelz.RefSubChannel: cte.ChildRef = &channelzpb.ChannelTraceEvent_SubchannelRef{SubchannelRef: &channelzpb.SubchannelRef{SubchannelId: e.RefID, Name: e.RefName}} } } events = append(events, cte) } pbt.Events = events return pbt } func channelMetricToProto(cm *channelz.ChannelMetric) *channelzpb.Channel { c := &channelzpb.Channel{} c.Ref = &channelzpb.ChannelRef{ChannelId: cm.ID, Name: cm.RefName} c.Data = &channelzpb.ChannelData{ State: connectivityStateToProto(cm.ChannelData.State), Target: cm.ChannelData.Target, CallsStarted: cm.ChannelData.CallsStarted, CallsSucceeded: cm.ChannelData.CallsSucceeded, CallsFailed: cm.ChannelData.CallsFailed, } if ts, err := ptypes.TimestampProto(cm.ChannelData.LastCallStartedTimestamp); err == nil { c.Data.LastCallStartedTimestamp = ts } nestedChans := make([]*channelzpb.ChannelRef, 0, len(cm.NestedChans)) for id, ref := range cm.NestedChans { nestedChans = append(nestedChans, &channelzpb.ChannelRef{ChannelId: id, Name: ref}) } c.ChannelRef = nestedChans subChans := make([]*channelzpb.SubchannelRef, 0, len(cm.SubChans)) for id, ref := range cm.SubChans { subChans = append(subChans, &channelzpb.SubchannelRef{SubchannelId: id, Name: ref}) } c.SubchannelRef = subChans sockets := make([]*channelzpb.SocketRef, 0, len(cm.Sockets)) for id, ref := range cm.Sockets { sockets = append(sockets, &channelzpb.SocketRef{SocketId: id, Name: ref}) } c.SocketRef = sockets c.Data.Trace = 
channelTraceToProto(cm.Trace) return c } func subChannelMetricToProto(cm *channelz.SubChannelMetric) *channelzpb.Subchannel { sc := &channelzpb.Subchannel{} sc.Ref = &channelzpb.SubchannelRef{SubchannelId: cm.ID, Name: cm.RefName} sc.Data = &channelzpb.ChannelData{ State: connectivityStateToProto(cm.ChannelData.State), Target: cm.ChannelData.Target, CallsStarted: cm.ChannelData.CallsStarted, CallsSucceeded: cm.ChannelData.CallsSucceeded, CallsFailed: cm.ChannelData.CallsFailed, } if ts, err := ptypes.TimestampProto(cm.ChannelData.LastCallStartedTimestamp); err == nil { sc.Data.LastCallStartedTimestamp = ts } nestedChans := make([]*channelzpb.ChannelRef, 0, len(cm.NestedChans)) for id, ref := range cm.NestedChans { nestedChans = append(nestedChans, &channelzpb.ChannelRef{ChannelId: id, Name: ref}) } sc.ChannelRef = nestedChans subChans := make([]*channelzpb.SubchannelRef, 0, len(cm.SubChans)) for id, ref := range cm.SubChans { subChans = append(subChans, &channelzpb.SubchannelRef{SubchannelId: id, Name: ref}) } sc.SubchannelRef = subChans sockets := make([]*channelzpb.SocketRef, 0, len(cm.Sockets)) for id, ref := range cm.Sockets { sockets = append(sockets, &channelzpb.SocketRef{SocketId: id, Name: ref}) } sc.SocketRef = sockets sc.Data.Trace = channelTraceToProto(cm.Trace) return sc } func securityToProto(se credentials.ChannelzSecurityValue) *channelzpb.Security { switch v := se.(type) { case *credentials.TLSChannelzSecurityValue: return &channelzpb.Security{Model: &channelzpb.Security_Tls_{Tls: &channelzpb.Security_Tls{ CipherSuite: &channelzpb.Security_Tls_StandardName{StandardName: v.StandardName}, LocalCertificate: v.LocalCertificate, RemoteCertificate: v.RemoteCertificate, }}} case *credentials.OtherChannelzSecurityValue: otherSecurity := &channelzpb.Security_OtherSecurity{ Name: v.Name, } if anyval, err := ptypes.MarshalAny(v.Value); err == nil { otherSecurity.Value = anyval } return &channelzpb.Security{Model: &channelzpb.Security_Other{Other: 
otherSecurity}} } return nil } func addrToProto(a net.Addr) *channelzpb.Address { switch a.Network() { case "udp": // TODO: Address_OtherAddress{}. Need proto def for Value. case "ip": // Note zone info is discarded through the conversion. return &channelzpb.Address{Address: &channelzpb.Address_TcpipAddress{TcpipAddress: &channelzpb.Address_TcpIpAddress{IpAddress: a.(*net.IPAddr).IP}}} case "ip+net": // Note mask info is discarded through the conversion. return &channelzpb.Address{Address: &channelzpb.Address_TcpipAddress{TcpipAddress: &channelzpb.Address_TcpIpAddress{IpAddress: a.(*net.IPNet).IP}}} case "tcp": // Note zone info is discarded through the conversion. return &channelzpb.Address{Address: &channelzpb.Address_TcpipAddress{TcpipAddress: &channelzpb.Address_TcpIpAddress{IpAddress: a.(*net.TCPAddr).IP, Port: int32(a.(*net.TCPAddr).Port)}}} case "unix", "unixgram", "unixpacket": return &channelzpb.Address{Address: &channelzpb.Address_UdsAddress_{UdsAddress: &channelzpb.Address_UdsAddress{Filename: a.String()}}} default: } return &channelzpb.Address{} } func socketMetricToProto(sm *channelz.SocketMetric) *channelzpb.Socket { s := &channelzpb.Socket{} s.Ref = &channelzpb.SocketRef{SocketId: sm.ID, Name: sm.RefName} s.Data = &channelzpb.SocketData{ StreamsStarted: sm.SocketData.StreamsStarted, StreamsSucceeded: sm.SocketData.StreamsSucceeded, StreamsFailed: sm.SocketData.StreamsFailed, MessagesSent: sm.SocketData.MessagesSent, MessagesReceived: sm.SocketData.MessagesReceived, KeepAlivesSent: sm.SocketData.KeepAlivesSent, } if ts, err := ptypes.TimestampProto(sm.SocketData.LastLocalStreamCreatedTimestamp); err == nil { s.Data.LastLocalStreamCreatedTimestamp = ts } if ts, err := ptypes.TimestampProto(sm.SocketData.LastRemoteStreamCreatedTimestamp); err == nil { s.Data.LastRemoteStreamCreatedTimestamp = ts } if ts, err := ptypes.TimestampProto(sm.SocketData.LastMessageSentTimestamp); err == nil { s.Data.LastMessageSentTimestamp = ts } if ts, err := 
ptypes.TimestampProto(sm.SocketData.LastMessageReceivedTimestamp); err == nil { s.Data.LastMessageReceivedTimestamp = ts } s.Data.LocalFlowControlWindow = &wrpb.Int64Value{Value: sm.SocketData.LocalFlowControlWindow} s.Data.RemoteFlowControlWindow = &wrpb.Int64Value{Value: sm.SocketData.RemoteFlowControlWindow} if sm.SocketData.SocketOptions != nil { s.Data.Option = sockoptToProto(sm.SocketData.SocketOptions) } if sm.SocketData.Security != nil { s.Security = securityToProto(sm.SocketData.Security) } if sm.SocketData.LocalAddr != nil { s.Local = addrToProto(sm.SocketData.LocalAddr) } if sm.SocketData.RemoteAddr != nil { s.Remote = addrToProto(sm.SocketData.RemoteAddr) } s.RemoteName = sm.SocketData.RemoteName return s } func (s *serverImpl) GetTopChannels(ctx context.Context, req *channelzpb.GetTopChannelsRequest) (*channelzpb.GetTopChannelsResponse, error) { metrics, end := channelz.GetTopChannels(req.GetStartChannelId(), req.GetMaxResults()) resp := &channelzpb.GetTopChannelsResponse{} for _, m := range metrics { resp.Channel = append(resp.Channel, channelMetricToProto(m)) } resp.End = end return resp, nil } func serverMetricToProto(sm *channelz.ServerMetric) *channelzpb.Server { s := &channelzpb.Server{} s.Ref = &channelzpb.ServerRef{ServerId: sm.ID, Name: sm.RefName} s.Data = &channelzpb.ServerData{ CallsStarted: sm.ServerData.CallsStarted, CallsSucceeded: sm.ServerData.CallsSucceeded, CallsFailed: sm.ServerData.CallsFailed, } if ts, err := ptypes.TimestampProto(sm.ServerData.LastCallStartedTimestamp); err == nil { s.Data.LastCallStartedTimestamp = ts } sockets := make([]*channelzpb.SocketRef, 0, len(sm.ListenSockets)) for id, ref := range sm.ListenSockets { sockets = append(sockets, &channelzpb.SocketRef{SocketId: id, Name: ref}) } s.ListenSocket = sockets return s } func (s *serverImpl) GetServers(ctx context.Context, req *channelzpb.GetServersRequest) (*channelzpb.GetServersResponse, error) { metrics, end := channelz.GetServers(req.GetStartServerId(), 
req.GetMaxResults()) resp := &channelzpb.GetServersResponse{} for _, m := range metrics { resp.Server = append(resp.Server, serverMetricToProto(m)) } resp.End = end return resp, nil } func (s *serverImpl) GetServerSockets(ctx context.Context, req *channelzpb.GetServerSocketsRequest) (*channelzpb.GetServerSocketsResponse, error) { metrics, end := channelz.GetServerSockets(req.GetServerId(), req.GetStartSocketId(), req.GetMaxResults()) resp := &channelzpb.GetServerSocketsResponse{} for _, m := range metrics { resp.SocketRef = append(resp.SocketRef, &channelzpb.SocketRef{SocketId: m.ID, Name: m.RefName}) } resp.End = end return resp, nil } func (s *serverImpl) GetChannel(ctx context.Context, req *channelzpb.GetChannelRequest) (*channelzpb.GetChannelResponse, error) { var metric *channelz.ChannelMetric if metric = channelz.GetChannel(req.GetChannelId()); metric == nil { return nil, status.Errorf(codes.NotFound, "requested channel %d not found", req.GetChannelId()) } resp := &channelzpb.GetChannelResponse{Channel: channelMetricToProto(metric)} return resp, nil } func (s *serverImpl) GetSubchannel(ctx context.Context, req *channelzpb.GetSubchannelRequest) (*channelzpb.GetSubchannelResponse, error) { var metric *channelz.SubChannelMetric if metric = channelz.GetSubChannel(req.GetSubchannelId()); metric == nil { return nil, status.Errorf(codes.NotFound, "requested sub channel %d not found", req.GetSubchannelId()) } resp := &channelzpb.GetSubchannelResponse{Subchannel: subChannelMetricToProto(metric)} return resp, nil } func (s *serverImpl) GetSocket(ctx context.Context, req *channelzpb.GetSocketRequest) (*channelzpb.GetSocketResponse, error) { var metric *channelz.SocketMetric if metric = channelz.GetSocket(req.GetSocketId()); metric == nil { return nil, status.Errorf(codes.NotFound, "requested socket %d not found", req.GetSocketId()) } resp := &channelzpb.GetSocketResponse{Socket: socketMetricToProto(metric)} return resp, nil } func (s *serverImpl) GetServer(ctx 
context.Context, req *channelzpb.GetServerRequest) (*channelzpb.GetServerResponse, error) { var metric *channelz.ServerMetric if metric = channelz.GetServer(req.GetServerId()); metric == nil { return nil, status.Errorf(codes.NotFound, "requested server %d not found", req.GetServerId()) } resp := &channelzpb.GetServerResponse{Server: serverMetricToProto(metric)} return resp, nil } grpc-go-1.29.1/channelz/service/service_sktopt_test.go000066400000000000000000000120741365033716300230510ustar00rootroot00000000000000// +build linux,!appengine // +build 386 amd64 /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // SocketOptions is only supported on linux system. The functions defined in // this file are to parse the socket option field and the test is specifically // to verify the behavior of socket option parsing. package service import ( "context" "reflect" "strconv" "testing" "github.com/golang/protobuf/ptypes" durpb "github.com/golang/protobuf/ptypes/duration" "golang.org/x/sys/unix" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/internal/channelz" ) func init() { // Assign protoToSocketOption to protoToSocketOpt in order to enable socket option // data conversion from proto message to channelz defined struct. 
protoToSocketOpt = protoToSocketOption } func convertToDuration(d *durpb.Duration) (sec int64, usec int64) { if d != nil { if dur, err := ptypes.Duration(d); err == nil { sec = int64(int64(dur) / 1e9) usec = (int64(dur) - sec*1e9) / 1e3 } } return } func protoToLinger(protoLinger *channelzpb.SocketOptionLinger) *unix.Linger { linger := &unix.Linger{} if protoLinger.GetActive() { linger.Onoff = 1 } lv, _ := convertToDuration(protoLinger.GetDuration()) linger.Linger = int32(lv) return linger } func protoToSocketOption(skopts []*channelzpb.SocketOption) *channelz.SocketOptionData { skdata := &channelz.SocketOptionData{} for _, opt := range skopts { switch opt.GetName() { case "SO_LINGER": protoLinger := &channelzpb.SocketOptionLinger{} err := ptypes.UnmarshalAny(opt.GetAdditional(), protoLinger) if err == nil { skdata.Linger = protoToLinger(protoLinger) } case "SO_RCVTIMEO": protoTimeout := &channelzpb.SocketOptionTimeout{} err := ptypes.UnmarshalAny(opt.GetAdditional(), protoTimeout) if err == nil { skdata.RecvTimeout = protoToTime(protoTimeout) } case "SO_SNDTIMEO": protoTimeout := &channelzpb.SocketOptionTimeout{} err := ptypes.UnmarshalAny(opt.GetAdditional(), protoTimeout) if err == nil { skdata.SendTimeout = protoToTime(protoTimeout) } case "TCP_INFO": tcpi := &channelzpb.SocketOptionTcpInfo{} err := ptypes.UnmarshalAny(opt.GetAdditional(), tcpi) if err == nil { skdata.TCPInfo = &unix.TCPInfo{ State: uint8(tcpi.TcpiState), Ca_state: uint8(tcpi.TcpiCaState), Retransmits: uint8(tcpi.TcpiRetransmits), Probes: uint8(tcpi.TcpiProbes), Backoff: uint8(tcpi.TcpiBackoff), Options: uint8(tcpi.TcpiOptions), Rto: tcpi.TcpiRto, Ato: tcpi.TcpiAto, Snd_mss: tcpi.TcpiSndMss, Rcv_mss: tcpi.TcpiRcvMss, Unacked: tcpi.TcpiUnacked, Sacked: tcpi.TcpiSacked, Lost: tcpi.TcpiLost, Retrans: tcpi.TcpiRetrans, Fackets: tcpi.TcpiFackets, Last_data_sent: tcpi.TcpiLastDataSent, Last_ack_sent: tcpi.TcpiLastAckSent, Last_data_recv: tcpi.TcpiLastDataRecv, Last_ack_recv: tcpi.TcpiLastAckRecv, 
Pmtu: tcpi.TcpiPmtu, Rcv_ssthresh: tcpi.TcpiRcvSsthresh, Rtt: tcpi.TcpiRtt, Rttvar: tcpi.TcpiRttvar, Snd_ssthresh: tcpi.TcpiSndSsthresh, Snd_cwnd: tcpi.TcpiSndCwnd, Advmss: tcpi.TcpiAdvmss, Reordering: tcpi.TcpiReordering} } } } return skdata } func (s) TestGetSocketOptions(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer cleanupWrapper(czCleanup, t) ss := []*dummySocket{ { socketOptions: &channelz.SocketOptionData{ Linger: &unix.Linger{Onoff: 1, Linger: 2}, RecvTimeout: &unix.Timeval{Sec: 10, Usec: 1}, SendTimeout: &unix.Timeval{}, TCPInfo: &unix.TCPInfo{State: 1}, }, }, } svr := newCZServer() ids := make([]int64, len(ss)) svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) for i, s := range ss { ids[i] = channelz.RegisterNormalSocket(s, svrID, strconv.Itoa(i)) defer channelz.RemoveEntry(ids[i]) } for i, s := range ss { resp, _ := svr.GetSocket(context.Background(), &channelzpb.GetSocketRequest{SocketId: ids[i]}) metrics := resp.GetSocket() if !reflect.DeepEqual(metrics.GetRef(), &channelzpb.SocketRef{SocketId: ids[i], Name: strconv.Itoa(i)}) || !reflect.DeepEqual(socketProtoToStruct(metrics), s) { t.Fatalf("resp.GetSocket() want: metrics.GetRef() = %#v and %#v, got: metrics.GetRef() = %#v and %#v", &channelzpb.SocketRef{SocketId: ids[i], Name: strconv.Itoa(i)}, s, metrics.GetRef(), socketProtoToStruct(metrics)) } } } grpc-go-1.29.1/channelz/service/service_test.go000066400000000000000000000657171365033716300214610ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package service import ( "context" "fmt" "net" "reflect" "strconv" "testing" "time" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpctest" ) func init() { channelz.TurnOn() } type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func cleanupWrapper(cleanup func() error, t *testing.T) { if err := cleanup(); err != nil { t.Error(err) } } type protoToSocketOptFunc func([]*channelzpb.SocketOption) *channelz.SocketOptionData // protoToSocketOpt is used in function socketProtoToStruct to extract socket option // data from unmarshaled proto message. // It is only defined under linux, non-appengine environment on x86 architecture. var protoToSocketOpt protoToSocketOptFunc // emptyTime is used for detecting unset value of time.Time type. // For go1.7 and earlier, ptypes.Timestamp will fill in the loc field of time.Time // with &utcLoc. However zero value of a time.Time type value loc field is nil. // This behavior will make reflect.DeepEqual fail upon unset time.Time field, // and cause false positive fatal error. // TODO: Go1.7 is no longer supported - does this need a change? 
var emptyTime time.Time type dummyChannel struct { state connectivity.State target string callsStarted int64 callsSucceeded int64 callsFailed int64 lastCallStartedTimestamp time.Time } func (d *dummyChannel) ChannelzMetric() *channelz.ChannelInternalMetric { return &channelz.ChannelInternalMetric{ State: d.state, Target: d.target, CallsStarted: d.callsStarted, CallsSucceeded: d.callsSucceeded, CallsFailed: d.callsFailed, LastCallStartedTimestamp: d.lastCallStartedTimestamp, } } type dummyServer struct { callsStarted int64 callsSucceeded int64 callsFailed int64 lastCallStartedTimestamp time.Time } func (d *dummyServer) ChannelzMetric() *channelz.ServerInternalMetric { return &channelz.ServerInternalMetric{ CallsStarted: d.callsStarted, CallsSucceeded: d.callsSucceeded, CallsFailed: d.callsFailed, LastCallStartedTimestamp: d.lastCallStartedTimestamp, } } type dummySocket struct { streamsStarted int64 streamsSucceeded int64 streamsFailed int64 messagesSent int64 messagesReceived int64 keepAlivesSent int64 lastLocalStreamCreatedTimestamp time.Time lastRemoteStreamCreatedTimestamp time.Time lastMessageSentTimestamp time.Time lastMessageReceivedTimestamp time.Time localFlowControlWindow int64 remoteFlowControlWindow int64 socketOptions *channelz.SocketOptionData localAddr net.Addr remoteAddr net.Addr security credentials.ChannelzSecurityValue remoteName string } func (d *dummySocket) ChannelzMetric() *channelz.SocketInternalMetric { return &channelz.SocketInternalMetric{ StreamsStarted: d.streamsStarted, StreamsSucceeded: d.streamsSucceeded, StreamsFailed: d.streamsFailed, MessagesSent: d.messagesSent, MessagesReceived: d.messagesReceived, KeepAlivesSent: d.keepAlivesSent, LastLocalStreamCreatedTimestamp: d.lastLocalStreamCreatedTimestamp, LastRemoteStreamCreatedTimestamp: d.lastRemoteStreamCreatedTimestamp, LastMessageSentTimestamp: d.lastMessageSentTimestamp, LastMessageReceivedTimestamp: d.lastMessageReceivedTimestamp, LocalFlowControlWindow: d.localFlowControlWindow, 
RemoteFlowControlWindow: d.remoteFlowControlWindow, SocketOptions: d.socketOptions, LocalAddr: d.localAddr, RemoteAddr: d.remoteAddr, Security: d.security, RemoteName: d.remoteName, } } func channelProtoToStruct(c *channelzpb.Channel) *dummyChannel { dc := &dummyChannel{} pdata := c.GetData() switch pdata.GetState().GetState() { case channelzpb.ChannelConnectivityState_UNKNOWN: // TODO: what should we set here? case channelzpb.ChannelConnectivityState_IDLE: dc.state = connectivity.Idle case channelzpb.ChannelConnectivityState_CONNECTING: dc.state = connectivity.Connecting case channelzpb.ChannelConnectivityState_READY: dc.state = connectivity.Ready case channelzpb.ChannelConnectivityState_TRANSIENT_FAILURE: dc.state = connectivity.TransientFailure case channelzpb.ChannelConnectivityState_SHUTDOWN: dc.state = connectivity.Shutdown } dc.target = pdata.GetTarget() dc.callsStarted = pdata.CallsStarted dc.callsSucceeded = pdata.CallsSucceeded dc.callsFailed = pdata.CallsFailed if t, err := ptypes.Timestamp(pdata.GetLastCallStartedTimestamp()); err == nil { if !t.Equal(emptyTime) { dc.lastCallStartedTimestamp = t } } return dc } func serverProtoToStruct(s *channelzpb.Server) *dummyServer { ds := &dummyServer{} pdata := s.GetData() ds.callsStarted = pdata.CallsStarted ds.callsSucceeded = pdata.CallsSucceeded ds.callsFailed = pdata.CallsFailed if t, err := ptypes.Timestamp(pdata.GetLastCallStartedTimestamp()); err == nil { if !t.Equal(emptyTime) { ds.lastCallStartedTimestamp = t } } return ds } func socketProtoToStruct(s *channelzpb.Socket) *dummySocket { ds := &dummySocket{} pdata := s.GetData() ds.streamsStarted = pdata.GetStreamsStarted() ds.streamsSucceeded = pdata.GetStreamsSucceeded() ds.streamsFailed = pdata.GetStreamsFailed() ds.messagesSent = pdata.GetMessagesSent() ds.messagesReceived = pdata.GetMessagesReceived() ds.keepAlivesSent = pdata.GetKeepAlivesSent() if t, err := ptypes.Timestamp(pdata.GetLastLocalStreamCreatedTimestamp()); err == nil { if 
!t.Equal(emptyTime) { ds.lastLocalStreamCreatedTimestamp = t } } if t, err := ptypes.Timestamp(pdata.GetLastRemoteStreamCreatedTimestamp()); err == nil { if !t.Equal(emptyTime) { ds.lastRemoteStreamCreatedTimestamp = t } } if t, err := ptypes.Timestamp(pdata.GetLastMessageSentTimestamp()); err == nil { if !t.Equal(emptyTime) { ds.lastMessageSentTimestamp = t } } if t, err := ptypes.Timestamp(pdata.GetLastMessageReceivedTimestamp()); err == nil { if !t.Equal(emptyTime) { ds.lastMessageReceivedTimestamp = t } } if v := pdata.GetLocalFlowControlWindow(); v != nil { ds.localFlowControlWindow = v.Value } if v := pdata.GetRemoteFlowControlWindow(); v != nil { ds.remoteFlowControlWindow = v.Value } if v := pdata.GetOption(); v != nil && protoToSocketOpt != nil { ds.socketOptions = protoToSocketOpt(v) } if v := s.GetSecurity(); v != nil { ds.security = protoToSecurity(v) } if local := s.GetLocal(); local != nil { ds.localAddr = protoToAddr(local) } if remote := s.GetRemote(); remote != nil { ds.remoteAddr = protoToAddr(remote) } ds.remoteName = s.GetRemoteName() return ds } func protoToSecurity(protoSecurity *channelzpb.Security) credentials.ChannelzSecurityValue { switch v := protoSecurity.Model.(type) { case *channelzpb.Security_Tls_: return &credentials.TLSChannelzSecurityValue{StandardName: v.Tls.GetStandardName(), LocalCertificate: v.Tls.GetLocalCertificate(), RemoteCertificate: v.Tls.GetRemoteCertificate()} case *channelzpb.Security_Other: sv := &credentials.OtherChannelzSecurityValue{Name: v.Other.GetName()} var x ptypes.DynamicAny if err := ptypes.UnmarshalAny(v.Other.GetValue(), &x); err == nil { sv.Value = x.Message } return sv } return nil } func protoToAddr(a *channelzpb.Address) net.Addr { switch v := a.Address.(type) { case *channelzpb.Address_TcpipAddress: if port := v.TcpipAddress.GetPort(); port != 0 { return &net.TCPAddr{IP: v.TcpipAddress.GetIpAddress(), Port: int(port)} } return &net.IPAddr{IP: v.TcpipAddress.GetIpAddress()} case 
*channelzpb.Address_UdsAddress_: return &net.UnixAddr{Name: v.UdsAddress.GetFilename(), Net: "unix"} case *channelzpb.Address_OtherAddress_: // TODO: } return nil } func convertSocketRefSliceToMap(sktRefs []*channelzpb.SocketRef) map[int64]string { m := make(map[int64]string) for _, sr := range sktRefs { m[sr.SocketId] = sr.Name } return m } type OtherSecurityValue struct { LocalCertificate []byte `protobuf:"bytes,1,opt,name=local_certificate,json=localCertificate,proto3" json:"local_certificate,omitempty"` RemoteCertificate []byte `protobuf:"bytes,2,opt,name=remote_certificate,json=remoteCertificate,proto3" json:"remote_certificate,omitempty"` } func (m *OtherSecurityValue) Reset() { *m = OtherSecurityValue{} } func (m *OtherSecurityValue) String() string { return proto.CompactTextString(m) } func (*OtherSecurityValue) ProtoMessage() {} func init() { // Ad-hoc registering the proto type here to facilitate UnmarshalAny of OtherSecurityValue. proto.RegisterType((*OtherSecurityValue)(nil), "grpc.credentials.OtherChannelzSecurityValue") } func (s) TestGetTopChannels(t *testing.T) { tcs := []*dummyChannel{ { state: connectivity.Connecting, target: "test.channelz:1234", callsStarted: 6, callsSucceeded: 2, callsFailed: 3, lastCallStartedTimestamp: time.Now().UTC(), }, { state: connectivity.Connecting, target: "test.channelz:1234", callsStarted: 1, callsSucceeded: 2, callsFailed: 3, lastCallStartedTimestamp: time.Now().UTC(), }, { state: connectivity.Shutdown, target: "test.channelz:8888", callsStarted: 0, callsSucceeded: 0, callsFailed: 0, }, {}, } czCleanup := channelz.NewChannelzStorage() defer cleanupWrapper(czCleanup, t) for _, c := range tcs { id := channelz.RegisterChannel(c, 0, "") defer channelz.RemoveEntry(id) } s := newCZServer() resp, _ := s.GetTopChannels(context.Background(), &channelzpb.GetTopChannelsRequest{StartChannelId: 0}) if !resp.GetEnd() { t.Fatalf("resp.GetEnd() want true, got %v", resp.GetEnd()) } for i, c := range resp.GetChannel() { if 
!reflect.DeepEqual(channelProtoToStruct(c), tcs[i]) { t.Fatalf("dummyChannel: %d, want: %#v, got: %#v", i, tcs[i], channelProtoToStruct(c)) } } for i := 0; i < 50; i++ { id := channelz.RegisterChannel(tcs[0], 0, "") defer channelz.RemoveEntry(id) } resp, _ = s.GetTopChannels(context.Background(), &channelzpb.GetTopChannelsRequest{StartChannelId: 0}) if resp.GetEnd() { t.Fatalf("resp.GetEnd() want false, got %v", resp.GetEnd()) } } func (s) TestGetServers(t *testing.T) { ss := []*dummyServer{ { callsStarted: 6, callsSucceeded: 2, callsFailed: 3, lastCallStartedTimestamp: time.Now().UTC(), }, { callsStarted: 1, callsSucceeded: 2, callsFailed: 3, lastCallStartedTimestamp: time.Now().UTC(), }, { callsStarted: 1, callsSucceeded: 0, callsFailed: 0, lastCallStartedTimestamp: time.Now().UTC(), }, } czCleanup := channelz.NewChannelzStorage() defer cleanupWrapper(czCleanup, t) for _, s := range ss { id := channelz.RegisterServer(s, "") defer channelz.RemoveEntry(id) } svr := newCZServer() resp, _ := svr.GetServers(context.Background(), &channelzpb.GetServersRequest{StartServerId: 0}) if !resp.GetEnd() { t.Fatalf("resp.GetEnd() want true, got %v", resp.GetEnd()) } for i, s := range resp.GetServer() { if !reflect.DeepEqual(serverProtoToStruct(s), ss[i]) { t.Fatalf("dummyServer: %d, want: %#v, got: %#v", i, ss[i], serverProtoToStruct(s)) } } for i := 0; i < 50; i++ { id := channelz.RegisterServer(ss[0], "") defer channelz.RemoveEntry(id) } resp, _ = svr.GetServers(context.Background(), &channelzpb.GetServersRequest{StartServerId: 0}) if resp.GetEnd() { t.Fatalf("resp.GetEnd() want false, got %v", resp.GetEnd()) } } func (s) TestGetServerSockets(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer cleanupWrapper(czCleanup, t) svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) refNames := []string{"listen socket 1", "normal socket 1", "normal socket 2"} ids := make([]int64, 3) ids[0] = channelz.RegisterListenSocket(&dummySocket{}, 
svrID, refNames[0]) ids[1] = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[1]) ids[2] = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[2]) for _, id := range ids { defer channelz.RemoveEntry(id) } svr := newCZServer() resp, _ := svr.GetServerSockets(context.Background(), &channelzpb.GetServerSocketsRequest{ServerId: svrID, StartSocketId: 0}) if !resp.GetEnd() { t.Fatalf("resp.GetEnd() want: true, got: %v", resp.GetEnd()) } // GetServerSockets only return normal sockets. want := map[int64]string{ ids[1]: refNames[1], ids[2]: refNames[2], } if !reflect.DeepEqual(convertSocketRefSliceToMap(resp.GetSocketRef()), want) { t.Fatalf("GetServerSockets want: %#v, got: %#v", want, resp.GetSocketRef()) } for i := 0; i < 50; i++ { id := channelz.RegisterNormalSocket(&dummySocket{}, svrID, "") defer channelz.RemoveEntry(id) } resp, _ = svr.GetServerSockets(context.Background(), &channelzpb.GetServerSocketsRequest{ServerId: svrID, StartSocketId: 0}) if resp.GetEnd() { t.Fatalf("resp.GetEnd() want false, got %v", resp.GetEnd()) } } // This test makes a GetServerSockets with a non-zero start ID, and expect only // sockets with ID >= the given start ID. func (s) TestGetServerSocketsNonZeroStartID(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer cleanupWrapper(czCleanup, t) svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) refNames := []string{"listen socket 1", "normal socket 1", "normal socket 2"} ids := make([]int64, 3) ids[0] = channelz.RegisterListenSocket(&dummySocket{}, svrID, refNames[0]) ids[1] = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[1]) ids[2] = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[2]) for _, id := range ids { defer channelz.RemoveEntry(id) } svr := newCZServer() // Make GetServerSockets with startID = ids[1]+1, so socket-1 won't be // included in the response. 
resp, _ := svr.GetServerSockets(context.Background(), &channelzpb.GetServerSocketsRequest{ServerId: svrID, StartSocketId: ids[1] + 1}) if !resp.GetEnd() { t.Fatalf("resp.GetEnd() want: true, got: %v", resp.GetEnd()) } // GetServerSockets only return normal socket-2, socket-1 should be // filtered by start ID. want := map[int64]string{ ids[2]: refNames[2], } if !reflect.DeepEqual(convertSocketRefSliceToMap(resp.GetSocketRef()), want) { t.Fatalf("GetServerSockets want: %#v, got: %#v", want, resp.GetSocketRef()) } } func (s) TestGetChannel(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer cleanupWrapper(czCleanup, t) refNames := []string{"top channel 1", "nested channel 1", "sub channel 2", "nested channel 3"} ids := make([]int64, 4) ids[0] = channelz.RegisterChannel(&dummyChannel{}, 0, refNames[0]) channelz.AddTraceEvent(ids[0], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, }) ids[1] = channelz.RegisterChannel(&dummyChannel{}, ids[0], refNames[1]) channelz.AddTraceEvent(ids[1], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[1]), Severity: channelz.CtINFO, }, }) ids[2] = channelz.RegisterSubChannel(&dummyChannel{}, ids[0], refNames[2]) channelz.AddTraceEvent(ids[2], 0, &channelz.TraceEventDesc{ Desc: "SubChannel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ Desc: fmt.Sprintf("SubChannel(id:%d) created", ids[2]), Severity: channelz.CtINFO, }, }) ids[3] = channelz.RegisterChannel(&dummyChannel{}, ids[1], refNames[3]) channelz.AddTraceEvent(ids[3], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[3]), Severity: channelz.CtINFO, }, }) channelz.AddTraceEvent(ids[0], 0, &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Channel Connectivity change to %v", 
connectivity.Ready), Severity: channelz.CtINFO, }) channelz.AddTraceEvent(ids[0], 0, &channelz.TraceEventDesc{ Desc: "Resolver returns an empty address list", Severity: channelz.CtWarning, }) for _, id := range ids { defer channelz.RemoveEntry(id) } svr := newCZServer() resp, _ := svr.GetChannel(context.Background(), &channelzpb.GetChannelRequest{ChannelId: ids[0]}) metrics := resp.GetChannel() subChans := metrics.GetSubchannelRef() if len(subChans) != 1 || subChans[0].GetName() != refNames[2] || subChans[0].GetSubchannelId() != ids[2] { t.Fatalf("metrics.GetSubChannelRef() want %#v, got %#v", []*channelzpb.SubchannelRef{{SubchannelId: ids[2], Name: refNames[2]}}, subChans) } nestedChans := metrics.GetChannelRef() if len(nestedChans) != 1 || nestedChans[0].GetName() != refNames[1] || nestedChans[0].GetChannelId() != ids[1] { t.Fatalf("metrics.GetChannelRef() want %#v, got %#v", []*channelzpb.ChannelRef{{ChannelId: ids[1], Name: refNames[1]}}, nestedChans) } trace := metrics.GetData().GetTrace() want := []struct { desc string severity channelzpb.ChannelTraceEvent_Severity childID int64 childRef string }{ {desc: "Channel Created", severity: channelzpb.ChannelTraceEvent_CT_INFO}, {desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[1]), severity: channelzpb.ChannelTraceEvent_CT_INFO, childID: ids[1], childRef: refNames[1]}, {desc: fmt.Sprintf("SubChannel(id:%d) created", ids[2]), severity: channelzpb.ChannelTraceEvent_CT_INFO, childID: ids[2], childRef: refNames[2]}, {desc: fmt.Sprintf("Channel Connectivity change to %v", connectivity.Ready), severity: channelzpb.ChannelTraceEvent_CT_INFO}, {desc: "Resolver returns an empty address list", severity: channelzpb.ChannelTraceEvent_CT_WARNING}, } for i, e := range trace.Events { if e.GetDescription() != want[i].desc { t.Fatalf("trace: GetDescription want %#v, got %#v", want[i].desc, e.GetDescription()) } if e.GetSeverity() != want[i].severity { t.Fatalf("trace: GetSeverity want %#v, got %#v", want[i].severity, 
e.GetSeverity()) } if want[i].childID == 0 && (e.GetChannelRef() != nil || e.GetSubchannelRef() != nil) { t.Fatalf("trace: GetChannelRef() should return nil, as there is no reference") } if e.GetChannelRef().GetChannelId() != want[i].childID || e.GetChannelRef().GetName() != want[i].childRef { if e.GetSubchannelRef().GetSubchannelId() != want[i].childID || e.GetSubchannelRef().GetName() != want[i].childRef { t.Fatalf("trace: GetChannelRef/GetSubchannelRef want (child ID: %d, child name: %q), got %#v and %#v", want[i].childID, want[i].childRef, e.GetChannelRef(), e.GetSubchannelRef()) } } } resp, _ = svr.GetChannel(context.Background(), &channelzpb.GetChannelRequest{ChannelId: ids[1]}) metrics = resp.GetChannel() nestedChans = metrics.GetChannelRef() if len(nestedChans) != 1 || nestedChans[0].GetName() != refNames[3] || nestedChans[0].GetChannelId() != ids[3] { t.Fatalf("metrics.GetChannelRef() want %#v, got %#v", []*channelzpb.ChannelRef{{ChannelId: ids[3], Name: refNames[3]}}, nestedChans) } } func (s) TestGetSubChannel(t *testing.T) { var ( subchanCreated = "SubChannel Created" subchanConnectivityChange = fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Ready) subChanPickNewAddress = fmt.Sprintf("Subchannel picks a new address %q to connect", "0.0.0.0") ) czCleanup := channelz.NewChannelzStorage() defer cleanupWrapper(czCleanup, t) refNames := []string{"top channel 1", "sub channel 1", "socket 1", "socket 2"} ids := make([]int64, 4) ids[0] = channelz.RegisterChannel(&dummyChannel{}, 0, refNames[0]) channelz.AddTraceEvent(ids[0], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, }) ids[1] = channelz.RegisterSubChannel(&dummyChannel{}, ids[0], refNames[1]) channelz.AddTraceEvent(ids[1], 0, &channelz.TraceEventDesc{ Desc: subchanCreated, Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[0]), Severity: channelz.CtINFO, }, }) ids[2] = 
channelz.RegisterNormalSocket(&dummySocket{}, ids[1], refNames[2]) ids[3] = channelz.RegisterNormalSocket(&dummySocket{}, ids[1], refNames[3]) channelz.AddTraceEvent(ids[1], 0, &channelz.TraceEventDesc{ Desc: subchanConnectivityChange, Severity: channelz.CtINFO, }) channelz.AddTraceEvent(ids[1], 0, &channelz.TraceEventDesc{ Desc: subChanPickNewAddress, Severity: channelz.CtINFO, }) for _, id := range ids { defer channelz.RemoveEntry(id) } svr := newCZServer() resp, _ := svr.GetSubchannel(context.Background(), &channelzpb.GetSubchannelRequest{SubchannelId: ids[1]}) metrics := resp.GetSubchannel() want := map[int64]string{ ids[2]: refNames[2], ids[3]: refNames[3], } if !reflect.DeepEqual(convertSocketRefSliceToMap(metrics.GetSocketRef()), want) { t.Fatalf("metrics.GetSocketRef() want %#v: got: %#v", want, metrics.GetSocketRef()) } trace := metrics.GetData().GetTrace() wantTrace := []struct { desc string severity channelzpb.ChannelTraceEvent_Severity childID int64 childRef string }{ {desc: subchanCreated, severity: channelzpb.ChannelTraceEvent_CT_INFO}, {desc: subchanConnectivityChange, severity: channelzpb.ChannelTraceEvent_CT_INFO}, {desc: subChanPickNewAddress, severity: channelzpb.ChannelTraceEvent_CT_INFO}, } for i, e := range trace.Events { if e.GetDescription() != wantTrace[i].desc { t.Fatalf("trace: GetDescription want %#v, got %#v", wantTrace[i].desc, e.GetDescription()) } if e.GetSeverity() != wantTrace[i].severity { t.Fatalf("trace: GetSeverity want %#v, got %#v", wantTrace[i].severity, e.GetSeverity()) } if wantTrace[i].childID == 0 && (e.GetChannelRef() != nil || e.GetSubchannelRef() != nil) { t.Fatalf("trace: GetChannelRef() should return nil, as there is no reference") } if e.GetChannelRef().GetChannelId() != wantTrace[i].childID || e.GetChannelRef().GetName() != wantTrace[i].childRef { if e.GetSubchannelRef().GetSubchannelId() != wantTrace[i].childID || e.GetSubchannelRef().GetName() != wantTrace[i].childRef { t.Fatalf("trace: 
GetChannelRef/GetSubchannelRef want (child ID: %d, child name: %q), got %#v and %#v", wantTrace[i].childID, wantTrace[i].childRef, e.GetChannelRef(), e.GetSubchannelRef()) } } } } func (s) TestGetSocket(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer cleanupWrapper(czCleanup, t) ss := []*dummySocket{ { streamsStarted: 10, streamsSucceeded: 2, streamsFailed: 3, messagesSent: 20, messagesReceived: 10, keepAlivesSent: 2, lastLocalStreamCreatedTimestamp: time.Now().UTC(), lastRemoteStreamCreatedTimestamp: time.Now().UTC(), lastMessageSentTimestamp: time.Now().UTC(), lastMessageReceivedTimestamp: time.Now().UTC(), localFlowControlWindow: 65536, remoteFlowControlWindow: 1024, localAddr: &net.TCPAddr{IP: net.ParseIP("1.0.0.1"), Port: 10001}, remoteAddr: &net.TCPAddr{IP: net.ParseIP("12.0.0.1"), Port: 10002}, remoteName: "remote.remote", }, { streamsStarted: 10, streamsSucceeded: 2, streamsFailed: 3, messagesSent: 20, messagesReceived: 10, keepAlivesSent: 2, lastRemoteStreamCreatedTimestamp: time.Now().UTC(), lastMessageSentTimestamp: time.Now().UTC(), lastMessageReceivedTimestamp: time.Now().UTC(), localFlowControlWindow: 65536, remoteFlowControlWindow: 1024, localAddr: &net.UnixAddr{Name: "file.path", Net: "unix"}, remoteAddr: &net.UnixAddr{Name: "another.path", Net: "unix"}, remoteName: "remote.remote", }, { streamsStarted: 5, streamsSucceeded: 2, streamsFailed: 3, messagesSent: 20, messagesReceived: 10, keepAlivesSent: 2, lastLocalStreamCreatedTimestamp: time.Now().UTC(), lastMessageSentTimestamp: time.Now().UTC(), lastMessageReceivedTimestamp: time.Now().UTC(), localFlowControlWindow: 65536, remoteFlowControlWindow: 10240, localAddr: &net.IPAddr{IP: net.ParseIP("1.0.0.1")}, remoteAddr: &net.IPAddr{IP: net.ParseIP("9.0.0.1")}, remoteName: "", }, { localAddr: &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 10001}, }, { security: &credentials.TLSChannelzSecurityValue{ StandardName: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", RemoteCertificate: []byte{48, 
130, 2, 156, 48, 130, 2, 5, 160}, }, }, { security: &credentials.OtherChannelzSecurityValue{ Name: "XXXX", }, }, { security: &credentials.OtherChannelzSecurityValue{ Name: "YYYY", Value: &OtherSecurityValue{LocalCertificate: []byte{1, 2, 3}, RemoteCertificate: []byte{4, 5, 6}}, }, }, } svr := newCZServer() ids := make([]int64, len(ss)) svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) for i, s := range ss { ids[i] = channelz.RegisterNormalSocket(s, svrID, strconv.Itoa(i)) defer channelz.RemoveEntry(ids[i]) } for i, s := range ss { resp, _ := svr.GetSocket(context.Background(), &channelzpb.GetSocketRequest{SocketId: ids[i]}) metrics := resp.GetSocket() if !reflect.DeepEqual(metrics.GetRef(), &channelzpb.SocketRef{SocketId: ids[i], Name: strconv.Itoa(i)}) || !reflect.DeepEqual(socketProtoToStruct(metrics), s) { t.Fatalf("resp.GetSocket() want: metrics.GetRef() = %#v and %#v, got: metrics.GetRef() = %#v and %#v", &channelzpb.SocketRef{SocketId: ids[i], Name: strconv.Itoa(i)}, s, metrics.GetRef(), socketProtoToStruct(metrics)) } } } grpc-go-1.29.1/channelz/service/util_sktopt_386_test.go000066400000000000000000000017311365033716300227640ustar00rootroot00000000000000// +build 386,linux,!appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package service import ( "golang.org/x/sys/unix" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" ) func protoToTime(protoTime *channelzpb.SocketOptionTimeout) *unix.Timeval { timeout := &unix.Timeval{} sec, usec := convertToDuration(protoTime.GetDuration()) timeout.Sec, timeout.Usec = int32(sec), int32(usec) return timeout } grpc-go-1.29.1/channelz/service/util_sktopt_amd64_test.go000066400000000000000000000016651365033716300233650ustar00rootroot00000000000000// +build amd64,linux,!appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package service import ( "golang.org/x/sys/unix" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" ) func protoToTime(protoTime *channelzpb.SocketOptionTimeout) *unix.Timeval { timeout := &unix.Timeval{} timeout.Sec, timeout.Usec = convertToDuration(protoTime.GetDuration()) return timeout } grpc-go-1.29.1/clientconn.go000066400000000000000000001366751365033716300156560ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "context" "errors" "fmt" "math" "net" "reflect" "strings" "sync" "sync/atomic" "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. ) const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second // must match grpclbName in grpclb/grpclb.go grpclbName = "grpclb" ) var ( // ErrClientConnClosing indicates that the operation is illegal because // the ClientConn is closing. // // Deprecated: this error should not be relied upon by users; use the status // code of Canceled instead. ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") // errBalancerClosed indicates that the balancer is closed. errBalancerClosed = errors.New("grpc: balancer is closed") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. 
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" ) // The following errors are returned from Dial and DialContext var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") // errTransportCredentialsMissing indicates that users want to transmit security // information (e.g., OAuth2 token) which requires secure connection on an insecure // connection. errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") // errCredentialsConflict indicates that grpc.WithTransportCredentials() // and grpc.WithInsecure() are both called for a connection. errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") ) const ( defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 defaultClientMaxSendMessageSize = math.MaxInt32 // http2IOBufSize specifies the buffer size for sending frames. defaultWriteBufSize = 32 * 1024 defaultReadBufSize = 32 * 1024 ) // Dial creates a client connection to the given target. func Dial(target string, opts ...DialOption) (*ClientConn, error) { return DialContext(context.Background(), target, opts...) } // DialContext creates a client connection to the given target. 
// DialContext creates a client connection to the given target. By default, it's
// a non-blocking dial (the function won't wait for connections to be
// established, and connecting happens in the background). To make it a blocking
// dial, use WithBlock() dial option.
//
// In the non-blocking case, the ctx does not act against the connection. It
// only controls the setup steps.
//
// In the blocking case, ctx can be used to cancel or expire the pending
// connection. Once this function returns, the cancellation and expiration of
// ctx will be noop. Users should call ClientConn.Close to terminate all the
// pending operations after this function returns.
//
// The target name syntax is defined in
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
	cc := &ClientConn{
		target:            target,
		csMgr:             &connectivityStateManager{},
		conns:             make(map[*addrConn]struct{}),
		dopts:             defaultDialOptions(),
		blockingpicker:    newPickerWrapper(),
		czData:            new(channelzData),
		firstResolveEvent: grpcsync.NewEvent(),
	}
	// Throttling starts disabled; a service config may install a throttler later.
	cc.retryThrottler.Store((*retryThrottler)(nil))
	// The ClientConn's own context is independent of the dial ctx: the dial ctx
	// only governs setup (and blocking dials), not the connection's lifetime.
	cc.ctx, cc.cancel = context.WithCancel(context.Background())

	for _, opt := range opts {
		opt.apply(&cc.dopts)
	}

	chainUnaryClientInterceptors(cc)
	chainStreamClientInterceptors(cc)

	// Ensure partial initialization is torn down on any error return below.
	defer func() {
		if err != nil {
			cc.Close()
		}
	}()

	if channelz.IsOn() {
		if cc.dopts.channelzParentID != 0 {
			cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
			channelz.AddTraceEvent(cc.channelzID, 0, &channelz.TraceEventDesc{
				Desc:     "Channel Created",
				Severity: channelz.CtINFO,
				Parent: &channelz.TraceEventDesc{
					Desc:     fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
					Severity: channelz.CtINFO,
				},
			})
		} else {
			cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
			channelz.Info(cc.channelzID, "Channel Created")
		}
		cc.csMgr.channelzID = cc.channelzID
	}

	// Validate the transport-security option combination.
	if !cc.dopts.insecure {
		if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
			return nil, errNoTransportSecurity
		}
		if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
			return nil, errTransportCredsAndBundle
		}
	} else {
		if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil {
			return nil, errCredentialsConflict
		}
		// Per-RPC credentials that require transport security are incompatible
		// with an insecure channel.
		for _, cd := range cc.dopts.copts.PerRPCCredentials {
			if cd.RequireTransportSecurity() {
				return nil, errTransportCredentialsMissing
			}
		}
	}

	if cc.dopts.defaultServiceConfigRawJSON != nil {
		scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
		if scpr.Err != nil {
			return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
		}
		cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
	}
	cc.mkp = cc.dopts.copts.KeepaliveParams

	if cc.dopts.copts.Dialer == nil {
		cc.dopts.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
			network, addr := parseDialTarget(addr)
			return (&net.Dialer{}).DialContext(ctx, network, addr)
		}
		if cc.dopts.withProxy {
			cc.dopts.copts.Dialer = newProxyDialer(cc.dopts.copts.Dialer)
		}
	}

	if cc.dopts.copts.UserAgent != "" {
		cc.dopts.copts.UserAgent += " " + grpcUA
	} else {
		cc.dopts.copts.UserAgent = grpcUA
	}

	if cc.dopts.timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
		defer cancel()
	}
	// If ctx expired while dialing, report the ctx error rather than a
	// partially-built conn.
	defer func() {
		select {
		case <-ctx.Done():
			conn, err = nil, ctx.Err()
		default:
		}
	}()

	scSet := false
	if cc.dopts.scChan != nil {
		// Try to get an initial service config.
		select {
		case sc, ok := <-cc.dopts.scChan:
			if ok {
				cc.sc = &sc
				scSet = true
			}
		default:
		}
	}
	if cc.dopts.bs == nil {
		cc.dopts.bs = backoff.DefaultExponential
	}

	// Determine the resolver to use.
	cc.parsedTarget = grpcutil.ParseTarget(cc.target)
	channelz.Infof(cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme)
	resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme)
	if resolverBuilder == nil {
		// If resolver builder is still nil, the parsed target's scheme is
		// not registered. Fallback to default resolver and set Endpoint to
		// the original target.
		channelz.Infof(cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
		cc.parsedTarget = resolver.Target{
			Scheme:   resolver.GetDefaultScheme(),
			Endpoint: target,
		}
		resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme)
		if resolverBuilder == nil {
			return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme)
		}
	}

	creds := cc.dopts.copts.TransportCredentials
	if creds != nil && creds.Info().ServerName != "" {
		cc.authority = creds.Info().ServerName
	} else if cc.dopts.insecure && cc.dopts.authority != "" {
		cc.authority = cc.dopts.authority
	} else {
		// Use endpoint from "scheme://authority/endpoint" as the default
		// authority for ClientConn.
		cc.authority = cc.parsedTarget.Endpoint
	}

	if cc.dopts.scChan != nil && !scSet {
		// Blocking wait for the initial service config.
		select {
		case sc, ok := <-cc.dopts.scChan:
			if ok {
				cc.sc = &sc
			}
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
	if cc.dopts.scChan != nil {
		go cc.scWatcher()
	}

	var credsClone credentials.TransportCredentials
	if creds := cc.dopts.copts.TransportCredentials; creds != nil {
		credsClone = creds.Clone()
	}
	cc.balancerBuildOpts = balancer.BuildOptions{
		DialCreds:        credsClone,
		CredsBundle:      cc.dopts.copts.CredsBundle,
		Dialer:           cc.dopts.copts.Dialer,
		ChannelzParentID: cc.channelzID,
		Target:           cc.parsedTarget,
	}

	// Build the resolver.
	rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
	if err != nil {
		return nil, fmt.Errorf("failed to build resolver: %v", err)
	}
	cc.mu.Lock()
	cc.resolverWrapper = rWrapper
	cc.mu.Unlock()

	// A blocking dial blocks until the clientConn is ready.
	if cc.dopts.block {
		for {
			s := cc.GetState()
			if s == connectivity.Ready {
				break
			} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
				if err = cc.blockingpicker.connectionError(); err != nil {
					terr, ok := err.(interface {
						Temporary() bool
					})
					if ok && !terr.Temporary() {
						return nil, err
					}
				}
			}
			if !cc.WaitForStateChange(ctx, s) {
				// ctx got timeout or canceled.
				return nil, ctx.Err()
			}
		}
	}

	return cc, nil
}

// chainUnaryClientInterceptors chains all unary client interceptors into one.
func chainUnaryClientInterceptors(cc *ClientConn) {
	interceptors := cc.dopts.chainUnaryInts
	// Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will
	// be executed before any other chained interceptors.
	if cc.dopts.unaryInt != nil {
		interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...)
	}
	var chainedInt UnaryClientInterceptor
	if len(interceptors) == 0 {
		chainedInt = nil
	} else if len(interceptors) == 1 {
		chainedInt = interceptors[0]
	} else {
		chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
			return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
		}
	}
	cc.dopts.unaryInt = chainedInt
}

// getChainUnaryInvoker recursively generate the chained unary invoker.
func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker {
	// The last interceptor in the chain invokes the real RPC.
	if curr == len(interceptors)-1 {
		return finalInvoker
	}
	return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
		return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
	}
}

// chainStreamClientInterceptors chains all stream client interceptors into one.
func chainStreamClientInterceptors(cc *ClientConn) { interceptors := cc.dopts.chainStreamInts // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will // be executed before any other chained interceptors. if cc.dopts.streamInt != nil { interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) } var chainedInt StreamClientInterceptor if len(interceptors) == 0 { chainedInt = nil } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) } } cc.dopts.streamInt = chainedInt } // getChainStreamer recursively generate the chained client stream constructor. func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { if curr == len(interceptors)-1 { return finalStreamer } return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) } } // connectivityStateManager keeps the connectivity.State of ClientConn. // This struct will eventually be exported so the balancers can access it. type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} channelzID int64 } // updateState updates the connectivity.State of ClientConn. // If there's a change it notifies goroutines waiting on state change to // happen. 
func (csm *connectivityStateManager) updateState(state connectivity.State) { csm.mu.Lock() defer csm.mu.Unlock() if csm.state == connectivity.Shutdown { return } if csm.state == state { return } csm.state = state channelz.Infof(csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. close(csm.notifyChan) csm.notifyChan = nil } } func (csm *connectivityStateManager) getState() connectivity.State { csm.mu.Lock() defer csm.mu.Unlock() return csm.state } func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { csm.mu.Lock() defer csm.mu.Unlock() if csm.notifyChan == nil { csm.notifyChan = make(chan struct{}) } return csm.notifyChan } // ClientConnInterface defines the functions clients need to perform unary and // streaming RPCs. It is implemented by *ClientConn, and is only intended to // be referenced by generated code. type ClientConnInterface interface { // Invoke performs a unary RPC and returns after the response is received // into reply. Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error // NewStream begins a streaming RPC. NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) } // Assert *ClientConn implements ClientConnInterface. var _ ClientConnInterface = (*ClientConn)(nil) // ClientConn represents a virtual connection to a conceptual endpoint, to // perform RPCs. // // A ClientConn is free to have zero or more actual connections to the endpoint // based on configuration, load, etc. It is also free to determine which actual // endpoints to use and may change it every RPC, permitting client-side load // balancing. // // A ClientConn encapsulates a range of functionality including name // resolution, TCP connection establishment (with retries and backoff) and TLS // handshakes. 
It also handles errors on established connections by // re-resolving the name and reconnecting. type ClientConn struct { ctx context.Context cancel context.CancelFunc target string parsedTarget resolver.Target authority string dopts dialOptions csMgr *connectivityStateManager balancerBuildOpts balancer.BuildOptions blockingpicker *pickerWrapper mu sync.RWMutex resolverWrapper *ccResolverWrapper sc *ServiceConfig conns map[*addrConn]struct{} // Keepalive parameter can be updated if a GoAway is received. mkp keepalive.ClientParameters curBalancerName string balancerWrapper *ccBalancerWrapper retryThrottler atomic.Value firstResolveEvent *grpcsync.Event channelzID int64 // channelz unique identification number czData *channelzData } // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // This is an EXPERIMENTAL API. func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { return true } select { case <-ctx.Done(): return false case <-ch: return true } } // GetState returns the connectivity.State of ClientConn. // This is an EXPERIMENTAL API. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } func (cc *ClientConn) scWatcher() { for { select { case sc, ok := <-cc.dopts.scChan: if !ok { return } cc.mu.Lock() // TODO: load balance policy runtime change is ignored. // We may revisit this decision in the future. cc.sc = &sc cc.mu.Unlock() case <-cc.ctx.Done(): return } } } // waitForResolvedAddrs blocks until the resolver has provided addresses or the // context expires. Returns nil unless the context expires first; otherwise // returns a status error based on the context. 
func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { // This is on the RPC path, so we use a fast path to avoid the // more-expensive "select" below after the resolver has returned once. if cc.firstResolveEvent.HasFired() { return nil } select { case <-cc.firstResolveEvent.Done(): return nil case <-ctx.Done(): return status.FromContextError(ctx.Err()).Err() case <-cc.ctx.Done(): return ErrClientConnClosing } } var emptyServiceConfig *ServiceConfig func init() { cfg := parseServiceConfig("{}") if cfg.Err != nil { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } emptyServiceConfig = cfg.Config.(*ServiceConfig) } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { if cc.sc != nil { cc.applyServiceConfigAndBalancer(cc.sc, addrs) return } if cc.dopts.defaultServiceConfig != nil { cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs) } else { cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs) } } func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { defer cc.firstResolveEvent.Fire() cc.mu.Lock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. if cc.conns == nil { cc.mu.Unlock() return nil } if err != nil { // May need to apply the initial service config in case the resolver // doesn't support service configs, or doesn't provide a service config // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) if cc.balancerWrapper != nil { cc.balancerWrapper.resolverError(err) } // No addresses are valid with err set; return early. 
cc.mu.Unlock() return balancer.ErrBadResolverState } var ret error if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { cc.maybeApplyDefaultServiceConfig(s.Addresses) // TODO: do we need to apply a failing LB policy if there is no // default, per the error handling design? } else { if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { cc.applyServiceConfigAndBalancer(sc, s.Addresses) } else { ret = balancer.ErrBadResolverState if cc.balancerWrapper == nil { var err error if s.ServiceConfig.Err != nil { err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) } else { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) } cc.blockingpicker.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) cc.mu.Unlock() return ret } } } var balCfg serviceconfig.LoadBalancingConfig if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() if cbn != grpclbName { // Filter any grpclb addresses since we don't have the grpclb balancer. for i := 0; i < len(s.Addresses); { if s.Addresses[i].Type == resolver.GRPCLB { copy(s.Addresses[i:], s.Addresses[i+1:]) s.Addresses = s.Addresses[:len(s.Addresses)-1] continue } i++ } } uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is // currently meaningless to the caller. } return ret } // switchBalancer starts the switching from current balancer to the balancer // with the given name. // // It will NOT send the current address list to the new balancer. If needed, // caller of this function should send address list to the new balancer after // this function returns. // // Caller must hold cc.mu. 
func (cc *ClientConn) switchBalancer(name string) { if strings.EqualFold(cc.curBalancerName, name) { return } channelz.Infof(cc.channelzID, "ClientConn switching balancer to %q", name) if cc.dopts.balancerBuilder != nil { channelz.Info(cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") return } if cc.balancerWrapper != nil { cc.balancerWrapper.close() } builder := balancer.Get(name) if builder == nil { channelz.Warningf(cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) channelz.Infof(cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) builder = newPickfirstBuilder() } else { channelz.Infof(cc.channelzID, "Channel switches to new LB policy %q", name) } cc.curBalancerName = builder.Name() cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return } // TODO(bar switching) send updates to all balancer wrappers when balancer // gracefully switching is supported. cc.balancerWrapper.handleSubConnStateChange(sc, s, err) cc.mu.Unlock() } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { ac := &addrConn{ state: connectivity.Idle, cc: cc, addrs: addrs, scopts: opts, dopts: cc.dopts, czData: new(channelzData), resetBackoff: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. 
cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return nil, ErrClientConnClosing } if channelz.IsOn() { ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), Severity: channelz.CtINFO, }, }) } cc.conns[ac] = struct{}{} cc.mu.Unlock() return ac, nil } // removeAddrConn removes the addrConn in the subConn from clientConn. // It also tears down the ac with the given error. func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return } delete(cc.conns, ac) cc.mu.Unlock() ac.tearDown(err) } func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { return &channelz.ChannelInternalMetric{ State: cc.GetState(), Target: cc.target, CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), } } // Target returns the target string of the ClientConn. // This is an EXPERIMENTAL API. func (cc *ClientConn) Target() string { return cc.target } func (cc *ClientConn) incrCallsStarted() { atomic.AddInt64(&cc.czData.callsStarted, 1) atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) } func (cc *ClientConn) incrCallsSucceeded() { atomic.AddInt64(&cc.czData.callsSucceeded, 1) } func (cc *ClientConn) incrCallsFailed() { atomic.AddInt64(&cc.czData.callsFailed, 1) } // connect starts creating a transport. // It does nothing if the ac is not IDLE. // TODO(bar) Move this to the addrConn section. 
func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { ac.mu.Unlock() return nil } // Update connectivity state within the lock to prevent subsequent or // concurrent calls from resetting the transport more than once. ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() // Start a goroutine connecting to the server asynchronously. go ac.resetTransport() return nil } // tryUpdateAddrs tries to update ac.addrs with the new addresses list. // // If ac is Connecting, it returns false. The caller should tear down the ac and // create a new one. Note that the backoff will be reset when this happens. // // If ac is TransientFailure, it updates ac.addrs and returns true. The updated // addresses will be picked up by retry in the next iteration after backoff. // // If ac is Shutdown or Idle, it updates ac.addrs and returns true. // // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. // - If true, it updates ac.addrs and returns true. The ac will keep using // the existing connection. // - If false, it does nothing and returns false. func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { ac.addrs = addrs return true } if ac.state == connectivity.Connecting { return false } // ac.state is Ready, try to find the connected address. 
var curAddrFound bool for _, a := range addrs { if reflect.DeepEqual(ac.curAddr, a) { curAddrFound = true break } } channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) if curAddrFound { ac.addrs = addrs } return curAddrFound } // GetMethodConfig gets the method config of the input method. // If there's an exact match for input method (i.e. /service/method), we return // the corresponding MethodConfig. // If there isn't an exact match for the input method, we look for the default config // under the service (i.e /service/). If there is a default MethodConfig for // the service, we return it. // Otherwise, we return an empty MethodConfig. func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { // TODO: Avoid the locking here. cc.mu.RLock() defer cc.mu.RUnlock() if cc.sc == nil { return MethodConfig{} } m, ok := cc.sc.Methods[method] if !ok { i := strings.LastIndex(method, "/") m = cc.sc.Methods[method[:i+1]] } return m } func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { cc.mu.RLock() defer cc.mu.RUnlock() if cc.sc == nil { return nil } return cc.sc.healthCheckConfig } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) if err != nil { return nil, nil, toRPCErr(err) } return t, done, nil } func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) { if sc == nil { // should never reach here. 
return } cc.sc = sc if cc.sc.retryThrottling != nil { newThrottler := &retryThrottler{ tokens: cc.sc.retryThrottling.MaxTokens, max: cc.sc.retryThrottling.MaxTokens, thresh: cc.sc.retryThrottling.MaxTokens / 2, ratio: cc.sc.retryThrottling.TokenRatio, } cc.retryThrottler.Store(newThrottler) } else { cc.retryThrottler.Store((*retryThrottler)(nil)) } if cc.dopts.balancerBuilder == nil { // Only look at balancer types and switch balancer if balancer dial // option is not set. var newBalancerName string if cc.sc != nil && cc.sc.lbConfig != nil { newBalancerName = cc.sc.lbConfig.name } else { var isGRPCLB bool for _, a := range addrs { if a.Type == resolver.GRPCLB { isGRPCLB = true break } } if isGRPCLB { newBalancerName = grpclbName } else if cc.sc != nil && cc.sc.LB != nil { newBalancerName = *cc.sc.LB } else { newBalancerName = PickFirstBalancerName } } cc.switchBalancer(newBalancerName) } else if cc.balancerWrapper == nil { // Balancer dial option was set, and this is the first time handling // resolved addresses. Build a balancer with dopts.balancerBuilder. cc.curBalancerName = cc.dopts.balancerBuilder.Name() cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) } } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() r := cc.resolverWrapper cc.mu.RUnlock() if r == nil { return } go r.resolveNow(o) } // ResetConnectBackoff wakes up all subchannels in transient failure and causes // them to attempt another connection immediately. It also resets the backoff // times used for subsequent attempts regardless of the current state. // // In general, this function should not be used. Typical service or network // outages result in a reasonable client reconnection strategy by default. // However, if a previously unavailable network becomes available, this may be // used to trigger an immediate reconnect. // // This API is EXPERIMENTAL. 
func (cc *ClientConn) ResetConnectBackoff() { cc.mu.Lock() conns := cc.conns cc.mu.Unlock() for ac := range conns { ac.resetConnectBackoff() } } // Close tears down the ClientConn and all underlying connections. func (cc *ClientConn) Close() error { defer cc.cancel() cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return ErrClientConnClosing } conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) rWrapper := cc.resolverWrapper cc.resolverWrapper = nil bWrapper := cc.balancerWrapper cc.balancerWrapper = nil cc.mu.Unlock() cc.blockingpicker.close() if rWrapper != nil { rWrapper.close() } if bWrapper != nil { bWrapper.close() } for ac := range conns { ac.tearDown(ErrClientConnClosing) } if channelz.IsOn() { ted := &channelz.TraceEventDesc{ Desc: "Channel Deleted", Severity: channelz.CtINFO, } if cc.dopts.channelzParentID != 0 { ted.Parent = &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), Severity: channelz.CtINFO, } } channelz.AddTraceEvent(cc.channelzID, 0, ted) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to // the entity being deleted, and thus prevent it from being deleted right away. channelz.RemoveEntry(cc.channelzID) } return nil } // addrConn is a network connection to a given address. type addrConn struct { ctx context.Context cancel context.CancelFunc cc *ClientConn dopts dialOptions acbw balancer.SubConn scopts balancer.NewSubConnOptions // transport is set when there's a viable transport (note: ac state may not be READY as LB channel // health checking may require server to report healthy to set ac to READY), and is reset // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway // is received, transport is closed, ac has been torn down). transport transport.ClientTransport // The current transport. mu sync.Mutex curAddr resolver.Address // The current address. 
addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. state connectivity.State backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} channelzID int64 // channelz unique identification number. czData *channelzData } // Note: this requires a lock on ac.mu. func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { if ac.state == s { return } ac.state = s channelz.Infof(ac.channelzID, "Subchannel Connectivity change to %v", s) ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } // adjustParams updates parameters used to create transports upon // receiving a GoAway. func (ac *addrConn) adjustParams(r transport.GoAwayReason) { switch r { case transport.GoAwayTooManyPings: v := 2 * ac.dopts.copts.KeepaliveParams.Time ac.cc.mu.Lock() if v > ac.cc.mkp.Time { ac.cc.mkp.Time = v } ac.cc.mu.Unlock() } } func (ac *addrConn) resetTransport() { for i := 0; ; i++ { if i > 0 { ac.cc.resolveNow(resolver.ResolveNowOptions{}) } ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return } addrs := ac.addrs backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) // This will be the duration that dial gets to finish. dialDuration := minConnectTimeout if ac.dopts.minConnectTimeout != nil { dialDuration = ac.dopts.minConnectTimeout() } if dialDuration < backoffFor { // Give dial more time as we keep failing to connect. dialDuration = backoffFor } // We can potentially spend all the time trying the first address, and // if the server accepts the connection and then hangs, the following // addresses will never be tried. // // The spec doesn't mention what should be done for multiple addresses. 
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm connectDeadline := time.Now().Add(dialDuration) ac.updateConnectivityState(connectivity.Connecting, nil) ac.transport = nil ac.mu.Unlock() newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) if err != nil { // After exhausting all addresses, the addrConn enters // TRANSIENT_FAILURE. ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return } ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. b := ac.resetBackoff ac.mu.Unlock() timer := time.NewTimer(backoffFor) select { case <-timer.C: ac.mu.Lock() ac.backoffIdx++ ac.mu.Unlock() case <-b: timer.Stop() case <-ac.ctx.Done(): timer.Stop() return } continue } ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() newTr.Close() return } ac.curAddr = addr ac.transport = newTr ac.backoffIdx = 0 hctx, hcancel := context.WithCancel(ac.ctx) ac.startHealthCheck(hctx) ac.mu.Unlock() // Block until the created transport is down. And when this happens, // we restart from the top of the addr list. <-reconnect.Done() hcancel() // restart connecting - the top of the loop will set state to // CONNECTING. This is against the current connectivity semantics doc, // however it allows for graceful behavior for RPCs not yet dispatched // - unfortunate timing would otherwise lead to the RPC failing even // though the TRANSIENT_FAILURE state (called for by the doc) would be // instantaneous. // // Ideally we should transition to Idle here and block until there is // RPC activity that leads to the balancer requesting a reconnect of // the associated SubConn. } } // tryAllAddrs tries to creates a connection to the addresses, and stop when at the // first successful one. It returns the transport, the address and a Event in // the successful case. The Event fires when the returned transport disconnects. 
func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { var firstConnErr error for _, addr := range addrs { ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return nil, resolver.Address{}, nil, errConnClosing } ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp ac.cc.mu.RUnlock() copts := ac.dopts.copts if ac.scopts.CredsBundle != nil { copts.CredsBundle = ac.scopts.CredsBundle } ac.mu.Unlock() channelz.Infof(ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) if err == nil { return newTr, addr, reconnect, nil } if firstConnErr == nil { firstConnErr = err } ac.cc.blockingpicker.updateConnectionError(err) } // Couldn't connect to any address. return nil, resolver.Address{}, nil, firstConnErr } // createTransport creates a connection to addr. It returns the transport and a // Event in the successful case. The Event fires when the returned transport // disconnects. func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { prefaceReceived := make(chan struct{}) onCloseCalled := make(chan struct{}) reconnect := grpcsync.NewEvent() authority := ac.cc.authority // addr.ServerName takes precedent over ClientConn authority, if present. if addr.ServerName != "" { authority = addr.ServerName } target := transport.TargetInfo{ Addr: addr.Addr, Metadata: addr.Metadata, Authority: authority, } once := sync.Once{} onGoAway := func(r transport.GoAwayReason) { ac.mu.Lock() ac.adjustParams(r) once.Do(func() { if ac.state == connectivity.Ready { // Prevent this SubConn from being used for new RPCs by setting its // state to Connecting. // // TODO: this should be Idle when grpc-go properly supports it. 
ac.updateConnectivityState(connectivity.Connecting, nil) } }) ac.mu.Unlock() reconnect.Fire() } onClose := func() { ac.mu.Lock() once.Do(func() { if ac.state == connectivity.Ready { // Prevent this SubConn from being used for new RPCs by setting its // state to Connecting. // // TODO: this should be Idle when grpc-go properly supports it. ac.updateConnectivityState(connectivity.Connecting, nil) } }) ac.mu.Unlock() close(onCloseCalled) reconnect.Fire() } onPrefaceReceipt := func() { close(prefaceReceived) } connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() if channelz.IsOn() { copts.ChannelzParentID = ac.channelzID } newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) return nil, nil, err } select { case <-time.After(time.Until(connectDeadline)): // We didn't get the preface in time. newTr.Close() channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) return nil, nil, errors.New("timed out waiting for server handshake") case <-prefaceReceived: // We got the preface - huzzah! things are good. case <-onCloseCalled: // The transport has already closed - noop. return nil, nil, errors.New("connection closed") // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. } return newTr, reconnect, nil } // startHealthCheck starts the health checking stream (RPC) to watch the health // stats of this connection if health checking is requested and configured. // // LB channel health checking is enabled when all requirements below are met: // 1. it is not disabled by the user with the WithDisableHealthCheck DialOption // 2. 
internal.HealthCheckFunc is set by importing the grpc/healthcheck package // 3. a service config with non-empty healthCheckConfig field is provided // 4. the load balancer requests it // // It sets addrConn to READY if the health checking stream is not started. // // Caller must hold ac.mu. func (ac *addrConn) startHealthCheck(ctx context.Context) { var healthcheckManagingState bool defer func() { if !healthcheckManagingState { ac.updateConnectivityState(connectivity.Ready, nil) } }() if ac.cc.dopts.disableHealthCheck { return } healthCheckConfig := ac.cc.healthCheckConfig() if healthCheckConfig == nil { return } if !ac.scopts.HealthCheckEnabled { return } healthCheckFunc := ac.cc.dopts.healthCheckFunc if healthCheckFunc == nil { // The health package is not imported to set health check function. // // TODO: add a link to the health check doc in the error message. channelz.Error(ac.channelzID, "Health check is requested but health check function is not set.") return } healthcheckManagingState = true // Set up the health check helper functions. currentTr := ac.transport newStream := func(method string) (interface{}, error) { ac.mu.Lock() if ac.transport != currentTr { ac.mu.Unlock() return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") } ac.mu.Unlock() return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) } setConnectivityState := func(s connectivity.State, lastErr error) { ac.mu.Lock() defer ac.mu.Unlock() if ac.transport != currentTr { return } ac.updateConnectivityState(s, lastErr) } // Start the health checking stream. 
go func() { err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { channelz.Error(ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { channelz.Errorf(ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) } } }() } func (ac *addrConn) resetConnectBackoff() { ac.mu.Lock() close(ac.resetBackoff) ac.backoffIdx = 0 ac.resetBackoff = make(chan struct{}) ac.mu.Unlock() } // getReadyTransport returns the transport if ac's state is READY. // Otherwise it returns nil, false. // If ac's state is IDLE, it will trigger ac to connect. func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { ac.mu.Lock() if ac.state == connectivity.Ready && ac.transport != nil { t := ac.transport ac.mu.Unlock() return t, true } var idle bool if ac.state == connectivity.Idle { idle = true } ac.mu.Unlock() // Trigger idle ac to connect. if idle { ac.connect() } return nil, false } // tearDown starts to tear down the addrConn. // TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in // some edge cases (e.g., the caller opens and closes many addrConn's in a // tight loop. // tearDown doesn't remove ac from ac.cc.conns. func (ac *addrConn) tearDown(err error) { ac.mu.Lock() if ac.state == connectivity.Shutdown { ac.mu.Unlock() return } curTr := ac.transport ac.transport = nil // We have to set the state to Shutdown before anything else to prevent races // between setting the state and logic that waits on context cancellation / etc. ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} if err == errConnDrain && curTr != nil { // GracefulClose(...) may be executed multiple times when // i) receiving multiple GoAway frames from the server; or // ii) there are concurrent name resolver/Balancer triggered // address removal and GoAway. 
// We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. ac.mu.Unlock() curTr.GracefulClose() ac.mu.Lock() } if channelz.IsOn() { channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Deleted", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), Severity: channelz.CtINFO, }, }) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to // the entity being deleted, and thus prevent it from being deleted right away. channelz.RemoveEntry(ac.channelzID) } ac.mu.Unlock() } func (ac *addrConn) getState() connectivity.State { ac.mu.Lock() defer ac.mu.Unlock() return ac.state } func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { ac.mu.Lock() addr := ac.curAddr.Addr ac.mu.Unlock() return &channelz.ChannelInternalMetric{ State: ac.getState(), Target: addr, CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), } } func (ac *addrConn) incrCallsStarted() { atomic.AddInt64(&ac.czData.callsStarted, 1) atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) } func (ac *addrConn) incrCallsSucceeded() { atomic.AddInt64(&ac.czData.callsSucceeded, 1) } func (ac *addrConn) incrCallsFailed() { atomic.AddInt64(&ac.czData.callsFailed, 1) } type retryThrottler struct { max float64 thresh float64 ratio float64 mu sync.Mutex tokens float64 // TODO(dfawley): replace with atomic and remove lock. } // throttle subtracts a retry token from the pool and returns whether a retry // should be throttled (disallowed) based upon the retry throttling policy in // the service config. 
func (rt *retryThrottler) throttle() bool {
	// A nil throttler means retry throttling is disabled; never throttle.
	if rt == nil {
		return false
	}
	rt.mu.Lock()
	defer rt.mu.Unlock()
	// Every attempt consumes one token; the pool never goes negative.
	rt.tokens--
	if rt.tokens < 0 {
		rt.tokens = 0
	}
	// Throttle once the pool has drained to the configured threshold.
	return rt.tokens <= rt.thresh
}

// successfulRPC restores rt.ratio tokens to the pool (capped at rt.max).
// A nil receiver is a no-op, matching throttle.
func (rt *retryThrottler) successfulRPC() {
	if rt == nil {
		return
	}
	rt.mu.Lock()
	defer rt.mu.Unlock()
	rt.tokens += rt.ratio
	if rt.tokens > rt.max {
		rt.tokens = rt.max
	}
}

// channelzChannel adapts a *ClientConn to the channelz metric interface.
type channelzChannel struct {
	cc *ClientConn
}

// ChannelzMetric returns the wrapped ClientConn's channelz metrics.
func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
	return c.cc.channelzMetric()
}

// ErrClientConnTimeout indicates that the ClientConn cannot establish the
// underlying connections within the specified timeout.
//
// Deprecated: This error is never returned by grpc and should not be
// referenced by users.
var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")

// getResolver returns the resolver.Builder registered for scheme, preferring
// builders supplied via dial options over the global resolver registry.
func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
	for _, rb := range cc.dopts.resolvers {
		if scheme == rb.Scheme() {
			return rb
		}
	}
	return resolver.Get(scheme)
}
grpc-go-1.29.1/clientconn_state_transition_test.go000066400000000000000000000311411365033716300223450ustar00rootroot00000000000000/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"context"
	"net"
	"sync"
	"testing"
	"time"

	"golang.org/x/net/http2"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

// NOTE(review): "recoding" looks like a typo for "recording", but the name is
// a runtime string used to register/look up the balancer, so it is preserved.
const stateRecordingBalancerName = "state_recoding_balancer"

var testBalancerBuilder = newStateRecordingBalancerBuilder()

func init() {
	balancer.Register(testBalancerBuilder)
}

// These tests use a pipeListener. This listener is similar to net.Listener
// except that it is unbuffered, so each read and write will wait for the other
// side's corresponding write or read.
func (s) TestStateTransitions_SingleAddress(t *testing.T) {
	for _, test := range []struct {
		desc   string
		want   []connectivity.State
		server func(net.Listener) net.Conn
	}{
		{
			desc: "When the server returns server preface, the client enters READY.",
			want: []connectivity.State{
				connectivity.Connecting,
				connectivity.Ready,
			},
			server: func(lis net.Listener) net.Conn {
				conn, err := lis.Accept()
				if err != nil {
					t.Error(err)
					return nil
				}

				go keepReading(conn)

				framer := http2.NewFramer(conn, conn)
				if err := framer.WriteSettings(http2.Setting{}); err != nil {
					t.Errorf("Error while writing settings frame. %v", err)
					return nil
				}

				return conn
			},
		},
		{
			desc: "When the connection is closed, the client enters TRANSIENT FAILURE.",
			want: []connectivity.State{
				connectivity.Connecting,
				connectivity.TransientFailure,
			},
			server: func(lis net.Listener) net.Conn {
				conn, err := lis.Accept()
				if err != nil {
					t.Error(err)
					return nil
				}

				conn.Close()
				return nil
			},
		},
		{
			desc: `When the server sends its connection preface, but the connection dies before the client can write its connection preface, the client enters TRANSIENT FAILURE.`,
			want: []connectivity.State{
				connectivity.Connecting,
				connectivity.TransientFailure,
			},
			server: func(lis net.Listener) net.Conn {
				conn, err := lis.Accept()
				if err != nil {
					t.Error(err)
					return nil
				}

				framer := http2.NewFramer(conn, conn)
				if err := framer.WriteSettings(http2.Setting{}); err != nil {
					t.Errorf("Error while writing settings frame. %v", err)
					return nil
				}

				conn.Close()
				return nil
			},
		},
		{
			desc: `When the server reads the client connection preface but does not send its connection preface, the client enters TRANSIENT FAILURE.`,
			want: []connectivity.State{
				connectivity.Connecting,
				connectivity.TransientFailure,
			},
			server: func(lis net.Listener) net.Conn {
				conn, err := lis.Accept()
				if err != nil {
					t.Error(err)
					return nil
				}

				go keepReading(conn)

				return conn
			},
		},
	} {
		t.Log(test.desc)
		testStateTransitionSingleAddress(t, test.want, test.server)
	}
}

// testStateTransitionSingleAddress dials a single pipe-backed address served
// by the given server function and asserts that the balancer observes exactly
// the `want` sequence of connectivity states.
func testStateTransitionSingleAddress(t *testing.T, want []connectivity.State, server func(net.Listener) net.Conn) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	pl := testutils.NewPipeListener()
	defer pl.Close()

	// Launch the server.
	var conn net.Conn
	var connMu sync.Mutex
	go func() {
		connMu.Lock()
		conn = server(pl)
		connMu.Unlock()
	}()

	client, err := DialContext(ctx, "",
		WithInsecure(),
		WithBalancerName(stateRecordingBalancerName),
		WithDialer(pl.Dialer()),
		withBackoff(noBackoff{}),
		withMinConnectDeadline(func() time.Duration { return time.Millisecond * 100 }))
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	stateNotifications := testBalancerBuilder.nextStateNotifier()

	timeout := time.After(5 * time.Second)

	for i := 0; i < len(want); i++ {
		select {
		case <-timeout:
			t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want)
		case seen := <-stateNotifications:
			if seen != want[i] {
				t.Fatalf("expected to see %v at position %d in flow %v, got %v", want[i], i, want, seen)
			}
		}
	}

	connMu.Lock()
	defer connMu.Unlock()
	if conn != nil {
		err = conn.Close()
		if err != nil {
			t.Fatal(err)
		}
	}
}

// When a READY connection is closed, the client enters CONNECTING.
func (s) TestStateTransitions_ReadyToConnecting(t *testing.T) {
	want := []connectivity.State{
		connectivity.Connecting,
		connectivity.Ready,
		connectivity.Connecting,
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	defer lis.Close()

	sawReady := make(chan struct{})

	// Launch the server.
	go func() {
		conn, err := lis.Accept()
		if err != nil {
			t.Error(err)
			return
		}

		go keepReading(conn)

		framer := http2.NewFramer(conn, conn)
		if err := framer.WriteSettings(http2.Setting{}); err != nil {
			t.Errorf("Error while writing settings frame. %v", err)
			return
		}

		// Prevents race between onPrefaceReceipt and onClose.
		<-sawReady

		conn.Close()
	}()

	client, err := DialContext(ctx, lis.Addr().String(), WithInsecure(), WithBalancerName(stateRecordingBalancerName))
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	stateNotifications := testBalancerBuilder.nextStateNotifier()

	timeout := time.After(5 * time.Second)

	for i := 0; i < len(want); i++ {
		select {
		case <-timeout:
			t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want)
		case seen := <-stateNotifications:
			if seen == connectivity.Ready {
				close(sawReady)
			}
			if seen != want[i] {
				t.Fatalf("expected to see %v at position %d in flow %v, got %v", want[i], i, want, seen)
			}
		}
	}
}

// When the first connection is closed, the client stays in CONNECTING until it
// tries the second address (which succeeds, and then it enters READY).
func (s) TestStateTransitions_TriesAllAddrsBeforeTransientFailure(t *testing.T) {
	want := []connectivity.State{
		connectivity.Connecting,
		connectivity.Ready,
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	lis1, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	defer lis1.Close()

	lis2, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	defer lis2.Close()

	server1Done := make(chan struct{})
	server2Done := make(chan struct{})

	// Launch server 1: accepts and immediately closes, forcing the client on
	// to the second address.
	go func() {
		conn, err := lis1.Accept()
		if err != nil {
			t.Error(err)
			return
		}

		conn.Close()
		close(server1Done)
	}()
	// Launch server 2: completes the HTTP/2 handshake so the client can reach
	// READY.
	go func() {
		conn, err := lis2.Accept()
		if err != nil {
			t.Error(err)
			return
		}

		go keepReading(conn)

		framer := http2.NewFramer(conn, conn)
		if err := framer.WriteSettings(http2.Setting{}); err != nil {
			t.Errorf("Error while writing settings frame. %v", err)
			return
		}

		close(server2Done)
	}()

	rb := manual.NewBuilderWithScheme("whatever")
	rb.InitialState(resolver.State{Addresses: []resolver.Address{
		{Addr: lis1.Addr().String()},
		{Addr: lis2.Addr().String()},
	}})
	client, err := DialContext(ctx, "whatever:///this-gets-overwritten",
		WithInsecure(),
		WithBalancerName(stateRecordingBalancerName),
		WithResolvers(rb))
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	stateNotifications := testBalancerBuilder.nextStateNotifier()

	timeout := time.After(5 * time.Second)

	for i := 0; i < len(want); i++ {
		select {
		case <-timeout:
			t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want)
		case seen := <-stateNotifications:
			if seen != want[i] {
				t.Fatalf("expected to see %v at position %d in flow %v, got %v", want[i], i, want, seen)
			}
		}
	}

	select {
	case <-timeout:
		t.Fatal("saw the correct state transitions, but timed out waiting for client to finish interactions with server 1")
	case <-server1Done:
	}
	select {
	case <-timeout:
		t.Fatal("saw the correct state transitions, but timed out waiting for client to finish interactions with server 2")
	case <-server2Done:
	}
}

// When there are multiple addresses, and we enter READY on one of them, a
// later closure should cause the client to enter CONNECTING
func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) {
	want := []connectivity.State{
		connectivity.Connecting,
		connectivity.Ready,
		connectivity.Connecting,
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	lis1, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	defer lis1.Close()

	// Never actually gets used; we just want it to be alive so that the resolver has two addresses to target.
	lis2, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	defer lis2.Close()

	server1Done := make(chan struct{})
	sawReady := make(chan struct{})

	// Launch server 1.
	go func() {
		conn, err := lis1.Accept()
		if err != nil {
			t.Error(err)
			return
		}

		go keepReading(conn)

		framer := http2.NewFramer(conn, conn)
		if err := framer.WriteSettings(http2.Setting{}); err != nil {
			t.Errorf("Error while writing settings frame. %v", err)
			return
		}

		// Wait for the client to observe READY before killing the connection,
		// then accept the client's reconnect attempt.
		<-sawReady

		conn.Close()

		_, err = lis1.Accept()
		if err != nil {
			t.Error(err)
			return
		}

		close(server1Done)
	}()

	rb := manual.NewBuilderWithScheme("whatever")
	rb.InitialState(resolver.State{Addresses: []resolver.Address{
		{Addr: lis1.Addr().String()},
		{Addr: lis2.Addr().String()},
	}})
	client, err := DialContext(ctx, "whatever:///this-gets-overwritten",
		WithInsecure(),
		WithBalancerName(stateRecordingBalancerName),
		WithResolvers(rb))
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	stateNotifications := testBalancerBuilder.nextStateNotifier()

	timeout := time.After(2 * time.Second)

	for i := 0; i < len(want); i++ {
		select {
		case <-timeout:
			t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want)
		case seen := <-stateNotifications:
			if seen == connectivity.Ready {
				close(sawReady)
			}
			if seen != want[i] {
				t.Fatalf("expected to see %v at position %d in flow %v, got %v", want[i], i, want, seen)
			}
		}
	}

	select {
	case <-timeout:
		t.Fatal("saw the correct state transitions, but timed out waiting for client to finish interactions with server 1")
	case <-server1Done:
	}
}

// stateRecordingBalancer wraps a real balancer and mirrors every subconn
// state change onto a notification channel for the tests to consume.
type stateRecordingBalancer struct {
	notifier chan<- connectivity.State
	balancer.Balancer
}

func (b *stateRecordingBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
	b.notifier <- s
	b.Balancer.HandleSubConnStateChange(sc, s)
}

func (b *stateRecordingBalancer) ResetNotifier(r chan<- connectivity.State) {
	b.notifier = r
}

func (b *stateRecordingBalancer) Close() {
	b.Balancer.Close()
}

type stateRecordingBalancerBuilder struct {
	mu       sync.Mutex
	notifier chan connectivity.State // The notifier used in the last Balancer.
}

func newStateRecordingBalancerBuilder() *stateRecordingBalancerBuilder {
	return &stateRecordingBalancerBuilder{}
}

func (b *stateRecordingBalancerBuilder) Name() string {
	return stateRecordingBalancerName
}

// Build wraps a fresh pick_first balancer and stashes its notification
// channel so the next nextStateNotifier call can hand it to the test.
func (b *stateRecordingBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	stateNotifications := make(chan connectivity.State, 10)
	b.mu.Lock()
	b.notifier = stateNotifications
	b.mu.Unlock()
	return &stateRecordingBalancer{
		notifier: stateNotifications,
		Balancer: balancer.Get(PickFirstBalancerName).Build(cc, opts),
	}
}

// nextStateNotifier hands out (and clears) the channel belonging to the most
// recently built balancer; each channel is consumed by exactly one test.
func (b *stateRecordingBalancerBuilder) nextStateNotifier() <-chan connectivity.State {
	b.mu.Lock()
	defer b.mu.Unlock()
	ret := b.notifier
	b.notifier = nil
	return ret
}

// noBackoff makes reconnect attempts immediate so the tests run quickly.
type noBackoff struct{}

func (b noBackoff) Backoff(int) time.Duration { return time.Duration(0) }

// Keep reading until something causes the connection to die (EOF, server
// closed, etc). Useful as a tool for mindlessly keeping the connection
// healthy, since the client will error if things like client prefaces are not
// accepted in a timely fashion.
func keepReading(conn net.Conn) {
	buf := make([]byte, 1024)
	for _, err := conn.Read(buf); err == nil; _, err = conn.Read(buf) {
	}
}
grpc-go-1.29.1/clientconn_test.go000066400000000000000000001066021365033716300167000ustar00rootroot00000000000000/*
 *
 * Copyright 2014 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"context"
	"errors"
	"fmt"
	"math"
	"net"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"golang.org/x/net/http2"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials"
	internalbackoff "google.golang.org/grpc/internal/backoff"
	"google.golang.org/grpc/internal/transport"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/naming"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
	"google.golang.org/grpc/testdata"
)

// assertState blocks (up to one second) until cc reaches wantState or stops
// changing state; it returns the last observed state and whether it matched.
func assertState(wantState connectivity.State, cc *ClientConn) (connectivity.State, bool) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	var state connectivity.State
	for state = cc.GetState(); state != wantState && cc.WaitForStateChange(ctx, state); state = cc.GetState() {
	}
	return state, state == wantState
}

// Verifies that Dial with WithTimeout succeeds while a server completes the
// HTTP/2 handshake within the timeout.
func (s) TestDialWithTimeout(t *testing.T) {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	defer lis.Close()
	lisAddr := resolver.Address{Addr: lis.Addr().String()}
	lisDone := make(chan struct{})
	dialDone := make(chan struct{})
	// 1st listener accepts the connection and then does nothing
	go func() {
		defer close(lisDone)
		conn, err := lis.Accept()
		if err != nil {
			t.Errorf("Error while accepting. Err: %v", err)
			return
		}
		framer := http2.NewFramer(conn, conn)
		if err := framer.WriteSettings(http2.Setting{}); err != nil {
			t.Errorf("Error while writing settings. Err: %v", err)
			return
		}
		<-dialDone // Close conn only after dial returns.
	}()

	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()
	r.InitialState(resolver.State{Addresses: []resolver.Address{lisAddr}})
	client, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithTimeout(5*time.Second))
	close(dialDone)
	if err != nil {
		t.Fatalf("Dial failed. Err: %v", err)
	}
	defer client.Close()
	timeout := time.After(1 * time.Second)
	select {
	case <-timeout:
		t.Fatal("timed out waiting for server to finish")
	case <-lisDone:
	}
}

// Verifies that when the first backend closes its connection, the client
// attempts the next backend in the resolver's list.
func (s) TestDialWithMultipleBackendsNotSendingServerPreface(t *testing.T) {
	lis1, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	defer lis1.Close()
	lis1Addr := resolver.Address{Addr: lis1.Addr().String()}
	lis1Done := make(chan struct{})
	// 1st listener accepts the connection and immediately closes it.
	go func() {
		defer close(lis1Done)
		conn, err := lis1.Accept()
		if err != nil {
			t.Errorf("Error while accepting. Err: %v", err)
			return
		}
		conn.Close()
	}()

	lis2, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	defer lis2.Close()
	lis2Done := make(chan struct{})
	lis2Addr := resolver.Address{Addr: lis2.Addr().String()}
	// 2nd listener should get a connection attempt since the first one failed.
	go func() {
		defer close(lis2Done)
		_, err := lis2.Accept() // Closing the client will clean up this conn.
		if err != nil {
			t.Errorf("Error while accepting. Err: %v", err)
			return
		}
	}()

	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()
	r.InitialState(resolver.State{Addresses: []resolver.Address{lis1Addr, lis2Addr}})
	client, err := Dial(r.Scheme()+":///test.server", WithInsecure())
	if err != nil {
		t.Fatalf("Dial failed. Err: %v", err)
	}
	defer client.Close()
	timeout := time.After(5 * time.Second)
	select {
	case <-timeout:
		t.Fatal("timed out waiting for server 1 to finish")
	case <-lis1Done:
	}
	select {
	case <-timeout:
		t.Fatal("timed out waiting for server 2 to finish")
	case <-lis2Done:
	}
}

// Verifies that a blocking Dial does not return before the server's HTTP/2
// SETTINGS frame has been sent.
func (s) TestDialWaitsForServerSettings(t *testing.T) {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	defer lis.Close()
	done := make(chan struct{})
	sent := make(chan struct{})
	dialDone := make(chan struct{})
	go func() { // Launch the server.
		defer func() {
			close(done)
		}()
		conn, err := lis.Accept()
		if err != nil {
			t.Errorf("Error while accepting. Err: %v", err)
			return
		}
		defer conn.Close()
		// Sleep for a little bit to make sure that Dial on client
		// side blocks until settings are received.
		time.Sleep(100 * time.Millisecond)
		framer := http2.NewFramer(conn, conn)
		close(sent)
		if err := framer.WriteSettings(http2.Setting{}); err != nil {
			t.Errorf("Error while writing settings. Err: %v", err)
			return
		}
		<-dialDone // Close conn only after dial returns.
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	client, err := DialContext(ctx, lis.Addr().String(), WithInsecure(), WithBlock())
	close(dialDone)
	if err != nil {
		t.Fatalf("Error while dialing. Err: %v", err)
	}
	defer client.Close()
	select {
	case <-sent:
	default:
		t.Fatalf("Dial returned before server settings were sent")
	}
	<-done
}

// Verifies that a blocking Dial against a server that never sends settings
// keeps retrying until the context deadline, then fails.
func (s) TestDialWaitsForServerSettingsAndFails(t *testing.T) {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	done := make(chan struct{})
	numConns := 0
	go func() { // Launch the server.
		defer func() {
			close(done)
		}()
		for {
			conn, err := lis.Accept()
			if err != nil {
				break
			}
			numConns++
			defer conn.Close()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	client, err := DialContext(ctx,
		lis.Addr().String(),
		WithInsecure(),
		WithBlock(),
		withBackoff(noBackoff{}),
		withMinConnectDeadline(func() time.Duration { return time.Second / 4 }))
	lis.Close()
	if err == nil {
		client.Close()
		t.Fatalf("Unexpected success (err=nil) while dialing")
	}
	if err != context.DeadlineExceeded {
		t.Fatalf("DialContext(_) = %v; want context.DeadlineExceeded", err)
	}
	<-done
	if numConns < 2 {
		t.Fatalf("dial attempts: %v; want > 1", numConns)
	}
}

// 1. Client connects to a server that doesn't send preface.
// 2. After minConnectTimeout(500 ms here), client disconnects and retries.
// 3. The new server sends its preface.
// 4. Client doesn't kill the connection this time.
func (s) TestCloseConnectionWhenServerPrefaceNotReceived(t *testing.T) {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	var (
		conn2 net.Conn
		over  uint32
	)
	defer func() {
		lis.Close()
		// conn2 shouldn't be closed until the client has
		// observed a successful test.
		if conn2 != nil {
			conn2.Close()
		}
	}()
	done := make(chan struct{})
	accepted := make(chan struct{})
	go func() { // Launch the server.
		defer close(done)
		conn1, err := lis.Accept()
		if err != nil {
			t.Errorf("Error while accepting. Err: %v", err)
			return
		}
		defer conn1.Close()
		// Don't send server settings and the client should close the connection and try again.
		conn2, err = lis.Accept() // Accept a reconnection request from client.
		if err != nil {
			t.Errorf("Error while accepting. Err: %v", err)
			return
		}
		close(accepted)
		framer := http2.NewFramer(conn2, conn2)
		if err = framer.WriteSettings(http2.Setting{}); err != nil {
			t.Errorf("Error while writing settings. Err: %v", err)
			return
		}
		b := make([]byte, 8)
		for {
			_, err = conn2.Read(b)
			if err == nil {
				continue
			}
			if atomic.LoadUint32(&over) == 1 {
				// The connection stayed alive for the timer.
				// Success.
				return
			}
			t.Errorf("Unexpected error while reading. Err: %v, want timeout error", err)
			break
		}
	}()
	client, err := Dial(lis.Addr().String(), WithInsecure(), withMinConnectDeadline(func() time.Duration { return time.Millisecond * 500 }))
	if err != nil {
		t.Fatalf("Error while dialing. Err: %v", err)
	}
	// wait for connection to be accepted on the server.
	timer := time.NewTimer(time.Second * 10)
	select {
	case <-accepted:
	case <-timer.C:
		t.Fatalf("Client didn't make another connection request in time.")
	}
	// Make sure the connection stays alive for sometime.
	time.Sleep(time.Second)
	atomic.StoreUint32(&over, 1)
	client.Close()
	<-done
}

// Verifies that reconnect attempts against a server that keeps closing the
// connection are spaced by an increasing backoff.
func (s) TestBackoffWhenNoServerPrefaceReceived(t *testing.T) {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	defer lis.Close()
	done := make(chan struct{})
	go func() { // Launch the server.
		defer func() {
			close(done)
		}()
		conn, err := lis.Accept() // Accept the connection only to close it immediately.
		if err != nil {
			t.Errorf("Error while accepting. Err: %v", err)
			return
		}
		prevAt := time.Now()
		conn.Close()
		var prevDuration time.Duration
		// Make sure the retry attempts are backed off properly.
		for i := 0; i < 3; i++ {
			conn, err := lis.Accept()
			if err != nil {
				t.Errorf("Error while accepting. Err: %v", err)
				return
			}
			meow := time.Now()
			conn.Close()
			dr := meow.Sub(prevAt)
			if dr <= prevDuration {
				t.Errorf("Client backoff did not increase with retries. Previous duration: %v, current duration: %v", prevDuration, dr)
				return
			}
			prevDuration = dr
			prevAt = meow
		}
	}()
	client, err := Dial(lis.Addr().String(), WithInsecure())
	if err != nil {
		t.Fatalf("Error while dialing. Err: %v", err)
	}
	defer client.Close()
	<-done
}

// Exercises READY -> TRANSIENT FAILURE -> READY transitions via injected
// updates through the deprecated naming-based balancer API.
func (s) TestConnectivityStates(t *testing.T) {
	servers, resolver, cleanup := startServers(t, 2, math.MaxUint32)
	defer cleanup()
	cc, err := Dial("passthrough:///foo.bar.com", WithBalancer(RoundRobin(resolver)), WithInsecure())
	if err != nil {
		t.Fatalf("Dial(\"foo.bar.com\", WithBalancer(_)) = _, %v, want _ ", err)
	}
	defer cc.Close()
	wantState := connectivity.Ready
	if state, ok := assertState(wantState, cc); !ok {
		t.Fatalf("asserState(%s) = %s, false, want %s, true", wantState, state, wantState)
	}
	// Send an update to delete the server connection (tearDown addrConn).
	update := []*naming.Update{
		{
			Op:   naming.Delete,
			Addr: "localhost:" + servers[0].port,
		},
	}
	resolver.w.inject(update)
	wantState = connectivity.TransientFailure
	if state, ok := assertState(wantState, cc); !ok {
		t.Fatalf("asserState(%s) = %s, false, want %s, true", wantState, state, wantState)
	}
	update[0] = &naming.Update{
		Op:   naming.Add,
		Addr: "localhost:" + servers[1].port,
	}
	resolver.w.inject(update)
	wantState = connectivity.Ready
	if state, ok := assertState(wantState, cc); !ok {
		t.Fatalf("asserState(%s) = %s, false, want %s, true", wantState, state, wantState)
	}
}

// WithTimeout + WithBlock against an unreachable server must surface
// context.DeadlineExceeded.
func (s) TestWithTimeout(t *testing.T) {
	conn, err := Dial("passthrough:///Non-Existent.Server:80", WithTimeout(time.Millisecond), WithBlock(), WithInsecure())
	if err == nil {
		conn.Close()
	}
	if err != context.DeadlineExceeded {
		t.Fatalf("Dial(_, _) = %v, %v, want %v", conn, err, context.DeadlineExceeded)
	}
}

// Same as above but with TLS transport credentials configured.
func (s) TestWithTransportCredentialsTLS(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com")
	if err != nil {
		t.Fatalf("Failed to create credentials %v", err)
	}
	conn, err := DialContext(ctx, "passthrough:///Non-Existent.Server:80", WithTransportCredentials(creds), WithBlock())
	if err == nil {
		conn.Close()
	}
	if err != context.DeadlineExceeded {
		t.Fatalf("Dial(_, _) = %v, %v, want %v", conn, err, context.DeadlineExceeded)
	}
}

// The :authority defaults to the dial target when nothing overrides it.
func (s) TestDefaultAuthority(t *testing.T) {
	target := "Non-Existent.Server:8080"
	conn, err := Dial(target, WithInsecure())
	if err != nil {
		t.Fatalf("Dial(_, _) = _, %v, want _, ", err)
	}
	defer conn.Close()
	if conn.authority != target {
		t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, target)
	}
}

// The TLS ServerName from the credentials overrides the target authority.
func (s) TestTLSServerNameOverwrite(t *testing.T) {
	overwriteServerName := "over.write.server.name"
	creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), overwriteServerName)
	if err != nil {
		t.Fatalf("Failed to create credentials %v", err)
	}
	conn, err := Dial("passthrough:///Non-Existent.Server:80", WithTransportCredentials(creds))
	if err != nil {
		t.Fatalf("Dial(_, _) = _, %v, want _, ", err)
	}
	defer conn.Close()
	if conn.authority != overwriteServerName {
		t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName)
	}
}

// WithAuthority overrides the authority on insecure connections.
func (s) TestWithAuthority(t *testing.T) {
	overwriteServerName := "over.write.server.name"
	conn, err := Dial("passthrough:///Non-Existent.Server:80", WithInsecure(), WithAuthority(overwriteServerName))
	if err != nil {
		t.Fatalf("Dial(_, _) = _, %v, want _, ", err)
	}
	defer conn.Close()
	if conn.authority != overwriteServerName {
		t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName)
	}
}

// TLS credentials' ServerName takes precedence over WithAuthority.
func (s) TestWithAuthorityAndTLS(t *testing.T) {
	overwriteServerName := "over.write.server.name"
	creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), overwriteServerName)
	if err != nil {
		t.Fatalf("Failed to create credentials %v", err)
	}
	conn, err := Dial("passthrough:///Non-Existent.Server:80", WithTransportCredentials(creds), WithAuthority("no.effect.authority"))
	if err != nil {
		t.Fatalf("Dial(_, _) = _, %v, want _, ", err)
	}
	defer conn.Close()
	if conn.authority != overwriteServerName {
		t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName)
	}
}

// When creating a transport configured with n addresses, only calculate the
// backoff once per "round" of attempts instead of once per address (n times
// per "round" of attempts).
func (s) TestDial_OneBackoffPerRetryGroup(t *testing.T) {
	var attempts uint32
	getMinConnectTimeout := func() time.Duration {
		if atomic.AddUint32(&attempts, 1) == 1 {
			// Once all addresses are exhausted, hang around and wait for the
			// client.Close to happen rather than re-starting a new round of
			// attempts.
			return time.Hour
		}
		t.Error("only one attempt backoff calculation, but got more")
		return 0
	}

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	lis1, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	defer lis1.Close()

	lis2, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error while listening. Err: %v", err)
	}
	defer lis2.Close()

	server1Done := make(chan struct{})
	server2Done := make(chan struct{})

	// Launch server 1.
	go func() {
		conn, err := lis1.Accept()
		if err != nil {
			t.Error(err)
			return
		}

		conn.Close()
		close(server1Done)
	}()
	// Launch server 2.
	go func() {
		conn, err := lis2.Accept()
		if err != nil {
			t.Error(err)
			return
		}
		conn.Close()
		close(server2Done)
	}()

	rb := manual.NewBuilderWithScheme("whatever")
	rb.InitialState(resolver.State{Addresses: []resolver.Address{
		{Addr: lis1.Addr().String()},
		{Addr: lis2.Addr().String()},
	}})
	client, err := DialContext(ctx, "whatever:///this-gets-overwritten",
		WithInsecure(),
		WithBalancerName(stateRecordingBalancerName),
		WithResolvers(rb),
		withMinConnectDeadline(getMinConnectTimeout))
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	timeout := time.After(15 * time.Second)

	select {
	case <-timeout:
		t.Fatal("timed out waiting for test to finish")
	case <-server1Done:
	}
	select {
	case <-timeout:
		t.Fatal("timed out waiting for test to finish")
	case <-server2Done:
	}
}

// A pre-canceled context makes a blocking DialContext return context.Canceled.
func (s) TestDialContextCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	if _, err := DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithInsecure()); err != context.Canceled {
		t.Fatalf("DialContext(%v, _) = _, %v, want _, %v", ctx, err, context.Canceled)
	}
}

// failFastError is a non-temporary dial error used with
// FailOnNonTempDialError.
type failFastError struct{}

func (failFastError) Error() string   { return "failfast" }
func (failFastError) Temporary() bool { return false }

// FailOnNonTempDialError(true) must surface a non-temporary dialer error
// immediately instead of retrying.
func (s) TestDialContextFailFast(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	failErr := failFastError{}
	dialer := func(string, time.Duration) (net.Conn, error) {
		return nil, failErr
	}

	_, err := DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithInsecure(), WithDialer(dialer), FailOnNonTempDialError(true))
	if terr, ok := err.(transport.ConnectionError); !ok || terr.Origin() != failErr {
		t.Fatalf("DialContext() = _, %v, want _, %v", err, failErr)
	}
}

// blockingBalancer mimics the behavior of balancers whose initialization takes a long time.
// In this test, reading from blockingBalancer.Notify() blocks forever.
type blockingBalancer struct {
	ch chan []Address
}

func newBlockingBalancer() Balancer {
	return &blockingBalancer{ch: make(chan []Address)}
}
func (b *blockingBalancer) Start(target string, config BalancerConfig) error {
	return nil
}
func (b *blockingBalancer) Up(addr Address) func(error) {
	return nil
}
func (b *blockingBalancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
	return Address{}, nil, nil
}
func (b *blockingBalancer) Notify() <-chan []Address {
	return b.ch
}
func (b *blockingBalancer) Close() error {
	close(b.ch)
	return nil
}

// Canceling the context must unblock a Dial stuck on a never-notifying
// balancer.
func (s) TestDialWithBlockingBalancer(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	dialDone := make(chan struct{})
	go func() {
		DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithInsecure(), WithBalancer(newBlockingBalancer()))
		close(dialDone)
	}()
	cancel()
	<-dialDone
}

// securePerRPCCredentials always requires transport security.
type securePerRPCCredentials struct{}

func (c securePerRPCCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return nil, nil
}

func (c securePerRPCCredentials) RequireTransportSecurity() bool {
	return true
}

// Conflicting or insufficient credential configurations must be rejected at
// dial time with the dedicated sentinel errors.
func (s) TestCredentialsMisuse(t *testing.T) {
	tlsCreds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com")
	if err != nil {
		t.Fatalf("Failed to create authenticator %v", err)
	}
	// Two conflicting credential configurations
	if _, err := Dial("passthrough:///Non-Existent.Server:80", WithTransportCredentials(tlsCreds), WithBlock(), WithInsecure()); err != errCredentialsConflict {
		t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errCredentialsConflict)
	}
	// security info on insecure connection
	if _, err := Dial("passthrough:///Non-Existent.Server:80", WithPerRPCCredentials(securePerRPCCredentials{}), WithBlock(), WithInsecure()); err != errTransportCredentialsMissing {
		t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errTransportCredentialsMissing)
	}
}

func (s) TestWithBackoffConfigDefault(t *testing.T) {
	testBackoffConfigSet(t, internalbackoff.DefaultExponential)
}

func (s) TestWithBackoffConfig(t *testing.T) {
	b := BackoffConfig{MaxDelay: DefaultBackoffConfig.MaxDelay / 2}
	bc := backoff.DefaultConfig
	bc.MaxDelay = b.MaxDelay
	wantBackoff := internalbackoff.Exponential{Config: bc}
	testBackoffConfigSet(t, wantBackoff, WithBackoffConfig(b))
}

func (s) TestWithBackoffMaxDelay(t *testing.T) {
	md := DefaultBackoffConfig.MaxDelay / 2
	bc := backoff.DefaultConfig
	bc.MaxDelay = md
	wantBackoff := internalbackoff.Exponential{Config: bc}
	testBackoffConfigSet(t, wantBackoff, WithBackoffMaxDelay(md))
}

func (s) TestWithConnectParams(t *testing.T) {
	bd := 2 * time.Second
	mltpr := 2.0
	jitter := 0.0
	bc := backoff.Config{BaseDelay: bd, Multiplier: mltpr, Jitter: jitter}

	crt := ConnectParams{Backoff: bc}
	// MaxDelay is not set in the ConnectParams. So it should not be set on
	// internalbackoff.Exponential as well.
	wantBackoff := internalbackoff.Exponential{Config: bc}
	testBackoffConfigSet(t, wantBackoff, WithConnectParams(crt))
}

// testBackoffConfigSet dials with the given options and asserts the
// connection's backoff strategy equals wantBackoff.
func testBackoffConfigSet(t *testing.T, wantBackoff internalbackoff.Exponential, opts ...DialOption) {
	opts = append(opts, WithInsecure())
	conn, err := Dial("passthrough:///foo:80", opts...)
	if err != nil {
		t.Fatalf("unexpected error dialing connection: %v", err)
	}
	defer conn.Close()

	if conn.dopts.bs == nil {
		t.Fatalf("backoff config not set")
	}

	gotBackoff, ok := conn.dopts.bs.(internalbackoff.Exponential)
	if !ok {
		t.Fatalf("unexpected type of backoff config: %#v", conn.dopts.bs)
	}

	if gotBackoff != wantBackoff {
		t.Fatalf("unexpected backoff config on connection: %v, want %v", gotBackoff, wantBackoff)
	}
}

func (s) TestConnectParamsWithMinConnectTimeout(t *testing.T) {
	// Default value specified for minConnectTimeout in the spec is 20 seconds.
	mct := 1 * time.Minute
	conn, err := Dial("passthrough:///foo:80", WithInsecure(), WithConnectParams(ConnectParams{MinConnectTimeout: mct}))
	if err != nil {
		t.Fatalf("unexpected error dialing connection: %v", err)
	}
	defer conn.Close()

	if got := conn.dopts.minConnectTimeout(); got != mct {
		t.Errorf("unexpect minConnectTimeout on the connection: %v, want %v", got, mct)
	}
}

// emptyBalancer returns an empty set of servers.
type emptyBalancer struct { ch chan []Address } func newEmptyBalancer() Balancer { return &emptyBalancer{ch: make(chan []Address, 1)} } func (b *emptyBalancer) Start(_ string, _ BalancerConfig) error { b.ch <- nil return nil } func (b *emptyBalancer) Up(_ Address) func(error) { return nil } func (b *emptyBalancer) Get(_ context.Context, _ BalancerGetOptions) (Address, func(), error) { return Address{}, nil, nil } func (b *emptyBalancer) Notify() <-chan []Address { return b.ch } func (b *emptyBalancer) Close() error { close(b.ch) return nil } func (s) TestNonblockingDialWithEmptyBalancer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() dialDone := make(chan error) go func() { dialDone <- func() error { conn, err := DialContext(ctx, "Non-Existent.Server:80", WithInsecure(), WithBalancer(newEmptyBalancer())) if err != nil { return err } return conn.Close() }() }() if err := <-dialDone; err != nil { t.Fatalf("unexpected error dialing connection: %s", err) } } func (s) TestResolverServiceConfigBeforeAddressNotPanic(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure()) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() // SwitchBalancer before NewAddress. There was no balancer created, this // makes sure we don't call close on nil balancerWrapper. r.UpdateState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) // This should not panic. time.Sleep(time.Second) // Sleep to make sure the service config is handled by ClientConn. } func (s) TestResolverServiceConfigWhileClosingNotPanic(t *testing.T) { for i := 0; i < 10; i++ { // Run this multiple times to make sure it doesn't panic. 
r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure()) if err != nil { t.Fatalf("failed to dial: %v", err) } // Send a new service config while closing the ClientConn. go cc.Close() go r.UpdateState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) // This should not panic. } } func (s) TestResolverEmptyUpdateNotPanic(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure()) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() // This make sure we don't create addrConn with empty address list. r.UpdateState(resolver.State{}) // This should not panic. time.Sleep(time.Second) // Sleep to make sure the service config is handled by ClientConn. } func (s) TestClientUpdatesParamsAfterGoAway(t *testing.T) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen. Err: %v", err) } defer lis.Close() connected := make(chan struct{}) go func() { conn, err := lis.Accept() if err != nil { t.Errorf("error accepting connection: %v", err) return } defer conn.Close() f := http2.NewFramer(conn, conn) // Start a goroutine to read from the conn to prevent the client from // blocking after it writes its preface. 
go func() { for { if _, err := f.ReadFrame(); err != nil { return } } }() if err := f.WriteSettings(http2.Setting{}); err != nil { t.Errorf("error writing settings: %v", err) return } <-connected if err := f.WriteGoAway(0, http2.ErrCodeEnhanceYourCalm, []byte("too_many_pings")); err != nil { t.Errorf("error writing GOAWAY: %v", err) return } }() addr := lis.Addr().String() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() cc, err := DialContext(ctx, addr, WithBlock(), WithInsecure(), WithKeepaliveParams(keepalive.ClientParameters{ Time: 10 * time.Second, Timeout: 100 * time.Millisecond, PermitWithoutStream: true, })) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } defer cc.Close() close(connected) for { time.Sleep(10 * time.Millisecond) cc.mu.RLock() v := cc.mkp.Time if v == 20*time.Second { // Success cc.mu.RUnlock() return } if ctx.Err() != nil { // Timeout t.Fatalf("cc.dopts.copts.Keepalive.Time = %v , want 20s", v) } cc.mu.RUnlock() } } func (s) TestDisableServiceConfigOption(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() addr := r.Scheme() + ":///non.existent" cc, err := Dial(addr, WithInsecure(), WithDisableServiceConfig()) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } defer cc.Close() r.UpdateState(resolver.State{ServiceConfig: parseCfg(r, `{ "methodConfig": [ { "name": [ { "service": "foo", "method": "Bar" } ], "waitForReady": true } ] }`)}) time.Sleep(1 * time.Second) m := cc.GetMethodConfig("/foo/Bar") if m.WaitForReady != nil { t.Fatalf("want: method (\"/foo/bar/\") config to be empty, got: %+v", m) } } func (s) TestGetClientConnTarget(t *testing.T) { addr := "nonexist:///non.existent" cc, err := Dial(addr, WithInsecure()) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } defer cc.Close() if cc.Target() != addr { t.Fatalf("Target() = %s, want %s", cc.Target(), addr) } } type backoffForever struct{} func (b 
backoffForever) Backoff(int) time.Duration { return time.Duration(math.MaxInt64) } func (s) TestResetConnectBackoff(t *testing.T) { dials := make(chan struct{}) defer func() { // If we fail, let the http2client break out of dialing. select { case <-dials: default: } }() dialer := func(string, time.Duration) (net.Conn, error) { dials <- struct{}{} return nil, errors.New("failed to fake dial") } cc, err := Dial("any", WithInsecure(), WithDialer(dialer), withBackoff(backoffForever{})) if err != nil { t.Fatalf("Dial() = _, %v; want _, nil", err) } defer cc.Close() select { case <-dials: case <-time.NewTimer(10 * time.Second).C: t.Fatal("Failed to call dial within 10s") } select { case <-dials: t.Fatal("Dial called unexpectedly before resetting backoff") case <-time.NewTimer(100 * time.Millisecond).C: } cc.ResetConnectBackoff() select { case <-dials: case <-time.NewTimer(10 * time.Second).C: t.Fatal("Failed to call dial within 10s after resetting backoff") } } func (s) TestBackoffCancel(t *testing.T) { dialStrCh := make(chan string) cc, err := Dial("any", WithInsecure(), WithDialer(func(t string, _ time.Duration) (net.Conn, error) { dialStrCh <- t return nil, fmt.Errorf("test dialer, always error") })) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } <-dialStrCh cc.Close() // Should not leak. May need -count 5000 to exercise. } // UpdateAddresses should cause the next reconnect to begin from the top of the // list if the connection is not READY. func (s) TestUpdateAddresses_RetryFromFirstAddr(t *testing.T) { lis1, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening. Err: %v", err) } defer lis1.Close() lis2, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening. Err: %v", err) } defer lis2.Close() lis3, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening. 
Err: %v", err) } defer lis3.Close() closeServer2 := make(chan struct{}) server1ContactedFirstTime := make(chan struct{}) server1ContactedSecondTime := make(chan struct{}) server2ContactedFirstTime := make(chan struct{}) server2ContactedSecondTime := make(chan struct{}) server3Contacted := make(chan struct{}) // Launch server 1. go func() { // First, let's allow the initial connection to go READY. We need to do // this because tryUpdateAddrs only works after there's some non-nil // address on the ac, and curAddress is only set after READY. conn1, err := lis1.Accept() if err != nil { t.Error(err) return } go keepReading(conn1) framer := http2.NewFramer(conn1, conn1) if err := framer.WriteSettings(http2.Setting{}); err != nil { t.Errorf("Error while writing settings frame. %v", err) return } // nextStateNotifier() is updated after balancerBuilder.Build(), which is // called by grpc.Dial. It's safe to do it here because lis1.Accept blocks // until balancer is built to process the addresses. stateNotifications := testBalancerBuilder.nextStateNotifier() // Wait for the transport to become ready. for s := range stateNotifications { if s == connectivity.Ready { break } } // Once it's ready, curAddress has been set. So let's close this // connection prompting the first reconnect cycle. conn1.Close() // Accept and immediately close, causing it to go to server2. conn2, err := lis1.Accept() if err != nil { t.Error(err) return } close(server1ContactedFirstTime) conn2.Close() // Hopefully it picks this server after tryUpdateAddrs. lis1.Accept() close(server1ContactedSecondTime) }() // Launch server 2. go func() { // Accept and then hang waiting for the test call tryUpdateAddrs and // then signal to this server to close. After this server closes, it // should start from the top instead of trying server2 or continuing // to server3. 
conn, err := lis2.Accept() if err != nil { t.Error(err) return } close(server2ContactedFirstTime) <-closeServer2 conn.Close() // After tryUpdateAddrs, it should NOT try server2. lis2.Accept() close(server2ContactedSecondTime) }() // Launch server 3. go func() { // After tryUpdateAddrs, it should NOT try server3. (or any other time) lis3.Accept() close(server3Contacted) }() addrsList := []resolver.Address{ {Addr: lis1.Addr().String()}, {Addr: lis2.Addr().String()}, {Addr: lis3.Addr().String()}, } rb := manual.NewBuilderWithScheme("whatever") rb.InitialState(resolver.State{Addresses: addrsList}) client, err := Dial("whatever:///this-gets-overwritten", WithInsecure(), WithResolvers(rb), withBackoff(noBackoff{}), WithBalancerName(stateRecordingBalancerName), withMinConnectDeadline(func() time.Duration { return time.Hour })) if err != nil { t.Fatal(err) } defer client.Close() timeout := time.After(5 * time.Second) // Wait for server1 to be contacted (which will immediately fail), then // server2 (which will hang waiting for our signal). select { case <-server1ContactedFirstTime: case <-timeout: t.Fatal("timed out waiting for server1 to be contacted") } select { case <-server2ContactedFirstTime: case <-timeout: t.Fatal("timed out waiting for server2 to be contacted") } // Grab the addrConn and call tryUpdateAddrs. var ac *addrConn client.mu.Lock() for clientAC := range client.conns { ac = clientAC break } client.mu.Unlock() ac.acbw.UpdateAddresses(addrsList) // We've called tryUpdateAddrs - now let's make server2 close the // connection and check that it goes back to server1 instead of continuing // to server3 or trying server2 again. 
close(closeServer2) select { case <-server1ContactedSecondTime: case <-server2ContactedSecondTime: t.Fatal("server2 was contacted a second time, but it after tryUpdateAddrs it should have re-started the list and tried server1") case <-server3Contacted: t.Fatal("server3 was contacted, but after tryUpdateAddrs it should have re-started the list and tried server1") case <-timeout: t.Fatal("timed out waiting for any server to be contacted after tryUpdateAddrs") } } func (s) TestDefaultServiceConfig(t *testing.T) { r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() addr := r.Scheme() + ":///non.existent" js := `{ "methodConfig": [ { "name": [ { "service": "foo", "method": "bar" } ], "waitForReady": true } ] }` testInvalidDefaultServiceConfig(t) testDefaultServiceConfigWhenResolverServiceConfigDisabled(t, r, addr, js) testDefaultServiceConfigWhenResolverDoesNotReturnServiceConfig(t, r, addr, js) testDefaultServiceConfigWhenResolverReturnInvalidServiceConfig(t, r, addr, js) } func verifyWaitForReadyEqualsTrue(cc *ClientConn) bool { var i int for i = 0; i < 10; i++ { mc := cc.GetMethodConfig("/foo/bar") if mc.WaitForReady != nil && *mc.WaitForReady == true { break } time.Sleep(100 * time.Millisecond) } return i != 10 } func testInvalidDefaultServiceConfig(t *testing.T) { _, err := Dial("fake.com", WithInsecure(), WithDefaultServiceConfig("")) if !strings.Contains(err.Error(), invalidDefaultServiceConfigErrPrefix) { t.Fatalf("Dial got err: %v, want err contains: %v", err, invalidDefaultServiceConfigErrPrefix) } } func testDefaultServiceConfigWhenResolverServiceConfigDisabled(t *testing.T, r *manual.Resolver, addr string, js string) { cc, err := Dial(addr, WithInsecure(), WithDisableServiceConfig(), WithDefaultServiceConfig(js)) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } defer cc.Close() // Resolver service config gets ignored since resolver service config is disabled. 
r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: addr}}, ServiceConfig: parseCfg(r, "{}"), }) if !verifyWaitForReadyEqualsTrue(cc) { t.Fatal("default service config failed to be applied after 1s") } } func testDefaultServiceConfigWhenResolverDoesNotReturnServiceConfig(t *testing.T, r *manual.Resolver, addr string, js string) { cc, err := Dial(addr, WithInsecure(), WithDefaultServiceConfig(js)) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } defer cc.Close() r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: addr}}, }) if !verifyWaitForReadyEqualsTrue(cc) { t.Fatal("default service config failed to be applied after 1s") } } func testDefaultServiceConfigWhenResolverReturnInvalidServiceConfig(t *testing.T, r *manual.Resolver, addr string, js string) { cc, err := Dial(addr, WithInsecure(), WithDefaultServiceConfig(js)) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } defer cc.Close() r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: addr}}, }) if !verifyWaitForReadyEqualsTrue(cc) { t.Fatal("default service config failed to be applied after 1s") } } grpc-go-1.29.1/codec.go000066400000000000000000000032311365033716300145540ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpc import ( "google.golang.org/grpc/encoding" _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" ) // baseCodec contains the functionality of both Codec and encoding.Codec, but // omits the name/string, which vary between the two and are not needed for // anything besides the registry in the encoding package. type baseCodec interface { Marshal(v interface{}) ([]byte, error) Unmarshal(data []byte, v interface{}) error } var _ baseCodec = Codec(nil) var _ baseCodec = encoding.Codec(nil) // Codec defines the interface gRPC uses to encode and decode messages. // Note that implementations of this interface must be thread safe; // a Codec's methods can be called from concurrent goroutines. // // Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. Marshal(v interface{}) ([]byte, error) // Unmarshal parses the wire format into v. Unmarshal(data []byte, v interface{}) error // String returns the name of the Codec implementation. This is unused by // gRPC. String() string } grpc-go-1.29.1/codec_test.go000066400000000000000000000015771365033716300156260ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpc import ( "testing" "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" ) func (s) TestGetCodecForProtoIsNotNil(t *testing.T) { if encoding.GetCodec(proto.Name) == nil { t.Fatalf("encoding.GetCodec(%q) must not be nil by default", proto.Name) } } grpc-go-1.29.1/codegen.sh000077500000000000000000000011731365033716300151160ustar00rootroot00000000000000#!/usr/bin/env bash # This script serves as an example to demonstrate how to generate the gRPC-Go # interface and the related messages from .proto file. # # It assumes the installation of i) Google proto buffer compiler at # https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen # plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have # not, please install them first. # # We recommend running this script at $GOPATH/src. # # If this is not what you need, feel free to make your own scripts. Again, this # script is for demonstration purpose. # proto=$1 protoc --go_out=plugins=grpc:. $proto grpc-go-1.29.1/codes/000077500000000000000000000000001365033716300142465ustar00rootroot00000000000000grpc-go-1.29.1/codes/code_string.go000066400000000000000000000027051365033716300171010ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package codes import "strconv" func (c Code) String() string { switch c { case OK: return "OK" case Canceled: return "Canceled" case Unknown: return "Unknown" case InvalidArgument: return "InvalidArgument" case DeadlineExceeded: return "DeadlineExceeded" case NotFound: return "NotFound" case AlreadyExists: return "AlreadyExists" case PermissionDenied: return "PermissionDenied" case ResourceExhausted: return "ResourceExhausted" case FailedPrecondition: return "FailedPrecondition" case Aborted: return "Aborted" case OutOfRange: return "OutOfRange" case Unimplemented: return "Unimplemented" case Internal: return "Internal" case Unavailable: return "Unavailable" case DataLoss: return "DataLoss" case Unauthenticated: return "Unauthenticated" default: return "Code(" + strconv.FormatInt(int64(c), 10) + ")" } } grpc-go-1.29.1/codes/codes.go000066400000000000000000000161741365033716300157030ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package codes defines the canonical error codes used by gRPC. It is // consistent across various languages. package codes // import "google.golang.org/grpc/codes" import ( "fmt" "strconv" ) // A Code is an unsigned 32-bit error code as defined in the gRPC spec. type Code uint32 const ( // OK is returned on success. OK Code = 0 // Canceled indicates the operation was canceled (typically by the caller). Canceled Code = 1 // Unknown error. 
An example of where this error may be returned is // if a Status value received from another address space belongs to // an error-space that is not known in this address space. Also // errors raised by APIs that do not return enough error information // may be converted to this error. Unknown Code = 2 // InvalidArgument indicates client specified an invalid argument. // Note that this differs from FailedPrecondition. It indicates arguments // that are problematic regardless of the state of the system // (e.g., a malformed file name). InvalidArgument Code = 3 // DeadlineExceeded means operation expired before completion. // For operations that change the state of the system, this error may be // returned even if the operation has completed successfully. For // example, a successful response from a server could have been delayed // long enough for the deadline to expire. DeadlineExceeded Code = 4 // NotFound means some requested entity (e.g., file or directory) was // not found. NotFound Code = 5 // AlreadyExists means an attempt to create an entity failed because one // already exists. AlreadyExists Code = 6 // PermissionDenied indicates the caller does not have permission to // execute the specified operation. It must not be used for rejections // caused by exhausting some resource (use ResourceExhausted // instead for those errors). It must not be // used if the caller cannot be identified (use Unauthenticated // instead for those errors). PermissionDenied Code = 7 // ResourceExhausted indicates some resource has been exhausted, perhaps // a per-user quota, or perhaps the entire file system is out of space. ResourceExhausted Code = 8 // FailedPrecondition indicates operation was rejected because the // system is not in a state required for the operation's execution. // For example, directory to be deleted may be non-empty, an rmdir // operation is applied to a non-directory, etc. 
// // A litmus test that may help a service implementor in deciding // between FailedPrecondition, Aborted, and Unavailable: // (a) Use Unavailable if the client can retry just the failing call. // (b) Use Aborted if the client should retry at a higher-level // (e.g., restarting a read-modify-write sequence). // (c) Use FailedPrecondition if the client should not retry until // the system state has been explicitly fixed. E.g., if an "rmdir" // fails because the directory is non-empty, FailedPrecondition // should be returned since the client should not retry unless // they have first fixed up the directory by deleting files from it. // (d) Use FailedPrecondition if the client performs conditional // REST Get/Update/Delete on a resource and the resource on the // server does not match the condition. E.g., conflicting // read-modify-write on the same resource. FailedPrecondition Code = 9 // Aborted indicates the operation was aborted, typically due to a // concurrency issue like sequencer check failures, transaction aborts, // etc. // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. Aborted Code = 10 // OutOfRange means operation was attempted past the valid range. // E.g., seeking or reading past end of file. // // Unlike InvalidArgument, this error indicates a problem that may // be fixed if the system state changes. For example, a 32-bit file // system will generate InvalidArgument if asked to read at an // offset that is not in the range [0,2^32-1], but it will generate // OutOfRange if asked to read from an offset past the current // file size. // // There is a fair bit of overlap between FailedPrecondition and // OutOfRange. We recommend using OutOfRange (the more specific // error) when it applies so that callers who are iterating through // a space can easily look for an OutOfRange error to detect when // they are done. 
OutOfRange Code = 11 // Unimplemented indicates operation is not implemented or not // supported/enabled in this service. Unimplemented Code = 12 // Internal errors. Means some invariants expected by underlying // system has been broken. If you see one of these errors, // something is very broken. Internal Code = 13 // Unavailable indicates the service is currently unavailable. // This is a most likely a transient condition and may be corrected // by retrying with a backoff. Note that it is not always safe to retry // non-idempotent operations. // // See litmus test above for deciding between FailedPrecondition, // Aborted, and Unavailable. Unavailable Code = 14 // DataLoss indicates unrecoverable data loss or corruption. DataLoss Code = 15 // Unauthenticated indicates the request does not have valid // authentication credentials for the operation. Unauthenticated Code = 16 _maxCode = 17 ) var strToCode = map[string]Code{ `"OK"`: OK, `"CANCELLED"`:/* [sic] */ Canceled, `"UNKNOWN"`: Unknown, `"INVALID_ARGUMENT"`: InvalidArgument, `"DEADLINE_EXCEEDED"`: DeadlineExceeded, `"NOT_FOUND"`: NotFound, `"ALREADY_EXISTS"`: AlreadyExists, `"PERMISSION_DENIED"`: PermissionDenied, `"RESOURCE_EXHAUSTED"`: ResourceExhausted, `"FAILED_PRECONDITION"`: FailedPrecondition, `"ABORTED"`: Aborted, `"OUT_OF_RANGE"`: OutOfRange, `"UNIMPLEMENTED"`: Unimplemented, `"INTERNAL"`: Internal, `"UNAVAILABLE"`: Unavailable, `"DATA_LOSS"`: DataLoss, `"UNAUTHENTICATED"`: Unauthenticated, } // UnmarshalJSON unmarshals b into the Code. func (c *Code) UnmarshalJSON(b []byte) error { // From json.Unmarshaler: By convention, to approximate the behavior of // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as // a no-op. 
if string(b) == "null" { return nil } if c == nil { return fmt.Errorf("nil receiver passed to UnmarshalJSON") } if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { if ci >= _maxCode { return fmt.Errorf("invalid code: %q", ci) } *c = Code(ci) return nil } if jc, ok := strToCode[string(b)]; ok { *c = jc return nil } return fmt.Errorf("invalid code: %q", string(b)) } grpc-go-1.29.1/codes/codes_test.go000066400000000000000000000047201365033716300167340ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package codes import ( "encoding/json" "reflect" "testing" cpb "google.golang.org/genproto/googleapis/rpc/code" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func (s) TestUnmarshalJSON(t *testing.T) { for s, v := range cpb.Code_value { want := Code(v) var got Code if err := got.UnmarshalJSON([]byte(`"` + s + `"`)); err != nil || got != want { t.Errorf("got.UnmarshalJSON(%q) = %v; want . got=%v; want %v", s, err, got, want) } } } func (s) TestJSONUnmarshal(t *testing.T) { var got []Code want := []Code{OK, NotFound, Internal, Canceled} in := `["OK", "NOT_FOUND", "INTERNAL", "CANCELLED"]` err := json.Unmarshal([]byte(in), &got) if err != nil || !reflect.DeepEqual(got, want) { t.Fatalf("json.Unmarshal(%q, &got) = %v; want . 
got=%v; want %v", in, err, got, want) } } func (s) TestUnmarshalJSON_NilReceiver(t *testing.T) { var got *Code in := OK.String() if err := got.UnmarshalJSON([]byte(in)); err == nil { t.Errorf("got.UnmarshalJSON(%q) = nil; want . got=%v", in, got) } } func (s) TestUnmarshalJSON_UnknownInput(t *testing.T) { var got Code for _, in := range [][]byte{[]byte(""), []byte("xxx"), []byte("Code(17)"), nil} { if err := got.UnmarshalJSON([]byte(in)); err == nil { t.Errorf("got.UnmarshalJSON(%q) = nil; want . got=%v", in, got) } } } func (s) TestUnmarshalJSON_MarshalUnmarshal(t *testing.T) { for i := 0; i < _maxCode; i++ { var cUnMarshaled Code c := Code(i) cJSON, err := json.Marshal(c) if err != nil { t.Errorf("marshalling %q failed: %v", c, err) } if err := json.Unmarshal(cJSON, &cUnMarshaled); err != nil { t.Errorf("unmarshalling code failed: %s", err) } if c != cUnMarshaled { t.Errorf("code is %q after marshalling/unmarshalling, expected %q", cUnMarshaled, c) } } } grpc-go-1.29.1/connectivity/000077500000000000000000000000001365033716300156675ustar00rootroot00000000000000grpc-go-1.29.1/connectivity/connectivity.go000066400000000000000000000040771365033716300207440ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package connectivity defines connectivity semantics. // For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. // All APIs in this package are experimental. 
package connectivity import ( "context" "google.golang.org/grpc/grpclog" ) // State indicates the state of connectivity. // It can be the state of a ClientConn or SubConn. type State int func (s State) String() string { switch s { case Idle: return "IDLE" case Connecting: return "CONNECTING" case Ready: return "READY" case TransientFailure: return "TRANSIENT_FAILURE" case Shutdown: return "SHUTDOWN" default: grpclog.Errorf("unknown connectivity state: %d", s) return "Invalid-State" } } const ( // Idle indicates the ClientConn is idle. Idle State = iota // Connecting indicates the ClientConn is connecting. Connecting // Ready indicates the ClientConn is ready for work. Ready // TransientFailure indicates the ClientConn has seen a failure but expects to recover. TransientFailure // Shutdown indicates the ClientConn has started shutting down. Shutdown ) // Reporter reports the connectivity states. type Reporter interface { // CurrentState returns the current state of the reporter. CurrentState() State // WaitForStateChange blocks until the reporter's state is different from the given state, // and returns true. // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). WaitForStateChange(context.Context, State) bool } grpc-go-1.29.1/credentials/000077500000000000000000000000001365033716300154465ustar00rootroot00000000000000grpc-go-1.29.1/credentials/alts/000077500000000000000000000000001365033716300164115ustar00rootroot00000000000000grpc-go-1.29.1/credentials/alts/alts.go000066400000000000000000000252101365033716300177030ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package alts implements the ALTS credential support by gRPC library, which // encapsulates all the state needed by a client to authenticate with a server // using ALTS and make various assertions, e.g., about the client's identity, // role, or whether it is authorized to make a particular call. // This package is experimental. package alts import ( "context" "errors" "fmt" "net" "sync" "time" "google.golang.org/grpc/credentials" core "google.golang.org/grpc/credentials/alts/internal" "google.golang.org/grpc/credentials/alts/internal/handshaker" "google.golang.org/grpc/credentials/alts/internal/handshaker/service" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" "google.golang.org/grpc/grpclog" ) const ( // hypervisorHandshakerServiceAddress represents the default ALTS gRPC // handshaker service address in the hypervisor. hypervisorHandshakerServiceAddress = "metadata.google.internal.:8080" // defaultTimeout specifies the server handshake timeout. defaultTimeout = 30.0 * time.Second // The following constants specify the minimum and maximum acceptable // protocol versions. 
protocolVersionMaxMajor = 2 protocolVersionMaxMinor = 1 protocolVersionMinMajor = 2 protocolVersionMinMinor = 1 ) var ( once sync.Once maxRPCVersion = &altspb.RpcProtocolVersions_Version{ Major: protocolVersionMaxMajor, Minor: protocolVersionMaxMinor, } minRPCVersion = &altspb.RpcProtocolVersions_Version{ Major: protocolVersionMinMajor, Minor: protocolVersionMinMinor, } // ErrUntrustedPlatform is returned from ClientHandshake and // ServerHandshake is running on a platform where the trustworthiness of // the handshaker service is not guaranteed. ErrUntrustedPlatform = errors.New("ALTS: untrusted platform. ALTS is only supported on GCP") ) // AuthInfo exposes security information from the ALTS handshake to the // application. This interface is to be implemented by ALTS. Users should not // need a brand new implementation of this interface. For situations like // testing, any new implementation should embed this interface. This allows // ALTS to add new methods to this interface. type AuthInfo interface { // ApplicationProtocol returns application protocol negotiated for the // ALTS connection. ApplicationProtocol() string // RecordProtocol returns the record protocol negotiated for the ALTS // connection. RecordProtocol() string // SecurityLevel returns the security level of the created ALTS secure // channel. SecurityLevel() altspb.SecurityLevel // PeerServiceAccount returns the peer service account. PeerServiceAccount() string // LocalServiceAccount returns the local service account. LocalServiceAccount() string // PeerRPCVersions returns the RPC version supported by the peer. PeerRPCVersions() *altspb.RpcProtocolVersions } // ClientOptions contains the client-side options of an ALTS channel. These // options will be passed to the underlying ALTS handshaker. type ClientOptions struct { // TargetServiceAccounts contains a list of expected target service // accounts. 
TargetServiceAccounts []string // HandshakerServiceAddress represents the ALTS handshaker gRPC service // address to connect to. HandshakerServiceAddress string } // DefaultClientOptions creates a new ClientOptions object with the default // values. func DefaultClientOptions() *ClientOptions { return &ClientOptions{ HandshakerServiceAddress: hypervisorHandshakerServiceAddress, } } // ServerOptions contains the server-side options of an ALTS channel. These // options will be passed to the underlying ALTS handshaker. type ServerOptions struct { // HandshakerServiceAddress represents the ALTS handshaker gRPC service // address to connect to. HandshakerServiceAddress string } // DefaultServerOptions creates a new ServerOptions object with the default // values. func DefaultServerOptions() *ServerOptions { return &ServerOptions{ HandshakerServiceAddress: hypervisorHandshakerServiceAddress, } } // altsTC is the credentials required for authenticating a connection using ALTS. // It implements credentials.TransportCredentials interface. type altsTC struct { info *credentials.ProtocolInfo side core.Side accounts []string hsAddress string } // NewClientCreds constructs a client-side ALTS TransportCredentials object. func NewClientCreds(opts *ClientOptions) credentials.TransportCredentials { return newALTS(core.ClientSide, opts.TargetServiceAccounts, opts.HandshakerServiceAddress) } // NewServerCreds constructs a server-side ALTS TransportCredentials object. 
func NewServerCreds(opts *ServerOptions) credentials.TransportCredentials { return newALTS(core.ServerSide, nil, opts.HandshakerServiceAddress) } func newALTS(side core.Side, accounts []string, hsAddress string) credentials.TransportCredentials { once.Do(func() { vmOnGCP = isRunningOnGCP() }) if hsAddress == "" { hsAddress = hypervisorHandshakerServiceAddress } return &altsTC{ info: &credentials.ProtocolInfo{ SecurityProtocol: "alts", SecurityVersion: "1.0", }, side: side, accounts: accounts, hsAddress: hsAddress, } } // ClientHandshake implements the client side handshake protocol. func (g *altsTC) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { if !vmOnGCP { return nil, nil, ErrUntrustedPlatform } // Connecting to ALTS handshaker service. hsConn, err := service.Dial(g.hsAddress) if err != nil { return nil, nil, err } // Do not close hsConn since it is shared with other handshakes. // Possible context leak: // The cancel function for the child context we create will only be // called a non-nil error is returned. 
var cancel context.CancelFunc ctx, cancel = context.WithCancel(ctx) defer func() { if err != nil { cancel() } }() opts := handshaker.DefaultClientHandshakerOptions() opts.TargetName = addr opts.TargetServiceAccounts = g.accounts opts.RPCVersions = &altspb.RpcProtocolVersions{ MaxRpcVersion: maxRPCVersion, MinRpcVersion: minRPCVersion, } chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, opts) if err != nil { return nil, nil, err } defer func() { if err != nil { chs.Close() } }() secConn, authInfo, err := chs.ClientHandshake(ctx) if err != nil { return nil, nil, err } altsAuthInfo, ok := authInfo.(AuthInfo) if !ok { return nil, nil, errors.New("client-side auth info is not of type alts.AuthInfo") } match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) if !match { return nil, nil, fmt.Errorf("server-side RPC versions are not compatible with this client, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) } return secConn, authInfo, nil } // ServerHandshake implements the server side ALTS handshaker. func (g *altsTC) ServerHandshake(rawConn net.Conn) (_ net.Conn, _ credentials.AuthInfo, err error) { if !vmOnGCP { return nil, nil, ErrUntrustedPlatform } // Connecting to ALTS handshaker service. hsConn, err := service.Dial(g.hsAddress) if err != nil { return nil, nil, err } // Do not close hsConn since it's shared with other handshakes. 
ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) defer cancel() opts := handshaker.DefaultServerHandshakerOptions() opts.RPCVersions = &altspb.RpcProtocolVersions{ MaxRpcVersion: maxRPCVersion, MinRpcVersion: minRPCVersion, } shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, opts) if err != nil { return nil, nil, err } defer func() { if err != nil { shs.Close() } }() secConn, authInfo, err := shs.ServerHandshake(ctx) if err != nil { return nil, nil, err } altsAuthInfo, ok := authInfo.(AuthInfo) if !ok { return nil, nil, errors.New("server-side auth info is not of type alts.AuthInfo") } match, _ := checkRPCVersions(opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) if !match { return nil, nil, fmt.Errorf("client-side RPC versions is not compatible with this server, local versions: %v, peer versions: %v", opts.RPCVersions, altsAuthInfo.PeerRPCVersions()) } return secConn, authInfo, nil } func (g *altsTC) Info() credentials.ProtocolInfo { return *g.info } func (g *altsTC) Clone() credentials.TransportCredentials { info := *g.info var accounts []string if g.accounts != nil { accounts = make([]string, len(g.accounts)) copy(accounts, g.accounts) } return &altsTC{ info: &info, side: g.side, hsAddress: g.hsAddress, accounts: accounts, } } func (g *altsTC) OverrideServerName(serverNameOverride string) error { g.info.ServerName = serverNameOverride return nil } // compareRPCVersion returns 0 if v1 == v2, 1 if v1 > v2 and -1 if v1 < v2. func compareRPCVersions(v1, v2 *altspb.RpcProtocolVersions_Version) int { switch { case v1.GetMajor() > v2.GetMajor(), v1.GetMajor() == v2.GetMajor() && v1.GetMinor() > v2.GetMinor(): return 1 case v1.GetMajor() < v2.GetMajor(), v1.GetMajor() == v2.GetMajor() && v1.GetMinor() < v2.GetMinor(): return -1 } return 0 } // checkRPCVersions performs a version check between local and peer rpc protocol // versions. 
This function returns true if the check passes which means both // parties agreed on a common rpc protocol to use, and false otherwise. The // function also returns the highest common RPC protocol version both parties // agreed on. func checkRPCVersions(local, peer *altspb.RpcProtocolVersions) (bool, *altspb.RpcProtocolVersions_Version) { if local == nil || peer == nil { grpclog.Error("invalid checkRPCVersions argument, either local or peer is nil.") return false, nil } // maxCommonVersion is MIN(local.max, peer.max). maxCommonVersion := local.GetMaxRpcVersion() if compareRPCVersions(local.GetMaxRpcVersion(), peer.GetMaxRpcVersion()) > 0 { maxCommonVersion = peer.GetMaxRpcVersion() } // minCommonVersion is MAX(local.min, peer.min). minCommonVersion := peer.GetMinRpcVersion() if compareRPCVersions(local.GetMinRpcVersion(), peer.GetMinRpcVersion()) > 0 { minCommonVersion = local.GetMinRpcVersion() } if compareRPCVersions(maxCommonVersion, minCommonVersion) < 0 { return false, nil } return true, maxCommonVersion } grpc-go-1.29.1/credentials/alts/alts_test.go000066400000000000000000000171001365033716300207410ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package alts import ( "reflect" "testing" "github.com/golang/protobuf/proto" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func (s) TestInfoServerName(t *testing.T) { // This is not testing any handshaker functionality, so it's fine to only // use NewServerCreds and not NewClientCreds. alts := NewServerCreds(DefaultServerOptions()) if got, want := alts.Info().ServerName, ""; got != want { t.Fatalf("%v.Info().ServerName = %v, want %v", alts, got, want) } } func (s) TestOverrideServerName(t *testing.T) { wantServerName := "server.name" // This is not testing any handshaker functionality, so it's fine to only // use NewServerCreds and not NewClientCreds. c := NewServerCreds(DefaultServerOptions()) c.OverrideServerName(wantServerName) if got, want := c.Info().ServerName, wantServerName; got != want { t.Fatalf("c.Info().ServerName = %v, want %v", got, want) } } func (s) TestCloneClient(t *testing.T) { wantServerName := "server.name" opt := DefaultClientOptions() opt.TargetServiceAccounts = []string{"not", "empty"} c := NewClientCreds(opt) c.OverrideServerName(wantServerName) cc := c.Clone() if got, want := cc.Info().ServerName, wantServerName; got != want { t.Fatalf("cc.Info().ServerName = %v, want %v", got, want) } cc.OverrideServerName("") if got, want := c.Info().ServerName, wantServerName; got != want { t.Fatalf("Change in clone should not affect the original, c.Info().ServerName = %v, want %v", got, want) } if got, want := cc.Info().ServerName, ""; got != want { t.Fatalf("cc.Info().ServerName = %v, want %v", got, want) } ct := c.(*altsTC) cct := cc.(*altsTC) if ct.side != cct.side { t.Errorf("cc.side = %q, want %q", cct.side, ct.side) } if ct.hsAddress != cct.hsAddress { t.Errorf("cc.hsAddress = %q, want %q", cct.hsAddress, ct.hsAddress) } if !reflect.DeepEqual(ct.accounts, cct.accounts) { 
t.Errorf("cc.accounts = %q, want %q", cct.accounts, ct.accounts) } } func (s) TestCloneServer(t *testing.T) { wantServerName := "server.name" c := NewServerCreds(DefaultServerOptions()) c.OverrideServerName(wantServerName) cc := c.Clone() if got, want := cc.Info().ServerName, wantServerName; got != want { t.Fatalf("cc.Info().ServerName = %v, want %v", got, want) } cc.OverrideServerName("") if got, want := c.Info().ServerName, wantServerName; got != want { t.Fatalf("Change in clone should not affect the original, c.Info().ServerName = %v, want %v", got, want) } if got, want := cc.Info().ServerName, ""; got != want { t.Fatalf("cc.Info().ServerName = %v, want %v", got, want) } ct := c.(*altsTC) cct := cc.(*altsTC) if ct.side != cct.side { t.Errorf("cc.side = %q, want %q", cct.side, ct.side) } if ct.hsAddress != cct.hsAddress { t.Errorf("cc.hsAddress = %q, want %q", cct.hsAddress, ct.hsAddress) } if !reflect.DeepEqual(ct.accounts, cct.accounts) { t.Errorf("cc.accounts = %q, want %q", cct.accounts, ct.accounts) } } func (s) TestInfo(t *testing.T) { // This is not testing any handshaker functionality, so it's fine to only // use NewServerCreds and not NewClientCreds. 
c := NewServerCreds(DefaultServerOptions()) info := c.Info() if got, want := info.ProtocolVersion, ""; got != want { t.Errorf("info.ProtocolVersion=%v, want %v", got, want) } if got, want := info.SecurityProtocol, "alts"; got != want { t.Errorf("info.SecurityProtocol=%v, want %v", got, want) } if got, want := info.SecurityVersion, "1.0"; got != want { t.Errorf("info.SecurityVersion=%v, want %v", got, want) } if got, want := info.ServerName, ""; got != want { t.Errorf("info.ServerName=%v, want %v", got, want) } } func (s) TestCompareRPCVersions(t *testing.T) { for _, tc := range []struct { v1 *altspb.RpcProtocolVersions_Version v2 *altspb.RpcProtocolVersions_Version output int }{ { version(3, 2), version(2, 1), 1, }, { version(3, 2), version(3, 1), 1, }, { version(2, 1), version(3, 2), -1, }, { version(3, 1), version(3, 2), -1, }, { version(3, 2), version(3, 2), 0, }, } { if got, want := compareRPCVersions(tc.v1, tc.v2), tc.output; got != want { t.Errorf("compareRPCVersions(%v, %v)=%v, want %v", tc.v1, tc.v2, got, want) } } } func (s) TestCheckRPCVersions(t *testing.T) { for _, tc := range []struct { desc string local *altspb.RpcProtocolVersions peer *altspb.RpcProtocolVersions output bool maxCommonVersion *altspb.RpcProtocolVersions_Version }{ { "local.max > peer.max and local.min > peer.min", versions(2, 1, 3, 2), versions(1, 2, 2, 1), true, version(2, 1), }, { "local.max > peer.max and local.min < peer.min", versions(1, 2, 3, 2), versions(2, 1, 2, 1), true, version(2, 1), }, { "local.max > peer.max and local.min = peer.min", versions(2, 1, 3, 2), versions(2, 1, 2, 1), true, version(2, 1), }, { "local.max < peer.max and local.min > peer.min", versions(2, 1, 2, 1), versions(1, 2, 3, 2), true, version(2, 1), }, { "local.max = peer.max and local.min > peer.min", versions(2, 1, 2, 1), versions(1, 2, 2, 1), true, version(2, 1), }, { "local.max < peer.max and local.min < peer.min", versions(1, 2, 2, 1), versions(2, 1, 3, 2), true, version(2, 1), }, { "local.max < 
peer.max and local.min = peer.min", versions(1, 2, 2, 1), versions(1, 2, 3, 2), true, version(2, 1), }, { "local.max = peer.max and local.min < peer.min", versions(1, 2, 2, 1), versions(2, 1, 2, 1), true, version(2, 1), }, { "all equal", versions(2, 1, 2, 1), versions(2, 1, 2, 1), true, version(2, 1), }, { "max is smaller than min", versions(2, 1, 1, 2), versions(2, 1, 1, 2), false, nil, }, { "no overlap, local > peer", versions(4, 3, 6, 5), versions(1, 0, 2, 1), false, nil, }, { "no overlap, local < peer", versions(1, 0, 2, 1), versions(4, 3, 6, 5), false, nil, }, { "no overlap, max < min", versions(6, 5, 4, 3), versions(2, 1, 1, 0), false, nil, }, } { output, maxCommonVersion := checkRPCVersions(tc.local, tc.peer) if got, want := output, tc.output; got != want { t.Errorf("%v: checkRPCVersions(%v, %v)=(%v, _), want (%v, _)", tc.desc, tc.local, tc.peer, got, want) } if got, want := maxCommonVersion, tc.maxCommonVersion; !proto.Equal(got, want) { t.Errorf("%v: checkRPCVersions(%v, %v)=(_, %v), want (_, %v)", tc.desc, tc.local, tc.peer, got, want) } } } func version(major, minor uint32) *altspb.RpcProtocolVersions_Version { return &altspb.RpcProtocolVersions_Version{ Major: major, Minor: minor, } } func versions(minMajor, minMinor, maxMajor, maxMinor uint32) *altspb.RpcProtocolVersions { return &altspb.RpcProtocolVersions{ MinRpcVersion: version(minMajor, minMinor), MaxRpcVersion: version(maxMajor, maxMinor), } } grpc-go-1.29.1/credentials/alts/internal/000077500000000000000000000000001365033716300202255ustar00rootroot00000000000000grpc-go-1.29.1/credentials/alts/internal/authinfo/000077500000000000000000000000001365033716300220425ustar00rootroot00000000000000grpc-go-1.29.1/credentials/alts/internal/authinfo/authinfo.go000066400000000000000000000056631365033716300242200ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package authinfo provide authentication information returned by handshakers. package authinfo import ( "google.golang.org/grpc/credentials" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" ) var _ credentials.AuthInfo = (*altsAuthInfo)(nil) // altsAuthInfo exposes security information from the ALTS handshake to the // application. altsAuthInfo is immutable and implements credentials.AuthInfo. type altsAuthInfo struct { p *altspb.AltsContext credentials.CommonAuthInfo } // New returns a new altsAuthInfo object given handshaker results. func New(result *altspb.HandshakerResult) credentials.AuthInfo { return newAuthInfo(result) } func newAuthInfo(result *altspb.HandshakerResult) *altsAuthInfo { return &altsAuthInfo{ p: &altspb.AltsContext{ ApplicationProtocol: result.GetApplicationProtocol(), RecordProtocol: result.GetRecordProtocol(), // TODO: assign security level from result. SecurityLevel: altspb.SecurityLevel_INTEGRITY_AND_PRIVACY, PeerServiceAccount: result.GetPeerIdentity().GetServiceAccount(), LocalServiceAccount: result.GetLocalIdentity().GetServiceAccount(), PeerRpcVersions: result.GetPeerRpcVersions(), }, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, } } // AuthType identifies the context as providing ALTS authentication information. func (s *altsAuthInfo) AuthType() string { return "alts" } // ApplicationProtocol returns the context's application protocol. 
func (s *altsAuthInfo) ApplicationProtocol() string { return s.p.GetApplicationProtocol() } // RecordProtocol returns the context's record protocol. func (s *altsAuthInfo) RecordProtocol() string { return s.p.GetRecordProtocol() } // SecurityLevel returns the context's security level. func (s *altsAuthInfo) SecurityLevel() altspb.SecurityLevel { return s.p.GetSecurityLevel() } // PeerServiceAccount returns the context's peer service account. func (s *altsAuthInfo) PeerServiceAccount() string { return s.p.GetPeerServiceAccount() } // LocalServiceAccount returns the context's local service account. func (s *altsAuthInfo) LocalServiceAccount() string { return s.p.GetLocalServiceAccount() } // PeerRPCVersions returns the context's peer RPC versions. func (s *altsAuthInfo) PeerRPCVersions() *altspb.RpcProtocolVersions { return s.p.GetPeerRpcVersions() } grpc-go-1.29.1/credentials/alts/internal/authinfo/authinfo_test.go000066400000000000000000000077771365033716300252670ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package authinfo import ( "reflect" "testing" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } const ( testAppProtocol = "my_app" testRecordProtocol = "very_secure_protocol" testPeerAccount = "peer_service_account" testLocalAccount = "local_service_account" testPeerHostname = "peer_hostname" testLocalHostname = "local_hostname" ) func (s) TestALTSAuthInfo(t *testing.T) { for _, tc := range []struct { result *altspb.HandshakerResult outAppProtocol string outRecordProtocol string outSecurityLevel altspb.SecurityLevel outPeerAccount string outLocalAccount string outPeerRPCVersions *altspb.RpcProtocolVersions }{ { &altspb.HandshakerResult{ ApplicationProtocol: testAppProtocol, RecordProtocol: testRecordProtocol, PeerIdentity: &altspb.Identity{ IdentityOneof: &altspb.Identity_ServiceAccount{ ServiceAccount: testPeerAccount, }, }, LocalIdentity: &altspb.Identity{ IdentityOneof: &altspb.Identity_ServiceAccount{ ServiceAccount: testLocalAccount, }, }, }, testAppProtocol, testRecordProtocol, altspb.SecurityLevel_INTEGRITY_AND_PRIVACY, testPeerAccount, testLocalAccount, nil, }, { &altspb.HandshakerResult{ ApplicationProtocol: testAppProtocol, RecordProtocol: testRecordProtocol, PeerIdentity: &altspb.Identity{ IdentityOneof: &altspb.Identity_Hostname{ Hostname: testPeerHostname, }, }, LocalIdentity: &altspb.Identity{ IdentityOneof: &altspb.Identity_Hostname{ Hostname: testLocalHostname, }, }, PeerRpcVersions: &altspb.RpcProtocolVersions{ MaxRpcVersion: &altspb.RpcProtocolVersions_Version{ Major: 20, Minor: 21, }, MinRpcVersion: &altspb.RpcProtocolVersions_Version{ Major: 10, Minor: 11, }, }, }, testAppProtocol, testRecordProtocol, altspb.SecurityLevel_INTEGRITY_AND_PRIVACY, "", "", &altspb.RpcProtocolVersions{ MaxRpcVersion: &altspb.RpcProtocolVersions_Version{ Major: 20, Minor: 21, }, MinRpcVersion: 
&altspb.RpcProtocolVersions_Version{ Major: 10, Minor: 11, }, }, }, } { authInfo := newAuthInfo(tc.result) if got, want := authInfo.AuthType(), "alts"; got != want { t.Errorf("authInfo.AuthType()=%v, want %v", got, want) } if got, want := authInfo.ApplicationProtocol(), tc.outAppProtocol; got != want { t.Errorf("authInfo.ApplicationProtocol()=%v, want %v", got, want) } if got, want := authInfo.RecordProtocol(), tc.outRecordProtocol; got != want { t.Errorf("authInfo.RecordProtocol()=%v, want %v", got, want) } if got, want := authInfo.SecurityLevel(), tc.outSecurityLevel; got != want { t.Errorf("authInfo.SecurityLevel()=%v, want %v", got, want) } if got, want := authInfo.PeerServiceAccount(), tc.outPeerAccount; got != want { t.Errorf("authInfo.PeerServiceAccount()=%v, want %v", got, want) } if got, want := authInfo.LocalServiceAccount(), tc.outLocalAccount; got != want { t.Errorf("authInfo.LocalServiceAccount()=%v, want %v", got, want) } if got, want := authInfo.PeerRPCVersions(), tc.outPeerRPCVersions; !reflect.DeepEqual(got, want) { t.Errorf("authinfo.PeerRpcVersions()=%v, want %v", got, want) } } } grpc-go-1.29.1/credentials/alts/internal/common.go000066400000000000000000000043661365033716300220550ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate ./regenerate.sh // Package internal contains common core functionality for ALTS. 
package internal import ( "context" "net" "google.golang.org/grpc/credentials" ) const ( // ClientSide identifies the client in this communication. ClientSide Side = iota // ServerSide identifies the server in this communication. ServerSide ) // PeerNotRespondingError is returned when a peer server is not responding // after a channel has been established. It is treated as a temporary connection // error and re-connection to the server should be attempted. var PeerNotRespondingError = &peerNotRespondingError{} // Side identifies the party's role: client or server. type Side int type peerNotRespondingError struct{} // Return an error message for the purpose of logging. func (e *peerNotRespondingError) Error() string { return "peer server is not responding and re-connection should be attempted." } // Temporary indicates if this connection error is temporary or fatal. func (e *peerNotRespondingError) Temporary() bool { return true } // Handshaker defines a ALTS handshaker interface. type Handshaker interface { // ClientHandshake starts and completes a client-side handshaking and // returns a secure connection and corresponding auth information. ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) // ServerHandshake starts and completes a server-side handshaking and // returns a secure connection and corresponding auth information. ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) // Close terminates the Handshaker. It should be called when the caller // obtains the secure connection. Close() } grpc-go-1.29.1/credentials/alts/internal/conn/000077500000000000000000000000001365033716300211625ustar00rootroot00000000000000grpc-go-1.29.1/credentials/alts/internal/conn/aeadrekey.go000066400000000000000000000077471365033716300234620ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package conn import ( "bytes" "crypto/aes" "crypto/cipher" "crypto/hmac" "crypto/sha256" "encoding/binary" "fmt" "strconv" ) // rekeyAEAD holds the necessary information for an AEAD based on // AES-GCM that performs nonce-based key derivation and XORs the // nonce with a random mask. type rekeyAEAD struct { kdfKey []byte kdfCounter []byte nonceMask []byte nonceBuf []byte gcmAEAD cipher.AEAD } // KeySizeError signals that the given key does not have the correct size. type KeySizeError int func (k KeySizeError) Error() string { return "alts/conn: invalid key size " + strconv.Itoa(int(k)) } // newRekeyAEAD creates a new instance of aes128gcm with rekeying. // The key argument should be 44 bytes, the first 32 bytes are used as a key // for HKDF-expand and the remainining 12 bytes are used as a random mask for // the counter. func newRekeyAEAD(key []byte) (*rekeyAEAD, error) { k := len(key) if k != kdfKeyLen+nonceLen { return nil, KeySizeError(k) } return &rekeyAEAD{ kdfKey: key[:kdfKeyLen], kdfCounter: make([]byte, kdfCounterLen), nonceMask: key[kdfKeyLen:], nonceBuf: make([]byte, nonceLen), gcmAEAD: nil, }, nil } // Seal rekeys if nonce[2:8] is different than in the last call, masks the nonce, // and calls Seal for aes128gcm. 
func (s *rekeyAEAD) Seal(dst, nonce, plaintext, additionalData []byte) []byte { if err := s.rekeyIfRequired(nonce); err != nil { panic(fmt.Sprintf("Rekeying failed with: %s", err.Error())) } maskNonce(s.nonceBuf, nonce, s.nonceMask) return s.gcmAEAD.Seal(dst, s.nonceBuf, plaintext, additionalData) } // Open rekeys if nonce[2:8] is different than in the last call, masks the nonce, // and calls Open for aes128gcm. func (s *rekeyAEAD) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { if err := s.rekeyIfRequired(nonce); err != nil { return nil, err } maskNonce(s.nonceBuf, nonce, s.nonceMask) return s.gcmAEAD.Open(dst, s.nonceBuf, ciphertext, additionalData) } // rekeyIfRequired creates a new aes128gcm AEAD if the existing AEAD is nil // or cannot be used with given nonce. func (s *rekeyAEAD) rekeyIfRequired(nonce []byte) error { newKdfCounter := nonce[kdfCounterOffset : kdfCounterOffset+kdfCounterLen] if s.gcmAEAD != nil && bytes.Equal(newKdfCounter, s.kdfCounter) { return nil } copy(s.kdfCounter, newKdfCounter) a, err := aes.NewCipher(hkdfExpand(s.kdfKey, s.kdfCounter)) if err != nil { return err } s.gcmAEAD, err = cipher.NewGCM(a) return err } // maskNonce XORs the given nonce with the mask and stores the result in dst. func maskNonce(dst, nonce, mask []byte) { nonce1 := binary.LittleEndian.Uint64(nonce[:sizeUint64]) nonce2 := binary.LittleEndian.Uint32(nonce[sizeUint64:]) mask1 := binary.LittleEndian.Uint64(mask[:sizeUint64]) mask2 := binary.LittleEndian.Uint32(mask[sizeUint64:]) binary.LittleEndian.PutUint64(dst[:sizeUint64], nonce1^mask1) binary.LittleEndian.PutUint32(dst[sizeUint64:], nonce2^mask2) } // NonceSize returns the required nonce size. func (s *rekeyAEAD) NonceSize() int { return s.gcmAEAD.NonceSize() } // Overhead returns the ciphertext overhead. func (s *rekeyAEAD) Overhead() int { return s.gcmAEAD.Overhead() } // hkdfExpand computes the first 16 bytes of the HKDF-expand function // defined in RFC5869. 
func hkdfExpand(key, info []byte) []byte { mac := hmac.New(sha256.New, key) mac.Write(info) mac.Write([]byte{0x01}[:]) return mac.Sum(nil)[:aeadKeyLen] } grpc-go-1.29.1/credentials/alts/internal/conn/aeadrekey_test.go000066400000000000000000000362241365033716300245110ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package conn import ( "bytes" "encoding/hex" "testing" ) // cryptoTestVector is struct for a rekey test vector type rekeyAEADTestVector struct { desc string key, nonce, plaintext, aad, ciphertext []byte } // Test encrypt and decrypt using (adapted) test vectors for AES-GCM. func (s) TestAES128GCMRekeyEncrypt(t *testing.T) { for _, test := range []rekeyAEADTestVector{ // NIST vectors from: // http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf // // IEEE vectors from: // http://www.ieee802.org/1/files/public/docs2011/bn-randall-test-vectors-0511-v1.pdf // // Key expanded by setting // expandedKey = (key || // key ^ {0x01,..,0x01} || // key ^ {0x02,..,0x02})[0:44]. 
		{
			desc:       "Derived from NIST test vector 1",
			key:        dehex("0000000000000000000000000000000001010101010101010101010101010101020202020202020202020202"),
			nonce:      dehex("000000000000000000000000"),
			aad:        dehex(""),
			plaintext:  dehex(""),
			ciphertext: dehex("85e873e002f6ebdc4060954eb8675508"),
		},
		{
			desc:       "Derived from NIST test vector 2",
			key:        dehex("0000000000000000000000000000000001010101010101010101010101010101020202020202020202020202"),
			nonce:      dehex("000000000000000000000000"),
			aad:        dehex(""),
			plaintext:  dehex("00000000000000000000000000000000"),
			ciphertext: dehex("51e9a8cb23ca2512c8256afff8e72d681aca19a1148ac115e83df4888cc00d11"),
		},
		{
			desc:       "Derived from NIST test vector 3",
			key:        dehex("feffe9928665731c6d6a8f9467308308fffee8938764721d6c6b8e9566318209fcfdeb908467711e6f688d96"),
			nonce:      dehex("cafebabefacedbaddecaf888"),
			aad:        dehex(""),
			plaintext:  dehex("d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b391aafd255"),
			ciphertext: dehex("1018ed5a1402a86516d6576d70b2ffccca261b94df88b58f53b64dfba435d18b2f6e3b7869f9353d4ac8cf09afb1663daa7b4017e6fc2c177c0c087c0df1162129952213cee1bc6e9c8495dd705e1f3d"),
		},
		{
			desc:       "Derived from NIST test vector 4",
			key:        dehex("feffe9928665731c6d6a8f9467308308fffee8938764721d6c6b8e9566318209fcfdeb908467711e6f688d96"),
			nonce:      dehex("cafebabefacedbaddecaf888"),
			aad:        dehex("feedfacedeadbeeffeedfacedeadbeefabaddad2"),
			plaintext:  dehex("d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39"),
			ciphertext: dehex("1018ed5a1402a86516d6576d70b2ffccca261b94df88b58f53b64dfba435d18b2f6e3b7869f9353d4ac8cf09afb1663daa7b4017e6fc2c177c0c087c4764565d077e9124001ddb27fc0848c5"),
		},
		{
			desc:       "Derived from adapted NIST test vector 4 for KDF counter boundary (flip nonce bit 15)",
			key:        dehex("feffe9928665731c6d6a8f9467308308fffee8938764721d6c6b8e9566318209fcfdeb908467711e6f688d96"),
			nonce:      dehex("ca7ebabefacedbaddecaf888"),
			aad:        dehex("feedfacedeadbeeffeedfacedeadbeefabaddad2"),
			plaintext:  dehex("d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39"),
			ciphertext: dehex("e650d3c0fb879327f2d03287fa93cd07342b136215adbca00c3bd5099ec41832b1d18e0423ed26bb12c6cd09debb29230a94c0cee15903656f85edb6fc509b1b28216382172ecbcc31e1e9b1"),
		},
		{
			desc:       "Derived from adapted NIST test vector 4 for KDF counter boundary (flip nonce bit 16)",
			key:        dehex("feffe9928665731c6d6a8f9467308308fffee8938764721d6c6b8e9566318209fcfdeb908467711e6f688d96"),
			nonce:      dehex("cafebbbefacedbaddecaf888"),
			aad:        dehex("feedfacedeadbeeffeedfacedeadbeefabaddad2"),
			plaintext:  dehex("d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39"),
			ciphertext: dehex("c0121e6c954d0767f96630c33450999791b2da2ad05c4190169ccad9ac86ff1c721e3d82f2ad22ab463bab4a0754b7dd68ca4de7ea2531b625eda01f89312b2ab957d5c7f8568dd95fcdcd1f"),
		},
		{
			desc:       "Derived from adapted NIST test vector 4 for KDF counter boundary (flip nonce bit 63)",
			key:        dehex("feffe9928665731c6d6a8f9467308308fffee8938764721d6c6b8e9566318209fcfdeb908467711e6f688d96"),
			nonce:      dehex("cafebabefacedb2ddecaf888"),
			aad:        dehex("feedfacedeadbeeffeedfacedeadbeefabaddad2"),
			plaintext:  dehex("d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39"),
			ciphertext: dehex("8af37ea5684a4d81d4fd817261fd9743099e7e6a025eaacf8e54b124fb5743149e05cb89f4a49467fe2e5e5965f29a19f99416b0016b54585d12553783ba59e9f782e82e097c336bf7989f08"),
		},
		{
			desc:       "Derived from adapted NIST test vector 4 for KDF counter boundary (flip nonce bit 64)",
			key:        dehex("feffe9928665731c6d6a8f9467308308fffee8938764721d6c6b8e9566318209fcfdeb908467711e6f688d96"),
			nonce:      dehex("cafebabefacedbaddfcaf888"),
			aad:        dehex("feedfacedeadbeeffeedfacedeadbeefabaddad2"),
			plaintext:  dehex("d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39"),
			ciphertext: dehex("fbd528448d0346bfa878634864d407a35a039de9db2f1feb8e965b3ae9356ce6289441d77f8f0df294891f37ea438b223e3bf2bdc53d4c5a74fb680bb312a8dec6f7252cbcd7f5799750ad78"),
		},
		{
			desc:       "Derived from IEEE 2.1.1 54-byte auth",
			key:        dehex("ad7a2bd03eac835a6f620fdcb506b345ac7b2ad13fad825b6e630eddb407b244af7829d23cae81586d600dde"),
			nonce:      dehex("12153524c0895e81b2c28465"),
			aad:        dehex("d609b1f056637a0d46df998d88e5222ab2c2846512153524c0895e8108000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f30313233340001"),
			plaintext:  dehex(""),
			ciphertext: dehex("3ea0b584f3c85e93f9320ea591699efb"),
		},
		{
			desc:       "Derived from IEEE 2.1.2 54-byte auth",
			key:        dehex("e3c08a8f06c6e3ad95a70557b23f75483ce33021a9c72b7025666204c69c0b72e1c2888d04c4e1af97a50755"),
			nonce:      dehex("12153524c0895e81b2c28465"),
			aad:        dehex("d609b1f056637a0d46df998d88e5222ab2c2846512153524c0895e8108000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f30313233340001"),
			plaintext:  dehex(""),
			ciphertext: dehex("294e028bf1fe6f14c4e8f7305c933eb5"),
		},
		{
			desc:       "Derived from IEEE 2.2.1 60-byte crypt",
			key:        dehex("ad7a2bd03eac835a6f620fdcb506b345ac7b2ad13fad825b6e630eddb407b244af7829d23cae81586d600dde"),
			nonce:      dehex("12153524c0895e81b2c28465"),
			aad:        dehex("d609b1f056637a0d46df998d88e52e00b2c2846512153524c0895e81"),
			plaintext:  dehex("08000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a0002"),
			ciphertext: dehex("db3d25719c6b0a3ca6145c159d5c6ed9aff9c6e0b79f17019ea923b8665ddf52137ad611f0d1bf417a7ca85e45afe106ff9c7569d335d086ae6c03f00987ccd6"),
		},
		{
			desc:       "Derived from IEEE 2.2.2 60-byte crypt",
			key:        dehex("e3c08a8f06c6e3ad95a70557b23f75483ce33021a9c72b7025666204c69c0b72e1c2888d04c4e1af97a50755"),
			nonce:      dehex("12153524c0895e81b2c28465"),
			aad:        dehex("d609b1f056637a0d46df998d88e52e00b2c2846512153524c0895e81"),
			plaintext:  dehex("08000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a0002"),
			ciphertext: dehex("1641f28ec13afcc8f7903389787201051644914933e9202bb9d06aa020c2a67ef51dfe7bc00a856c55b8f8133e77f659132502bad63f5713d57d0c11e0f871ed"),
		},
		{
			desc:       "Derived from IEEE 2.3.1 60-byte auth",
			key:        dehex("071b113b0ca743fecccf3d051f737382061a103a0da642ffcdce3c041e727283051913390ea541fccecd3f07"),
			nonce:      dehex("f0761e8dcd3d000176d457ed"),
			aad:        dehex("e20106d7cd0df0761e8dcd3d88e5400076d457ed08000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a0003"),
			plaintext:  dehex(""),
			ciphertext: dehex("58837a10562b0f1f8edbe58ca55811d3"),
		},
		{
			desc:       "Derived from IEEE 2.3.2 60-byte auth",
			key:        dehex("691d3ee909d7f54167fd1ca0b5d769081f2bde1aee655fdbab80bd5295ae6be76b1f3ceb0bd5f74365ff1ea2"),
			nonce:      dehex("f0761e8dcd3d000176d457ed"),
			aad:        dehex("e20106d7cd0df0761e8dcd3d88e5400076d457ed08000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a0003"),
			plaintext:  dehex(""),
			ciphertext: dehex("c2722ff6ca29a257718a529d1f0c6a3b"),
		},
		{
			desc:       "Derived from IEEE 2.4.1 54-byte crypt",
			key:        dehex("071b113b0ca743fecccf3d051f737382061a103a0da642ffcdce3c041e727283051913390ea541fccecd3f07"),
			nonce:      dehex("f0761e8dcd3d000176d457ed"),
			aad:        dehex("e20106d7cd0df0761e8dcd3d88e54c2a76d457ed"),
			plaintext:  dehex("08000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f30313233340004"),
			ciphertext: dehex("fd96b715b93a13346af51e8acdf792cdc7b2686f8574c70e6b0cbf16291ded427ad73fec48cd298e0528a1f4c644a949fc31dc9279706ddba33f"),
		},
		{
			desc:       "Derived from IEEE 2.4.2 54-byte crypt",
			key:        dehex("691d3ee909d7f54167fd1ca0b5d769081f2bde1aee655fdbab80bd5295ae6be76b1f3ceb0bd5f74365ff1ea2"),
			nonce:      dehex("f0761e8dcd3d000176d457ed"),
			aad:        dehex("e20106d7cd0df0761e8dcd3d88e54c2a76d457ed"),
			plaintext:  dehex("08000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f30313233340004"),
			ciphertext: dehex("b68f6300c2e9ae833bdc070e24021a3477118e78ccf84e11a485d861476c300f175353d5cdf92008a4f878e6cc3577768085c50a0e98fda6cbb8"),
		},
		{
			desc:       "Derived from IEEE 2.5.1 65-byte auth",
			key:        dehex("013fe00b5f11be7f866d0cbbc55a7a90003ee10a5e10bf7e876c0dbac45b7b91033de2095d13bc7d846f0eb9"),
			nonce:      dehex("7cfde9f9e33724c68932d612"),
			aad:        dehex("84c5d513d2aaf6e5bbd2727788e523008932d6127cfde9f9e33724c608000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f0005"),
			plaintext:  dehex(""),
			ciphertext: dehex("cca20eecda6283f09bb3543dd99edb9b"),
		},
		{
			desc:       "Derived from IEEE 2.5.2 65-byte auth",
			key:        dehex("83c093b58de7ffe1c0da926ac43fb3609ac1c80fee1b624497ef942e2f79a82381c291b78fe5fde3c2d89068"),
			nonce:      dehex("7cfde9f9e33724c68932d612"),
			aad:        dehex("84c5d513d2aaf6e5bbd2727788e523008932d6127cfde9f9e33724c608000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f0005"),
			plaintext:  dehex(""),
			ciphertext: dehex("b232cc1da5117bf15003734fa599d271"),
		},
		{
			desc:       "Derived from IEEE 2.6.1 61-byte crypt",
			key:        dehex("013fe00b5f11be7f866d0cbbc55a7a90003ee10a5e10bf7e876c0dbac45b7b91033de2095d13bc7d846f0eb9"),
			nonce:      dehex("7cfde9f9e33724c68932d612"),
			aad:        dehex("84c5d513d2aaf6e5bbd2727788e52f008932d6127cfde9f9e33724c6"),
			plaintext:  dehex("08000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b0006"),
			ciphertext: dehex("ff1910d35ad7e5657890c7c560146fd038707f204b66edbc3d161f8ace244b985921023c436e3a1c3532ecd5d09a056d70be583f0d10829d9387d07d33d872e490"),
		},
		{
			desc:       "Derived from IEEE 2.6.2 61-byte crypt",
			key:        dehex("83c093b58de7ffe1c0da926ac43fb3609ac1c80fee1b624497ef942e2f79a82381c291b78fe5fde3c2d89068"),
			nonce:      dehex("7cfde9f9e33724c68932d612"),
			aad:        dehex("84c5d513d2aaf6e5bbd2727788e52f008932d6127cfde9f9e33724c6"),
			plaintext:  dehex("08000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b0006"),
			ciphertext: dehex("0db4cf956b5f97eca4eab82a6955307f9ae02a32dd7d93f83d66ad04e1cfdc5182ad12abdea5bbb619a1bd5fb9a573590fba908e9c7a46c1f7ba0905d1b55ffda4"),
		},
		{
			desc:       "Derived from IEEE 2.7.1 79-byte crypt",
			key:        dehex("88ee087fd95da9fbf6725aa9d757b0cd89ef097ed85ca8faf7735ba8d656b1cc8aec0a7ddb5fabf9f47058ab"),
			nonce:      dehex("7ae8e2ca4ec500012e58495c"),
			aad:        dehex("68f2e77696ce7ae8e2ca4ec588e541002e58495c08000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d0007"),
			plaintext:  dehex(""),
			ciphertext: dehex("813f0e630f96fb2d030f58d83f5cdfd0"),
		},
		{
			desc:       "Derived from IEEE 2.7.2 79-byte crypt",
			key:        dehex("4c973dbc7364621674f8b5b89e5c15511fced9216490fb1c1a2caa0ffe0407e54e953fbe7166601476fab7ba"),
			nonce:      dehex("7ae8e2ca4ec500012e58495c"),
			aad:        dehex("68f2e77696ce7ae8e2ca4ec588e541002e58495c08000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d0007"),
			plaintext:  dehex(""),
			ciphertext: dehex("77e5a44c21eb07188aacbd74d1980e97"),
		},
		{
			desc:       "Derived from IEEE 2.8.1 61-byte crypt",
			key:        dehex("88ee087fd95da9fbf6725aa9d757b0cd89ef097ed85ca8faf7735ba8d656b1cc8aec0a7ddb5fabf9f47058ab"),
			nonce:      dehex("7ae8e2ca4ec500012e58495c"),
			aad:        dehex("68f2e77696ce7ae8e2ca4ec588e54d002e58495c"),
			plaintext:  dehex("08000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748490008"),
			ciphertext: dehex("958ec3f6d60afeda99efd888f175e5fcd4c87b9bcc5c2f5426253a8b506296c8c43309ab2adb5939462541d95e80811e04e706b1498f2c407c7fb234f8cc01a647550ee6b557b35a7e3945381821f4"),
		},
		{
			desc:       "Derived from IEEE 2.8.2 61-byte crypt",
			key:        dehex("4c973dbc7364621674f8b5b89e5c15511fced9216490fb1c1a2caa0ffe0407e54e953fbe7166601476fab7ba"),
			nonce:      dehex("7ae8e2ca4ec500012e58495c"),
			aad:        dehex("68f2e77696ce7ae8e2ca4ec588e54d002e58495c"),
			plaintext:  dehex("08000f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748490008"),
			ciphertext: dehex("b44d072011cd36d272a9b7a98db9aa90cbc5c67b93ddce67c854503214e2e896ec7e9db649ed4bcf6f850aac0223d0cf92c83db80795c3a17ecc1248bb00591712b1ae71e268164196252162810b00"),
		}} {
		aead, err := newRekeyAEAD(test.key)
		if err != nil {
			t.Fatal("unexpected failure in newRekeyAEAD: ", err.Error())
		}
		// Seal must reproduce the expected ciphertext (which includes the tag).
		if got := aead.Seal(nil, test.nonce, test.plaintext, test.aad); !bytes.Equal(got, test.ciphertext) {
			t.Errorf("Unexpected ciphertext for test vector '%s':\nciphertext=%s\nwant=      %s",
				test.desc, hex.EncodeToString(got), hex.EncodeToString(test.ciphertext))
		}
		// Open must round-trip back to the original plaintext.
		if got, err := aead.Open(nil, test.nonce, test.ciphertext, test.aad); err != nil || !bytes.Equal(got, test.plaintext) {
			t.Errorf("Unexpected plaintext for test vector '%s':\nplaintext=%s (err=%v)\nwant=     %s",
				test.desc, hex.EncodeToString(got), err, hex.EncodeToString(test.plaintext))
		}
	}
}

// dehex decodes a hex string, panicking on malformed input; the empty
// string decodes to an empty (non-nil) slice.
func dehex(s string) []byte {
	if len(s) == 0 {
		return make([]byte, 0)
	}
	b, err := hex.DecodeString(s)
	if err != nil {
		panic(err)
	}
	return b
}
grpc-go-1.29.1/credentials/alts/internal/conn/aes128gcm.go000066400000000000000000000063011365033716300232030ustar00rootroot00000000000000/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package conn

import (
	"crypto/aes"
	"crypto/cipher"

	core "google.golang.org/grpc/credentials/alts/internal"
)

const (
	// Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in
	// each direction).
	overflowLenAES128GCM = 5
)

// aes128gcm is the struct that holds necessary information for ALTS record.
// The counter value is NOT included in the payload during the encryption and
// decryption operations.
type aes128gcm struct {
	// inCounter is used in ALTS record to check that incoming counters are
	// as expected, since ALTS record guarantees that messages are unwrapped
	// in the same order that the peer wrapped them.
	inCounter  Counter
	outCounter Counter
	aead       cipher.AEAD
}

// NewAES128GCM creates an instance that uses aes128gcm for ALTS record.
func NewAES128GCM(side core.Side, key []byte) (ALTSRecordCrypto, error) {
	c, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	a, err := cipher.NewGCM(c)
	if err != nil {
		return nil, err
	}
	return &aes128gcm{
		inCounter:  NewInCounter(side, overflowLenAES128GCM),
		outCounter: NewOutCounter(side, overflowLenAES128GCM),
		aead:       a,
	}, nil
}

// Encrypt is the encryption function. dst can contain bytes at the beginning of
// the ciphertext that will not be encrypted but will be authenticated. If dst
// has enough capacity to hold these bytes, the ciphertext and the tag, no
// allocation and copy operations will be performed. dst and plaintext do not
// overlap.
func (s *aes128gcm) Encrypt(dst, plaintext []byte) ([]byte, error) {
	// If we need to allocate an output buffer, we want to include space for
	// GCM tag to avoid forcing ALTS record to reallocate as well.
	dlen := len(dst)
	dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize)
	seq, err := s.outCounter.Value()
	if err != nil {
		return nil, err
	}
	data := out[:len(plaintext)]
	copy(data, plaintext) // data may alias plaintext
	// Seal appends the ciphertext and the tag to its first argument and
	// returns the updated slice. However, SliceForAppend above ensures that
	// dst has enough capacity to avoid a reallocation and copy due to the
	// append.
	dst = s.aead.Seal(dst[:dlen], seq, data, nil)
	s.outCounter.Inc()
	return dst, nil
}

// EncryptionOverhead reports how many extra bytes Encrypt appends (the GCM tag).
func (s *aes128gcm) EncryptionOverhead() int {
	return GcmTagSize
}

// Decrypt authenticates and decrypts ciphertext using the expected incoming
// counter as the nonce; any AEAD failure is reported as ErrAuth.
func (s *aes128gcm) Decrypt(dst, ciphertext []byte) ([]byte, error) {
	seq, err := s.inCounter.Value()
	if err != nil {
		return nil, err
	}
	// If dst is equal to ciphertext[:0], ciphertext storage is reused.
	plaintext, err := s.aead.Open(dst, seq, ciphertext, nil)
	if err != nil {
		return nil, ErrAuth
	}
	s.inCounter.Inc()
	return plaintext, nil
}
grpc-go-1.29.1/credentials/alts/internal/conn/aes128gcm_test.go000066400000000000000000000174341365033716300242530ustar00rootroot00000000000000/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package conn

import (
	"bytes"
	"testing"

	core "google.golang.org/grpc/credentials/alts/internal"
)

// cryptoTestVector is struct for a GCM test vector
type cryptoTestVector struct {
	key, counter, plaintext, ciphertext, tag []byte
	allocateDst                              bool
}

// getGCMCryptoPair outputs a client/server pair on aes128gcm.
func getGCMCryptoPair(key []byte, counter []byte, t *testing.T) (ALTSRecordCrypto, ALTSRecordCrypto) { client, err := NewAES128GCM(core.ClientSide, key) if err != nil { t.Fatalf("NewAES128GCM(ClientSide, key) = %v", err) } server, err := NewAES128GCM(core.ServerSide, key) if err != nil { t.Fatalf("NewAES128GCM(ServerSide, key) = %v", err) } // set counter if provided. if counter != nil { if CounterSide(counter) == core.ClientSide { client.(*aes128gcm).outCounter = CounterFromValue(counter, overflowLenAES128GCM) server.(*aes128gcm).inCounter = CounterFromValue(counter, overflowLenAES128GCM) } else { server.(*aes128gcm).outCounter = CounterFromValue(counter, overflowLenAES128GCM) client.(*aes128gcm).inCounter = CounterFromValue(counter, overflowLenAES128GCM) } } return client, server } func testGCMEncryptionDecryption(sender ALTSRecordCrypto, receiver ALTSRecordCrypto, test *cryptoTestVector, withCounter bool, t *testing.T) { // Ciphertext is: counter + encrypted text + tag. ciphertext := []byte(nil) if withCounter { ciphertext = append(ciphertext, test.counter...) } ciphertext = append(ciphertext, test.ciphertext...) ciphertext = append(ciphertext, test.tag...) // Decrypt. if got, err := receiver.Decrypt(nil, ciphertext); err != nil || !bytes.Equal(got, test.plaintext) { t.Errorf("key=%v\ncounter=%v\ntag=%v\nciphertext=%v\nDecrypt = %v, %v\nwant: %v", test.key, test.counter, test.tag, test.ciphertext, got, err, test.plaintext) } // Encrypt. var dst []byte if test.allocateDst { dst = make([]byte, len(test.plaintext)+sender.EncryptionOverhead()) } if got, err := sender.Encrypt(dst[:0], test.plaintext); err != nil || !bytes.Equal(got, ciphertext) { t.Errorf("key=%v\ncounter=%v\nplaintext=%v\nEncrypt = %v, %v\nwant: %v", test.key, test.counter, test.plaintext, got, err, ciphertext) } } // Test encrypt and decrypt using test vectors for aes128gcm. 
func (s) TestAES128GCMEncrypt(t *testing.T) {
	// Each vector appears twice: once decrypting/encrypting in place
	// (allocateDst=false) and once into a preallocated buffer.
	for _, test := range []cryptoTestVector{
		{
			key:         dehex("11754cd72aec309bf52f7687212e8957"),
			counter:     dehex("3c819d9a9bed087615030b65"),
			plaintext:   nil,
			ciphertext:  nil,
			tag:         dehex("250327c674aaf477aef2675748cf6971"),
			allocateDst: false,
		},
		{
			key:         dehex("ca47248ac0b6f8372a97ac43508308ed"),
			counter:     dehex("ffd2b598feabc9019262d2be"),
			plaintext:   nil,
			ciphertext:  nil,
			tag:         dehex("60d20404af527d248d893ae495707d1a"),
			allocateDst: false,
		},
		{
			key:         dehex("7fddb57453c241d03efbed3ac44e371c"),
			counter:     dehex("ee283a3fc75575e33efd4887"),
			plaintext:   dehex("d5de42b461646c255c87bd2962d3b9a2"),
			ciphertext:  dehex("2ccda4a5415cb91e135c2a0f78c9b2fd"),
			tag:         dehex("b36d1df9b9d5e596f83e8b7f52971cb3"),
			allocateDst: false,
		},
		{
			key:         dehex("ab72c77b97cb5fe9a382d9fe81ffdbed"),
			counter:     dehex("54cc7dc2c37ec006bcc6d1da"),
			plaintext:   dehex("007c5e5b3e59df24a7c355584fc1518d"),
			ciphertext:  dehex("0e1bde206a07a9c2c1b65300f8c64997"),
			tag:         dehex("2b4401346697138c7a4891ee59867d0c"),
			allocateDst: false,
		},
		{
			key:         dehex("11754cd72aec309bf52f7687212e8957"),
			counter:     dehex("3c819d9a9bed087615030b65"),
			plaintext:   nil,
			ciphertext:  nil,
			tag:         dehex("250327c674aaf477aef2675748cf6971"),
			allocateDst: true,
		},
		{
			key:         dehex("ca47248ac0b6f8372a97ac43508308ed"),
			counter:     dehex("ffd2b598feabc9019262d2be"),
			plaintext:   nil,
			ciphertext:  nil,
			tag:         dehex("60d20404af527d248d893ae495707d1a"),
			allocateDst: true,
		},
		{
			key:         dehex("7fddb57453c241d03efbed3ac44e371c"),
			counter:     dehex("ee283a3fc75575e33efd4887"),
			plaintext:   dehex("d5de42b461646c255c87bd2962d3b9a2"),
			ciphertext:  dehex("2ccda4a5415cb91e135c2a0f78c9b2fd"),
			tag:         dehex("b36d1df9b9d5e596f83e8b7f52971cb3"),
			allocateDst: true,
		},
		{
			key:         dehex("ab72c77b97cb5fe9a382d9fe81ffdbed"),
			counter:     dehex("54cc7dc2c37ec006bcc6d1da"),
			plaintext:   dehex("007c5e5b3e59df24a7c355584fc1518d"),
			ciphertext:  dehex("0e1bde206a07a9c2c1b65300f8c64997"),
			tag:         dehex("2b4401346697138c7a4891ee59867d0c"),
			allocateDst: true,
		},
	} {
		// Test encryption and decryption for aes128gcm.
		client, server := getGCMCryptoPair(test.key, test.counter, t)
		if CounterSide(test.counter) == core.ClientSide {
			testGCMEncryptionDecryption(client, server, &test, false, t)
		} else {
			testGCMEncryptionDecryption(server, client, &test, false, t)
		}
	}
}

func testGCMEncryptRoundtrip(client ALTSRecordCrypto, server ALTSRecordCrypto, t *testing.T) {
	// Encrypt.
	const plaintext = "This is plaintext."
	var err error
	buf := []byte(plaintext)
	buf, err = client.Encrypt(buf[:0], buf)
	if err != nil {
		t.Fatal("Encrypting with client-side context: unexpected error", err, "\n",
			"Plaintext:", []byte(plaintext))
	}
	// Encrypt a second message.
	const plaintext2 = "This is a second plaintext."
	buf2 := []byte(plaintext2)
	buf2, err = client.Encrypt(buf2[:0], buf2)
	if err != nil {
		t.Fatal("Encrypting with client-side context: unexpected error", err, "\n",
			"Plaintext:", []byte(plaintext2))
	}
	// Decryption fails: cannot decrypt second message before first.
	if got, err := server.Decrypt(nil, buf2); err == nil {
		t.Error("Decrypting client-side ciphertext with a client-side context unexpectedly succeeded; want unexpected counter error:\n",
			"  Original plaintext:", []byte(plaintext2), "\n",
			"  Ciphertext:", buf2, "\n",
			"  Decrypted plaintext:", got)
	}
	// Decryption fails: wrong counter space.
	if got, err := client.Decrypt(nil, buf); err == nil {
		t.Error("Decrypting client-side ciphertext with a client-side context unexpectedly succeeded; want counter space error:\n",
			"  Original plaintext:", []byte(plaintext), "\n",
			"  Ciphertext:", buf, "\n",
			"  Decrypted plaintext:", got)
	}
	// Decrypt first message.
	ciphertext := append([]byte(nil), buf...)
	buf, err = server.Decrypt(buf[:0], buf)
	if err != nil || string(buf) != plaintext {
		t.Fatal("Decrypting client-side ciphertext with a server-side context did not produce original content:\n",
			"  Original plaintext:", []byte(plaintext), "\n",
			"  Ciphertext:", ciphertext, "\n",
			"  Decryption error:", err, "\n",
			"  Decrypted plaintext:", buf)
	}
	// Decryption fails: replay attack.
	if got, err := server.Decrypt(nil, buf); err == nil {
		t.Error("Decrypting client-side ciphertext with a client-side context unexpectedly succeeded; want unexpected counter error:\n",
			"  Original plaintext:", []byte(plaintext), "\n",
			"  Ciphertext:", buf, "\n",
			"  Decrypted plaintext:", got)
	}
}

// Test encrypt and decrypt on roundtrip messages for aes128gcm.
func (s) TestAES128GCMEncryptRoundtrip(t *testing.T) {
	// Test for aes128gcm.
	key := make([]byte, 16)
	client, server := getGCMCryptoPair(key, nil, t)
	testGCMEncryptRoundtrip(client, server, t)
}
grpc-go-1.29.1/credentials/alts/internal/conn/aes128gcmrekey.go000066400000000000000000000071411365033716300242460ustar00rootroot00000000000000/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package conn

import (
	"crypto/cipher"

	core "google.golang.org/grpc/credentials/alts/internal"
)

const (
	// Overflow length n in bytes, never encrypt more than 2^(n*8) frames (in
	// each direction).
	overflowLenAES128GCMRekey = 8
	nonceLen                  = 12
	aeadKeyLen                = 16
	kdfKeyLen                 = 32
	kdfCounterOffset          = 2
	kdfCounterLen             = 6
	sizeUint64                = 8
)

// aes128gcmRekey is the struct that holds necessary information for ALTS record.
// The counter value is NOT included in the payload during the encryption and
// decryption operations.
type aes128gcmRekey struct {
	// inCounter is used in ALTS record to check that incoming counters are
	// as expected, since ALTS record guarantees that messages are unwrapped
	// in the same order that the peer wrapped them.
	inCounter  Counter
	outCounter Counter
	inAEAD     cipher.AEAD
	outAEAD    cipher.AEAD
}

// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying
// for ALTS record. The key argument should be 44 bytes, the first 32 bytes
// are used as a key for HKDF-expand and the remainining 12 bytes are used
// as a random mask for the counter.
func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) {
	inCounter := NewInCounter(side, overflowLenAES128GCMRekey)
	outCounter := NewOutCounter(side, overflowLenAES128GCMRekey)
	inAEAD, err := newRekeyAEAD(key)
	if err != nil {
		return nil, err
	}
	outAEAD, err := newRekeyAEAD(key)
	if err != nil {
		return nil, err
	}
	return &aes128gcmRekey{
		inCounter,
		outCounter,
		inAEAD,
		outAEAD,
	}, nil
}

// Encrypt is the encryption function. dst can contain bytes at the beginning of
// the ciphertext that will not be encrypted but will be authenticated. If dst
// has enough capacity to hold these bytes, the ciphertext and the tag, no
// allocation and copy operations will be performed. dst and plaintext do not
// overlap.
func (s *aes128gcmRekey) Encrypt(dst, plaintext []byte) ([]byte, error) {
	// If we need to allocate an output buffer, we want to include space for
	// GCM tag to avoid forcing ALTS record to reallocate as well.
	dlen := len(dst)
	dst, out := SliceForAppend(dst, len(plaintext)+GcmTagSize)
	seq, err := s.outCounter.Value()
	if err != nil {
		return nil, err
	}
	data := out[:len(plaintext)]
	copy(data, plaintext) // data may alias plaintext
	// Seal appends the ciphertext and the tag to its first argument and
	// returns the updated slice. However, SliceForAppend above ensures that
	// dst has enough capacity to avoid a reallocation and copy due to the
	// append.
	dst = s.outAEAD.Seal(dst[:dlen], seq, data, nil)
	s.outCounter.Inc()
	return dst, nil
}

// EncryptionOverhead reports how many extra bytes Encrypt appends (the GCM tag).
func (s *aes128gcmRekey) EncryptionOverhead() int {
	return GcmTagSize
}

// Decrypt authenticates and decrypts ciphertext using the expected incoming
// counter as the nonce; any AEAD failure is mapped to ErrAuth.
func (s *aes128gcmRekey) Decrypt(dst, ciphertext []byte) ([]byte, error) {
	seq, err := s.inCounter.Value()
	if err != nil {
		return nil, err
	}
	plaintext, err := s.inAEAD.Open(dst, seq, ciphertext, nil)
	if err != nil {
		return nil, ErrAuth
	}
	s.inCounter.Inc()
	return plaintext, nil
}
grpc-go-1.29.1/credentials/alts/internal/conn/aes128gcmrekey_test.go000066400000000000000000000100011365033716300252720ustar00rootroot00000000000000/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package conn

import (
	"testing"

	core "google.golang.org/grpc/credentials/alts/internal"
)

// getRekeyCryptoPair outputs a client/server pair on aes128gcmRekey.
func getRekeyCryptoPair(key []byte, counter []byte, t *testing.T) (ALTSRecordCrypto, ALTSRecordCrypto) { client, err := NewAES128GCMRekey(core.ClientSide, key) if err != nil { t.Fatalf("NewAES128GCMRekey(ClientSide, key) = %v", err) } server, err := NewAES128GCMRekey(core.ServerSide, key) if err != nil { t.Fatalf("NewAES128GCMRekey(ServerSide, key) = %v", err) } // set counter if provided. if counter != nil { if CounterSide(counter) == core.ClientSide { client.(*aes128gcmRekey).outCounter = CounterFromValue(counter, overflowLenAES128GCMRekey) server.(*aes128gcmRekey).inCounter = CounterFromValue(counter, overflowLenAES128GCMRekey) } else { server.(*aes128gcmRekey).outCounter = CounterFromValue(counter, overflowLenAES128GCMRekey) client.(*aes128gcmRekey).inCounter = CounterFromValue(counter, overflowLenAES128GCMRekey) } } return client, server } func testRekeyEncryptRoundtrip(client ALTSRecordCrypto, server ALTSRecordCrypto, t *testing.T) { // Encrypt. const plaintext = "This is plaintext." var err error buf := []byte(plaintext) buf, err = client.Encrypt(buf[:0], buf) if err != nil { t.Fatal("Encrypting with client-side context: unexpected error", err, "\n", "Plaintext:", []byte(plaintext)) } // Encrypt a second message. const plaintext2 = "This is a second plaintext." buf2 := []byte(plaintext2) buf2, err = client.Encrypt(buf2[:0], buf2) if err != nil { t.Fatal("Encrypting with client-side context: unexpected error", err, "\n", "Plaintext:", []byte(plaintext2)) } // Decryption fails: cannot decrypt second message before first. if got, err := server.Decrypt(nil, buf2); err == nil { t.Error("Decrypting client-side ciphertext with a client-side context unexpectedly succeeded; want unexpected counter error:\n", " Original plaintext:", []byte(plaintext2), "\n", " Ciphertext:", buf2, "\n", " Decrypted plaintext:", got) } // Decryption fails: wrong counter space. 
if got, err := client.Decrypt(nil, buf); err == nil { t.Error("Decrypting client-side ciphertext with a client-side context unexpectedly succeeded; want counter space error:\n", " Original plaintext:", []byte(plaintext), "\n", " Ciphertext:", buf, "\n", " Decrypted plaintext:", got) } // Decrypt first message. ciphertext := append([]byte(nil), buf...) buf, err = server.Decrypt(buf[:0], buf) if err != nil || string(buf) != plaintext { t.Fatal("Decrypting client-side ciphertext with a server-side context did not produce original content:\n", " Original plaintext:", []byte(plaintext), "\n", " Ciphertext:", ciphertext, "\n", " Decryption error:", err, "\n", " Decrypted plaintext:", buf) } // Decryption fails: replay attack. if got, err := server.Decrypt(nil, buf); err == nil { t.Error("Decrypting client-side ciphertext with a client-side context unexpectedly succeeded; want unexpected counter error:\n", " Original plaintext:", []byte(plaintext), "\n", " Ciphertext:", buf, "\n", " Decrypted plaintext:", got) } } // Test encrypt and decrypt on roundtrip messages for aes128gcmRekey. func (s) TestAES128GCMRekeyEncryptRoundtrip(t *testing.T) { // Test for aes128gcmRekey. key := make([]byte, 44) client, server := getRekeyCryptoPair(key, nil, t) testRekeyEncryptRoundtrip(client, server, t) } grpc-go-1.29.1/credentials/alts/internal/conn/common.go000066400000000000000000000043231365033716300230030ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package conn import ( "encoding/binary" "errors" "fmt" ) const ( // GcmTagSize is the GCM tag size is the difference in length between // plaintext and ciphertext. From crypto/cipher/gcm.go in Go crypto // library. GcmTagSize = 16 ) // ErrAuth occurs on authentication failure. var ErrAuth = errors.New("message authentication failed") // SliceForAppend takes a slice and a requested number of bytes. It returns a // slice with the contents of the given slice followed by that many bytes and a // second slice that aliases into it and contains only the extra bytes. If the // original slice has sufficient capacity then no allocation is performed. func SliceForAppend(in []byte, n int) (head, tail []byte) { if total := len(in) + n; cap(in) >= total { head = in[:total] } else { head = make([]byte, total) copy(head, in) } tail = head[len(in):] return head, tail } // ParseFramedMsg parse the provided buffer and returns a frame of the format // msgLength+msg and any remaining bytes in that buffer. func ParseFramedMsg(b []byte, maxLen uint32) ([]byte, []byte, error) { // If the size field is not complete, return the provided buffer as // remaining buffer. if len(b) < MsgLenFieldSize { return nil, b, nil } msgLenField := b[:MsgLenFieldSize] length := binary.LittleEndian.Uint32(msgLenField) if length > maxLen { return nil, nil, fmt.Errorf("received the frame length %d larger than the limit %d", length, maxLen) } if len(b) < int(length)+4 { // account for the first 4 msg length bytes. // Frame is not complete yet. return nil, b, nil } return b[:MsgLenFieldSize+length], b[MsgLenFieldSize+length:], nil } grpc-go-1.29.1/credentials/alts/internal/conn/counter.go000066400000000000000000000025351365033716300231750ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package conn import ( "errors" ) const counterLen = 12 var ( errInvalidCounter = errors.New("invalid counter") ) // Counter is a 96-bit, little-endian counter. type Counter struct { value [counterLen]byte invalid bool overflowLen int } // Value returns the current value of the counter as a byte slice. func (c *Counter) Value() ([]byte, error) { if c.invalid { return nil, errInvalidCounter } return c.value[:], nil } // Inc increments the counter and checks for overflow. func (c *Counter) Inc() { // If the counter is already invalid, there is no need to increase it. if c.invalid { return } i := 0 for ; i < c.overflowLen; i++ { c.value[i]++ if c.value[i] != 0 { break } } if i == c.overflowLen { c.invalid = true } } grpc-go-1.29.1/credentials/alts/internal/conn/counter_test.go000066400000000000000000000102731365033716300242320ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package conn import ( "bytes" "testing" core "google.golang.org/grpc/credentials/alts/internal" ) const ( testOverflowLen = 5 ) func (s) TestCounterSides(t *testing.T) { for _, side := range []core.Side{core.ClientSide, core.ServerSide} { outCounter := NewOutCounter(side, testOverflowLen) inCounter := NewInCounter(side, testOverflowLen) for i := 0; i < 1024; i++ { value, _ := outCounter.Value() if g, w := CounterSide(value), side; g != w { t.Errorf("after %d iterations, CounterSide(outCounter.Value()) = %v, want %v", i, g, w) break } value, _ = inCounter.Value() if g, w := CounterSide(value), side; g == w { t.Errorf("after %d iterations, CounterSide(inCounter.Value()) = %v, want %v", i, g, w) break } outCounter.Inc() inCounter.Inc() } } } func (s) TestCounterInc(t *testing.T) { for _, test := range []struct { counter []byte want []byte }{ { counter: []byte{0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, want: []byte{0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, }, { counter: []byte{0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80}, want: []byte{0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80}, }, { counter: []byte{0xff, 0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, want: []byte{0x00, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, }, { counter: []byte{0x42, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, want: []byte{0x43, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, }, { counter: []byte{0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, want: []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, }, { counter: []byte{0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80}, want: []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80}, }, } { c := CounterFromValue(test.counter, overflowLenAES128GCM) c.Inc() value, _ := c.Value() if g, w := value, test.want; !bytes.Equal(g, w) || c.invalid { t.Errorf("counter(%v).Inc() =\n%v, want\n%v", test.counter, g, w) } } } func (s) TestRolloverCounter(t *testing.T) { for _, test := range []struct { desc 
string value []byte overflowLen int }{ { desc: "testing overflow without rekeying 1", value: []byte{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80}, overflowLen: 5, }, { desc: "testing overflow without rekeying 2", value: []byte{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, overflowLen: 5, }, { desc: "testing overflow for rekeying mode 1", value: []byte{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x80}, overflowLen: 8, }, { desc: "testing overflow for rekeying mode 2", value: []byte{0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00}, overflowLen: 8, }, } { c := CounterFromValue(test.value, overflowLenAES128GCM) // First Inc() + Value() should work. c.Inc() _, err := c.Value() if err != nil { t.Errorf("%v: first Inc() + Value() unexpectedly failed: %v, want error", test.desc, err) } // Second Inc() + Value() should fail. c.Inc() _, err = c.Value() if err != errInvalidCounter { t.Errorf("%v: second Inc() + Value() unexpectedly succeeded: want %v", test.desc, errInvalidCounter) } // Third Inc() + Value() should also fail because the counter is // already in an invalid state. c.Inc() _, err = c.Value() if err != errInvalidCounter { t.Errorf("%v: Third Inc() + Value() unexpectedly succeeded: want %v", test.desc, errInvalidCounter) } } } grpc-go-1.29.1/credentials/alts/internal/conn/record.go000066400000000000000000000225201365033716300227700ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ // Package conn contains an implementation of a secure channel created by gRPC // handshakers. package conn import ( "encoding/binary" "fmt" "math" "net" core "google.golang.org/grpc/credentials/alts/internal" ) // ALTSRecordCrypto is the interface for gRPC ALTS record protocol. type ALTSRecordCrypto interface { // Encrypt encrypts the plaintext and computes the tag (if any) of dst // and plaintext, dst and plaintext do not overlap. Encrypt(dst, plaintext []byte) ([]byte, error) // EncryptionOverhead returns the tag size (if any) in bytes. EncryptionOverhead() int // Decrypt decrypts ciphertext and verify the tag (if any). dst and // ciphertext may alias exactly or not at all. To reuse ciphertext's // storage for the decrypted output, use ciphertext[:0] as dst. Decrypt(dst, ciphertext []byte) ([]byte, error) } // ALTSRecordFunc is a function type for factory functions that create // ALTSRecordCrypto instances. type ALTSRecordFunc func(s core.Side, keyData []byte) (ALTSRecordCrypto, error) const ( // MsgLenFieldSize is the byte size of the frame length field of a // framed message. MsgLenFieldSize = 4 // The byte size of the message type field of a framed message. msgTypeFieldSize = 4 // The bytes size limit for a ALTS record message. altsRecordLengthLimit = 1024 * 1024 // 1 MiB // The default bytes size of a ALTS record message. altsRecordDefaultLength = 4 * 1024 // 4KiB // Message type value included in ALTS record framing. altsRecordMsgType = uint32(0x06) // The initial write buffer size. altsWriteBufferInitialSize = 32 * 1024 // 32KiB // The maximum write buffer size. This *must* be multiple of // altsRecordDefaultLength. altsWriteBufferMaxSize = 512 * 1024 // 512KiB ) var ( protocols = make(map[string]ALTSRecordFunc) ) // RegisterProtocol register a ALTS record encryption protocol. 
func RegisterProtocol(protocol string, f ALTSRecordFunc) error { if _, ok := protocols[protocol]; ok { return fmt.Errorf("protocol %v is already registered", protocol) } protocols[protocol] = f return nil } // conn represents a secured connection. It implements the net.Conn interface. type conn struct { net.Conn crypto ALTSRecordCrypto // buf holds data that has been read from the connection and decrypted, // but has not yet been returned by Read. buf []byte payloadLengthLimit int // protected holds data read from the network but have not yet been // decrypted. This data might not compose a complete frame. protected []byte // writeBuf is a buffer used to contain encrypted frames before being // written to the network. writeBuf []byte // nextFrame stores the next frame (in protected buffer) info. nextFrame []byte // overhead is the calculated overhead of each frame. overhead int } // NewConn creates a new secure channel instance given the other party role and // handshaking result. func NewConn(c net.Conn, side core.Side, recordProtocol string, key []byte, protected []byte) (net.Conn, error) { newCrypto := protocols[recordProtocol] if newCrypto == nil { return nil, fmt.Errorf("negotiated unknown next_protocol %q", recordProtocol) } crypto, err := newCrypto(side, key) if err != nil { return nil, fmt.Errorf("protocol %q: %v", recordProtocol, err) } overhead := MsgLenFieldSize + msgTypeFieldSize + crypto.EncryptionOverhead() payloadLengthLimit := altsRecordDefaultLength - overhead if protected == nil { // We pre-allocate protected to be of size // 2*altsRecordDefaultLength-1 during initialization. We only // read from the network into protected when protected does not // contain a complete frame, which is at most // altsRecordDefaultLength-1 (bytes). And we read at most // altsRecordDefaultLength (bytes) data into protected at one // time. Therefore, 2*altsRecordDefaultLength-1 is large enough // to buffer data read from the network. 
protected = make([]byte, 0, 2*altsRecordDefaultLength-1) } altsConn := &conn{ Conn: c, crypto: crypto, payloadLengthLimit: payloadLengthLimit, protected: protected, writeBuf: make([]byte, altsWriteBufferInitialSize), nextFrame: protected, overhead: overhead, } return altsConn, nil } // Read reads and decrypts a frame from the underlying connection, and copies the // decrypted payload into b. If the size of the payload is greater than len(b), // Read retains the remaining bytes in an internal buffer, and subsequent calls // to Read will read from this buffer until it is exhausted. func (p *conn) Read(b []byte) (n int, err error) { if len(p.buf) == 0 { var framedMsg []byte framedMsg, p.nextFrame, err = ParseFramedMsg(p.nextFrame, altsRecordLengthLimit) if err != nil { return n, err } // Check whether the next frame to be decrypted has been // completely received yet. if len(framedMsg) == 0 { copy(p.protected, p.nextFrame) p.protected = p.protected[:len(p.nextFrame)] // Always copy next incomplete frame to the beginning of // the protected buffer and reset nextFrame to it. p.nextFrame = p.protected } // Check whether a complete frame has been received yet. for len(framedMsg) == 0 { if len(p.protected) == cap(p.protected) { tmp := make([]byte, len(p.protected), cap(p.protected)+altsRecordDefaultLength) copy(tmp, p.protected) p.protected = tmp } n, err = p.Conn.Read(p.protected[len(p.protected):min(cap(p.protected), len(p.protected)+altsRecordDefaultLength)]) if err != nil { return 0, err } p.protected = p.protected[:len(p.protected)+n] framedMsg, p.nextFrame, err = ParseFramedMsg(p.protected, altsRecordLengthLimit) if err != nil { return 0, err } } // Now we have a complete frame, decrypted it. 
msg := framedMsg[MsgLenFieldSize:] msgType := binary.LittleEndian.Uint32(msg[:msgTypeFieldSize]) if msgType&0xff != altsRecordMsgType { return 0, fmt.Errorf("received frame with incorrect message type %v, expected lower byte %v", msgType, altsRecordMsgType) } ciphertext := msg[msgTypeFieldSize:] // Decrypt requires that if the dst and ciphertext alias, they // must alias exactly. Code here used to use msg[:0], but msg // starts MsgLenFieldSize+msgTypeFieldSize bytes earlier than // ciphertext, so they alias inexactly. Using ciphertext[:0] // arranges the appropriate aliasing without needing to copy // ciphertext or use a separate destination buffer. For more info // check: https://golang.org/pkg/crypto/cipher/#AEAD. p.buf, err = p.crypto.Decrypt(ciphertext[:0], ciphertext) if err != nil { return 0, err } } n = copy(b, p.buf) p.buf = p.buf[n:] return n, nil } // Write encrypts, frames, and writes bytes from b to the underlying connection. func (p *conn) Write(b []byte) (n int, err error) { n = len(b) // Calculate the output buffer size with framing and encryption overhead. numOfFrames := int(math.Ceil(float64(len(b)) / float64(p.payloadLengthLimit))) size := len(b) + numOfFrames*p.overhead // If writeBuf is too small, increase its size up to the maximum size. 
partialBSize := len(b) if size > altsWriteBufferMaxSize { size = altsWriteBufferMaxSize const numOfFramesInMaxWriteBuf = altsWriteBufferMaxSize / altsRecordDefaultLength partialBSize = numOfFramesInMaxWriteBuf * p.payloadLengthLimit } if len(p.writeBuf) < size { p.writeBuf = make([]byte, size) } for partialBStart := 0; partialBStart < len(b); partialBStart += partialBSize { partialBEnd := partialBStart + partialBSize if partialBEnd > len(b) { partialBEnd = len(b) } partialB := b[partialBStart:partialBEnd] writeBufIndex := 0 for len(partialB) > 0 { payloadLen := len(partialB) if payloadLen > p.payloadLengthLimit { payloadLen = p.payloadLengthLimit } buf := partialB[:payloadLen] partialB = partialB[payloadLen:] // Write buffer contains: length, type, payload, and tag // if any. // 1. Fill in type field. msg := p.writeBuf[writeBufIndex+MsgLenFieldSize:] binary.LittleEndian.PutUint32(msg, altsRecordMsgType) // 2. Encrypt the payload and create a tag if any. msg, err = p.crypto.Encrypt(msg[:msgTypeFieldSize], buf) if err != nil { return n, err } // 3. Fill in the size field. binary.LittleEndian.PutUint32(p.writeBuf[writeBufIndex:], uint32(len(msg))) // 4. Increase writeBufIndex. writeBufIndex += len(buf) + p.overhead } nn, err := p.Conn.Write(p.writeBuf[:writeBufIndex]) if err != nil { // We need to calculate the actual data size that was // written. This means we need to remove header, // encryption overheads, and any partially-written // frame data. numOfWrittenFrames := int(math.Floor(float64(nn) / float64(altsRecordDefaultLength))) return partialBStart + numOfWrittenFrames*p.payloadLengthLimit, err } } return n, nil } func min(a, b int) int { if a < b { return a } return b } grpc-go-1.29.1/credentials/alts/internal/conn/record_test.go000066400000000000000000000220421365033716300240260ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package conn import ( "bytes" "encoding/binary" "fmt" "io" "math" "net" "reflect" "testing" core "google.golang.org/grpc/credentials/alts/internal" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } var ( nextProtocols = []string{"ALTSRP_GCM_AES128"} altsRecordFuncs = map[string]ALTSRecordFunc{ // ALTS handshaker protocols. "ALTSRP_GCM_AES128": func(s core.Side, keyData []byte) (ALTSRecordCrypto, error) { return NewAES128GCM(s, keyData) }, } ) func init() { for protocol, f := range altsRecordFuncs { if err := RegisterProtocol(protocol, f); err != nil { panic(err) } } } // testConn mimics a net.Conn to the peer. type testConn struct { net.Conn in *bytes.Buffer out *bytes.Buffer } func (c *testConn) Read(b []byte) (n int, err error) { return c.in.Read(b) } func (c *testConn) Write(b []byte) (n int, err error) { return c.out.Write(b) } func (c *testConn) Close() error { return nil } func newTestALTSRecordConn(in, out *bytes.Buffer, side core.Side, np string) *conn { key := []byte{ // 16 arbitrary bytes. 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x4f, 0x49} tc := testConn{ in: in, out: out, } c, err := NewConn(&tc, side, np, key, nil) if err != nil { panic(fmt.Sprintf("Unexpected error creating test ALTS record connection: %v", err)) } return c.(*conn) } func newConnPair(np string) (client, server *conn) { clientBuf := new(bytes.Buffer) serverBuf := new(bytes.Buffer) clientConn := newTestALTSRecordConn(clientBuf, serverBuf, core.ClientSide, np) serverConn := newTestALTSRecordConn(serverBuf, clientBuf, core.ServerSide, np) return clientConn, serverConn } func testPingPong(t *testing.T, np string) { clientConn, serverConn := newConnPair(np) clientMsg := []byte("Client Message") if n, err := clientConn.Write(clientMsg); n != len(clientMsg) || err != nil { t.Fatalf("Client Write() = %v, %v; want %v, ", n, err, len(clientMsg)) } rcvClientMsg := make([]byte, len(clientMsg)) if n, err := serverConn.Read(rcvClientMsg); n != len(rcvClientMsg) || err != nil { t.Fatalf("Server Read() = %v, %v; want %v, ", n, err, len(rcvClientMsg)) } if !reflect.DeepEqual(clientMsg, rcvClientMsg) { t.Fatalf("Client Write()/Server Read() = %v, want %v", rcvClientMsg, clientMsg) } serverMsg := []byte("Server Message") if n, err := serverConn.Write(serverMsg); n != len(serverMsg) || err != nil { t.Fatalf("Server Write() = %v, %v; want %v, ", n, err, len(serverMsg)) } rcvServerMsg := make([]byte, len(serverMsg)) if n, err := clientConn.Read(rcvServerMsg); n != len(rcvServerMsg) || err != nil { t.Fatalf("Client Read() = %v, %v; want %v, ", n, err, len(rcvServerMsg)) } if !reflect.DeepEqual(serverMsg, rcvServerMsg) { t.Fatalf("Server Write()/Client Read() = %v, want %v", rcvServerMsg, serverMsg) } } func (s) TestPingPong(t *testing.T) { for _, np := range nextProtocols { testPingPong(t, np) } } func testSmallReadBuffer(t *testing.T, np string) { clientConn, serverConn := newConnPair(np) msg := []byte("Very Important Message") if n, err := 
clientConn.Write(msg); err != nil { t.Fatalf("Write() = %v, %v; want %v, ", n, err, len(msg)) } rcvMsg := make([]byte, len(msg)) n := 2 // Arbitrary index to break rcvMsg in two. rcvMsg1 := rcvMsg[:n] rcvMsg2 := rcvMsg[n:] if n, err := serverConn.Read(rcvMsg1); n != len(rcvMsg1) || err != nil { t.Fatalf("Read() = %v, %v; want %v, ", n, err, len(rcvMsg1)) } if n, err := serverConn.Read(rcvMsg2); n != len(rcvMsg2) || err != nil { t.Fatalf("Read() = %v, %v; want %v, ", n, err, len(rcvMsg2)) } if !reflect.DeepEqual(msg, rcvMsg) { t.Fatalf("Write()/Read() = %v, want %v", rcvMsg, msg) } } func (s) TestSmallReadBuffer(t *testing.T) { for _, np := range nextProtocols { testSmallReadBuffer(t, np) } } func testLargeMsg(t *testing.T, np string) { clientConn, serverConn := newConnPair(np) // msgLen is such that the length in the framing is larger than the // default size of one frame. msgLen := altsRecordDefaultLength - msgTypeFieldSize - clientConn.crypto.EncryptionOverhead() + 1 msg := make([]byte, msgLen) if n, err := clientConn.Write(msg); n != len(msg) || err != nil { t.Fatalf("Write() = %v, %v; want %v, ", n, err, len(msg)) } rcvMsg := make([]byte, len(msg)) if n, err := io.ReadFull(serverConn, rcvMsg); n != len(rcvMsg) || err != nil { t.Fatalf("Read() = %v, %v; want %v, ", n, err, len(rcvMsg)) } if !reflect.DeepEqual(msg, rcvMsg) { t.Fatalf("Write()/Server Read() = %v, want %v", rcvMsg, msg) } } func (s) TestLargeMsg(t *testing.T) { for _, np := range nextProtocols { testLargeMsg(t, np) } } func testIncorrectMsgType(t *testing.T, np string) { // framedMsg is an empty ciphertext with correct framing but wrong // message type. 
framedMsg := make([]byte, MsgLenFieldSize+msgTypeFieldSize) binary.LittleEndian.PutUint32(framedMsg[:MsgLenFieldSize], msgTypeFieldSize) wrongMsgType := uint32(0x22) binary.LittleEndian.PutUint32(framedMsg[MsgLenFieldSize:], wrongMsgType) in := bytes.NewBuffer(framedMsg) c := newTestALTSRecordConn(in, nil, core.ClientSide, np) b := make([]byte, 1) if n, err := c.Read(b); n != 0 || err == nil { t.Fatalf("Read() = , want %v", fmt.Errorf("received frame with incorrect message type %v", wrongMsgType)) } } func (s) TestIncorrectMsgType(t *testing.T) { for _, np := range nextProtocols { testIncorrectMsgType(t, np) } } func testFrameTooLarge(t *testing.T, np string) { buf := new(bytes.Buffer) clientConn := newTestALTSRecordConn(nil, buf, core.ClientSide, np) serverConn := newTestALTSRecordConn(buf, nil, core.ServerSide, np) // payloadLen is such that the length in the framing is larger than // allowed in one frame. payloadLen := altsRecordLengthLimit - msgTypeFieldSize - clientConn.crypto.EncryptionOverhead() + 1 payload := make([]byte, payloadLen) c, err := clientConn.crypto.Encrypt(nil, payload) if err != nil { t.Fatalf(fmt.Sprintf("Error encrypting message: %v", err)) } msgLen := msgTypeFieldSize + len(c) framedMsg := make([]byte, MsgLenFieldSize+msgLen) binary.LittleEndian.PutUint32(framedMsg[:MsgLenFieldSize], uint32(msgTypeFieldSize+len(c))) msg := framedMsg[MsgLenFieldSize:] binary.LittleEndian.PutUint32(msg[:msgTypeFieldSize], altsRecordMsgType) copy(msg[msgTypeFieldSize:], c) if _, err = buf.Write(framedMsg); err != nil { t.Fatal(fmt.Sprintf("Unexpected error writing to buffer: %v", err)) } b := make([]byte, 1) if n, err := serverConn.Read(b); n != 0 || err == nil { t.Fatalf("Read() = , want %v", fmt.Errorf("received the frame length %d larger than the limit %d", altsRecordLengthLimit+1, altsRecordLengthLimit)) } } func (s) TestFrameTooLarge(t *testing.T) { for _, np := range nextProtocols { testFrameTooLarge(t, np) } } func testWriteLargeData(t *testing.T, np 
string) { // Test sending and receiving messages larger than the maximum write // buffer size. clientConn, serverConn := newConnPair(np) // Message size is intentionally chosen to not be multiple of // payloadLengthLimtit. msgSize := altsWriteBufferMaxSize + (100 * 1024) clientMsg := make([]byte, msgSize) for i := 0; i < msgSize; i++ { clientMsg[i] = 0xAA } if n, err := clientConn.Write(clientMsg); n != len(clientMsg) || err != nil { t.Fatalf("Client Write() = %v, %v; want %v, ", n, err, len(clientMsg)) } // We need to keep reading until the entire message is received. The // reason we set all bytes of the message to a value other than zero is // to avoid ambiguous zero-init value of rcvClientMsg buffer and the // actual received data. rcvClientMsg := make([]byte, 0, msgSize) numberOfExpectedFrames := int(math.Ceil(float64(msgSize) / float64(serverConn.payloadLengthLimit))) for i := 0; i < numberOfExpectedFrames; i++ { expectedRcvSize := serverConn.payloadLengthLimit if i == numberOfExpectedFrames-1 { // Last frame might be smaller. expectedRcvSize = msgSize % serverConn.payloadLengthLimit } tmpBuf := make([]byte, expectedRcvSize) if n, err := serverConn.Read(tmpBuf); n != len(tmpBuf) || err != nil { t.Fatalf("Server Read() = %v, %v; want %v, ", n, err, len(tmpBuf)) } rcvClientMsg = append(rcvClientMsg, tmpBuf...) } if !reflect.DeepEqual(clientMsg, rcvClientMsg) { t.Fatalf("Client Write()/Server Read() = %v, want %v", rcvClientMsg, clientMsg) } } func (s) TestWriteLargeData(t *testing.T) { for _, np := range nextProtocols { testWriteLargeData(t, np) } } grpc-go-1.29.1/credentials/alts/internal/conn/utils.go000066400000000000000000000040011365033716300226440ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package conn import core "google.golang.org/grpc/credentials/alts/internal" // NewOutCounter returns an outgoing counter initialized to the starting sequence // number for the client/server side of a connection. func NewOutCounter(s core.Side, overflowLen int) (c Counter) { c.overflowLen = overflowLen if s == core.ServerSide { // Server counters in ALTS record have the little-endian high bit // set. c.value[counterLen-1] = 0x80 } return } // NewInCounter returns an incoming counter initialized to the starting sequence // number for the client/server side of a connection. This is used in ALTS record // to check that incoming counters are as expected, since ALTS record guarantees // that messages are unwrapped in the same order that the peer wrapped them. func NewInCounter(s core.Side, overflowLen int) (c Counter) { c.overflowLen = overflowLen if s == core.ClientSide { // Server counters in ALTS record have the little-endian high bit // set. c.value[counterLen-1] = 0x80 } return } // CounterFromValue creates a new counter given an initial value. func CounterFromValue(value []byte, overflowLen int) (c Counter) { c.overflowLen = overflowLen copy(c.value[:], value) return } // CounterSide returns the connection side (client/server) a sequence counter is // associated with. 
func CounterSide(c []byte) core.Side { if c[counterLen-1]&0x80 == 0x80 { return core.ServerSide } return core.ClientSide } grpc-go-1.29.1/credentials/alts/internal/handshaker/000077500000000000000000000000001365033716300223355ustar00rootroot00000000000000grpc-go-1.29.1/credentials/alts/internal/handshaker/handshaker.go000066400000000000000000000302261365033716300247770ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package handshaker provides ALTS handshaking functionality for GCP. package handshaker import ( "context" "errors" "fmt" "io" "net" "sync" grpc "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" core "google.golang.org/grpc/credentials/alts/internal" "google.golang.org/grpc/credentials/alts/internal/authinfo" "google.golang.org/grpc/credentials/alts/internal/conn" altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" ) const ( // The maximum byte size of receive frames. frameLimit = 64 * 1024 // 64 KB rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY" // maxPendingHandshakes represents the maximum number of concurrent // handshakes. 
maxPendingHandshakes = 100 ) var ( hsProtocol = altspb.HandshakeProtocol_ALTS appProtocols = []string{"grpc"} recordProtocols = []string{rekeyRecordProtocolName} keyLength = map[string]int{ rekeyRecordProtocolName: 44, } altsRecordFuncs = map[string]conn.ALTSRecordFunc{ // ALTS handshaker protocols. rekeyRecordProtocolName: func(s core.Side, keyData []byte) (conn.ALTSRecordCrypto, error) { return conn.NewAES128GCMRekey(s, keyData) }, } // control number of concurrent created (but not closed) handshakers. mu sync.Mutex concurrentHandshakes = int64(0) // errDropped occurs when maxPendingHandshakes is reached. errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached") // errOutOfBound occurs when the handshake service returns a consumed // bytes value larger than the buffer that was passed to it originally. errOutOfBound = errors.New("handshaker service consumed bytes value is out-of-bound") ) func init() { for protocol, f := range altsRecordFuncs { if err := conn.RegisterProtocol(protocol, f); err != nil { panic(err) } } } func acquire() bool { mu.Lock() // If we need n to be configurable, we can pass it as an argument. n := int64(1) success := maxPendingHandshakes-concurrentHandshakes >= n if success { concurrentHandshakes += n } mu.Unlock() return success } func release() { mu.Lock() // If we need n to be configurable, we can pass it as an argument. n := int64(1) concurrentHandshakes -= n if concurrentHandshakes < 0 { mu.Unlock() panic("bad release") } mu.Unlock() } // ClientHandshakerOptions contains the client handshaker options that can // provided by the caller. type ClientHandshakerOptions struct { // ClientIdentity is the handshaker client local identity. ClientIdentity *altspb.Identity // TargetName is the server service account name for secure name // checking. TargetName string // TargetServiceAccounts contains a list of expected target service // accounts. 
One of these accounts should match one of the accounts in // the handshaker results. Otherwise, the handshake fails. TargetServiceAccounts []string // RPCVersions specifies the gRPC versions accepted by the client. RPCVersions *altspb.RpcProtocolVersions } // ServerHandshakerOptions contains the server handshaker options that can // provided by the caller. type ServerHandshakerOptions struct { // RPCVersions specifies the gRPC versions accepted by the server. RPCVersions *altspb.RpcProtocolVersions } // DefaultClientHandshakerOptions returns the default client handshaker options. func DefaultClientHandshakerOptions() *ClientHandshakerOptions { return &ClientHandshakerOptions{} } // DefaultServerHandshakerOptions returns the default client handshaker options. func DefaultServerHandshakerOptions() *ServerHandshakerOptions { return &ServerHandshakerOptions{} } // TODO: add support for future local and remote endpoint in both client options // and server options (server options struct does not exist now. When // caller can provide endpoints, it should be created. // altsHandshaker is used to complete a ALTS handshaking between client and // server. This handshaker talks to the ALTS handshaker service in the metadata // server. type altsHandshaker struct { // RPC stream used to access the ALTS Handshaker service. stream altsgrpc.HandshakerService_DoHandshakeClient // the connection to the peer. conn net.Conn // client handshake options. clientOpts *ClientHandshakerOptions // server handshake options. serverOpts *ServerHandshakerOptions // defines the side doing the handshake, client or server. side core.Side } // NewClientHandshaker creates a ALTS handshaker for GCP which contains an RPC // stub created using the passed conn and used to talk to the ALTS Handshaker // service in the metadata server. 
func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) if err != nil { return nil, err } return &altsHandshaker{ stream: stream, conn: c, clientOpts: opts, side: core.ClientSide, }, nil } // NewServerHandshaker creates a ALTS handshaker for GCP which contains an RPC // stub created using the passed conn and used to talk to the ALTS Handshaker // service in the metadata server. func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) if err != nil { return nil, err } return &altsHandshaker{ stream: stream, conn: c, serverOpts: opts, side: core.ServerSide, }, nil } // ClientHandshake starts and completes a client ALTS handshaking for GCP. Once // done, ClientHandshake returns a secure connection. func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { if !acquire() { return nil, nil, errDropped } defer release() if h.side != core.ClientSide { return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") } // Create target identities from service account list. 
targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts)) for _, account := range h.clientOpts.TargetServiceAccounts { targetIdentities = append(targetIdentities, &altspb.Identity{ IdentityOneof: &altspb.Identity_ServiceAccount{ ServiceAccount: account, }, }) } req := &altspb.HandshakerReq{ ReqOneof: &altspb.HandshakerReq_ClientStart{ ClientStart: &altspb.StartClientHandshakeReq{ HandshakeSecurityProtocol: hsProtocol, ApplicationProtocols: appProtocols, RecordProtocols: recordProtocols, TargetIdentities: targetIdentities, LocalIdentity: h.clientOpts.ClientIdentity, TargetName: h.clientOpts.TargetName, RpcVersions: h.clientOpts.RPCVersions, }, }, } conn, result, err := h.doHandshake(req) if err != nil { return nil, nil, err } authInfo := authinfo.New(result) return conn, authInfo, nil } // ServerHandshake starts and completes a server ALTS handshaking for GCP. Once // done, ServerHandshake returns a secure connection. func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { if !acquire() { return nil, nil, errDropped } defer release() if h.side != core.ServerSide { return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") } p := make([]byte, frameLimit) n, err := h.conn.Read(p) if err != nil { return nil, nil, err } // Prepare server parameters. // TODO: currently only ALTS parameters are provided. Might need to use // more options in the future. 
params := make(map[int32]*altspb.ServerHandshakeParameters) params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{ RecordProtocols: recordProtocols, } req := &altspb.HandshakerReq{ ReqOneof: &altspb.HandshakerReq_ServerStart{ ServerStart: &altspb.StartServerHandshakeReq{ ApplicationProtocols: appProtocols, HandshakeParameters: params, InBytes: p[:n], RpcVersions: h.serverOpts.RPCVersions, }, }, } conn, result, err := h.doHandshake(req) if err != nil { return nil, nil, err } authInfo := authinfo.New(result) return conn, authInfo, nil } func (h *altsHandshaker) doHandshake(req *altspb.HandshakerReq) (net.Conn, *altspb.HandshakerResult, error) { resp, err := h.accessHandshakerService(req) if err != nil { return nil, nil, err } // Check of the returned status is an error. if resp.GetStatus() != nil { if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) } } var extra []byte if req.GetServerStart() != nil { if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) { return nil, nil, errOutOfBound } extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] } result, extra, err := h.processUntilDone(resp, extra) if err != nil { return nil, nil, err } // The handshaker returns a 128 bytes key. It should be truncated based // on the returned record protocol. 
keyLen, ok := keyLength[result.RecordProtocol] if !ok { return nil, nil, fmt.Errorf("unknown resulted record protocol %v", result.RecordProtocol) } sc, err := conn.NewConn(h.conn, h.side, result.GetRecordProtocol(), result.KeyData[:keyLen], extra) if err != nil { return nil, nil, err } return sc, result, nil } func (h *altsHandshaker) accessHandshakerService(req *altspb.HandshakerReq) (*altspb.HandshakerResp, error) { if err := h.stream.Send(req); err != nil { return nil, err } resp, err := h.stream.Recv() if err != nil { return nil, err } return resp, nil } // processUntilDone processes the handshake until the handshaker service returns // the results. Handshaker service takes care of frame parsing, so we read // whatever received from the network and send it to the handshaker service. func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []byte) (*altspb.HandshakerResult, []byte, error) { for { if len(resp.OutFrames) > 0 { if _, err := h.conn.Write(resp.OutFrames); err != nil { return nil, nil, err } } if resp.Result != nil { return resp.Result, extra, nil } buf := make([]byte, frameLimit) n, err := h.conn.Read(buf) if err != nil && err != io.EOF { return nil, nil, err } // If there is nothing to send to the handshaker service, and // nothing is received from the peer, then we are stuck. // This covers the case when the peer is not responding. Note // that handshaker service connection issues are caught in // accessHandshakerService before we even get here. if len(resp.OutFrames) == 0 && n == 0 { return nil, nil, core.PeerNotRespondingError } // Append extra bytes from the previous interaction with the // handshaker service with the current buffer read from conn. p := append(extra, buf[:n]...) // From here on, p and extra point to the same slice. 
resp, err = h.accessHandshakerService(&altspb.HandshakerReq{ ReqOneof: &altspb.HandshakerReq_Next{ Next: &altspb.NextHandshakeMessageReq{ InBytes: p, }, }, }) if err != nil { return nil, nil, err } // Set extra based on handshaker service response. if resp.GetBytesConsumed() > uint32(len(p)) { return nil, nil, errOutOfBound } extra = p[resp.GetBytesConsumed():] } } // Close terminates the Handshaker. It should be called when the caller obtains // the secure connection. func (h *altsHandshaker) Close() { h.stream.CloseSend() } grpc-go-1.29.1/credentials/alts/internal/handshaker/handshaker_test.go000066400000000000000000000164421365033716300260420ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package handshaker import ( "bytes" "context" "testing" "time" grpc "google.golang.org/grpc" core "google.golang.org/grpc/credentials/alts/internal" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" "google.golang.org/grpc/credentials/alts/internal/testutil" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } var ( testRecordProtocol = rekeyRecordProtocolName testKey = []byte{ // 44 arbitrary bytes. 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x4f, 0x49, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x4f, 0x49, 0x1f, 0x8b, 0xd2, 0x4c, 0xce, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, } testServiceAccount = "test_service_account" testTargetServiceAccounts = []string{testServiceAccount} testClientIdentity = &altspb.Identity{ IdentityOneof: &altspb.Identity_Hostname{ Hostname: "i_am_a_client", }, } ) // testRPCStream mimics a altspb.HandshakerService_DoHandshakeClient object. type testRPCStream struct { grpc.ClientStream t *testing.T isClient bool // The resp expected to be returned by Recv(). Make sure this is set to // the content the test requires before Recv() is invoked. recvBuf *altspb.HandshakerResp // false if it is the first access to Handshaker service on Envelope. first bool // useful for testing concurrent calls. delay time.Duration } func (t *testRPCStream) Recv() (*altspb.HandshakerResp, error) { resp := t.recvBuf t.recvBuf = nil return resp, nil } func (t *testRPCStream) Send(req *altspb.HandshakerReq) error { var resp *altspb.HandshakerResp if !t.first { // Generate the bytes to be returned by Recv() for the initial // handshaking. t.first = true if t.isClient { resp = &altspb.HandshakerResp{ OutFrames: testutil.MakeFrame("ClientInit"), // Simulate consuming ServerInit. BytesConsumed: 14, } } else { resp = &altspb.HandshakerResp{ OutFrames: testutil.MakeFrame("ServerInit"), // Simulate consuming ClientInit. BytesConsumed: 14, } } } else { // Add delay to test concurrent calls. cleanup := stat.Update() defer cleanup() time.Sleep(t.delay) // Generate the response to be returned by Recv() for the // follow-up handshaking. result := &altspb.HandshakerResult{ RecordProtocol: testRecordProtocol, KeyData: testKey, } resp = &altspb.HandshakerResp{ Result: result, // Simulate consuming ClientFinished or ServerFinished. 
BytesConsumed: 18, } } t.recvBuf = resp return nil } func (t *testRPCStream) CloseSend() error { return nil } var stat testutil.Stats func (s) TestClientHandshake(t *testing.T) { for _, testCase := range []struct { delay time.Duration numberOfHandshakes int }{ {0 * time.Millisecond, 1}, {100 * time.Millisecond, 10 * maxPendingHandshakes}, } { errc := make(chan error) stat.Reset() for i := 0; i < testCase.numberOfHandshakes; i++ { stream := &testRPCStream{ t: t, isClient: true, } // Preload the inbound frames. f1 := testutil.MakeFrame("ServerInit") f2 := testutil.MakeFrame("ServerFinished") in := bytes.NewBuffer(f1) in.Write(f2) out := new(bytes.Buffer) tc := testutil.NewTestConn(in, out) chs := &altsHandshaker{ stream: stream, conn: tc, clientOpts: &ClientHandshakerOptions{ TargetServiceAccounts: testTargetServiceAccounts, ClientIdentity: testClientIdentity, }, side: core.ClientSide, } go func() { _, context, err := chs.ClientHandshake(context.Background()) if err == nil && context == nil { panic("expected non-nil ALTS context") } errc <- err chs.Close() }() } // Ensure all errors are expected. for i := 0; i < testCase.numberOfHandshakes; i++ { if err := <-errc; err != nil && err != errDropped { t.Errorf("ClientHandshake() = _, %v, want _, or %v", err, errDropped) } } // Ensure that there are no concurrent calls more than the limit. if stat.MaxConcurrentCalls > maxPendingHandshakes { t.Errorf("Observed %d concurrent handshakes; want <= %d", stat.MaxConcurrentCalls, maxPendingHandshakes) } } } func (s) TestServerHandshake(t *testing.T) { for _, testCase := range []struct { delay time.Duration numberOfHandshakes int }{ {0 * time.Millisecond, 1}, {100 * time.Millisecond, 10 * maxPendingHandshakes}, } { errc := make(chan error) stat.Reset() for i := 0; i < testCase.numberOfHandshakes; i++ { stream := &testRPCStream{ t: t, isClient: false, } // Preload the inbound frames. 
f1 := testutil.MakeFrame("ClientInit") f2 := testutil.MakeFrame("ClientFinished") in := bytes.NewBuffer(f1) in.Write(f2) out := new(bytes.Buffer) tc := testutil.NewTestConn(in, out) shs := &altsHandshaker{ stream: stream, conn: tc, serverOpts: DefaultServerHandshakerOptions(), side: core.ServerSide, } go func() { _, context, err := shs.ServerHandshake(context.Background()) if err == nil && context == nil { panic("expected non-nil ALTS context") } errc <- err shs.Close() }() } // Ensure all errors are expected. for i := 0; i < testCase.numberOfHandshakes; i++ { if err := <-errc; err != nil && err != errDropped { t.Errorf("ServerHandshake() = _, %v, want _, or %v", err, errDropped) } } // Ensure that there are no concurrent calls more than the limit. if stat.MaxConcurrentCalls > maxPendingHandshakes { t.Errorf("Observed %d concurrent handshakes; want <= %d", stat.MaxConcurrentCalls, maxPendingHandshakes) } } } // testUnresponsiveRPCStream is used for testing the PeerNotResponding case. type testUnresponsiveRPCStream struct { grpc.ClientStream } func (t *testUnresponsiveRPCStream) Recv() (*altspb.HandshakerResp, error) { return &altspb.HandshakerResp{}, nil } func (t *testUnresponsiveRPCStream) Send(req *altspb.HandshakerReq) error { return nil } func (t *testUnresponsiveRPCStream) CloseSend() error { return nil } func (s) TestPeerNotResponding(t *testing.T) { stream := &testUnresponsiveRPCStream{} chs := &altsHandshaker{ stream: stream, conn: testutil.NewUnresponsiveTestConn(), clientOpts: &ClientHandshakerOptions{ TargetServiceAccounts: testTargetServiceAccounts, ClientIdentity: testClientIdentity, }, side: core.ClientSide, } _, context, err := chs.ClientHandshake(context.Background()) chs.Close() if context != nil { t.Error("expected non-nil ALTS context") } if got, want := err, core.PeerNotRespondingError; got != want { t.Errorf("ClientHandshake() = %v, want %v", got, want) } } 
grpc-go-1.29.1/credentials/alts/internal/handshaker/service/000077500000000000000000000000001365033716300237755ustar00rootroot00000000000000grpc-go-1.29.1/credentials/alts/internal/handshaker/service/service.go000066400000000000000000000027471365033716300257760ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package service manages connections between the VM application and the ALTS // handshaker service. package service import ( "sync" grpc "google.golang.org/grpc" ) var ( // hsConn represents a connection to hypervisor handshaker service. hsConn *grpc.ClientConn mu sync.Mutex // hsDialer will be reassigned in tests. hsDialer = grpc.Dial ) // Dial dials the handshake service in the hypervisor. If a connection has // already been established, this function returns it. Otherwise, a new // connection is created. func Dial(hsAddress string) (*grpc.ClientConn, error) { mu.Lock() defer mu.Unlock() if hsConn == nil { // Create a new connection to the handshaker service. Note that // this connection stays open until the application is closed. var err error hsConn, err = hsDialer(hsAddress, grpc.WithInsecure()) if err != nil { return nil, err } } return hsConn, nil } grpc-go-1.29.1/credentials/alts/internal/handshaker/service/service_test.go000066400000000000000000000033371365033716300270310ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package service import ( "testing" grpc "google.golang.org/grpc" ) const ( // The address is irrelevant in this test. testAddress = "some_address" ) func TestDial(t *testing.T) { defer func() func() { temp := hsDialer hsDialer = func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { return &grpc.ClientConn{}, nil } return func() { hsDialer = temp } }() // Ensure that hsConn is nil at first. hsConn = nil // First call to Dial, it should create set hsConn. conn1, err := Dial(testAddress) if err != nil { t.Fatalf("first call to Dial failed: %v", err) } if conn1 == nil { t.Fatal("first call to Dial(_)=(nil, _), want not nil") } if got, want := hsConn, conn1; got != want { t.Fatalf("hsConn=%v, want %v", got, want) } // Second call to Dial should return conn1 above. conn2, err := Dial(testAddress) if err != nil { t.Fatalf("second call to Dial(_) failed: %v", err) } if got, want := conn2, conn1; got != want { t.Fatalf("second call to Dial(_)=(%v, _), want (%v,. 
_)", got, want) } if got, want := hsConn, conn1; got != want { t.Fatalf("hsConn=%v, want %v", got, want) } } grpc-go-1.29.1/credentials/alts/internal/proto/000077500000000000000000000000001365033716300213705ustar00rootroot00000000000000grpc-go-1.29.1/credentials/alts/internal/proto/grpc_gcp/000077500000000000000000000000001365033716300231545ustar00rootroot00000000000000grpc-go-1.29.1/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go000066400000000000000000000153011365033716300264530ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc/gcp/altscontext.proto package grpc_gcp import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type AltsContext struct { // The application protocol negotiated for this connection. ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` // The record protocol negotiated for this connection. RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` // The security level of the created secure channel. SecurityLevel SecurityLevel `protobuf:"varint,3,opt,name=security_level,json=securityLevel,proto3,enum=grpc.gcp.SecurityLevel" json:"security_level,omitempty"` // The peer service account. PeerServiceAccount string `protobuf:"bytes,4,opt,name=peer_service_account,json=peerServiceAccount,proto3" json:"peer_service_account,omitempty"` // The local service account. 
LocalServiceAccount string `protobuf:"bytes,5,opt,name=local_service_account,json=localServiceAccount,proto3" json:"local_service_account,omitempty"` // The RPC protocol versions supported by the peer. PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` // Additional attributes of the peer. PeerAttributes map[string]string `protobuf:"bytes,7,rep,name=peer_attributes,json=peerAttributes,proto3" json:"peer_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *AltsContext) Reset() { *m = AltsContext{} } func (m *AltsContext) String() string { return proto.CompactTextString(m) } func (*AltsContext) ProtoMessage() {} func (*AltsContext) Descriptor() ([]byte, []int) { return fileDescriptor_6647a41e53a575a3, []int{0} } func (m *AltsContext) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AltsContext.Unmarshal(m, b) } func (m *AltsContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AltsContext.Marshal(b, m, deterministic) } func (m *AltsContext) XXX_Merge(src proto.Message) { xxx_messageInfo_AltsContext.Merge(m, src) } func (m *AltsContext) XXX_Size() int { return xxx_messageInfo_AltsContext.Size(m) } func (m *AltsContext) XXX_DiscardUnknown() { xxx_messageInfo_AltsContext.DiscardUnknown(m) } var xxx_messageInfo_AltsContext proto.InternalMessageInfo func (m *AltsContext) GetApplicationProtocol() string { if m != nil { return m.ApplicationProtocol } return "" } func (m *AltsContext) GetRecordProtocol() string { if m != nil { return m.RecordProtocol } return "" } func (m *AltsContext) GetSecurityLevel() SecurityLevel { if m != nil { return m.SecurityLevel } return SecurityLevel_SECURITY_NONE } func (m *AltsContext) GetPeerServiceAccount() string { if m != nil { 
return m.PeerServiceAccount } return "" } func (m *AltsContext) GetLocalServiceAccount() string { if m != nil { return m.LocalServiceAccount } return "" } func (m *AltsContext) GetPeerRpcVersions() *RpcProtocolVersions { if m != nil { return m.PeerRpcVersions } return nil } func (m *AltsContext) GetPeerAttributes() map[string]string { if m != nil { return m.PeerAttributes } return nil } func init() { proto.RegisterType((*AltsContext)(nil), "grpc.gcp.AltsContext") proto.RegisterMapType((map[string]string)(nil), "grpc.gcp.AltsContext.PeerAttributesEntry") } func init() { proto.RegisterFile("grpc/gcp/altscontext.proto", fileDescriptor_6647a41e53a575a3) } var fileDescriptor_6647a41e53a575a3 = []byte{ // 411 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x4d, 0x6f, 0x13, 0x31, 0x10, 0x86, 0xb5, 0x0d, 0x2d, 0xe0, 0x88, 0xb4, 0xb8, 0xa9, 0x58, 0x45, 0x42, 0x8a, 0xb8, 0xb0, 0x5c, 0x76, 0x21, 0x5c, 0x10, 0x07, 0x50, 0x8a, 0x38, 0x20, 0x71, 0x88, 0xb6, 0x12, 0x07, 0x2e, 0x2b, 0x77, 0x3a, 0xb2, 0x2c, 0x5c, 0x8f, 0x35, 0x76, 0x22, 0xf2, 0xb3, 0xf9, 0x07, 0x68, 0xed, 0xcd, 0x07, 0x1f, 0xb7, 0x9d, 0x79, 0x9f, 0x19, 0xbf, 0xb3, 0x33, 0x62, 0xa6, 0xd9, 0x43, 0xa3, 0xc1, 0x37, 0xca, 0xc6, 0x00, 0xe4, 0x22, 0xfe, 0x8c, 0xb5, 0x67, 0x8a, 0x24, 0x1f, 0xf5, 0x5a, 0xad, 0xc1, 0xcf, 0xaa, 0x3d, 0x15, 0x59, 0xb9, 0xe0, 0x89, 0x63, 0x17, 0x10, 0xd6, 0x6c, 0xe2, 0xb6, 0x03, 0xba, 0xbf, 0x27, 0x97, 0x6b, 0x5e, 0xfc, 0x1a, 0x89, 0xf1, 0xd2, 0xc6, 0xf0, 0x29, 0x77, 0x92, 0x6f, 0xc4, 0x54, 0x79, 0x6f, 0x0d, 0xa8, 0x68, 0xc8, 0x75, 0x09, 0x02, 0xb2, 0x65, 0x31, 0x2f, 0xaa, 0xc7, 0xed, 0xe5, 0x91, 0xb6, 0x1a, 0x24, 0xf9, 0x52, 0x9c, 0x33, 0x02, 0xf1, 0xdd, 0x81, 0x3e, 0x49, 0xf4, 0x24, 0xa7, 0xf7, 0xe0, 0x07, 0x31, 0xd9, 0x9b, 0xb0, 0xb8, 0x41, 0x5b, 0x8e, 0xe6, 0x45, 0x35, 0x59, 0x3c, 0xab, 0x77, 0xc6, 0xeb, 0x9b, 0x41, 0xff, 0xda, 0xcb, 0xed, 0x93, 0x70, 0x1c, 0xca, 0xd7, 0x62, 0xea, 0x11, 0xb9, 0x0b, 0xc8, 0x1b, 0x03, 0xd8, 0x29, 
0x00, 0x5a, 0xbb, 0x58, 0x3e, 0x48, 0xaf, 0xc9, 0x5e, 0xbb, 0xc9, 0xd2, 0x32, 0x2b, 0x72, 0x21, 0xae, 0x2c, 0x81, 0xb2, 0xff, 0x94, 0x9c, 0xe6, 0x71, 0x92, 0xf8, 0x57, 0xcd, 0x17, 0xf1, 0x34, 0xbd, 0xc2, 0x1e, 0xba, 0x0d, 0x72, 0x30, 0xe4, 0x42, 0x79, 0x36, 0x2f, 0xaa, 0xf1, 0xe2, 0xf9, 0xc1, 0x68, 0xeb, 0x61, 0x37, 0xd7, 0xb7, 0x01, 0x6a, 0xcf, 0xfb, 0xba, 0xd6, 0xc3, 0x2e, 0x21, 0x5b, 0x91, 0x52, 0x9d, 0x8a, 0x91, 0xcd, 0xed, 0x3a, 0x62, 0x28, 0x1f, 0xce, 0x47, 0xd5, 0x78, 0xf1, 0xea, 0xd0, 0xe8, 0xe8, 0xe7, 0xd7, 0x2b, 0x44, 0x5e, 0xee, 0xd9, 0xcf, 0x2e, 0xf2, 0xb6, 0x9d, 0xf8, 0x3f, 0x92, 0xb3, 0xa5, 0xb8, 0xfc, 0x0f, 0x26, 0x2f, 0xc4, 0xe8, 0x07, 0x6e, 0x87, 0x35, 0xf5, 0x9f, 0x72, 0x2a, 0x4e, 0x37, 0xca, 0xae, 0x71, 0x58, 0x46, 0x0e, 0xde, 0x9f, 0xbc, 0x2b, 0xae, 0xad, 0xb8, 0x32, 0x94, 0x1d, 0xf4, 0x47, 0x54, 0x1b, 0x17, 0x91, 0x9d, 0xb2, 0xd7, 0x17, 0x47, 0x66, 0xd2, 0x74, 0xab, 0xe2, 0xfb, 0x47, 0x4d, 0xa4, 0x2d, 0xd6, 0x9a, 0xac, 0x72, 0xba, 0x26, 0xd6, 0x4d, 0x3a, 0x2e, 0x60, 0xbc, 0x43, 0x17, 0x8d, 0xb2, 0x21, 0x9d, 0x62, 0xb3, 0xeb, 0xd2, 0xa4, 0x2b, 0x48, 0x50, 0xa7, 0xc1, 0xdf, 0x9e, 0xa5, 0xf8, 0xed, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x8c, 0xe4, 0x6a, 0xba, 0x02, 0x00, 0x00, } grpc-go-1.29.1/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go000066400000000000000000001251661365033716300262260ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc/gcp/handshaker.proto package grpc_gcp import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. 
// A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type HandshakeProtocol int32 const ( // Default value. HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED HandshakeProtocol = 0 // TLS handshake protocol. HandshakeProtocol_TLS HandshakeProtocol = 1 // Application Layer Transport Security handshake protocol. HandshakeProtocol_ALTS HandshakeProtocol = 2 ) var HandshakeProtocol_name = map[int32]string{ 0: "HANDSHAKE_PROTOCOL_UNSPECIFIED", 1: "TLS", 2: "ALTS", } var HandshakeProtocol_value = map[string]int32{ "HANDSHAKE_PROTOCOL_UNSPECIFIED": 0, "TLS": 1, "ALTS": 2, } func (x HandshakeProtocol) String() string { return proto.EnumName(HandshakeProtocol_name, int32(x)) } func (HandshakeProtocol) EnumDescriptor() ([]byte, []int) { return fileDescriptor_54c074f40c7c7e99, []int{0} } type NetworkProtocol int32 const ( NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED NetworkProtocol = 0 NetworkProtocol_TCP NetworkProtocol = 1 NetworkProtocol_UDP NetworkProtocol = 2 ) var NetworkProtocol_name = map[int32]string{ 0: "NETWORK_PROTOCOL_UNSPECIFIED", 1: "TCP", 2: "UDP", } var NetworkProtocol_value = map[string]int32{ "NETWORK_PROTOCOL_UNSPECIFIED": 0, "TCP": 1, "UDP": 2, } func (x NetworkProtocol) String() string { return proto.EnumName(NetworkProtocol_name, int32(x)) } func (NetworkProtocol) EnumDescriptor() ([]byte, []int) { return fileDescriptor_54c074f40c7c7e99, []int{1} } type Endpoint struct { // IP address. It should contain an IPv4 or IPv6 string literal, e.g. // "192.168.0.1" or "2001:db8::1". IpAddress string `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` // Port number. Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` // Network protocol (e.g., TCP, UDP) associated with this endpoint. 
Protocol NetworkProtocol `protobuf:"varint,3,opt,name=protocol,proto3,enum=grpc.gcp.NetworkProtocol" json:"protocol,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Endpoint) Reset() { *m = Endpoint{} } func (m *Endpoint) String() string { return proto.CompactTextString(m) } func (*Endpoint) ProtoMessage() {} func (*Endpoint) Descriptor() ([]byte, []int) { return fileDescriptor_54c074f40c7c7e99, []int{0} } func (m *Endpoint) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Endpoint.Unmarshal(m, b) } func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Endpoint.Marshal(b, m, deterministic) } func (m *Endpoint) XXX_Merge(src proto.Message) { xxx_messageInfo_Endpoint.Merge(m, src) } func (m *Endpoint) XXX_Size() int { return xxx_messageInfo_Endpoint.Size(m) } func (m *Endpoint) XXX_DiscardUnknown() { xxx_messageInfo_Endpoint.DiscardUnknown(m) } var xxx_messageInfo_Endpoint proto.InternalMessageInfo func (m *Endpoint) GetIpAddress() string { if m != nil { return m.IpAddress } return "" } func (m *Endpoint) GetPort() int32 { if m != nil { return m.Port } return 0 } func (m *Endpoint) GetProtocol() NetworkProtocol { if m != nil { return m.Protocol } return NetworkProtocol_NETWORK_PROTOCOL_UNSPECIFIED } type Identity struct { // Types that are valid to be assigned to IdentityOneof: // *Identity_ServiceAccount // *Identity_Hostname IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` // Additional attributes of the identity. 
Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Identity) Reset() { *m = Identity{} } func (m *Identity) String() string { return proto.CompactTextString(m) } func (*Identity) ProtoMessage() {} func (*Identity) Descriptor() ([]byte, []int) { return fileDescriptor_54c074f40c7c7e99, []int{1} } func (m *Identity) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Identity.Unmarshal(m, b) } func (m *Identity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Identity.Marshal(b, m, deterministic) } func (m *Identity) XXX_Merge(src proto.Message) { xxx_messageInfo_Identity.Merge(m, src) } func (m *Identity) XXX_Size() int { return xxx_messageInfo_Identity.Size(m) } func (m *Identity) XXX_DiscardUnknown() { xxx_messageInfo_Identity.DiscardUnknown(m) } var xxx_messageInfo_Identity proto.InternalMessageInfo type isIdentity_IdentityOneof interface { isIdentity_IdentityOneof() } type Identity_ServiceAccount struct { ServiceAccount string `protobuf:"bytes,1,opt,name=service_account,json=serviceAccount,proto3,oneof"` } type Identity_Hostname struct { Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` } func (*Identity_ServiceAccount) isIdentity_IdentityOneof() {} func (*Identity_Hostname) isIdentity_IdentityOneof() {} func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { if m != nil { return m.IdentityOneof } return nil } func (m *Identity) GetServiceAccount() string { if x, ok := m.GetIdentityOneof().(*Identity_ServiceAccount); ok { return x.ServiceAccount } return "" } func (m *Identity) GetHostname() string { if x, ok := m.GetIdentityOneof().(*Identity_Hostname); ok { return x.Hostname } return "" } func (m *Identity) GetAttributes() map[string]string { 
if m != nil { return m.Attributes } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*Identity) XXX_OneofWrappers() []interface{} { return []interface{}{ (*Identity_ServiceAccount)(nil), (*Identity_Hostname)(nil), } } type StartClientHandshakeReq struct { // Handshake security protocol requested by the client. HandshakeSecurityProtocol HandshakeProtocol `protobuf:"varint,1,opt,name=handshake_security_protocol,json=handshakeSecurityProtocol,proto3,enum=grpc.gcp.HandshakeProtocol" json:"handshake_security_protocol,omitempty"` // The application protocols supported by the client, e.g., "h2" (for http2), // "grpc". ApplicationProtocols []string `protobuf:"bytes,2,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` // The record protocols supported by the client, e.g., // "ALTSRP_GCM_AES128". RecordProtocols []string `protobuf:"bytes,3,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` // (Optional) Describes which server identities are acceptable by the client. // If target identities are provided and none of them matches the peer // identity of the server, handshake will fail. TargetIdentities []*Identity `protobuf:"bytes,4,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"` // (Optional) Application may specify a local identity. Otherwise, the // handshaker chooses a default local identity. LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // (Optional) Local endpoint information of the connection to the server, // such as local IP address, port number, and network protocol. LocalEndpoint *Endpoint `protobuf:"bytes,6,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` // (Optional) Endpoint information of the remote server, such as IP address, // port number, and network protocol. 
RemoteEndpoint *Endpoint `protobuf:"bytes,7,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` // (Optional) If target name is provided, a secure naming check is performed // to verify that the peer authenticated identity is indeed authorized to run // the target name. TargetName string `protobuf:"bytes,8,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"` // (Optional) RPC protocol versions supported by the client. RpcVersions *RpcProtocolVersions `protobuf:"bytes,9,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` // (Optional) Maximum frame size supported by the client. MaxFrameSize uint32 `protobuf:"varint,10,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StartClientHandshakeReq) Reset() { *m = StartClientHandshakeReq{} } func (m *StartClientHandshakeReq) String() string { return proto.CompactTextString(m) } func (*StartClientHandshakeReq) ProtoMessage() {} func (*StartClientHandshakeReq) Descriptor() ([]byte, []int) { return fileDescriptor_54c074f40c7c7e99, []int{2} } func (m *StartClientHandshakeReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StartClientHandshakeReq.Unmarshal(m, b) } func (m *StartClientHandshakeReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StartClientHandshakeReq.Marshal(b, m, deterministic) } func (m *StartClientHandshakeReq) XXX_Merge(src proto.Message) { xxx_messageInfo_StartClientHandshakeReq.Merge(m, src) } func (m *StartClientHandshakeReq) XXX_Size() int { return xxx_messageInfo_StartClientHandshakeReq.Size(m) } func (m *StartClientHandshakeReq) XXX_DiscardUnknown() { xxx_messageInfo_StartClientHandshakeReq.DiscardUnknown(m) } var xxx_messageInfo_StartClientHandshakeReq proto.InternalMessageInfo func (m *StartClientHandshakeReq) 
GetHandshakeSecurityProtocol() HandshakeProtocol { if m != nil { return m.HandshakeSecurityProtocol } return HandshakeProtocol_HANDSHAKE_PROTOCOL_UNSPECIFIED } func (m *StartClientHandshakeReq) GetApplicationProtocols() []string { if m != nil { return m.ApplicationProtocols } return nil } func (m *StartClientHandshakeReq) GetRecordProtocols() []string { if m != nil { return m.RecordProtocols } return nil } func (m *StartClientHandshakeReq) GetTargetIdentities() []*Identity { if m != nil { return m.TargetIdentities } return nil } func (m *StartClientHandshakeReq) GetLocalIdentity() *Identity { if m != nil { return m.LocalIdentity } return nil } func (m *StartClientHandshakeReq) GetLocalEndpoint() *Endpoint { if m != nil { return m.LocalEndpoint } return nil } func (m *StartClientHandshakeReq) GetRemoteEndpoint() *Endpoint { if m != nil { return m.RemoteEndpoint } return nil } func (m *StartClientHandshakeReq) GetTargetName() string { if m != nil { return m.TargetName } return "" } func (m *StartClientHandshakeReq) GetRpcVersions() *RpcProtocolVersions { if m != nil { return m.RpcVersions } return nil } func (m *StartClientHandshakeReq) GetMaxFrameSize() uint32 { if m != nil { return m.MaxFrameSize } return 0 } type ServerHandshakeParameters struct { // The record protocols supported by the server, e.g., // "ALTSRP_GCM_AES128". RecordProtocols []string `protobuf:"bytes,1,rep,name=record_protocols,json=recordProtocols,proto3" json:"record_protocols,omitempty"` // (Optional) A list of local identities supported by the server, if // specified. Otherwise, the handshaker chooses a default local identity. 
LocalIdentities []*Identity `protobuf:"bytes,2,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ServerHandshakeParameters) Reset() { *m = ServerHandshakeParameters{} } func (m *ServerHandshakeParameters) String() string { return proto.CompactTextString(m) } func (*ServerHandshakeParameters) ProtoMessage() {} func (*ServerHandshakeParameters) Descriptor() ([]byte, []int) { return fileDescriptor_54c074f40c7c7e99, []int{3} } func (m *ServerHandshakeParameters) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServerHandshakeParameters.Unmarshal(m, b) } func (m *ServerHandshakeParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServerHandshakeParameters.Marshal(b, m, deterministic) } func (m *ServerHandshakeParameters) XXX_Merge(src proto.Message) { xxx_messageInfo_ServerHandshakeParameters.Merge(m, src) } func (m *ServerHandshakeParameters) XXX_Size() int { return xxx_messageInfo_ServerHandshakeParameters.Size(m) } func (m *ServerHandshakeParameters) XXX_DiscardUnknown() { xxx_messageInfo_ServerHandshakeParameters.DiscardUnknown(m) } var xxx_messageInfo_ServerHandshakeParameters proto.InternalMessageInfo func (m *ServerHandshakeParameters) GetRecordProtocols() []string { if m != nil { return m.RecordProtocols } return nil } func (m *ServerHandshakeParameters) GetLocalIdentities() []*Identity { if m != nil { return m.LocalIdentities } return nil } type StartServerHandshakeReq struct { // The application protocols supported by the server, e.g., "h2" (for http2), // "grpc". ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` // Handshake parameters (record protocols and local identities supported by // the server) mapped by the handshake protocol. 
Each handshake security // protocol (e.g., TLS or ALTS) has its own set of record protocols and local // identities. Since protobuf does not support enum as key to the map, the key // to handshake_parameters is the integer value of HandshakeProtocol enum. HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Bytes in out_frames returned from the peer's HandshakerResp. It is possible // that the peer's out_frames are split into multiple HandshakReq messages. InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` // (Optional) Local endpoint information of the connection to the client, // such as local IP address, port number, and network protocol. LocalEndpoint *Endpoint `protobuf:"bytes,4,opt,name=local_endpoint,json=localEndpoint,proto3" json:"local_endpoint,omitempty"` // (Optional) Endpoint information of the remote client, such as IP address, // port number, and network protocol. RemoteEndpoint *Endpoint `protobuf:"bytes,5,opt,name=remote_endpoint,json=remoteEndpoint,proto3" json:"remote_endpoint,omitempty"` // (Optional) RPC protocol versions supported by the server. RpcVersions *RpcProtocolVersions `protobuf:"bytes,6,opt,name=rpc_versions,json=rpcVersions,proto3" json:"rpc_versions,omitempty"` // (Optional) Maximum frame size supported by the server. 
MaxFrameSize uint32 `protobuf:"varint,7,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StartServerHandshakeReq) Reset() { *m = StartServerHandshakeReq{} } func (m *StartServerHandshakeReq) String() string { return proto.CompactTextString(m) } func (*StartServerHandshakeReq) ProtoMessage() {} func (*StartServerHandshakeReq) Descriptor() ([]byte, []int) { return fileDescriptor_54c074f40c7c7e99, []int{4} } func (m *StartServerHandshakeReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StartServerHandshakeReq.Unmarshal(m, b) } func (m *StartServerHandshakeReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StartServerHandshakeReq.Marshal(b, m, deterministic) } func (m *StartServerHandshakeReq) XXX_Merge(src proto.Message) { xxx_messageInfo_StartServerHandshakeReq.Merge(m, src) } func (m *StartServerHandshakeReq) XXX_Size() int { return xxx_messageInfo_StartServerHandshakeReq.Size(m) } func (m *StartServerHandshakeReq) XXX_DiscardUnknown() { xxx_messageInfo_StartServerHandshakeReq.DiscardUnknown(m) } var xxx_messageInfo_StartServerHandshakeReq proto.InternalMessageInfo func (m *StartServerHandshakeReq) GetApplicationProtocols() []string { if m != nil { return m.ApplicationProtocols } return nil } func (m *StartServerHandshakeReq) GetHandshakeParameters() map[int32]*ServerHandshakeParameters { if m != nil { return m.HandshakeParameters } return nil } func (m *StartServerHandshakeReq) GetInBytes() []byte { if m != nil { return m.InBytes } return nil } func (m *StartServerHandshakeReq) GetLocalEndpoint() *Endpoint { if m != nil { return m.LocalEndpoint } return nil } func (m *StartServerHandshakeReq) GetRemoteEndpoint() *Endpoint { if m != nil { return m.RemoteEndpoint } return nil } func (m *StartServerHandshakeReq) GetRpcVersions() *RpcProtocolVersions { if m != nil { return 
m.RpcVersions } return nil } func (m *StartServerHandshakeReq) GetMaxFrameSize() uint32 { if m != nil { return m.MaxFrameSize } return 0 } type NextHandshakeMessageReq struct { // Bytes in out_frames returned from the peer's HandshakerResp. It is possible // that the peer's out_frames are split into multiple NextHandshakerMessageReq // messages. InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *NextHandshakeMessageReq) Reset() { *m = NextHandshakeMessageReq{} } func (m *NextHandshakeMessageReq) String() string { return proto.CompactTextString(m) } func (*NextHandshakeMessageReq) ProtoMessage() {} func (*NextHandshakeMessageReq) Descriptor() ([]byte, []int) { return fileDescriptor_54c074f40c7c7e99, []int{5} } func (m *NextHandshakeMessageReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NextHandshakeMessageReq.Unmarshal(m, b) } func (m *NextHandshakeMessageReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NextHandshakeMessageReq.Marshal(b, m, deterministic) } func (m *NextHandshakeMessageReq) XXX_Merge(src proto.Message) { xxx_messageInfo_NextHandshakeMessageReq.Merge(m, src) } func (m *NextHandshakeMessageReq) XXX_Size() int { return xxx_messageInfo_NextHandshakeMessageReq.Size(m) } func (m *NextHandshakeMessageReq) XXX_DiscardUnknown() { xxx_messageInfo_NextHandshakeMessageReq.DiscardUnknown(m) } var xxx_messageInfo_NextHandshakeMessageReq proto.InternalMessageInfo func (m *NextHandshakeMessageReq) GetInBytes() []byte { if m != nil { return m.InBytes } return nil } type HandshakerReq struct { // Types that are valid to be assigned to ReqOneof: // *HandshakerReq_ClientStart // *HandshakerReq_ServerStart // *HandshakerReq_Next ReqOneof isHandshakerReq_ReqOneof `protobuf_oneof:"req_oneof"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte 
`json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HandshakerReq) Reset() { *m = HandshakerReq{} } func (m *HandshakerReq) String() string { return proto.CompactTextString(m) } func (*HandshakerReq) ProtoMessage() {} func (*HandshakerReq) Descriptor() ([]byte, []int) { return fileDescriptor_54c074f40c7c7e99, []int{6} } func (m *HandshakerReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HandshakerReq.Unmarshal(m, b) } func (m *HandshakerReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HandshakerReq.Marshal(b, m, deterministic) } func (m *HandshakerReq) XXX_Merge(src proto.Message) { xxx_messageInfo_HandshakerReq.Merge(m, src) } func (m *HandshakerReq) XXX_Size() int { return xxx_messageInfo_HandshakerReq.Size(m) } func (m *HandshakerReq) XXX_DiscardUnknown() { xxx_messageInfo_HandshakerReq.DiscardUnknown(m) } var xxx_messageInfo_HandshakerReq proto.InternalMessageInfo type isHandshakerReq_ReqOneof interface { isHandshakerReq_ReqOneof() } type HandshakerReq_ClientStart struct { ClientStart *StartClientHandshakeReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` } type HandshakerReq_ServerStart struct { ServerStart *StartServerHandshakeReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` } type HandshakerReq_Next struct { Next *NextHandshakeMessageReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` } func (*HandshakerReq_ClientStart) isHandshakerReq_ReqOneof() {} func (*HandshakerReq_ServerStart) isHandshakerReq_ReqOneof() {} func (*HandshakerReq_Next) isHandshakerReq_ReqOneof() {} func (m *HandshakerReq) GetReqOneof() isHandshakerReq_ReqOneof { if m != nil { return m.ReqOneof } return nil } func (m *HandshakerReq) GetClientStart() *StartClientHandshakeReq { if x, ok := m.GetReqOneof().(*HandshakerReq_ClientStart); ok { return x.ClientStart } return nil } func (m *HandshakerReq) GetServerStart() *StartServerHandshakeReq { if x, ok := 
m.GetReqOneof().(*HandshakerReq_ServerStart); ok { return x.ServerStart } return nil } func (m *HandshakerReq) GetNext() *NextHandshakeMessageReq { if x, ok := m.GetReqOneof().(*HandshakerReq_Next); ok { return x.Next } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*HandshakerReq) XXX_OneofWrappers() []interface{} { return []interface{}{ (*HandshakerReq_ClientStart)(nil), (*HandshakerReq_ServerStart)(nil), (*HandshakerReq_Next)(nil), } } type HandshakerResult struct { // The application protocol negotiated for this connection. ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` // The record protocol negotiated for this connection. RecordProtocol string `protobuf:"bytes,2,opt,name=record_protocol,json=recordProtocol,proto3" json:"record_protocol,omitempty"` // Cryptographic key data. The key data may be more than the key length // required for the record protocol, thus the client of the handshaker // service needs to truncate the key data into the right key length. KeyData []byte `protobuf:"bytes,3,opt,name=key_data,json=keyData,proto3" json:"key_data,omitempty"` // The authenticated identity of the peer. PeerIdentity *Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` // The local identity used in the handshake. LocalIdentity *Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // Indicate whether the handshaker service client should keep the channel // between the handshaker service open, e.g., in order to handle // post-handshake messages in the future. KeepChannelOpen bool `protobuf:"varint,6,opt,name=keep_channel_open,json=keepChannelOpen,proto3" json:"keep_channel_open,omitempty"` // The RPC protocol versions supported by the peer. 
PeerRpcVersions *RpcProtocolVersions `protobuf:"bytes,7,opt,name=peer_rpc_versions,json=peerRpcVersions,proto3" json:"peer_rpc_versions,omitempty"` // The maximum frame size of the peer. MaxFrameSize uint32 `protobuf:"varint,8,opt,name=max_frame_size,json=maxFrameSize,proto3" json:"max_frame_size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HandshakerResult) Reset() { *m = HandshakerResult{} } func (m *HandshakerResult) String() string { return proto.CompactTextString(m) } func (*HandshakerResult) ProtoMessage() {} func (*HandshakerResult) Descriptor() ([]byte, []int) { return fileDescriptor_54c074f40c7c7e99, []int{7} } func (m *HandshakerResult) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HandshakerResult.Unmarshal(m, b) } func (m *HandshakerResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HandshakerResult.Marshal(b, m, deterministic) } func (m *HandshakerResult) XXX_Merge(src proto.Message) { xxx_messageInfo_HandshakerResult.Merge(m, src) } func (m *HandshakerResult) XXX_Size() int { return xxx_messageInfo_HandshakerResult.Size(m) } func (m *HandshakerResult) XXX_DiscardUnknown() { xxx_messageInfo_HandshakerResult.DiscardUnknown(m) } var xxx_messageInfo_HandshakerResult proto.InternalMessageInfo func (m *HandshakerResult) GetApplicationProtocol() string { if m != nil { return m.ApplicationProtocol } return "" } func (m *HandshakerResult) GetRecordProtocol() string { if m != nil { return m.RecordProtocol } return "" } func (m *HandshakerResult) GetKeyData() []byte { if m != nil { return m.KeyData } return nil } func (m *HandshakerResult) GetPeerIdentity() *Identity { if m != nil { return m.PeerIdentity } return nil } func (m *HandshakerResult) GetLocalIdentity() *Identity { if m != nil { return m.LocalIdentity } return nil } func (m *HandshakerResult) GetKeepChannelOpen() bool { if m != nil { return m.KeepChannelOpen } return 
false } func (m *HandshakerResult) GetPeerRpcVersions() *RpcProtocolVersions { if m != nil { return m.PeerRpcVersions } return nil } func (m *HandshakerResult) GetMaxFrameSize() uint32 { if m != nil { return m.MaxFrameSize } return 0 } type HandshakerStatus struct { // The status code. This could be the gRPC status code. Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // The status details. Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HandshakerStatus) Reset() { *m = HandshakerStatus{} } func (m *HandshakerStatus) String() string { return proto.CompactTextString(m) } func (*HandshakerStatus) ProtoMessage() {} func (*HandshakerStatus) Descriptor() ([]byte, []int) { return fileDescriptor_54c074f40c7c7e99, []int{8} } func (m *HandshakerStatus) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HandshakerStatus.Unmarshal(m, b) } func (m *HandshakerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HandshakerStatus.Marshal(b, m, deterministic) } func (m *HandshakerStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_HandshakerStatus.Merge(m, src) } func (m *HandshakerStatus) XXX_Size() int { return xxx_messageInfo_HandshakerStatus.Size(m) } func (m *HandshakerStatus) XXX_DiscardUnknown() { xxx_messageInfo_HandshakerStatus.DiscardUnknown(m) } var xxx_messageInfo_HandshakerStatus proto.InternalMessageInfo func (m *HandshakerStatus) GetCode() uint32 { if m != nil { return m.Code } return 0 } func (m *HandshakerStatus) GetDetails() string { if m != nil { return m.Details } return "" } type HandshakerResp struct { // Frames to be given to the peer for the NextHandshakeMessageReq. May be // empty if no out_frames have to be sent to the peer or if in_bytes in the // HandshakerReq are incomplete. 
All the non-empty out frames must be sent to // the peer even if the handshaker status is not OK as these frames may // contain the alert frames. OutFrames []byte `protobuf:"bytes,1,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` // Number of bytes in the in_bytes consumed by the handshaker. It is possible // that part of in_bytes in HandshakerReq was unrelated to the handshake // process. BytesConsumed uint32 `protobuf:"varint,2,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` // This is set iff the handshake was successful. out_frames may still be set // to frames that needs to be forwarded to the peer. Result *HandshakerResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` // Status of the handshaker. Status *HandshakerStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HandshakerResp) Reset() { *m = HandshakerResp{} } func (m *HandshakerResp) String() string { return proto.CompactTextString(m) } func (*HandshakerResp) ProtoMessage() {} func (*HandshakerResp) Descriptor() ([]byte, []int) { return fileDescriptor_54c074f40c7c7e99, []int{9} } func (m *HandshakerResp) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HandshakerResp.Unmarshal(m, b) } func (m *HandshakerResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HandshakerResp.Marshal(b, m, deterministic) } func (m *HandshakerResp) XXX_Merge(src proto.Message) { xxx_messageInfo_HandshakerResp.Merge(m, src) } func (m *HandshakerResp) XXX_Size() int { return xxx_messageInfo_HandshakerResp.Size(m) } func (m *HandshakerResp) XXX_DiscardUnknown() { xxx_messageInfo_HandshakerResp.DiscardUnknown(m) } var xxx_messageInfo_HandshakerResp proto.InternalMessageInfo func (m *HandshakerResp) GetOutFrames() []byte { if m != nil { return m.OutFrames } return nil } 
func (m *HandshakerResp) GetBytesConsumed() uint32 { if m != nil { return m.BytesConsumed } return 0 } func (m *HandshakerResp) GetResult() *HandshakerResult { if m != nil { return m.Result } return nil } func (m *HandshakerResp) GetStatus() *HandshakerStatus { if m != nil { return m.Status } return nil } func init() { proto.RegisterEnum("grpc.gcp.HandshakeProtocol", HandshakeProtocol_name, HandshakeProtocol_value) proto.RegisterEnum("grpc.gcp.NetworkProtocol", NetworkProtocol_name, NetworkProtocol_value) proto.RegisterType((*Endpoint)(nil), "grpc.gcp.Endpoint") proto.RegisterType((*Identity)(nil), "grpc.gcp.Identity") proto.RegisterMapType((map[string]string)(nil), "grpc.gcp.Identity.AttributesEntry") proto.RegisterType((*StartClientHandshakeReq)(nil), "grpc.gcp.StartClientHandshakeReq") proto.RegisterType((*ServerHandshakeParameters)(nil), "grpc.gcp.ServerHandshakeParameters") proto.RegisterType((*StartServerHandshakeReq)(nil), "grpc.gcp.StartServerHandshakeReq") proto.RegisterMapType((map[int32]*ServerHandshakeParameters)(nil), "grpc.gcp.StartServerHandshakeReq.HandshakeParametersEntry") proto.RegisterType((*NextHandshakeMessageReq)(nil), "grpc.gcp.NextHandshakeMessageReq") proto.RegisterType((*HandshakerReq)(nil), "grpc.gcp.HandshakerReq") proto.RegisterType((*HandshakerResult)(nil), "grpc.gcp.HandshakerResult") proto.RegisterType((*HandshakerStatus)(nil), "grpc.gcp.HandshakerStatus") proto.RegisterType((*HandshakerResp)(nil), "grpc.gcp.HandshakerResp") } func init() { proto.RegisterFile("grpc/gcp/handshaker.proto", fileDescriptor_54c074f40c7c7e99) } var fileDescriptor_54c074f40c7c7e99 = []byte{ // 1203 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x6e, 0x1b, 0x45, 0x14, 0xce, 0xda, 0x4e, 0xe2, 0x1c, 0xc7, 0x3f, 0x99, 0xa6, 0xea, 0x26, 0x6d, 0xc1, 0x18, 0x10, 0x6e, 0x2f, 0x6c, 0x70, 0x41, 0xa5, 0x45, 0x55, 0x6b, 0x3b, 0x8e, 0x1c, 0x5a, 0x1c, 0x6b, 0x9d, 0x82, 0x44, 0x2f, 0x56, 0xd3, 0xf5, 
0xd4, 0x59, 0x79, 0x3d, 0xb3, 0x9d, 0x19, 0x87, 0xb8, 0xf7, 0xbc, 0x04, 0xf7, 0xbc, 0x06, 0x2f, 0xc1, 0x33, 0x20, 0xf1, 0x18, 0x68, 0x67, 0x7f, 0x6d, 0xaf, 0xab, 0x22, 0xb8, 0xdb, 0x39, 0xf3, 0x7d, 0x67, 0xce, 0x9c, 0xf3, 0x9d, 0xb3, 0x03, 0x47, 0x13, 0xee, 0x5a, 0xcd, 0x89, 0xe5, 0x36, 0x2f, 0x31, 0x1d, 0x8b, 0x4b, 0x3c, 0x25, 0xbc, 0xe1, 0x72, 0x26, 0x19, 0xca, 0x7b, 0x5b, 0x8d, 0x89, 0xe5, 0x1e, 0xd7, 0x23, 0x90, 0xe4, 0x98, 0x0a, 0x97, 0x71, 0x69, 0x0a, 0x62, 0xcd, 0xb9, 0x2d, 0x17, 0xa6, 0xc5, 0x66, 0x33, 0x46, 0x7d, 0x4e, 0x4d, 0x42, 0xbe, 0x47, 0xc7, 0x2e, 0xb3, 0xa9, 0x44, 0x77, 0x01, 0x6c, 0xd7, 0xc4, 0xe3, 0x31, 0x27, 0x42, 0xe8, 0x5a, 0x55, 0xab, 0xef, 0x19, 0x7b, 0xb6, 0xdb, 0xf6, 0x0d, 0x08, 0x41, 0xce, 0x73, 0xa4, 0x67, 0xaa, 0x5a, 0x7d, 0xdb, 0x50, 0xdf, 0xe8, 0x1b, 0xc8, 0x2b, 0x3f, 0x16, 0x73, 0xf4, 0x6c, 0x55, 0xab, 0x97, 0x5a, 0x47, 0x8d, 0x30, 0x8a, 0xc6, 0x80, 0xc8, 0x5f, 0x18, 0x9f, 0x0e, 0x03, 0x80, 0x11, 0x41, 0x6b, 0x7f, 0x6b, 0x90, 0x3f, 0x1b, 0x13, 0x2a, 0x6d, 0xb9, 0x40, 0xf7, 0xa0, 0x2c, 0x08, 0xbf, 0xb2, 0x2d, 0x62, 0x62, 0xcb, 0x62, 0x73, 0x2a, 0xfd, 0xb3, 0xfb, 0x5b, 0x46, 0x29, 0xd8, 0x68, 0xfb, 0x76, 0x74, 0x07, 0xf2, 0x97, 0x4c, 0x48, 0x8a, 0x67, 0x44, 0x85, 0xe1, 0x61, 0x22, 0x0b, 0xea, 0x00, 0x60, 0x29, 0xb9, 0xfd, 0x7a, 0x2e, 0x89, 0xd0, 0xb3, 0xd5, 0x6c, 0xbd, 0xd0, 0xaa, 0xc5, 0xe1, 0x84, 0x07, 0x36, 0xda, 0x11, 0xa8, 0x47, 0x25, 0x5f, 0x18, 0x09, 0xd6, 0xf1, 0x13, 0x28, 0xaf, 0x6c, 0xa3, 0x0a, 0x64, 0xa7, 0x64, 0x11, 0xe4, 0xc3, 0xfb, 0x44, 0x87, 0xb0, 0x7d, 0x85, 0x9d, 0x79, 0x10, 0x83, 0xe1, 0x2f, 0x1e, 0x67, 0xbe, 0xd5, 0x3a, 0x15, 0x28, 0xd9, 0xc1, 0x31, 0x26, 0xa3, 0x84, 0xbd, 0xa9, 0xfd, 0x99, 0x83, 0x5b, 0x23, 0x89, 0xb9, 0xec, 0x3a, 0x36, 0xa1, 0xb2, 0x1f, 0x16, 0xcd, 0x20, 0x6f, 0xd1, 0x2b, 0xb8, 0x1d, 0x15, 0x31, 0xae, 0x4f, 0x94, 0x50, 0x4d, 0x25, 0xf4, 0x76, 0x7c, 0x83, 0x88, 0x1c, 0xa5, 0xf4, 0x28, 0xe2, 0x8f, 0x02, 0x7a, 0xb8, 0x85, 0x1e, 0xc0, 0x4d, 0xec, 0xba, 0x8e, 0x6d, 0x61, 0x69, 0x33, 0x1a, 0x79, 0x15, 
0x7a, 0xa6, 0x9a, 0xad, 0xef, 0x19, 0x87, 0x89, 0xcd, 0x90, 0x23, 0xd0, 0x3d, 0xa8, 0x70, 0x62, 0x31, 0x3e, 0x4e, 0xe0, 0xb3, 0x0a, 0x5f, 0xf6, 0xed, 0x31, 0xf4, 0x29, 0x1c, 0x48, 0xcc, 0x27, 0x44, 0x9a, 0xc1, 0x8d, 0x6d, 0x22, 0xf4, 0x9c, 0x4a, 0x3a, 0x5a, 0x4f, 0xba, 0x51, 0xf1, 0xc1, 0x67, 0x11, 0x16, 0x3d, 0x82, 0x92, 0xc3, 0x2c, 0xec, 0x84, 0xfc, 0x85, 0xbe, 0x5d, 0xd5, 0x36, 0xb0, 0x8b, 0x0a, 0x19, 0x49, 0x26, 0xa2, 0x92, 0x40, 0xbb, 0xfa, 0xce, 0x2a, 0x35, 0x54, 0x75, 0x40, 0x8d, 0x44, 0xfe, 0x1d, 0x94, 0x39, 0x99, 0x31, 0x49, 0x62, 0xee, 0xee, 0x46, 0x6e, 0xc9, 0x87, 0x46, 0xe4, 0x8f, 0xa1, 0x10, 0xdc, 0x59, 0x49, 0x30, 0xaf, 0xca, 0x0f, 0xbe, 0x69, 0xe0, 0x49, 0xf0, 0x19, 0xec, 0x73, 0xd7, 0x32, 0xaf, 0x08, 0x17, 0x36, 0xa3, 0x42, 0xdf, 0x53, 0xae, 0xef, 0xc6, 0xae, 0x0d, 0xd7, 0x0a, 0x53, 0xf8, 0x63, 0x00, 0x32, 0x0a, 0xdc, 0xb5, 0xc2, 0x05, 0xfa, 0x0c, 0x4a, 0x33, 0x7c, 0x6d, 0xbe, 0xe1, 0x78, 0x46, 0x4c, 0x61, 0xbf, 0x23, 0x3a, 0x54, 0xb5, 0x7a, 0xd1, 0xd8, 0x9f, 0xe1, 0xeb, 0x53, 0xcf, 0x38, 0xb2, 0xdf, 0x91, 0xda, 0xaf, 0x1a, 0x1c, 0x8d, 0x08, 0xbf, 0x22, 0x3c, 0xd6, 0x04, 0xf6, 0x76, 0x25, 0xe1, 0xe9, 0x55, 0xd4, 0xd2, 0xab, 0xf8, 0x04, 0x2a, 0x4b, 0x45, 0xf0, 0x8a, 0x98, 0xd9, 0x58, 0xc4, 0x72, 0xb2, 0x0c, 0x36, 0x11, 0xb5, 0xdf, 0x43, 0x75, 0xaf, 0x04, 0xe3, 0xa9, 0x7b, 0xa3, 0x00, 0xb5, 0xf7, 0x08, 0x70, 0x06, 0x87, 0x71, 0x4b, 0xb8, 0xd1, 0x95, 0x82, 0x98, 0x1e, 0xc7, 0x31, 0x6d, 0x38, 0xb5, 0x91, 0x92, 0x0f, 0xbf, 0xcb, 0x6f, 0x5c, 0xa6, 0x64, 0xea, 0x08, 0xf2, 0x36, 0x35, 0x5f, 0x2f, 0xfc, 0x81, 0xa1, 0xd5, 0xf7, 0x8d, 0x5d, 0x9b, 0x76, 0xbc, 0x65, 0x8a, 0xc6, 0x72, 0xff, 0x41, 0x63, 0xdb, 0x1f, 0xac, 0xb1, 0x55, 0x09, 0xed, 0xfc, 0x0f, 0x12, 0xda, 0x5d, 0x97, 0xd0, 0xf1, 0x14, 0xf4, 0x4d, 0xb9, 0x4a, 0x8e, 0xbc, 0x6d, 0x7f, 0xe4, 0x3d, 0x4a, 0x8e, 0xbc, 0x42, 0xeb, 0xd3, 0x44, 0x21, 0x36, 0xc9, 0x30, 0x31, 0x17, 0x6b, 0x5f, 0xc3, 0xad, 0x01, 0xb9, 0x8e, 0xa7, 0xdf, 0x0f, 0x44, 0x08, 0x3c, 0x51, 0x32, 0x49, 0x96, 0x40, 0x5b, 0x2a, 0x41, 0xed, 
0x2f, 0x0d, 0x8a, 0x11, 0x85, 0x7b, 0xe0, 0x53, 0xd8, 0xb7, 0xd4, 0x1c, 0x35, 0x85, 0x57, 0x7f, 0x45, 0x28, 0xb4, 0x3e, 0x59, 0x91, 0xc5, 0xfa, 0xa8, 0xed, 0x6f, 0x19, 0x05, 0x9f, 0xa8, 0x00, 0x9e, 0x1f, 0xa1, 0xe2, 0x0e, 0xfc, 0x64, 0x52, 0xfd, 0xac, 0xcb, 0xcb, 0xf3, 0xe3, 0x13, 0x7d, 0x3f, 0x0f, 0x21, 0x47, 0xc9, 0xb5, 0x54, 0xda, 0x59, 0xe2, 0x6f, 0xb8, 0x6d, 0x7f, 0xcb, 0x50, 0x84, 0x4e, 0x01, 0xf6, 0x38, 0x79, 0x1b, 0xfc, 0x23, 0x7e, 0xcb, 0x42, 0x25, 0x79, 0x4f, 0x31, 0x77, 0x24, 0xfa, 0x0a, 0x0e, 0xd3, 0xda, 0x27, 0xf8, 0x0f, 0xdd, 0x48, 0xe9, 0x1e, 0xf4, 0x05, 0x94, 0x57, 0xfa, 0x3e, 0xf8, 0x43, 0x95, 0x96, 0xdb, 0xde, 0xcb, 0xf9, 0x94, 0x2c, 0xcc, 0x31, 0x96, 0x38, 0x94, 0xfd, 0x94, 0x2c, 0x4e, 0xb0, 0xc4, 0xe8, 0x21, 0x14, 0x5d, 0x42, 0x78, 0x3c, 0x94, 0x73, 0x1b, 0x87, 0xf2, 0xbe, 0x07, 0x5c, 0x9f, 0xc9, 0xff, 0x7e, 0x9c, 0xdf, 0x87, 0x83, 0x29, 0x21, 0xae, 0x69, 0x5d, 0x62, 0x4a, 0x89, 0x63, 0x32, 0x97, 0x50, 0xa5, 0xfb, 0xbc, 0x51, 0xf6, 0x36, 0xba, 0xbe, 0xfd, 0xdc, 0x25, 0x14, 0x9d, 0xc1, 0x81, 0x8a, 0x6f, 0xa9, 0x47, 0x76, 0x3f, 0xa4, 0x47, 0xca, 0x1e, 0xcf, 0x78, 0x6f, 0x9f, 0xe4, 0x53, 0x46, 0xed, 0xb3, 0x64, 0x6d, 0x46, 0x12, 0xcb, 0xb9, 0x7a, 0x0a, 0x59, 0x6c, 0x4c, 0x54, 0x2d, 0x8a, 0x86, 0xfa, 0x46, 0x3a, 0xec, 0x8e, 0x89, 0xc4, 0xb6, 0xfa, 0xc3, 0x7a, 0x49, 0x0f, 0x97, 0xb5, 0x3f, 0x34, 0x28, 0x2d, 0x95, 0xd7, 0xf5, 0x9e, 0x5a, 0x6c, 0x2e, 0xfd, 0xa3, 0x43, 0xd9, 0xef, 0xb1, 0xb9, 0x54, 0xc7, 0x0a, 0xf4, 0x39, 0x94, 0x54, 0x43, 0x98, 0x16, 0xa3, 0x62, 0x3e, 0x23, 0x63, 0xe5, 0xb2, 0x68, 0x14, 0x95, 0xb5, 0x1b, 0x18, 0x51, 0x0b, 0x76, 0xb8, 0x12, 0x4b, 0xa0, 0xbf, 0xe3, 0x94, 0xa7, 0x42, 0x20, 0x27, 0x23, 0x40, 0x7a, 0x1c, 0xa1, 0x2e, 0x11, 0x14, 0x36, 0x95, 0xe3, 0x5f, 0xd3, 0x08, 0x90, 0xf7, 0xbf, 0x87, 0x83, 0xb5, 0xa7, 0x07, 0xaa, 0xc1, 0x47, 0xfd, 0xf6, 0xe0, 0x64, 0xd4, 0x6f, 0x3f, 0xef, 0x99, 0x43, 0xe3, 0xfc, 0xe2, 0xbc, 0x7b, 0xfe, 0xc2, 0x7c, 0x39, 0x18, 0x0d, 0x7b, 0xdd, 0xb3, 0xd3, 0xb3, 0xde, 0x49, 0x65, 0x0b, 0xed, 0x42, 0xf6, 
0xe2, 0xc5, 0xa8, 0xa2, 0xa1, 0x3c, 0xe4, 0xda, 0x2f, 0x2e, 0x46, 0x95, 0xcc, 0xfd, 0x1e, 0x94, 0x57, 0xde, 0x85, 0xa8, 0x0a, 0x77, 0x06, 0xbd, 0x8b, 0x9f, 0xce, 0x8d, 0xe7, 0xef, 0xf3, 0xd3, 0x1d, 0x56, 0x34, 0xef, 0xe3, 0xe5, 0xc9, 0xb0, 0x92, 0x69, 0xbd, 0x4a, 0x84, 0xc4, 0x47, 0xfe, 0x2b, 0x11, 0x9d, 0x42, 0xe1, 0x84, 0x45, 0x66, 0x74, 0x2b, 0x3d, 0x1d, 0x6f, 0x8f, 0xf5, 0x0d, 0x79, 0x72, 0x6b, 0x5b, 0x75, 0xed, 0x4b, 0xad, 0x33, 0x85, 0x9b, 0x36, 0xf3, 0x31, 0xd8, 0x91, 0xa2, 0x61, 0x53, 0x49, 0x38, 0xc5, 0x4e, 0xa7, 0x1c, 0xc3, 0x55, 0xf4, 0x43, 0xed, 0xe7, 0xa7, 0x13, 0xc6, 0x26, 0x0e, 0x69, 0x4c, 0x98, 0x83, 0xe9, 0xa4, 0xc1, 0xf8, 0xa4, 0xa9, 0x1e, 0xdf, 0x16, 0x27, 0x4a, 0xde, 0xd8, 0x11, 0x4d, 0xcf, 0x49, 0x33, 0x74, 0xd2, 0x54, 0xbd, 0xa9, 0x40, 0xe6, 0xc4, 0x72, 0x5f, 0xef, 0xa8, 0xf5, 0x83, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc1, 0xf9, 0x9d, 0xf2, 0xd9, 0x0b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // HandshakerServiceClient is the client API for HandshakerService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type HandshakerServiceClient interface { // Handshaker service accepts a stream of handshaker request, returning a // stream of handshaker response. Client is expected to send exactly one // message with either client_start or server_start followed by one or more // messages with next. Each time client sends a request, the handshaker // service expects to respond. Client does not have to wait for service's // response before sending next request. 
DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) } type handshakerServiceClient struct { cc grpc.ClientConnInterface } func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceClient { return &handshakerServiceClient{cc} } func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { stream, err := c.cc.NewStream(ctx, &_HandshakerService_serviceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...) if err != nil { return nil, err } x := &handshakerServiceDoHandshakeClient{stream} return x, nil } type HandshakerService_DoHandshakeClient interface { Send(*HandshakerReq) error Recv() (*HandshakerResp, error) grpc.ClientStream } type handshakerServiceDoHandshakeClient struct { grpc.ClientStream } func (x *handshakerServiceDoHandshakeClient) Send(m *HandshakerReq) error { return x.ClientStream.SendMsg(m) } func (x *handshakerServiceDoHandshakeClient) Recv() (*HandshakerResp, error) { m := new(HandshakerResp) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // HandshakerServiceServer is the server API for HandshakerService service. type HandshakerServiceServer interface { // Handshaker service accepts a stream of handshaker request, returning a // stream of handshaker response. Client is expected to send exactly one // message with either client_start or server_start followed by one or more // messages with next. Each time client sends a request, the handshaker // service expects to respond. Client does not have to wait for service's // response before sending next request. DoHandshake(HandshakerService_DoHandshakeServer) error } // UnimplementedHandshakerServiceServer can be embedded to have forward compatible implementations. 
type UnimplementedHandshakerServiceServer struct { } func (*UnimplementedHandshakerServiceServer) DoHandshake(srv HandshakerService_DoHandshakeServer) error { return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") } func RegisterHandshakerServiceServer(s *grpc.Server, srv HandshakerServiceServer) { s.RegisterService(&_HandshakerService_serviceDesc, srv) } func _HandshakerService_DoHandshake_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(HandshakerServiceServer).DoHandshake(&handshakerServiceDoHandshakeServer{stream}) } type HandshakerService_DoHandshakeServer interface { Send(*HandshakerResp) error Recv() (*HandshakerReq, error) grpc.ServerStream } type handshakerServiceDoHandshakeServer struct { grpc.ServerStream } func (x *handshakerServiceDoHandshakeServer) Send(m *HandshakerResp) error { return x.ServerStream.SendMsg(m) } func (x *handshakerServiceDoHandshakeServer) Recv() (*HandshakerReq, error) { m := new(HandshakerReq) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _HandshakerService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.gcp.HandshakerService", HandlerType: (*HandshakerServiceServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "DoHandshake", Handler: _HandshakerService_DoHandshake_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "grpc/gcp/handshaker.proto", } grpc-go-1.29.1/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go000066400000000000000000000166601365033716300314470ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc/gcp/transport_security_common.proto package grpc_gcp import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // The security level of the created channel. The list is sorted in increasing // level of security. This order must always be maintained. type SecurityLevel int32 const ( SecurityLevel_SECURITY_NONE SecurityLevel = 0 SecurityLevel_INTEGRITY_ONLY SecurityLevel = 1 SecurityLevel_INTEGRITY_AND_PRIVACY SecurityLevel = 2 ) var SecurityLevel_name = map[int32]string{ 0: "SECURITY_NONE", 1: "INTEGRITY_ONLY", 2: "INTEGRITY_AND_PRIVACY", } var SecurityLevel_value = map[string]int32{ "SECURITY_NONE": 0, "INTEGRITY_ONLY": 1, "INTEGRITY_AND_PRIVACY": 2, } func (x SecurityLevel) String() string { return proto.EnumName(SecurityLevel_name, int32(x)) } func (SecurityLevel) EnumDescriptor() ([]byte, []int) { return fileDescriptor_b97e31e3cc23582a, []int{0} } // Max and min supported RPC protocol versions. type RpcProtocolVersions struct { // Maximum supported RPC version. MaxRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,1,opt,name=max_rpc_version,json=maxRpcVersion,proto3" json:"max_rpc_version,omitempty"` // Minimum supported RPC version. 
MinRpcVersion *RpcProtocolVersions_Version `protobuf:"bytes,2,opt,name=min_rpc_version,json=minRpcVersion,proto3" json:"min_rpc_version,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RpcProtocolVersions) Reset() { *m = RpcProtocolVersions{} } func (m *RpcProtocolVersions) String() string { return proto.CompactTextString(m) } func (*RpcProtocolVersions) ProtoMessage() {} func (*RpcProtocolVersions) Descriptor() ([]byte, []int) { return fileDescriptor_b97e31e3cc23582a, []int{0} } func (m *RpcProtocolVersions) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RpcProtocolVersions.Unmarshal(m, b) } func (m *RpcProtocolVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RpcProtocolVersions.Marshal(b, m, deterministic) } func (m *RpcProtocolVersions) XXX_Merge(src proto.Message) { xxx_messageInfo_RpcProtocolVersions.Merge(m, src) } func (m *RpcProtocolVersions) XXX_Size() int { return xxx_messageInfo_RpcProtocolVersions.Size(m) } func (m *RpcProtocolVersions) XXX_DiscardUnknown() { xxx_messageInfo_RpcProtocolVersions.DiscardUnknown(m) } var xxx_messageInfo_RpcProtocolVersions proto.InternalMessageInfo func (m *RpcProtocolVersions) GetMaxRpcVersion() *RpcProtocolVersions_Version { if m != nil { return m.MaxRpcVersion } return nil } func (m *RpcProtocolVersions) GetMinRpcVersion() *RpcProtocolVersions_Version { if m != nil { return m.MinRpcVersion } return nil } // RPC version contains a major version and a minor version. 
type RpcProtocolVersions_Version struct { Major uint32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` Minor uint32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RpcProtocolVersions_Version) Reset() { *m = RpcProtocolVersions_Version{} } func (m *RpcProtocolVersions_Version) String() string { return proto.CompactTextString(m) } func (*RpcProtocolVersions_Version) ProtoMessage() {} func (*RpcProtocolVersions_Version) Descriptor() ([]byte, []int) { return fileDescriptor_b97e31e3cc23582a, []int{0, 0} } func (m *RpcProtocolVersions_Version) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RpcProtocolVersions_Version.Unmarshal(m, b) } func (m *RpcProtocolVersions_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RpcProtocolVersions_Version.Marshal(b, m, deterministic) } func (m *RpcProtocolVersions_Version) XXX_Merge(src proto.Message) { xxx_messageInfo_RpcProtocolVersions_Version.Merge(m, src) } func (m *RpcProtocolVersions_Version) XXX_Size() int { return xxx_messageInfo_RpcProtocolVersions_Version.Size(m) } func (m *RpcProtocolVersions_Version) XXX_DiscardUnknown() { xxx_messageInfo_RpcProtocolVersions_Version.DiscardUnknown(m) } var xxx_messageInfo_RpcProtocolVersions_Version proto.InternalMessageInfo func (m *RpcProtocolVersions_Version) GetMajor() uint32 { if m != nil { return m.Major } return 0 } func (m *RpcProtocolVersions_Version) GetMinor() uint32 { if m != nil { return m.Minor } return 0 } func init() { proto.RegisterEnum("grpc.gcp.SecurityLevel", SecurityLevel_name, SecurityLevel_value) proto.RegisterType((*RpcProtocolVersions)(nil), "grpc.gcp.RpcProtocolVersions") proto.RegisterType((*RpcProtocolVersions_Version)(nil), "grpc.gcp.RpcProtocolVersions.Version") } func init() { proto.RegisterFile("grpc/gcp/transport_security_common.proto", 
fileDescriptor_b97e31e3cc23582a) } var fileDescriptor_b97e31e3cc23582a = []byte{ // 323 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x41, 0x4b, 0x3b, 0x31, 0x10, 0xc5, 0xff, 0x5b, 0xf8, 0xab, 0x44, 0x56, 0xeb, 0x6a, 0x41, 0xc5, 0x83, 0x08, 0x42, 0xf1, 0x90, 0x05, 0xc5, 0xb3, 0xb4, 0xb5, 0x48, 0xa1, 0x6e, 0xeb, 0xb6, 0x16, 0xea, 0x25, 0xc4, 0x18, 0x42, 0x24, 0x9b, 0x09, 0xb3, 0xb1, 0xd4, 0xaf, 0xec, 0xa7, 0x90, 0x4d, 0xbb, 0x14, 0xc1, 0x8b, 0xb7, 0xbc, 0xc7, 0xcc, 0x6f, 0x32, 0xf3, 0x48, 0x5b, 0xa1, 0x13, 0xa9, 0x12, 0x2e, 0xf5, 0xc8, 0x6d, 0xe9, 0x00, 0x3d, 0x2b, 0xa5, 0xf8, 0x40, 0xed, 0x3f, 0x99, 0x80, 0xa2, 0x00, 0x4b, 0x1d, 0x82, 0x87, 0x64, 0xa7, 0xaa, 0xa4, 0x4a, 0xb8, 0x8b, 0xaf, 0x88, 0x1c, 0xe6, 0x4e, 0x8c, 0x2b, 0x5b, 0x80, 0x99, 0x49, 0x2c, 0x35, 0xd8, 0x32, 0x79, 0x24, 0xfb, 0x05, 0x5f, 0x32, 0x74, 0x82, 0x2d, 0x56, 0xde, 0x71, 0x74, 0x1e, 0xb5, 0x77, 0xaf, 0x2f, 0x69, 0xdd, 0x4b, 0x7f, 0xe9, 0xa3, 0xeb, 0x47, 0x1e, 0x17, 0x7c, 0x99, 0x3b, 0xb1, 0x96, 0x01, 0xa7, 0xed, 0x0f, 0x5c, 0xe3, 0x6f, 0x38, 0x6d, 0x37, 0xb8, 0xd3, 0x5b, 0xb2, 0x5d, 0x93, 0x8f, 0xc8, 0xff, 0x82, 0xbf, 0x03, 0x86, 0xef, 0xc5, 0xf9, 0x4a, 0x04, 0x57, 0x5b, 0xc0, 0x30, 0xa5, 0x72, 0x2b, 0x71, 0xf5, 0x44, 0xe2, 0xc9, 0xfa, 0x1e, 0x43, 0xb9, 0x90, 0x26, 0x39, 0x20, 0xf1, 0xa4, 0xdf, 0x7b, 0xce, 0x07, 0xd3, 0x39, 0xcb, 0x46, 0x59, 0xbf, 0xf9, 0x2f, 0x49, 0xc8, 0xde, 0x20, 0x9b, 0xf6, 0x1f, 0x82, 0x37, 0xca, 0x86, 0xf3, 0x66, 0x94, 0x9c, 0x90, 0xd6, 0xc6, 0xeb, 0x64, 0xf7, 0x6c, 0x9c, 0x0f, 0x66, 0x9d, 0xde, 0xbc, 0xd9, 0xe8, 0x2e, 0x49, 0x4b, 0xc3, 0x6a, 0x07, 0x6e, 0x7c, 0x49, 0xb5, 0xf5, 0x12, 0x2d, 0x37, 0xdd, 0xb3, 0x69, 0x9d, 0x41, 0x3d, 0xb2, 0x17, 0x12, 0x08, 0x2b, 0x8e, 0xa3, 0x97, 0x3b, 0x05, 0xa0, 0x8c, 0xa4, 0x0a, 0x0c, 0xb7, 0x8a, 0x02, 0xaa, 0x34, 0xc4, 0x27, 0x50, 0xbe, 0x49, 0xeb, 0x35, 0x37, 0x65, 0x5a, 0x11, 0xd3, 0x9a, 0x98, 0x86, 0xe8, 0x42, 0x11, 0x53, 0xc2, 0xbd, 0x6e, 0x05, 0x7d, 0xf3, 0x1d, 0x00, 
0x00, 0xff, 0xff, 0x31, 0x14, 0xb4, 0x11, 0xf6, 0x01, 0x00, 0x00, } grpc-go-1.29.1/credentials/alts/internal/regenerate.sh000077500000000000000000000023411365033716300227050ustar00rootroot00000000000000#!/bin/bash # Copyright 2018 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux -o pipefail TMP=$(mktemp -d) function finish { rm -rf "$TMP" } trap finish EXIT pushd "$TMP" mkdir -p grpc/gcp curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/gcp/altscontext.proto > grpc/gcp/altscontext.proto curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/gcp/handshaker.proto > grpc/gcp/handshaker.proto curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/gcp/transport_security_common.proto > grpc/gcp/transport_security_common.proto protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/gcp/*.proto popd rm -f proto/grpc_gcp/*.pb.go cp "$TMP"/grpc/gcp/*.pb.go proto/grpc_gcp/ grpc-go-1.29.1/credentials/alts/internal/testutil/000077500000000000000000000000001365033716300221025ustar00rootroot00000000000000grpc-go-1.29.1/credentials/alts/internal/testutil/testutil.go000066400000000000000000000055141365033716300243130ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package testutil include useful test utilities for the handshaker. package testutil import ( "bytes" "encoding/binary" "io" "net" "sync" "google.golang.org/grpc/credentials/alts/internal/conn" ) // Stats is used to collect statistics about concurrent handshake calls. type Stats struct { mu sync.Mutex calls int MaxConcurrentCalls int } // Update updates the statistics by adding one call. func (s *Stats) Update() func() { s.mu.Lock() s.calls++ if s.calls > s.MaxConcurrentCalls { s.MaxConcurrentCalls = s.calls } s.mu.Unlock() return func() { s.mu.Lock() s.calls-- s.mu.Unlock() } } // Reset resets the statistics. func (s *Stats) Reset() { s.mu.Lock() defer s.mu.Unlock() s.calls = 0 s.MaxConcurrentCalls = 0 } // testConn mimics a net.Conn to the peer. type testConn struct { net.Conn in *bytes.Buffer out *bytes.Buffer } // NewTestConn creates a new instance of testConn object. func NewTestConn(in *bytes.Buffer, out *bytes.Buffer) net.Conn { return &testConn{ in: in, out: out, } } // Read reads from the in buffer. func (c *testConn) Read(b []byte) (n int, err error) { return c.in.Read(b) } // Write writes to the out buffer. func (c *testConn) Write(b []byte) (n int, err error) { return c.out.Write(b) } // Close closes the testConn object. func (c *testConn) Close() error { return nil } // unresponsiveTestConn mimics a net.Conn for an unresponsive peer. It is used // for testing the PeerNotResponding case. type unresponsiveTestConn struct { net.Conn } // NewUnresponsiveTestConn creates a new instance of unresponsiveTestConn object. 
func NewUnresponsiveTestConn() net.Conn { return &unresponsiveTestConn{} } // Read reads from the in buffer. func (c *unresponsiveTestConn) Read(b []byte) (n int, err error) { return 0, io.EOF } // Write writes to the out buffer. func (c *unresponsiveTestConn) Write(b []byte) (n int, err error) { return 0, nil } // Close closes the TestConn object. func (c *unresponsiveTestConn) Close() error { return nil } // MakeFrame creates a handshake frame. func MakeFrame(pl string) []byte { f := make([]byte, len(pl)+conn.MsgLenFieldSize) binary.LittleEndian.PutUint32(f, uint32(len(pl))) copy(f[conn.MsgLenFieldSize:], []byte(pl)) return f } grpc-go-1.29.1/credentials/alts/utils.go000066400000000000000000000111651365033716300201040ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package alts import ( "context" "errors" "fmt" "io" "io/ioutil" "log" "os" "os/exec" "regexp" "runtime" "strings" "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" ) const ( linuxProductNameFile = "/sys/class/dmi/id/product_name" windowsCheckCommand = "powershell.exe" windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" powershellOutputFilter = "Manufacturer" windowsManufacturerRegex = ":(.*)" ) type platformError string func (k platformError) Error() string { return fmt.Sprintf("%s is not supported", string(k)) } var ( // The following two variables will be reassigned in tests. 
runningOS = runtime.GOOS manufacturerReader = func() (io.Reader, error) { switch runningOS { case "linux": return os.Open(linuxProductNameFile) case "windows": cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) out, err := cmd.Output() if err != nil { return nil, err } for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { if strings.HasPrefix(line, powershellOutputFilter) { re := regexp.MustCompile(windowsManufacturerRegex) name := re.FindString(line) name = strings.TrimLeft(name, ":") return strings.NewReader(name), nil } } return nil, errors.New("cannot determine the machine's manufacturer") default: return nil, platformError(runningOS) } } vmOnGCP bool ) // isRunningOnGCP checks whether the local system, without doing a network request is // running on GCP. func isRunningOnGCP() bool { manufacturer, err := readManufacturer() if os.IsNotExist(err) { return false } if err != nil { log.Fatalf("failure to read manufacturer information: %v", err) } name := string(manufacturer) switch runningOS { case "linux": name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" case "windows": name = strings.Replace(name, " ", "", -1) name = strings.Replace(name, "\n", "", -1) name = strings.Replace(name, "\r", "", -1) return name == "Google" default: log.Fatal(platformError(runningOS)) } return false } func readManufacturer() ([]byte, error) { reader, err := manufacturerReader() if err != nil { return nil, err } if reader == nil { return nil, errors.New("got nil reader") } manufacturer, err := ioutil.ReadAll(reader) if err != nil { return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err) } return manufacturer, nil } // AuthInfoFromContext extracts the alts.AuthInfo object from the given context, // if it exists. This API should be used by gRPC server RPC handlers to get // information about the communicating peer. For client-side, use grpc.Peer() // CallOption. 
func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) { p, ok := peer.FromContext(ctx) if !ok { return nil, errors.New("no Peer found in Context") } return AuthInfoFromPeer(p) } // AuthInfoFromPeer extracts the alts.AuthInfo object from the given peer, if it // exists. This API should be used by gRPC clients after obtaining a peer object // using the grpc.Peer() CallOption. func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) { altsAuthInfo, ok := p.AuthInfo.(AuthInfo) if !ok { return nil, errors.New("no alts.AuthInfo found in Peer") } return altsAuthInfo, nil } // ClientAuthorizationCheck checks whether the client is authorized to access // the requested resources based on the given expected client service accounts. // This API should be used by gRPC server RPC handlers. This API should not be // used by clients. func ClientAuthorizationCheck(ctx context.Context, expectedServiceAccounts []string) error { authInfo, err := AuthInfoFromContext(ctx) if err != nil { return status.Newf(codes.PermissionDenied, "The context is not an ALTS-compatible context: %v", err).Err() } for _, sa := range expectedServiceAccounts { if authInfo.PeerServiceAccount() == sa { return nil } } return status.Newf(codes.PermissionDenied, "Client %v is not authorized", authInfo.PeerServiceAccount()).Err() } grpc-go-1.29.1/credentials/alts/utils_test.go000066400000000000000000000141131365033716300211370ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package alts import ( "context" "io" "os" "strings" "testing" "google.golang.org/grpc/codes" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" ) const ( testServiceAccount1 = "service_account1" testServiceAccount2 = "service_account2" testServiceAccount3 = "service_account3" ) func setupManufacturerReader(testOS string, reader func() (io.Reader, error)) func() { tmpOS := runningOS tmpReader := manufacturerReader // Set test OS and reader function. runningOS = testOS manufacturerReader = reader return func() { runningOS = tmpOS manufacturerReader = tmpReader } } func setup(testOS string, testReader io.Reader) func() { reader := func() (io.Reader, error) { return testReader, nil } return setupManufacturerReader(testOS, reader) } func setupError(testOS string, err error) func() { reader := func() (io.Reader, error) { return nil, err } return setupManufacturerReader(testOS, reader) } func (s) TestIsRunningOnGCP(t *testing.T) { for _, tc := range []struct { description string testOS string testReader io.Reader out bool }{ // Linux tests. {"linux: not a GCP platform", "linux", strings.NewReader("not GCP"), false}, {"Linux: GCP platform (Google)", "linux", strings.NewReader("Google"), true}, {"Linux: GCP platform (Google Compute Engine)", "linux", strings.NewReader("Google Compute Engine"), true}, {"Linux: GCP platform (Google Compute Engine) with extra spaces", "linux", strings.NewReader(" Google Compute Engine "), true}, // Windows tests. 
{"windows: not a GCP platform", "windows", strings.NewReader("not GCP"), false}, {"windows: GCP platform (Google)", "windows", strings.NewReader("Google"), true}, {"windows: GCP platform (Google) with extra spaces", "windows", strings.NewReader(" Google "), true}, } { reverseFunc := setup(tc.testOS, tc.testReader) if got, want := isRunningOnGCP(), tc.out; got != want { t.Errorf("%v: isRunningOnGCP()=%v, want %v", tc.description, got, want) } reverseFunc() } } func (s) TestIsRunningOnGCPNoProductNameFile(t *testing.T) { reverseFunc := setupError("linux", os.ErrNotExist) if isRunningOnGCP() { t.Errorf("ErrNotExist: isRunningOnGCP()=true, want false") } reverseFunc() } func (s) TestAuthInfoFromContext(t *testing.T) { ctx := context.Background() altsAuthInfo := &fakeALTSAuthInfo{} p := &peer.Peer{ AuthInfo: altsAuthInfo, } for _, tc := range []struct { desc string ctx context.Context success bool out AuthInfo }{ { "working case", peer.NewContext(ctx, p), true, altsAuthInfo, }, } { authInfo, err := AuthInfoFromContext(tc.ctx) if got, want := (err == nil), tc.success; got != want { t.Errorf("%v: AuthInfoFromContext(_)=(err=nil)=%v, want %v", tc.desc, got, want) } if got, want := authInfo, tc.out; got != want { t.Errorf("%v:, AuthInfoFromContext(_)=(%v, _), want (%v, _)", tc.desc, got, want) } } } func (s) TestAuthInfoFromPeer(t *testing.T) { altsAuthInfo := &fakeALTSAuthInfo{} p := &peer.Peer{ AuthInfo: altsAuthInfo, } for _, tc := range []struct { desc string p *peer.Peer success bool out AuthInfo }{ { "working case", p, true, altsAuthInfo, }, } { authInfo, err := AuthInfoFromPeer(tc.p) if got, want := (err == nil), tc.success; got != want { t.Errorf("%v: AuthInfoFromPeer(_)=(err=nil)=%v, want %v", tc.desc, got, want) } if got, want := authInfo, tc.out; got != want { t.Errorf("%v:, AuthInfoFromPeer(_)=(%v, _), want (%v, _)", tc.desc, got, want) } } } func (s) TestClientAuthorizationCheck(t *testing.T) { ctx := context.Background() altsAuthInfo := 
&fakeALTSAuthInfo{testServiceAccount1} p := &peer.Peer{ AuthInfo: altsAuthInfo, } for _, tc := range []struct { desc string ctx context.Context expectedServiceAccounts []string success bool code codes.Code }{ { "working case", peer.NewContext(ctx, p), []string{testServiceAccount1, testServiceAccount2}, true, codes.OK, // err is nil, code is OK. }, { "context does not have AuthInfo", ctx, []string{testServiceAccount1, testServiceAccount2}, false, codes.PermissionDenied, }, { "unauthorized client", peer.NewContext(ctx, p), []string{testServiceAccount2, testServiceAccount3}, false, codes.PermissionDenied, }, } { err := ClientAuthorizationCheck(tc.ctx, tc.expectedServiceAccounts) if got, want := (err == nil), tc.success; got != want { t.Errorf("%v: ClientAuthorizationCheck(_, %v)=(err=nil)=%v, want %v", tc.desc, tc.expectedServiceAccounts, got, want) } if got, want := status.Code(err), tc.code; got != want { t.Errorf("%v: ClientAuthorizationCheck(_, %v).Code=%v, want %v", tc.desc, tc.expectedServiceAccounts, got, want) } } } type fakeALTSAuthInfo struct { peerServiceAccount string } func (*fakeALTSAuthInfo) AuthType() string { return "" } func (*fakeALTSAuthInfo) ApplicationProtocol() string { return "" } func (*fakeALTSAuthInfo) RecordProtocol() string { return "" } func (*fakeALTSAuthInfo) SecurityLevel() altspb.SecurityLevel { return altspb.SecurityLevel_SECURITY_NONE } func (f *fakeALTSAuthInfo) PeerServiceAccount() string { return f.peerServiceAccount } func (*fakeALTSAuthInfo) LocalServiceAccount() string { return "" } func (*fakeALTSAuthInfo) PeerRPCVersions() *altspb.RpcProtocolVersions { return nil } grpc-go-1.29.1/credentials/credentials.go000066400000000000000000000243511365033716300202770ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package credentials implements various credentials supported by gRPC library, // which encapsulate all the state needed by a client to authenticate with a // server and make various assertions, e.g., about the client's identity, role, // or whether it is authorized to make a particular call. package credentials // import "google.golang.org/grpc/credentials" import ( "context" "errors" "fmt" "net" "github.com/golang/protobuf/proto" "google.golang.org/grpc/internal" ) // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { // GetRequestMetadata gets the current request metadata, refreshing // tokens if required. This should be called by the transport layer on // each request, and the data should be populated in headers or other // context. If a status code is returned, it will be used as the status // for the RPC. uri is the URI of the entry point for the request. // When supported by the underlying implementation, ctx can be used for // timeout and cancellation. Additionally, RequestInfo data will be // available via ctx to this call. // TODO(zhaoq): Define the set of the qualified keys instead of leaving // it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. RequireTransportSecurity() bool } // SecurityLevel defines the protection level on an established connection. 
// // This API is experimental. type SecurityLevel int const ( // NoSecurity indicates a connection is insecure. // The zero SecurityLevel value is invalid for backward compatibility. NoSecurity SecurityLevel = iota + 1 // IntegrityOnly indicates a connection only provides integrity protection. IntegrityOnly // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. PrivacyAndIntegrity ) // String returns SecurityLevel in a string format. func (s SecurityLevel) String() string { switch s { case NoSecurity: return "NoSecurity" case IntegrityOnly: return "IntegrityOnly" case PrivacyAndIntegrity: return "PrivacyAndIntegrity" } return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) } // CommonAuthInfo contains authenticated information common to AuthInfo implementations. // It should be embedded in a struct implementing AuthInfo to provide additional information // about the credentials. // // This API is experimental. type CommonAuthInfo struct { SecurityLevel SecurityLevel } // GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo { return c } // ProtocolInfo provides information regarding the gRPC wire protocol version, // security protocol, security protocol version in use, server name, etc. type ProtocolInfo struct { // ProtocolVersion is the gRPC wire protocol version. ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string // SecurityVersion is the security protocol version. It is a static version string from the // credentials, not a value that reflects per-connection protocol negotiation. To retrieve // details about the credentials used for a connection, use the Peer's AuthInfo field instead. // // Deprecated: please use Peer.AuthInfo. SecurityVersion string // ServerName is the user-configured server name. 
ServerName string } // AuthInfo defines the common interface for the auth information the users are interested in. // A struct that implements AuthInfo should embed CommonAuthInfo by including additional // information about the credentials in it. type AuthInfo interface { AuthType() string } // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC // and the caller should not close rawConn. var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") // TransportCredentials defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). type TransportCredentials interface { // ClientHandshake does the authentication handshake specified by the corresponding // authentication protocol on rawConn for clients. It returns the authenticated // connection and the corresponding auth information about the connection. // The auth information should embed CommonAuthInfo to return additional information about // the credentials. Implementations must use the provided context to implement timely cancellation. // gRPC will try to reconnect if the error returned is a temporary error // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). // If the returned error is a wrapper error, implementations should make sure that // the error implements Temporary() to have the correct retry behaviors. // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns // the authenticated connection and the corresponding auth information about // the connection. The auth information should embed CommonAuthInfo to return additional information // about the credentials. // // If the returned net.Conn is closed, it MUST close the net.Conn provided. 
ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) // Info provides the ProtocolInfo of this TransportCredentials. Info() ProtocolInfo // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. // gRPC internals also use it to override the virtual hosting name if it is set. // It must be called before dialing. Currently, this is only used by grpclb. OverrideServerName(string) error } // Bundle is a combination of TransportCredentials and PerRPCCredentials. // // It also contains a mode switching method, so it can be used as a combination // of different credential policies. // // Bundle cannot be used together with individual TransportCredentials. // PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. // // This API is experimental. type Bundle interface { TransportCredentials() TransportCredentials PerRPCCredentials() PerRPCCredentials // NewWithMode should make a copy of Bundle, and switch mode. Modifying the // existing Bundle may cause races. // // NewWithMode returns nil if the requested mode is not supported. NewWithMode(mode string) (Bundle, error) } // RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. // // This API is experimental. type RequestInfo struct { // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") Method string // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) AuthInfo AuthInfo } // requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object. type requestInfoKey struct{} // RequestInfoFromContext extracts the RequestInfo from the context if it exists. // // This API is experimental. 
func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) return } // CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. // It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. // // This API is experimental. func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { type internalInfo interface { GetCommonAuthInfo() *CommonAuthInfo } ri, _ := RequestInfoFromContext(ctx) if ri.AuthInfo == nil { return errors.New("unable to obtain SecurityLevel from context") } if ci, ok := ri.AuthInfo.(internalInfo); ok { // CommonAuthInfo.SecurityLevel has an invalid value. if ci.GetCommonAuthInfo().SecurityLevel == 0 { return nil } if ci.GetCommonAuthInfo().SecurityLevel < level { return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) } } // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. return nil } func init() { internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } } // ChannelzSecurityInfo defines the interface that security protocols should implement // in order to provide security info to channelz. // // This API is experimental. type ChannelzSecurityInfo interface { GetSecurityValue() ChannelzSecurityValue } // ChannelzSecurityValue defines the interface that GetSecurityValue() return value // should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue // and *OtherChannelzSecurityValue. // // This API is experimental. 
type ChannelzSecurityValue interface { isChannelzSecurityValue() } // OtherChannelzSecurityValue defines the struct that non-TLS protocol should return // from GetSecurityValue(), which contains protocol specific security info. Note // the Value field will be sent to users of channelz requesting channel info, and // thus sensitive info should better be avoided. // // This API is experimental. type OtherChannelzSecurityValue struct { ChannelzSecurityValue Name string Value proto.Message } grpc-go-1.29.1/credentials/credentials_test.go000066400000000000000000000237221365033716300213370ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package credentials import ( "context" "crypto/tls" "net" "reflect" "strings" "testing" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/testdata" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } // A struct that implements AuthInfo interface but does not implement GetCommonAuthInfo() method. type testAuthInfoNoGetCommonAuthInfoMethod struct{} func (ta testAuthInfoNoGetCommonAuthInfoMethod) AuthType() string { return "testAuthInfoNoGetCommonAuthInfoMethod" } // A struct that implements AuthInfo interface and implements CommonAuthInfo() method. 
type testAuthInfo struct { CommonAuthInfo } func (ta testAuthInfo) AuthType() string { return "testAuthInfo" } func createTestContext(s SecurityLevel) context.Context { auth := &testAuthInfo{CommonAuthInfo: CommonAuthInfo{SecurityLevel: s}} ri := RequestInfo{ Method: "testInfo", AuthInfo: auth, } return internal.NewRequestInfoContext.(func(context.Context, RequestInfo) context.Context)(context.Background(), ri) } func (s) TestCheckSecurityLevel(t *testing.T) { testCases := []struct { authLevel SecurityLevel testLevel SecurityLevel want bool }{ { authLevel: PrivacyAndIntegrity, testLevel: PrivacyAndIntegrity, want: true, }, { authLevel: IntegrityOnly, testLevel: PrivacyAndIntegrity, want: false, }, { authLevel: IntegrityOnly, testLevel: NoSecurity, want: true, }, { authLevel: 0, testLevel: IntegrityOnly, want: true, }, { authLevel: 0, testLevel: PrivacyAndIntegrity, want: true, }, } for _, tc := range testCases { err := CheckSecurityLevel(createTestContext(tc.authLevel), tc.testLevel) if tc.want && (err != nil) { t.Fatalf("CheckSeurityLevel(%s, %s) returned failure but want success", tc.authLevel.String(), tc.testLevel.String()) } else if !tc.want && (err == nil) { t.Fatalf("CheckSeurityLevel(%s, %s) returned success but want failure", tc.authLevel.String(), tc.testLevel.String()) } } } func (s) TestCheckSecurityLevelNoGetCommonAuthInfoMethod(t *testing.T) { auth := &testAuthInfoNoGetCommonAuthInfoMethod{} ri := RequestInfo{ Method: "testInfo", AuthInfo: auth, } ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, RequestInfo) context.Context)(context.Background(), ri) if err := CheckSecurityLevel(ctxWithRequestInfo, PrivacyAndIntegrity); err != nil { t.Fatalf("CheckSeurityLevel() returned failure but want success") } } func (s) TestTLSOverrideServerName(t *testing.T) { expectedServerName := "server.name" c := NewTLS(nil) c.OverrideServerName(expectedServerName) if c.Info().ServerName != expectedServerName { t.Fatalf("c.Info().ServerName = %v, 
want %v", c.Info().ServerName, expectedServerName) } } func (s) TestTLSClone(t *testing.T) { expectedServerName := "server.name" c := NewTLS(nil) c.OverrideServerName(expectedServerName) cc := c.Clone() if cc.Info().ServerName != expectedServerName { t.Fatalf("cc.Info().ServerName = %v, want %v", cc.Info().ServerName, expectedServerName) } cc.OverrideServerName("") if c.Info().ServerName != expectedServerName { t.Fatalf("Change in clone should not affect the original, c.Info().ServerName = %v, want %v", c.Info().ServerName, expectedServerName) } } type serverHandshake func(net.Conn) (AuthInfo, error) func (s) TestClientHandshakeReturnsAuthInfo(t *testing.T) { tcs := []struct { name string address string }{ { name: "localhost", address: "localhost:0", }, { name: "ipv4", address: "127.0.0.1:0", }, { name: "ipv6", address: "[::1]:0", }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { done := make(chan AuthInfo, 1) lis := launchServerOnListenAddress(t, tlsServerHandshake, done, tc.address) defer lis.Close() lisAddr := lis.Addr().String() clientAuthInfo := clientHandle(t, gRPCClientHandshake, lisAddr) // wait until server sends serverAuthInfo or fails. serverAuthInfo, ok := <-done if !ok { t.Fatalf("Error at server-side") } if !compare(clientAuthInfo, serverAuthInfo) { t.Fatalf("c.ClientHandshake(_, %v, _) = %v, want %v.", lisAddr, clientAuthInfo, serverAuthInfo) } }) } } func (s) TestServerHandshakeReturnsAuthInfo(t *testing.T) { done := make(chan AuthInfo, 1) lis := launchServer(t, gRPCServerHandshake, done) defer lis.Close() clientAuthInfo := clientHandle(t, tlsClientHandshake, lis.Addr().String()) // wait until server sends serverAuthInfo or fails. 
serverAuthInfo, ok := <-done if !ok { t.Fatalf("Error at server-side") } if !compare(clientAuthInfo, serverAuthInfo) { t.Fatalf("ServerHandshake(_) = %v, want %v.", serverAuthInfo, clientAuthInfo) } } func (s) TestServerAndClientHandshake(t *testing.T) { done := make(chan AuthInfo, 1) lis := launchServer(t, gRPCServerHandshake, done) defer lis.Close() clientAuthInfo := clientHandle(t, gRPCClientHandshake, lis.Addr().String()) // wait until server sends serverAuthInfo or fails. serverAuthInfo, ok := <-done if !ok { t.Fatalf("Error at server-side") } if !compare(clientAuthInfo, serverAuthInfo) { t.Fatalf("AuthInfo returned by server: %v and client: %v aren't same", serverAuthInfo, clientAuthInfo) } } func compare(a1, a2 AuthInfo) bool { if a1.AuthType() != a2.AuthType() { return false } switch a1.AuthType() { case "tls": state1 := a1.(TLSInfo).State state2 := a2.(TLSInfo).State if state1.Version == state2.Version && state1.HandshakeComplete == state2.HandshakeComplete && state1.CipherSuite == state2.CipherSuite && state1.NegotiatedProtocol == state2.NegotiatedProtocol { return true } return false default: return false } } func launchServer(t *testing.T, hs serverHandshake, done chan AuthInfo) net.Listener { return launchServerOnListenAddress(t, hs, done, "localhost:0") } func launchServerOnListenAddress(t *testing.T, hs serverHandshake, done chan AuthInfo, address string) net.Listener { lis, err := net.Listen("tcp", address) if err != nil { if strings.Contains(err.Error(), "bind: cannot assign requested address") || strings.Contains(err.Error(), "socket: address family not supported by protocol") { t.Skipf("no support for address %v", address) } t.Fatalf("Failed to listen: %v", err) } go serverHandle(t, hs, done, lis) return lis } // Is run in a separate goroutine. 
func serverHandle(t *testing.T, hs serverHandshake, done chan AuthInfo, lis net.Listener) { serverRawConn, err := lis.Accept() if err != nil { t.Errorf("Server failed to accept connection: %v", err) close(done) return } serverAuthInfo, err := hs(serverRawConn) if err != nil { t.Errorf("Server failed while handshake. Error: %v", err) serverRawConn.Close() close(done) return } done <- serverAuthInfo } func clientHandle(t *testing.T, hs func(net.Conn, string) (AuthInfo, error), lisAddr string) AuthInfo { conn, err := net.Dial("tcp", lisAddr) if err != nil { t.Fatalf("Client failed to connect to %s. Error: %v", lisAddr, err) } defer conn.Close() clientAuthInfo, err := hs(conn, lisAddr) if err != nil { t.Fatalf("Error on client while handshake. Error: %v", err) } return clientAuthInfo } // Server handshake implementation in gRPC. func gRPCServerHandshake(conn net.Conn) (AuthInfo, error) { serverTLS, err := NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key")) if err != nil { return nil, err } _, serverAuthInfo, err := serverTLS.ServerHandshake(conn) if err != nil { return nil, err } return serverAuthInfo, nil } // Client handshake implementation in gRPC. 
func gRPCClientHandshake(conn net.Conn, lisAddr string) (AuthInfo, error) { clientTLS := NewTLS(&tls.Config{InsecureSkipVerify: true}) _, authInfo, err := clientTLS.ClientHandshake(context.Background(), lisAddr, conn) if err != nil { return nil, err } return authInfo, nil } func tlsServerHandshake(conn net.Conn) (AuthInfo, error) { cert, err := tls.LoadX509KeyPair(testdata.Path("server1.pem"), testdata.Path("server1.key")) if err != nil { return nil, err } serverTLSConfig := &tls.Config{Certificates: []tls.Certificate{cert}} serverConn := tls.Server(conn, serverTLSConfig) err = serverConn.Handshake() if err != nil { return nil, err } return TLSInfo{State: serverConn.ConnectionState(), CommonAuthInfo: CommonAuthInfo{SecurityLevel: PrivacyAndIntegrity}}, nil } func tlsClientHandshake(conn net.Conn, _ string) (AuthInfo, error) { clientTLSConfig := &tls.Config{InsecureSkipVerify: true} clientConn := tls.Client(conn, clientTLSConfig) if err := clientConn.Handshake(); err != nil { return nil, err } return TLSInfo{State: clientConn.ConnectionState(), CommonAuthInfo: CommonAuthInfo{SecurityLevel: PrivacyAndIntegrity}}, nil } func (s) TestAppendH2ToNextProtos(t *testing.T) { tests := []struct { name string ps []string want []string }{ { name: "empty", ps: nil, want: []string{"h2"}, }, { name: "only h2", ps: []string{"h2"}, want: []string{"h2"}, }, { name: "with h2", ps: []string{"alpn", "h2"}, want: []string{"alpn", "h2"}, }, { name: "no h2", ps: []string{"alpn"}, want: []string{"alpn", "h2"}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := appendH2ToNextProtos(tt.ps); !reflect.DeepEqual(got, tt.want) { t.Errorf("appendH2ToNextProtos() = %v, want %v", got, tt.want) } }) } } grpc-go-1.29.1/credentials/go12.go000066400000000000000000000017351365033716300165530ustar00rootroot00000000000000// +build go1.12 /* * * Copyright 2019 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package credentials import "crypto/tls" // This init function adds cipher suite constants only defined in Go 1.12. func init() { cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" } grpc-go-1.29.1/credentials/google/000077500000000000000000000000001365033716300167225ustar00rootroot00000000000000grpc-go-1.29.1/credentials/google/google.go000066400000000000000000000075341365033716300205360ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package google defines credentials for google cloud services. 
package google import ( "context" "fmt" "time" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/credentials/oauth" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" ) const tokenRequestTimeout = 30 * time.Second // NewDefaultCredentials returns a credentials bundle that is configured to work // with google services. // // This API is experimental. func NewDefaultCredentials() credentials.Bundle { c := &creds{ newPerRPCCreds: func() credentials.PerRPCCredentials { ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) defer cancel() perRPCCreds, err := oauth.NewApplicationDefault(ctx) if err != nil { grpclog.Warningf("google default creds: failed to create application oauth: %v", err) } return perRPCCreds }, } bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) if err != nil { grpclog.Warningf("google default creds: failed to create new creds: %v", err) } return bundle } // NewComputeEngineCredentials returns a credentials bundle that is configured to work // with google services. This API must only be used when running on GCE. Authentication configured // by this API represents the GCE VM's default service account. // // This API is experimental. func NewComputeEngineCredentials() credentials.Bundle { c := &creds{ newPerRPCCreds: func() credentials.PerRPCCredentials { return oauth.NewComputeEngine() }, } bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) if err != nil { grpclog.Warningf("compute engine creds: failed to create new creds: %v", err) } return bundle } // creds implements credentials.Bundle. type creds struct { // Supported modes are defined in internal/internal.go. mode string // The transport credentials associated with this bundle. transportCreds credentials.TransportCredentials // The per RPC credentials associated with this bundle. 
perRPCCreds credentials.PerRPCCredentials // Creates new per RPC credentials newPerRPCCreds func() credentials.PerRPCCredentials } func (c *creds) TransportCredentials() credentials.TransportCredentials { return c.transportCreds } func (c *creds) PerRPCCredentials() credentials.PerRPCCredentials { if c == nil { return nil } return c.perRPCCreds } // NewWithMode should make a copy of Bundle, and switch mode. Modifying the // existing Bundle may cause races. func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { newCreds := &creds{ mode: mode, newPerRPCCreds: c.newPerRPCCreds, } // Create transport credentials. switch mode { case internal.CredsBundleModeFallback: newCreds.transportCreds = credentials.NewTLS(nil) case internal.CredsBundleModeBackendFromBalancer, internal.CredsBundleModeBalancer: // Only the clients can use google default credentials, so we only need // to create new ALTS client creds here. newCreds.transportCreds = alts.NewClientCreds(alts.DefaultClientOptions()) default: return nil, fmt.Errorf("unsupported mode: %v", mode) } if mode == internal.CredsBundleModeFallback || mode == internal.CredsBundleModeBackendFromBalancer { newCreds.perRPCCreds = newCreds.newPerRPCCreds() } return newCreds, nil } grpc-go-1.29.1/credentials/internal/000077500000000000000000000000001365033716300172625ustar00rootroot00000000000000grpc-go-1.29.1/credentials/internal/syscallconn.go000066400000000000000000000035451365033716300221500ustar00rootroot00000000000000// +build !appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ // Package internal contains credentials-internal code. package internal import ( "net" "syscall" ) type sysConn = syscall.Conn // syscallConn keeps reference of rawConn to support syscall.Conn for channelz. // SyscallConn() (the method in interface syscall.Conn) is explicitly // implemented on this type, // // Interface syscall.Conn is implemented by most net.Conn implementations (e.g. // TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns // that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn // doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't // help here). type syscallConn struct { net.Conn // sysConn is a type alias of syscall.Conn. It's necessary because the name // `Conn` collides with `net.Conn`. sysConn } // WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that // implements syscall.Conn. rawConn will be used to support syscall, and newConn // will be used for read/write. // // This function returns newConn if rawConn doesn't implement syscall.Conn. func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { sysConn, ok := rawConn.(syscall.Conn) if !ok { return newConn } return &syscallConn{ Conn: newConn, sysConn: sysConn, } } grpc-go-1.29.1/credentials/internal/syscallconn_appengine.go000066400000000000000000000014241365033716300241700ustar00rootroot00000000000000// +build appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package internal import ( "net" ) // WrapSyscallConn returns newConn on appengine. func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { return newConn } grpc-go-1.29.1/credentials/internal/syscallconn_test.go000066400000000000000000000033021365033716300231760ustar00rootroot00000000000000// +build !appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package internal_test import ( "net" "syscall" "testing" "google.golang.org/grpc/credentials/internal" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } type syscallConn struct { net.Conn } func (*syscallConn) SyscallConn() (syscall.RawConn, error) { return nil, nil } type nonSyscallConn struct { net.Conn } func (s) TestWrapSyscallConn(t *testing.T) { sc := &syscallConn{} nsc := &nonSyscallConn{} wrapConn := internal.WrapSyscallConn(sc, nsc) if _, ok := wrapConn.(syscall.Conn); !ok { t.Errorf("returned conn (type %T) doesn't implement syscall.Conn, want implement", wrapConn) } } func (s) TestWrapSyscallConnNoWrap(t *testing.T) { nscRaw := &nonSyscallConn{} nsc := &nonSyscallConn{} wrapConn := internal.WrapSyscallConn(nscRaw, nsc) if _, ok := wrapConn.(syscall.Conn); ok { t.Errorf("returned conn (type %T) implements syscall.Conn, want not implement", wrapConn) } if wrapConn != nsc { 
t.Errorf("returned conn is %p, want %p (the passed-in newConn)", wrapConn, nsc) } } grpc-go-1.29.1/credentials/oauth/000077500000000000000000000000001365033716300165665ustar00rootroot00000000000000grpc-go-1.29.1/credentials/oauth/oauth.go000066400000000000000000000135471365033716300202470ustar00rootroot00000000000000/* * * Copyright 2015 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package oauth implements gRPC credentials using OAuth. package oauth import ( "context" "fmt" "io/ioutil" "sync" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" "google.golang.org/grpc/credentials" ) // TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. type TokenSource struct { oauth2.TokenSource } // GetRequestMetadata gets the request metadata as a map from a TokenSource. func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { token, err := ts.Token() if err != nil { return nil, err } if err = credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil { return nil, fmt.Errorf("unable to transfer TokenSource PerRPCCredentials: %v", err) } return map[string]string{ "authorization": token.Type() + " " + token.AccessToken, }, nil } // RequireTransportSecurity indicates whether the credentials requires transport security. 
func (ts TokenSource) RequireTransportSecurity() bool { return true } type jwtAccess struct { jsonKey []byte } // NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { jsonKey, err := ioutil.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) } return NewJWTAccessFromKey(jsonKey) } // NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { return jwtAccess{jsonKey}, nil } func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) if err != nil { return nil, err } token, err := ts.Token() if err != nil { return nil, err } if err = credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil { return nil, fmt.Errorf("unable to transfer jwtAccess PerRPCCredentials: %v", err) } return map[string]string{ "authorization": token.Type() + " " + token.AccessToken, }, nil } func (j jwtAccess) RequireTransportSecurity() bool { return true } // oauthAccess supplies PerRPCCredentials from a given token. type oauthAccess struct { token oauth2.Token } // NewOauthAccess constructs the PerRPCCredentials using a given token. 
func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { return oauthAccess{token: *token} } func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { if err := credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil { return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err) } return map[string]string{ "authorization": oa.token.Type() + " " + oa.token.AccessToken, }, nil } func (oa oauthAccess) RequireTransportSecurity() bool { return true } // NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from // Google Compute Engine (GCE)'s metadata server. It is only valid to use this // if your program is running on a GCE instance. // TODO(dsymonds): Deprecate and remove this. func NewComputeEngine() credentials.PerRPCCredentials { return TokenSource{google.ComputeTokenSource("")} } // serviceAccount represents PerRPCCredentials via JWT signing key. type serviceAccount struct { mu sync.Mutex config *jwt.Config t *oauth2.Token } func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { s.mu.Lock() defer s.mu.Unlock() if !s.t.Valid() { var err error s.t, err = s.config.TokenSource(ctx).Token() if err != nil { return nil, err } } if err := credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil { return nil, fmt.Errorf("unable to transfer serviceAccount PerRPCCredentials: %v", err) } return map[string]string{ "authorization": s.t.Type() + " " + s.t.AccessToken, }, nil } func (s *serviceAccount) RequireTransportSecurity() bool { return true } // NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice // from a Google Developers service account. func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { config, err := google.JWTConfigFromJSON(jsonKey, scope...) 
if err != nil { return nil, err } return &serviceAccount{config: config}, nil } // NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file // of a Google Developers service account. func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { jsonKey, err := ioutil.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) } return NewServiceAccountFromKey(jsonKey, scope...) } // NewApplicationDefault returns "Application Default Credentials". For more // detail, see https://developers.google.com/accounts/docs/application-default-credentials. func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { t, err := google.DefaultTokenSource(ctx, scope...) if err != nil { return nil, err } return TokenSource{t}, nil } grpc-go-1.29.1/credentials/tls.go000066400000000000000000000205541365033716300166050ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package credentials import ( "context" "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "net" "google.golang.org/grpc/credentials/internal" ) // TLSInfo contains the auth information for a TLS authenticated connection. // It implements the AuthInfo interface. type TLSInfo struct { State tls.ConnectionState CommonAuthInfo } // AuthType returns the type of TLSInfo as a string. 
func (t TLSInfo) AuthType() string { return "tls" } // GetSecurityValue returns security info requested by channelz. func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { v := &TLSChannelzSecurityValue{ StandardName: cipherSuiteLookup[t.State.CipherSuite], } // Currently there's no way to get LocalCertificate info from tls package. if len(t.State.PeerCertificates) > 0 { v.RemoteCertificate = t.State.PeerCertificates[0].Raw } return v } // tlsCreds is the credentials required for authenticating a connection using TLS. type tlsCreds struct { // TLS configuration config *tls.Config } func (c tlsCreds) Info() ProtocolInfo { return ProtocolInfo{ SecurityProtocol: "tls", SecurityVersion: "1.2", ServerName: c.config.ServerName, } } func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { // use local cfg to avoid clobbering ServerName if using multiple endpoints cfg := cloneTLSConfig(c.config) if cfg.ServerName == "" { serverName, _, err := net.SplitHostPort(authority) if err != nil { // If the authority had no host port or if the authority cannot be parsed, use it as-is. 
serverName = authority } cfg.ServerName = serverName } conn := tls.Client(rawConn, cfg) errChannel := make(chan error, 1) go func() { errChannel <- conn.Handshake() close(errChannel) }() select { case err := <-errChannel: if err != nil { conn.Close() return nil, nil, err } case <-ctx.Done(): conn.Close() return nil, nil, ctx.Err() } return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil } func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { conn := tls.Server(rawConn, c.config) if err := conn.Handshake(); err != nil { conn.Close() return nil, nil, err } return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil } func (c *tlsCreds) Clone() TransportCredentials { return NewTLS(c.config) } func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { c.config.ServerName = serverNameOverride return nil } const alpnProtoStrH2 = "h2" func appendH2ToNextProtos(ps []string) []string { for _, p := range ps { if p == alpnProtoStrH2 { return ps } } ret := make([]string, 0, len(ps)+1) ret = append(ret, ps...) return append(ret, alpnProtoStrH2) } // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{cloneTLSConfig(c)} tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) return tc } // NewClientTLSFromCert constructs TLS credentials from the provided root // certificate authority certificate(s) to validate server connections. If // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. // serverNameOverride is for testing only. If set to a non empty string, // it will override the virtual host name of authority (e.g. :authority header // field) in requests. 
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) } // NewClientTLSFromFile constructs TLS credentials from the provided root // certificate authority certificate file(s) to validate server connections. If // certificates to establish the identity of the client need to be included in // the credentials (eg: for mTLS), use NewTLS instead, where a complete // tls.Config can be specified. // serverNameOverride is for testing only. If set to a non empty string, // it will override the virtual host name of authority (e.g. :authority header // field) in requests. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { b, err := ioutil.ReadFile(certFile) if err != nil { return nil, err } cp := x509.NewCertPool() if !cp.AppendCertsFromPEM(b) { return nil, fmt.Errorf("credentials: failed to append certificates") } return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil } // NewServerTLSFromCert constructs TLS credentials from the input certificate for server. func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) } // NewServerTLSFromFile constructs TLS credentials from the input certificate file and key // file for server. func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { cert, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { return nil, err } return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil } // TLSChannelzSecurityValue defines the struct that TLS protocol should return // from GetSecurityValue(), containing security info like cipher and certificate used. // // This API is EXPERIMENTAL. 
type TLSChannelzSecurityValue struct { ChannelzSecurityValue StandardName string LocalCertificate []byte RemoteCertificate []byte } var cipherSuiteLookup = map[uint16]string{ tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", } // cloneTLSConfig returns a shallow clone of the exported // fields of cfg, 
ignoring the unexported sync.Once, which // contains a mutex and must not be copied. // // If cfg is nil, a new zero tls.Config is returned. // // TODO: inline this function if possible. func cloneTLSConfig(cfg *tls.Config) *tls.Config { if cfg == nil { return &tls.Config{} } return cfg.Clone() } grpc-go-1.29.1/dialoptions.go000066400000000000000000000520001365033716300160220ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "context" "fmt" "net" "time" "google.golang.org/grpc/backoff" "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. 
type dialOptions struct { unaryInt UnaryClientInterceptor streamInt StreamClientInterceptor chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor cp Compressor dc Decompressor bs internalbackoff.Strategy block bool insecure bool timeout time.Duration scChan <-chan ServiceConfig authority string copts transport.ConnectOptions callOptions []CallOption // This is used by v1 balancer dial option WithBalancer to support v1 // balancer, and also by WithBalancerName dial option. balancerBuilder balancer.Builder channelzParentID int64 disableServiceConfig bool disableRetry bool disableHealthCheck bool healthCheckFunc internal.HealthChecker minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string // This is used by ccResolverWrapper to backoff between successive calls to // resolver.ResolveNow(). The user will have no need to configure this, but // we need to be able to configure this in tests. resolveNowBackoff func(int) time.Duration resolvers []resolver.Builder withProxy bool } // DialOption configures how we set up the connection. type DialOption interface { apply(*dialOptions) } // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // // This API is EXPERIMENTAL. type EmptyDialOption struct{} func (EmptyDialOption) apply(*dialOptions) {} // funcDialOption wraps a function that modifies dialOptions into an // implementation of the DialOption interface. type funcDialOption struct { f func(*dialOptions) } func (fdo *funcDialOption) apply(do *dialOptions) { fdo.f(do) } func newFuncDialOption(f func(*dialOptions)) *funcDialOption { return &funcDialOption{ f: f, } } // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. 
The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // // Zero will disable the write buffer such that each write will be on underlying // connection. Note: A Send call may not directly translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s }) } // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // // The default value for this buffer is 32KB. Zero will disable read buffer for // a connection so data framer can access the underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s }) } // WithInitialWindowSize returns a DialOption which sets the value for initial // window size on a stream. The lower bound for window size is 64K and any value // smaller than that will be ignored. func WithInitialWindowSize(s int32) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.InitialWindowSize = s }) } // WithInitialConnWindowSize returns a DialOption which sets the value for // initial window size on a connection. The lower bound for window size is 64K // and any value smaller than that will be ignored. func WithInitialConnWindowSize(s int32) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.InitialConnWindowSize = s }) } // WithMaxMsgSize returns a DialOption which sets the maximum message size the // client can receive. // // Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will // be supported throughout 1.x. func WithMaxMsgSize(s int) DialOption { return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) } // WithDefaultCallOptions returns a DialOption which sets the default // CallOptions for calls over the connection. 
func WithDefaultCallOptions(cos ...CallOption) DialOption { return newFuncDialOption(func(o *dialOptions) { o.callOptions = append(o.callOptions, cos...) }) } // WithCodec returns a DialOption which sets a codec for message marshaling and // unmarshaling. // // Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be // supported throughout 1.x. func WithCodec(c Codec) DialOption { return WithDefaultCallOptions(CallCustomCodec(c)) } // WithCompressor returns a DialOption which sets a Compressor to use for // message compression. It has lower priority than the compressor set by the // UseCompressor CallOption. // // Deprecated: use UseCompressor instead. Will be supported throughout 1.x. func WithCompressor(cp Compressor) DialOption { return newFuncDialOption(func(o *dialOptions) { o.cp = cp }) } // WithDecompressor returns a DialOption which sets a Decompressor to use for // incoming message decompression. If incoming response messages are encoded // using the decompressor's Type(), it will be used. Otherwise, the message // encoding will be used to look up the compressor registered via // encoding.RegisterCompressor, which will then be used to decompress the // message. If no compressor is registered for the encoding, an Unimplemented // status error will be returned. // // Deprecated: use encoding.RegisterCompressor instead. Will be supported // throughout 1.x. func WithDecompressor(dc Decompressor) DialOption { return newFuncDialOption(func(o *dialOptions) { o.dc = dc }) } // WithBalancer returns a DialOption which sets a load balancer with the v1 API. // Name resolver will be ignored if this DialOption is specified. // // Deprecated: use the new balancer APIs in balancer package and // WithBalancerName. Will be removed in a future 1.x release. 
func WithBalancer(b Balancer) DialOption { return newFuncDialOption(func(o *dialOptions) { o.balancerBuilder = &balancerWrapperBuilder{ b: b, } }) } // WithBalancerName sets the balancer that the ClientConn will be initialized // with. Balancer registered with balancerName will be used. This function // panics if no balancer was registered by balancerName. // // The balancer cannot be overridden by balancer option specified by service // config. // // Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig // instead. Will be removed in a future 1.x release. func WithBalancerName(balancerName string) DialOption { builder := balancer.Get(balancerName) if builder == nil { panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) } return newFuncDialOption(func(o *dialOptions) { o.balancerBuilder = builder }) } // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // // Deprecated: service config should be received through name resolver or via // WithDefaultServiceConfig, as specified at // https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be // removed in a future 1.x release. func WithServiceConfig(c <-chan ServiceConfig) DialOption { return newFuncDialOption(func(o *dialOptions) { o.scChan = c }) } // WithConnectParams configures the dialer to use the provided ConnectParams. // // The backoff configuration specified as part of the ConnectParams overrides // all defaults specified in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider // using the backoff.DefaultConfig as a base, in cases where you want to // override only a subset of the backoff configuration. // // This API is EXPERIMENTAL. 
func WithConnectParams(p ConnectParams) DialOption { return newFuncDialOption(func(o *dialOptions) { o.bs = internalbackoff.Exponential{Config: p.Backoff} o.minConnectTimeout = func() time.Duration { return p.MinConnectTimeout } }) } // WithBackoffMaxDelay configures the dialer to use the provided maximum delay // when backing off after failed connection attempts. // // Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. func WithBackoffMaxDelay(md time.Duration) DialOption { return WithBackoffConfig(BackoffConfig{MaxDelay: md}) } // WithBackoffConfig configures the dialer to use the provided backoff // parameters after connection failures. // // Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. func WithBackoffConfig(b BackoffConfig) DialOption { bc := backoff.DefaultConfig bc.MaxDelay = b.MaxDelay return withBackoff(internalbackoff.Exponential{Config: bc}) } // withBackoff sets the backoff strategy used for connectRetryNum after a failed // connection attempt. // // This can be exported if arbitrary backoff strategies are allowed by gRPC. func withBackoff(bs internalbackoff.Strategy) DialOption { return newFuncDialOption(func(o *dialOptions) { o.bs = bs }) } // WithBlock returns a DialOption which makes caller of Dial blocks until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true }) } // WithInsecure returns a DialOption which disables transport security for this // ClientConn. Note that transport security is required unless WithInsecure is // set. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { o.insecure = true }) } // WithNoProxy returns a DialOption which disables the use of proxies for this // ClientConn. This is ignored if WithDialer or WithContextDialer are used. // // This API is EXPERIMENTAL. 
func WithNoProxy() DialOption { return newFuncDialOption(func(o *dialOptions) { o.withProxy = false }) } // WithTransportCredentials returns a DialOption which configures a connection // level security credentials (e.g., TLS/SSL). This should not be used together // with WithCredentialsBundle. func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.TransportCredentials = creds }) } // WithPerRPCCredentials returns a DialOption which sets credentials and places // auth state on each outbound RPC. func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) }) } // WithCredentialsBundle returns a DialOption to set a credentials bundle for // the ClientConn.WithCreds. This should not be used together with // WithTransportCredentials. // // This API is experimental. func WithCredentialsBundle(b credentials.Bundle) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.CredsBundle = b }) } // WithTimeout returns a DialOption that configures a timeout for dialing a // ClientConn initially. This is valid if and only if WithBlock() is present. // // Deprecated: use DialContext instead of Dial and context.WithTimeout // instead. Will be supported throughout 1.x. func WithTimeout(d time.Duration) DialOption { return newFuncDialOption(func(o *dialOptions) { o.timeout = d }) } // WithContextDialer returns a DialOption that sets a dialer to create // connections. If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. 
func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f }) } func init() { internal.WithHealthCheckFunc = withHealthCheckFunc } // WithDialer returns a DialOption that specifies a function to use for dialing // network addresses. If FailOnNonTempDialError() is set to true, and an error // is returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. // // Deprecated: use WithContextDialer instead. Will be supported throughout // 1.x. func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { return WithContextDialer( func(ctx context.Context, addr string) (net.Conn, error) { if deadline, ok := ctx.Deadline(); ok { return f(addr, time.Until(deadline)) } return f(addr, 0) }) } // WithStatsHandler returns a DialOption that specifies the stats handler for // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.StatsHandler = h }) } // FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on // non-temporary dial errors. If f is true, and dialer returns a non-temporary // error, gRPC will fail the connection to the network address and won't try to // reconnect. The default value of FailOnNonTempDialError is false. // // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // // This is an EXPERIMENTAL API. func FailOnNonTempDialError(f bool) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.FailOnNonTempDialError = f }) } // WithUserAgent returns a DialOption that specifies a user agent string for all // the RPCs. 
func WithUserAgent(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.UserAgent = s }) } // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) kp.Time = internal.KeepaliveMinPingTime } return newFuncDialOption(func(o *dialOptions) { o.copts.KeepaliveParams = kp }) } // WithUnaryInterceptor returns a DialOption that specifies the interceptor for // unary RPCs. func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { return newFuncDialOption(func(o *dialOptions) { o.unaryInt = f }) } // WithChainUnaryInterceptor returns a DialOption that specifies the chained // interceptor for unary RPCs. The first interceptor will be the outer most, // while the last interceptor will be the inner most wrapper around the real call. // All interceptors added by this method will be chained, and the interceptor // defined by WithUnaryInterceptor will always be prepended to the chain. func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { return newFuncDialOption(func(o *dialOptions) { o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) }) } // WithStreamInterceptor returns a DialOption that specifies the interceptor for // streaming RPCs. func WithStreamInterceptor(f StreamClientInterceptor) DialOption { return newFuncDialOption(func(o *dialOptions) { o.streamInt = f }) } // WithChainStreamInterceptor returns a DialOption that specifies the chained // interceptor for unary RPCs. The first interceptor will be the outer most, // while the last interceptor will be the inner most wrapper around the real call. 
// All interceptors added by this method will be chained, and the interceptor // defined by WithStreamInterceptor will always be prepended to the chain. func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { return newFuncDialOption(func(o *dialOptions) { o.chainStreamInts = append(o.chainStreamInts, interceptors...) }) } // WithAuthority returns a DialOption that specifies the value to be used as the // :authority pseudo-header. This value only works with WithInsecure and has no // effect if TransportCredentials are present. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a }) } // WithChannelzParentID returns a DialOption that specifies the channelz ID of // current ClientConn's parent. This function is used in nested channel creation // (e.g. grpclb dial). // // This API is EXPERIMENTAL. func WithChannelzParentID(id int64) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) } // WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any // service config provided by the resolver and provides a hint to the resolver // to not fetch service configs. // // Note that this dial option only disables service config from resolver. If // default service config is provided, gRPC will use the default service config. func WithDisableServiceConfig() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableServiceConfig = true }) } // WithDefaultServiceConfig returns a DialOption that configures the default // service config, which will be used in cases where: // // 1. WithDisableServiceConfig is also used. // 2. Resolver does not return a service config or if the resolver returns an // invalid service config. // // This API is EXPERIMENTAL. 
func WithDefaultServiceConfig(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.defaultServiceConfigRawJSON = &s }) } // WithDisableRetry returns a DialOption that disables retries, even if the // service config enables them. This does not impact transparent retries, which // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. // // Retry support is currently disabled by default, but will be enabled by // default in the future. Until then, it may be enabled by setting the // environment variable "GRPC_GO_RETRY" to "on". // // This API is EXPERIMENTAL. func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true }) } // WithMaxHeaderListSize returns a DialOption that specifies the maximum // (uncompressed) size of header list that the client is prepared to accept. func WithMaxHeaderListSize(s uint32) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.MaxHeaderListSize = &s }) } // WithDisableHealthCheck disables the LB channel health checking for all // SubConns of this ClientConn. // // This API is EXPERIMENTAL. func WithDisableHealthCheck() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableHealthCheck = true }) } // withHealthCheckFunc replaces the default health check function with the // provided one. It makes tests easier to change the health check function. // // For testing purpose only. 
func withHealthCheckFunc(f internal.HealthChecker) DialOption { return newFuncDialOption(func(o *dialOptions) { o.healthCheckFunc = f }) } func defaultDialOptions() dialOptions { return dialOptions{ disableRetry: !envconfig.Retry, healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, }, resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, withProxy: true, } } // withGetMinConnectDeadline specifies the function that clientconn uses to // get minConnectDeadline. This can be used to make connection attempts happen // faster/slower. // // For testing purpose only. func withMinConnectDeadline(f func() time.Duration) DialOption { return newFuncDialOption(func(o *dialOptions) { o.minConnectTimeout = f }) } // withResolveNowBackoff specifies the function that clientconn uses to backoff // between successive calls to resolver.ResolveNow(). // // For testing purpose only. func withResolveNowBackoff(f func(int) time.Duration) DialOption { return newFuncDialOption(func(o *dialOptions) { o.resolveNowBackoff = f }) } // WithResolvers allows a list of resolver implementations to be registered // locally with the ClientConn without needing to be globally registered via // resolver.Register. They will be matched against the scheme used for the // current Dial only, and will take precedence over the global registry. // // This API is EXPERIMENTAL. func WithResolvers(rs ...resolver.Builder) DialOption { return newFuncDialOption(func(o *dialOptions) { o.resolvers = append(o.resolvers, rs...) }) } grpc-go-1.29.1/doc.go000066400000000000000000000013631365033716300142500ustar00rootroot00000000000000/* * * Copyright 2015 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /* Package grpc implements an RPC system called gRPC. See grpc.io for more information about gRPC. */ package grpc // import "google.golang.org/grpc" grpc-go-1.29.1/encoding/000077500000000000000000000000001365033716300147375ustar00rootroot00000000000000grpc-go-1.29.1/encoding/encoding.go000066400000000000000000000112761365033716300170630ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // // This package is EXPERIMENTAL. package encoding import ( "io" "strings" ) // Identity specifies the optional encoding for uncompressed streams. // It is intended for grpc internal use only. const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. type Compressor interface { // Compress writes the data written to wc to w after compressing it. 
If an // error occurs while initializing the compressor, that error is returned // instead. Compress(w io.Writer) (io.WriteCloser, error) // Decompress reads data from r, decompresses it, and provides the // uncompressed data via the returned io.Reader. If an error occurs while // initializing the decompressor, that error is returned instead. Decompress(r io.Reader) (io.Reader, error) // Name is the name of the compression codec and is used to set the content // coding header. The result must be static; the result cannot change // between calls. Name() string // EXPERIMENTAL: if a Compressor implements // DecompressedSize(compressedBytes []byte) int, gRPC will call it // to determine the size of the buffer allocated for the result of decompression. // Return -1 to indicate unknown size. } var registeredCompressor = make(map[string]Compressor) // RegisterCompressor registers the compressor with gRPC by its name. It can // be activated when sending an RPC via grpc.UseCompressor(). It will be // automatically accessed when receiving a message based on the content coding // header. Servers also use it to send a response with the same encoding as // the request. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Compressors are // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c } // GetCompressor returns Compressor for the given compressor name. func GetCompressor(name string) Compressor { return registeredCompressor[name] } // Codec defines the interface gRPC uses to encode and decode messages. Note // that implementations of this interface must be thread safe; a Codec's // methods can be called from concurrent goroutines. type Codec interface { // Marshal returns the wire format of v. Marshal(v interface{}) ([]byte, error) // Unmarshal parses the wire format into v. 
Unmarshal(data []byte, v interface{}) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. Name() string } var registeredCodecs = make(map[string]Codec) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. // // The Codec will be stored and looked up by result of its Name() method, which // should match the content-subtype of the encoding handled by the Codec. This // is case-insensitive, and is stored and looked up as lowercase. If the // result of calling Name() is an empty string, RegisterCodec will panic. See // Content-Type on // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for // more details. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Compressors are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { panic("cannot register a nil Codec") } if codec.Name() == "" { panic("cannot register Codec with empty string result for Name()") } contentSubtype := strings.ToLower(codec.Name()) registeredCodecs[contentSubtype] = codec } // GetCodec gets a registered Codec by content-subtype, or nil if no Codec is // registered for the content-subtype. // // The content-subtype is expected to be lowercase. func GetCodec(contentSubtype string) Codec { return registeredCodecs[contentSubtype] } grpc-go-1.29.1/encoding/gzip/000077500000000000000000000000001365033716300157105ustar00rootroot00000000000000grpc-go-1.29.1/encoding/gzip/gzip.go000066400000000000000000000062601365033716300172140ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package gzip implements and registers the gzip compressor // during the initialization. // This package is EXPERIMENTAL. package gzip import ( "compress/gzip" "encoding/binary" "fmt" "io" "io/ioutil" "sync" "google.golang.org/grpc/encoding" ) // Name is the name registered for the gzip compressor. const Name = "gzip" func init() { c := &compressor{} c.poolCompressor.New = func() interface{} { return &writer{Writer: gzip.NewWriter(ioutil.Discard), pool: &c.poolCompressor} } encoding.RegisterCompressor(c) } type writer struct { *gzip.Writer pool *sync.Pool } // SetLevel updates the registered gzip compressor to use the compression level specified (gzip.HuffmanOnly is not supported). // NOTE: this function must only be called during initialization time (i.e. in an init() function), // and is not thread-safe. // // The error returned will be nil if the specified level is valid. 
func SetLevel(level int) error { if level < gzip.DefaultCompression || level > gzip.BestCompression { return fmt.Errorf("grpc: invalid gzip compression level: %d", level) } c := encoding.GetCompressor(Name).(*compressor) c.poolCompressor.New = func() interface{} { w, err := gzip.NewWriterLevel(ioutil.Discard, level) if err != nil { panic(err) } return &writer{Writer: w, pool: &c.poolCompressor} } return nil } func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { z := c.poolCompressor.Get().(*writer) z.Writer.Reset(w) return z, nil } func (z *writer) Close() error { defer z.pool.Put(z) return z.Writer.Close() } type reader struct { *gzip.Reader pool *sync.Pool } func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { z, inPool := c.poolDecompressor.Get().(*reader) if !inPool { newZ, err := gzip.NewReader(r) if err != nil { return nil, err } return &reader{Reader: newZ, pool: &c.poolDecompressor}, nil } if err := z.Reset(r); err != nil { c.poolDecompressor.Put(z) return nil, err } return z, nil } func (z *reader) Read(p []byte) (n int, err error) { n, err = z.Reader.Read(p) if err == io.EOF { z.pool.Put(z) } return n, err } // RFC1952 specifies that the last four bytes "contains the size of // the original (uncompressed) input data modulo 2^32." // gRPC has a max message size of 2GB so we don't need to worry about wraparound. func (c *compressor) DecompressedSize(buf []byte) int { last := len(buf) if last < 4 { return -1 } return int(binary.LittleEndian.Uint32(buf[last-4 : last])) } func (c *compressor) Name() string { return Name } type compressor struct { poolCompressor sync.Pool poolDecompressor sync.Pool } grpc-go-1.29.1/encoding/proto/000077500000000000000000000000001365033716300161025ustar00rootroot00000000000000grpc-go-1.29.1/encoding/proto/proto.go000066400000000000000000000047661365033716300176110ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package proto defines the protobuf codec. Importing this package will // register the codec. package proto import ( "math" "sync" "github.com/golang/protobuf/proto" "google.golang.org/grpc/encoding" ) // Name is the name registered for the proto compressor. const Name = "proto" func init() { encoding.RegisterCodec(codec{}) } // codec is a Codec implementation with protobuf. It is the default codec for gRPC. type codec struct{} type cachedProtoBuffer struct { lastMarshaledSize uint32 proto.Buffer } func capToMaxInt32(val int) uint32 { if val > math.MaxInt32 { return uint32(math.MaxInt32) } return uint32(val) } func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { protoMsg := v.(proto.Message) newSlice := make([]byte, 0, cb.lastMarshaledSize) cb.SetBuf(newSlice) cb.Reset() if err := cb.Marshal(protoMsg); err != nil { return nil, err } out := cb.Bytes() cb.lastMarshaledSize = capToMaxInt32(len(out)) return out, nil } func (codec) Marshal(v interface{}) ([]byte, error) { if pm, ok := v.(proto.Marshaler); ok { // object can marshal itself, no need for buffer return pm.Marshal() } cb := protoBufferPool.Get().(*cachedProtoBuffer) out, err := marshal(v, cb) // put back buffer and lose the ref to the slice cb.SetBuf(nil) protoBufferPool.Put(cb) return out, err } func (codec) Unmarshal(data []byte, v interface{}) error { protoMsg := v.(proto.Message) protoMsg.Reset() if pu, ok := protoMsg.(proto.Unmarshaler); ok { // 
object can unmarshal itself, no need for buffer return pu.Unmarshal(data) } cb := protoBufferPool.Get().(*cachedProtoBuffer) cb.SetBuf(data) err := cb.Unmarshal(protoMsg) cb.SetBuf(nil) protoBufferPool.Put(cb) return err } func (codec) Name() string { return Name } var protoBufferPool = &sync.Pool{ New: func() interface{} { return &cachedProtoBuffer{ Buffer: proto.Buffer{}, lastMarshaledSize: 16, } }, } grpc-go-1.29.1/encoding/proto/proto_benchmark_test.go000066400000000000000000000054501365033716300226510ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package proto import ( "fmt" "testing" "github.com/golang/protobuf/proto" "google.golang.org/grpc/encoding" "google.golang.org/grpc/test/codec_perf" ) func setupBenchmarkProtoCodecInputs(payloadBaseSize uint32) []proto.Message { payloadBase := make([]byte, payloadBaseSize) // arbitrary byte slices payloadSuffixes := [][]byte{ []byte("one"), []byte("two"), []byte("three"), []byte("four"), []byte("five"), } protoStructs := make([]proto.Message, 0) for _, p := range payloadSuffixes { ps := &codec_perf.Buffer{} ps.Body = append(payloadBase, p...) protoStructs = append(protoStructs, ps) } return protoStructs } // The possible use of certain protobuf APIs like the proto.Buffer API potentially involves caching // on our side. This can add checks around memory allocations and possible contention. 
// Example run: go test -v -run=^$ -bench=BenchmarkProtoCodec -benchmem func BenchmarkProtoCodec(b *testing.B) { // range of message sizes payloadBaseSizes := make([]uint32, 0) for i := uint32(0); i <= 12; i += 4 { payloadBaseSizes = append(payloadBaseSizes, 1<" ["features/deadline"]="wanted = DeadlineExceeded, got = DeadlineExceeded" ["features/encryption/TLS"]="UnaryEcho: hello world" ["features/errors"]="Greeting: Hello world" ["features/interceptor"]="UnaryEcho: hello world" ["features/load_balancing"]="calling helloworld.Greeter/SayHello with pick_first" ["features/metadata"]="this is examples/metadata" ["features/multiplex"]="Greeting: Hello multiplex" ["features/name_resolving"]="calling helloworld.Greeter/SayHello to \"example:///resolver.example.grpc.io\"" ) for example in ${EXAMPLES[@]}; do echo "$(tput setaf 4) testing: ${example} $(tput sgr 0)" # Build server if ! go build -o /dev/null ./examples/${example}/*server/*.go; then fail "failed to build server" else pass "successfully built server" fi # Build client if ! go build -o /dev/null ./examples/${example}/*client/*.go; then fail "failed to build client" else pass "successfully built client" fi # Start server SERVER_LOG="$(mktemp)" go run ./examples/$example/*server/*.go &> $SERVER_LOG & CLIENT_LOG="$(mktemp)" if ! timeout 20 go run examples/${example}/*client/*.go &> $CLIENT_LOG; then fail "client failed to communicate with server got server log: $(cat $SERVER_LOG) got client log: $(cat $CLIENT_LOG) " else pass "client successfully communitcated with server" fi # Check server log for expected output if expecting an # output if [ -n "${EXPECTED_SERVER_OUTPUT[$example]}" ]; then if ! 
grep -q "${EXPECTED_SERVER_OUTPUT[$example]}" $SERVER_LOG; then fail "server log missing output: ${EXPECTED_SERVER_OUTPUT[$example]} got server log: $(cat $SERVER_LOG) got client log: $(cat $CLIENT_LOG) " else pass "server log contains expected output: ${EXPECTED_SERVER_OUTPUT[$example]}" fi fi # Check client log for expected output if expecting an # output if [ -n "${EXPECTED_CLIENT_OUTPUT[$example]}" ]; then if ! grep -q "${EXPECTED_CLIENT_OUTPUT[$example]}" $CLIENT_LOG; then fail "client log missing output: ${EXPECTED_CLIENT_OUTPUT[$example]} got server log: $(cat $SERVER_LOG) got client log: $(cat $CLIENT_LOG) " else pass "client log contains expected output: ${EXPECTED_CLIENT_OUTPUT[$example]}" fi fi clean done grpc-go-1.29.1/examples/features/000077500000000000000000000000001365033716300166055ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/authentication/000077500000000000000000000000001365033716300216245ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/authentication/README.md000066400000000000000000000044701365033716300231100ustar00rootroot00000000000000# Authentication In grpc, authentication is abstracted as [`credentials.PerRPCCredentials`](https://godoc.org/google.golang.org/grpc/credentials#PerRPCCredentials). It usually also encompasses authorization. Users can configure it on a per-connection basis or a per-call basis. The example for authentication currently includes an example for using oauth2 with grpc. ## Try it ``` go run server/main.go ``` ``` go run client/main.go ``` ## Explanation ### OAuth2 OAuth 2.0 Protocol is a widely used authentication and authorization mechanism nowadays. And grpc provides convenient APIs to configure OAuth to use with grpc. Please refer to the godoc: https://godoc.org/google.golang.org/grpc/credentials/oauth for details. 
#### Client On client side, users should first get a valid oauth token, and then call [`credentials.NewOauthAccess`](https://godoc.org/google.golang.org/grpc/credentials/oauth#NewOauthAccess) to initialize a `credentials.PerRPCCredentials` with it. Next, if user wants to apply a single OAuth token for all RPC calls on the same connection, then configure grpc `Dial` with `DialOption` [`WithPerRPCCredentials`](https://godoc.org/google.golang.org/grpc#WithPerRPCCredentials). Or, if user wants to apply OAuth token per call, then configure the grpc RPC call with `CallOption` [`PerRPCCredentials`](https://godoc.org/google.golang.org/grpc#PerRPCCredentials). Note that OAuth requires the underlying transport to be secure (e.g. TLS, etc.) Inside grpc, the provided token is prefixed with the token type and a space, and is then attached to the metadata with the key "authorization". ### Server On server side, users usually get the token and verify it inside an interceptor. To get the token, call [`metadata.FromIncomingContext`](https://godoc.org/google.golang.org/grpc/metadata#FromIncomingContext) on the given context. It returns the metadata map. Next, use the key "authorization" to get corresponding value, which is a slice of strings. For OAuth, the slice should only contain one element, which is a string in the format of + " " + . Users can easily get the token by parsing the string, and then verify the validity of it. If the token is not valid, returns an error with error code `codes.Unauthenticated`. If the token is valid, then invoke the method handler to start processing the RPC. grpc-go-1.29.1/examples/features/authentication/client/000077500000000000000000000000001365033716300231025ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/authentication/client/main.go000066400000000000000000000050701365033716300243570ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // The client demonstrates how to supply an OAuth2 token for every RPC. package main import ( "context" "flag" "fmt" "log" "time" "golang.org/x/oauth2" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/oauth" ecpb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/testdata" ) var addr = flag.String("addr", "localhost:50051", "the address to connect to") func callUnaryEcho(client ecpb.EchoClient, message string) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() resp, err := client.UnaryEcho(ctx, &ecpb.EchoRequest{Message: message}) if err != nil { log.Fatalf("client.UnaryEcho(_) = _, %v: ", err) } fmt.Println("UnaryEcho: ", resp.Message) } func main() { flag.Parse() // Set up the credentials for the connection. perRPC := oauth.NewOauthAccess(fetchToken()) creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com") if err != nil { log.Fatalf("failed to load credentials: %v", err) } opts := []grpc.DialOption{ // In addition to the following grpc.DialOption, callers may also use // the grpc.CallOption grpc.PerRPCCredentials with the RPC invocation // itself. // See: https://godoc.org/google.golang.org/grpc#PerRPCCredentials grpc.WithPerRPCCredentials(perRPC), // oauth.NewOauthAccess requires the configuration of transport // credentials. 
grpc.WithTransportCredentials(creds), } opts = append(opts, grpc.WithBlock()) conn, err := grpc.Dial(*addr, opts...) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() rgc := ecpb.NewEchoClient(conn) callUnaryEcho(rgc, "hello world") } // fetchToken simulates a token lookup and omits the details of proper token // acquisition. For examples of how to acquire an OAuth2 token, see: // https://godoc.org/golang.org/x/oauth2 func fetchToken() *oauth2.Token { return &oauth2.Token{ AccessToken: "some-secret-token", } } grpc-go-1.29.1/examples/features/authentication/server/000077500000000000000000000000001365033716300231325ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/authentication/server/main.go000066400000000000000000000067651365033716300244230ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // The server demonstrates how to consume and validate OAuth2 tokens provided by // clients for each RPC. 
package main import ( "context" "crypto/tls" "flag" "fmt" "log" "net" "strings" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/grpc/testdata" pb "google.golang.org/grpc/examples/features/proto/echo" ) var ( errMissingMetadata = status.Errorf(codes.InvalidArgument, "missing metadata") errInvalidToken = status.Errorf(codes.Unauthenticated, "invalid token") ) var port = flag.Int("port", 50051, "the port to serve on") func main() { flag.Parse() fmt.Printf("server starting on port %d...\n", *port) cert, err := tls.LoadX509KeyPair(testdata.Path("server1.pem"), testdata.Path("server1.key")) if err != nil { log.Fatalf("failed to load key pair: %s", err) } opts := []grpc.ServerOption{ // The following grpc.ServerOption adds an interceptor for all unary // RPCs. To configure an interceptor for streaming RPCs, see: // https://godoc.org/google.golang.org/grpc#StreamInterceptor grpc.UnaryInterceptor(ensureValidToken), // Enable TLS for all incoming connections. grpc.Creds(credentials.NewServerTLSFromCert(&cert)), } s := grpc.NewServer(opts...) pb.RegisterEchoServer(s, &ecServer{}) lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } type ecServer struct { pb.UnimplementedEchoServer } func (s *ecServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { return &pb.EchoResponse{Message: req.Message}, nil } // valid validates the authorization. func valid(authorization []string) bool { if len(authorization) < 1 { return false } token := strings.TrimPrefix(authorization[0], "Bearer ") // Perform the token validation here. For the sake of this example, the code // here forgoes any of the usual OAuth2 token validation and instead checks // for a token matching an arbitrary string. 
return token == "some-secret-token" } // ensureValidToken ensures a valid token exists within a request's metadata. If // the token is missing or invalid, the interceptor blocks execution of the // handler and returns an error. Otherwise, the interceptor invokes the unary // handler. func ensureValidToken(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { md, ok := metadata.FromIncomingContext(ctx) if !ok { return nil, errMissingMetadata } // The keys within metadata.MD are normalized to lowercase. // See: https://godoc.org/google.golang.org/grpc/metadata#New if !valid(md["authorization"]) { return nil, errInvalidToken } // Continue execution of handler after ensuring a valid token. return handler(ctx, req) } grpc-go-1.29.1/examples/features/cancellation/000077500000000000000000000000001365033716300212415ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/cancellation/README.md000066400000000000000000000004431365033716300225210ustar00rootroot00000000000000# Cancellation This example shows how clients can cancel in-flight RPCs by canceling the context passed to the RPC call. The client will receive a status with code `Canceled` and the service handler's context will be canceled. ``` go run server/main.go ``` ``` go run client/main.go ``` grpc-go-1.29.1/examples/features/cancellation/client/000077500000000000000000000000001365033716300225175ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/cancellation/client/main.go000066400000000000000000000051221365033716300237720ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. package main import ( "context" "flag" "fmt" "log" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/status" ) var addr = flag.String("addr", "localhost:50051", "the address to connect to") func sendMessage(stream pb.Echo_BidirectionalStreamingEchoClient, msg string) error { fmt.Printf("sending message %q\n", msg) return stream.Send(&pb.EchoRequest{Message: msg}) } func recvMessage(stream pb.Echo_BidirectionalStreamingEchoClient, wantErrCode codes.Code) { res, err := stream.Recv() if status.Code(err) != wantErrCode { log.Fatalf("stream.Recv() = %v, %v; want _, status.Code(err)=%v", res, err, wantErrCode) } if err != nil { fmt.Printf("stream.Recv() returned expected error %v\n", err) return } fmt.Printf("received message %q\n", res.GetMessage()) } func main() { flag.Parse() // Set up a connection to the server. conn, err := grpc.Dial(*addr, grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() c := pb.NewEchoClient(conn) // Initiate the stream with a context that supports cancellation. ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) stream, err := c.BidirectionalStreamingEcho(ctx) if err != nil { log.Fatalf("error creating stream: %v", err) } // Send some test messages. 
if err := sendMessage(stream, "hello"); err != nil { log.Fatalf("error sending on stream: %v", err) } if err := sendMessage(stream, "world"); err != nil { log.Fatalf("error sending on stream: %v", err) } // Ensure the RPC is working. recvMessage(stream, codes.OK) recvMessage(stream, codes.OK) fmt.Println("cancelling context") cancel() // This Send may or may not return an error, depending on whether the // monitored context detects cancellation before the call is made. sendMessage(stream, "closed") // This Recv should never succeed. recvMessage(stream, codes.Canceled) } grpc-go-1.29.1/examples/features/cancellation/server/000077500000000000000000000000001365033716300225475ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/cancellation/server/main.go000066400000000000000000000030411365033716300240200ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. 
package main import ( "flag" "fmt" "io" "log" "net" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/features/proto/echo" ) var port = flag.Int("port", 50051, "the port to serve on") type server struct { pb.UnimplementedEchoServer } func (s *server) BidirectionalStreamingEcho(stream pb.Echo_BidirectionalStreamingEchoServer) error { for { in, err := stream.Recv() if err != nil { fmt.Printf("server: error receiving from stream: %v\n", err) if err == io.EOF { return nil } return err } fmt.Printf("echoing message %q\n", in.Message) stream.Send(&pb.EchoResponse{Message: in.Message}) } } func main() { flag.Parse() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } fmt.Printf("server listening at port %v\n", lis.Addr()) s := grpc.NewServer() pb.RegisterEchoServer(s, &server{}) s.Serve(lis) } grpc-go-1.29.1/examples/features/compression/000077500000000000000000000000001365033716300211465ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/compression/README.md000066400000000000000000000005031365033716300224230ustar00rootroot00000000000000# Compression This example shows how clients can specify compression options when performing RPCs, and how to install support for compressors on the server. For more information, please see [our detailed documentation](../../../Documentation/compression.md). ``` go run server/main.go ``` ``` go run client/main.go ``` grpc-go-1.29.1/examples/features/compression/client/000077500000000000000000000000001365033716300224245ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/compression/client/main.go000066400000000000000000000033641365033716300237050ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. package main import ( "context" "flag" "fmt" "log" "time" "google.golang.org/grpc" "google.golang.org/grpc/encoding/gzip" // Install the gzip compressor pb "google.golang.org/grpc/examples/features/proto/echo" ) var addr = flag.String("addr", "localhost:50051", "the address to connect to") func main() { flag.Parse() // Set up a connection to the server. conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() c := pb.NewEchoClient(conn) // Send the RPC compressed. If all RPCs on a client should be sent this // way, use the DialOption: // grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)) const msg = "compress" ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() res, err := c.UnaryEcho(ctx, &pb.EchoRequest{Message: msg}, grpc.UseCompressor(gzip.Name)) fmt.Printf("UnaryEcho call returned %q, %v\n", res.GetMessage(), err) if err != nil || res.GetMessage() != msg { log.Fatalf("Message=%q, err=%v; want Message=%q, err=", res.GetMessage(), err, msg) } } grpc-go-1.29.1/examples/features/compression/server/000077500000000000000000000000001365033716300224545ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/compression/server/main.go000066400000000000000000000027031365033716300237310ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. package main import ( "context" "flag" "fmt" "log" "net" "google.golang.org/grpc" _ "google.golang.org/grpc/encoding/gzip" // Install the gzip compressor pb "google.golang.org/grpc/examples/features/proto/echo" ) var port = flag.Int("port", 50051, "the port to serve on") type server struct { pb.UnimplementedEchoServer } func (s *server) UnaryEcho(ctx context.Context, in *pb.EchoRequest) (*pb.EchoResponse, error) { fmt.Printf("UnaryEcho called with message %q\n", in.GetMessage()) return &pb.EchoResponse{Message: in.Message}, nil } func main() { flag.Parse() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } fmt.Printf("server listening at %v\n", lis.Addr()) s := grpc.NewServer() pb.RegisterEchoServer(s, &server{}) s.Serve(lis) } grpc-go-1.29.1/examples/features/deadline/000077500000000000000000000000001365033716300203525ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/deadline/client/000077500000000000000000000000001365033716300216305ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/deadline/client/main.go000066400000000000000000000052621365033716300231100ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. package main import ( "context" "flag" "fmt" "log" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/status" ) var addr = flag.String("addr", "localhost:50052", "the address to connect to") func unaryCall(c pb.EchoClient, requestID int, message string, want codes.Code) { // Creates a context with a one second deadline for the RPC. ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() req := &pb.EchoRequest{Message: message} _, err := c.UnaryEcho(ctx, req) got := status.Code(err) fmt.Printf("[%v] wanted = %v, got = %v\n", requestID, want, got) } func streamingCall(c pb.EchoClient, requestID int, message string, want codes.Code) { // Creates a context with a one second deadline for the RPC. 
ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() stream, err := c.BidirectionalStreamingEcho(ctx) if err != nil { log.Printf("Stream err: %v", err) return } err = stream.Send(&pb.EchoRequest{Message: message}) if err != nil { log.Printf("Send error: %v", err) return } _, err = stream.Recv() got := status.Code(err) fmt.Printf("[%v] wanted = %v, got = %v\n", requestID, want, got) } func main() { flag.Parse() conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() c := pb.NewEchoClient(conn) // A successful request unaryCall(c, 1, "world", codes.OK) // Exceeds deadline unaryCall(c, 2, "delay", codes.DeadlineExceeded) // A successful request with propagated deadline unaryCall(c, 3, "[propagate me]world", codes.OK) // Exceeds propagated deadline unaryCall(c, 4, "[propagate me][propagate me]world", codes.DeadlineExceeded) // Receives a response from the stream successfully. streamingCall(c, 5, "[propagate me]world", codes.OK) // Exceeds propagated deadline before receiving a response streamingCall(c, 6, "[propagate me][propagate me]world", codes.DeadlineExceeded) } grpc-go-1.29.1/examples/features/deadline/server/000077500000000000000000000000001365033716300216605ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/deadline/server/main.go000066400000000000000000000056141365033716300231410ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. package main import ( "context" "flag" "fmt" "io" "log" "net" "strings" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" pb "google.golang.org/grpc/examples/features/proto/echo" ) var port = flag.Int("port", 50052, "port number") // server is used to implement EchoServer. type server struct { pb.UnimplementedEchoServer client pb.EchoClient cc *grpc.ClientConn } func (s *server) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { message := req.Message if strings.HasPrefix(message, "[propagate me]") { time.Sleep(800 * time.Millisecond) message = strings.TrimPrefix(message, "[propagate me]") return s.client.UnaryEcho(ctx, &pb.EchoRequest{Message: message}) } if message == "delay" { time.Sleep(1500 * time.Millisecond) } return &pb.EchoResponse{Message: req.Message}, nil } func (s *server) BidirectionalStreamingEcho(stream pb.Echo_BidirectionalStreamingEchoServer) error { for { req, err := stream.Recv() if err == io.EOF { return status.Error(codes.InvalidArgument, "request message not received") } if err != nil { return err } message := req.Message if strings.HasPrefix(message, "[propagate me]") { time.Sleep(800 * time.Millisecond) message = strings.TrimPrefix(message, "[propagate me]") res, err := s.client.UnaryEcho(stream.Context(), &pb.EchoRequest{Message: message}) if err != nil { return err } stream.Send(res) } if message == "delay" { time.Sleep(1500 * time.Millisecond) } stream.Send(&pb.EchoResponse{Message: message}) } } func (s *server) Close() { s.cc.Close() } func newEchoServer() *server { target := fmt.Sprintf("localhost:%v", *port) cc, err := grpc.Dial(target, grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } return &server{client: pb.NewEchoClient(cc), cc: cc} } func main() { flag.Parse() address := 
fmt.Sprintf(":%v", *port) lis, err := net.Listen("tcp", address) if err != nil { log.Fatalf("failed to listen: %v", err) } echoServer := newEchoServer() defer echoServer.Close() grpcServer := grpc.NewServer() pb.RegisterEchoServer(grpcServer, echoServer) if err := grpcServer.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } grpc-go-1.29.1/examples/features/debugging/000077500000000000000000000000001365033716300205405ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/debugging/README.md000066400000000000000000000017671365033716300220320ustar00rootroot00000000000000# Debugging Currently, grpc provides two major tools to help user debug issues, which are logging and channelz. ## Logs gRPC has put substantial logging instruments on critical paths of gRPC to help users debug issues. The [Log Levels](https://github.com/grpc/grpc-go/blob/master/Documentation/log_levels.md) doc describes what each log level means in the gRPC context. To turn on the logs for debugging, run the code with the following environment variable: `GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info`. ## Channelz We also provide a runtime debugging tool, Channelz, to help users with live debugging. See the channelz blog post here ([link](https://grpc.io/blog/a_short_introduction_to_channelz)) for details about how to use channelz service to debug live program. ## Try it The example is able to showcase how logging and channelz can help with debugging. See the channelz blog post linked above for full explanation. ``` go run server/main.go ``` ``` go run client/main.go ``` grpc-go-1.29.1/examples/features/debugging/client/000077500000000000000000000000001365033716300220165ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/debugging/client/main.go000066400000000000000000000047701365033716300233010ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. package main import ( "context" "log" "net" "os" "time" "google.golang.org/grpc" "google.golang.org/grpc/channelz/service" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" pb "google.golang.org/grpc/examples/helloworld/helloworld" ) const ( defaultName = "world" ) func main() { /***** Set up the server serving channelz service. *****/ lis, err := net.Listen("tcp", ":50052") if err != nil { log.Fatalf("failed to listen: %v", err) } defer lis.Close() s := grpc.NewServer() service.RegisterChannelzServiceToServer(s) go s.Serve(lis) defer s.Stop() /***** Initialize manual resolver and Dial *****/ r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() // Set up a connection to the server. conn, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName("round_robin")) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() // Manually provide resolved addresses for the target. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: ":10001"}, {Addr: ":10002"}, {Addr: ":10003"}}}) c := pb.NewGreeterClient(conn) // Contact the server and print out its response. name := defaultName if len(os.Args) > 1 { name = os.Args[1] } /***** Make 100 SayHello RPCs *****/ for i := 0; i < 100; i++ { // Setting a 150ms timeout on the RPC. 
ctx, cancel := context.WithTimeout(context.Background(), 150*time.Millisecond) defer cancel() r, err := c.SayHello(ctx, &pb.HelloRequest{Name: name}) if err != nil { log.Printf("could not greet: %v", err) } else { log.Printf("Greeting: %s", r.Message) } } /***** Wait for user exiting the program *****/ // Unless you exit the program (e.g. CTRL+C), channelz data will be available for querying. // Users can take time to examine and learn about the info provided by channelz. select {} } grpc-go-1.29.1/examples/features/debugging/server/000077500000000000000000000000001365033716300220465ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/debugging/server/main.go000066400000000000000000000046421365033716300233270ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. package main import ( "context" "log" "net" "time" "google.golang.org/grpc" "google.golang.org/grpc/channelz/service" "google.golang.org/grpc/internal/grpcrand" pb "google.golang.org/grpc/examples/helloworld/helloworld" ) var ( ports = []string{":10001", ":10002", ":10003"} ) // server is used to implement helloworld.GreeterServer. 
type server struct { pb.UnimplementedGreeterServer } // SayHello implements helloworld.GreeterServer func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { return &pb.HelloReply{Message: "Hello " + in.Name}, nil } // slow server is used to simulate a server that has a variable delay in its response. type slowServer struct { pb.UnimplementedGreeterServer } // SayHello implements helloworld.GreeterServer func (s *slowServer) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { // Delay 100ms ~ 200ms before replying time.Sleep(time.Duration(100+grpcrand.Intn(100)) * time.Millisecond) return &pb.HelloReply{Message: "Hello " + in.Name}, nil } func main() { /***** Set up the server serving channelz service. *****/ lis, err := net.Listen("tcp", ":50051") if err != nil { log.Fatalf("failed to listen: %v", err) } defer lis.Close() s := grpc.NewServer() service.RegisterChannelzServiceToServer(s) go s.Serve(lis) defer s.Stop() /***** Start three GreeterServers(with one of them to be the slowServer). *****/ for i := 0; i < 3; i++ { lis, err := net.Listen("tcp", ports[i]) if err != nil { log.Fatalf("failed to listen: %v", err) } defer lis.Close() s := grpc.NewServer() if i == 2 { pb.RegisterGreeterServer(s, &slowServer{}) } else { pb.RegisterGreeterServer(s, &server{}) } go s.Serve(lis) } /***** Wait for user exiting the program *****/ select {} } grpc-go-1.29.1/examples/features/encryption/000077500000000000000000000000001365033716300207775ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/encryption/ALTS/000077500000000000000000000000001365033716300215425ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/encryption/ALTS/client/000077500000000000000000000000001365033716300230205ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/encryption/ALTS/client/main.go000066400000000000000000000032471365033716300243010ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. package main import ( "context" "flag" "fmt" "log" "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials/alts" ecpb "google.golang.org/grpc/examples/features/proto/echo" ) var addr = flag.String("addr", "localhost:50051", "the address to connect to") func callUnaryEcho(client ecpb.EchoClient, message string) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() resp, err := client.UnaryEcho(ctx, &ecpb.EchoRequest{Message: message}) if err != nil { log.Fatalf("client.UnaryEcho(_) = _, %v: ", err) } fmt.Println("UnaryEcho: ", resp.Message) } func main() { flag.Parse() // Create alts based credential. altsTC := alts.NewClientCreds(alts.DefaultClientOptions()) // Set up a connection to the server. conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(altsTC), grpc.WithBlock()) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() // Make a echo client and send an RPC. rgc := ecpb.NewEchoClient(conn) callUnaryEcho(rgc, "hello world") } grpc-go-1.29.1/examples/features/encryption/ALTS/server/000077500000000000000000000000001365033716300230505ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/encryption/ALTS/server/main.go000066400000000000000000000030231365033716300243210ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. package main import ( "context" "flag" "fmt" "log" "net" "google.golang.org/grpc" "google.golang.org/grpc/credentials/alts" pb "google.golang.org/grpc/examples/features/proto/echo" ) var port = flag.Int("port", 50051, "the port to serve on") type ecServer struct { pb.UnimplementedEchoServer } func (s *ecServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { return &pb.EchoResponse{Message: req.Message}, nil } func main() { flag.Parse() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } // Create alts based credential. altsTC := alts.NewServerCreds(alts.DefaultServerOptions()) s := grpc.NewServer(grpc.Creds(altsTC)) // Register EchoServer on the server. pb.RegisterEchoServer(s, &ecServer{}) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } grpc-go-1.29.1/examples/features/encryption/README.md000066400000000000000000000072351365033716300222650ustar00rootroot00000000000000# Encryption The example for encryption includes two individual examples for TLS and ALTS encryption mechanism respectively. ## Try it In each example's subdirectory: ``` go run server/main.go ``` ``` go run client/main.go ``` ## Explanation ### TLS TLS is a commonly used cryptographic protocol to provide end-to-end communication security. 
In the example, we show how to set up a server authenticated TLS connection to transmit RPC. In our `grpc/credentials` package, we provide several convenience methods to create grpc [`credentials.TransportCredentials`](https://godoc.org/google.golang.org/grpc/credentials#TransportCredentials) base on TLS. Refer to the [godoc](https://godoc.org/google.golang.org/grpc/credentials) for details. In our example, we use the public/private keys created ahead: * "server1.pem" contains the server certificate (public key). * "server1.key" contains the server private key. * "ca.pem" contains the certificate (certificate authority) that can verify the server's certificate. On server side, we provide the paths to "server1.pem" and "server1.key" to configure TLS and create the server credential using [`credentials.NewServerTLSFromFile`](https://godoc.org/google.golang.org/grpc/credentials#NewServerTLSFromFile). On client side, we provide the path to the "ca.pem" to configure TLS and create the client credential using [`credentials.NewClientTLSFromFile`](https://godoc.org/google.golang.org/grpc/credentials#NewClientTLSFromFile). Note that we override the server name with "x.test.youtube.com", as the server certificate is valid for *.test.youtube.com but not localhost. It is solely for the convenience of making an example. Once the credentials have been created at both sides, we can start the server with the just created server credential (by calling [`grpc.Creds`](https://godoc.org/google.golang.org/grpc#Creds)) and let client dial to the server with the created client credential (by calling [`grpc.WithTransportCredentials`](https://godoc.org/google.golang.org/grpc#WithTransportCredentials)) And finally we make an RPC call over the created `grpc.ClientConn` to test the secure connection based upon TLS is successfully up. ### ALTS NOTE: ALTS currently needs special early access permission on GCP. 
You can ask about the detailed process in https://groups.google.com/forum/#!forum/grpc-io. ALTS is the Google's Application Layer Transport Security, which supports mutual authentication and transport encryption. Note that ALTS is currently only supported on Google Cloud Platform, and therefore you can only run the example successfully in a GCP environment. In our example, we show how to initiate a secure connection that is based on ALTS. Unlike TLS, ALTS makes certificate/key management transparent to user. So it is easier to set up. On server side, first call [`alts.DefaultServerOptions`](https://godoc.org/google.golang.org/grpc/credentials/alts#DefaultServerOptions) to get the configuration for alts and then provide the configuration to [`alts.NewServerCreds`](https://godoc.org/google.golang.org/grpc/credentials/alts#NewServerCreds) to create the server credential based upon alts. On client side, first call [`alts.DefaultClientOptions`](https://godoc.org/google.golang.org/grpc/credentials/alts#DefaultClientOptions) to get the configuration for alts and then provide the configuration to [`alts.NewClientCreds`](https://godoc.org/google.golang.org/grpc/credentials/alts#NewClientCreds) to create the client credential based upon alts. Next, same as TLS, start the server with the server credential and let client dial to server with the client credential. Finally, make an RPC to test the secure connection based upon ALTS is successfully up.grpc-go-1.29.1/examples/features/encryption/TLS/000077500000000000000000000000001365033716300214415ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/encryption/TLS/client/000077500000000000000000000000001365033716300227175ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/encryption/TLS/client/main.go000066400000000000000000000034561365033716300242020ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. package main import ( "context" "flag" "fmt" "log" "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ecpb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/testdata" ) var addr = flag.String("addr", "localhost:50051", "the address to connect to") func callUnaryEcho(client ecpb.EchoClient, message string) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() resp, err := client.UnaryEcho(ctx, &ecpb.EchoRequest{Message: message}) if err != nil { log.Fatalf("client.UnaryEcho(_) = _, %v: ", err) } fmt.Println("UnaryEcho: ", resp.Message) } func main() { flag.Parse() // Create tls based credential. creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com") if err != nil { log.Fatalf("failed to load credentials: %v", err) } // Set up a connection to the server. conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(creds), grpc.WithBlock()) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() // Make a echo client and send an RPC. 
rgc := ecpb.NewEchoClient(conn) callUnaryEcho(rgc, "hello world") } grpc-go-1.29.1/examples/features/encryption/TLS/server/000077500000000000000000000000001365033716300227475ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/encryption/TLS/server/main.go000066400000000000000000000032521365033716300242240ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. package main import ( "context" "flag" "fmt" "log" "net" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/testdata" pb "google.golang.org/grpc/examples/features/proto/echo" ) var port = flag.Int("port", 50051, "the port to serve on") type ecServer struct { pb.UnimplementedEchoServer } func (s *ecServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { return &pb.EchoResponse{Message: req.Message}, nil } func main() { flag.Parse() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } // Create tls based credential. creds, err := credentials.NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key")) if err != nil { log.Fatalf("failed to create credentials: %v", err) } s := grpc.NewServer(grpc.Creds(creds)) // Register EchoServer on the server. 
pb.RegisterEchoServer(s, &ecServer{}) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } grpc-go-1.29.1/examples/features/errors/000077500000000000000000000000001365033716300201215ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/errors/README.md000066400000000000000000000007231365033716300214020ustar00rootroot00000000000000# Description This example demonstrates the use of status details in grpc errors. # Run the sample code Run the server: ```sh $ go run ./server/main.go ``` Then run the client in another terminal: ```sh $ go run ./client/main.go ``` It should succeed and print the greeting it received from the server. Then run the client again: ```sh $ go run ./client/main.go ``` This time, it should fail by printing error status details that it received from the server. grpc-go-1.29.1/examples/features/errors/client/000077500000000000000000000000001365033716300213775ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/errors/client/main.go000066400000000000000000000033351365033716300226560ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. 
package main import ( "context" "flag" "log" "os" "time" epb "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/helloworld/helloworld" "google.golang.org/grpc/status" ) var addr = flag.String("addr", "localhost:50052", "the address to connect to") func main() { flag.Parse() // Set up a connection to the server. conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { log.Fatalf("did not connect: %v", err) } defer func() { if e := conn.Close(); e != nil { log.Printf("failed to close connection: %s", e) } }() c := pb.NewGreeterClient(conn) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() r, err := c.SayHello(ctx, &pb.HelloRequest{Name: "world"}) if err != nil { s := status.Convert(err) for _, d := range s.Details() { switch info := d.(type) { case *epb.QuotaFailure: log.Printf("Quota failure: %s", info) default: log.Printf("Unexpected type: %s", info) } } os.Exit(1) } log.Printf("Greeting: %s", r.Message) } grpc-go-1.29.1/examples/features/errors/server/000077500000000000000000000000001365033716300214275ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/errors/server/main.go000066400000000000000000000042031365033716300227010ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. 
package main import ( "context" "flag" "fmt" "log" "net" "sync" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" epb "google.golang.org/genproto/googleapis/rpc/errdetails" pb "google.golang.org/grpc/examples/helloworld/helloworld" ) var port = flag.Int("port", 50052, "port number") // server is used to implement helloworld.GreeterServer. type server struct { pb.UnimplementedGreeterServer mu sync.Mutex count map[string]int } // SayHello implements helloworld.GreeterServer func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { s.mu.Lock() defer s.mu.Unlock() // Track the number of times the user has been greeted. s.count[in.Name]++ if s.count[in.Name] > 1 { st := status.New(codes.ResourceExhausted, "Request limit exceeded.") ds, err := st.WithDetails( &epb.QuotaFailure{ Violations: []*epb.QuotaFailure_Violation{{ Subject: fmt.Sprintf("name:%s", in.Name), Description: "Limit one greeting per person", }}, }, ) if err != nil { return nil, st.Err() } return nil, ds.Err() } return &pb.HelloReply{Message: "Hello " + in.Name}, nil } func main() { flag.Parse() address := fmt.Sprintf(":%v", *port) lis, err := net.Listen("tcp", address) if err != nil { log.Fatalf("failed to listen: %v", err) } s := grpc.NewServer() pb.RegisterGreeterServer(s, &server{count: make(map[string]int)}) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } grpc-go-1.29.1/examples/features/health/000077500000000000000000000000001365033716300200525ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/health/README.md000066400000000000000000000047061365033716300213400ustar00rootroot00000000000000# Health gRPC provides a health library to communicate a system's health to their clients. It works by providing a service definition via the [health/v1](https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto) api. 
By using the health library, clients can gracefully avoid using servers as they encounter issues. Most languages provide an implementation out of box, making it interoperable between systems. ## Try it ``` go run server/main.go -port=50051 -sleep=5s go run server/main.go -port=50052 -sleep=10s ``` ``` go run client/main.go ``` ## Explanation ### Client Clients have two ways to monitor a servers health. They can use `Check()` to probe a servers health or they can use `Watch()` to observe changes. In most cases, clients do not need to directly check backend servers. Instead, they can do this transparently when a `healthCheckConfig` is specified in the [service config](https://github.com/grpc/proposal/blob/master/A17-client-side-health-checking.md#service-config-changes). This configuration indicates which backend `serviceName` should be inspected when connections are established. An empty string (`""`) typically indicates the overall health of a server should be reported. ```go // import grpc/health to enable transparent client side checking import _ "google.golang.org/grpc/health" // set up appropriate service config serviceConfig := grpc.WithDefaultServiceConfig(`{ "loadBalancingPolicy": "round_robin", "healthCheckConfig": { "serviceName": "" } }`) conn, err := grpc.Dial(..., serviceConfig) ``` See [A17 - Client-Side Health Checking](https://github.com/grpc/proposal/blob/master/A17-client-side-health-checking.md) for more details. ### Server Servers control their serving status. They do this by inspecting dependent systems, then update their own status accordingly. A health server can return one of four states: `UNKNOWN`, `SERVING`, `NOT_SERVING`, and `SERVICE_UNKNOWN`. `UNKNOWN` indicates the current state is not yet known. This state is often seen at the start up of a server instance. `SERVING` means that the system is healthy and ready to service requests. Conversely, `NOT_SERVING` indicates the system is unable to service requests at the time. 
`SERVICE_UNKNOWN` communicates the `serviceName` requested by the client is not known by the server. This status is only reported by the `Watch()` call. A server may toggle its health using `healthServer.SetServingStatus("serviceName", servingStatus)`. grpc-go-1.29.1/examples/features/health/client/000077500000000000000000000000001365033716300213305ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/health/client/main.go000066400000000000000000000035751365033716300226150ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package main import ( "context" "flag" "fmt" "log" "time" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/features/proto/echo" _ "google.golang.org/grpc/health" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" ) var serviceConfig = `{ "loadBalancingPolicy": "round_robin", "healthCheckConfig": { "serviceName": "" } }` func callUnaryEcho(c pb.EchoClient) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() r, err := c.UnaryEcho(ctx, &pb.EchoRequest{}) if err != nil { fmt.Println("UnaryEcho: _, ", err) } else { fmt.Println("UnaryEcho: ", r.GetMessage()) } } func main() { flag.Parse() r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() r.InitialState(resolver.State{ Addresses: []resolver.Address{ {Addr: "localhost:50051"}, {Addr: "localhost:50052"}, }, }) address := fmt.Sprintf("%s:///unused", r.Scheme()) options := []grpc.DialOption{ grpc.WithInsecure(), grpc.WithBlock(), grpc.WithDefaultServiceConfig(serviceConfig), } conn, err := grpc.Dial(address, options...) if err != nil { log.Fatalf("did not connect %v", err) } defer conn.Close() echoClient := pb.NewEchoClient(conn) for { callUnaryEcho(echoClient) time.Sleep(time.Second) } } grpc-go-1.29.1/examples/features/health/server/000077500000000000000000000000001365033716300213605ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/health/server/main.go000066400000000000000000000041301365033716300226310ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package main import ( "context" "flag" "fmt" "log" "net" "time" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" ) var ( port = flag.Int("port", 50051, "the port to serve on") sleep = flag.Duration("sleep", time.Second*5, "duration between changes in health") system = "" // empty string represents the health of the system ) type echoServer struct { pb.UnimplementedEchoServer } func (e *echoServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { return &pb.EchoResponse{ Message: fmt.Sprintf("hello from localhost:%d", *port), }, nil } var _ pb.EchoServer = &echoServer{} func main() { flag.Parse() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } s := grpc.NewServer() healthcheck := health.NewServer() healthpb.RegisterHealthServer(s, healthcheck) pb.RegisterEchoServer(s, &echoServer{}) go func() { // asynchronously inspect dependencies and toggle serving status as needed next := healthpb.HealthCheckResponse_SERVING for { healthcheck.SetServingStatus(system, next) if next == healthpb.HealthCheckResponse_SERVING { next = healthpb.HealthCheckResponse_NOT_SERVING } else { next = healthpb.HealthCheckResponse_SERVING } time.Sleep(*sleep) } }() if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } grpc-go-1.29.1/examples/features/interceptor/000077500000000000000000000000001365033716300211435ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/interceptor/README.md000066400000000000000000000120161365033716300224220ustar00rootroot00000000000000# Interceptor gRPC provides simple APIs to implement and install interceptors on a per ClientConn/Server basis. Interceptor intercepts the execution of each RPC call. 
Users can use interceptors to do logging, authentication/authorization, metrics collection, and many other functionality that can be shared across RPCs. ## Try it ``` go run server/main.go ``` ``` go run client/main.go ``` ## Explanation In gRPC, interceptors can be categorized into two kinds in terms of the type of RPC calls they intercept. The first one is the **unary interceptor**, which intercepts unary RPC calls. And the other is the **stream interceptor** which deals with streaming RPC calls. See [here](https://grpc.io/docs/guides/concepts.html#rpc-life-cycle) for explanation about unary RPCs and streaming RPCs. Each of client and server has their own types of unary and stream interceptors. Thus, there are in total four different types of interceptors in gRPC. ### Client-side #### Unary Interceptor [`UnaryClientInterceptor`](https://godoc.org/google.golang.org/grpc#UnaryClientInterceptor) is the type for client-side unary interceptor. It is essentially a function type with signature: `func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error`. An implementation of a unary interceptor can usually be divided into three parts: pre-processing, invoking RPC method, and post-processing. For pre-processing, users can get info about the current RPC call by examining the args passed in, such as RPC context, method string, request to be sent, and CallOptions configured. With the info, users can even modify the RPC call. For instance, in the example, we examine the list of CallOptions and see if call credential has been configured. If not, configure it to use oauth2 with token "some-secret-token" as fallback. In our example, we intentionally omit configuring the per RPC credential to resort to fallback. After pre-processing is done, use can invoke the RPC call by calling the `invoker`. Once the invoker returns the reply and error, user can do post-processing of the RPC call. 
Usually, it's about dealing with the returned reply and error. In the example, we log the RPC timing and error info. To install a unary interceptor on a ClientConn, configure `Dial` with `DialOption` [`WithUnaryInterceptor`](https://godoc.org/google.golang.org/grpc#WithUnaryInterceptor). #### Stream Interceptor [`StreamClientInterceptor`](https://godoc.org/google.golang.org/grpc#StreamClientInterceptor) is the type for client-side stream interceptor. It is a function type with signature: `func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)`. An implementation of a stream interceptor usually include pre-processing, and stream operation interception. For pre-processing, it's similar to unary interceptor. However, rather than doing the RPC method invocation and post-processing afterwards, stream interceptor intercepts the users' operation on the stream. First, the interceptor calls the passed-in `streamer` to get a `ClientStream`, and then wraps around the `ClientStream` and overloading its methods with intercepting logic. Finally, interceptors returns the wrapped `ClientStream` to user to operate on. In the example, we define a new struct `wrappedStream`, which is embedded with a `ClientStream`. Then, we implement (overload) the `SendMsg` and `RecvMsg` methods on `wrappedStream` to intercepts these two operations on the embedded `ClientStream`. In the example, we log the message type info and time info for interception purpose. To install the stream interceptor for a ClientConn, configure `Dial` with `DialOption` [`WithStreamInterceptor`](https://godoc.org/google.golang.org/grpc#WithStreamInterceptor). ### Server-side Server side interceptor is similar to client side, though with slightly different provided info. #### Unary Interceptor [`UnaryServerInterceptor`](https://godoc.org/google.golang.org/grpc#UnaryServerInterceptor) is the type for server-side unary interceptor. 
It is a function type with signature: `func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)`. Refer to client-side unary interceptor section for detailed implementation explanation. To install the unary interceptor for a Server, configure `NewServer` with `ServerOption` [`UnaryInterceptor`](https://godoc.org/google.golang.org/grpc#UnaryInterceptor). #### Stream Interceptor [`StreamServerInterceptor`](https://godoc.org/google.golang.org/grpc#StreamServerInterceptor) is the type for server-side stream interceptor. It is a function type with signature: `func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error`. Refer to client-side stream interceptor section for detailed implementation explanation. To install the unary interceptor for a Server, configure `NewServer` with `ServerOption` [`StreamInterceptor`](https://godoc.org/google.golang.org/grpc#StreamInterceptor). grpc-go-1.29.1/examples/features/interceptor/client/000077500000000000000000000000001365033716300224215ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/interceptor/client/main.go000066400000000000000000000114351365033716300237000ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. 
package main import ( "context" "flag" "fmt" "io" "log" "time" "golang.org/x/oauth2" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/oauth" ecpb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/testdata" ) var addr = flag.String("addr", "localhost:50051", "the address to connect to") const fallbackToken = "some-secret-token" // logger is to mock a sophisticated logging system. To simplify the example, we just print out the content. func logger(format string, a ...interface{}) { fmt.Printf("LOG:\t"+format+"\n", a...) } // unaryInterceptor is an example unary interceptor. func unaryInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { var credsConfigured bool for _, o := range opts { _, ok := o.(grpc.PerRPCCredsCallOption) if ok { credsConfigured = true break } } if !credsConfigured { opts = append(opts, grpc.PerRPCCredentials(oauth.NewOauthAccess(&oauth2.Token{ AccessToken: fallbackToken, }))) } start := time.Now() err := invoker(ctx, method, req, reply, cc, opts...) end := time.Now() logger("RPC: %s, start time: %s, end time: %s, err: %v", method, start.Format("Basic"), end.Format(time.RFC3339), err) return err } // wrappedStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and // SendMsg method call. type wrappedStream struct { grpc.ClientStream } func (w *wrappedStream) RecvMsg(m interface{}) error { logger("Receive a message (Type: %T) at %v", m, time.Now().Format(time.RFC3339)) return w.ClientStream.RecvMsg(m) } func (w *wrappedStream) SendMsg(m interface{}) error { logger("Send a message (Type: %T) at %v", m, time.Now().Format(time.RFC3339)) return w.ClientStream.SendMsg(m) } func newWrappedStream(s grpc.ClientStream) grpc.ClientStream { return &wrappedStream{s} } // streamInterceptor is an example stream interceptor. 
func streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { var credsConfigured bool for _, o := range opts { _, ok := o.(*grpc.PerRPCCredsCallOption) if ok { credsConfigured = true } } if !credsConfigured { opts = append(opts, grpc.PerRPCCredentials(oauth.NewOauthAccess(&oauth2.Token{ AccessToken: fallbackToken, }))) } s, err := streamer(ctx, desc, cc, method, opts...) if err != nil { return nil, err } return newWrappedStream(s), nil } func callUnaryEcho(client ecpb.EchoClient, message string) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() resp, err := client.UnaryEcho(ctx, &ecpb.EchoRequest{Message: message}) if err != nil { log.Fatalf("client.UnaryEcho(_) = _, %v: ", err) } fmt.Println("UnaryEcho: ", resp.Message) } func callBidiStreamingEcho(client ecpb.EchoClient) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() c, err := client.BidirectionalStreamingEcho(ctx) if err != nil { return } for i := 0; i < 5; i++ { if err := c.Send(&ecpb.EchoRequest{Message: fmt.Sprintf("Request %d", i+1)}); err != nil { log.Fatalf("failed to send request due to error: %v", err) } } c.CloseSend() for { resp, err := c.Recv() if err == io.EOF { break } if err != nil { log.Fatalf("failed to receive response due to error: %v", err) } fmt.Println("BidiStreaming Echo: ", resp.Message) } } func main() { flag.Parse() // Create tls based credential. creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com") if err != nil { log.Fatalf("failed to load credentials: %v", err) } // Set up a connection to the server. 
conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(creds), grpc.WithUnaryInterceptor(unaryInterceptor), grpc.WithStreamInterceptor(streamInterceptor), grpc.WithBlock()) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() // Make a echo client and send RPCs. rgc := ecpb.NewEchoClient(conn) callUnaryEcho(rgc, "hello world") callBidiStreamingEcho(rgc) } grpc-go-1.29.1/examples/features/interceptor/server/000077500000000000000000000000001365033716300224515ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/interceptor/server/main.go000066400000000000000000000110271365033716300237250ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. package main import ( "context" "flag" "fmt" "io" "log" "net" "strings" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/grpc/testdata" pb "google.golang.org/grpc/examples/features/proto/echo" ) var ( port = flag.Int("port", 50051, "the port to serve on") errMissingMetadata = status.Errorf(codes.InvalidArgument, "missing metadata") errInvalidToken = status.Errorf(codes.Unauthenticated, "invalid token") ) // logger is to mock a sophisticated logging system. To simplify the example, we just print out the content. 
func logger(format string, a ...interface{}) { fmt.Printf("LOG:\t"+format+"\n", a...) } type server struct { pb.UnimplementedEchoServer } func (s *server) UnaryEcho(ctx context.Context, in *pb.EchoRequest) (*pb.EchoResponse, error) { fmt.Printf("unary echoing message %q\n", in.Message) return &pb.EchoResponse{Message: in.Message}, nil } func (s *server) BidirectionalStreamingEcho(stream pb.Echo_BidirectionalStreamingEchoServer) error { for { in, err := stream.Recv() if err != nil { if err == io.EOF { return nil } fmt.Printf("server: error receiving from stream: %v\n", err) return err } fmt.Printf("bidi echoing message %q\n", in.Message) stream.Send(&pb.EchoResponse{Message: in.Message}) } } // valid validates the authorization. func valid(authorization []string) bool { if len(authorization) < 1 { return false } token := strings.TrimPrefix(authorization[0], "Bearer ") // Perform the token validation here. For the sake of this example, the code // here forgoes any of the usual OAuth2 token validation and instead checks // for a token matching an arbitrary string. return token == "some-secret-token" } func unaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { // authentication (token verification) md, ok := metadata.FromIncomingContext(ctx) if !ok { return nil, errMissingMetadata } if !valid(md["authorization"]) { return nil, errInvalidToken } m, err := handler(ctx, req) if err != nil { logger("RPC failed with error %v", err) } return m, err } // wrappedStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and // SendMsg method call. 
type wrappedStream struct { grpc.ServerStream } func (w *wrappedStream) RecvMsg(m interface{}) error { logger("Receive a message (Type: %T) at %s", m, time.Now().Format(time.RFC3339)) return w.ServerStream.RecvMsg(m) } func (w *wrappedStream) SendMsg(m interface{}) error { logger("Send a message (Type: %T) at %v", m, time.Now().Format(time.RFC3339)) return w.ServerStream.SendMsg(m) } func newWrappedStream(s grpc.ServerStream) grpc.ServerStream { return &wrappedStream{s} } func streamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { // authentication (token verification) md, ok := metadata.FromIncomingContext(ss.Context()) if !ok { return errMissingMetadata } if !valid(md["authorization"]) { return errInvalidToken } err := handler(srv, newWrappedStream(ss)) if err != nil { logger("RPC failed with error %v", err) } return err } func main() { flag.Parse() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } // Create tls based credential. creds, err := credentials.NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key")) if err != nil { log.Fatalf("failed to create credentials: %v", err) } s := grpc.NewServer(grpc.Creds(creds), grpc.UnaryInterceptor(unaryInterceptor), grpc.StreamInterceptor(streamInterceptor)) // Register EchoServer on the server. pb.RegisterEchoServer(s, &server{}) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } grpc-go-1.29.1/examples/features/keepalive/000077500000000000000000000000001365033716300205525ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/keepalive/README.md000066400000000000000000000005741365033716300220370ustar00rootroot00000000000000# Keepalive This example illustrates how to set up client-side keepalive pings and server-side keepalive ping enforcement and connection idleness settings. 
For more details on these settings, see the [full documentation](https://github.com/grpc/grpc-go/tree/master/Documentation/keepalive.md). ``` go run server/main.go ``` ``` GODEBUG=http2debug=2 go run client/main.go ``` grpc-go-1.29.1/examples/features/keepalive/client/000077500000000000000000000000001365033716300220305ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/keepalive/client/main.go000066400000000000000000000035451365033716300233120ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. 
package main import ( "context" "flag" "fmt" "log" "time" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/keepalive" ) var addr = flag.String("addr", "localhost:50052", "the address to connect to") var kacp = keepalive.ClientParameters{ Time: 10 * time.Second, // send pings every 10 seconds if there is no activity Timeout: time.Second, // wait 1 second for ping ack before considering the connection dead PermitWithoutStream: true, // send pings even without active streams } func main() { flag.Parse() conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithKeepaliveParams(kacp)) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() c := pb.NewEchoClient(conn) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) defer cancel() fmt.Println("Performing unary request") res, err := c.UnaryEcho(ctx, &pb.EchoRequest{Message: "keepalive demo"}) if err != nil { log.Fatalf("unexpected error from UnaryEcho: %v", err) } fmt.Println("RPC response:", res) select {} // Block forever; run with GODEBUG=http2debug=2 to observe ping frames and GOAWAYs due to idleness. } grpc-go-1.29.1/examples/features/keepalive/server/000077500000000000000000000000001365033716300220605ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/keepalive/server/main.go000066400000000000000000000045131365033716300233360ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. package main import ( "context" "flag" "fmt" "log" "net" "time" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" pb "google.golang.org/grpc/examples/features/proto/echo" ) var port = flag.Int("port", 50052, "port number") var kaep = keepalive.EnforcementPolicy{ MinTime: 5 * time.Second, // If a client pings more than once every 5 seconds, terminate the connection PermitWithoutStream: true, // Allow pings even when there are no active streams } var kasp = keepalive.ServerParameters{ MaxConnectionIdle: 15 * time.Second, // If a client is idle for 15 seconds, send a GOAWAY MaxConnectionAge: 30 * time.Second, // If any connection is alive for more than 30 seconds, send a GOAWAY MaxConnectionAgeGrace: 5 * time.Second, // Allow 5 seconds for pending RPCs to complete before forcibly closing connections Time: 5 * time.Second, // Ping the client if it is idle for 5 seconds to ensure the connection is still active Timeout: 1 * time.Second, // Wait 1 second for the ping ack before assuming the connection is dead } // server implements EchoServer. 
type server struct { pb.UnimplementedEchoServer } func (s *server) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { return &pb.EchoResponse{Message: req.Message}, nil } func main() { flag.Parse() address := fmt.Sprintf(":%v", *port) lis, err := net.Listen("tcp", address) if err != nil { log.Fatalf("failed to listen: %v", err) } s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp)) pb.RegisterEchoServer(s, &server{}) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } grpc-go-1.29.1/examples/features/load_balancing/000077500000000000000000000000001365033716300215225ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/load_balancing/README.md000066400000000000000000000061111365033716300230000ustar00rootroot00000000000000# Load balancing This examples shows how `ClientConn` can pick different load balancing policies. Note: to show the effect of load balancers, an example resolver is installed in this example to get the backend addresses. It's suggested to read the name resolver example before this example. ## Try it ``` go run server/main.go ``` ``` go run client/main.go ``` ## Explanation Two echo servers are serving on ":50051" and ":50052". They will include their serving address in the response. So the server on ":50051" will reply to the RPC with `this is examples/load_balancing (from :50051)`. Two clients are created, to connect to both of these servers (they get both server addresses from the name resolver). Each client picks a different load balancer (using `grpc.WithBalancerName`): `pick_first` or `round_robin`. (These two policies are supported in gRPC by default. To add a custom balancing policy, implement the interfaces defined in https://godoc.org/google.golang.org/grpc/balancer). Note that balancers can also be switched using service config, which allows service owners (instead of client owners) to pick the balancer to use. 
Service config doc is available at
https://github.com/grpc/grpc/blob/master/doc/service_config.md.

### pick_first

The first client is configured to use `pick_first`. `pick_first` tries to
connect to the first address, uses it for all RPCs if it connects, or tries the
next address if it fails (and keeps doing that until one connection is
successful). Because of this, all the RPCs will be sent to the same backend.
The responses received all show the same backend address.

```
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50051)
```

### round_robin

The second client is configured to use `round_robin`. `round_robin` connects to
all the addresses it sees, and sends an RPC to each backend one at a time in
order. E.g. the first RPC will be sent to backend-1, the second RPC will be
sent to backend-2, and the third RPC will be sent to backend-1 again.

```
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50052)
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50052)
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50052)
this is examples/load_balancing (from :50051)
this is examples/load_balancing (from :50052)
this is examples/load_balancing (from :50051)
```

Note that it's possible to see two consecutive RPCs sent to the same backend.
That's because `round_robin` only picks the connections ready for RPCs. So if
one of the two connections is not ready for some reason, all RPCs will be sent
to the ready connection.
grpc-go-1.29.1/examples/features/load_balancing/client/000077500000000000000000000000001365033716300230005ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/load_balancing/client/main.go000066400000000000000000000066531365033716300242650ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. package main import ( "context" "fmt" "log" "time" "google.golang.org/grpc" ecpb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/resolver" ) const ( exampleScheme = "example" exampleServiceName = "lb.example.grpc.io" ) var addrs = []string{"localhost:50051", "localhost:50052"} func callUnaryEcho(c ecpb.EchoClient, message string) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() r, err := c.UnaryEcho(ctx, &ecpb.EchoRequest{Message: message}) if err != nil { log.Fatalf("could not greet: %v", err) } fmt.Println(r.Message) } func makeRPCs(cc *grpc.ClientConn, n int) { hwc := ecpb.NewEchoClient(cc) for i := 0; i < n; i++ { callUnaryEcho(hwc, "this is examples/load_balancing") } } func main() { pickfirstConn, err := grpc.Dial( fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), // grpc.WithBalancerName("pick_first"), // "pick_first" is the default, so this DialOption is not necessary. 
grpc.WithInsecure(), grpc.WithBlock(), ) if err != nil { log.Fatalf("did not connect: %v", err) } defer pickfirstConn.Close() fmt.Println("--- calling helloworld.Greeter/SayHello with pick_first ---") makeRPCs(pickfirstConn, 10) fmt.Println() // Make another ClientConn with round_robin policy. roundrobinConn, err := grpc.Dial( fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), grpc.WithBalancerName("round_robin"), // This sets the initial balancing policy. grpc.WithInsecure(), grpc.WithBlock(), ) if err != nil { log.Fatalf("did not connect: %v", err) } defer roundrobinConn.Close() fmt.Println("--- calling helloworld.Greeter/SayHello with round_robin ---") makeRPCs(roundrobinConn, 10) } // Following is an example name resolver implementation. Read the name // resolution example to learn more about it. type exampleResolverBuilder struct{} func (*exampleResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { r := &exampleResolver{ target: target, cc: cc, addrsStore: map[string][]string{ exampleServiceName: addrs, }, } r.start() return r, nil } func (*exampleResolverBuilder) Scheme() string { return exampleScheme } type exampleResolver struct { target resolver.Target cc resolver.ClientConn addrsStore map[string][]string } func (r *exampleResolver) start() { addrStrs := r.addrsStore[r.target.Endpoint] addrs := make([]resolver.Address, len(addrStrs)) for i, s := range addrStrs { addrs[i] = resolver.Address{Addr: s} } r.cc.UpdateState(resolver.State{Addresses: addrs}) } func (*exampleResolver) ResolveNow(o resolver.ResolveNowOptions) {} func (*exampleResolver) Close() {} func init() { resolver.Register(&exampleResolverBuilder{}) } 
grpc-go-1.29.1/examples/features/load_balancing/server/000077500000000000000000000000001365033716300230305ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/load_balancing/server/main.go000066400000000000000000000030731365033716300243060ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. package main import ( "context" "fmt" "log" "net" "sync" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/features/proto/echo" ) var ( addrs = []string{":50051", ":50052"} ) type ecServer struct { pb.UnimplementedEchoServer addr string } func (s *ecServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { return &pb.EchoResponse{Message: fmt.Sprintf("%s (from %s)", req.Message, s.addr)}, nil } func startServer(addr string) { lis, err := net.Listen("tcp", addr) if err != nil { log.Fatalf("failed to listen: %v", err) } s := grpc.NewServer() pb.RegisterEchoServer(s, &ecServer{addr: addr}) log.Printf("serving on %s\n", addr) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } func main() { var wg sync.WaitGroup for _, addr := range addrs { wg.Add(1) go func(addr string) { defer wg.Done() startServer(addr) }(addr) } wg.Wait() } 
# Metadata example

This example shows how to set and read metadata in RPC headers and trailers.
Please see
[grpc-metadata.md](https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md)
for more information.

## Start the server

```
go run server/main.go
```

## Run the client

```
go run client/main.go
```

/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Binary client is an example client.
package main

import (
	"context"
	"flag"
	"fmt"
	"io"
	"log"
	"time"

	"google.golang.org/grpc"
	pb "google.golang.org/grpc/examples/features/proto/echo"
	"google.golang.org/grpc/metadata"
)

var addr = flag.String("addr", "localhost:50051", "the address to connect to")

const (
	timestampFormat = time.StampNano // "Jan _2 15:04:05.000"
	streamingCount  = 10
)

// unaryCallWithMetadata demonstrates sending metadata with a unary RPC and
// reading the server's header and trailer metadata from the response.
func unaryCallWithMetadata(c pb.EchoClient, message string) {
	fmt.Printf("--- unary ---\n")
	// Create metadata and context.
	md := metadata.Pairs("timestamp", time.Now().Format(timestampFormat))
	ctx := metadata.NewOutgoingContext(context.Background(), md)

	// Make RPC using the context with the metadata.
	var header, trailer metadata.MD
	r, err := c.UnaryEcho(ctx, &pb.EchoRequest{Message: message}, grpc.Header(&header), grpc.Trailer(&trailer))
	if err != nil {
		log.Fatalf("failed to call UnaryEcho: %v", err)
	}

	if t, ok := header["timestamp"]; ok {
		fmt.Printf("timestamp from header:\n")
		for i, e := range t {
			fmt.Printf(" %d. %s\n", i, e)
		}
	} else {
		log.Fatal("timestamp expected but doesn't exist in header")
	}
	if l, ok := header["location"]; ok {
		fmt.Printf("location from header:\n")
		for i, e := range l {
			fmt.Printf(" %d. %s\n", i, e)
		}
	} else {
		log.Fatal("location expected but doesn't exist in header")
	}
	fmt.Printf("response:\n")
	fmt.Printf(" - %s\n", r.Message)

	if t, ok := trailer["timestamp"]; ok {
		fmt.Printf("timestamp from trailer:\n")
		for i, e := range t {
			fmt.Printf(" %d. %s\n", i, e)
		}
	} else {
		log.Fatal("timestamp expected but doesn't exist in trailer")
	}
}

// serverStreamingWithMetadata demonstrates reading header metadata as soon as
// it arrives on a server-streaming RPC, and trailer metadata after the stream
// is drained.
func serverStreamingWithMetadata(c pb.EchoClient, message string) {
	fmt.Printf("--- server streaming ---\n")
	// Create metadata and context.
	md := metadata.Pairs("timestamp", time.Now().Format(timestampFormat))
	ctx := metadata.NewOutgoingContext(context.Background(), md)

	// Make RPC using the context with the metadata.
	stream, err := c.ServerStreamingEcho(ctx, &pb.EchoRequest{Message: message})
	if err != nil {
		log.Fatalf("failed to call ServerStreamingEcho: %v", err)
	}

	// Read the header when the header arrives.
	header, err := stream.Header()
	if err != nil {
		log.Fatalf("failed to get header from stream: %v", err)
	}
	// Read metadata from server's header.
	if t, ok := header["timestamp"]; ok {
		fmt.Printf("timestamp from header:\n")
		for i, e := range t {
			fmt.Printf(" %d. %s\n", i, e)
		}
	} else {
		log.Fatal("timestamp expected but doesn't exist in header")
	}
	if l, ok := header["location"]; ok {
		fmt.Printf("location from header:\n")
		for i, e := range l {
			fmt.Printf(" %d. %s\n", i, e)
		}
	} else {
		log.Fatal("location expected but doesn't exist in header")
	}

	// Read all the responses. Recv returns io.EOF on a clean end of stream.
	var rpcStatus error
	fmt.Printf("response:\n")
	for {
		r, err := stream.Recv()
		if err != nil {
			rpcStatus = err
			break
		}
		fmt.Printf(" - %s\n", r.Message)
	}
	if rpcStatus != io.EOF {
		log.Fatalf("failed to finish server streaming: %v", rpcStatus)
	}

	// Read the trailer after the RPC is finished.
	trailer := stream.Trailer()
	// Read metadata from server's trailer.
	if t, ok := trailer["timestamp"]; ok {
		fmt.Printf("timestamp from trailer:\n")
		for i, e := range t {
			fmt.Printf(" %d. %s\n", i, e)
		}
	} else {
		log.Fatal("timestamp expected but doesn't exist in trailer")
	}
}

// clientStreamWithMetadata demonstrates metadata on a client-streaming RPC:
// the header is read before sending, the trailer after CloseAndRecv.
func clientStreamWithMetadata(c pb.EchoClient, message string) {
	fmt.Printf("--- client streaming ---\n")
	// Create metadata and context.
	md := metadata.Pairs("timestamp", time.Now().Format(timestampFormat))
	ctx := metadata.NewOutgoingContext(context.Background(), md)

	// Make RPC using the context with the metadata.
	stream, err := c.ClientStreamingEcho(ctx)
	if err != nil {
		log.Fatalf("failed to call ClientStreamingEcho: %v\n", err)
	}

	// Read the header when the header arrives.
	header, err := stream.Header()
	if err != nil {
		log.Fatalf("failed to get header from stream: %v", err)
	}
	// Read metadata from server's header.
	if t, ok := header["timestamp"]; ok {
		fmt.Printf("timestamp from header:\n")
		for i, e := range t {
			fmt.Printf(" %d. %s\n", i, e)
		}
	} else {
		log.Fatal("timestamp expected but doesn't exist in header")
	}
	if l, ok := header["location"]; ok {
		fmt.Printf("location from header:\n")
		for i, e := range l {
			fmt.Printf(" %d. %s\n", i, e)
		}
	} else {
		log.Fatal("location expected but doesn't exist in header")
	}

	// Send all requests to the server.
	for i := 0; i < streamingCount; i++ {
		if err := stream.Send(&pb.EchoRequest{Message: message}); err != nil {
			log.Fatalf("failed to send streaming: %v\n", err)
		}
	}

	// Read the response.
	r, err := stream.CloseAndRecv()
	if err != nil {
		log.Fatalf("failed to CloseAndRecv: %v\n", err)
	}
	fmt.Printf("response:\n")
	fmt.Printf(" - %s\n\n", r.Message)

	// Read the trailer after the RPC is finished.
	trailer := stream.Trailer()
	// Read metadata from server's trailer.
	if t, ok := trailer["timestamp"]; ok {
		fmt.Printf("timestamp from trailer:\n")
		for i, e := range t {
			fmt.Printf(" %d. %s\n", i, e)
		}
	} else {
		log.Fatal("timestamp expected but doesn't exist in trailer")
	}
}

// bidirectionalWithMetadata demonstrates metadata on a bidirectional stream.
// A goroutine reads the header and sends requests; the main goroutine drains
// responses and then reads the trailer.
func bidirectionalWithMetadata(c pb.EchoClient, message string) {
	fmt.Printf("--- bidirectional ---\n")
	// Create metadata and context.
	md := metadata.Pairs("timestamp", time.Now().Format(timestampFormat))
	ctx := metadata.NewOutgoingContext(context.Background(), md)

	// Make RPC using the context with the metadata.
	stream, err := c.BidirectionalStreamingEcho(ctx)
	if err != nil {
		log.Fatalf("failed to call BidirectionalStreamingEcho: %v\n", err)
	}

	go func() {
		// Read the header when the header arrives.
		header, err := stream.Header()
		if err != nil {
			log.Fatalf("failed to get header from stream: %v", err)
		}
		// Read metadata from server's header.
		if t, ok := header["timestamp"]; ok {
			fmt.Printf("timestamp from header:\n")
			for i, e := range t {
				fmt.Printf(" %d. %s\n", i, e)
			}
		} else {
			log.Fatal("timestamp expected but doesn't exist in header")
		}
		if l, ok := header["location"]; ok {
			fmt.Printf("location from header:\n")
			for i, e := range l {
				fmt.Printf(" %d. %s\n", i, e)
			}
		} else {
			log.Fatal("location expected but doesn't exist in header")
		}

		// Send all requests to the server.
		for i := 0; i < streamingCount; i++ {
			if err := stream.Send(&pb.EchoRequest{Message: message}); err != nil {
				log.Fatalf("failed to send streaming: %v\n", err)
			}
		}
		stream.CloseSend()
	}()

	// Read all the responses. Recv returns io.EOF on a clean end of stream.
	var rpcStatus error
	fmt.Printf("response:\n")
	for {
		r, err := stream.Recv()
		if err != nil {
			rpcStatus = err
			break
		}
		fmt.Printf(" - %s\n", r.Message)
	}
	if rpcStatus != io.EOF {
		log.Fatalf("failed to finish server streaming: %v", rpcStatus)
	}

	// Read the trailer after the RPC is finished.
	trailer := stream.Trailer()
	// Read metadata from server's trailer.
	if t, ok := trailer["timestamp"]; ok {
		fmt.Printf("timestamp from trailer:\n")
		for i, e := range t {
			fmt.Printf(" %d. %s\n", i, e)
		}
	} else {
		log.Fatal("timestamp expected but doesn't exist in trailer")
	}
}

const message = "this is examples/metadata"

func main() {
	flag.Parse()
	// Set up a connection to the server.
	conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()

	c := pb.NewEchoClient(conn)

	unaryCallWithMetadata(c, message)
	time.Sleep(1 * time.Second)

	serverStreamingWithMetadata(c, message)
	time.Sleep(1 * time.Second)

	clientStreamWithMetadata(c, message)
	time.Sleep(1 * time.Second)

	bidirectionalWithMetadata(c, message)
}

/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Binary server is an example server.
package main

import (
	"context"
	"flag"
	"fmt"
	"io"
	"log"
	"math/rand"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"

	pb "google.golang.org/grpc/examples/features/proto/echo"
)

var port = flag.Int("port", 50051, "the port to serve on")

const (
	timestampFormat = time.StampNano
	streamingCount  = 10
)

type server struct {
	pb.UnimplementedEchoServer
}

// UnaryEcho echoes the request. It prints the client's "timestamp" metadata,
// sends a header with "location" and "timestamp", and sets a trailer with the
// function's return time.
func (s *server) UnaryEcho(ctx context.Context, in *pb.EchoRequest) (*pb.EchoResponse, error) {
	fmt.Printf("--- UnaryEcho ---\n")
	// Create trailer in defer to record function return time.
	defer func() {
		trailer := metadata.Pairs("timestamp", time.Now().Format(timestampFormat))
		grpc.SetTrailer(ctx, trailer)
	}()

	// Read metadata from client.
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return nil, status.Errorf(codes.DataLoss, "UnaryEcho: failed to get metadata")
	}
	if t, ok := md["timestamp"]; ok {
		fmt.Printf("timestamp from metadata:\n")
		for i, e := range t {
			fmt.Printf(" %d. %s\n", i, e)
		}
	}

	// Create and send header. The original ignored the SendHeader error;
	// propagate it so a broken stream is not silently swallowed.
	header := metadata.New(map[string]string{"location": "MTV", "timestamp": time.Now().Format(timestampFormat)})
	if err := grpc.SendHeader(ctx, header); err != nil {
		return nil, err
	}

	fmt.Printf("request received: %v, sending echo\n", in)

	return &pb.EchoResponse{Message: in.Message}, nil
}

// ServerStreamingEcho echoes the request message streamingCount times,
// demonstrating header/trailer metadata on a server-streaming RPC.
func (s *server) ServerStreamingEcho(in *pb.EchoRequest, stream pb.Echo_ServerStreamingEchoServer) error {
	fmt.Printf("--- ServerStreamingEcho ---\n")
	// Create trailer in defer to record function return time.
	defer func() {
		trailer := metadata.Pairs("timestamp", time.Now().Format(timestampFormat))
		stream.SetTrailer(trailer)
	}()

	// Read metadata from client.
	md, ok := metadata.FromIncomingContext(stream.Context())
	if !ok {
		return status.Errorf(codes.DataLoss, "ServerStreamingEcho: failed to get metadata")
	}
	if t, ok := md["timestamp"]; ok {
		fmt.Printf("timestamp from metadata:\n")
		for i, e := range t {
			fmt.Printf(" %d. %s\n", i, e)
		}
	}

	// Create and send header.
	header := metadata.New(map[string]string{"location": "MTV", "timestamp": time.Now().Format(timestampFormat)})
	if err := stream.SendHeader(header); err != nil {
		return err
	}

	fmt.Printf("request received: %v\n", in)

	// Read requests and send responses.
	for i := 0; i < streamingCount; i++ {
		fmt.Printf("echo message %v\n", in.Message)
		err := stream.Send(&pb.EchoResponse{Message: in.Message})
		if err != nil {
			return err
		}
	}
	return nil
}

// ClientStreamingEcho reads all requests and echoes back the last message
// received, demonstrating header/trailer metadata on a client-streaming RPC.
func (s *server) ClientStreamingEcho(stream pb.Echo_ClientStreamingEchoServer) error {
	fmt.Printf("--- ClientStreamingEcho ---\n")
	// Create trailer in defer to record function return time.
	defer func() {
		trailer := metadata.Pairs("timestamp", time.Now().Format(timestampFormat))
		stream.SetTrailer(trailer)
	}()

	// Read metadata from client.
	md, ok := metadata.FromIncomingContext(stream.Context())
	if !ok {
		return status.Errorf(codes.DataLoss, "ClientStreamingEcho: failed to get metadata")
	}
	if t, ok := md["timestamp"]; ok {
		fmt.Printf("timestamp from metadata:\n")
		for i, e := range t {
			fmt.Printf(" %d. %s\n", i, e)
		}
	}

	// Create and send header.
	header := metadata.New(map[string]string{"location": "MTV", "timestamp": time.Now().Format(timestampFormat)})
	if err := stream.SendHeader(header); err != nil {
		return err
	}

	// Read requests and send responses.
	var message string
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			fmt.Printf("echo last received message\n")
			return stream.SendAndClose(&pb.EchoResponse{Message: message})
		}
		// BUG FIX: the original read in.Message BEFORE checking err, which
		// panics on a nil *EchoRequest when Recv fails with a non-EOF error.
		if err != nil {
			return err
		}
		message = in.Message
		fmt.Printf("request received: %v, building echo\n", in)
	}
}

// BidirectionalStreamingEcho echoes each request as it arrives, demonstrating
// header/trailer metadata on a bidirectional-streaming RPC.
func (s *server) BidirectionalStreamingEcho(stream pb.Echo_BidirectionalStreamingEchoServer) error {
	fmt.Printf("--- BidirectionalStreamingEcho ---\n")
	// Create trailer in defer to record function return time.
	defer func() {
		trailer := metadata.Pairs("timestamp", time.Now().Format(timestampFormat))
		stream.SetTrailer(trailer)
	}()

	// Read metadata from client.
	md, ok := metadata.FromIncomingContext(stream.Context())
	if !ok {
		return status.Errorf(codes.DataLoss, "BidirectionalStreamingEcho: failed to get metadata")
	}
	if t, ok := md["timestamp"]; ok {
		fmt.Printf("timestamp from metadata:\n")
		for i, e := range t {
			fmt.Printf(" %d. %s\n", i, e)
		}
	}

	// Create and send header.
	header := metadata.New(map[string]string{"location": "MTV", "timestamp": time.Now().Format(timestampFormat)})
	if err := stream.SendHeader(header); err != nil {
		return err
	}

	// Read requests and send responses.
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Printf("request received %v, sending echo\n", in)
		if err := stream.Send(&pb.EchoResponse{Message: in.Message}); err != nil {
			return err
		}
	}
}

func main() {
	flag.Parse()
	rand.Seed(time.Now().UnixNano())

	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	fmt.Printf("server listening at %v\n", lis.Addr())

	s := grpc.NewServer()
	pb.RegisterEchoServer(s, &server{})
	// BUG FIX: the original dropped the Serve error; report it like the other
	// example servers do.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}

# Multiplex

A `grpc.ClientConn` can be shared by two stubs and two services can share a
`grpc.Server`. This example illustrates how to perform both types of sharing.

```
go run server/main.go
```

```
go run client/main.go
```

/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. package main import ( "context" "flag" "fmt" "log" "time" "google.golang.org/grpc" ecpb "google.golang.org/grpc/examples/features/proto/echo" hwpb "google.golang.org/grpc/examples/helloworld/helloworld" ) var addr = flag.String("addr", "localhost:50051", "the address to connect to") // callSayHello calls SayHello on c with the given name, and prints the // response. func callSayHello(c hwpb.GreeterClient, name string) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() r, err := c.SayHello(ctx, &hwpb.HelloRequest{Name: name}) if err != nil { log.Fatalf("client.SayHello(_) = _, %v", err) } fmt.Println("Greeting: ", r.Message) } func callUnaryEcho(client ecpb.EchoClient, message string) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() resp, err := client.UnaryEcho(ctx, &ecpb.EchoRequest{Message: message}) if err != nil { log.Fatalf("client.UnaryEcho(_) = _, %v: ", err) } fmt.Println("UnaryEcho: ", resp.Message) } func main() { flag.Parse() // Set up a connection to the server. conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() fmt.Println("--- calling helloworld.Greeter/SayHello ---") // Make a greeter client and send an RPC. hwc := hwpb.NewGreeterClient(conn) callSayHello(hwc, "multiplex") fmt.Println() fmt.Println("--- calling routeguide.RouteGuide/GetFeature ---") // Make a routeguild client with the same ClientConn. 
rgc := ecpb.NewEchoClient(conn) callUnaryEcho(rgc, "this is examples/multiplex") } grpc-go-1.29.1/examples/features/multiplex/server/000077500000000000000000000000001365033716300221365ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/multiplex/server/main.go000066400000000000000000000036231365033716300234150ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. package main import ( "context" "flag" "fmt" "log" "net" "google.golang.org/grpc" ecpb "google.golang.org/grpc/examples/features/proto/echo" hwpb "google.golang.org/grpc/examples/helloworld/helloworld" ) var port = flag.Int("port", 50051, "the port to serve on") // hwServer is used to implement helloworld.GreeterServer. 
type hwServer struct {
	hwpb.UnimplementedGreeterServer
}

// SayHello implements helloworld.GreeterServer
func (s *hwServer) SayHello(ctx context.Context, in *hwpb.HelloRequest) (*hwpb.HelloReply, error) {
	return &hwpb.HelloReply{Message: "Hello " + in.Name}, nil
}

// ecServer is used to implement grpc.examples.echo.EchoServer.
type ecServer struct {
	ecpb.UnimplementedEchoServer
}

// UnaryEcho echoes the request message back to the caller unchanged.
func (s *ecServer) UnaryEcho(ctx context.Context, req *ecpb.EchoRequest) (*ecpb.EchoResponse, error) {
	return &ecpb.EchoResponse{Message: req.Message}, nil
}

func main() {
	flag.Parse()
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port))
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	fmt.Printf("server listening at %v\n", lis.Addr())

	s := grpc.NewServer()
	// Register Greeter on the server.
	hwpb.RegisterGreeterServer(s, &hwServer{})
	// Register Echo on the same server. (Comment fixed: the original said
	// "RouteGuide", but the service registered here is Echo.)
	ecpb.RegisterEchoServer(s, &ecServer{})
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}

# Name resolving

This examples shows how `ClientConn` can pick different name resolvers.

## What is a name resolver

A name resolver can be seen as a `map[service-name][]backend-ip`. It takes a
service name, and returns a list of IPs of the backends. A common used name
resolver is DNS. In this example, a resolver is created to resolve
`resolver.example.grpc.io` to `localhost:50051`.

## Try it

```
go run server/main.go
```

```
go run client/main.go
```

## Explanation

The echo server is serving on ":50051". Two clients are created, one is
dialing to `passthrough:///localhost:50051`, while the other is dialing to
`example:///resolver.example.grpc.io`. Both of them can connect the server.

Name resolver is picked based on the `scheme` in the target string.
See https://github.com/grpc/grpc/blob/master/doc/naming.md for the target syntax. The first client picks the `passthrough` resolver, which takes the input, and use it as the backend addresses. The second is connecting to service name `resolver.example.grpc.io`. Without a proper name resolver, this would fail. In the example it picks the `example` resolver that we installed. The `example` resolver can handle `resolver.example.grpc.io` correctly by returning the backend address. So even though the backend IP is not set when ClientConn is created, the connection will be created to the correct backend.grpc-go-1.29.1/examples/features/name_resolving/client/000077500000000000000000000000001365033716300230735ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/name_resolving/client/main.go000066400000000000000000000100311365033716300243410ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. 
package main import ( "context" "fmt" "log" "time" "google.golang.org/grpc" ecpb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/resolver" ) const ( exampleScheme = "example" exampleServiceName = "resolver.example.grpc.io" backendAddr = "localhost:50051" ) func callUnaryEcho(c ecpb.EchoClient, message string) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() r, err := c.UnaryEcho(ctx, &ecpb.EchoRequest{Message: message}) if err != nil { log.Fatalf("could not greet: %v", err) } fmt.Println(r.Message) } func makeRPCs(cc *grpc.ClientConn, n int) { hwc := ecpb.NewEchoClient(cc) for i := 0; i < n; i++ { callUnaryEcho(hwc, "this is examples/name_resolving") } } func main() { passthroughConn, err := grpc.Dial( fmt.Sprintf("passthrough:///%s", backendAddr), // Dial to "passthrough:///localhost:50051" grpc.WithInsecure(), grpc.WithBlock(), ) if err != nil { log.Fatalf("did not connect: %v", err) } defer passthroughConn.Close() fmt.Printf("--- calling helloworld.Greeter/SayHello to \"passthrough:///%s\"\n", backendAddr) makeRPCs(passthroughConn, 10) fmt.Println() exampleConn, err := grpc.Dial( fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), // Dial to "example:///resolver.example.grpc.io" grpc.WithInsecure(), grpc.WithBlock(), ) if err != nil { log.Fatalf("did not connect: %v", err) } defer exampleConn.Close() fmt.Printf("--- calling helloworld.Greeter/SayHello to \"%s:///%s\"\n", exampleScheme, exampleServiceName) makeRPCs(exampleConn, 10) } // Following is an example name resolver. It includes a // ResolverBuilder(https://godoc.org/google.golang.org/grpc/resolver#Builder) // and a Resolver(https://godoc.org/google.golang.org/grpc/resolver#Resolver). // // A ResolverBuilder is registered for a scheme (in this example, "example" is // the scheme). When a ClientConn is created for this scheme, the // ResolverBuilder will be picked to build a Resolver. 
Note that a new Resolver // is built for each ClientConn. The Resolver will watch the updates for the // target, and send updates to the ClientConn. // exampleResolverBuilder is a // ResolverBuilder(https://godoc.org/google.golang.org/grpc/resolver#Builder). type exampleResolverBuilder struct{} func (*exampleResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { r := &exampleResolver{ target: target, cc: cc, addrsStore: map[string][]string{ exampleServiceName: {backendAddr}, }, } r.start() return r, nil } func (*exampleResolverBuilder) Scheme() string { return exampleScheme } // exampleResolver is a // Resolver(https://godoc.org/google.golang.org/grpc/resolver#Resolver). type exampleResolver struct { target resolver.Target cc resolver.ClientConn addrsStore map[string][]string } func (r *exampleResolver) start() { addrStrs := r.addrsStore[r.target.Endpoint] addrs := make([]resolver.Address, len(addrStrs)) for i, s := range addrStrs { addrs[i] = resolver.Address{Addr: s} } r.cc.UpdateState(resolver.State{Addresses: addrs}) } func (*exampleResolver) ResolveNow(o resolver.ResolveNowOptions) {} func (*exampleResolver) Close() {} func init() { // Register the example ResolverBuilder. This is usually done in a package's // init() function. resolver.Register(&exampleResolverBuilder{}) } grpc-go-1.29.1/examples/features/name_resolving/server/000077500000000000000000000000001365033716300231235ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/name_resolving/server/main.go000066400000000000000000000025471365033716300244060ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. package main import ( "context" "fmt" "log" "net" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/features/proto/echo" ) const addr = "localhost:50051" type ecServer struct { pb.UnimplementedEchoServer addr string } func (s *ecServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { return &pb.EchoResponse{Message: fmt.Sprintf("%s (from %s)", req.Message, s.addr)}, nil } func main() { lis, err := net.Listen("tcp", addr) if err != nil { log.Fatalf("failed to listen: %v", err) } s := grpc.NewServer() pb.RegisterEchoServer(s, &ecServer{addr: addr}) log.Printf("serving on %s\n", addr) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } grpc-go-1.29.1/examples/features/profiling/000077500000000000000000000000001365033716300205765ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/profiling/README.md000066400000000000000000000233311365033716300220570ustar00rootroot00000000000000# gRPC-Go Profiling - Author(s): adtac - Status: Experimental - Availability: gRPC-Go >= 1.27 - Last updated: December 17, 2019 gRPC-Go has built-in profiling that can be used to generate a detailed timeline of the lifecycle of an RPC request. This can be done on the client-side and the server-side. This directory contains an example client-server implementation with profiling enabled and some example commands you can run to remotely manage profiling. Typically, there are three logically separate parts involved in integrating profiling into your application: 1. 
Register the `Profiling` service: this requires a simple code change in your application. 1. Enable profiling when required: profiling is disabled by default and must be enabled remotely or at server initialization. 1. Download and process profiling data: once your application has collected enough profiling data, you must use a bundled command-line application to download your data and process it to generate human-friendly visualization. ## Registering the `Profiling` Service ### Server-Side Typically, you would create and register a server like so (some Go is shortened in the interest of brevity; please see the `server` subdirectory for a full implementation): ```go import ( "google.golang.org/grpc" profsvc "google.golang.org/grpc/profiling/service" pb "google.golang.org/grpc/examples/features/proto/echo" ) type server struct{} func main() error { s := grpc.NewServer() pb.RegisterEchoServer(s, &server{}) // Include this to register a profiling-specific service within your server. if err := profsvc.Init(&profsvc.ProfilingConfig{Server: s}); err != nil { fmt.Printf("error calling profsvc.Init: %v\n", err) return } lis, _ := net.Listen("tcp", address) s.Serve(lis) } ``` To register your server for profiling, simply call the `profsvc.Init` method as shown above. The passed `ProfilingConfig` parameter must set the `Server` field to a server that is being served on a TCP address. ### Client-Side To register profiling on the client-side, you must create a server to expose your profiling data in order for it to be retrievable. To do this, it is recommended that you create a dummy, dedicated server with no service other than profiling's. See the `client` directory for an example client. ## Enabling/Disabling Profiling Once profiling is baked into your server (unless otherwise specified, from here on, the word "server" will be used to refer to a `grpc.Server`, not the server/client distinction from the previous subsection), you need to enable profiling. 
There are three ways to do this -- at initialization, remotely post-initialization, or programmatically within Go. ### Enabling Profiling at Initialization To force profiling to start measuring data right from the first RPC, set the `Enabled` attribute of the `ProfilingConfig` struct to `true` when you are initializing profiling. ```go // Set Enabled: true to turn profiling on at initialization time. profsvc.Init(&profsvc.ProfilingConfig{ Server: s, Enabled: true, }) ``` ### Enabling/Disabling Remotely Alternatively, you can enable/disable profiling any time after server initialization by using a bundled command-line tool designed for remote profiling management. Assuming `example.com:50051` is the address of the server that you would like to enable profiling in, do the following: ```bash $ go run google.golang.org/grpc/profiling/cmd \ -address example.com:50051 \ -enable-profiling ``` Similarly, running the command with `-disable-profiling` can be used to disable profiling remotely. ### Enabling/Disabling Within Go In addition to the remote service that is exposed, you may enable/disable profiling within your application in Go: ```go import ( "google.golang.org/grpc/profiling" ) func setProfiling(enable bool) { profiling.Enable(true) } ``` The `profiling.Enable` function can be safely accessed and called concurrently. ## Downloading and Processing Profiling Data Once your server has collected enough profiling data, you may want to download that data and perform some analysis on the retrieved data. The aforementioned command-line application within gRPC comes bundled with support for both operations. To retrieve profiling data from a remote server, run the following command: ```bash $ go run google.golang.org/grpc/profiling/cmd \ -address example.com:50051 \ -retrieve-snapshot \ -snapshot /path/to/snapshot ``` You must provide a path to `-snapshot` that can be written to. This file will store the retrieved data in a raw and binary form. 
To process this data into a human-consumable such as [Catapult's trace-viewer format](https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview): ```bash $ go run google.golang.org/grpc/profiling/cmd \ -snapshot /path/to/snapshot \ -stream-stats-catapult-json /path/to/json ``` This would read the data stored in `/path/to/snapshot` and process it to generate a JSON format that is understood by Chromium's [Catapult project](https://chromium.googlesource.com/catapult). The Catapult project comes with a utility called [trace-viewer](https://chromium.googlesource.com/catapult/+/HEAD/tracing/README.md), which can be used to generate human-readable visualizations: ```bash $ git clone https://chromium.googlesource.com/catapult /path/to/catapult $ /path/to/catapult/tracing/bin/trace2html /path/to/json --output=/path/to/html ``` When the generated `/path/to/html` file is opened with a browser, you will be presented with a detailed visualization of the lifecycle of all RPC requests. To learn more about trace-viewer and how to navigate the generated HTML, see [this](https://chromium.googlesource.com/catapult/+/HEAD/tracing/README.md). ## Frequently Asked Questions ##### I have multiple `grpc.Server`s in my application. Can I register profiling with just one of them? You may not call `profsvc.Init` more than once -- all calls except for the first one will return an error. As a corollary, it is also not possible to register or enable/disable profiling for just one `grpc.Server` or operation. That is, you can enable/disable profiling globally for all gRPC operations or none at all. ##### Is `google.golang.org/grpc/profiling/cmd` the canonical implementation of a client that can talk to the profiling service? No, the command-line tool is simply provided as a reference implementation and as a convenience. You are free to write your own tool as long as it can communicate using the underlying protocol buffers. 
##### Is Catapult's `trace-viewer` the only option that is supported? Currently, yes. However, support for other (or better) visualization tools is welcome. ##### What is the impact of profiling on application performance? When turned off, profiling has virtually no impact on the performance (QPS, latency, memory footprint) of your application. However, when turned on, expect a 5-10% throughput/latency penalty and double the memory footprint. Profiling is mostly used by gRPC-Go devs. However, if you foresee using profiling in production machines, because of the negligible impact of profiling when turned off, you may want to register/initialize your applications with profiling (but leave it turned off). This will be useful in the off-chance you want to debug an application later -- in such an event, you can simply remotely toggle profiling using the `go run` command previously described to enable profiling data collection. Once you're confident that enough profiling data has been measured, you can turn it off again and retrieve the data for post-processing (see previous section). ##### How many RPCs worth of data is stored by profiling? I'd like to restrict the memory footprint of gRPC's profiling framework to a fixed amount. By default, at any given time, the last 214 RPCs worth of data is stored by profiling. Newly generated profiling data overwrites older data. Note that the internal data structure is not strictly LIFO in order to be performant (but is approximately LIFO). All profiling data is timestamped anyway, so a LIFO property is unnecessary. This number is configurable. When registering your server with profiling, you may specify the number of samples that should be stored, like so: ```go // Setting StreamStatsSize: 1024 will make profiling store the last 1024 // RPCs' data (if profiling is enabled, of course). 
profsvc.Init(&profsvc.ProfilingConfig{ Server: s, StreamStatsSize: 1024, }) ``` As an estimate, a typical unary RPC is expected produce ~2-3 KiB of profiling data in memory. This may be useful in estimating how many RPCs worth of data you can afford depending on your memory capacity. For more complex RPCs such as streaming RPCs, each RPC will consume more data. The amount of memory consumed by profiling is mostly independent of the size of messages your application handles. ##### The generated visualization is flat and has no flows/arrows. How do I distinguish between different RPCs? Unfortunately, there isn't any way to do this without some changes to the way your application is compiled. This is because gRPC's profiling relies on the Goroutine ID to uniquely identify different components. To enable this, first apply the following patch to your Go runtime installation directory: ```diff diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -392,6 +392,10 @@ type stack struct { hi uintptr } +func Goid() int64 { + return getg().goid +} + type g struct { // Stack parameters. // stack describes the actual stack memory: [stack.lo, stack.hi). ``` Then, recompile your application with `-tags grpcgoid` to generate a new binary. This binary should produce profiling data that is much nicer when visualized. grpc-go-1.29.1/examples/features/profiling/client/000077500000000000000000000000001365033716300220545ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/profiling/client/main.go000066400000000000000000000045071365033716300233350ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. package main import ( "context" "flag" "fmt" "log" "net" "time" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/features/proto/echo" profsvc "google.golang.org/grpc/profiling/service" ) var addr = flag.String("addr", "localhost:50051", "the address to connect to") var profilingPort = flag.Int("profilingPort", 50052, "port to expose the profiling service on") func setupClientProfiling() error { lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *profilingPort)) if err != nil { log.Printf("failed to listen: %v\n", err) return err } fmt.Printf("server listening at %v\n", lis.Addr()) s := grpc.NewServer() // Register this grpc.Server with profiling. pc := &profsvc.ProfilingConfig{ Server: s, Enabled: true, StreamStatsSize: 1024, } if err = profsvc.Init(pc); err != nil { fmt.Printf("error calling profsvc.Init: %v\n", err) return err } go s.Serve(lis) return nil } func main() { flag.Parse() if err := setupClientProfiling(); err != nil { log.Fatalf("error setting up profiling: %v\n", err) } // Set up a connection to the server. 
conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() c := pb.NewEchoClient(conn) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() res, err := c.UnaryEcho(ctx, &pb.EchoRequest{Message: "hello, profiling"}) fmt.Printf("UnaryEcho call returned %q, %v\n", res.GetMessage(), err) if err != nil { log.Fatalf("error calling UnaryEcho: %v", err) } log.Printf("sleeping for 30 seconds with exposed profiling service on :%d\n", *profilingPort) time.Sleep(30 * time.Second) } grpc-go-1.29.1/examples/features/profiling/server/000077500000000000000000000000001365033716300221045ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/profiling/server/main.go000066400000000000000000000032641365033716300233640ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. 
package main import ( "context" "flag" "fmt" "log" "net" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/features/proto/echo" profsvc "google.golang.org/grpc/profiling/service" ) var port = flag.Int("port", 50051, "the port to serve on") type server struct { pb.UnimplementedEchoServer } func (s *server) UnaryEcho(ctx context.Context, in *pb.EchoRequest) (*pb.EchoResponse, error) { fmt.Printf("UnaryEcho called with message %q\n", in.GetMessage()) return &pb.EchoResponse{Message: in.Message}, nil } func main() { flag.Parse() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } fmt.Printf("server listening at %v\n", lis.Addr()) s := grpc.NewServer() pb.RegisterEchoServer(s, &server{}) // Register your grpc.Server with profiling. pc := &profsvc.ProfilingConfig{ Server: s, Enabled: true, StreamStatsSize: 1024, } if err = profsvc.Init(pc); err != nil { fmt.Printf("error calling profsvc.Init: %v\n", err) return } s.Serve(lis) } grpc-go-1.29.1/examples/features/proto/000077500000000000000000000000001365033716300177505ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/proto/doc.go000066400000000000000000000013641365033716300210500ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate protoc -I ./echo --go_out=plugins=grpc,paths=source_relative:./echo ./echo/echo.proto // Package proto is for go generate. 
package proto grpc-go-1.29.1/examples/features/proto/echo/000077500000000000000000000000001365033716300206665ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/proto/echo/echo.pb.go000066400000000000000000000334761365033716300225500ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: echo.proto package echo import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // EchoRequest is the request for echo. 
type EchoRequest struct { Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EchoRequest) Reset() { *m = EchoRequest{} } func (m *EchoRequest) String() string { return proto.CompactTextString(m) } func (*EchoRequest) ProtoMessage() {} func (*EchoRequest) Descriptor() ([]byte, []int) { return fileDescriptor_08134aea513e0001, []int{0} } func (m *EchoRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EchoRequest.Unmarshal(m, b) } func (m *EchoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EchoRequest.Marshal(b, m, deterministic) } func (m *EchoRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_EchoRequest.Merge(m, src) } func (m *EchoRequest) XXX_Size() int { return xxx_messageInfo_EchoRequest.Size(m) } func (m *EchoRequest) XXX_DiscardUnknown() { xxx_messageInfo_EchoRequest.DiscardUnknown(m) } var xxx_messageInfo_EchoRequest proto.InternalMessageInfo func (m *EchoRequest) GetMessage() string { if m != nil { return m.Message } return "" } // EchoResponse is the response for echo. 
type EchoResponse struct { Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EchoResponse) Reset() { *m = EchoResponse{} } func (m *EchoResponse) String() string { return proto.CompactTextString(m) } func (*EchoResponse) ProtoMessage() {} func (*EchoResponse) Descriptor() ([]byte, []int) { return fileDescriptor_08134aea513e0001, []int{1} } func (m *EchoResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EchoResponse.Unmarshal(m, b) } func (m *EchoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EchoResponse.Marshal(b, m, deterministic) } func (m *EchoResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_EchoResponse.Merge(m, src) } func (m *EchoResponse) XXX_Size() int { return xxx_messageInfo_EchoResponse.Size(m) } func (m *EchoResponse) XXX_DiscardUnknown() { xxx_messageInfo_EchoResponse.DiscardUnknown(m) } var xxx_messageInfo_EchoResponse proto.InternalMessageInfo func (m *EchoResponse) GetMessage() string { if m != nil { return m.Message } return "" } func init() { proto.RegisterType((*EchoRequest)(nil), "grpc.examples.echo.EchoRequest") proto.RegisterType((*EchoResponse)(nil), "grpc.examples.echo.EchoResponse") } func init() { proto.RegisterFile("echo.proto", fileDescriptor_08134aea513e0001) } var fileDescriptor_08134aea513e0001 = []byte{ // 234 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x92, 0xb1, 0x4b, 0x03, 0x31, 0x14, 0x87, 0x3d, 0x11, 0xa5, 0x4f, 0xa7, 0xb8, 0x94, 0x2e, 0x96, 0x5b, 0xbc, 0x29, 0x29, 0x16, 0xff, 0x81, 0x8a, 0xbb, 0xb4, 0xb8, 0x88, 0x4b, 0x3c, 0x7f, 0xa6, 0x81, 0x5c, 0xde, 0xf9, 0x92, 0x8a, 0xfe, 0xed, 0x2e, 0x92, 0x2b, 0x05, 0x41, 0xba, 0xd5, 0x2d, 0x8f, 0x7c, 0xef, 0xfb, 0x96, 0x47, 0x84, 0x76, 0xcd, 0xba, 0x17, 0xce, 0xac, 0x94, 0x93, 0xbe, 0xd5, 0xf8, 0xb4, 0x5d, 
0x1f, 0x90, 0x74, 0xf9, 0xa9, 0xaf, 0xe9, 0xfc, 0xbe, 0x5d, 0xf3, 0x12, 0xef, 0x1b, 0xa4, 0xac, 0xc6, 0x74, 0xd6, 0x21, 0x25, 0xeb, 0x30, 0xae, 0xa6, 0x55, 0x33, 0x5a, 0xee, 0xc6, 0xba, 0xa1, 0x8b, 0x2d, 0x98, 0x7a, 0x8e, 0x09, 0xfb, 0xc9, 0x9b, 0xef, 0x63, 0x3a, 0x29, 0xa8, 0x7a, 0xa0, 0xd1, 0x63, 0xb4, 0xf2, 0x35, 0x0c, 0x57, 0xfa, 0x6f, 0x5d, 0xff, 0x4a, 0x4f, 0xa6, 0xfb, 0x81, 0x6d, 0xb2, 0x3e, 0x52, 0xcf, 0x74, 0xb9, 0x82, 0x7c, 0x40, 0x56, 0x59, 0x60, 0x3b, 0x1f, 0xdd, 0xc1, 0xdc, 0xb3, 0xaa, 0xd8, 0xef, 0x82, 0x47, 0xcc, 0x87, 0xb7, 0x37, 0x95, 0x02, 0x4d, 0x16, 0xfe, 0xd5, 0x0b, 0xda, 0xec, 0x39, 0xda, 0xf0, 0x1f, 0x91, 0x59, 0xb5, 0xb8, 0x7d, 0x9a, 0x3b, 0x66, 0x17, 0xa0, 0x1d, 0x07, 0x1b, 0x9d, 0x66, 0x71, 0xa6, 0xac, 0x9a, 0xdd, 0xaa, 0x79, 0x83, 0xcd, 0x1b, 0x41, 0x32, 0xc3, 0x59, 0x98, 0x62, 0x7a, 0x39, 0x1d, 0xde, 0xf3, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x23, 0x14, 0x26, 0x96, 0x30, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // EchoClient is the client API for Echo service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type EchoClient interface { // UnaryEcho is unary echo. UnaryEcho(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoResponse, error) // ServerStreamingEcho is server side streaming. ServerStreamingEcho(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (Echo_ServerStreamingEchoClient, error) // ClientStreamingEcho is client side streaming. ClientStreamingEcho(ctx context.Context, opts ...grpc.CallOption) (Echo_ClientStreamingEchoClient, error) // BidirectionalStreamingEcho is bidi streaming. 
BidirectionalStreamingEcho(ctx context.Context, opts ...grpc.CallOption) (Echo_BidirectionalStreamingEchoClient, error) } type echoClient struct { cc grpc.ClientConnInterface } func NewEchoClient(cc grpc.ClientConnInterface) EchoClient { return &echoClient{cc} } func (c *echoClient) UnaryEcho(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoResponse, error) { out := new(EchoResponse) err := c.cc.Invoke(ctx, "/grpc.examples.echo.Echo/UnaryEcho", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *echoClient) ServerStreamingEcho(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (Echo_ServerStreamingEchoClient, error) { stream, err := c.cc.NewStream(ctx, &_Echo_serviceDesc.Streams[0], "/grpc.examples.echo.Echo/ServerStreamingEcho", opts...) if err != nil { return nil, err } x := &echoServerStreamingEchoClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type Echo_ServerStreamingEchoClient interface { Recv() (*EchoResponse, error) grpc.ClientStream } type echoServerStreamingEchoClient struct { grpc.ClientStream } func (x *echoServerStreamingEchoClient) Recv() (*EchoResponse, error) { m := new(EchoResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *echoClient) ClientStreamingEcho(ctx context.Context, opts ...grpc.CallOption) (Echo_ClientStreamingEchoClient, error) { stream, err := c.cc.NewStream(ctx, &_Echo_serviceDesc.Streams[1], "/grpc.examples.echo.Echo/ClientStreamingEcho", opts...) 
if err != nil { return nil, err } x := &echoClientStreamingEchoClient{stream} return x, nil } type Echo_ClientStreamingEchoClient interface { Send(*EchoRequest) error CloseAndRecv() (*EchoResponse, error) grpc.ClientStream } type echoClientStreamingEchoClient struct { grpc.ClientStream } func (x *echoClientStreamingEchoClient) Send(m *EchoRequest) error { return x.ClientStream.SendMsg(m) } func (x *echoClientStreamingEchoClient) CloseAndRecv() (*EchoResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(EchoResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *echoClient) BidirectionalStreamingEcho(ctx context.Context, opts ...grpc.CallOption) (Echo_BidirectionalStreamingEchoClient, error) { stream, err := c.cc.NewStream(ctx, &_Echo_serviceDesc.Streams[2], "/grpc.examples.echo.Echo/BidirectionalStreamingEcho", opts...) if err != nil { return nil, err } x := &echoBidirectionalStreamingEchoClient{stream} return x, nil } type Echo_BidirectionalStreamingEchoClient interface { Send(*EchoRequest) error Recv() (*EchoResponse, error) grpc.ClientStream } type echoBidirectionalStreamingEchoClient struct { grpc.ClientStream } func (x *echoBidirectionalStreamingEchoClient) Send(m *EchoRequest) error { return x.ClientStream.SendMsg(m) } func (x *echoBidirectionalStreamingEchoClient) Recv() (*EchoResponse, error) { m := new(EchoResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // EchoServer is the server API for Echo service. type EchoServer interface { // UnaryEcho is unary echo. UnaryEcho(context.Context, *EchoRequest) (*EchoResponse, error) // ServerStreamingEcho is server side streaming. ServerStreamingEcho(*EchoRequest, Echo_ServerStreamingEchoServer) error // ClientStreamingEcho is client side streaming. ClientStreamingEcho(Echo_ClientStreamingEchoServer) error // BidirectionalStreamingEcho is bidi streaming. 
BidirectionalStreamingEcho(Echo_BidirectionalStreamingEchoServer) error } // UnimplementedEchoServer can be embedded to have forward compatible implementations. type UnimplementedEchoServer struct { } func (*UnimplementedEchoServer) UnaryEcho(ctx context.Context, req *EchoRequest) (*EchoResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UnaryEcho not implemented") } func (*UnimplementedEchoServer) ServerStreamingEcho(req *EchoRequest, srv Echo_ServerStreamingEchoServer) error { return status.Errorf(codes.Unimplemented, "method ServerStreamingEcho not implemented") } func (*UnimplementedEchoServer) ClientStreamingEcho(srv Echo_ClientStreamingEchoServer) error { return status.Errorf(codes.Unimplemented, "method ClientStreamingEcho not implemented") } func (*UnimplementedEchoServer) BidirectionalStreamingEcho(srv Echo_BidirectionalStreamingEchoServer) error { return status.Errorf(codes.Unimplemented, "method BidirectionalStreamingEcho not implemented") } func RegisterEchoServer(s *grpc.Server, srv EchoServer) { s.RegisterService(&_Echo_serviceDesc, srv) } func _Echo_UnaryEcho_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(EchoRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(EchoServer).UnaryEcho(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.examples.echo.Echo/UnaryEcho", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(EchoServer).UnaryEcho(ctx, req.(*EchoRequest)) } return interceptor(ctx, in, info, handler) } func _Echo_ServerStreamingEcho_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(EchoRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(EchoServer).ServerStreamingEcho(m, &echoServerStreamingEchoServer{stream}) } type Echo_ServerStreamingEchoServer interface { Send(*EchoResponse) 
error grpc.ServerStream } type echoServerStreamingEchoServer struct { grpc.ServerStream } func (x *echoServerStreamingEchoServer) Send(m *EchoResponse) error { return x.ServerStream.SendMsg(m) } func _Echo_ClientStreamingEcho_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(EchoServer).ClientStreamingEcho(&echoClientStreamingEchoServer{stream}) } type Echo_ClientStreamingEchoServer interface { SendAndClose(*EchoResponse) error Recv() (*EchoRequest, error) grpc.ServerStream } type echoClientStreamingEchoServer struct { grpc.ServerStream } func (x *echoClientStreamingEchoServer) SendAndClose(m *EchoResponse) error { return x.ServerStream.SendMsg(m) } func (x *echoClientStreamingEchoServer) Recv() (*EchoRequest, error) { m := new(EchoRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _Echo_BidirectionalStreamingEcho_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(EchoServer).BidirectionalStreamingEcho(&echoBidirectionalStreamingEchoServer{stream}) } type Echo_BidirectionalStreamingEchoServer interface { Send(*EchoResponse) error Recv() (*EchoRequest, error) grpc.ServerStream } type echoBidirectionalStreamingEchoServer struct { grpc.ServerStream } func (x *echoBidirectionalStreamingEchoServer) Send(m *EchoResponse) error { return x.ServerStream.SendMsg(m) } func (x *echoBidirectionalStreamingEchoServer) Recv() (*EchoRequest, error) { m := new(EchoRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _Echo_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.examples.echo.Echo", HandlerType: (*EchoServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "UnaryEcho", Handler: _Echo_UnaryEcho_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "ServerStreamingEcho", Handler: _Echo_ServerStreamingEcho_Handler, ServerStreams: true, }, { StreamName: "ClientStreamingEcho", Handler: _Echo_ClientStreamingEcho_Handler, ClientStreams: true, }, { 
StreamName: "BidirectionalStreamingEcho", Handler: _Echo_BidirectionalStreamingEcho_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "echo.proto", } grpc-go-1.29.1/examples/features/proto/echo/echo.proto000066400000000000000000000026141365033716300226740ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ syntax = "proto3"; package grpc.examples.echo; option go_package = "google.golang.org/grpc/examples/features/proto/echo"; // EchoRequest is the request for echo. message EchoRequest { string message = 1; } // EchoResponse is the response for echo. message EchoResponse { string message = 1; } // Echo is the echo service. service Echo { // UnaryEcho is unary echo. rpc UnaryEcho(EchoRequest) returns (EchoResponse) {} // ServerStreamingEcho is server side streaming. rpc ServerStreamingEcho(EchoRequest) returns (stream EchoResponse) {} // ClientStreamingEcho is client side streaming. rpc ClientStreamingEcho(stream EchoRequest) returns (EchoResponse) {} // BidirectionalStreamingEcho is bidi streaming. 
rpc BidirectionalStreamingEcho(stream EchoRequest) returns (stream EchoResponse) {} } grpc-go-1.29.1/examples/features/reflection/000077500000000000000000000000001365033716300207375ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/reflection/README.md000066400000000000000000000007331365033716300222210ustar00rootroot00000000000000# Reflection This example shows how reflection can be registered on a gRPC server. See https://github.com/grpc/grpc-go/blob/master/Documentation/server-reflection-tutorial.md for a tutorial. # Try it ```go go run server/main.go ``` There are multiple existing reflection clients. To use `gRPC CLI`, follow https://github.com/grpc/grpc-go/blob/master/Documentation/server-reflection-tutorial.md#grpc-cli. To use `grpcurl`, see https://github.com/fullstorydev/grpcurl. grpc-go-1.29.1/examples/features/reflection/server/000077500000000000000000000000001365033716300222455ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/reflection/server/main.go000066400000000000000000000040011365033716300235130ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. 
package main import ( "context" "flag" "fmt" "log" "net" "google.golang.org/grpc" "google.golang.org/grpc/reflection" ecpb "google.golang.org/grpc/examples/features/proto/echo" hwpb "google.golang.org/grpc/examples/helloworld/helloworld" ) var port = flag.Int("port", 50051, "the port to serve on") // hwServer is used to implement helloworld.GreeterServer. type hwServer struct { hwpb.UnimplementedGreeterServer } // SayHello implements helloworld.GreeterServer func (s *hwServer) SayHello(ctx context.Context, in *hwpb.HelloRequest) (*hwpb.HelloReply, error) { return &hwpb.HelloReply{Message: "Hello " + in.Name}, nil } type ecServer struct { ecpb.UnimplementedEchoServer } func (s *ecServer) UnaryEcho(ctx context.Context, req *ecpb.EchoRequest) (*ecpb.EchoResponse, error) { return &ecpb.EchoResponse{Message: req.Message}, nil } func main() { flag.Parse() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } fmt.Printf("server listening at %v\n", lis.Addr()) s := grpc.NewServer() // Register Greeter on the server. hwpb.RegisterGreeterServer(s, &hwServer{}) // Register RouteGuide on the same server. ecpb.RegisterEchoServer(s, &ecServer{}) // Register reflection service on gRPC server. reflection.Register(s) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } grpc-go-1.29.1/examples/features/retry/000077500000000000000000000000001365033716300177525ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/retry/README.md000066400000000000000000000036651365033716300212430ustar00rootroot00000000000000# Retry This example shows how to enable and configure retry on gRPC clients. ## Documentation [gRFC for client-side retry support](https://github.com/grpc/proposal/blob/master/A6-client-retries.md) ## Try it This example includes a service implementation that fails requests three times with status code `Unavailable`, then passes the fourth. 
The client is configured to make four retry attempts when receiving an `Unavailable` status code. First start the server: ```bash go run server/main.go ``` Then run the client. Note that when running the client, `GRPC_GO_RETRY=on` must be set in your environment: ```bash GRPC_GO_RETRY=on go run client/main.go ``` ## Usage ### Define your retry policy Retry is enabled via the service config, which can be provided by the name resolver or a DialOption (described below). In the below config, we set retry policy for the "grpc.example.echo.Echo" method. MaxAttempts: how many times to attempt the RPC before failing. InitialBackoff, MaxBackoff, BackoffMultiplier: configures delay between attempts. RetryableStatusCodes: Retry only when receiving these status codes. ```go var retryPolicy = `{ "methodConfig": [{ // config per method or all methods under service "name": [{"service": "grpc.examples.echo.Echo"}], "waitForReady": true, "retryPolicy": { "MaxAttempts": 4, "InitialBackoff": ".01s", "MaxBackoff": ".01s", "BackoffMultiplier": 1.0, // this value is grpc code "RetryableStatusCodes": [ "UNAVAILABLE" ] } }] }` ``` ### Providing the retry policy as a DialOption To use the above service config, pass it with `grpc.WithDefaultServiceConfig` to `grpc.Dial`. ```go conn, err := grpc.Dial(ctx,grpc.WithInsecure(), grpc.WithDefaultServiceConfig(retryPolicy)) ``` grpc-go-1.29.1/examples/features/retry/client/000077500000000000000000000000001365033716300212305ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/retry/client/main.go000066400000000000000000000040121365033716300225000ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client is an example client. package main import ( "context" "flag" "log" "time" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/features/proto/echo" ) var ( addr = flag.String("addr", "localhost:50052", "the address to connect to") // see https://github.com/grpc/grpc/blob/master/doc/service_config.md to know more about service config retryPolicy = `{ "methodConfig": [{ "name": [{"service": "grpc.examples.echo.Echo"}], "waitForReady": true, "retryPolicy": { "MaxAttempts": 4, "InitialBackoff": ".01s", "MaxBackoff": ".01s", "BackoffMultiplier": 1.0, "RetryableStatusCodes": [ "UNAVAILABLE" ] } }]}` ) // use grpc.WithDefaultServiceConfig() to set service config func retryDial() (*grpc.ClientConn, error) { return grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithDefaultServiceConfig(retryPolicy)) } func main() { flag.Parse() // Set up a connection to the server. 
conn, err := retryDial() if err != nil { log.Fatalf("did not connect: %v", err) } defer func() { if e := conn.Close(); e != nil { log.Printf("failed to close connection: %s", e) } }() c := pb.NewEchoClient(conn) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() reply, err := c.UnaryEcho(ctx, &pb.EchoRequest{Message: "Try and Success"}) if err != nil { log.Fatalf("UnaryEcho error: %v", err) } log.Printf("UnaryEcho reply: %v", reply) } grpc-go-1.29.1/examples/features/retry/server/000077500000000000000000000000001365033716300212605ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/retry/server/main.go000066400000000000000000000043421365033716300225360ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server is an example server. package main import ( "context" "flag" "fmt" "log" "net" "sync" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" pb "google.golang.org/grpc/examples/features/proto/echo" ) var port = flag.Int("port", 50052, "port number") type failingServer struct { pb.UnimplementedEchoServer mu sync.Mutex reqCounter uint reqModulo uint } // this method will fail reqModulo - 1 times RPCs and return status code Unavailable, // and succeeded RPC on reqModulo times. 
func (s *failingServer) maybeFailRequest() error { s.mu.Lock() defer s.mu.Unlock() s.reqCounter++ if (s.reqModulo > 0) && (s.reqCounter%s.reqModulo == 0) { return nil } return status.Errorf(codes.Unavailable, "maybeFailRequest: failing it") } func (s *failingServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { if err := s.maybeFailRequest(); err != nil { log.Println("request failed count:", s.reqCounter) return nil, err } log.Println("request succeeded count:", s.reqCounter) return &pb.EchoResponse{Message: req.Message}, nil } func main() { flag.Parse() address := fmt.Sprintf(":%v", *port) lis, err := net.Listen("tcp", address) if err != nil { log.Fatalf("failed to listen: %v", err) } fmt.Println("listen on address", address) s := grpc.NewServer() // Configure server to pass every fourth RPC; // client is configured to make four attempts. failingservice := &failingServer{ reqCounter: 0, reqModulo: 4, } pb.RegisterEchoServer(s, failingservice) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } grpc-go-1.29.1/examples/features/wait_for_ready/000077500000000000000000000000001365033716300216035ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/wait_for_ready/README.md000066400000000000000000000007401365033716300230630ustar00rootroot00000000000000# Wait for ready example This example shows how to enable "wait for ready" in RPC calls. This code starts a server with a 2 seconds delay. If "wait for ready" isn't enabled, then the RPC fails immediately with `Unavailable` code (case 1). If "wait for ready" is enabled, then the RPC waits for the server. If context dies before the server is available, then it fails with `DeadlineExceeded` (case 3). Otherwise it succeeds (case 2). ## Run the example ``` go run main.go ``` grpc-go-1.29.1/examples/features/wait_for_ready/main.go000066400000000000000000000055311365033716300230620ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary wait_for_ready is an example for "wait for ready". package main import ( "context" "fmt" "log" "net" "sync" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" pb "google.golang.org/grpc/examples/features/proto/echo" ) // server is used to implement EchoServer. type server struct { pb.UnimplementedEchoServer } func (s *server) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { return &pb.EchoResponse{Message: req.Message}, nil } // serve starts listening with a 2 seconds delay. func serve() { lis, err := net.Listen("tcp", ":50053") if err != nil { log.Fatalf("failed to listen: %v", err) } s := grpc.NewServer() pb.RegisterEchoServer(s, &server{}) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } func main() { conn, err := grpc.Dial("localhost:50053", grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() c := pb.NewEchoClient(conn) var wg sync.WaitGroup wg.Add(3) // "Wait for ready" is not enabled, returns error with code "Unavailable". go func() { defer wg.Done() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() _, err := c.UnaryEcho(ctx, &pb.EchoRequest{Message: "Hi!"}) got := status.Code(err) fmt.Printf("[1] wanted = %v, got = %v\n", codes.Unavailable, got) }() // "Wait for ready" is enabled, returns nil error. 
go func() { defer wg.Done() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() _, err := c.UnaryEcho(ctx, &pb.EchoRequest{Message: "Hi!"}, grpc.WaitForReady(true)) got := status.Code(err) fmt.Printf("[2] wanted = %v, got = %v\n", codes.OK, got) }() // "Wait for ready" is enabled but exceeds the deadline before server starts listening, // returns error with code "DeadlineExceeded". go func() { defer wg.Done() ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() _, err := c.UnaryEcho(ctx, &pb.EchoRequest{Message: "Hi!"}, grpc.WaitForReady(true)) got := status.Code(err) fmt.Printf("[3] wanted = %v, got = %v\n", codes.DeadlineExceeded, got) }() time.Sleep(2 * time.Second) go serve() wg.Wait() } grpc-go-1.29.1/examples/features/xds/000077500000000000000000000000001365033716300174035ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/xds/README.md000066400000000000000000000024051365033716300206630ustar00rootroot00000000000000# gRPC xDS example xDS is the protocol initially used by Envoy, that is evolving into a universal data plan API for service mesh. The xDS example is a Hello World client/server capable of being configured with the XDS management protocol. Out-of-the-box it behaves the same as [our other hello world example](https://github.com/grpc/grpc-go/tree/master/examples/helloworld). The server replies with responses including its hostname. **Note** that xDS support is incomplete and experimental, with limited compatibility. ## xDS environment setup This example doesn't include instuctions to setup xDS environment. Please refer to documentation specific for your xDS management server. The client also needs a bootstrap file. See [gRFC A27](https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md#xdsclient-and-bootstrap-file) for the bootstrap format. 
## The client The client application needs to import the xDS package to install the resolver and balancers: ```go _ "google.golang.org/grpc/xds/experimental" // To install the xds resolvers and balancers. ``` Then, use `xds-experimental` target scheme for the ClientConn. ``` $ export GRPC_XDS_BOOTSTRAP=/path/to/bootstrap.json $ go run client/main.go "xDS world" xds-experimental:///target_service ``` grpc-go-1.29.1/examples/features/xds/client/000077500000000000000000000000001365033716300206615ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/xds/client/main.go000066400000000000000000000041201365033716300221310ustar00rootroot00000000000000// +build go1.11 /* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package main implements a client for Greeter service. package main import ( "context" "flag" "fmt" "log" "time" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/helloworld/helloworld" _ "google.golang.org/grpc/xds/experimental" // To install the xds resolvers and balancers. ) const ( defaultTarget = "localhost:50051" defaultName = "world" ) var help = flag.Bool("help", false, "Print usage information") func init() { flag.Usage = func() { fmt.Fprintf(flag.CommandLine.Output(), ` Usage: client [name [target]] name The name you wish to be greeted by. Defaults to %q target The URI of the server, e.g. "xds-experimental:///helloworld-service". 
Defaults to %q `, defaultName, defaultTarget) flag.PrintDefaults() } } func main() { flag.Parse() if *help { flag.Usage() return } args := flag.Args() if len(args) > 2 { flag.Usage() return } name := defaultName if len(args) > 0 { name = args[0] } target := defaultTarget if len(args) > 1 { target = args[1] } // Set up a connection to the server. conn, err := grpc.Dial(target, grpc.WithInsecure()) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() c := pb.NewGreeterClient(conn) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() r, err := c.SayHello(ctx, &pb.HelloRequest{Name: name}) if err != nil { log.Fatalf("could not greet: %v", err) } log.Printf("Greeting: %s", r.GetMessage()) } grpc-go-1.29.1/examples/features/xds/server/000077500000000000000000000000001365033716300207115ustar00rootroot00000000000000grpc-go-1.29.1/examples/features/xds/server/main.go000066400000000000000000000060321365033716300221650ustar00rootroot00000000000000// +build go1.11 /* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package main starts Greeter service that will response with the hostname. 
package main import ( "context" "flag" "fmt" "log" "math/rand" "net" "os" "strconv" "time" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/helloworld/helloworld" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/reflection" ) var help = flag.Bool("help", false, "Print usage information") const ( defaultPort = 50051 ) // server is used to implement helloworld.GreeterServer. type server struct { pb.UnimplementedGreeterServer serverName string } func newServer(serverName string) *server { return &server{ serverName: serverName, } } // SayHello implements helloworld.GreeterServer func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { log.Printf("Received: %v", in.GetName()) return &pb.HelloReply{Message: "Hello " + in.GetName() + ", from " + s.serverName}, nil } func determineHostname() string { hostname, err := os.Hostname() if err != nil { log.Printf("Failed to get hostname: %v, will generate one", err) rand.Seed(time.Now().UnixNano()) return fmt.Sprintf("generated-%03d", rand.Int()%100) } return hostname } func init() { flag.Usage = func() { fmt.Fprintf(flag.CommandLine.Output(), ` Usage: server [port [hostname]] port The listen port. Defaults to %d hostname The name clients will see in greet responses. 
Defaults to the machine's hostname `, defaultPort) flag.PrintDefaults() } } func main() { flag.Parse() if *help { flag.Usage() return } args := flag.Args() if len(args) > 2 { flag.Usage() return } port := defaultPort if len(args) > 0 { var err error port, err = strconv.Atoi(args[0]) if err != nil { log.Printf("Invalid port number: %v", err) flag.Usage() return } } var hostname string if len(args) > 1 { hostname = args[1] } if hostname == "" { hostname = determineHostname() } lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", port)) if err != nil { log.Fatalf("failed to listen: %v", err) } s := grpc.NewServer() pb.RegisterGreeterServer(s, newServer(hostname)) reflection.Register(s) healthServer := health.NewServer() healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) healthpb.RegisterHealthServer(s, healthServer) log.Printf("serving on %s, hostname %s", lis.Addr(), hostname) s.Serve(lis) } grpc-go-1.29.1/examples/gotutorial.md000066400000000000000000000515101365033716300175040ustar00rootroot00000000000000# gRPC Basics: Go This tutorial provides a basic Go programmer's introduction to working with gRPC. By walking through this example you'll learn how to: - Define a service in a `.proto` file. - Generate server and client code using the protocol buffer compiler. - Use the Go gRPC API to write a simple client and server for your service. It assumes that you have read the [Getting started](https://github.com/grpc/grpc/tree/master/examples) guide and are familiar with [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). Note that the example in this tutorial uses the proto3 version of the protocol buffers language, you can find out more in the [proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) and see the [release notes](https://github.com/google/protobuf/releases) for the new version in the protocol buffers Github repository. 
This isn't a comprehensive guide to using gRPC in Go: more reference documentation is coming soon. ## Why use gRPC? Our example is a simple route mapping application that lets clients get information about features on their route, create a summary of their route, and exchange route information such as traffic updates with the server and other clients. With gRPC we can define our service once in a `.proto` file and implement clients and servers in any of gRPC's supported languages, which in turn can be run in environments ranging from servers inside Google to your own tablet - all the complexity of communication between different languages and environments is handled for you by gRPC. We also get all the advantages of working with protocol buffers, including efficient serialization, a simple IDL, and easy interface updating. ## Example code and setup The example code for our tutorial is in [grpc/grpc-go/examples/route_guide](https://github.com/grpc/grpc-go/tree/master/examples/route_guide). To download the example, clone the `grpc-go` repository by running the following command: ```shell $ go get google.golang.org/grpc ``` Then change your current directory to `grpc-go/examples/route_guide`: ```shell $ cd $GOPATH/src/google.golang.org/grpc/examples/route_guide ``` You also should have the relevant tools installed to generate the server and client interface code - if you don't already, follow the setup instructions in [the Go quick start guide](https://github.com/grpc/grpc-go/tree/master/examples/). ## Defining the service Our first step (as you'll know from the [quick start](https://grpc.io/docs/#quick-start)) is to define the gRPC *service* and the method *request* and *response* types using [protocol buffers](https://developers.google.com/protocol-buffers/docs/overview). You can see the complete `.proto` file in [examples/route_guide/routeguide/route_guide.proto](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/routeguide/route_guide.proto). 
To define a service, you specify a named `service` in your `.proto` file: ```proto service RouteGuide { ... } ``` Then you define `rpc` methods inside your service definition, specifying their request and response types. gRPC lets you define four kinds of service method, all of which are used in the `RouteGuide` service: - A *simple RPC* where the client sends a request to the server using the stub and waits for a response to come back, just like a normal function call. ```proto // Obtains the feature at a given position. rpc GetFeature(Point) returns (Feature) {} ``` - A *server-side streaming RPC* where the client sends a request to the server and gets a stream to read a sequence of messages back. The client reads from the returned stream until there are no more messages. As you can see in our example, you specify a server-side streaming method by placing the `stream` keyword before the *response* type. ```proto // Obtains the Features available within the given Rectangle. Results are // streamed rather than returned at once (e.g. in a response message with a // repeated field), as the rectangle may cover a large area and contain a // huge number of features. rpc ListFeatures(Rectangle) returns (stream Feature) {} ``` - A *client-side streaming RPC* where the client writes a sequence of messages and sends them to the server, again using a provided stream. Once the client has finished writing the messages, it waits for the server to read them all and return its response. You specify a client-side streaming method by placing the `stream` keyword before the *request* type. ```proto // Accepts a stream of Points on a route being traversed, returning a // RouteSummary when traversal is completed. rpc RecordRoute(stream Point) returns (RouteSummary) {} ``` - A *bidirectional streaming RPC* where both sides send a sequence of messages using a read-write stream. 
The two streams operate independently, so clients and servers can read and write in whatever order they like: for example, the server could wait to receive all the client messages before writing its responses, or it could alternately read a message then write a message, or some other combination of reads and writes. The order of messages in each stream is preserved. You specify this type of method by placing the `stream` keyword before both the request and the response. ```proto // Accepts a stream of RouteNotes sent while a route is being traversed, // while receiving other RouteNotes (e.g. from other users). rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} ``` Our `.proto` file also contains protocol buffer message type definitions for all the request and response types used in our service methods - for example, here's the `Point` message type: ```proto // Points are represented as latitude-longitude pairs in the E7 representation // (degrees multiplied by 10**7 and rounded to the nearest integer). // Latitudes should be in the range +/- 90 degrees and longitude should be in // the range +/- 180 degrees (inclusive). message Point { int32 latitude = 1; int32 longitude = 2; } ``` ## Generating client and server code Next we need to generate the gRPC client and server interfaces from our `.proto` service definition. We do this using the protocol buffer compiler `protoc` with a special gRPC Go plugin. For simplicity, we've provided a [bash script](https://github.com/grpc/grpc-go/blob/master/codegen.sh) that runs `protoc` for you with the appropriate plugin, input, and output (if you want to run this by yourself, make sure you've installed protoc and followed the gRPC-Go [installation instructions](https://github.com/grpc/grpc-go/blob/master/README.md) first): ```shell $ codegen.sh route_guide.proto ``` which actually runs: ```shell $ protoc --go_out=plugins=grpc:. 
route_guide.proto ``` Running this command generates the following file in your current directory: - `route_guide.pb.go` This contains: - All the protocol buffer code to populate, serialize, and retrieve our request and response message types - An interface type (or *stub*) for clients to call with the methods defined in the `RouteGuide` service. - An interface type for servers to implement, also with the methods defined in the `RouteGuide` service. ## Creating the server First let's look at how we create a `RouteGuide` server. If you're only interested in creating gRPC clients, you can skip this section and go straight to [Creating the client](#client) (though you might find it interesting anyway!). There are two parts to making our `RouteGuide` service do its job: - Implementing the service interface generated from our service definition: doing the actual "work" of our service. - Running a gRPC server to listen for requests from clients and dispatch them to the right service implementation. You can find our example `RouteGuide` server in [grpc-go/examples/route_guide/server/server.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/server/server.go). Let's take a closer look at how it works. ### Implementing RouteGuide As you can see, our server has a `routeGuideServer` struct type that implements the generated `RouteGuideServer` interface: ```go type routeGuideServer struct { ... } ... func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { ... } ... func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { ... } ... func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { ... } ... func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { ... } ... ``` #### Simple RPC `routeGuideServer` implements all our service methods. 
Let's look at the simplest type first, `GetFeature`, which just gets a `Point` from the client and returns the corresponding feature information from its database in a `Feature`. ```go func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { for _, feature := range s.savedFeatures { if proto.Equal(feature.Location, point) { return feature, nil } } // No feature was found, return an unnamed feature return &pb.Feature{"", point}, nil } ``` The method is passed a context object for the RPC and the client's `Point` protocol buffer request. It returns a `Feature` protocol buffer object with the response information and an `error`. In the method we populate the `Feature` with the appropriate information, and then `return` it along with an `nil` error to tell gRPC that we've finished dealing with the RPC and that the `Feature` can be returned to the client. #### Server-side streaming RPC Now let's look at one of our streaming RPCs. `ListFeatures` is a server-side streaming RPC, so we need to send back multiple `Feature`s to our client. ```go func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { for _, feature := range s.savedFeatures { if inRange(feature.Location, rect) { if err := stream.Send(feature); err != nil { return err } } } return nil } ``` As you can see, instead of getting simple request and response objects in our method parameters, this time we get a request object (the `Rectangle` in which our client wants to find `Feature`s) and a special `RouteGuide_ListFeaturesServer` object to write our responses. In the method, we populate as many `Feature` objects as we need to return, writing them to the `RouteGuide_ListFeaturesServer` using its `Send()` method. Finally, as in our simple RPC, we return a `nil` error to tell gRPC that we've finished writing responses. 
Should any error happen in this call, we return a non-`nil` error; the gRPC layer will translate it into an appropriate RPC status to be sent on the wire. #### Client-side streaming RPC Now let's look at something a little more complicated: the client-side streaming method `RecordRoute`, where we get a stream of `Point`s from the client and return a single `RouteSummary` with information about their trip. As you can see, this time the method doesn't have a request parameter at all. Instead, it gets a `RouteGuide_RecordRouteServer` stream, which the server can use to both read *and* write messages - it can receive client messages using its `Recv()` method and return its single response using its `SendAndClose()` method. ```go func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { var pointCount, featureCount, distance int32 var lastPoint *pb.Point startTime := time.Now() for { point, err := stream.Recv() if err == io.EOF { endTime := time.Now() return stream.SendAndClose(&pb.RouteSummary{ PointCount: pointCount, FeatureCount: featureCount, Distance: distance, ElapsedTime: int32(endTime.Sub(startTime).Seconds()), }) } if err != nil { return err } pointCount++ for _, feature := range s.savedFeatures { if proto.Equal(feature.Location, point) { featureCount++ } } if lastPoint != nil { distance += calcDistance(lastPoint, point) } lastPoint = point } } ``` In the method body we use the `RouteGuide_RecordRouteServer`s `Recv()` method to repeatedly read in our client's requests to a request object (in this case a `Point`) until there are no more messages: the server needs to check the error returned from `Recv()` after each call. If this is `nil`, the stream is still good and it can continue reading; if it's `io.EOF` the message stream has ended and the server can return its `RouteSummary`. If it has any other value, we return the error "as is" so that it'll be translated to an RPC status by the gRPC layer. 
#### Bidirectional streaming RPC Finally, let's look at our bidirectional streaming RPC `RouteChat()`. ```go func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { for { in, err := stream.Recv() if err == io.EOF { return nil } if err != nil { return err } key := serialize(in.Location) ... // look for notes to be sent to client for _, note := range s.routeNotes[key] { if err := stream.Send(note); err != nil { return err } } } } ``` This time we get a `RouteGuide_RouteChatServer` stream that, as in our client-side streaming example, can be used to read and write messages. However, this time we return values via our method's stream while the client is still writing messages to *their* message stream. The syntax for reading and writing here is very similar to our client-streaming method, except the server uses the stream's `Send()` method rather than `SendAndClose()` because it's writing multiple responses. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently. ### Starting the server Once we've implemented all our methods, we also need to start up a gRPC server so that clients can actually use our service. The following snippet shows how we do this for our `RouteGuide` service: ```go flag.Parse() lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } grpcServer := grpc.NewServer() pb.RegisterRouteGuideServer(grpcServer, &routeGuideServer{}) ... // determine whether to use TLS grpcServer.Serve(lis) ``` To build and start a server, we: 1. Specify the port we want to use to listen for client requests using `lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))`. 2. Create an instance of the gRPC server using `grpc.NewServer()`. 3. Register our service implementation with the gRPC server. 4. 
Call `Serve()` on the server with our port details to do a blocking wait until the process is killed or `Stop()` is called. ## Creating the client In this section, we'll look at creating a Go client for our `RouteGuide` service. You can see our complete example client code in [grpc-go/examples/route_guide/client/client.go](https://github.com/grpc/grpc-go/tree/master/examples/route_guide/client/client.go). ### Creating a stub To call service methods, we first need to create a gRPC *channel* to communicate with the server. We create this by passing the server address and port number to `grpc.Dial()` as follows: ```go conn, err := grpc.Dial(*serverAddr) if err != nil { ... } defer conn.Close() ``` You can use `DialOptions` to set the auth credentials (e.g., TLS, GCE credentials, JWT credentials) in `grpc.Dial` if the service you request requires that - however, we don't need to do this for our `RouteGuide` service. Once the gRPC *channel* is setup, we need a client *stub* to perform RPCs. We get this using the `NewRouteGuideClient` method provided in the `pb` package we generated from our `.proto` file. ```go client := pb.NewRouteGuideClient(conn) ``` ### Calling service methods Now let's look at how we call our service methods. Note that in gRPC-Go, RPCs operate in a blocking/synchronous mode, which means that the RPC call waits for the server to respond, and will either return a response or an error. #### Simple RPC Calling the simple RPC `GetFeature` is nearly as straightforward as calling a local method. ```go feature, err := client.GetFeature(ctx, &pb.Point{409146138, -746188906}) if err != nil { ... } ``` As you can see, we call the method on the stub we got earlier. In our method parameters we create and populate a request protocol buffer object (in our case `Point`). We also pass a `context.Context` object which lets us change our RPC's behaviour if necessary, such as time-out/cancel an RPC in flight. 
If the call doesn't return an error, then we can read the response information from the server from the first return value. ```go log.Println(feature) ``` #### Server-side streaming RPC Here's where we call the server-side streaming method `ListFeatures`, which returns a stream of geographical `Feature`s. If you've already read [Creating the server](#server) some of this may look very familiar - streaming RPCs are implemented in a similar way on both sides. ```go rect := &pb.Rectangle{ ... } // initialize a pb.Rectangle stream, err := client.ListFeatures(ctx, rect) if err != nil { ... } for { feature, err := stream.Recv() if err == io.EOF { break } if err != nil { log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) } log.Println(feature) } ``` As in the simple RPC, we pass the method a context and a request. However, instead of getting a response object back, we get back an instance of `RouteGuide_ListFeaturesClient`. The client can use the `RouteGuide_ListFeaturesClient` stream to read the server's responses. We use the `RouteGuide_ListFeaturesClient`'s `Recv()` method to repeatedly read in the server's responses to a response protocol buffer object (in this case a `Feature`) until there are no more messages: the client needs to check the error `err` returned from `Recv()` after each call. If `nil`, the stream is still good and it can continue reading; if it's `io.EOF` then the message stream has ended; otherwise there must be an RPC error, which is passed over through `err`. #### Client-side streaming RPC The client-side streaming method `RecordRoute` is similar to the server-side method, except that we only pass the method a context and get a `RouteGuide_RecordRouteClient` stream back, which we can use to both write *and* read messages. 
```go // Create a random number of random points r := rand.New(rand.NewSource(time.Now().UnixNano())) pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points var points []*pb.Point for i := 0; i < pointCount; i++ { points = append(points, randomPoint(r)) } log.Printf("Traversing %d points.", len(points)) stream, err := client.RecordRoute(ctx) if err != nil { log.Fatalf("%v.RecordRoute(_) = _, %v", client, err) } for _, point := range points { if err := stream.Send(point); err != nil { log.Fatalf("%v.Send(%v) = %v", stream, point, err) } } reply, err := stream.CloseAndRecv() if err != nil { log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) } log.Printf("Route summary: %v", reply) ``` The `RouteGuide_RecordRouteClient` has a `Send()` method that we can use to send requests to the server. Once we've finished writing our client's requests to the stream using `Send()`, we need to call `CloseAndRecv()` on the stream to let gRPC know that we've finished writing and are expecting to receive a response. We get our RPC status from the `err` returned from `CloseAndRecv()`. If the status is `nil`, then the first return value from `CloseAndRecv()` will be a valid server response. #### Bidirectional streaming RPC Finally, let's look at our bidirectional streaming RPC `RouteChat()`. As in the case of `RecordRoute`, we only pass the method a context object and get back a stream that we can use to both write and read messages. However, this time we return values via our method's stream while the server is still writing messages to *their* message stream. ```go stream, err := client.RouteChat(ctx) waitc := make(chan struct{}) go func() { for { in, err := stream.Recv() if err == io.EOF { // read done. 
close(waitc) return } if err != nil { log.Fatalf("Failed to receive a note : %v", err) } log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) } }() for _, note := range notes { if err := stream.Send(note); err != nil { log.Fatalf("Failed to send a note: %v", err) } } stream.CloseSend() <-waitc ``` The syntax for reading and writing here is very similar to our client-side streaming method, except we use the stream's `CloseSend()` method once we've finished our call. Although each side will always get the other's messages in the order they were written, both the client and server can read and write in any order — the streams operate completely independently. ## Try it out! To compile and run the server, assuming you are in the folder `$GOPATH/src/google.golang.org/grpc/examples/route_guide`, simply: ```sh $ go run server/server.go ``` Likewise, to run the client: ```sh $ go run client/client.go ``` grpc-go-1.29.1/examples/helloworld/000077500000000000000000000000001365033716300171425ustar00rootroot00000000000000grpc-go-1.29.1/examples/helloworld/greeter_client/000077500000000000000000000000001365033716300221355ustar00rootroot00000000000000grpc-go-1.29.1/examples/helloworld/greeter_client/main.go000066400000000000000000000027111365033716300234110ustar00rootroot00000000000000/* * * Copyright 2015 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package main implements a client for Greeter service. 
package main import ( "context" "log" "os" "time" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/helloworld/helloworld" ) const ( address = "localhost:50051" defaultName = "world" ) func main() { // Set up a connection to the server. conn, err := grpc.Dial(address, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() c := pb.NewGreeterClient(conn) // Contact the server and print out its response. name := defaultName if len(os.Args) > 1 { name = os.Args[1] } ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() r, err := c.SayHello(ctx, &pb.HelloRequest{Name: name}) if err != nil { log.Fatalf("could not greet: %v", err) } log.Printf("Greeting: %s", r.GetMessage()) } grpc-go-1.29.1/examples/helloworld/greeter_server/000077500000000000000000000000001365033716300221655ustar00rootroot00000000000000grpc-go-1.29.1/examples/helloworld/greeter_server/main.go000066400000000000000000000030221365033716300234350ustar00rootroot00000000000000/* * * Copyright 2015 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate protoc -I ../helloworld --go_out=plugins=grpc:../helloworld ../helloworld/helloworld.proto // Package main implements a server for Greeter service. 
package main import ( "context" "log" "net" "google.golang.org/grpc" pb "google.golang.org/grpc/examples/helloworld/helloworld" ) const ( port = ":50051" ) // server is used to implement helloworld.GreeterServer. type server struct { pb.UnimplementedGreeterServer } // SayHello implements helloworld.GreeterServer func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { log.Printf("Received: %v", in.GetName()) return &pb.HelloReply{Message: "Hello " + in.GetName()}, nil } func main() { lis, err := net.Listen("tcp", port) if err != nil { log.Fatalf("failed to listen: %v", err) } s := grpc.NewServer() pb.RegisterGreeterServer(s, &server{}) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } } grpc-go-1.29.1/examples/helloworld/helloworld/000077500000000000000000000000001365033716300213155ustar00rootroot00000000000000grpc-go-1.29.1/examples/helloworld/helloworld/helloworld.pb.go000066400000000000000000000162141365033716300244230ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: helloworld.proto package helloworld import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // The request message containing the user's name. 
type HelloRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HelloRequest) Reset() { *m = HelloRequest{} } func (m *HelloRequest) String() string { return proto.CompactTextString(m) } func (*HelloRequest) ProtoMessage() {} func (*HelloRequest) Descriptor() ([]byte, []int) { return fileDescriptor_17b8c58d586b62f2, []int{0} } func (m *HelloRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HelloRequest.Unmarshal(m, b) } func (m *HelloRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HelloRequest.Marshal(b, m, deterministic) } func (m *HelloRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_HelloRequest.Merge(m, src) } func (m *HelloRequest) XXX_Size() int { return xxx_messageInfo_HelloRequest.Size(m) } func (m *HelloRequest) XXX_DiscardUnknown() { xxx_messageInfo_HelloRequest.DiscardUnknown(m) } var xxx_messageInfo_HelloRequest proto.InternalMessageInfo func (m *HelloRequest) GetName() string { if m != nil { return m.Name } return "" } // The response message containing the greetings type HelloReply struct { Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HelloReply) Reset() { *m = HelloReply{} } func (m *HelloReply) String() string { return proto.CompactTextString(m) } func (*HelloReply) ProtoMessage() {} func (*HelloReply) Descriptor() ([]byte, []int) { return fileDescriptor_17b8c58d586b62f2, []int{1} } func (m *HelloReply) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HelloReply.Unmarshal(m, b) } func (m *HelloReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HelloReply.Marshal(b, m, deterministic) } func (m *HelloReply) XXX_Merge(src 
proto.Message) { xxx_messageInfo_HelloReply.Merge(m, src) } func (m *HelloReply) XXX_Size() int { return xxx_messageInfo_HelloReply.Size(m) } func (m *HelloReply) XXX_DiscardUnknown() { xxx_messageInfo_HelloReply.DiscardUnknown(m) } var xxx_messageInfo_HelloReply proto.InternalMessageInfo func (m *HelloReply) GetMessage() string { if m != nil { return m.Message } return "" } func init() { proto.RegisterType((*HelloRequest)(nil), "helloworld.HelloRequest") proto.RegisterType((*HelloReply)(nil), "helloworld.HelloReply") } func init() { proto.RegisterFile("helloworld.proto", fileDescriptor_17b8c58d586b62f2) } var fileDescriptor_17b8c58d586b62f2 = []byte{ // 175 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0xcd, 0xc9, 0xc9, 0x2f, 0xcf, 0x2f, 0xca, 0x49, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88, 0x28, 0x29, 0x71, 0xf1, 0x78, 0x80, 0x78, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x92, 0x1a, 0x17, 0x17, 0x54, 0x4d, 0x41, 0x4e, 0xa5, 0x90, 0x04, 0x17, 0x7b, 0x6e, 0x6a, 0x71, 0x71, 0x62, 0x3a, 0x4c, 0x11, 0x8c, 0x6b, 0xe4, 0xc9, 0xc5, 0xee, 0x5e, 0x94, 0x9a, 0x5a, 0x92, 0x5a, 0x24, 0x64, 0xc7, 0xc5, 0x11, 0x9c, 0x58, 0x09, 0xd6, 0x25, 0x24, 0xa1, 0x87, 0xe4, 0x02, 0x64, 0xcb, 0xa4, 0xc4, 0xb0, 0xc8, 0x14, 0xe4, 0x54, 0x2a, 0x31, 0x38, 0x19, 0x70, 0x49, 0x67, 0xe6, 0xeb, 0xa5, 0x17, 0x15, 0x24, 0xeb, 0xa5, 0x56, 0x24, 0xe6, 0x16, 0xe4, 0xa4, 0x16, 0x23, 0xa9, 0x75, 0xe2, 0x07, 0x2b, 0x0e, 0x07, 0xb1, 0x03, 0x40, 0x5e, 0x0a, 0x60, 0x4c, 0x62, 0x03, 0xfb, 0xcd, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb7, 0xcd, 0xf2, 0xef, 0x00, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // GreeterClient is the client API for Greeter service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type GreeterClient interface { // Sends a greeting SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) } type greeterClient struct { cc grpc.ClientConnInterface } func NewGreeterClient(cc grpc.ClientConnInterface) GreeterClient { return &greeterClient{cc} } func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { out := new(HelloReply) err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, opts...) if err != nil { return nil, err } return out, nil } // GreeterServer is the server API for Greeter service. type GreeterServer interface { // Sends a greeting SayHello(context.Context, *HelloRequest) (*HelloReply, error) } // UnimplementedGreeterServer can be embedded to have forward compatible implementations. 
type UnimplementedGreeterServer struct { } func (*UnimplementedGreeterServer) SayHello(ctx context.Context, req *HelloRequest) (*HelloReply, error) { return nil, status.Errorf(codes.Unimplemented, "method SayHello not implemented") } func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { s.RegisterService(&_Greeter_serviceDesc, srv) } func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(HelloRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(GreeterServer).SayHello(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/helloworld.Greeter/SayHello", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) } return interceptor(ctx, in, info, handler) } var _Greeter_serviceDesc = grpc.ServiceDesc{ ServiceName: "helloworld.Greeter", HandlerType: (*GreeterServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "SayHello", Handler: _Greeter_SayHello_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "helloworld.proto", } grpc-go-1.29.1/examples/helloworld/helloworld/helloworld.proto000066400000000000000000000021051365033716300245530ustar00rootroot00000000000000// Copyright 2015 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
syntax = "proto3"; option java_multiple_files = true; option java_package = "io.grpc.examples.helloworld"; option java_outer_classname = "HelloWorldProto"; package helloworld; // The greeting service definition. service Greeter { // Sends a greeting rpc SayHello (HelloRequest) returns (HelloReply) {} } // The request message containing the user's name. message HelloRequest { string name = 1; } // The response message containing the greetings message HelloReply { string message = 1; } grpc-go-1.29.1/examples/helloworld/mock_helloworld/000077500000000000000000000000001365033716300223265ustar00rootroot00000000000000grpc-go-1.29.1/examples/helloworld/mock_helloworld/hw_mock.go000066400000000000000000000027061365033716300243110ustar00rootroot00000000000000// Automatically generated by MockGen. DO NOT EDIT! // Source: google.golang.org/grpc/examples/helloworld/helloworld (interfaces: GreeterClient) package mock_helloworld import ( context "context" gomock "github.com/golang/mock/gomock" grpc "google.golang.org/grpc" helloworld "google.golang.org/grpc/examples/helloworld/helloworld" ) // Mock of GreeterClient interface type MockGreeterClient struct { ctrl *gomock.Controller recorder *_MockGreeterClientRecorder } // Recorder for MockGreeterClient (not exported) type _MockGreeterClientRecorder struct { mock *MockGreeterClient } func NewMockGreeterClient(ctrl *gomock.Controller) *MockGreeterClient { mock := &MockGreeterClient{ctrl: ctrl} mock.recorder = &_MockGreeterClientRecorder{mock} return mock } func (_m *MockGreeterClient) EXPECT() *_MockGreeterClientRecorder { return _m.recorder } func (_m *MockGreeterClient) SayHello(_param0 context.Context, _param1 *helloworld.HelloRequest, _param2 ...grpc.CallOption) (*helloworld.HelloReply, error) { _s := []interface{}{_param0, _param1} for _, _x := range _param2 { _s = append(_s, _x) } ret := _m.ctrl.Call(_m, "SayHello", _s...) 
ret0, _ := ret[0].(*helloworld.HelloReply) ret1, _ := ret[1].(error) return ret0, ret1 } func (_mr *_MockGreeterClientRecorder) SayHello(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { _s := append([]interface{}{arg0, arg1}, arg2...) return _mr.mock.ctrl.RecordCall(_mr.mock, "SayHello", _s...) } grpc-go-1.29.1/examples/helloworld/mock_helloworld/hw_mock_test.go000066400000000000000000000040211365033716300253400ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package mock_helloworld_test import ( "context" "fmt" "testing" "time" "github.com/golang/mock/gomock" "github.com/golang/protobuf/proto" helloworld "google.golang.org/grpc/examples/helloworld/helloworld" hwmock "google.golang.org/grpc/examples/helloworld/mock_helloworld" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } // rpcMsg implements the gomock.Matcher interface type rpcMsg struct { msg proto.Message } func (r *rpcMsg) Matches(msg interface{}) bool { m, ok := msg.(proto.Message) if !ok { return false } return proto.Equal(m, r.msg) } func (r *rpcMsg) String() string { return fmt.Sprintf("is %s", r.msg) } func (s) TestSayHello(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockGreeterClient := hwmock.NewMockGreeterClient(ctrl) req := &helloworld.HelloRequest{Name: "unit_test"} mockGreeterClient.EXPECT().SayHello( gomock.Any(), &rpcMsg{msg: req}, ).Return(&helloworld.HelloReply{Message: "Mocked Interface"}, nil) testSayHello(t, mockGreeterClient) } func testSayHello(t *testing.T, client helloworld.GreeterClient) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() r, err := client.SayHello(ctx, &helloworld.HelloRequest{Name: "unit_test"}) if err != nil || r.Message != "Mocked Interface" { t.Errorf("mocking failed") } t.Log("Reply : ", r.Message) } grpc-go-1.29.1/examples/route_guide/000077500000000000000000000000001365033716300173025ustar00rootroot00000000000000grpc-go-1.29.1/examples/route_guide/README.md000066400000000000000000000015471365033716300205700ustar00rootroot00000000000000# Description The route guide server and client demonstrate how to use grpc go libraries to perform unary, client streaming, server streaming and full duplex RPCs. Please refer to [gRPC Basics: Go](https://grpc.io/docs/tutorials/basic/go.html) for more information. 
See the definition of the route guide service in routeguide/route_guide.proto. # Run the sample code To compile and run the server, assuming you are in the root of the route_guide folder, i.e., .../examples/route_guide/, simply: ```sh $ go run server/server.go ``` Likewise, to run the client: ```sh $ go run client/client.go ``` # Optional command line flags The server and client both take optional command line flags. For example, the client and server run without TLS by default. To enable TLS: ```sh $ go run server/server.go -tls=true ``` and ```sh $ go run client/client.go -tls=true ``` grpc-go-1.29.1/examples/route_guide/client/000077500000000000000000000000001365033716300205605ustar00rootroot00000000000000grpc-go-1.29.1/examples/route_guide/client/client.go000066400000000000000000000141311365033716300223650ustar00rootroot00000000000000/* * * Copyright 2015 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package main implements a simple gRPC client that demonstrates how to use gRPC-Go libraries // to perform unary, client streaming, server streaming and full duplex RPCs. // // It interacts with the route guide service whose definition can be found in routeguide/route_guide.proto. 
package main import ( "context" "flag" "io" "log" "math/rand" "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials" pb "google.golang.org/grpc/examples/route_guide/routeguide" "google.golang.org/grpc/testdata" ) var ( tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") caFile = flag.String("ca_file", "", "The file containing the CA root cert file") serverAddr = flag.String("server_addr", "localhost:10000", "The server address in the format of host:port") serverHostOverride = flag.String("server_host_override", "x.test.youtube.com", "The server name use to verify the hostname returned by TLS handshake") ) // printFeature gets the feature for the given point. func printFeature(client pb.RouteGuideClient, point *pb.Point) { log.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() feature, err := client.GetFeature(ctx, point) if err != nil { log.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err) } log.Println(feature) } // printFeatures lists all the features within the given bounding Rectangle. func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { log.Printf("Looking for features within %v", rect) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() stream, err := client.ListFeatures(ctx, rect) if err != nil { log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) } for { feature, err := stream.Recv() if err == io.EOF { break } if err != nil { log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) } log.Println(feature) } } // runRecordRoute sends a sequence of points to server and expects to get a RouteSummary from server. 
func runRecordRoute(client pb.RouteGuideClient) { // Create a random number of random points r := rand.New(rand.NewSource(time.Now().UnixNano())) pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points var points []*pb.Point for i := 0; i < pointCount; i++ { points = append(points, randomPoint(r)) } log.Printf("Traversing %d points.", len(points)) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() stream, err := client.RecordRoute(ctx) if err != nil { log.Fatalf("%v.RecordRoute(_) = _, %v", client, err) } for _, point := range points { if err := stream.Send(point); err != nil { log.Fatalf("%v.Send(%v) = %v", stream, point, err) } } reply, err := stream.CloseAndRecv() if err != nil { log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) } log.Printf("Route summary: %v", reply) } // runRouteChat receives a sequence of route notes, while sending notes for various locations. func runRouteChat(client pb.RouteGuideClient) { notes := []*pb.RouteNote{ {Location: &pb.Point{Latitude: 0, Longitude: 1}, Message: "First message"}, {Location: &pb.Point{Latitude: 0, Longitude: 2}, Message: "Second message"}, {Location: &pb.Point{Latitude: 0, Longitude: 3}, Message: "Third message"}, {Location: &pb.Point{Latitude: 0, Longitude: 1}, Message: "Fourth message"}, {Location: &pb.Point{Latitude: 0, Longitude: 2}, Message: "Fifth message"}, {Location: &pb.Point{Latitude: 0, Longitude: 3}, Message: "Sixth message"}, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() stream, err := client.RouteChat(ctx) if err != nil { log.Fatalf("%v.RouteChat(_) = _, %v", client, err) } waitc := make(chan struct{}) go func() { for { in, err := stream.Recv() if err == io.EOF { // read done. 
close(waitc) return } if err != nil { log.Fatalf("Failed to receive a note : %v", err) } log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) } }() for _, note := range notes { if err := stream.Send(note); err != nil { log.Fatalf("Failed to send a note: %v", err) } } stream.CloseSend() <-waitc } func randomPoint(r *rand.Rand) *pb.Point { lat := (r.Int31n(180) - 90) * 1e7 long := (r.Int31n(360) - 180) * 1e7 return &pb.Point{Latitude: lat, Longitude: long} } func main() { flag.Parse() var opts []grpc.DialOption if *tls { if *caFile == "" { *caFile = testdata.Path("ca.pem") } creds, err := credentials.NewClientTLSFromFile(*caFile, *serverHostOverride) if err != nil { log.Fatalf("Failed to create TLS credentials %v", err) } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { opts = append(opts, grpc.WithInsecure()) } opts = append(opts, grpc.WithBlock()) conn, err := grpc.Dial(*serverAddr, opts...) if err != nil { log.Fatalf("fail to dial: %v", err) } defer conn.Close() client := pb.NewRouteGuideClient(conn) // Looking for a valid feature printFeature(client, &pb.Point{Latitude: 409146138, Longitude: -746188906}) // Feature missing. printFeature(client, &pb.Point{Latitude: 0, Longitude: 0}) // Looking for features between 40, -75 and 42, -73. printFeatures(client, &pb.Rectangle{ Lo: &pb.Point{Latitude: 400000000, Longitude: -750000000}, Hi: &pb.Point{Latitude: 420000000, Longitude: -730000000}, }) // RecordRoute runRecordRoute(client) // RouteChat runRouteChat(client) } grpc-go-1.29.1/examples/route_guide/mock_routeguide/000077500000000000000000000000001365033716300224675ustar00rootroot00000000000000grpc-go-1.29.1/examples/route_guide/mock_routeguide/rg_mock.go000066400000000000000000000147501365033716300244460ustar00rootroot00000000000000// Automatically generated by MockGen. DO NOT EDIT! 
// Source: google.golang.org/grpc/examples/route_guide/routeguide (interfaces: RouteGuideClient,RouteGuide_RouteChatClient) package mock_routeguide import ( context "context" gomock "github.com/golang/mock/gomock" grpc "google.golang.org/grpc" routeguide "google.golang.org/grpc/examples/route_guide/routeguide" metadata "google.golang.org/grpc/metadata" ) // Mock of RouteGuideClient interface type MockRouteGuideClient struct { ctrl *gomock.Controller recorder *_MockRouteGuideClientRecorder } // Recorder for MockRouteGuideClient (not exported) type _MockRouteGuideClientRecorder struct { mock *MockRouteGuideClient } func NewMockRouteGuideClient(ctrl *gomock.Controller) *MockRouteGuideClient { mock := &MockRouteGuideClient{ctrl: ctrl} mock.recorder = &_MockRouteGuideClientRecorder{mock} return mock } func (_m *MockRouteGuideClient) EXPECT() *_MockRouteGuideClientRecorder { return _m.recorder } func (_m *MockRouteGuideClient) GetFeature(_param0 context.Context, _param1 *routeguide.Point, _param2 ...grpc.CallOption) (*routeguide.Feature, error) { _s := []interface{}{_param0, _param1} for _, _x := range _param2 { _s = append(_s, _x) } ret := _m.ctrl.Call(_m, "GetFeature", _s...) ret0, _ := ret[0].(*routeguide.Feature) ret1, _ := ret[1].(error) return ret0, ret1 } func (_mr *_MockRouteGuideClientRecorder) GetFeature(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { _s := append([]interface{}{arg0, arg1}, arg2...) return _mr.mock.ctrl.RecordCall(_mr.mock, "GetFeature", _s...) } func (_m *MockRouteGuideClient) ListFeatures(_param0 context.Context, _param1 *routeguide.Rectangle, _param2 ...grpc.CallOption) (routeguide.RouteGuide_ListFeaturesClient, error) { _s := []interface{}{_param0, _param1} for _, _x := range _param2 { _s = append(_s, _x) } ret := _m.ctrl.Call(_m, "ListFeatures", _s...) 
ret0, _ := ret[0].(routeguide.RouteGuide_ListFeaturesClient) ret1, _ := ret[1].(error) return ret0, ret1 } func (_mr *_MockRouteGuideClientRecorder) ListFeatures(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { _s := append([]interface{}{arg0, arg1}, arg2...) return _mr.mock.ctrl.RecordCall(_mr.mock, "ListFeatures", _s...) } func (_m *MockRouteGuideClient) RecordRoute(_param0 context.Context, _param1 ...grpc.CallOption) (routeguide.RouteGuide_RecordRouteClient, error) { _s := []interface{}{_param0} for _, _x := range _param1 { _s = append(_s, _x) } ret := _m.ctrl.Call(_m, "RecordRoute", _s...) ret0, _ := ret[0].(routeguide.RouteGuide_RecordRouteClient) ret1, _ := ret[1].(error) return ret0, ret1 } func (_mr *_MockRouteGuideClientRecorder) RecordRoute(arg0 interface{}, arg1 ...interface{}) *gomock.Call { _s := append([]interface{}{arg0}, arg1...) return _mr.mock.ctrl.RecordCall(_mr.mock, "RecordRoute", _s...) } func (_m *MockRouteGuideClient) RouteChat(_param0 context.Context, _param1 ...grpc.CallOption) (routeguide.RouteGuide_RouteChatClient, error) { _s := []interface{}{_param0} for _, _x := range _param1 { _s = append(_s, _x) } ret := _m.ctrl.Call(_m, "RouteChat", _s...) ret0, _ := ret[0].(routeguide.RouteGuide_RouteChatClient) ret1, _ := ret[1].(error) return ret0, ret1 } func (_mr *_MockRouteGuideClientRecorder) RouteChat(arg0 interface{}, arg1 ...interface{}) *gomock.Call { _s := append([]interface{}{arg0}, arg1...) return _mr.mock.ctrl.RecordCall(_mr.mock, "RouteChat", _s...) 
} // Mock of RouteGuide_RouteChatClient interface type MockRouteGuide_RouteChatClient struct { ctrl *gomock.Controller recorder *_MockRouteGuide_RouteChatClientRecorder } // Recorder for MockRouteGuide_RouteChatClient (not exported) type _MockRouteGuide_RouteChatClientRecorder struct { mock *MockRouteGuide_RouteChatClient } func NewMockRouteGuide_RouteChatClient(ctrl *gomock.Controller) *MockRouteGuide_RouteChatClient { mock := &MockRouteGuide_RouteChatClient{ctrl: ctrl} mock.recorder = &_MockRouteGuide_RouteChatClientRecorder{mock} return mock } func (_m *MockRouteGuide_RouteChatClient) EXPECT() *_MockRouteGuide_RouteChatClientRecorder { return _m.recorder } func (_m *MockRouteGuide_RouteChatClient) CloseSend() error { ret := _m.ctrl.Call(_m, "CloseSend") ret0, _ := ret[0].(error) return ret0 } func (_mr *_MockRouteGuide_RouteChatClientRecorder) CloseSend() *gomock.Call { return _mr.mock.ctrl.RecordCall(_mr.mock, "CloseSend") } func (_m *MockRouteGuide_RouteChatClient) Context() context.Context { ret := _m.ctrl.Call(_m, "Context") ret0, _ := ret[0].(context.Context) return ret0 } func (_mr *_MockRouteGuide_RouteChatClientRecorder) Context() *gomock.Call { return _mr.mock.ctrl.RecordCall(_mr.mock, "Context") } func (_m *MockRouteGuide_RouteChatClient) Header() (metadata.MD, error) { ret := _m.ctrl.Call(_m, "Header") ret0, _ := ret[0].(metadata.MD) ret1, _ := ret[1].(error) return ret0, ret1 } func (_mr *_MockRouteGuide_RouteChatClientRecorder) Header() *gomock.Call { return _mr.mock.ctrl.RecordCall(_mr.mock, "Header") } func (_m *MockRouteGuide_RouteChatClient) Recv() (*routeguide.RouteNote, error) { ret := _m.ctrl.Call(_m, "Recv") ret0, _ := ret[0].(*routeguide.RouteNote) ret1, _ := ret[1].(error) return ret0, ret1 } func (_mr *_MockRouteGuide_RouteChatClientRecorder) Recv() *gomock.Call { return _mr.mock.ctrl.RecordCall(_mr.mock, "Recv") } func (_m *MockRouteGuide_RouteChatClient) RecvMsg(_param0 interface{}) error { ret := _m.ctrl.Call(_m, "RecvMsg", _param0) 
ret0, _ := ret[0].(error) return ret0 } func (_mr *_MockRouteGuide_RouteChatClientRecorder) RecvMsg(arg0 interface{}) *gomock.Call { return _mr.mock.ctrl.RecordCall(_mr.mock, "RecvMsg", arg0) } func (_m *MockRouteGuide_RouteChatClient) Send(_param0 *routeguide.RouteNote) error { ret := _m.ctrl.Call(_m, "Send", _param0) ret0, _ := ret[0].(error) return ret0 } func (_mr *_MockRouteGuide_RouteChatClientRecorder) Send(arg0 interface{}) *gomock.Call { return _mr.mock.ctrl.RecordCall(_mr.mock, "Send", arg0) } func (_m *MockRouteGuide_RouteChatClient) SendMsg(_param0 interface{}) error { ret := _m.ctrl.Call(_m, "SendMsg", _param0) ret0, _ := ret[0].(error) return ret0 } func (_mr *_MockRouteGuide_RouteChatClientRecorder) SendMsg(arg0 interface{}) *gomock.Call { return _mr.mock.ctrl.RecordCall(_mr.mock, "SendMsg", arg0) } func (_m *MockRouteGuide_RouteChatClient) Trailer() metadata.MD { ret := _m.ctrl.Call(_m, "Trailer") ret0, _ := ret[0].(metadata.MD) return ret0 } func (_mr *_MockRouteGuide_RouteChatClientRecorder) Trailer() *gomock.Call { return _mr.mock.ctrl.RecordCall(_mr.mock, "Trailer") } grpc-go-1.29.1/examples/route_guide/mock_routeguide/rg_mock_test.go000066400000000000000000000044251365033716300255030ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package mock_routeguide_test import ( "context" "fmt" "testing" "time" "github.com/golang/mock/gomock" "github.com/golang/protobuf/proto" rgmock "google.golang.org/grpc/examples/route_guide/mock_routeguide" rgpb "google.golang.org/grpc/examples/route_guide/routeguide" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } var msg = &rgpb.RouteNote{ Location: &rgpb.Point{Latitude: 17, Longitude: 29}, Message: "Taxi-cab", } func (s) TestRouteChat(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() // Create mock for the stream returned by RouteChat stream := rgmock.NewMockRouteGuide_RouteChatClient(ctrl) // set expectation on sending. stream.EXPECT().Send( gomock.Any(), ).Return(nil) // Set expectation on receiving. stream.EXPECT().Recv().Return(msg, nil) stream.EXPECT().CloseSend().Return(nil) // Create mock for the client interface. rgclient := rgmock.NewMockRouteGuideClient(ctrl) // Set expectation on RouteChat rgclient.EXPECT().RouteChat( gomock.Any(), ).Return(stream, nil) if err := testRouteChat(rgclient); err != nil { t.Fatalf("Test failed: %v", err) } } func testRouteChat(client rgpb.RouteGuideClient) error { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() stream, err := client.RouteChat(ctx) if err != nil { return err } if err := stream.Send(msg); err != nil { return err } if err := stream.CloseSend(); err != nil { return err } got, err := stream.Recv() if err != nil { return err } if !proto.Equal(got, msg) { return fmt.Errorf("stream.Recv() = %v, want %v", got, msg) } return nil } grpc-go-1.29.1/examples/route_guide/routeguide/000077500000000000000000000000001365033716300214565ustar00rootroot00000000000000grpc-go-1.29.1/examples/route_guide/routeguide/route_guide.pb.go000066400000000000000000000530511365033716300247240ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. 
// source: route_guide.proto package routeguide import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // Points are represented as latitude-longitude pairs in the E7 representation // (degrees multiplied by 10**7 and rounded to the nearest integer). // Latitudes should be in the range +/- 90 degrees and longitude should be in // the range +/- 180 degrees (inclusive). type Point struct { Latitude int32 `protobuf:"varint,1,opt,name=latitude,proto3" json:"latitude,omitempty"` Longitude int32 `protobuf:"varint,2,opt,name=longitude,proto3" json:"longitude,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Point) Reset() { *m = Point{} } func (m *Point) String() string { return proto.CompactTextString(m) } func (*Point) ProtoMessage() {} func (*Point) Descriptor() ([]byte, []int) { return fileDescriptor_b7d679f20da65b7b, []int{0} } func (m *Point) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Point.Unmarshal(m, b) } func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Point.Marshal(b, m, deterministic) } func (m *Point) XXX_Merge(src proto.Message) { xxx_messageInfo_Point.Merge(m, src) } func (m *Point) XXX_Size() int { return xxx_messageInfo_Point.Size(m) } func (m *Point) XXX_DiscardUnknown() { xxx_messageInfo_Point.DiscardUnknown(m) } var 
xxx_messageInfo_Point proto.InternalMessageInfo func (m *Point) GetLatitude() int32 { if m != nil { return m.Latitude } return 0 } func (m *Point) GetLongitude() int32 { if m != nil { return m.Longitude } return 0 } // A latitude-longitude rectangle, represented as two diagonally opposite // points "lo" and "hi". type Rectangle struct { // One corner of the rectangle. Lo *Point `protobuf:"bytes,1,opt,name=lo,proto3" json:"lo,omitempty"` // The other corner of the rectangle. Hi *Point `protobuf:"bytes,2,opt,name=hi,proto3" json:"hi,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Rectangle) Reset() { *m = Rectangle{} } func (m *Rectangle) String() string { return proto.CompactTextString(m) } func (*Rectangle) ProtoMessage() {} func (*Rectangle) Descriptor() ([]byte, []int) { return fileDescriptor_b7d679f20da65b7b, []int{1} } func (m *Rectangle) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Rectangle.Unmarshal(m, b) } func (m *Rectangle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Rectangle.Marshal(b, m, deterministic) } func (m *Rectangle) XXX_Merge(src proto.Message) { xxx_messageInfo_Rectangle.Merge(m, src) } func (m *Rectangle) XXX_Size() int { return xxx_messageInfo_Rectangle.Size(m) } func (m *Rectangle) XXX_DiscardUnknown() { xxx_messageInfo_Rectangle.DiscardUnknown(m) } var xxx_messageInfo_Rectangle proto.InternalMessageInfo func (m *Rectangle) GetLo() *Point { if m != nil { return m.Lo } return nil } func (m *Rectangle) GetHi() *Point { if m != nil { return m.Hi } return nil } // A feature names something at a given point. // // If a feature could not be named, the name is empty. type Feature struct { // The name of the feature. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The point where the feature is detected. 
Location *Point `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Feature) Reset() { *m = Feature{} } func (m *Feature) String() string { return proto.CompactTextString(m) } func (*Feature) ProtoMessage() {} func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor_b7d679f20da65b7b, []int{2} } func (m *Feature) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Feature.Unmarshal(m, b) } func (m *Feature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Feature.Marshal(b, m, deterministic) } func (m *Feature) XXX_Merge(src proto.Message) { xxx_messageInfo_Feature.Merge(m, src) } func (m *Feature) XXX_Size() int { return xxx_messageInfo_Feature.Size(m) } func (m *Feature) XXX_DiscardUnknown() { xxx_messageInfo_Feature.DiscardUnknown(m) } var xxx_messageInfo_Feature proto.InternalMessageInfo func (m *Feature) GetName() string { if m != nil { return m.Name } return "" } func (m *Feature) GetLocation() *Point { if m != nil { return m.Location } return nil } // A RouteNote is a message sent while at a given point. type RouteNote struct { // The location from which the message is sent. Location *Point `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"` // The message to be sent. 
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RouteNote) Reset() { *m = RouteNote{} } func (m *RouteNote) String() string { return proto.CompactTextString(m) } func (*RouteNote) ProtoMessage() {} func (*RouteNote) Descriptor() ([]byte, []int) { return fileDescriptor_b7d679f20da65b7b, []int{3} } func (m *RouteNote) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RouteNote.Unmarshal(m, b) } func (m *RouteNote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RouteNote.Marshal(b, m, deterministic) } func (m *RouteNote) XXX_Merge(src proto.Message) { xxx_messageInfo_RouteNote.Merge(m, src) } func (m *RouteNote) XXX_Size() int { return xxx_messageInfo_RouteNote.Size(m) } func (m *RouteNote) XXX_DiscardUnknown() { xxx_messageInfo_RouteNote.DiscardUnknown(m) } var xxx_messageInfo_RouteNote proto.InternalMessageInfo func (m *RouteNote) GetLocation() *Point { if m != nil { return m.Location } return nil } func (m *RouteNote) GetMessage() string { if m != nil { return m.Message } return "" } // A RouteSummary is received in response to a RecordRoute rpc. // // It contains the number of individual points received, the number of // detected features, and the total distance covered as the cumulative sum of // the distance between each point. type RouteSummary struct { // The number of points received. PointCount int32 `protobuf:"varint,1,opt,name=point_count,json=pointCount,proto3" json:"point_count,omitempty"` // The number of known features passed while traversing the route. FeatureCount int32 `protobuf:"varint,2,opt,name=feature_count,json=featureCount,proto3" json:"feature_count,omitempty"` // The distance covered in metres. Distance int32 `protobuf:"varint,3,opt,name=distance,proto3" json:"distance,omitempty"` // The duration of the traversal in seconds. 
ElapsedTime int32 `protobuf:"varint,4,opt,name=elapsed_time,json=elapsedTime,proto3" json:"elapsed_time,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RouteSummary) Reset() { *m = RouteSummary{} } func (m *RouteSummary) String() string { return proto.CompactTextString(m) } func (*RouteSummary) ProtoMessage() {} func (*RouteSummary) Descriptor() ([]byte, []int) { return fileDescriptor_b7d679f20da65b7b, []int{4} } func (m *RouteSummary) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RouteSummary.Unmarshal(m, b) } func (m *RouteSummary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RouteSummary.Marshal(b, m, deterministic) } func (m *RouteSummary) XXX_Merge(src proto.Message) { xxx_messageInfo_RouteSummary.Merge(m, src) } func (m *RouteSummary) XXX_Size() int { return xxx_messageInfo_RouteSummary.Size(m) } func (m *RouteSummary) XXX_DiscardUnknown() { xxx_messageInfo_RouteSummary.DiscardUnknown(m) } var xxx_messageInfo_RouteSummary proto.InternalMessageInfo func (m *RouteSummary) GetPointCount() int32 { if m != nil { return m.PointCount } return 0 } func (m *RouteSummary) GetFeatureCount() int32 { if m != nil { return m.FeatureCount } return 0 } func (m *RouteSummary) GetDistance() int32 { if m != nil { return m.Distance } return 0 } func (m *RouteSummary) GetElapsedTime() int32 { if m != nil { return m.ElapsedTime } return 0 } func init() { proto.RegisterType((*Point)(nil), "routeguide.Point") proto.RegisterType((*Rectangle)(nil), "routeguide.Rectangle") proto.RegisterType((*Feature)(nil), "routeguide.Feature") proto.RegisterType((*RouteNote)(nil), "routeguide.RouteNote") proto.RegisterType((*RouteSummary)(nil), "routeguide.RouteSummary") } func init() { proto.RegisterFile("route_guide.proto", fileDescriptor_b7d679f20da65b7b) } var fileDescriptor_b7d679f20da65b7b = []byte{ // 404 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xdd, 0xca, 0xd3, 0x40, 0x10, 0xfd, 0x36, 0x7e, 0x9f, 0x6d, 0x26, 0x11, 0xe9, 0x88, 0x10, 0xa2, 0xa0, 0x8d, 0x37, 0xbd, 0x31, 0x94, 0x0a, 0x5e, 0x56, 0x6c, 0xc1, 0xde, 0x14, 0xa9, 0xb1, 0xf7, 0x65, 0x4d, 0xc6, 0x74, 0x61, 0x93, 0x0d, 0xc9, 0x06, 0xf4, 0x01, 0x7c, 0x02, 0x5f, 0x58, 0xb2, 0x49, 0xda, 0x54, 0x5b, 0xbc, 0xdb, 0x39, 0x73, 0xce, 0xfc, 0x9c, 0x61, 0x61, 0x52, 0xaa, 0x5a, 0xd3, 0x21, 0xad, 0x45, 0x42, 0x61, 0x51, 0x2a, 0xad, 0x10, 0x0c, 0x64, 0x90, 0xe0, 0x23, 0x3c, 0xec, 0x94, 0xc8, 0x35, 0xfa, 0x30, 0x96, 0x5c, 0x0b, 0x5d, 0x27, 0xe4, 0xb1, 0xd7, 0x6c, 0xf6, 0x10, 0x9d, 0x62, 0x7c, 0x09, 0xb6, 0x54, 0x79, 0xda, 0x26, 0x2d, 0x93, 0x3c, 0x03, 0xc1, 0x17, 0xb0, 0x23, 0x8a, 0x35, 0xcf, 0x53, 0x49, 0x38, 0x05, 0x4b, 0x2a, 0x53, 0xc0, 0x59, 0x4c, 0xc2, 0x73, 0xa3, 0xd0, 0x74, 0x89, 0x2c, 0xa9, 0x1a, 0xca, 0x51, 0x98, 0x32, 0xd7, 0x29, 0x47, 0x11, 0x6c, 0x61, 0xf4, 0x89, 0xb8, 0xae, 0x4b, 0x42, 0x84, 0xfb, 0x9c, 0x67, 0xed, 0x4c, 0x76, 0x64, 0xde, 0xf8, 0x16, 0xc6, 0x52, 0xc5, 0x5c, 0x0b, 0x95, 0xdf, 0xae, 0x73, 0xa2, 0x04, 0x7b, 0xb0, 0xa3, 0x26, 0xfb, 0x59, 0xe9, 0x4b, 0x2d, 0xfb, 0xaf, 0x16, 0x3d, 0x18, 0x65, 0x54, 0x55, 0x3c, 0x6d, 0x17, 0xb7, 0xa3, 0x3e, 0x0c, 0x7e, 0x33, 0x70, 0x4d, 0xd9, 0xaf, 0x75, 0x96, 0xf1, 0xf2, 0x27, 0xbe, 0x02, 0xa7, 0x68, 0xd4, 0x87, 0x58, 0xd5, 0xb9, 0xee, 0x4c, 0x04, 0x03, 0xad, 0x1b, 0x04, 0xdf, 0xc0, 0x93, 0xef, 0xed, 0x56, 0x1d, 0xa5, 0xb5, 0xd2, 0xed, 0xc0, 0x96, 0xe4, 0xc3, 0x38, 0x11, 0x95, 0xe6, 0x79, 0x4c, 0xde, 0xa3, 0xf6, 0x0e, 0x7d, 0x8c, 0x53, 0x70, 0x49, 0xf2, 0xa2, 0xa2, 0xe4, 0xa0, 0x45, 0x46, 0xde, 0xbd, 0xc9, 0x3b, 0x1d, 0xb6, 0x17, 0x19, 0x2d, 0x7e, 0x59, 0x00, 0x66, 0xaa, 0x4d, 0xb3, 0x0e, 0xbe, 0x07, 0xd8, 0x90, 0xee, 0xbd, 0xfc, 0x77, 0x53, 0xff, 0xd9, 0x10, 0xea, 0x78, 0xc1, 0x1d, 0x2e, 0xc1, 0xdd, 0x8a, 0xaa, 0x17, 0x56, 0xf8, 0x7c, 0x48, 0x3b, 0x5d, 0xfb, 0x86, 0x7a, 0xce, 0x70, 0x09, 0x4e, 0x44, 0xb1, 0x2a, 0x13, 0x33, 0xcb, 0xb5, 0xc6, 0xde, 0x45, 0xc5, 
0x81, 0x8f, 0xc1, 0xdd, 0x8c, 0xe1, 0x87, 0xee, 0x64, 0xeb, 0x23, 0xd7, 0x7f, 0x35, 0xef, 0x2f, 0xe9, 0x5f, 0x87, 0x1b, 0xf9, 0x9c, 0xad, 0xe6, 0xf0, 0x42, 0xa8, 0x30, 0x2d, 0x8b, 0x38, 0xa4, 0x1f, 0x3c, 0x2b, 0x24, 0x55, 0x03, 0xfa, 0xea, 0xe9, 0xd9, 0xa3, 0x5d, 0xf3, 0x27, 0x76, 0xec, 0xdb, 0x63, 0xf3, 0x39, 0xde, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0xc8, 0xe4, 0xef, 0xe6, 0x31, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // RouteGuideClient is the client API for RouteGuide service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type RouteGuideClient interface { // A simple RPC. // // Obtains the feature at a given position. // // A feature with an empty name is returned if there's no feature at the given // position. GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) // A server-to-client streaming RPC. // // Obtains the Features available within the given Rectangle. Results are // streamed rather than returned at once (e.g. in a response message with a // repeated field), as the rectangle may cover a large area and contain a // huge number of features. ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) // A client-to-server streaming RPC. // // Accepts a stream of Points on a route being traversed, returning a // RouteSummary when traversal is completed. RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) // A Bidirectional streaming RPC. 
// // Accepts a stream of RouteNotes sent while a route is being traversed, // while receiving other RouteNotes (e.g. from other users). RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) } type routeGuideClient struct { cc grpc.ClientConnInterface } func NewRouteGuideClient(cc grpc.ClientConnInterface) RouteGuideClient { return &routeGuideClient{cc} } func (c *routeGuideClient) GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) { out := new(Feature) err := c.cc.Invoke(ctx, "/routeguide.RouteGuide/GetFeature", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *routeGuideClient) ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) { stream, err := c.cc.NewStream(ctx, &_RouteGuide_serviceDesc.Streams[0], "/routeguide.RouteGuide/ListFeatures", opts...) if err != nil { return nil, err } x := &routeGuideListFeaturesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type RouteGuide_ListFeaturesClient interface { Recv() (*Feature, error) grpc.ClientStream } type routeGuideListFeaturesClient struct { grpc.ClientStream } func (x *routeGuideListFeaturesClient) Recv() (*Feature, error) { m := new(Feature) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *routeGuideClient) RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) { stream, err := c.cc.NewStream(ctx, &_RouteGuide_serviceDesc.Streams[1], "/routeguide.RouteGuide/RecordRoute", opts...) 
if err != nil { return nil, err } x := &routeGuideRecordRouteClient{stream} return x, nil } type RouteGuide_RecordRouteClient interface { Send(*Point) error CloseAndRecv() (*RouteSummary, error) grpc.ClientStream } type routeGuideRecordRouteClient struct { grpc.ClientStream } func (x *routeGuideRecordRouteClient) Send(m *Point) error { return x.ClientStream.SendMsg(m) } func (x *routeGuideRecordRouteClient) CloseAndRecv() (*RouteSummary, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(RouteSummary) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *routeGuideClient) RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) { stream, err := c.cc.NewStream(ctx, &_RouteGuide_serviceDesc.Streams[2], "/routeguide.RouteGuide/RouteChat", opts...) if err != nil { return nil, err } x := &routeGuideRouteChatClient{stream} return x, nil } type RouteGuide_RouteChatClient interface { Send(*RouteNote) error Recv() (*RouteNote, error) grpc.ClientStream } type routeGuideRouteChatClient struct { grpc.ClientStream } func (x *routeGuideRouteChatClient) Send(m *RouteNote) error { return x.ClientStream.SendMsg(m) } func (x *routeGuideRouteChatClient) Recv() (*RouteNote, error) { m := new(RouteNote) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // RouteGuideServer is the server API for RouteGuide service. type RouteGuideServer interface { // A simple RPC. // // Obtains the feature at a given position. // // A feature with an empty name is returned if there's no feature at the given // position. GetFeature(context.Context, *Point) (*Feature, error) // A server-to-client streaming RPC. // // Obtains the Features available within the given Rectangle. Results are // streamed rather than returned at once (e.g. in a response message with a // repeated field), as the rectangle may cover a large area and contain a // huge number of features. 
ListFeatures(*Rectangle, RouteGuide_ListFeaturesServer) error // A client-to-server streaming RPC. // // Accepts a stream of Points on a route being traversed, returning a // RouteSummary when traversal is completed. RecordRoute(RouteGuide_RecordRouteServer) error // A Bidirectional streaming RPC. // // Accepts a stream of RouteNotes sent while a route is being traversed, // while receiving other RouteNotes (e.g. from other users). RouteChat(RouteGuide_RouteChatServer) error } // UnimplementedRouteGuideServer can be embedded to have forward compatible implementations. type UnimplementedRouteGuideServer struct { } func (*UnimplementedRouteGuideServer) GetFeature(ctx context.Context, req *Point) (*Feature, error) { return nil, status.Errorf(codes.Unimplemented, "method GetFeature not implemented") } func (*UnimplementedRouteGuideServer) ListFeatures(req *Rectangle, srv RouteGuide_ListFeaturesServer) error { return status.Errorf(codes.Unimplemented, "method ListFeatures not implemented") } func (*UnimplementedRouteGuideServer) RecordRoute(srv RouteGuide_RecordRouteServer) error { return status.Errorf(codes.Unimplemented, "method RecordRoute not implemented") } func (*UnimplementedRouteGuideServer) RouteChat(srv RouteGuide_RouteChatServer) error { return status.Errorf(codes.Unimplemented, "method RouteChat not implemented") } func RegisterRouteGuideServer(s *grpc.Server, srv RouteGuideServer) { s.RegisterService(&_RouteGuide_serviceDesc, srv) } func _RouteGuide_GetFeature_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Point) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(RouteGuideServer).GetFeature(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/routeguide.RouteGuide/GetFeature", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RouteGuideServer).GetFeature(ctx, 
req.(*Point)) } return interceptor(ctx, in, info, handler) } func _RouteGuide_ListFeatures_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(Rectangle) if err := stream.RecvMsg(m); err != nil { return err } return srv.(RouteGuideServer).ListFeatures(m, &routeGuideListFeaturesServer{stream}) } type RouteGuide_ListFeaturesServer interface { Send(*Feature) error grpc.ServerStream } type routeGuideListFeaturesServer struct { grpc.ServerStream } func (x *routeGuideListFeaturesServer) Send(m *Feature) error { return x.ServerStream.SendMsg(m) } func _RouteGuide_RecordRoute_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(RouteGuideServer).RecordRoute(&routeGuideRecordRouteServer{stream}) } type RouteGuide_RecordRouteServer interface { SendAndClose(*RouteSummary) error Recv() (*Point, error) grpc.ServerStream } type routeGuideRecordRouteServer struct { grpc.ServerStream } func (x *routeGuideRecordRouteServer) SendAndClose(m *RouteSummary) error { return x.ServerStream.SendMsg(m) } func (x *routeGuideRecordRouteServer) Recv() (*Point, error) { m := new(Point) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _RouteGuide_RouteChat_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(RouteGuideServer).RouteChat(&routeGuideRouteChatServer{stream}) } type RouteGuide_RouteChatServer interface { Send(*RouteNote) error Recv() (*RouteNote, error) grpc.ServerStream } type routeGuideRouteChatServer struct { grpc.ServerStream } func (x *routeGuideRouteChatServer) Send(m *RouteNote) error { return x.ServerStream.SendMsg(m) } func (x *routeGuideRouteChatServer) Recv() (*RouteNote, error) { m := new(RouteNote) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _RouteGuide_serviceDesc = grpc.ServiceDesc{ ServiceName: "routeguide.RouteGuide", HandlerType: (*RouteGuideServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "GetFeature", Handler: 
_RouteGuide_GetFeature_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "ListFeatures", Handler: _RouteGuide_ListFeatures_Handler, ServerStreams: true, }, { StreamName: "RecordRoute", Handler: _RouteGuide_RecordRoute_Handler, ClientStreams: true, }, { StreamName: "RouteChat", Handler: _RouteGuide_RouteChat_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "route_guide.proto", } grpc-go-1.29.1/examples/route_guide/routeguide/route_guide.proto000066400000000000000000000065771365033716300250750ustar00rootroot00000000000000// Copyright 2015 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; option java_multiple_files = true; option java_package = "io.grpc.examples.routeguide"; option java_outer_classname = "RouteGuideProto"; package routeguide; // Interface exported by the server. service RouteGuide { // A simple RPC. // // Obtains the feature at a given position. // // A feature with an empty name is returned if there's no feature at the given // position. rpc GetFeature(Point) returns (Feature) {} // A server-to-client streaming RPC. // // Obtains the Features available within the given Rectangle. Results are // streamed rather than returned at once (e.g. in a response message with a // repeated field), as the rectangle may cover a large area and contain a // huge number of features. rpc ListFeatures(Rectangle) returns (stream Feature) {} // A client-to-server streaming RPC. 
// // Accepts a stream of Points on a route being traversed, returning a // RouteSummary when traversal is completed. rpc RecordRoute(stream Point) returns (RouteSummary) {} // A Bidirectional streaming RPC. // // Accepts a stream of RouteNotes sent while a route is being traversed, // while receiving other RouteNotes (e.g. from other users). rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} } // Points are represented as latitude-longitude pairs in the E7 representation // (degrees multiplied by 10**7 and rounded to the nearest integer). // Latitudes should be in the range +/- 90 degrees and longitude should be in // the range +/- 180 degrees (inclusive). message Point { int32 latitude = 1; int32 longitude = 2; } // A latitude-longitude rectangle, represented as two diagonally opposite // points "lo" and "hi". message Rectangle { // One corner of the rectangle. Point lo = 1; // The other corner of the rectangle. Point hi = 2; } // A feature names something at a given point. // // If a feature could not be named, the name is empty. message Feature { // The name of the feature. string name = 1; // The point where the feature is detected. Point location = 2; } // A RouteNote is a message sent while at a given point. message RouteNote { // The location from which the message is sent. Point location = 1; // The message to be sent. string message = 2; } // A RouteSummary is received in response to a RecordRoute rpc. // // It contains the number of individual points received, the number of // detected features, and the total distance covered as the cumulative sum of // the distance between each point. message RouteSummary { // The number of points received. int32 point_count = 1; // The number of known features passed while traversing the route. int32 feature_count = 2; // The distance covered in metres. int32 distance = 3; // The duration of the traversal in seconds. 
int32 elapsed_time = 4; } grpc-go-1.29.1/examples/route_guide/server/000077500000000000000000000000001365033716300206105ustar00rootroot00000000000000grpc-go-1.29.1/examples/route_guide/server/server.go000066400000000000000000000511611365033716300224510ustar00rootroot00000000000000/* * * Copyright 2015 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate protoc -I ../routeguide --go_out=plugins=grpc:../routeguide ../routeguide/route_guide.proto // Package main implements a simple gRPC server that demonstrates how to use gRPC-Go libraries // to perform unary, client streaming, server streaming and full duplex RPCs. // // It implements the route guide service whose definition can be found in routeguide/route_guide.proto. 
package main import ( "context" "encoding/json" "flag" "fmt" "io" "io/ioutil" "log" "math" "net" "sync" "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/testdata" "github.com/golang/protobuf/proto" pb "google.golang.org/grpc/examples/route_guide/routeguide" ) var ( tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") certFile = flag.String("cert_file", "", "The TLS cert file") keyFile = flag.String("key_file", "", "The TLS key file") jsonDBFile = flag.String("json_db_file", "", "A json file containing a list of features") port = flag.Int("port", 10000, "The server port") ) type routeGuideServer struct { pb.UnimplementedRouteGuideServer savedFeatures []*pb.Feature // read-only after initialized mu sync.Mutex // protects routeNotes routeNotes map[string][]*pb.RouteNote } // GetFeature returns the feature at the given point. func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { for _, feature := range s.savedFeatures { if proto.Equal(feature.Location, point) { return feature, nil } } // No feature was found, return an unnamed feature return &pb.Feature{Location: point}, nil } // ListFeatures lists all features contained within the given bounding Rectangle. func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { for _, feature := range s.savedFeatures { if inRange(feature.Location, rect) { if err := stream.Send(feature); err != nil { return err } } } return nil } // RecordRoute records a route composited of a sequence of points. // // It gets a stream of points, and responds with statistics about the "trip": // number of points, number of known features visited, total distance traveled, and // total time spent. 
func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { var pointCount, featureCount, distance int32 var lastPoint *pb.Point startTime := time.Now() for { point, err := stream.Recv() if err == io.EOF { endTime := time.Now() return stream.SendAndClose(&pb.RouteSummary{ PointCount: pointCount, FeatureCount: featureCount, Distance: distance, ElapsedTime: int32(endTime.Sub(startTime).Seconds()), }) } if err != nil { return err } pointCount++ for _, feature := range s.savedFeatures { if proto.Equal(feature.Location, point) { featureCount++ } } if lastPoint != nil { distance += calcDistance(lastPoint, point) } lastPoint = point } } // RouteChat receives a stream of message/location pairs, and responds with a stream of all // previous messages at each of those locations. func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { for { in, err := stream.Recv() if err == io.EOF { return nil } if err != nil { return err } key := serialize(in.Location) s.mu.Lock() s.routeNotes[key] = append(s.routeNotes[key], in) // Note: this copy prevents blocking other clients while serving this one. // We don't need to do a deep copy, because elements in the slice are // insert-only and never modified. rn := make([]*pb.RouteNote, len(s.routeNotes[key])) copy(rn, s.routeNotes[key]) s.mu.Unlock() for _, note := range rn { if err := stream.Send(note); err != nil { return err } } } } // loadFeatures loads features from a JSON file. 
func (s *routeGuideServer) loadFeatures(filePath string) { var data []byte if filePath != "" { var err error data, err = ioutil.ReadFile(filePath) if err != nil { log.Fatalf("Failed to load default features: %v", err) } } else { data = exampleData } if err := json.Unmarshal(data, &s.savedFeatures); err != nil { log.Fatalf("Failed to load default features: %v", err) } } func toRadians(num float64) float64 { return num * math.Pi / float64(180) } // calcDistance calculates the distance between two points using the "haversine" formula. // The formula is based on http://mathforum.org/library/drmath/view/51879.html. func calcDistance(p1 *pb.Point, p2 *pb.Point) int32 { const CordFactor float64 = 1e7 const R = float64(6371000) // earth radius in metres lat1 := toRadians(float64(p1.Latitude) / CordFactor) lat2 := toRadians(float64(p2.Latitude) / CordFactor) lng1 := toRadians(float64(p1.Longitude) / CordFactor) lng2 := toRadians(float64(p2.Longitude) / CordFactor) dlat := lat2 - lat1 dlng := lng2 - lng1 a := math.Sin(dlat/2)*math.Sin(dlat/2) + math.Cos(lat1)*math.Cos(lat2)* math.Sin(dlng/2)*math.Sin(dlng/2) c := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a)) distance := R * c return int32(distance) } func inRange(point *pb.Point, rect *pb.Rectangle) bool { left := math.Min(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude)) right := math.Max(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude)) top := math.Max(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude)) bottom := math.Min(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude)) if float64(point.Longitude) >= left && float64(point.Longitude) <= right && float64(point.Latitude) >= bottom && float64(point.Latitude) <= top { return true } return false } func serialize(point *pb.Point) string { return fmt.Sprintf("%d %d", point.Latitude, point.Longitude) } func newServer() *routeGuideServer { s := &routeGuideServer{routeNotes: make(map[string][]*pb.RouteNote)} s.loadFeatures(*jsonDBFile) return s } func main() { 
flag.Parse() lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } var opts []grpc.ServerOption if *tls { if *certFile == "" { *certFile = testdata.Path("server1.pem") } if *keyFile == "" { *keyFile = testdata.Path("server1.key") } creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { log.Fatalf("Failed to generate credentials %v", err) } opts = []grpc.ServerOption{grpc.Creds(creds)} } grpcServer := grpc.NewServer(opts...) pb.RegisterRouteGuideServer(grpcServer, newServer()) grpcServer.Serve(lis) } // exampleData is a copy of testdata/route_guide_db.json. It's to avoid // specifying file path with `go run`. var exampleData = []byte(`[{ "location": { "latitude": 407838351, "longitude": -746143763 }, "name": "Patriots Path, Mendham, NJ 07945, USA" }, { "location": { "latitude": 408122808, "longitude": -743999179 }, "name": "101 New Jersey 10, Whippany, NJ 07981, USA" }, { "location": { "latitude": 413628156, "longitude": -749015468 }, "name": "U.S. 
6, Shohola, PA 18458, USA" }, { "location": { "latitude": 419999544, "longitude": -740371136 }, "name": "5 Conners Road, Kingston, NY 12401, USA" }, { "location": { "latitude": 414008389, "longitude": -743951297 }, "name": "Mid Hudson Psychiatric Center, New Hampton, NY 10958, USA" }, { "location": { "latitude": 419611318, "longitude": -746524769 }, "name": "287 Flugertown Road, Livingston Manor, NY 12758, USA" }, { "location": { "latitude": 406109563, "longitude": -742186778 }, "name": "4001 Tremley Point Road, Linden, NJ 07036, USA" }, { "location": { "latitude": 416802456, "longitude": -742370183 }, "name": "352 South Mountain Road, Wallkill, NY 12589, USA" }, { "location": { "latitude": 412950425, "longitude": -741077389 }, "name": "Bailey Turn Road, Harriman, NY 10926, USA" }, { "location": { "latitude": 412144655, "longitude": -743949739 }, "name": "193-199 Wawayanda Road, Hewitt, NJ 07421, USA" }, { "location": { "latitude": 415736605, "longitude": -742847522 }, "name": "406-496 Ward Avenue, Pine Bush, NY 12566, USA" }, { "location": { "latitude": 413843930, "longitude": -740501726 }, "name": "162 Merrill Road, Highland Mills, NY 10930, USA" }, { "location": { "latitude": 410873075, "longitude": -744459023 }, "name": "Clinton Road, West Milford, NJ 07480, USA" }, { "location": { "latitude": 412346009, "longitude": -744026814 }, "name": "16 Old Brook Lane, Warwick, NY 10990, USA" }, { "location": { "latitude": 402948455, "longitude": -747903913 }, "name": "3 Drake Lane, Pennington, NJ 08534, USA" }, { "location": { "latitude": 406337092, "longitude": -740122226 }, "name": "6324 8th Avenue, Brooklyn, NY 11220, USA" }, { "location": { "latitude": 406421967, "longitude": -747727624 }, "name": "1 Merck Access Road, Whitehouse Station, NJ 08889, USA" }, { "location": { "latitude": 416318082, "longitude": -749677716 }, "name": "78-98 Schalck Road, Narrowsburg, NY 12764, USA" }, { "location": { "latitude": 415301720, "longitude": -748416257 }, "name": "282 Lakeview 
Drive Road, Highland Lake, NY 12743, USA" }, { "location": { "latitude": 402647019, "longitude": -747071791 }, "name": "330 Evelyn Avenue, Hamilton Township, NJ 08619, USA" }, { "location": { "latitude": 412567807, "longitude": -741058078 }, "name": "New York State Reference Route 987E, Southfields, NY 10975, USA" }, { "location": { "latitude": 416855156, "longitude": -744420597 }, "name": "103-271 Tempaloni Road, Ellenville, NY 12428, USA" }, { "location": { "latitude": 404663628, "longitude": -744820157 }, "name": "1300 Airport Road, North Brunswick Township, NJ 08902, USA" }, { "location": { "latitude": 407113723, "longitude": -749746483 }, "name": "" }, { "location": { "latitude": 402133926, "longitude": -743613249 }, "name": "" }, { "location": { "latitude": 400273442, "longitude": -741220915 }, "name": "" }, { "location": { "latitude": 411236786, "longitude": -744070769 }, "name": "" }, { "location": { "latitude": 411633782, "longitude": -746784970 }, "name": "211-225 Plains Road, Augusta, NJ 07822, USA" }, { "location": { "latitude": 415830701, "longitude": -742952812 }, "name": "" }, { "location": { "latitude": 413447164, "longitude": -748712898 }, "name": "165 Pedersen Ridge Road, Milford, PA 18337, USA" }, { "location": { "latitude": 405047245, "longitude": -749800722 }, "name": "100-122 Locktown Road, Frenchtown, NJ 08825, USA" }, { "location": { "latitude": 418858923, "longitude": -746156790 }, "name": "" }, { "location": { "latitude": 417951888, "longitude": -748484944 }, "name": "650-652 Willi Hill Road, Swan Lake, NY 12783, USA" }, { "location": { "latitude": 407033786, "longitude": -743977337 }, "name": "26 East 3rd Street, New Providence, NJ 07974, USA" }, { "location": { "latitude": 417548014, "longitude": -740075041 }, "name": "" }, { "location": { "latitude": 410395868, "longitude": -744972325 }, "name": "" }, { "location": { "latitude": 404615353, "longitude": -745129803 }, "name": "" }, { "location": { "latitude": 406589790, "longitude": 
-743560121 }, "name": "611 Lawrence Avenue, Westfield, NJ 07090, USA" }, { "location": { "latitude": 414653148, "longitude": -740477477 }, "name": "18 Lannis Avenue, New Windsor, NY 12553, USA" }, { "location": { "latitude": 405957808, "longitude": -743255336 }, "name": "82-104 Amherst Avenue, Colonia, NJ 07067, USA" }, { "location": { "latitude": 411733589, "longitude": -741648093 }, "name": "170 Seven Lakes Drive, Sloatsburg, NY 10974, USA" }, { "location": { "latitude": 412676291, "longitude": -742606606 }, "name": "1270 Lakes Road, Monroe, NY 10950, USA" }, { "location": { "latitude": 409224445, "longitude": -748286738 }, "name": "509-535 Alphano Road, Great Meadows, NJ 07838, USA" }, { "location": { "latitude": 406523420, "longitude": -742135517 }, "name": "652 Garden Street, Elizabeth, NJ 07202, USA" }, { "location": { "latitude": 401827388, "longitude": -740294537 }, "name": "349 Sea Spray Court, Neptune City, NJ 07753, USA" }, { "location": { "latitude": 410564152, "longitude": -743685054 }, "name": "13-17 Stanley Street, West Milford, NJ 07480, USA" }, { "location": { "latitude": 408472324, "longitude": -740726046 }, "name": "47 Industrial Avenue, Teterboro, NJ 07608, USA" }, { "location": { "latitude": 412452168, "longitude": -740214052 }, "name": "5 White Oak Lane, Stony Point, NY 10980, USA" }, { "location": { "latitude": 409146138, "longitude": -746188906 }, "name": "Berkshire Valley Management Area Trail, Jefferson, NJ, USA" }, { "location": { "latitude": 404701380, "longitude": -744781745 }, "name": "1007 Jersey Avenue, New Brunswick, NJ 08901, USA" }, { "location": { "latitude": 409642566, "longitude": -746017679 }, "name": "6 East Emerald Isle Drive, Lake Hopatcong, NJ 07849, USA" }, { "location": { "latitude": 408031728, "longitude": -748645385 }, "name": "1358-1474 New Jersey 57, Port Murray, NJ 07865, USA" }, { "location": { "latitude": 413700272, "longitude": -742135189 }, "name": "367 Prospect Road, Chester, NY 10918, USA" }, { "location": { 
"latitude": 404310607, "longitude": -740282632 }, "name": "10 Simon Lake Drive, Atlantic Highlands, NJ 07716, USA" }, { "location": { "latitude": 409319800, "longitude": -746201391 }, "name": "11 Ward Street, Mount Arlington, NJ 07856, USA" }, { "location": { "latitude": 406685311, "longitude": -742108603 }, "name": "300-398 Jefferson Avenue, Elizabeth, NJ 07201, USA" }, { "location": { "latitude": 419018117, "longitude": -749142781 }, "name": "43 Dreher Road, Roscoe, NY 12776, USA" }, { "location": { "latitude": 412856162, "longitude": -745148837 }, "name": "Swan Street, Pine Island, NY 10969, USA" }, { "location": { "latitude": 416560744, "longitude": -746721964 }, "name": "66 Pleasantview Avenue, Monticello, NY 12701, USA" }, { "location": { "latitude": 405314270, "longitude": -749836354 }, "name": "" }, { "location": { "latitude": 414219548, "longitude": -743327440 }, "name": "" }, { "location": { "latitude": 415534177, "longitude": -742900616 }, "name": "565 Winding Hills Road, Montgomery, NY 12549, USA" }, { "location": { "latitude": 406898530, "longitude": -749127080 }, "name": "231 Rocky Run Road, Glen Gardner, NJ 08826, USA" }, { "location": { "latitude": 407586880, "longitude": -741670168 }, "name": "100 Mount Pleasant Avenue, Newark, NJ 07104, USA" }, { "location": { "latitude": 400106455, "longitude": -742870190 }, "name": "517-521 Huntington Drive, Manchester Township, NJ 08759, USA" }, { "location": { "latitude": 400066188, "longitude": -746793294 }, "name": "" }, { "location": { "latitude": 418803880, "longitude": -744102673 }, "name": "40 Mountain Road, Napanoch, NY 12458, USA" }, { "location": { "latitude": 414204288, "longitude": -747895140 }, "name": "" }, { "location": { "latitude": 414777405, "longitude": -740615601 }, "name": "" }, { "location": { "latitude": 415464475, "longitude": -747175374 }, "name": "48 North Road, Forestburgh, NY 12777, USA" }, { "location": { "latitude": 404062378, "longitude": -746376177 }, "name": "" }, { "location": 
{ "latitude": 405688272, "longitude": -749285130 }, "name": "" }, { "location": { "latitude": 400342070, "longitude": -748788996 }, "name": "" }, { "location": { "latitude": 401809022, "longitude": -744157964 }, "name": "" }, { "location": { "latitude": 404226644, "longitude": -740517141 }, "name": "9 Thompson Avenue, Leonardo, NJ 07737, USA" }, { "location": { "latitude": 410322033, "longitude": -747871659 }, "name": "" }, { "location": { "latitude": 407100674, "longitude": -747742727 }, "name": "" }, { "location": { "latitude": 418811433, "longitude": -741718005 }, "name": "213 Bush Road, Stone Ridge, NY 12484, USA" }, { "location": { "latitude": 415034302, "longitude": -743850945 }, "name": "" }, { "location": { "latitude": 411349992, "longitude": -743694161 }, "name": "" }, { "location": { "latitude": 404839914, "longitude": -744759616 }, "name": "1-17 Bergen Court, New Brunswick, NJ 08901, USA" }, { "location": { "latitude": 414638017, "longitude": -745957854 }, "name": "35 Oakland Valley Road, Cuddebackville, NY 12729, USA" }, { "location": { "latitude": 412127800, "longitude": -740173578 }, "name": "" }, { "location": { "latitude": 401263460, "longitude": -747964303 }, "name": "" }, { "location": { "latitude": 412843391, "longitude": -749086026 }, "name": "" }, { "location": { "latitude": 418512773, "longitude": -743067823 }, "name": "" }, { "location": { "latitude": 404318328, "longitude": -740835638 }, "name": "42-102 Main Street, Belford, NJ 07718, USA" }, { "location": { "latitude": 419020746, "longitude": -741172328 }, "name": "" }, { "location": { "latitude": 404080723, "longitude": -746119569 }, "name": "" }, { "location": { "latitude": 401012643, "longitude": -744035134 }, "name": "" }, { "location": { "latitude": 404306372, "longitude": -741079661 }, "name": "" }, { "location": { "latitude": 403966326, "longitude": -748519297 }, "name": "" }, { "location": { "latitude": 405002031, "longitude": -748407866 }, "name": "" }, { "location": { "latitude": 
409532885, "longitude": -742200683 }, "name": "" }, { "location": { "latitude": 416851321, "longitude": -742674555 }, "name": "" }, { "location": { "latitude": 406411633, "longitude": -741722051 }, "name": "3387 Richmond Terrace, Staten Island, NY 10303, USA" }, { "location": { "latitude": 413069058, "longitude": -744597778 }, "name": "261 Van Sickle Road, Goshen, NY 10924, USA" }, { "location": { "latitude": 418465462, "longitude": -746859398 }, "name": "" }, { "location": { "latitude": 411733222, "longitude": -744228360 }, "name": "" }, { "location": { "latitude": 410248224, "longitude": -747127767 }, "name": "3 Hasta Way, Newton, NJ 07860, USA" }]`) grpc-go-1.29.1/examples/route_guide/testdata/000077500000000000000000000000001365033716300211135ustar00rootroot00000000000000grpc-go-1.29.1/examples/route_guide/testdata/route_guide_db.json000066400000000000000000000327101365033716300247710ustar00rootroot00000000000000[{ "location": { "latitude": 407838351, "longitude": -746143763 }, "name": "Patriots Path, Mendham, NJ 07945, USA" }, { "location": { "latitude": 408122808, "longitude": -743999179 }, "name": "101 New Jersey 10, Whippany, NJ 07981, USA" }, { "location": { "latitude": 413628156, "longitude": -749015468 }, "name": "U.S. 
6, Shohola, PA 18458, USA" }, { "location": { "latitude": 419999544, "longitude": -740371136 }, "name": "5 Conners Road, Kingston, NY 12401, USA" }, { "location": { "latitude": 414008389, "longitude": -743951297 }, "name": "Mid Hudson Psychiatric Center, New Hampton, NY 10958, USA" }, { "location": { "latitude": 419611318, "longitude": -746524769 }, "name": "287 Flugertown Road, Livingston Manor, NY 12758, USA" }, { "location": { "latitude": 406109563, "longitude": -742186778 }, "name": "4001 Tremley Point Road, Linden, NJ 07036, USA" }, { "location": { "latitude": 416802456, "longitude": -742370183 }, "name": "352 South Mountain Road, Wallkill, NY 12589, USA" }, { "location": { "latitude": 412950425, "longitude": -741077389 }, "name": "Bailey Turn Road, Harriman, NY 10926, USA" }, { "location": { "latitude": 412144655, "longitude": -743949739 }, "name": "193-199 Wawayanda Road, Hewitt, NJ 07421, USA" }, { "location": { "latitude": 415736605, "longitude": -742847522 }, "name": "406-496 Ward Avenue, Pine Bush, NY 12566, USA" }, { "location": { "latitude": 413843930, "longitude": -740501726 }, "name": "162 Merrill Road, Highland Mills, NY 10930, USA" }, { "location": { "latitude": 410873075, "longitude": -744459023 }, "name": "Clinton Road, West Milford, NJ 07480, USA" }, { "location": { "latitude": 412346009, "longitude": -744026814 }, "name": "16 Old Brook Lane, Warwick, NY 10990, USA" }, { "location": { "latitude": 402948455, "longitude": -747903913 }, "name": "3 Drake Lane, Pennington, NJ 08534, USA" }, { "location": { "latitude": 406337092, "longitude": -740122226 }, "name": "6324 8th Avenue, Brooklyn, NY 11220, USA" }, { "location": { "latitude": 406421967, "longitude": -747727624 }, "name": "1 Merck Access Road, Whitehouse Station, NJ 08889, USA" }, { "location": { "latitude": 416318082, "longitude": -749677716 }, "name": "78-98 Schalck Road, Narrowsburg, NY 12764, USA" }, { "location": { "latitude": 415301720, "longitude": -748416257 }, "name": "282 Lakeview 
Drive Road, Highland Lake, NY 12743, USA" }, { "location": { "latitude": 402647019, "longitude": -747071791 }, "name": "330 Evelyn Avenue, Hamilton Township, NJ 08619, USA" }, { "location": { "latitude": 412567807, "longitude": -741058078 }, "name": "New York State Reference Route 987E, Southfields, NY 10975, USA" }, { "location": { "latitude": 416855156, "longitude": -744420597 }, "name": "103-271 Tempaloni Road, Ellenville, NY 12428, USA" }, { "location": { "latitude": 404663628, "longitude": -744820157 }, "name": "1300 Airport Road, North Brunswick Township, NJ 08902, USA" }, { "location": { "latitude": 407113723, "longitude": -749746483 }, "name": "" }, { "location": { "latitude": 402133926, "longitude": -743613249 }, "name": "" }, { "location": { "latitude": 400273442, "longitude": -741220915 }, "name": "" }, { "location": { "latitude": 411236786, "longitude": -744070769 }, "name": "" }, { "location": { "latitude": 411633782, "longitude": -746784970 }, "name": "211-225 Plains Road, Augusta, NJ 07822, USA" }, { "location": { "latitude": 415830701, "longitude": -742952812 }, "name": "" }, { "location": { "latitude": 413447164, "longitude": -748712898 }, "name": "165 Pedersen Ridge Road, Milford, PA 18337, USA" }, { "location": { "latitude": 405047245, "longitude": -749800722 }, "name": "100-122 Locktown Road, Frenchtown, NJ 08825, USA" }, { "location": { "latitude": 418858923, "longitude": -746156790 }, "name": "" }, { "location": { "latitude": 417951888, "longitude": -748484944 }, "name": "650-652 Willi Hill Road, Swan Lake, NY 12783, USA" }, { "location": { "latitude": 407033786, "longitude": -743977337 }, "name": "26 East 3rd Street, New Providence, NJ 07974, USA" }, { "location": { "latitude": 417548014, "longitude": -740075041 }, "name": "" }, { "location": { "latitude": 410395868, "longitude": -744972325 }, "name": "" }, { "location": { "latitude": 404615353, "longitude": -745129803 }, "name": "" }, { "location": { "latitude": 406589790, "longitude": 
-743560121 }, "name": "611 Lawrence Avenue, Westfield, NJ 07090, USA" }, { "location": { "latitude": 414653148, "longitude": -740477477 }, "name": "18 Lannis Avenue, New Windsor, NY 12553, USA" }, { "location": { "latitude": 405957808, "longitude": -743255336 }, "name": "82-104 Amherst Avenue, Colonia, NJ 07067, USA" }, { "location": { "latitude": 411733589, "longitude": -741648093 }, "name": "170 Seven Lakes Drive, Sloatsburg, NY 10974, USA" }, { "location": { "latitude": 412676291, "longitude": -742606606 }, "name": "1270 Lakes Road, Monroe, NY 10950, USA" }, { "location": { "latitude": 409224445, "longitude": -748286738 }, "name": "509-535 Alphano Road, Great Meadows, NJ 07838, USA" }, { "location": { "latitude": 406523420, "longitude": -742135517 }, "name": "652 Garden Street, Elizabeth, NJ 07202, USA" }, { "location": { "latitude": 401827388, "longitude": -740294537 }, "name": "349 Sea Spray Court, Neptune City, NJ 07753, USA" }, { "location": { "latitude": 410564152, "longitude": -743685054 }, "name": "13-17 Stanley Street, West Milford, NJ 07480, USA" }, { "location": { "latitude": 408472324, "longitude": -740726046 }, "name": "47 Industrial Avenue, Teterboro, NJ 07608, USA" }, { "location": { "latitude": 412452168, "longitude": -740214052 }, "name": "5 White Oak Lane, Stony Point, NY 10980, USA" }, { "location": { "latitude": 409146138, "longitude": -746188906 }, "name": "Berkshire Valley Management Area Trail, Jefferson, NJ, USA" }, { "location": { "latitude": 404701380, "longitude": -744781745 }, "name": "1007 Jersey Avenue, New Brunswick, NJ 08901, USA" }, { "location": { "latitude": 409642566, "longitude": -746017679 }, "name": "6 East Emerald Isle Drive, Lake Hopatcong, NJ 07849, USA" }, { "location": { "latitude": 408031728, "longitude": -748645385 }, "name": "1358-1474 New Jersey 57, Port Murray, NJ 07865, USA" }, { "location": { "latitude": 413700272, "longitude": -742135189 }, "name": "367 Prospect Road, Chester, NY 10918, USA" }, { "location": { 
"latitude": 404310607, "longitude": -740282632 }, "name": "10 Simon Lake Drive, Atlantic Highlands, NJ 07716, USA" }, { "location": { "latitude": 409319800, "longitude": -746201391 }, "name": "11 Ward Street, Mount Arlington, NJ 07856, USA" }, { "location": { "latitude": 406685311, "longitude": -742108603 }, "name": "300-398 Jefferson Avenue, Elizabeth, NJ 07201, USA" }, { "location": { "latitude": 419018117, "longitude": -749142781 }, "name": "43 Dreher Road, Roscoe, NY 12776, USA" }, { "location": { "latitude": 412856162, "longitude": -745148837 }, "name": "Swan Street, Pine Island, NY 10969, USA" }, { "location": { "latitude": 416560744, "longitude": -746721964 }, "name": "66 Pleasantview Avenue, Monticello, NY 12701, USA" }, { "location": { "latitude": 405314270, "longitude": -749836354 }, "name": "" }, { "location": { "latitude": 414219548, "longitude": -743327440 }, "name": "" }, { "location": { "latitude": 415534177, "longitude": -742900616 }, "name": "565 Winding Hills Road, Montgomery, NY 12549, USA" }, { "location": { "latitude": 406898530, "longitude": -749127080 }, "name": "231 Rocky Run Road, Glen Gardner, NJ 08826, USA" }, { "location": { "latitude": 407586880, "longitude": -741670168 }, "name": "100 Mount Pleasant Avenue, Newark, NJ 07104, USA" }, { "location": { "latitude": 400106455, "longitude": -742870190 }, "name": "517-521 Huntington Drive, Manchester Township, NJ 08759, USA" }, { "location": { "latitude": 400066188, "longitude": -746793294 }, "name": "" }, { "location": { "latitude": 418803880, "longitude": -744102673 }, "name": "40 Mountain Road, Napanoch, NY 12458, USA" }, { "location": { "latitude": 414204288, "longitude": -747895140 }, "name": "" }, { "location": { "latitude": 414777405, "longitude": -740615601 }, "name": "" }, { "location": { "latitude": 415464475, "longitude": -747175374 }, "name": "48 North Road, Forestburgh, NY 12777, USA" }, { "location": { "latitude": 404062378, "longitude": -746376177 }, "name": "" }, { "location": 
{ "latitude": 405688272, "longitude": -749285130 }, "name": "" }, { "location": { "latitude": 400342070, "longitude": -748788996 }, "name": "" }, { "location": { "latitude": 401809022, "longitude": -744157964 }, "name": "" }, { "location": { "latitude": 404226644, "longitude": -740517141 }, "name": "9 Thompson Avenue, Leonardo, NJ 07737, USA" }, { "location": { "latitude": 410322033, "longitude": -747871659 }, "name": "" }, { "location": { "latitude": 407100674, "longitude": -747742727 }, "name": "" }, { "location": { "latitude": 418811433, "longitude": -741718005 }, "name": "213 Bush Road, Stone Ridge, NY 12484, USA" }, { "location": { "latitude": 415034302, "longitude": -743850945 }, "name": "" }, { "location": { "latitude": 411349992, "longitude": -743694161 }, "name": "" }, { "location": { "latitude": 404839914, "longitude": -744759616 }, "name": "1-17 Bergen Court, New Brunswick, NJ 08901, USA" }, { "location": { "latitude": 414638017, "longitude": -745957854 }, "name": "35 Oakland Valley Road, Cuddebackville, NY 12729, USA" }, { "location": { "latitude": 412127800, "longitude": -740173578 }, "name": "" }, { "location": { "latitude": 401263460, "longitude": -747964303 }, "name": "" }, { "location": { "latitude": 412843391, "longitude": -749086026 }, "name": "" }, { "location": { "latitude": 418512773, "longitude": -743067823 }, "name": "" }, { "location": { "latitude": 404318328, "longitude": -740835638 }, "name": "42-102 Main Street, Belford, NJ 07718, USA" }, { "location": { "latitude": 419020746, "longitude": -741172328 }, "name": "" }, { "location": { "latitude": 404080723, "longitude": -746119569 }, "name": "" }, { "location": { "latitude": 401012643, "longitude": -744035134 }, "name": "" }, { "location": { "latitude": 404306372, "longitude": -741079661 }, "name": "" }, { "location": { "latitude": 403966326, "longitude": -748519297 }, "name": "" }, { "location": { "latitude": 405002031, "longitude": -748407866 }, "name": "" }, { "location": { "latitude": 
409532885, "longitude": -742200683 }, "name": "" }, { "location": { "latitude": 416851321, "longitude": -742674555 }, "name": "" }, { "location": { "latitude": 406411633, "longitude": -741722051 }, "name": "3387 Richmond Terrace, Staten Island, NY 10303, USA" }, { "location": { "latitude": 413069058, "longitude": -744597778 }, "name": "261 Van Sickle Road, Goshen, NY 10924, USA" }, { "location": { "latitude": 418465462, "longitude": -746859398 }, "name": "" }, { "location": { "latitude": 411733222, "longitude": -744228360 }, "name": "" }, { "location": { "latitude": 410248224, "longitude": -747127767 }, "name": "3 Hasta Way, Newton, NJ 07860, USA" }] grpc-go-1.29.1/go.mod000066400000000000000000000010361365033716300142570ustar00rootroot00000000000000module google.golang.org/grpc go 1.11 require ( github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f github.com/envoyproxy/go-control-plane v0.9.4 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/mock v1.1.1 github.com/golang/protobuf v1.3.3 github.com/google/go-cmp v0.2.0 golang.org/x/net v0.0.0-20190311183353-d8887717615a golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 ) grpc-go-1.29.1/go.sum000066400000000000000000000143001365033716300143020ustar00rootroot00000000000000cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= grpc-go-1.29.1/grpc_test.go000066400000000000000000000014141365033716300154720ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "testing" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } grpc-go-1.29.1/grpclog/000077500000000000000000000000001365033716300146065ustar00rootroot00000000000000grpc-go-1.29.1/grpclog/glogger/000077500000000000000000000000001365033716300162345ustar00rootroot00000000000000grpc-go-1.29.1/grpclog/glogger/glogger.go000066400000000000000000000047651365033716300202250ustar00rootroot00000000000000/* * * Copyright 2015 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package glogger defines glog-based logging for grpc. // Importing this package will install glog as the logger used by grpclog. package glogger import ( "fmt" "github.com/golang/glog" "google.golang.org/grpc/grpclog" ) func init() { grpclog.SetLoggerV2(&glogger{}) } type glogger struct{} func (g *glogger) Info(args ...interface{}) { glog.InfoDepth(2, args...) 
} func (g *glogger) Infoln(args ...interface{}) { glog.InfoDepth(2, fmt.Sprintln(args...)) } func (g *glogger) Infof(format string, args ...interface{}) { glog.InfoDepth(2, fmt.Sprintf(format, args...)) } func (g *glogger) InfoDepth(depth int, args ...interface{}) { glog.InfoDepth(depth+2, args...) } func (g *glogger) Warning(args ...interface{}) { glog.WarningDepth(2, args...) } func (g *glogger) Warningln(args ...interface{}) { glog.WarningDepth(2, fmt.Sprintln(args...)) } func (g *glogger) Warningf(format string, args ...interface{}) { glog.WarningDepth(2, fmt.Sprintf(format, args...)) } func (g *glogger) WarningDepth(depth int, args ...interface{}) { glog.WarningDepth(depth+2, args...) } func (g *glogger) Error(args ...interface{}) { glog.ErrorDepth(2, args...) } func (g *glogger) Errorln(args ...interface{}) { glog.ErrorDepth(2, fmt.Sprintln(args...)) } func (g *glogger) Errorf(format string, args ...interface{}) { glog.ErrorDepth(2, fmt.Sprintf(format, args...)) } func (g *glogger) ErrorDepth(depth int, args ...interface{}) { glog.ErrorDepth(depth+2, args...) } func (g *glogger) Fatal(args ...interface{}) { glog.FatalDepth(2, args...) } func (g *glogger) Fatalln(args ...interface{}) { glog.FatalDepth(2, fmt.Sprintln(args...)) } func (g *glogger) Fatalf(format string, args ...interface{}) { glog.FatalDepth(2, fmt.Sprintf(format, args...)) } func (g *glogger) FatalDepth(depth int, args ...interface{}) { glog.FatalDepth(depth+2, args...) } func (g *glogger) V(l int) bool { return bool(glog.V(glog.Level(l))) } grpc-go-1.29.1/grpclog/grpclog.go000066400000000000000000000075061365033716300166020ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package grpclog defines logging for grpc. // // All logs in transport and grpclb packages only go to verbose level 2. // All logs in other packages in grpc are logged in spite of the verbosity level. // // In the default logger, // severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, // verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. package grpclog // import "google.golang.org/grpc/grpclog" import ( "os" "google.golang.org/grpc/internal/grpclog" ) func init() { SetLoggerV2(newLoggerV2()) } // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { return grpclog.Logger.V(l) } // Info logs to the INFO log. func Info(args ...interface{}) { grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...interface{}) { grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...interface{}) { grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...interface{}) { grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...interface{}) { grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...interface{}) { grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. 
func Error(args ...interface{}) { grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...interface{}) { grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...interface{}) { grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...interface{}) { grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...interface{}) { grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calle os.Exit()) with exit code 1. func Fatalln(args ...interface{}) { grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } // Print prints to the logger. Arguments are handled in the manner of fmt.Print. // // Deprecated: use Info. func Print(args ...interface{}) { grpclog.Logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...interface{}) { grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...interface{}) { grpclog.Logger.Infoln(args...) } grpc-go-1.29.1/grpclog/logger.go000066400000000000000000000042151365033716300164160ustar00rootroot00000000000000/* * * Copyright 2015 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpclog import "google.golang.org/grpc/internal/grpclog" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. type Logger interface { Fatal(args ...interface{}) Fatalf(format string, args ...interface{}) Fatalln(args ...interface{}) Print(args ...interface{}) Printf(format string, args ...interface{}) Println(args ...interface{}) } // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { grpclog.Logger = &loggerWrapper{Logger: l} } // loggerWrapper wraps Logger into a LoggerV2. type loggerWrapper struct { Logger } func (g *loggerWrapper) Info(args ...interface{}) { g.Logger.Print(args...) } func (g *loggerWrapper) Infoln(args ...interface{}) { g.Logger.Println(args...) } func (g *loggerWrapper) Infof(format string, args ...interface{}) { g.Logger.Printf(format, args...) } func (g *loggerWrapper) Warning(args ...interface{}) { g.Logger.Print(args...) } func (g *loggerWrapper) Warningln(args ...interface{}) { g.Logger.Println(args...) } func (g *loggerWrapper) Warningf(format string, args ...interface{}) { g.Logger.Printf(format, args...) } func (g *loggerWrapper) Error(args ...interface{}) { g.Logger.Print(args...) } func (g *loggerWrapper) Errorln(args ...interface{}) { g.Logger.Println(args...) } func (g *loggerWrapper) Errorf(format string, args ...interface{}) { g.Logger.Printf(format, args...) } func (g *loggerWrapper) V(l int) bool { // Returns true for all verbose level. 
return true } grpc-go-1.29.1/grpclog/loggerv2.go000066400000000000000000000164011365033716300166660ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpclog import ( "io" "io/ioutil" "log" "os" "strconv" "google.golang.org/grpc/internal/grpclog" ) // LoggerV2 does underlying logging work for grpclog. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. Info(args ...interface{}) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. Infoln(args ...interface{}) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. Infof(format string, args ...interface{}) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. Warning(args ...interface{}) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. Warningln(args ...interface{}) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. Warningf(format string, args ...interface{}) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. Error(args ...interface{}) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. Errorln(args ...interface{}) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. Errorf(format string, args ...interface{}) // Fatal logs to ERROR log. 
Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. Fatal(args ...interface{}) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. Fatalln(args ...interface{}) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. Fatalf(format string, args ...interface{}) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. func SetLoggerV2(l LoggerV2) { grpclog.Logger = l grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) } const ( // infoLog indicates Info severity. infoLog int = iota // warningLog indicates Warning severity. warningLog // errorLog indicates Error severity. errorLog // fatalLog indicates Fatal severity. fatalLog ) // severityName contains the string representation of each severity. var severityName = []string{ infoLog: "INFO", warningLog: "WARNING", errorLog: "ERROR", fatalLog: "FATAL", } // loggerT is the default logger used by grpclog. type loggerT struct { m []*log.Logger v int } // NewLoggerV2 creates a loggerV2 with the provided writers. // Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). // Error logs will be written to errorW, warningW and infoW. // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. 
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { var m []*log.Logger m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) return &loggerT{m: m, v: v} } // newLoggerV2 creates a loggerV2 to be used as default logger. // All logs are written to stderr. func newLoggerV2() LoggerV2 { errorW := ioutil.Discard warningW := ioutil.Discard infoW := ioutil.Discard logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") switch logLevel { case "", "ERROR", "error": // If env is unset, set level to ERROR. errorW = os.Stderr case "WARNING", "warning": warningW = os.Stderr case "INFO", "info": infoW = os.Stderr } var v int vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") if vl, err := strconv.Atoi(vLevel); err == nil { v = vl } return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) } func (g *loggerT) Info(args ...interface{}) { g.m[infoLog].Print(args...) } func (g *loggerT) Infoln(args ...interface{}) { g.m[infoLog].Println(args...) } func (g *loggerT) Infof(format string, args ...interface{}) { g.m[infoLog].Printf(format, args...) } func (g *loggerT) Warning(args ...interface{}) { g.m[warningLog].Print(args...) } func (g *loggerT) Warningln(args ...interface{}) { g.m[warningLog].Println(args...) } func (g *loggerT) Warningf(format string, args ...interface{}) { g.m[warningLog].Printf(format, args...) } func (g *loggerT) Error(args ...interface{}) { g.m[errorLog].Print(args...) 
} func (g *loggerT) Errorln(args ...interface{}) { g.m[errorLog].Println(args...) } func (g *loggerT) Errorf(format string, args ...interface{}) { g.m[errorLog].Printf(format, args...) } func (g *loggerT) Fatal(args ...interface{}) { g.m[fatalLog].Fatal(args...) // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). } func (g *loggerT) Fatalln(args ...interface{}) { g.m[fatalLog].Fatalln(args...) // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). } func (g *loggerT) Fatalf(format string, args ...interface{}) { g.m[fatalLog].Fatalf(format, args...) // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). } func (g *loggerT) V(l int) bool { return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. // // This API is EXPERIMENTAL. type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. InfoDepth(depth int, args ...interface{}) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. WarningDepth(depth int, args ...interface{}) // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. ErrorDepth(depth int, args ...interface{}) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. FatalDepth(depth int, args ...interface{}) } grpc-go-1.29.1/grpclog/loggerv2_test.go000066400000000000000000000034761365033716300177350ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpclog import ( "bytes" "fmt" "regexp" "testing" ) func TestLoggerV2Severity(t *testing.T) { buffers := []*bytes.Buffer{new(bytes.Buffer), new(bytes.Buffer), new(bytes.Buffer)} SetLoggerV2(NewLoggerV2(buffers[infoLog], buffers[warningLog], buffers[errorLog])) Info(severityName[infoLog]) Warning(severityName[warningLog]) Error(severityName[errorLog]) for i := 0; i < fatalLog; i++ { buf := buffers[i] // The content of info buffer should be something like: // INFO: 2017/04/07 14:55:42 INFO // WARNING: 2017/04/07 14:55:42 WARNING // ERROR: 2017/04/07 14:55:42 ERROR for j := i; j < fatalLog; j++ { b, err := buf.ReadBytes('\n') if err != nil { t.Fatal(err) } if err := checkLogForSeverity(j, b); err != nil { t.Fatal(err) } } } } // check if b is in the format of: // WARNING: 2017/04/07 14:55:42 WARNING func checkLogForSeverity(s int, b []byte) error { expected := regexp.MustCompile(fmt.Sprintf(`^%s: [0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} %s\n$`, severityName[s], severityName[s])) if m := expected.Match(b); !m { return fmt.Errorf("got: %v, want string in format of: %v", string(b), severityName[s]+": 2016/10/05 17:09:26 "+severityName[s]) } return nil } grpc-go-1.29.1/health/000077500000000000000000000000001365033716300144165ustar00rootroot00000000000000grpc-go-1.29.1/health/client.go000066400000000000000000000067441365033716300162360ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package health import ( "context" "fmt" "io" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/status" ) var ( backoffStrategy = backoff.DefaultExponential backoffFunc = func(ctx context.Context, retries int) bool { d := backoffStrategy.Backoff(retries) timer := time.NewTimer(d) select { case <-timer.C: return true case <-ctx.Done(): timer.Stop() return false } } ) func init() { internal.HealthCheckFunc = clientHealthCheck } const healthCheckMethod = "/grpc.health.v1.Health/Watch" // This function implements the protocol defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error { tryCnt := 0 retryConnection: for { // Backs off if the connection has failed in some way without receiving a message in the previous retry. if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) { return nil } tryCnt++ if ctx.Err() != nil { return nil } setConnectivityState(connectivity.Connecting, nil) rawS, err := newStream(healthCheckMethod) if err != nil { continue retryConnection } s, ok := rawS.(grpc.ClientStream) // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes. 
if !ok { setConnectivityState(connectivity.Ready, nil) return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) } if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF { // Stream should have been closed, so we can safely continue to create a new stream. continue retryConnection } s.CloseSend() resp := new(healthpb.HealthCheckResponse) for { err = s.RecvMsg(resp) // Reports healthy for the LBing purposes if health check is not implemented in the server. if status.Code(err) == codes.Unimplemented { setConnectivityState(connectivity.Ready, nil) return err } // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED. if err != nil { setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err)) continue retryConnection } // As a message has been received, removes the need for backoff for the next retry by resetting the try count. tryCnt = 0 if resp.Status == healthpb.HealthCheckResponse_SERVING { setConnectivityState(connectivity.Ready, nil) } else { setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status)) } } } } grpc-go-1.29.1/health/client_test.go000066400000000000000000000027761365033716300172760ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package health import ( "context" "errors" "reflect" "testing" "time" "google.golang.org/grpc/connectivity" ) func (s) TestClientHealthCheckBackoff(t *testing.T) { const maxRetries = 5 var want []time.Duration for i := 0; i < maxRetries; i++ { want = append(want, time.Duration(i+1)*time.Second) } var got []time.Duration newStream := func(string) (interface{}, error) { if len(got) < maxRetries { return nil, errors.New("backoff") } return nil, nil } oldBackoffFunc := backoffFunc backoffFunc = func(ctx context.Context, retries int) bool { got = append(got, time.Duration(retries+1)*time.Second) return true } defer func() { backoffFunc = oldBackoffFunc }() clientHealthCheck(context.Background(), newStream, func(connectivity.State, error) {}, "test") if !reflect.DeepEqual(got, want) { t.Fatalf("Backoff durations for %v retries are %v. (expected: %v)", maxRetries, got, want) } } grpc-go-1.29.1/health/grpc_health_v1/000077500000000000000000000000001365033716300173045ustar00rootroot00000000000000grpc-go-1.29.1/health/grpc_health_v1/health.pb.go000066400000000000000000000317161365033716300215100ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc/health/v1/health.proto package grpc_health_v1 import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type HealthCheckResponse_ServingStatus int32 const ( HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 ) var HealthCheckResponse_ServingStatus_name = map[int32]string{ 0: "UNKNOWN", 1: "SERVING", 2: "NOT_SERVING", 3: "SERVICE_UNKNOWN", } var HealthCheckResponse_ServingStatus_value = map[string]int32{ "UNKNOWN": 0, "SERVING": 1, "NOT_SERVING": 2, "SERVICE_UNKNOWN": 3, } func (x HealthCheckResponse_ServingStatus) String() string { return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) } func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { return fileDescriptor_e265fd9d4e077217, []int{1, 0} } type HealthCheckRequest struct { Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } func (*HealthCheckRequest) ProtoMessage() {} func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor_e265fd9d4e077217, []int{0} } func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b) } func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic) } func (m *HealthCheckRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_HealthCheckRequest.Merge(m, src) } func (m *HealthCheckRequest) XXX_Size() int { return xxx_messageInfo_HealthCheckRequest.Size(m) } func (m *HealthCheckRequest) 
XXX_DiscardUnknown() { xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m) } var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo func (m *HealthCheckRequest) GetService() string { if m != nil { return m.Service } return "" } type HealthCheckResponse struct { Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } func (*HealthCheckResponse) ProtoMessage() {} func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor_e265fd9d4e077217, []int{1} } func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b) } func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic) } func (m *HealthCheckResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_HealthCheckResponse.Merge(m, src) } func (m *HealthCheckResponse) XXX_Size() int { return xxx_messageInfo_HealthCheckResponse.Size(m) } func (m *HealthCheckResponse) XXX_DiscardUnknown() { xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m) } var xxx_messageInfo_HealthCheckResponse proto.InternalMessageInfo func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { if m != nil { return m.Status } return HealthCheckResponse_UNKNOWN } func init() { proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") proto.RegisterType((*HealthCheckResponse)(nil), 
"grpc.health.v1.HealthCheckResponse") } func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_e265fd9d4e077217) } var fileDescriptor_e265fd9d4e077217 = []byte{ // 297 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d, 0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3, 0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac, 0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10, 0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc, 0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20, 0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 
0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // HealthClient is the client API for Health service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type HealthClient interface { // If the requested service is unknown, the call will fail with status // NOT_FOUND. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current // serving status. It will then subsequently send a new message whenever // the service's serving status changes. // // If the requested service is unknown when the call is received, the // server will send a message setting the serving status to // SERVICE_UNKNOWN but will *not* terminate the call. If at some // future point, the serving status of the service becomes known, the // server will send a new message with the service's serving status. // // If the call terminates with status UNIMPLEMENTED, then clients // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. 
Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) } type healthClient struct { cc grpc.ClientConnInterface } func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { return &healthClient{cc} } func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { out := new(HealthCheckResponse) err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) if err != nil { return nil, err } x := &healthWatchClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type Health_WatchClient interface { Recv() (*HealthCheckResponse, error) grpc.ClientStream } type healthWatchClient struct { grpc.ClientStream } func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { m := new(HealthCheckResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // HealthServer is the server API for Health service. type HealthServer interface { // If the requested service is unknown, the call will fail with status // NOT_FOUND. Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current // serving status. It will then subsequently send a new message whenever // the service's serving status changes. 
// // If the requested service is unknown when the call is received, the // server will send a message setting the serving status to // SERVICE_UNKNOWN but will *not* terminate the call. If at some // future point, the serving status of the service becomes known, the // server will send a new message with the service's serving status. // // If the call terminates with status UNIMPLEMENTED, then clients // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. Watch(*HealthCheckRequest, Health_WatchServer) error } // UnimplementedHealthServer can be embedded to have forward compatible implementations. type UnimplementedHealthServer struct { } func (*UnimplementedHealthServer) Check(ctx context.Context, req *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } func (*UnimplementedHealthServer) Watch(req *HealthCheckRequest, srv Health_WatchServer) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } func RegisterHealthServer(s *grpc.Server, srv HealthServer) { s.RegisterService(&_Health_serviceDesc, srv) } func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(HealthCheckRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(HealthServer).Check(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.health.v1.Health/Check", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) } return interceptor(ctx, in, info, handler) } func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(HealthCheckRequest) if err := 
stream.RecvMsg(m); err != nil { return err } return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) } type Health_WatchServer interface { Send(*HealthCheckResponse) error grpc.ServerStream } type healthWatchServer struct { grpc.ServerStream } func (x *healthWatchServer) Send(m *HealthCheckResponse) error { return x.ServerStream.SendMsg(m) } var _Health_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.health.v1.Health", HandlerType: (*HealthServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Check", Handler: _Health_Check_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "Watch", Handler: _Health_Watch_Handler, ServerStreams: true, }, }, Metadata: "grpc/health/v1/health.proto", } grpc-go-1.29.1/health/regenerate.sh000077500000000000000000000017571365033716300171100ustar00rootroot00000000000000#!/bin/bash # Copyright 2018 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux -o pipefail TMP=$(mktemp -d) function finish { rm -rf "$TMP" } trap finish EXIT pushd "$TMP" mkdir -p grpc/health/v1 curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto > grpc/health/v1/health.proto protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/health/v1/*.proto popd rm -f grpc_health_v1/*.pb.go cp "$TMP"/grpc/health/v1/*.pb.go grpc_health_v1/ grpc-go-1.29.1/health/server.go000066400000000000000000000126171365033716300162620ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate ./regenerate.sh // Package health provides a service that exposes server's health and it must be // imported to enable support for client-side health checks. package health import ( "context" "sync" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" healthgrpc "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/status" ) // Server implements `service Health`. type Server struct { mu sync.RWMutex // If shutdown is true, it's expected all serving status is NOT_SERVING, and // will stay in NOT_SERVING. shutdown bool // statusMap stores the serving status of the services this Server monitors. statusMap map[string]healthpb.HealthCheckResponse_ServingStatus updates map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus } // NewServer returns a new Server. func NewServer() *Server { return &Server{ statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING}, updates: make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus), } } // Check implements `service Health`. 
func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { s.mu.RLock() defer s.mu.RUnlock() if servingStatus, ok := s.statusMap[in.Service]; ok { return &healthpb.HealthCheckResponse{ Status: servingStatus, }, nil } return nil, status.Error(codes.NotFound, "unknown service") } // Watch implements `service Health`. func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { service := in.Service // update channel is used for getting service status updates. update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1) s.mu.Lock() // Puts the initial status to the channel. if servingStatus, ok := s.statusMap[service]; ok { update <- servingStatus } else { update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN } // Registers the update channel to the correct place in the updates map. if _, ok := s.updates[service]; !ok { s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus) } s.updates[service][stream] = update defer func() { s.mu.Lock() delete(s.updates[service], stream) s.mu.Unlock() }() s.mu.Unlock() var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1 for { select { // Status updated. Sends the up-to-date status to the client. case servingStatus := <-update: if lastSentStatus == servingStatus { continue } lastSentStatus = servingStatus err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus}) if err != nil { return status.Error(codes.Canceled, "Stream has ended.") } // Context done. Removes the update channel from the updates map. case <-stream.Context().Done(): return status.Error(codes.Canceled, "Stream has ended.") } } } // SetServingStatus is called when need to reset the serving status of a service // or insert a new service entry into the statusMap. 
func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { s.mu.Lock() defer s.mu.Unlock() if s.shutdown { grpclog.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus) return } s.setServingStatusLocked(service, servingStatus) } func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { s.statusMap[service] = servingStatus for _, update := range s.updates[service] { // Clears previous updates, that are not sent to the client, from the channel. // This can happen if the client is not reading and the server gets flow control limited. select { case <-update: default: } // Puts the most recent update to the channel. update <- servingStatus } } // Shutdown sets all serving status to NOT_SERVING, and configures the server to // ignore all future status changes. // // This changes serving status for all services. To set status for a particular // services, call SetServingStatus(). func (s *Server) Shutdown() { s.mu.Lock() defer s.mu.Unlock() s.shutdown = true for service := range s.statusMap { s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING) } } // Resume sets all serving status to SERVING, and configures the server to // accept all future status changes. // // This changes serving status for all services. To set status for a particular // services, call SetServingStatus(). func (s *Server) Resume() { s.mu.Lock() defer s.mu.Unlock() s.shutdown = false for service := range s.statusMap { s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING) } } grpc-go-1.29.1/health/server_internal_test.go000066400000000000000000000043411365033716300212100ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package health import ( "sync" "testing" "time" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func (s) TestShutdown(t *testing.T) { const testService = "tteesstt" s := NewServer() s.SetServingStatus(testService, healthpb.HealthCheckResponse_SERVING) status := s.statusMap[testService] if status != healthpb.HealthCheckResponse_SERVING { t.Fatalf("status for %s is %v, want %v", testService, status, healthpb.HealthCheckResponse_SERVING) } var wg sync.WaitGroup wg.Add(2) // Run SetServingStatus and Shutdown in parallel. 
go func() { for i := 0; i < 1000; i++ { s.SetServingStatus(testService, healthpb.HealthCheckResponse_SERVING) time.Sleep(time.Microsecond) } wg.Done() }() go func() { time.Sleep(300 * time.Microsecond) s.Shutdown() wg.Done() }() wg.Wait() s.mu.Lock() status = s.statusMap[testService] s.mu.Unlock() if status != healthpb.HealthCheckResponse_NOT_SERVING { t.Fatalf("status for %s is %v, want %v", testService, status, healthpb.HealthCheckResponse_NOT_SERVING) } s.Resume() status = s.statusMap[testService] if status != healthpb.HealthCheckResponse_SERVING { t.Fatalf("status for %s is %v, want %v", testService, status, healthpb.HealthCheckResponse_SERVING) } s.SetServingStatus(testService, healthpb.HealthCheckResponse_NOT_SERVING) status = s.statusMap[testService] if status != healthpb.HealthCheckResponse_NOT_SERVING { t.Fatalf("status for %s is %v, want %v", testService, status, healthpb.HealthCheckResponse_NOT_SERVING) } } grpc-go-1.29.1/health/server_test.go000066400000000000000000000021271365033716300173140ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package health_test import ( "testing" "google.golang.org/grpc" "google.golang.org/grpc/health" healthgrpc "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } // Make sure the service implementation complies with the proto definition. 
func (s) TestRegister(t *testing.T) { s := grpc.NewServer() healthgrpc.RegisterHealthServer(s, health.NewServer()) s.Stop() } grpc-go-1.29.1/install_gae.sh000077500000000000000000000003541365033716300157740ustar00rootroot00000000000000#!/bin/bash TMP=$(mktemp -d /tmp/sdk.XXX) \ && curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ && unzip -q $TMP.zip -d $TMP \ && export PATH="$PATH:$TMP/go_appengine" grpc-go-1.29.1/interceptor.go000066400000000000000000000077021365033716300160440ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "context" ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC // and it is the responsibility of the interceptor to call it. // This is an EXPERIMENTAL API. type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. 
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) // StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O // operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. // This is an EXPERIMENTAL API. type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) // UnaryServerInfo consists of various information about a unary RPC on // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. Server interface{} // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal // execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the // status package, or else gRPC will use codes.Unknown as the status code and err.Error() as // the status message of the RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. All per-rpc information may be mutated by the interceptor. 
type StreamServerInfo struct { // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string // IsClientStream indicates whether the RPC is a client streaming RPC. IsClientStream bool // IsServerStream indicates whether the RPC is a server streaming RPC. IsServerStream bool } // StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. // info contains all the information of this RPC the interceptor can operate on. And handler is the // service method implementation. It is the responsibility of the interceptor to invoke handler to // complete the RPC. type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error grpc-go-1.29.1/internal/000077500000000000000000000000001365033716300147655ustar00rootroot00000000000000grpc-go-1.29.1/internal/backoff/000077500000000000000000000000001365033716300163605ustar00rootroot00000000000000grpc-go-1.29.1/internal/backoff/backoff.go000066400000000000000000000045111365033716300203030ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package backoff implement the backoff strategy for gRPC. // // This is kept in internal until the gRPC project decides whether or not to // allow alternative backoff strategies. 
package backoff import ( "time" grpcbackoff "google.golang.org/grpc/backoff" "google.golang.org/grpc/internal/grpcrand" ) // Strategy defines the methodology for backing off after a grpc connection // failure. type Strategy interface { // Backoff returns the amount of time to wait before the next retry given // the number of consecutive failures. Backoff(retries int) time.Duration } // DefaultExponential is an exponential backoff implementation using the // default values for all the configurable knobs defined in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} // Exponential implements exponential backoff algorithm as defined in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. type Exponential struct { // Config contains all options to configure the backoff algorithm. Config grpcbackoff.Config } // Backoff returns the amount of time to wait before the next retry given the // number of retries. func (bc Exponential) Backoff(retries int) time.Duration { if retries == 0 { return bc.Config.BaseDelay } backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) for backoff < max && retries > 0 { backoff *= bc.Config.Multiplier retries-- } if backoff > max { backoff = max } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) if backoff < 0 { return 0 } return time.Duration(backoff) } grpc-go-1.29.1/internal/balancerload/000077500000000000000000000000001365033716300173745ustar00rootroot00000000000000grpc-go-1.29.1/internal/balancerload/load.go000066400000000000000000000023521365033716300206440ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Package balancerload defines APIs to parse server loads in trailers. The // parsed loads are sent to balancers in DoneInfo. package balancerload import ( "google.golang.org/grpc/metadata" ) // Parser converts loads from metadata into a concrete type. type Parser interface { // Parse parses loads from metadata. Parse(md metadata.MD) interface{} } var parser Parser // SetParser sets the load parser. // // Not mutex-protected, should be called before any gRPC functions. func SetParser(lr Parser) { parser = lr } // Parse calls parser.Read(). func Parse(md metadata.MD) interface{} { if parser == nil { return nil } return parser.Parse(md) } grpc-go-1.29.1/internal/binarylog/000077500000000000000000000000001365033716300167535ustar00rootroot00000000000000grpc-go-1.29.1/internal/binarylog/binarylog.go000066400000000000000000000111541365033716300212720ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ // Package binarylog implementation binary logging as defined in // https://github.com/grpc/proposal/blob/master/A16-binary-logging.md. package binarylog import ( "fmt" "os" "google.golang.org/grpc/grpclog" ) // Logger is the global binary logger. It can be used to get binary logger for // each method. type Logger interface { getMethodLogger(methodName string) *MethodLogger } // binLogger is the global binary logger for the binary. One of this should be // built at init time from the configuration (environment variable or flags). // // It is used to get a methodLogger for each individual method. var binLogger Logger // SetLogger sets the binarg logger. // // Only call this at init time. func SetLogger(l Logger) { binLogger = l } // GetMethodLogger returns the methodLogger for the given methodName. // // methodName should be in the format of "/service/method". // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. func GetMethodLogger(methodName string) *MethodLogger { if binLogger == nil { return nil } return binLogger.getMethodLogger(methodName) } func init() { const envStr = "GRPC_BINARY_LOG_FILTER" configStr := os.Getenv(envStr) binLogger = NewLoggerFromConfigString(configStr) } type methodLoggerConfig struct { // Max length of header and message. hdr, msg uint64 } type logger struct { all *methodLoggerConfig services map[string]*methodLoggerConfig methods map[string]*methodLoggerConfig blacklist map[string]struct{} } // newEmptyLogger creates an empty logger. The map fields need to be filled in // using the set* functions. func newEmptyLogger() *logger { return &logger{} } // Set method logger for "*". func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { if l.all != nil { return fmt.Errorf("conflicting global rules found") } l.all = ml return nil } // Set method logger for "service/*". // // New methodLogger with same service overrides the old one. 
func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { if _, ok := l.services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } if l.services == nil { l.services = make(map[string]*methodLoggerConfig) } l.services[service] = ml return nil } // Set method logger for "service/method". // // New methodLogger with same method overrides the old one. func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { if _, ok := l.blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } if _, ok := l.methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } if l.methods == nil { l.methods = make(map[string]*methodLoggerConfig) } l.methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { if _, ok := l.blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } if _, ok := l.methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } if l.blacklist == nil { l.blacklist = make(map[string]struct{}) } l.blacklist[method] = struct{}{} return nil } // getMethodLogger returns the methodLogger for the given methodName. // // methodName should be in the format of "/service/method". // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. 
func (l *logger) getMethodLogger(methodName string) *MethodLogger { s, m, err := parseMethodName(methodName) if err != nil { grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } if ml, ok := l.methods[s+"/"+m]; ok { return newMethodLogger(ml.hdr, ml.msg) } if _, ok := l.blacklist[s+"/"+m]; ok { return nil } if ml, ok := l.services[s]; ok { return newMethodLogger(ml.hdr, ml.msg) } if l.all == nil { return nil } return newMethodLogger(l.all.hdr, l.all.msg) } grpc-go-1.29.1/internal/binarylog/binarylog_end2end_test.go000066400000000000000000000701661365033716300237400ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package binarylog_test import ( "context" "fmt" "io" "net" "sort" "sync" "testing" "time" "github.com/golang/protobuf/proto" "google.golang.org/grpc" pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/metadata" testpb "google.golang.org/grpc/stats/grpc_testing" "google.golang.org/grpc/status" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func init() { // Setting environment variable in tests doesn't work because of the init // orders. Set the loggers directly here. 
binarylog.SetLogger(binarylog.AllLogger) binarylog.SetDefaultSink(testSink) } var testSink = &testBinLogSink{} type testBinLogSink struct { mu sync.Mutex buf []*pb.GrpcLogEntry } func (s *testBinLogSink) Write(e *pb.GrpcLogEntry) error { s.mu.Lock() s.buf = append(s.buf, e) s.mu.Unlock() return nil } func (s *testBinLogSink) Close() error { return nil } // Returns all client entris if client is true, otherwise return all server // entries. func (s *testBinLogSink) logEntries(client bool) []*pb.GrpcLogEntry { logger := pb.GrpcLogEntry_LOGGER_SERVER if client { logger = pb.GrpcLogEntry_LOGGER_CLIENT } var ret []*pb.GrpcLogEntry s.mu.Lock() for _, e := range s.buf { if e.Logger == logger { ret = append(ret, e) } } s.mu.Unlock() return ret } func (s *testBinLogSink) clear() { s.mu.Lock() s.buf = nil s.mu.Unlock() } var ( // For headers: testMetadata = metadata.MD{ "key1": []string{"value1"}, "key2": []string{"value2"}, } // For trailers: testTrailerMetadata = metadata.MD{ "tkey1": []string{"trailerValue1"}, "tkey2": []string{"trailerValue2"}, } // The id for which the service handler should return error. errorID int32 = 32202 globalRPCID uint64 // RPC id starts with 1, but we do ++ at the beginning of each test. 
) type testServer struct { testpb.UnimplementedTestServiceServer te *test } func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { md, ok := metadata.FromIncomingContext(ctx) if ok { if err := grpc.SendHeader(ctx, md); err != nil { return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want ", md, err) } if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil { return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want ", testTrailerMetadata, err) } } if in.Id == errorID { return nil, fmt.Errorf("got error id: %v", in.Id) } return &testpb.SimpleResponse{Id: in.Id}, nil } func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { md, ok := metadata.FromIncomingContext(stream.Context()) if ok { if err := stream.SendHeader(md); err != nil { return status.Errorf(status.Code(err), "stream.SendHeader(%v) = %v, want %v", md, err, nil) } stream.SetTrailer(testTrailerMetadata) } for { in, err := stream.Recv() if err == io.EOF { // read done. return nil } if err != nil { return err } if in.Id == errorID { return fmt.Errorf("got error id: %v", in.Id) } if err := stream.Send(&testpb.SimpleResponse{Id: in.Id}); err != nil { return err } } } func (s *testServer) ClientStreamCall(stream testpb.TestService_ClientStreamCallServer) error { md, ok := metadata.FromIncomingContext(stream.Context()) if ok { if err := stream.SendHeader(md); err != nil { return status.Errorf(status.Code(err), "stream.SendHeader(%v) = %v, want %v", md, err, nil) } stream.SetTrailer(testTrailerMetadata) } for { in, err := stream.Recv() if err == io.EOF { // read done. 
return stream.SendAndClose(&testpb.SimpleResponse{Id: int32(0)}) } if err != nil { return err } if in.Id == errorID { return fmt.Errorf("got error id: %v", in.Id) } } } func (s *testServer) ServerStreamCall(in *testpb.SimpleRequest, stream testpb.TestService_ServerStreamCallServer) error { md, ok := metadata.FromIncomingContext(stream.Context()) if ok { if err := stream.SendHeader(md); err != nil { return status.Errorf(status.Code(err), "stream.SendHeader(%v) = %v, want %v", md, err, nil) } stream.SetTrailer(testTrailerMetadata) } if in.Id == errorID { return fmt.Errorf("got error id: %v", in.Id) } for i := 0; i < 5; i++ { if err := stream.Send(&testpb.SimpleResponse{Id: in.Id}); err != nil { return err } } return nil } // test is an end-to-end test. It should be created with the newTest // func, modified as needed, and then started with its startServer method. // It should be cleaned up with the tearDown method. type test struct { t *testing.T testServer testpb.TestServiceServer // nil means none // srv and srvAddr are set once startServer is called. srv *grpc.Server srvAddr string // Server IP without port. srvIP net.IP srvPort int cc *grpc.ClientConn // nil until requested via clientConn // Fields for client address. Set by the service handler. clientAddrMu sync.Mutex clientIP net.IP clientPort int } func (te *test) tearDown() { if te.cc != nil { te.cc.Close() te.cc = nil } te.srv.Stop() } type testConfig struct { } // newTest returns a new test using the provided testing.T and // environment. It is returned with default values. Tests should // modify it before calling its startServer and clientConn methods. 
func newTest(t *testing.T, tc *testConfig) *test { te := &test{ t: t, } return te } type listenerWrapper struct { net.Listener te *test } func (lw *listenerWrapper) Accept() (net.Conn, error) { conn, err := lw.Listener.Accept() if err != nil { return nil, err } lw.te.clientAddrMu.Lock() lw.te.clientIP = conn.RemoteAddr().(*net.TCPAddr).IP lw.te.clientPort = conn.RemoteAddr().(*net.TCPAddr).Port lw.te.clientAddrMu.Unlock() return conn, nil } // startServer starts a gRPC server listening. Callers should defer a // call to te.tearDown to clean up. func (te *test) startServer(ts testpb.TestServiceServer) { te.testServer = ts lis, err := net.Listen("tcp", "localhost:0") lis = &listenerWrapper{ Listener: lis, te: te, } if err != nil { te.t.Fatalf("Failed to listen: %v", err) } var opts []grpc.ServerOption s := grpc.NewServer(opts...) te.srv = s if te.testServer != nil { testpb.RegisterTestServiceServer(s, te.testServer) } go s.Serve(lis) te.srvAddr = lis.Addr().String() te.srvIP = lis.Addr().(*net.TCPAddr).IP te.srvPort = lis.Addr().(*net.TCPAddr).Port } func (te *test) clientConn() *grpc.ClientConn { if te.cc != nil { return te.cc } opts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithBlock()} var err error te.cc, err = grpc.Dial(te.srvAddr, opts...) if err != nil { te.t.Fatalf("Dial(%q) = %v", te.srvAddr, err) } return te.cc } type rpcType int const ( unaryRPC rpcType = iota clientStreamRPC serverStreamRPC fullDuplexStreamRPC cancelRPC ) type rpcConfig struct { count int // Number of requests and responses for streaming RPCs. success bool // Whether the RPC should succeed or return error. callType rpcType // Type of RPC. 
} func (te *test) doUnaryCall(c *rpcConfig) (*testpb.SimpleRequest, *testpb.SimpleResponse, error) { var ( resp *testpb.SimpleResponse req *testpb.SimpleRequest err error ) tc := testpb.NewTestServiceClient(te.clientConn()) if c.success { req = &testpb.SimpleRequest{Id: errorID + 1} } else { req = &testpb.SimpleRequest{Id: errorID} } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() ctx = metadata.NewOutgoingContext(ctx, testMetadata) resp, err = tc.UnaryCall(ctx, req) return req, resp, err } func (te *test) doFullDuplexCallRoundtrip(c *rpcConfig) ([]*testpb.SimpleRequest, []*testpb.SimpleResponse, error) { var ( reqs []*testpb.SimpleRequest resps []*testpb.SimpleResponse err error ) tc := testpb.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() ctx = metadata.NewOutgoingContext(ctx, testMetadata) stream, err := tc.FullDuplexCall(ctx) if err != nil { return reqs, resps, err } if c.callType == cancelRPC { cancel() return reqs, resps, context.Canceled } var startID int32 if !c.success { startID = errorID } for i := 0; i < c.count; i++ { req := &testpb.SimpleRequest{ Id: int32(i) + startID, } reqs = append(reqs, req) if err = stream.Send(req); err != nil { return reqs, resps, err } var resp *testpb.SimpleResponse if resp, err = stream.Recv(); err != nil { return reqs, resps, err } resps = append(resps, resp) } if err = stream.CloseSend(); err != nil && err != io.EOF { return reqs, resps, err } if _, err = stream.Recv(); err != io.EOF { return reqs, resps, err } return reqs, resps, nil } func (te *test) doClientStreamCall(c *rpcConfig) ([]*testpb.SimpleRequest, *testpb.SimpleResponse, error) { var ( reqs []*testpb.SimpleRequest resp *testpb.SimpleResponse err error ) tc := testpb.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() ctx = metadata.NewOutgoingContext(ctx, testMetadata) 
stream, err := tc.ClientStreamCall(ctx) if err != nil { return reqs, resp, err } var startID int32 if !c.success { startID = errorID } for i := 0; i < c.count; i++ { req := &testpb.SimpleRequest{ Id: int32(i) + startID, } reqs = append(reqs, req) if err = stream.Send(req); err != nil { return reqs, resp, err } } resp, err = stream.CloseAndRecv() return reqs, resp, err } func (te *test) doServerStreamCall(c *rpcConfig) (*testpb.SimpleRequest, []*testpb.SimpleResponse, error) { var ( req *testpb.SimpleRequest resps []*testpb.SimpleResponse err error ) tc := testpb.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() ctx = metadata.NewOutgoingContext(ctx, testMetadata) var startID int32 if !c.success { startID = errorID } req = &testpb.SimpleRequest{Id: startID} stream, err := tc.ServerStreamCall(ctx, req) if err != nil { return req, resps, err } for { var resp *testpb.SimpleResponse resp, err := stream.Recv() if err == io.EOF { return req, resps, nil } else if err != nil { return req, resps, err } resps = append(resps, resp) } } type expectedData struct { te *test cc *rpcConfig method string requests []*testpb.SimpleRequest responses []*testpb.SimpleResponse err error } func (ed *expectedData) newClientHeaderEntry(client bool, rpcID, inRPCID uint64) *pb.GrpcLogEntry { logger := pb.GrpcLogEntry_LOGGER_CLIENT var peer *pb.Address if !client { logger = pb.GrpcLogEntry_LOGGER_SERVER ed.te.clientAddrMu.Lock() peer = &pb.Address{ Address: ed.te.clientIP.String(), IpPort: uint32(ed.te.clientPort), } if ed.te.clientIP.To4() != nil { peer.Type = pb.Address_TYPE_IPV4 } else { peer.Type = pb.Address_TYPE_IPV6 } ed.te.clientAddrMu.Unlock() } return &pb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, Logger: logger, Payload: &pb.GrpcLogEntry_ClientHeader{ ClientHeader: &pb.ClientHeader{ Metadata: 
binarylog.MdToMetadataProto(testMetadata), MethodName: ed.method, Authority: ed.te.srvAddr, }, }, Peer: peer, } } func (ed *expectedData) newServerHeaderEntry(client bool, rpcID, inRPCID uint64) *pb.GrpcLogEntry { logger := pb.GrpcLogEntry_LOGGER_SERVER var peer *pb.Address if client { logger = pb.GrpcLogEntry_LOGGER_CLIENT peer = &pb.Address{ Address: ed.te.srvIP.String(), IpPort: uint32(ed.te.srvPort), } if ed.te.srvIP.To4() != nil { peer.Type = pb.Address_TYPE_IPV4 } else { peer.Type = pb.Address_TYPE_IPV6 } } return &pb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, Logger: logger, Payload: &pb.GrpcLogEntry_ServerHeader{ ServerHeader: &pb.ServerHeader{ Metadata: binarylog.MdToMetadataProto(testMetadata), }, }, Peer: peer, } } func (ed *expectedData) newClientMessageEntry(client bool, rpcID, inRPCID uint64, msg *testpb.SimpleRequest) *pb.GrpcLogEntry { logger := pb.GrpcLogEntry_LOGGER_CLIENT if !client { logger = pb.GrpcLogEntry_LOGGER_SERVER } data, err := proto.Marshal(msg) if err != nil { grpclog.Infof("binarylogging_testing: failed to marshal proto message: %v", err) } return &pb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, Logger: logger, Payload: &pb.GrpcLogEntry_Message{ Message: &pb.Message{ Length: uint32(len(data)), Data: data, }, }, } } func (ed *expectedData) newServerMessageEntry(client bool, rpcID, inRPCID uint64, msg *testpb.SimpleResponse) *pb.GrpcLogEntry { logger := pb.GrpcLogEntry_LOGGER_CLIENT if !client { logger = pb.GrpcLogEntry_LOGGER_SERVER } data, err := proto.Marshal(msg) if err != nil { grpclog.Infof("binarylogging_testing: failed to marshal proto message: %v", err) } return &pb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, Logger: logger, Payload: &pb.GrpcLogEntry_Message{ Message: &pb.Message{ Length: 
uint32(len(data)), Data: data, }, }, } } func (ed *expectedData) newHalfCloseEntry(client bool, rpcID, inRPCID uint64) *pb.GrpcLogEntry { logger := pb.GrpcLogEntry_LOGGER_CLIENT if !client { logger = pb.GrpcLogEntry_LOGGER_SERVER } return &pb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. Logger: logger, } } func (ed *expectedData) newServerTrailerEntry(client bool, rpcID, inRPCID uint64, stErr error) *pb.GrpcLogEntry { logger := pb.GrpcLogEntry_LOGGER_SERVER var peer *pb.Address if client { logger = pb.GrpcLogEntry_LOGGER_CLIENT peer = &pb.Address{ Address: ed.te.srvIP.String(), IpPort: uint32(ed.te.srvPort), } if ed.te.srvIP.To4() != nil { peer.Type = pb.Address_TYPE_IPV4 } else { peer.Type = pb.Address_TYPE_IPV6 } } st, ok := status.FromError(stErr) if !ok { grpclog.Info("binarylogging: error in trailer is not a status error") } stProto := st.Proto() var ( detailsBytes []byte err error ) if stProto != nil && len(stProto.Details) != 0 { detailsBytes, err = proto.Marshal(stProto) if err != nil { grpclog.Infof("binarylogging: failed to marshal status proto: %v", err) } } return &pb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, Logger: logger, Payload: &pb.GrpcLogEntry_Trailer{ Trailer: &pb.Trailer{ Metadata: binarylog.MdToMetadataProto(testTrailerMetadata), // st will be nil if err was not a status error, but nil is ok. 
StatusCode: uint32(st.Code()), StatusMessage: st.Message(), StatusDetails: detailsBytes, }, }, Peer: peer, } } func (ed *expectedData) newCancelEntry(rpcID, inRPCID uint64) *pb.GrpcLogEntry { return &pb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, Logger: pb.GrpcLogEntry_LOGGER_CLIENT, Payload: nil, } } func (ed *expectedData) toClientLogEntries() []*pb.GrpcLogEntry { var ( ret []*pb.GrpcLogEntry idInRPC uint64 = 1 ) ret = append(ret, ed.newClientHeaderEntry(true, globalRPCID, idInRPC)) idInRPC++ switch ed.cc.callType { case unaryRPC, fullDuplexStreamRPC: for i := 0; i < len(ed.requests); i++ { ret = append(ret, ed.newClientMessageEntry(true, globalRPCID, idInRPC, ed.requests[i])) idInRPC++ if i == 0 { // First message, append ServerHeader. ret = append(ret, ed.newServerHeaderEntry(true, globalRPCID, idInRPC)) idInRPC++ } if !ed.cc.success { // There is no response in the RPC error case. continue } ret = append(ret, ed.newServerMessageEntry(true, globalRPCID, idInRPC, ed.responses[i])) idInRPC++ } if ed.cc.success && ed.cc.callType == fullDuplexStreamRPC { ret = append(ret, ed.newHalfCloseEntry(true, globalRPCID, idInRPC)) idInRPC++ } case clientStreamRPC, serverStreamRPC: for i := 0; i < len(ed.requests); i++ { ret = append(ret, ed.newClientMessageEntry(true, globalRPCID, idInRPC, ed.requests[i])) idInRPC++ } if ed.cc.callType == clientStreamRPC { ret = append(ret, ed.newHalfCloseEntry(true, globalRPCID, idInRPC)) idInRPC++ } ret = append(ret, ed.newServerHeaderEntry(true, globalRPCID, idInRPC)) idInRPC++ if ed.cc.success { for i := 0; i < len(ed.responses); i++ { ret = append(ret, ed.newServerMessageEntry(true, globalRPCID, idInRPC, ed.responses[0])) idInRPC++ } } } if ed.cc.callType == cancelRPC { ret = append(ret, ed.newCancelEntry(globalRPCID, idInRPC)) idInRPC++ } else { ret = append(ret, ed.newServerTrailerEntry(true, globalRPCID, idInRPC, ed.err)) idInRPC++ } return ret } func (ed 
*expectedData) toServerLogEntries() []*pb.GrpcLogEntry { var ( ret []*pb.GrpcLogEntry idInRPC uint64 = 1 ) ret = append(ret, ed.newClientHeaderEntry(false, globalRPCID, idInRPC)) idInRPC++ switch ed.cc.callType { case unaryRPC: ret = append(ret, ed.newClientMessageEntry(false, globalRPCID, idInRPC, ed.requests[0])) idInRPC++ ret = append(ret, ed.newServerHeaderEntry(false, globalRPCID, idInRPC)) idInRPC++ if ed.cc.success { ret = append(ret, ed.newServerMessageEntry(false, globalRPCID, idInRPC, ed.responses[0])) idInRPC++ } case fullDuplexStreamRPC: ret = append(ret, ed.newServerHeaderEntry(false, globalRPCID, idInRPC)) idInRPC++ for i := 0; i < len(ed.requests); i++ { ret = append(ret, ed.newClientMessageEntry(false, globalRPCID, idInRPC, ed.requests[i])) idInRPC++ if !ed.cc.success { // There is no response in the RPC error case. continue } ret = append(ret, ed.newServerMessageEntry(false, globalRPCID, idInRPC, ed.responses[i])) idInRPC++ } if ed.cc.success && ed.cc.callType == fullDuplexStreamRPC { ret = append(ret, ed.newHalfCloseEntry(false, globalRPCID, idInRPC)) idInRPC++ } case clientStreamRPC: ret = append(ret, ed.newServerHeaderEntry(false, globalRPCID, idInRPC)) idInRPC++ for i := 0; i < len(ed.requests); i++ { ret = append(ret, ed.newClientMessageEntry(false, globalRPCID, idInRPC, ed.requests[i])) idInRPC++ } if ed.cc.success { ret = append(ret, ed.newHalfCloseEntry(false, globalRPCID, idInRPC)) idInRPC++ ret = append(ret, ed.newServerMessageEntry(false, globalRPCID, idInRPC, ed.responses[0])) idInRPC++ } case serverStreamRPC: ret = append(ret, ed.newClientMessageEntry(false, globalRPCID, idInRPC, ed.requests[0])) idInRPC++ ret = append(ret, ed.newServerHeaderEntry(false, globalRPCID, idInRPC)) idInRPC++ for i := 0; i < len(ed.responses); i++ { ret = append(ret, ed.newServerMessageEntry(false, globalRPCID, idInRPC, ed.responses[0])) idInRPC++ } } ret = append(ret, ed.newServerTrailerEntry(false, globalRPCID, idInRPC, ed.err)) idInRPC++ return ret } func 
runRPCs(t *testing.T, tc *testConfig, cc *rpcConfig) *expectedData { te := newTest(t, tc) te.startServer(&testServer{te: te}) defer te.tearDown() expect := &expectedData{ te: te, cc: cc, } switch cc.callType { case unaryRPC: expect.method = "/grpc.testing.TestService/UnaryCall" req, resp, err := te.doUnaryCall(cc) expect.requests = []*testpb.SimpleRequest{req} expect.responses = []*testpb.SimpleResponse{resp} expect.err = err case clientStreamRPC: expect.method = "/grpc.testing.TestService/ClientStreamCall" reqs, resp, err := te.doClientStreamCall(cc) expect.requests = reqs expect.responses = []*testpb.SimpleResponse{resp} expect.err = err case serverStreamRPC: expect.method = "/grpc.testing.TestService/ServerStreamCall" req, resps, err := te.doServerStreamCall(cc) expect.responses = resps expect.requests = []*testpb.SimpleRequest{req} expect.err = err case fullDuplexStreamRPC, cancelRPC: expect.method = "/grpc.testing.TestService/FullDuplexCall" expect.requests, expect.responses, expect.err = te.doFullDuplexCallRoundtrip(cc) } if cc.success != (expect.err == nil) { t.Fatalf("cc.success: %v, got error: %v", cc.success, expect.err) } te.cc.Close() te.srv.GracefulStop() // Wait for the server to stop. return expect } // equalLogEntry sorts the metadata entries by key (to compare metadata). // // This function is typically called with only two entries. It's written in this // way so the code can be put in a for loop instead of copied twice. func equalLogEntry(entries ...*pb.GrpcLogEntry) (equal bool) { for i, e := range entries { // Clear out some fields we don't compare. e.Timestamp = nil e.CallId = 0 // CallID is global to the binary, hard to compare. if h := e.GetClientHeader(); h != nil { h.Timeout = nil tmp := append(h.Metadata.Entry[:0], h.Metadata.Entry...) 
h.Metadata.Entry = tmp sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key }) } if h := e.GetServerHeader(); h != nil { tmp := append(h.Metadata.Entry[:0], h.Metadata.Entry...) h.Metadata.Entry = tmp sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key }) } if h := e.GetTrailer(); h != nil { sort.Slice(h.Metadata.Entry, func(i, j int) bool { return h.Metadata.Entry[i].Key < h.Metadata.Entry[j].Key }) } if i > 0 && !proto.Equal(e, entries[i-1]) { return false } } return true } func testClientBinaryLog(t *testing.T, c *rpcConfig) error { defer testSink.clear() expect := runRPCs(t, &testConfig{}, c) want := expect.toClientLogEntries() var got []*pb.GrpcLogEntry // In racy cases, some entries are not logged when the RPC is finished (e.g. // context.Cancel). // // Check 10 times, with a sleep of 1/100 seconds between each check. Makes // it an 1-second wait in total. for i := 0; i < 10; i++ { got = testSink.logEntries(true) // all client entries. 
if len(want) == len(got) { break } time.Sleep(100 * time.Millisecond) } if len(want) != len(got) { for i, e := range want { t.Errorf("in want: %d, %s", i, e.GetType()) } for i, e := range got { t.Errorf("in got: %d, %s", i, e.GetType()) } return fmt.Errorf("didn't get same amount of log entries, want: %d, got: %d", len(want), len(got)) } var errored bool for i := 0; i < len(got); i++ { if !equalLogEntry(want[i], got[i]) { t.Errorf("entry: %d, want %+v, got %+v", i, want[i], got[i]) errored = true } } if errored { return fmt.Errorf("test failed") } return nil } func (s) TestClientBinaryLogUnaryRPC(t *testing.T) { if err := testClientBinaryLog(t, &rpcConfig{success: true, callType: unaryRPC}); err != nil { t.Fatal(err) } } func (s) TestClientBinaryLogUnaryRPCError(t *testing.T) { if err := testClientBinaryLog(t, &rpcConfig{success: false, callType: unaryRPC}); err != nil { t.Fatal(err) } } func (s) TestClientBinaryLogClientStreamRPC(t *testing.T) { count := 5 if err := testClientBinaryLog(t, &rpcConfig{count: count, success: true, callType: clientStreamRPC}); err != nil { t.Fatal(err) } } func (s) TestClientBinaryLogClientStreamRPCError(t *testing.T) { count := 1 if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: clientStreamRPC}); err != nil { t.Fatal(err) } } func (s) TestClientBinaryLogServerStreamRPC(t *testing.T) { count := 5 if err := testClientBinaryLog(t, &rpcConfig{count: count, success: true, callType: serverStreamRPC}); err != nil { t.Fatal(err) } } func (s) TestClientBinaryLogServerStreamRPCError(t *testing.T) { count := 5 if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: serverStreamRPC}); err != nil { t.Fatal(err) } } func (s) TestClientBinaryLogFullDuplexRPC(t *testing.T) { count := 5 if err := testClientBinaryLog(t, &rpcConfig{count: count, success: true, callType: fullDuplexStreamRPC}); err != nil { t.Fatal(err) } } func (s) TestClientBinaryLogFullDuplexRPCError(t *testing.T) { count 
:= 5 if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: fullDuplexStreamRPC}); err != nil { t.Fatal(err) } } func (s) TestClientBinaryLogCancel(t *testing.T) { count := 5 if err := testClientBinaryLog(t, &rpcConfig{count: count, success: false, callType: cancelRPC}); err != nil { t.Fatal(err) } } func testServerBinaryLog(t *testing.T, c *rpcConfig) error { defer testSink.clear() expect := runRPCs(t, &testConfig{}, c) want := expect.toServerLogEntries() var got []*pb.GrpcLogEntry // In racy cases, some entries are not logged when the RPC is finished (e.g. // context.Cancel). This is unlikely to happen on server side, but it does // no harm to retry. // // Check 10 times, with a sleep of 1/100 seconds between each check. Makes // it an 1-second wait in total. for i := 0; i < 10; i++ { got = testSink.logEntries(false) // all server entries. if len(want) == len(got) { break } time.Sleep(100 * time.Millisecond) } if len(want) != len(got) { for i, e := range want { t.Errorf("in want: %d, %s", i, e.GetType()) } for i, e := range got { t.Errorf("in got: %d, %s", i, e.GetType()) } return fmt.Errorf("didn't get same amount of log entries, want: %d, got: %d", len(want), len(got)) } var errored bool for i := 0; i < len(got); i++ { if !equalLogEntry(want[i], got[i]) { t.Errorf("entry: %d, want %+v, got %+v", i, want[i], got[i]) errored = true } } if errored { return fmt.Errorf("test failed") } return nil } func (s) TestServerBinaryLogUnaryRPC(t *testing.T) { if err := testServerBinaryLog(t, &rpcConfig{success: true, callType: unaryRPC}); err != nil { t.Fatal(err) } } func (s) TestServerBinaryLogUnaryRPCError(t *testing.T) { if err := testServerBinaryLog(t, &rpcConfig{success: false, callType: unaryRPC}); err != nil { t.Fatal(err) } } func (s) TestServerBinaryLogClientStreamRPC(t *testing.T) { count := 5 if err := testServerBinaryLog(t, &rpcConfig{count: count, success: true, callType: clientStreamRPC}); err != nil { t.Fatal(err) } } func (s) 
TestServerBinaryLogClientStreamRPCError(t *testing.T) { count := 1 if err := testServerBinaryLog(t, &rpcConfig{count: count, success: false, callType: clientStreamRPC}); err != nil { t.Fatal(err) } } func (s) TestServerBinaryLogServerStreamRPC(t *testing.T) { count := 5 if err := testServerBinaryLog(t, &rpcConfig{count: count, success: true, callType: serverStreamRPC}); err != nil { t.Fatal(err) } } func (s) TestServerBinaryLogServerStreamRPCError(t *testing.T) { count := 5 if err := testServerBinaryLog(t, &rpcConfig{count: count, success: false, callType: serverStreamRPC}); err != nil { t.Fatal(err) } } func (s) TestServerBinaryLogFullDuplex(t *testing.T) { count := 5 if err := testServerBinaryLog(t, &rpcConfig{count: count, success: true, callType: fullDuplexStreamRPC}); err != nil { t.Fatal(err) } } func (s) TestServerBinaryLogFullDuplexError(t *testing.T) { count := 5 if err := testServerBinaryLog(t, &rpcConfig{count: count, success: false, callType: fullDuplexStreamRPC}); err != nil { t.Fatal(err) } } grpc-go-1.29.1/internal/binarylog/binarylog_test.go000066400000000000000000000061431365033716300223330ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package binarylog import ( "testing" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } // Test that get method logger returns the one with the most exact match. 
func (s) TestGetMethodLogger(t *testing.T) { testCases := []struct { in string method string hdr, msg uint64 }{ // Global. { in: "*{h:12;m:23}", method: "/s/m", hdr: 12, msg: 23, }, // service/*. { in: "*,s/*{h:12;m:23}", method: "/s/m", hdr: 12, msg: 23, }, // Service/method. { in: "*{h;m},s/m{h:12;m:23}", method: "/s/m", hdr: 12, msg: 23, }, { in: "*{h;m},s/*{h:314;m},s/m{h:12;m:23}", method: "/s/m", hdr: 12, msg: 23, }, { in: "*{h;m},s/*{h:12;m:23},s/m", method: "/s/m", hdr: maxUInt, msg: maxUInt, }, // service/*. { in: "*{h;m},s/*{h:12;m:23},s/m1", method: "/s/m", hdr: 12, msg: 23, }, { in: "*{h;m},s1/*,s/m{h:12;m:23}", method: "/s/m", hdr: 12, msg: 23, }, // With black list. { in: "*{h:12;m:23},-s/m1", method: "/s/m", hdr: 12, msg: 23, }, } for _, tc := range testCases { l := NewLoggerFromConfigString(tc.in) if l == nil { t.Errorf("in: %q, failed to create logger from config string", tc.in) continue } ml := l.getMethodLogger(tc.method) if ml == nil { t.Errorf("in: %q, method logger is nil, want non-nil", tc.in) continue } if ml.headerMaxLen != tc.hdr || ml.messageMaxLen != tc.msg { t.Errorf("in: %q, want header: %v, message: %v, got header: %v, message: %v", tc.in, tc.hdr, tc.msg, ml.headerMaxLen, ml.messageMaxLen) } } } // expect method logger to be nil func (s) TestGetMethodLoggerOff(t *testing.T) { testCases := []struct { in string method string }{ // method not specified. { in: "s1/m", method: "/s/m", }, { in: "s/m1", method: "/s/m", }, { in: "s1/*", method: "/s/m", }, { in: "s1/*,s/m1", method: "/s/m", }, // blacklisted. 
{ in: "*,-s/m", method: "/s/m", }, { in: "s/*,-s/m", method: "/s/m", }, { in: "-s/m,s/*", method: "/s/m", }, } for _, tc := range testCases { l := NewLoggerFromConfigString(tc.in) if l == nil { t.Errorf("in: %q, failed to create logger from config string", tc.in) continue } ml := l.getMethodLogger(tc.method) if ml != nil { t.Errorf("in: %q, method logger is non-nil, want nil", tc.in) } } } grpc-go-1.29.1/internal/binarylog/binarylog_testutil.go000066400000000000000000000030021365033716300232200ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // This file contains exported variables/functions that are exported for testing // only. // // An ideal way for this would be to put those in a *_test.go but in binarylog // package. But this doesn't work with staticcheck with go module. Error was: // "MdToMetadataProto not declared by package binarylog". This could be caused // by the way staticcheck looks for files for a certain package, which doesn't // support *_test.go files. // // Move those to binary_test.go when staticcheck is fixed. package binarylog var ( // AllLogger is a logger that logs all headers/messages for all RPCs. It's // for testing only. AllLogger = NewLoggerFromConfigString("*") // MdToMetadataProto converts metadata to a binary logging proto message. // It's for testing only. MdToMetadataProto = mdToMetadataProto // AddrToProto converts an address to a binary logging proto message. 
It's // for testing only. AddrToProto = addrToProto ) grpc-go-1.29.1/internal/binarylog/env_config.go000066400000000000000000000152101365033716300214160ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package binarylog import ( "errors" "fmt" "regexp" "strconv" "strings" "google.golang.org/grpc/grpclog" ) // NewLoggerFromConfigString reads the string and build a logger. It can be used // to build a new logger and assign it to binarylog.Logger. // // Example filter config strings: // - "" Nothing will be logged // - "*" All headers and messages will be fully logged. // - "*{h}" Only headers will be logged. // - "*{m:256}" Only the first 256 bytes of each message will be logged. // - "Foo/*" Logs every method in service Foo // - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar // - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method // /Foo/Bar, logs all headers and messages in every other method in service // Foo. // // If two configs exist for one certain method or service, the one specified // later overrides the previous config. 
func NewLoggerFromConfigString(s string) Logger { if s == "" { return nil } l := newEmptyLogger() methods := strings.Split(s, ",") for _, method := range methods { if err := l.fillMethodLoggerWithConfigString(method); err != nil { grpclog.Warningf("failed to parse binary log config: %v", err) return nil } } return l } // fillMethodLoggerWithConfigString parses config, creates methodLogger and adds // it to the right map in the logger. func (l *logger) fillMethodLoggerWithConfigString(config string) error { // "" is invalid. if config == "" { return errors.New("empty string is not a valid method binary logging config") } // "-service/method", blacklist, no * or {} allowed. if config[0] == '-' { s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } if m == "*" { return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") } if suffix != "" { return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") } if err := l.setBlacklist(s + "/" + m); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil } // "*{h:256;m:256}" if config[0] == '*' { hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil } s, m, suffix, err := parseMethodConfigAndSuffix(config) if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } hdr, msg, err := parseHeaderMessageLengthConfig(suffix) if err != nil { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { if err := l.setMethodMethodLogger(s+"/"+m, 
&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } return nil } const ( // TODO: this const is only used by env_config now. But could be useful for // other config. Move to binarylog.go if necessary. maxUInt = ^uint64(0) // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for // expected output. longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` // For suffix from above, "{h:123,m:123}". See test for expected output. optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` ) var ( longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) ) // Turn "service/method{h;m}" into "service", "method", "{h;m}". func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { // Regexp result: // // in: "p.s/m{h:123,m:123}", // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, match := longMethodConfigRegexp.FindStringSubmatch(c) if match == nil { return "", "", "", fmt.Errorf("%q contains invalid substring", c) } service = match[1] method = match[2] suffix = match[3] return } // Turn "{h:123;m:345}" into 123, 345. // // Return maxUInt if length is unspecified. func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { if c == "" { return maxUInt, maxUInt, nil } // Header config only. 
if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { if s := match[1]; s != "" { hdrLenStr, err = strconv.ParseUint(s, 10, 64) if err != nil { return 0, 0, fmt.Errorf("failed to convert %q to uint", s) } return hdrLenStr, 0, nil } return maxUInt, 0, nil } // Message config only. if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { if s := match[1]; s != "" { msgLenStr, err = strconv.ParseUint(s, 10, 64) if err != nil { return 0, 0, fmt.Errorf("failed to convert %q to uint", s) } return 0, msgLenStr, nil } return 0, maxUInt, nil } // Header and message config both. if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { // Both hdr and msg are specified, but one or two of them might be empty. hdrLenStr = maxUInt msgLenStr = maxUInt if s := match[1]; s != "" { hdrLenStr, err = strconv.ParseUint(s, 10, 64) if err != nil { return 0, 0, fmt.Errorf("failed to convert %q to uint", s) } } if s := match[2]; s != "" { msgLenStr, err = strconv.ParseUint(s, 10, 64) if err != nil { return 0, 0, fmt.Errorf("failed to convert %q to uint", s) } } return hdrLenStr, msgLenStr, nil } return 0, 0, fmt.Errorf("%q contains invalid substring", c) } grpc-go-1.29.1/internal/binarylog/env_config_test.go000066400000000000000000000234041365033716300224610ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package binarylog import ( "fmt" "testing" ) // This tests that when multiple configs are specified, all methods loggers will // be set correctly. Correctness of each logger is covered by other unit tests. func (s) TestNewLoggerFromConfigString(t *testing.T) { const ( s1 = "s1" m1 = "m1" m2 = "m2" fullM1 = s1 + "/" + m1 fullM2 = s1 + "/" + m2 ) c := fmt.Sprintf("*{h:1;m:2},%s{h},%s{m},%s{h;m}", s1+"/*", fullM1, fullM2) l := NewLoggerFromConfigString(c).(*logger) if l.all.hdr != 1 || l.all.msg != 2 { t.Errorf("l.all = %#v, want headerLen: 1, messageLen: 2", l.all) } if ml, ok := l.services[s1]; ok { if ml.hdr != maxUInt || ml.msg != 0 { t.Errorf("want maxUInt header, 0 message, got header: %v, message: %v", ml.hdr, ml.msg) } } else { t.Errorf("service/* is not set") } if ml, ok := l.methods[fullM1]; ok { if ml.hdr != 0 || ml.msg != maxUInt { t.Errorf("want 0 header, maxUInt message, got header: %v, message: %v", ml.hdr, ml.msg) } } else { t.Errorf("service/method{h} is not set") } if ml, ok := l.methods[fullM2]; ok { if ml.hdr != maxUInt || ml.msg != maxUInt { t.Errorf("want maxUInt header, maxUInt message, got header: %v, message: %v", ml.hdr, ml.msg) } } else { t.Errorf("service/method{h;m} is not set") } } func (s) TestNewLoggerFromConfigStringInvalid(t *testing.T) { testCases := []string{ "", "*{}", "s/m,*{}", "s/m,s/m{a}", // Duplicate rules. 
"s/m,-s/m", "-s/m,s/m", "s/m,s/m", "s/m,s/m{h:1;m:1}", "s/m{h:1;m:1},s/m", "-s/m,-s/m", "s/*,s/*{h:1;m:1}", "*,*{h:1;m:1}", } for _, tc := range testCases { l := NewLoggerFromConfigString(tc) if l != nil { t.Errorf("With config %q, want logger %v, got %v", tc, nil, l) } } } func (s) TestParseMethodConfigAndSuffix(t *testing.T) { testCases := []struct { in, service, method, suffix string }{ { in: "p.s/m", service: "p.s", method: "m", suffix: "", }, { in: "p.s/m{h,m}", service: "p.s", method: "m", suffix: "{h,m}", }, { in: "p.s/*", service: "p.s", method: "*", suffix: "", }, { in: "p.s/*{h,m}", service: "p.s", method: "*", suffix: "{h,m}", }, // invalid suffix will be detected by another function. { in: "p.s/m{invalidsuffix}", service: "p.s", method: "m", suffix: "{invalidsuffix}", }, { in: "p.s/*{invalidsuffix}", service: "p.s", method: "*", suffix: "{invalidsuffix}", }, { in: "s/m*", service: "s", method: "m", suffix: "*", }, { in: "s/*m", service: "s", method: "*", suffix: "m", }, { in: "s/**", service: "s", method: "*", suffix: "*", }, } for _, tc := range testCases { t.Logf("testing parseMethodConfigAndSuffix(%q)", tc.in) s, m, suffix, err := parseMethodConfigAndSuffix(tc.in) if err != nil { t.Errorf("returned error %v, want nil", err) continue } if s != tc.service { t.Errorf("service = %q, want %q", s, tc.service) } if m != tc.method { t.Errorf("method = %q, want %q", m, tc.method) } if suffix != tc.suffix { t.Errorf("suffix = %q, want %q", suffix, tc.suffix) } } } func (s) TestParseMethodConfigAndSuffixInvalid(t *testing.T) { testCases := []string{ "*/m", "*/m{}", } for _, tc := range testCases { s, m, suffix, err := parseMethodConfigAndSuffix(tc) if err == nil { t.Errorf("Parsing %q got nil error with %q, %q, %q, want non-nil error", tc, s, m, suffix) } } } func (s) TestParseHeaderMessageLengthConfig(t *testing.T) { testCases := []struct { in string hdr, msg uint64 }{ { in: "", hdr: maxUInt, msg: maxUInt, }, { in: "{h}", hdr: maxUInt, msg: 0, }, { in: 
"{h:314}", hdr: 314, msg: 0, }, { in: "{m}", hdr: 0, msg: maxUInt, }, { in: "{m:213}", hdr: 0, msg: 213, }, { in: "{h;m}", hdr: maxUInt, msg: maxUInt, }, { in: "{h:314;m}", hdr: 314, msg: maxUInt, }, { in: "{h;m:213}", hdr: maxUInt, msg: 213, }, { in: "{h:314;m:213}", hdr: 314, msg: 213, }, } for _, tc := range testCases { t.Logf("testing parseHeaderMessageLengthConfig(%q)", tc.in) hdr, msg, err := parseHeaderMessageLengthConfig(tc.in) if err != nil { t.Errorf("returned error %v, want nil", err) continue } if hdr != tc.hdr { t.Errorf("header length = %v, want %v", hdr, tc.hdr) } if msg != tc.msg { t.Errorf("message length = %v, want %v", msg, tc.msg) } } } func (s) TestParseHeaderMessageLengthConfigInvalid(t *testing.T) { testCases := []string{ "{}", "{h;a}", "{h;m;b}", } for _, tc := range testCases { _, _, err := parseHeaderMessageLengthConfig(tc) if err == nil { t.Errorf("Parsing %q got nil error, want non-nil error", tc) } } } func (s) TestFillMethodLoggerWithConfigStringBlacklist(t *testing.T) { testCases := []string{ "p.s/m", "service/method", } for _, tc := range testCases { c := "-" + tc t.Logf("testing fillMethodLoggerWithConfigString(%q)", c) l := newEmptyLogger() if err := l.fillMethodLoggerWithConfigString(c); err != nil { t.Errorf("returned err %v, want nil", err) continue } _, ok := l.blacklist[tc] if !ok { t.Errorf("blacklist[%q] is not set", tc) } } } func (s) TestFillMethodLoggerWithConfigStringGlobal(t *testing.T) { testCases := []struct { in string hdr, msg uint64 }{ { in: "", hdr: maxUInt, msg: maxUInt, }, { in: "{h}", hdr: maxUInt, msg: 0, }, { in: "{h:314}", hdr: 314, msg: 0, }, { in: "{m}", hdr: 0, msg: maxUInt, }, { in: "{m:213}", hdr: 0, msg: 213, }, { in: "{h;m}", hdr: maxUInt, msg: maxUInt, }, { in: "{h:314;m}", hdr: 314, msg: maxUInt, }, { in: "{h;m:213}", hdr: maxUInt, msg: 213, }, { in: "{h:314;m:213}", hdr: 314, msg: 213, }, } for _, tc := range testCases { c := "*" + tc.in t.Logf("testing fillMethodLoggerWithConfigString(%q)", c) l 
:= newEmptyLogger() if err := l.fillMethodLoggerWithConfigString(c); err != nil { t.Errorf("returned err %v, want nil", err) continue } if l.all == nil { t.Errorf("l.all is not set") continue } if hdr := l.all.hdr; hdr != tc.hdr { t.Errorf("header length = %v, want %v", hdr, tc.hdr) } if msg := l.all.msg; msg != tc.msg { t.Errorf("message length = %v, want %v", msg, tc.msg) } } } func (s) TestFillMethodLoggerWithConfigStringPerService(t *testing.T) { testCases := []struct { in string hdr, msg uint64 }{ { in: "", hdr: maxUInt, msg: maxUInt, }, { in: "{h}", hdr: maxUInt, msg: 0, }, { in: "{h:314}", hdr: 314, msg: 0, }, { in: "{m}", hdr: 0, msg: maxUInt, }, { in: "{m:213}", hdr: 0, msg: 213, }, { in: "{h;m}", hdr: maxUInt, msg: maxUInt, }, { in: "{h:314;m}", hdr: 314, msg: maxUInt, }, { in: "{h;m:213}", hdr: maxUInt, msg: 213, }, { in: "{h:314;m:213}", hdr: 314, msg: 213, }, } const serviceName = "service" for _, tc := range testCases { c := serviceName + "/*" + tc.in t.Logf("testing fillMethodLoggerWithConfigString(%q)", c) l := newEmptyLogger() if err := l.fillMethodLoggerWithConfigString(c); err != nil { t.Errorf("returned err %v, want nil", err) continue } ml, ok := l.services[serviceName] if !ok { t.Errorf("l.service[%q] is not set", serviceName) continue } if hdr := ml.hdr; hdr != tc.hdr { t.Errorf("header length = %v, want %v", hdr, tc.hdr) } if msg := ml.msg; msg != tc.msg { t.Errorf("message length = %v, want %v", msg, tc.msg) } } } func (s) TestFillMethodLoggerWithConfigStringPerMethod(t *testing.T) { testCases := []struct { in string hdr, msg uint64 }{ { in: "", hdr: maxUInt, msg: maxUInt, }, { in: "{h}", hdr: maxUInt, msg: 0, }, { in: "{h:314}", hdr: 314, msg: 0, }, { in: "{m}", hdr: 0, msg: maxUInt, }, { in: "{m:213}", hdr: 0, msg: 213, }, { in: "{h;m}", hdr: maxUInt, msg: maxUInt, }, { in: "{h:314;m}", hdr: 314, msg: maxUInt, }, { in: "{h;m:213}", hdr: maxUInt, msg: 213, }, { in: "{h:314;m:213}", hdr: 314, msg: 213, }, } const ( serviceName = "service" 
methodName = "method" fullMethodName = serviceName + "/" + methodName ) for _, tc := range testCases { c := fullMethodName + tc.in t.Logf("testing fillMethodLoggerWithConfigString(%q)", c) l := newEmptyLogger() if err := l.fillMethodLoggerWithConfigString(c); err != nil { t.Errorf("returned err %v, want nil", err) continue } ml, ok := l.methods[fullMethodName] if !ok { t.Errorf("l.methods[%q] is not set", fullMethodName) continue } if hdr := ml.hdr; hdr != tc.hdr { t.Errorf("header length = %v, want %v", hdr, tc.hdr) } if msg := ml.msg; msg != tc.msg { t.Errorf("message length = %v, want %v", msg, tc.msg) } } } func (s) TestFillMethodLoggerWithConfigStringInvalid(t *testing.T) { testCases := []string{ "", "{}", "p.s/m{}", "p.s/m{a}", "p.s/m*", "p.s/**", "*/m", "-p.s/*", "-p.s/m{h}", } l := &logger{} for _, tc := range testCases { if err := l.fillMethodLoggerWithConfigString(tc); err == nil { t.Errorf("fillMethodLoggerWithConfigString(%q) returned nil error, want non-nil", tc) } } } grpc-go-1.29.1/internal/binarylog/method_logger.go000066400000000000000000000245301365033716300221250ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package binarylog import ( "net" "strings" "sync/atomic" "time" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) type callIDGenerator struct { id uint64 } func (g *callIDGenerator) next() uint64 { id := atomic.AddUint64(&g.id, 1) return id } // reset is for testing only, and doesn't need to be thread safe. func (g *callIDGenerator) reset() { g.id = 0 } var idGen callIDGenerator // MethodLogger is the sub-logger for each method. type MethodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 idWithinCallGen *callIDGenerator sink Sink // TODO(blog): make this plugable. } func newMethodLogger(h, m uint64) *MethodLogger { return &MethodLogger{ headerMaxLen: h, messageMaxLen: m, callID: idGen.next(), idWithinCallGen: &callIDGenerator{}, sink: defaultSink, // TODO(blog): make it plugable. } } // Log creates a proto binary log entry, and logs it to the sink. func (ml *MethodLogger) Log(c LogEntryConfig) { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp m.CallId = ml.callID m.SequenceIdWithinCall = ml.idWithinCallGen.next() switch pay := m.Payload.(type) { case *pb.GrpcLogEntry_ClientHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) case *pb.GrpcLogEntry_ServerHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) case *pb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } ml.sink.Write(m) } func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } var ( bytesLimit = ml.headerMaxLen index int ) // At the end of the loop, index will be the first entry where the total // size is greater than the limit: // // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. 
for ; index < len(mdPb.Entry); index++ { entry := mdPb.Entry[index] if entry.Key == "grpc-trace-bin" { // "grpc-trace-bin" is a special key. It's kept in the log entry, // but not counted towards the size limit. continue } currentEntryLen := uint64(len(entry.Value)) if currentEntryLen > bytesLimit { break } bytesLimit -= currentEntryLen } truncated = index < len(mdPb.Entry) mdPb.Entry = mdPb.Entry[:index] return truncated } func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } if ml.messageMaxLen >= uint64(len(msgPb.Data)) { return false } msgPb.Data = msgPb.Data[:ml.messageMaxLen] return true } // LogEntryConfig represents the configuration for binary log entry. type LogEntryConfig interface { toProto() *pb.GrpcLogEntry } // ClientHeader configs the binary log entry to be a ClientHeader entry. type ClientHeader struct { OnClientSide bool Header metadata.MD MethodName string Authority string Timeout time.Duration // PeerAddr is required only when it's on server side. PeerAddr net.Addr } func (c *ClientHeader) toProto() *pb.GrpcLogEntry { // This function doesn't need to set all the fields (e.g. seq ID). The Log // function will set the fields when necessary. clientHeader := &pb.ClientHeader{ Metadata: mdToMetadataProto(c.Header), MethodName: c.MethodName, Authority: c.Authority, } if c.Timeout > 0 { clientHeader.Timeout = ptypes.DurationProto(c.Timeout) } ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, Payload: &pb.GrpcLogEntry_ClientHeader{ ClientHeader: clientHeader, }, } if c.OnClientSide { ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) } return ret } // ServerHeader configs the binary log entry to be a ServerHeader entry. type ServerHeader struct { OnClientSide bool Header metadata.MD // PeerAddr is required only when it's on client side. 
PeerAddr net.Addr } func (c *ServerHeader) toProto() *pb.GrpcLogEntry { ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, Payload: &pb.GrpcLogEntry_ServerHeader{ ServerHeader: &pb.ServerHeader{ Metadata: mdToMetadataProto(c.Header), }, }, } if c.OnClientSide { ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) } return ret } // ClientMessage configs the binary log entry to be a ClientMessage entry. type ClientMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. Message interface{} } func (c *ClientMessage) toProto() *pb.GrpcLogEntry { var ( data []byte err error ) if m, ok := c.Message.(proto.Message); ok { data, err = proto.Marshal(m) if err != nil { grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) } } else if b, ok := c.Message.([]byte); ok { data = b } else { grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") } ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, Payload: &pb.GrpcLogEntry_Message{ Message: &pb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } return ret } // ServerMessage configs the binary log entry to be a ServerMessage entry. type ServerMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. 
Message interface{} } func (c *ServerMessage) toProto() *pb.GrpcLogEntry { var ( data []byte err error ) if m, ok := c.Message.(proto.Message); ok { data, err = proto.Marshal(m) if err != nil { grpclog.Infof("binarylogging: failed to marshal proto message: %v", err) } } else if b, ok := c.Message.([]byte); ok { data = b } else { grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte") } ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, Payload: &pb.GrpcLogEntry_Message{ Message: &pb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } return ret } // ClientHalfClose configs the binary log entry to be a ClientHalfClose entry. type ClientHalfClose struct { OnClientSide bool } func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. } if c.OnClientSide { ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } return ret } // ServerTrailer configs the binary log entry to be a ServerTrailer entry. type ServerTrailer struct { OnClientSide bool Trailer metadata.MD // Err is the status error. Err error // PeerAddr is required only when it's on client side and the RPC is trailer // only. 
PeerAddr net.Addr } func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { grpclog.Info("binarylogging: error in trailer is not a status error") } var ( detailsBytes []byte err error ) stProto := st.Proto() if stProto != nil && len(stProto.Details) != 0 { detailsBytes, err = proto.Marshal(stProto) if err != nil { grpclog.Infof("binarylogging: failed to marshal status proto: %v", err) } } ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, Payload: &pb.GrpcLogEntry_Trailer{ Trailer: &pb.Trailer{ Metadata: mdToMetadataProto(c.Trailer), StatusCode: uint32(st.Code()), StatusMessage: st.Message(), StatusDetails: detailsBytes, }, }, } if c.OnClientSide { ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) } return ret } // Cancel configs the binary log entry to be a Cancel entry. type Cancel struct { OnClientSide bool } func (c *Cancel) toProto() *pb.GrpcLogEntry { ret := &pb.GrpcLogEntry{ Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, Payload: nil, } if c.OnClientSide { ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT } else { ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER } return ret } // metadataKeyOmit returns whether the metadata entry with this key should be // omitted. func metadataKeyOmit(key string) bool { switch key { case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": return true case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. 
return false } return strings.HasPrefix(key, "grpc-") } func mdToMetadataProto(md metadata.MD) *pb.Metadata { ret := &pb.Metadata{} for k, vv := range md { if metadataKeyOmit(k) { continue } for _, v := range vv { ret.Entry = append(ret.Entry, &pb.MetadataEntry{ Key: k, Value: []byte(v), }, ) } } return ret } func addrToProto(addr net.Addr) *pb.Address { ret := &pb.Address{} switch a := addr.(type) { case *net.TCPAddr: if a.IP.To4() != nil { ret.Type = pb.Address_TYPE_IPV4 } else if a.IP.To16() != nil { ret.Type = pb.Address_TYPE_IPV6 } else { ret.Type = pb.Address_TYPE_UNKNOWN // Do not set address and port fields. break } ret.Address = a.IP.String() ret.IpPort = uint32(a.Port) case *net.UnixAddr: ret.Type = pb.Address_TYPE_UNIX ret.Address = a.String() default: ret.Type = pb.Address_TYPE_UNKNOWN } return ret } grpc-go-1.29.1/internal/binarylog/method_logger_test.go000066400000000000000000000321401365033716300231600ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package binarylog import ( "bytes" "fmt" "net" "testing" "time" "github.com/golang/protobuf/proto" dpb "github.com/golang/protobuf/ptypes/duration" pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) func (s) TestLog(t *testing.T) { idGen.reset() ml := newMethodLogger(10, 10) // Set sink to testing buffer. 
buf := bytes.NewBuffer(nil) ml.sink = newWriterSink(buf) addr := "1.2.3.4" port := 790 tcpAddr, _ := net.ResolveTCPAddr("tcp", fmt.Sprintf("%v:%d", addr, port)) addr6 := "2001:1db8:85a3::8a2e:1370:7334" port6 := 796 tcpAddr6, _ := net.ResolveTCPAddr("tcp", fmt.Sprintf("[%v]:%d", addr6, port6)) testProtoMsg := &pb.Message{ Length: 1, Data: []byte{'a'}, } testProtoBytes, _ := proto.Marshal(testProtoMsg) testCases := []struct { config LogEntryConfig want *pb.GrpcLogEntry }{ { config: &ClientHeader{ OnClientSide: false, Header: map[string][]string{ "a": {"b", "bb"}, }, MethodName: "testservice/testmethod", Authority: "test.service.io", Timeout: 2*time.Second + 3*time.Nanosecond, PeerAddr: tcpAddr, }, want: &pb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, Logger: pb.GrpcLogEntry_LOGGER_SERVER, Payload: &pb.GrpcLogEntry_ClientHeader{ ClientHeader: &pb.ClientHeader{ Metadata: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "a", Value: []byte{'b'}}, {Key: "a", Value: []byte{'b', 'b'}}, }, }, MethodName: "testservice/testmethod", Authority: "test.service.io", Timeout: &dpb.Duration{ Seconds: 2, Nanos: 3, }, }, }, PayloadTruncated: false, Peer: &pb.Address{ Type: pb.Address_TYPE_IPV4, Address: addr, IpPort: uint32(port), }, }, }, { config: &ClientHeader{ OnClientSide: false, MethodName: "testservice/testmethod", Authority: "test.service.io", }, want: &pb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, Logger: pb.GrpcLogEntry_LOGGER_SERVER, Payload: &pb.GrpcLogEntry_ClientHeader{ ClientHeader: &pb.ClientHeader{ Metadata: &pb.Metadata{}, MethodName: "testservice/testmethod", Authority: "test.service.io", }, }, PayloadTruncated: false, }, }, { config: &ServerHeader{ OnClientSide: true, Header: map[string][]string{ "a": {"b", "bb"}, }, PeerAddr: tcpAddr6, }, want: &pb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, Type: 
pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, Logger: pb.GrpcLogEntry_LOGGER_CLIENT, Payload: &pb.GrpcLogEntry_ServerHeader{ ServerHeader: &pb.ServerHeader{ Metadata: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "a", Value: []byte{'b'}}, {Key: "a", Value: []byte{'b', 'b'}}, }, }, }, }, PayloadTruncated: false, Peer: &pb.Address{ Type: pb.Address_TYPE_IPV6, Address: addr6, IpPort: uint32(port6), }, }, }, { config: &ClientMessage{ OnClientSide: true, Message: testProtoMsg, }, want: &pb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, Logger: pb.GrpcLogEntry_LOGGER_CLIENT, Payload: &pb.GrpcLogEntry_Message{ Message: &pb.Message{ Length: uint32(len(testProtoBytes)), Data: testProtoBytes, }, }, PayloadTruncated: false, Peer: nil, }, }, { config: &ServerMessage{ OnClientSide: false, Message: testProtoMsg, }, want: &pb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, Logger: pb.GrpcLogEntry_LOGGER_SERVER, Payload: &pb.GrpcLogEntry_Message{ Message: &pb.Message{ Length: uint32(len(testProtoBytes)), Data: testProtoBytes, }, }, PayloadTruncated: false, Peer: nil, }, }, { config: &ClientHalfClose{ OnClientSide: false, }, want: &pb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Logger: pb.GrpcLogEntry_LOGGER_SERVER, Payload: nil, PayloadTruncated: false, Peer: nil, }, }, { config: &ServerTrailer{ OnClientSide: true, Err: status.Errorf(codes.Unavailable, "test"), PeerAddr: tcpAddr, }, want: &pb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, Logger: pb.GrpcLogEntry_LOGGER_CLIENT, Payload: &pb.GrpcLogEntry_Trailer{ Trailer: &pb.Trailer{ Metadata: &pb.Metadata{}, StatusCode: uint32(codes.Unavailable), StatusMessage: "test", StatusDetails: nil, }, }, PayloadTruncated: false, Peer: &pb.Address{ Type: 
pb.Address_TYPE_IPV4, Address: addr, IpPort: uint32(port), }, }, }, { // Err is nil, Log OK status. config: &ServerTrailer{ OnClientSide: true, }, want: &pb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, Logger: pb.GrpcLogEntry_LOGGER_CLIENT, Payload: &pb.GrpcLogEntry_Trailer{ Trailer: &pb.Trailer{ Metadata: &pb.Metadata{}, StatusCode: uint32(codes.OK), StatusMessage: "", StatusDetails: nil, }, }, PayloadTruncated: false, Peer: nil, }, }, { config: &Cancel{ OnClientSide: true, }, want: &pb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, Logger: pb.GrpcLogEntry_LOGGER_CLIENT, Payload: nil, PayloadTruncated: false, Peer: nil, }, }, // gRPC headers should be omitted. { config: &ClientHeader{ OnClientSide: false, Header: map[string][]string{ "grpc-reserved": {"to be omitted"}, ":authority": {"to be omitted"}, "a": {"b", "bb"}, }, }, want: &pb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, Logger: pb.GrpcLogEntry_LOGGER_SERVER, Payload: &pb.GrpcLogEntry_ClientHeader{ ClientHeader: &pb.ClientHeader{ Metadata: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "a", Value: []byte{'b'}}, {Key: "a", Value: []byte{'b', 'b'}}, }, }, }, }, PayloadTruncated: false, }, }, { config: &ServerHeader{ OnClientSide: true, Header: map[string][]string{ "grpc-reserved": {"to be omitted"}, ":authority": {"to be omitted"}, "a": {"b", "bb"}, }, }, want: &pb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, Logger: pb.GrpcLogEntry_LOGGER_CLIENT, Payload: &pb.GrpcLogEntry_ServerHeader{ ServerHeader: &pb.ServerHeader{ Metadata: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "a", Value: []byte{'b'}}, {Key: "a", Value: []byte{'b', 'b'}}, }, }, }, }, PayloadTruncated: false, }, }, } for i, tc := range testCases { buf.Reset() 
tc.want.SequenceIdWithinCall = uint64(i + 1) ml.Log(tc.config) inSink := new(pb.GrpcLogEntry) if err := proto.Unmarshal(buf.Bytes()[4:], inSink); err != nil { t.Errorf("failed to unmarshal bytes in sink to proto: %v", err) continue } inSink.Timestamp = nil // Strip timestamp before comparing. if !proto.Equal(inSink, tc.want) { t.Errorf("Log(%+v), in sink: %+v, want %+v", tc.config, inSink, tc.want) } } } func (s) TestTruncateMetadataNotTruncated(t *testing.T) { testCases := []struct { ml *MethodLogger mpPb *pb.Metadata }{ { ml: newMethodLogger(maxUInt, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1}}, }, }, }, { ml: newMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1}}, }, }, }, { ml: newMethodLogger(1, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: nil}, }, }, }, { ml: newMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1, 1}}, }, }, }, { ml: newMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1}}, {Key: "", Value: []byte{1}}, }, }, }, // "grpc-trace-bin" is kept in log but not counted towards the size // limit. 
{ ml: newMethodLogger(1, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1}}, {Key: "grpc-trace-bin", Value: []byte("some.trace.key")}, }, }, }, } for i, tc := range testCases { truncated := tc.ml.truncateMetadata(tc.mpPb) if truncated { t.Errorf("test case %v, returned truncated, want not truncated", i) } } } func (s) TestTruncateMetadataTruncated(t *testing.T) { testCases := []struct { ml *MethodLogger mpPb *pb.Metadata entryLen int }{ { ml: newMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1, 1, 1}}, }, }, entryLen: 0, }, { ml: newMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1}}, {Key: "", Value: []byte{1}}, {Key: "", Value: []byte{1}}, }, }, entryLen: 2, }, { ml: newMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1, 1}}, {Key: "", Value: []byte{1}}, }, }, entryLen: 1, }, { ml: newMethodLogger(2, maxUInt), mpPb: &pb.Metadata{ Entry: []*pb.MetadataEntry{ {Key: "", Value: []byte{1}}, {Key: "", Value: []byte{1, 1}}, }, }, entryLen: 1, }, } for i, tc := range testCases { truncated := tc.ml.truncateMetadata(tc.mpPb) if !truncated { t.Errorf("test case %v, returned not truncated, want truncated", i) continue } if len(tc.mpPb.Entry) != tc.entryLen { t.Errorf("test case %v, entry length: %v, want: %v", i, len(tc.mpPb.Entry), tc.entryLen) } } } func (s) TestTruncateMessageNotTruncated(t *testing.T) { testCases := []struct { ml *MethodLogger msgPb *pb.Message }{ { ml: newMethodLogger(maxUInt, maxUInt), msgPb: &pb.Message{ Data: []byte{1}, }, }, { ml: newMethodLogger(maxUInt, 3), msgPb: &pb.Message{ Data: []byte{1, 1}, }, }, { ml: newMethodLogger(maxUInt, 2), msgPb: &pb.Message{ Data: []byte{1, 1}, }, }, } for i, tc := range testCases { truncated := tc.ml.truncateMessage(tc.msgPb) if truncated { t.Errorf("test case %v, returned truncated, want not truncated", i) } } } func (s) 
TestTruncateMessageTruncated(t *testing.T) { testCases := []struct { ml *MethodLogger msgPb *pb.Message oldLength uint32 }{ { ml: newMethodLogger(maxUInt, 2), msgPb: &pb.Message{ Length: 3, Data: []byte{1, 1, 1}, }, oldLength: 3, }, } for i, tc := range testCases { truncated := tc.ml.truncateMessage(tc.msgPb) if !truncated { t.Errorf("test case %v, returned not truncated, want truncated", i) continue } if len(tc.msgPb.Data) != int(tc.ml.messageMaxLen) { t.Errorf("test case %v, message length: %v, want: %v", i, len(tc.msgPb.Data), tc.ml.messageMaxLen) } if tc.msgPb.Length != tc.oldLength { t.Errorf("test case %v, message.Length field: %v, want: %v", i, tc.msgPb.Length, tc.oldLength) } } } grpc-go-1.29.1/internal/binarylog/regenerate.sh000077500000000000000000000021251365033716300214330ustar00rootroot00000000000000#!/bin/bash # Copyright 2018 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux -o pipefail TMP=$(mktemp -d) function finish { rm -rf "$TMP" } trap finish EXIT pushd "$TMP" mkdir -p grpc/binarylog/grpc_binarylog_v1 curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto protoc --go_out=plugins=grpc,paths=source_relative:. -I. 
grpc/binarylog/grpc_binarylog_v1/*.proto popd rm -f ./grpc_binarylog_v1/*.pb.go cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/ grpc-go-1.29.1/internal/binarylog/regexp_test.go000066400000000000000000000072501365033716300216370ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package binarylog import ( "reflect" "testing" ) func (s) TestLongMethodConfigRegexp(t *testing.T) { testCases := []struct { in string out []string }{ {in: "", out: nil}, {in: "*/m", out: nil}, { in: "p.s/m{}", out: []string{"p.s/m{}", "p.s", "m", "{}"}, }, { in: "p.s/m", out: []string{"p.s/m", "p.s", "m", ""}, }, { in: "p.s/m{h}", out: []string{"p.s/m{h}", "p.s", "m", "{h}"}, }, { in: "p.s/m{m}", out: []string{"p.s/m{m}", "p.s", "m", "{m}"}, }, { in: "p.s/m{h:123}", out: []string{"p.s/m{h:123}", "p.s", "m", "{h:123}"}, }, { in: "p.s/m{m:123}", out: []string{"p.s/m{m:123}", "p.s", "m", "{m:123}"}, }, { in: "p.s/m{h:123,m:123}", out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, }, { in: "p.s/*", out: []string{"p.s/*", "p.s", "*", ""}, }, { in: "p.s/*{h}", out: []string{"p.s/*{h}", "p.s", "*", "{h}"}, }, { in: "s/m*", out: []string{"s/m*", "s", "m", "*"}, }, { in: "s/**", out: []string{"s/**", "s", "*", "*"}, }, } for _, tc := range testCases { match := longMethodConfigRegexp.FindStringSubmatch(tc.in) if !reflect.DeepEqual(match, tc.out) { t.Errorf("in: %q, out: %q, want: %q", tc.in, match, 
tc.out) } } } func (s) TestHeaderConfigRegexp(t *testing.T) { testCases := []struct { in string out []string }{ {in: "{}", out: nil}, {in: "{a:b}", out: nil}, {in: "{m:123}", out: nil}, {in: "{h:123;m:123}", out: nil}, { in: "{h}", out: []string{"{h}", ""}, }, { in: "{h:123}", out: []string{"{h:123}", "123"}, }, } for _, tc := range testCases { match := headerConfigRegexp.FindStringSubmatch(tc.in) if !reflect.DeepEqual(match, tc.out) { t.Errorf("in: %q, out: %q, want: %q", tc.in, match, tc.out) } } } func (s) TestMessageConfigRegexp(t *testing.T) { testCases := []struct { in string out []string }{ {in: "{}", out: nil}, {in: "{a:b}", out: nil}, {in: "{h:123}", out: nil}, {in: "{h:123;m:123}", out: nil}, { in: "{m}", out: []string{"{m}", ""}, }, { in: "{m:123}", out: []string{"{m:123}", "123"}, }, } for _, tc := range testCases { match := messageConfigRegexp.FindStringSubmatch(tc.in) if !reflect.DeepEqual(match, tc.out) { t.Errorf("in: %q, out: %q, want: %q", tc.in, match, tc.out) } } } func (s) TestHeaderMessageConfigRegexp(t *testing.T) { testCases := []struct { in string out []string }{ {in: "{}", out: nil}, {in: "{a:b}", out: nil}, {in: "{h}", out: nil}, {in: "{h:123}", out: nil}, {in: "{m}", out: nil}, {in: "{m:123}", out: nil}, { in: "{h;m}", out: []string{"{h;m}", "", ""}, }, { in: "{h:123;m}", out: []string{"{h:123;m}", "123", ""}, }, { in: "{h;m:123}", out: []string{"{h;m:123}", "", "123"}, }, { in: "{h:123;m:123}", out: []string{"{h:123;m:123}", "123", "123"}, }, } for _, tc := range testCases { match := headerMessageConfigRegexp.FindStringSubmatch(tc.in) if !reflect.DeepEqual(match, tc.out) { t.Errorf("in: %q, out: %q, want: %q", tc.in, match, tc.out) } } } grpc-go-1.29.1/internal/binarylog/sink.go000066400000000000000000000075361365033716300202610ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package binarylog import ( "bufio" "encoding/binary" "fmt" "io" "io/ioutil" "sync" "time" "github.com/golang/protobuf/proto" pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/grpclog" ) var ( defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). ) // SetDefaultSink sets the sink where binary logs will be written to. // // Not thread safe. Only set during initialization. func SetDefaultSink(s Sink) { if defaultSink != nil { defaultSink.Close() } defaultSink = s } // Sink writes log entry into the binary log sink. type Sink interface { // Write will be called to write the log entry into the sink. // // It should be thread-safe so it can be called in parallel. Write(*pb.GrpcLogEntry) error // Close will be called when the Sink is replaced by a new Sink. Close() error } type noopSink struct{} func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. // // Write() marshals the proto message and writes it to the given writer. Each // message is prefixed with a 4 byte big endian unsigned integer as the length. // // No buffer is done, Close() doesn't try to close the writer. 
func newWriterSink(w io.Writer) *writerSink { return &writerSink{out: w} } type writerSink struct { out io.Writer } func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { grpclog.Infof("binary logging: failed to marshal proto message: %v", err) } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) if _, err := ws.out.Write(hdr); err != nil { return err } if _, err := ws.out.Write(b); err != nil { return err } return nil } func (ws *writerSink) Close() error { return nil } type bufWriteCloserSink struct { mu sync.Mutex closer io.Closer out *writerSink // out is built on buf. buf *bufio.Writer // buf is kept for flush. writeStartOnce sync.Once writeTicker *time.Ticker } func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error { // Start the write loop when Write is called. fs.writeStartOnce.Do(fs.startFlushGoroutine) fs.mu.Lock() if err := fs.out.Write(e); err != nil { fs.mu.Unlock() return err } fs.mu.Unlock() return nil } const ( bufFlushDuration = 60 * time.Second ) func (fs *bufWriteCloserSink) startFlushGoroutine() { fs.writeTicker = time.NewTicker(bufFlushDuration) go func() { for range fs.writeTicker.C { fs.mu.Lock() fs.buf.Flush() fs.mu.Unlock() } }() } func (fs *bufWriteCloserSink) Close() error { if fs.writeTicker != nil { fs.writeTicker.Stop() } fs.mu.Lock() fs.buf.Flush() fs.closer.Close() fs.out.Close() fs.mu.Unlock() return nil } func newBufWriteCloserSink(o io.WriteCloser) Sink { bufW := bufio.NewWriter(o) return &bufWriteCloserSink{ closer: o, out: newWriterSink(bufW), buf: bufW, } } // NewTempFileSink creates a temp file and returns a Sink that writes to this // file. 
func NewTempFileSink() (Sink, error) { tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt") if err != nil { return nil, fmt.Errorf("failed to create temp file: %v", err) } return newBufWriteCloserSink(tempFile), nil } grpc-go-1.29.1/internal/binarylog/util.go000066400000000000000000000022711365033716300202610ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package binarylog import ( "errors" "strings" ) // parseMethodName splits service and method from the input. It expects format // "/service/method". // // TODO: move to internal/grpcutil. func parseMethodName(methodName string) (service, method string, _ error) { if !strings.HasPrefix(methodName, "/") { return "", "", errors.New("invalid method name: should start with /") } methodName = methodName[1:] pos := strings.LastIndex(methodName, "/") if pos < 0 { return "", "", errors.New("invalid method name: suffix /method is missing") } return methodName[:pos], methodName[pos+1:], nil } grpc-go-1.29.1/internal/binarylog/util_test.go000066400000000000000000000030321365033716300213140ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package binarylog import "testing" func (s) TestParseMethodName(t *testing.T) { testCases := []struct { methodName string service, method string }{ {methodName: "/s/m", service: "s", method: "m"}, {methodName: "/p.s/m", service: "p.s", method: "m"}, {methodName: "/p/s/m", service: "p/s", method: "m"}, } for _, tc := range testCases { s, m, err := parseMethodName(tc.methodName) if err != nil { t.Errorf("Parsing %q got error %v, want nil", tc.methodName, err) continue } if s != tc.service || m != tc.method { t.Errorf("Parseing %q got service %q, method %q, want service %q, method %q", tc.methodName, s, m, tc.service, tc.method, ) } } } func (s) TestParseMethodNameInvalid(t *testing.T) { testCases := []string{ "/", "/sm", "", "sm", } for _, tc := range testCases { _, _, err := parseMethodName(tc) if err == nil { t.Errorf("Parsing %q got nil error, want non-nil error", tc) } } } grpc-go-1.29.1/internal/buffer/000077500000000000000000000000001365033716300162365ustar00rootroot00000000000000grpc-go-1.29.1/internal/buffer/unbounded.go000066400000000000000000000051011365033716300205450ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package buffer provides an implementation of an unbounded buffer. package buffer import "sync" // Unbounded is an implementation of an unbounded buffer which does not use // extra goroutines. This is typically used for passing updates from one entity // to another within gRPC. // // All methods on this type are thread-safe and don't block on anything except // the underlying mutex used for synchronization. // // Unbounded supports values of any type to be stored in it by using a channel // of `interface{}`. This means that a call to Put() incurs an extra memory // allocation, and also that users need a type assertion while reading. For // performance critical code paths, using Unbounded is strongly discouraged and // defining a new type specific implementation of this buffer is preferred. See // internal/transport/transport.go for an example of this. type Unbounded struct { c chan interface{} mu sync.Mutex backlog []interface{} } // NewUnbounded returns a new instance of Unbounded. func NewUnbounded() *Unbounded { return &Unbounded{c: make(chan interface{}, 1)} } // Put adds t to the unbounded buffer. func (b *Unbounded) Put(t interface{}) { b.mu.Lock() if len(b.backlog) == 0 { select { case b.c <- t: b.mu.Unlock() return default: } } b.backlog = append(b.backlog, t) b.mu.Unlock() } // Load sends the earliest buffered data, if any, onto the read channel // returned by Get(). Users are expected to call this every time they read a // value from the read channel. 
func (b *Unbounded) Load() { b.mu.Lock() if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: b.backlog[0] = nil b.backlog = b.backlog[1:] default: } } b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), // are sent on. // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. func (b *Unbounded) Get() <-chan interface{} { return b.c } grpc-go-1.29.1/internal/buffer/unbounded_test.go000066400000000000000000000047451365033716300216210ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package buffer import ( "reflect" "sort" "sync" "testing" "google.golang.org/grpc/internal/grpctest" ) const ( numWriters = 10 numWrites = 10 ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } // wantReads contains the set of values expected to be read by the reader // goroutine in the tests. var wantReads []int func init() { for i := 0; i < numWriters; i++ { for j := 0; j < numWrites; j++ { wantReads = append(wantReads, i) } } } // TestSingleWriter starts one reader and one writer goroutine and makes sure // that the reader gets all the value added to the buffer by the writer. 
func (s) TestSingleWriter(t *testing.T) { ub := NewUnbounded() reads := []int{} var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() ch := ub.Get() for i := 0; i < numWriters*numWrites; i++ { r := <-ch reads = append(reads, r.(int)) ub.Load() } }() wg.Add(1) go func() { defer wg.Done() for i := 0; i < numWriters; i++ { for j := 0; j < numWrites; j++ { ub.Put(i) } } }() wg.Wait() if !reflect.DeepEqual(reads, wantReads) { t.Errorf("reads: %#v, wantReads: %#v", reads, wantReads) } } // TestMultipleWriters starts multiple writers and one reader goroutine and // makes sure that the reader gets all the data written by all writers. func (s) TestMultipleWriters(t *testing.T) { ub := NewUnbounded() reads := []int{} var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() ch := ub.Get() for i := 0; i < numWriters*numWrites; i++ { r := <-ch reads = append(reads, r.(int)) ub.Load() } }() wg.Add(numWriters) for i := 0; i < numWriters; i++ { go func(index int) { defer wg.Done() for j := 0; j < numWrites; j++ { ub.Put(index) } }(i) } wg.Wait() sort.Ints(reads) if !reflect.DeepEqual(reads, wantReads) { t.Errorf("reads: %#v, wantReads: %#v", reads, wantReads) } } grpc-go-1.29.1/internal/cache/000077500000000000000000000000001365033716300160305ustar00rootroot00000000000000grpc-go-1.29.1/internal/cache/timeoutCache.go000066400000000000000000000075201365033716300207750ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ // Package cache implements caches to be used in gRPC. package cache import ( "sync" "time" ) type cacheEntry struct { item interface{} callback func() timer *time.Timer // deleted is set to true in Remove() when the call to timer.Stop() fails. // This can happen when the timer in the cache entry fires around the same // time that timer.stop() is called in Remove(). deleted bool } // TimeoutCache is a cache with items to be deleted after a timeout. type TimeoutCache struct { mu sync.Mutex timeout time.Duration cache map[interface{}]*cacheEntry } // NewTimeoutCache creates a TimeoutCache with the given timeout. func NewTimeoutCache(timeout time.Duration) *TimeoutCache { return &TimeoutCache{ timeout: timeout, cache: make(map[interface{}]*cacheEntry), } } // Add adds an item to the cache, with the specified callback to be called when // the item is removed from the cache upon timeout. If the item is removed from // the cache using a call to Remove before the timeout expires, the callback // will not be called. // // If the Add was successful, it returns (newly added item, true). If there is // an existing entry for the specified key, the cache entry is not be updated // with the specified item and it returns (existing item, false). func (c *TimeoutCache) Add(key, item interface{}, callback func()) (interface{}, bool) { c.mu.Lock() defer c.mu.Unlock() if e, ok := c.cache[key]; ok { return e.item, false } entry := &cacheEntry{ item: item, callback: callback, } entry.timer = time.AfterFunc(c.timeout, func() { c.mu.Lock() if entry.deleted { c.mu.Unlock() // Abort the delete since this has been taken care of in Remove(). return } delete(c.cache, key) c.mu.Unlock() entry.callback() }) c.cache[key] = entry return item, true } // Remove the item with the key from the cache. // // If the specified key exists in the cache, it returns (item associated with // key, true) and the callback associated with the item is guaranteed to be not // called. 
If the given key is not found in the cache, it returns (nil, false) func (c *TimeoutCache) Remove(key interface{}) (item interface{}, ok bool) { c.mu.Lock() defer c.mu.Unlock() entry, ok := c.removeInternal(key, false) if !ok { return nil, false } return entry.item, true } // removeInternal removes and returns the item with key. // // caller must hold c.mu. func (c *TimeoutCache) removeInternal(key interface{}, runCallback bool) (*cacheEntry, bool) { entry, ok := c.cache[key] if !ok { return nil, false } delete(c.cache, key) if !entry.timer.Stop() { // If stop was not successful, the timer has fired (this can only happen // in a race). But the deleting function is blocked on c.mu because the // mutex was held by the caller of this function. // // Set deleted to true to abort the deleting function. When the lock is // released, the delete function will acquire the lock, check the value // of deleted and return. entry.deleted = true } if runCallback { entry.callback() } return entry, true } // Clear removes all entries, and runs the callbacks if runCallback is true. func (c *TimeoutCache) Clear(runCallback bool) { c.mu.Lock() defer c.mu.Unlock() for key := range c.cache { c.removeInternal(key, runCallback) } } grpc-go-1.29.1/internal/cache/timeoutCache_test.go000066400000000000000000000135571365033716300220430ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package cache import ( "strconv" "sync" "testing" "time" "google.golang.org/grpc/internal/grpctest" ) const ( testCacheTimeout = 100 * time.Millisecond ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func (c *TimeoutCache) getForTesting(key interface{}) (*cacheEntry, bool) { c.mu.Lock() defer c.mu.Unlock() r, ok := c.cache[key] return r, ok } // TestCacheExpire attempts to add an entry to the cache and verifies that it // was added successfully. It then makes sure that on timeout, it's removed and // the associated callback is called. func (s) TestCacheExpire(t *testing.T) { const k, v = 1, "1" c := NewTimeoutCache(testCacheTimeout) callbackChan := make(chan struct{}) c.Add(k, v, func() { close(callbackChan) }) if gotV, ok := c.getForTesting(k); !ok || gotV.item != v { t.Fatalf("After Add(), before timeout, from cache got: %v, %v, want %v, %v", gotV.item, ok, v, true) } select { case <-callbackChan: case <-time.After(testCacheTimeout * 2): t.Fatalf("timeout waiting for callback") } if _, ok := c.getForTesting(k); ok { t.Fatalf("After Add(), after timeout, from cache got: _, %v, want _, %v", ok, false) } } // TestCacheRemove attempts to remove an existing entry from the cache and // verifies that the entry is removed and the associated callback is not // invoked. 
func (s) TestCacheRemove(t *testing.T) { const k, v = 1, "1" c := NewTimeoutCache(testCacheTimeout) callbackChan := make(chan struct{}) c.Add(k, v, func() { close(callbackChan) }) if got, ok := c.getForTesting(k); !ok || got.item != v { t.Fatalf("After Add(), before timeout, from cache got: %v, %v, want %v, %v", got.item, ok, v, true) } time.Sleep(testCacheTimeout / 2) gotV, gotOK := c.Remove(k) if !gotOK || gotV != v { t.Fatalf("After Add(), before timeout, Remove() got: %v, %v, want %v, %v", gotV, gotOK, v, true) } if _, ok := c.getForTesting(k); ok { t.Fatalf("After Add(), before timeout, after Remove(), from cache got: _, %v, want _, %v", ok, false) } select { case <-callbackChan: t.Fatalf("unexpected callback after retrieve") case <-time.After(testCacheTimeout * 2): } } // TestCacheClearWithoutCallback attempts to clear all entries from the cache // and verifies that the associated callbacks are not invoked. func (s) TestCacheClearWithoutCallback(t *testing.T) { var values []string const itemCount = 3 for i := 0; i < itemCount; i++ { values = append(values, strconv.Itoa(i)) } c := NewTimeoutCache(testCacheTimeout) done := make(chan struct{}) defer close(done) callbackChan := make(chan struct{}, itemCount) for i, v := range values { callbackChanTemp := make(chan struct{}) c.Add(i, v, func() { close(callbackChanTemp) }) go func() { select { case <-callbackChanTemp: callbackChan <- struct{}{} case <-done: } }() } for i, v := range values { if got, ok := c.getForTesting(i); !ok || got.item != v { t.Fatalf("After Add(), before timeout, from cache got: %v, %v, want %v, %v", got.item, ok, v, true) } } time.Sleep(testCacheTimeout / 2) c.Clear(false) for i := range values { if _, ok := c.getForTesting(i); ok { t.Fatalf("After Add(), before timeout, after Remove(), from cache got: _, %v, want _, %v", ok, false) } } select { case <-callbackChan: t.Fatalf("unexpected callback after Clear") case <-time.After(testCacheTimeout * 2): } } // TestCacheClearWithCallback attempts 
to clear all entries from the cache and // verifies that the associated callbacks are invoked. func (s) TestCacheClearWithCallback(t *testing.T) { var values []string const itemCount = 3 for i := 0; i < itemCount; i++ { values = append(values, strconv.Itoa(i)) } c := NewTimeoutCache(time.Hour) testDone := make(chan struct{}) defer close(testDone) var wg sync.WaitGroup wg.Add(itemCount) for i, v := range values { callbackChanTemp := make(chan struct{}) c.Add(i, v, func() { close(callbackChanTemp) }) go func() { defer wg.Done() select { case <-callbackChanTemp: case <-testDone: } }() } allGoroutineDone := make(chan struct{}, itemCount) go func() { wg.Wait() close(allGoroutineDone) }() for i, v := range values { if got, ok := c.getForTesting(i); !ok || got.item != v { t.Fatalf("After Add(), before timeout, from cache got: %v, %v, want %v, %v", got.item, ok, v, true) } } time.Sleep(testCacheTimeout / 2) c.Clear(true) for i := range values { if _, ok := c.getForTesting(i); ok { t.Fatalf("After Add(), before timeout, after Remove(), from cache got: _, %v, want _, %v", ok, false) } } select { case <-allGoroutineDone: case <-time.After(testCacheTimeout * 2): t.Fatalf("timeout waiting for all callbacks") } } // TestCacheRetrieveTimeoutRace simulates the case where an entry's timer fires // around the same time that Remove() is called for it. It verifies that there // is no deadlock. func (s) TestCacheRetrieveTimeoutRace(t *testing.T) { c := NewTimeoutCache(time.Nanosecond) done := make(chan struct{}) go func() { for i := 0; i < 1000; i++ { // Add starts a timer with 1 ns timeout, then remove will race // with the timer. c.Add(i, strconv.Itoa(i), func() {}) c.Remove(i) } close(done) }() select { case <-time.After(time.Second): t.Fatalf("Test didn't finish within 1 second. 
Deadlock") case <-done: } } grpc-go-1.29.1/internal/channelz/000077500000000000000000000000001365033716300165675ustar00rootroot00000000000000grpc-go-1.29.1/internal/channelz/funcs.go000066400000000000000000000477411365033716300202510ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package channelz defines APIs for enabling channelz service, entry // registration/deletion, and accessing channelz data. It also defines channelz // metric struct formats. // // All APIs in this package are experimental. package channelz import ( "fmt" "sort" "sync" "sync/atomic" "time" "google.golang.org/grpc/internal/grpclog" ) const ( defaultMaxTraceEntry int32 = 30 ) var ( db dbWrapper idGen idGenerator // EntryPerPage defines the number of channelz entries to be shown on a web page. EntryPerPage = int64(50) curState int32 maxTraceEntry = defaultMaxTraceEntry ) // TurnOn turns on channelz data collection. func TurnOn() { if !IsOn() { NewChannelzStorage() atomic.StoreInt32(&curState, 1) } } // IsOn returns whether channelz data collection is on. func IsOn() bool { return atomic.CompareAndSwapInt32(&curState, 1, 1) } // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). // Setting it to 0 will disable channel tracing. func SetMaxTraceEntry(i int32) { atomic.StoreInt32(&maxTraceEntry, i) } // ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. 
func ResetMaxTraceEntryToDefault() { atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) } func getMaxTraceEntry() int { i := atomic.LoadInt32(&maxTraceEntry) return int(i) } // dbWarpper wraps around a reference to internal channelz data storage, and // provide synchronized functionality to set and get the reference. type dbWrapper struct { mu sync.RWMutex DB *channelMap } func (d *dbWrapper) set(db *channelMap) { d.mu.Lock() d.DB = db d.mu.Unlock() } func (d *dbWrapper) get() *channelMap { d.mu.RLock() defer d.mu.RUnlock() return d.DB } // NewChannelzStorage initializes channelz data storage and id generator. // // This function returns a cleanup function to wait for all channelz state to be reset by the // grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests // don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen // to remove some entity just register by the new test, since the id space is the same. // // Note: This function is exported for testing purpose only. User should not call // it in most cases. func NewChannelzStorage() (cleanup func() error) { db.set(&channelMap{ topLevelChannels: make(map[int64]struct{}), channels: make(map[int64]*channel), listenSockets: make(map[int64]*listenSocket), normalSockets: make(map[int64]*normalSocket), servers: make(map[int64]*server), subChannels: make(map[int64]*subChannel), }) idGen.reset() return func() error { var err error cm := db.get() if cm == nil { return nil } for i := 0; i < 1000; i++ { cm.mu.Lock() if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { cm.mu.Unlock() // all things stored in the channelz map have been cleared. 
return nil } cm.mu.Unlock() time.Sleep(10 * time.Millisecond) } cm.mu.Lock() err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) cm.mu.Unlock() return err } } // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // // The arg id specifies that only top channel with id at or above it will be included // in the result. The returned slice is up to a length of the arg maxResults or // EntryPerPage if maxResults is zero, and is sorted in ascending id order. func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { return db.get().GetTopChannels(id, maxResults) } // GetServers returns a slice of server's ServerMetric, along with a // boolean indicating whether there's more servers to be queried for. // // The arg id specifies that only server with id at or above it will be included // in the result. The returned slice is up to a length of the arg maxResults or // EntryPerPage if maxResults is zero, and is sorted in ascending id order. func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { return db.get().GetServers(id, maxResults) } // GetServerSockets returns a slice of server's (identified by id) normal socket's // SocketMetric, along with a boolean indicating whether there's more sockets to // be queried for. // // The arg startID specifies that only sockets with id at or above it will be // included in the result. The returned slice is up to a length of the arg maxResults // or EntryPerPage if maxResults is zero, and is sorted in ascending id order. 
func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { return db.get().GetServerSockets(id, startID, maxResults) } // GetChannel returns the ChannelMetric for the channel (identified by id). func GetChannel(id int64) *ChannelMetric { return db.get().GetChannel(id) } // GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). func GetSubChannel(id int64) *SubChannelMetric { return db.get().GetSubChannel(id) } // GetSocket returns the SocketInternalMetric for the socket (identified by id). func GetSocket(id int64) *SocketMetric { return db.get().GetSocket(id) } // GetServer returns the ServerMetric for the server (identified by id). func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } // RegisterChannel registers the given channel c in channelz database with ref // as its reference name, and add it to the child list of its parent (identified // by pid). pid = 0 means no parent. It returns the unique channelz tracking id // assigned to this channel. func RegisterChannel(c Channel, pid int64, ref string) int64 { id := idGen.genID() cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, pid: pid, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } if pid == 0 { db.get().addChannel(id, cn, true, pid, ref) } else { db.get().addChannel(id, cn, false, pid, ref) } return id } // RegisterSubChannel registers the given channel c in channelz database with ref // as its reference name, and add it to the child list of its parent (identified // by pid). It returns the unique channelz tracking id assigned to this subchannel. 
func RegisterSubChannel(c Channel, pid int64, ref string) int64 { if pid == 0 { grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0") return 0 } id := idGen.genID() sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, pid: pid, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } db.get().addSubChannel(id, sc, pid, ref) return id } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. func RegisterServer(s Server, ref string) int64 { id := idGen.genID() svr := &server{ refName: ref, s: s, sockets: make(map[int64]string), listenSockets: make(map[int64]string), id: id, } db.get().addServer(id, svr) return id } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. func RegisterListenSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0") return 0 } id := idGen.genID() ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} db.get().addListenSocket(id, ls, pid, ref) return id } // RegisterNormalSocket registers the given normal socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0") return 0 } id := idGen.genID() ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} db.get().addNormalSocket(id, ns, pid, ref) return id } // RemoveEntry removes an entry with unique channelz trakcing id to be id from // channelz database. 
func RemoveEntry(id int64) { db.get().removeEntry(id) } // TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added // to the channel trace. // The Parent field is optional. It is used for event that will be recorded in the entity's parent // trace also. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } // AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) { for d := desc; d != nil; d = d.Parent { switch d.Severity { case CtUNKNOWN: grpclog.InfoDepth(depth+1, d.Desc) case CtINFO: grpclog.InfoDepth(depth+1, d.Desc) case CtWarning: grpclog.WarningDepth(depth+1, d.Desc) case CtError: grpclog.ErrorDepth(depth+1, d.Desc) } } if getMaxTraceEntry() == 0 { return } db.get().traceEvent(id, desc) } // channelMap is the storage data structure for channelz. // Methods of channelMap can be divided in two two categories with respect to locking. // 1. Methods acquire the global lock. // 2. Methods that can only be called when global lock is held. // A second type of method need always to be called inside a first type of method. 
type channelMap struct { mu sync.RWMutex topLevelChannels map[int64]struct{} servers map[int64]*server channels map[int64]*channel subChannels map[int64]*subChannel listenSockets map[int64]*listenSocket normalSockets map[int64]*normalSocket } func (c *channelMap) addServer(id int64, s *server) { c.mu.Lock() s.cm = c c.servers[id] = s c.mu.Unlock() } func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { c.mu.Lock() cn.cm = c cn.trace.cm = c c.channels[id] = cn if isTopChannel { c.topLevelChannels[id] = struct{}{} } else { c.findEntry(pid).addChild(id, cn) } c.mu.Unlock() } func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { c.mu.Lock() sc.cm = c sc.trace.cm = c c.subChannels[id] = sc c.findEntry(pid).addChild(id, sc) c.mu.Unlock() } func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { c.mu.Lock() ls.cm = c c.listenSockets[id] = ls c.findEntry(pid).addChild(id, ls) c.mu.Unlock() } func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { c.mu.Lock() ns.cm = c c.normalSockets[id] = ns c.findEntry(pid).addChild(id, ns) c.mu.Unlock() } // removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to // wait on the deletion of its children and until no other entity's channel trace references it. // It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully // shutting down server will lead to the server being also deleted. func (c *channelMap) removeEntry(id int64) { c.mu.Lock() c.findEntry(id).triggerDelete() c.mu.Unlock() } // c.mu must be held by the caller func (c *channelMap) decrTraceRefCount(id int64) { e := c.findEntry(id) if v, ok := e.(tracedChannel); ok { v.decrTraceRefCount() e.deleteSelfIfReady() } } // c.mu must be held by the caller. 
func (c *channelMap) findEntry(id int64) entry { var v entry var ok bool if v, ok = c.channels[id]; ok { return v } if v, ok = c.subChannels[id]; ok { return v } if v, ok = c.servers[id]; ok { return v } if v, ok = c.listenSockets[id]; ok { return v } if v, ok = c.normalSockets[id]; ok { return v } return &dummyEntry{idNotFound: id} } // c.mu must be held by the caller // deleteEntry simply deletes an entry from the channelMap. Before calling this // method, caller must check this entry is ready to be deleted, i.e removeEntry() // has been called on it, and no children still exist. // Conditionals are ordered by the expected frequency of deletion of each entity // type, in order to optimize performance. func (c *channelMap) deleteEntry(id int64) { var ok bool if _, ok = c.normalSockets[id]; ok { delete(c.normalSockets, id) return } if _, ok = c.subChannels[id]; ok { delete(c.subChannels, id) return } if _, ok = c.channels[id]; ok { delete(c.channels, id) delete(c.topLevelChannels, id) return } if _, ok = c.listenSockets[id]; ok { delete(c.listenSockets, id) return } if _, ok = c.servers[id]; ok { delete(c.servers, id) return } } func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { c.mu.Lock() child := c.findEntry(id) childTC, ok := child.(tracedChannel) if !ok { c.mu.Unlock() return } childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) if desc.Parent != nil { parent := c.findEntry(child.getParentID()) var chanType RefChannelType switch child.(type) { case *channel: chanType = RefChannel case *subChannel: chanType = RefSubChannel } if parentTC, ok := parent.(tracedChannel); ok { parentTC.getChannelTrace().append(&TraceEvent{ Desc: desc.Parent.Desc, Severity: desc.Parent.Severity, Timestamp: time.Now(), RefID: id, RefName: childTC.getRefName(), RefType: chanType, }) childTC.incrTraceRefCount() } } c.mu.Unlock() } type int64Slice []int64 func (s int64Slice) Len() int { return len(s) } func (s 
int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } func copyMap(m map[int64]string) map[int64]string { n := make(map[int64]string) for k, v := range m { n[k] = v } return n } func min(a, b int64) int64 { if a < b { return a } return b } func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { if maxResults <= 0 { maxResults = EntryPerPage } c.mu.RLock() l := int64(len(c.topLevelChannels)) ids := make([]int64, 0, l) cns := make([]*channel, 0, min(l, maxResults)) for k := range c.topLevelChannels { ids = append(ids, k) } sort.Sort(int64Slice(ids)) idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) count := int64(0) var end bool var t []*ChannelMetric for i, v := range ids[idx:] { if count == maxResults { break } if cn, ok := c.channels[v]; ok { cns = append(cns, cn) t = append(t, &ChannelMetric{ NestedChans: copyMap(cn.nestedChans), SubChans: copyMap(cn.subChans), }) count++ } if i == len(ids[idx:])-1 { end = true break } } c.mu.RUnlock() if count == 0 { end = true } for i, cn := range cns { t[i].ChannelData = cn.c.ChannelzMetric() t[i].ID = cn.id t[i].RefName = cn.refName t[i].Trace = cn.trace.dumpData() } return t, end } func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { if maxResults <= 0 { maxResults = EntryPerPage } c.mu.RLock() l := int64(len(c.servers)) ids := make([]int64, 0, l) ss := make([]*server, 0, min(l, maxResults)) for k := range c.servers { ids = append(ids, k) } sort.Sort(int64Slice(ids)) idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) count := int64(0) var end bool var s []*ServerMetric for i, v := range ids[idx:] { if count == maxResults { break } if svr, ok := c.servers[v]; ok { ss = append(ss, svr) s = append(s, &ServerMetric{ ListenSockets: copyMap(svr.listenSockets), }) count++ } if i == len(ids[idx:])-1 { end = true break } } c.mu.RUnlock() if count == 0 { end = true } for 
i, svr := range ss { s[i].ServerData = svr.s.ChannelzMetric() s[i].ID = svr.id s[i].RefName = svr.refName } return s, end } func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { if maxResults <= 0 { maxResults = EntryPerPage } var svr *server var ok bool c.mu.RLock() if svr, ok = c.servers[id]; !ok { // server with id doesn't exist. c.mu.RUnlock() return nil, true } svrskts := svr.sockets l := int64(len(svrskts)) ids := make([]int64, 0, l) sks := make([]*normalSocket, 0, min(l, maxResults)) for k := range svrskts { ids = append(ids, k) } sort.Sort(int64Slice(ids)) idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) count := int64(0) var end bool for i, v := range ids[idx:] { if count == maxResults { break } if ns, ok := c.normalSockets[v]; ok { sks = append(sks, ns) count++ } if i == len(ids[idx:])-1 { end = true break } } c.mu.RUnlock() if count == 0 { end = true } var s []*SocketMetric for _, ns := range sks { sm := &SocketMetric{} sm.SocketData = ns.s.ChannelzMetric() sm.ID = ns.id sm.RefName = ns.refName s = append(s, sm) } return s, end } func (c *channelMap) GetChannel(id int64) *ChannelMetric { cm := &ChannelMetric{} var cn *channel var ok bool c.mu.RLock() if cn, ok = c.channels[id]; !ok { // channel with id doesn't exist. c.mu.RUnlock() return nil } cm.NestedChans = copyMap(cn.nestedChans) cm.SubChans = copyMap(cn.subChans) // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when // holding the lock to prevent potential data race. chanCopy := cn.c c.mu.RUnlock() cm.ChannelData = chanCopy.ChannelzMetric() cm.ID = cn.id cm.RefName = cn.refName cm.Trace = cn.trace.dumpData() return cm } func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { cm := &SubChannelMetric{} var sc *subChannel var ok bool c.mu.RLock() if sc, ok = c.subChannels[id]; !ok { // subchannel with id doesn't exist. 
c.mu.RUnlock() return nil } cm.Sockets = copyMap(sc.sockets) // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when // holding the lock to prevent potential data race. chanCopy := sc.c c.mu.RUnlock() cm.ChannelData = chanCopy.ChannelzMetric() cm.ID = sc.id cm.RefName = sc.refName cm.Trace = sc.trace.dumpData() return cm } func (c *channelMap) GetSocket(id int64) *SocketMetric { sm := &SocketMetric{} c.mu.RLock() if ls, ok := c.listenSockets[id]; ok { c.mu.RUnlock() sm.SocketData = ls.s.ChannelzMetric() sm.ID = ls.id sm.RefName = ls.refName return sm } if ns, ok := c.normalSockets[id]; ok { c.mu.RUnlock() sm.SocketData = ns.s.ChannelzMetric() sm.ID = ns.id sm.RefName = ns.refName return sm } c.mu.RUnlock() return nil } func (c *channelMap) GetServer(id int64) *ServerMetric { sm := &ServerMetric{} var svr *server var ok bool c.mu.RLock() if svr, ok = c.servers[id]; !ok { c.mu.RUnlock() return nil } sm.ListenSockets = copyMap(svr.listenSockets) c.mu.RUnlock() sm.ID = svr.id sm.RefName = svr.refName sm.ServerData = svr.s.ChannelzMetric() return sm } type idGenerator struct { id int64 } func (i *idGenerator) reset() { atomic.StoreInt64(&i.id, 0) } func (i *idGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } grpc-go-1.29.1/internal/channelz/logging.go000066400000000000000000000047151365033716300205530ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package channelz import ( "fmt" "google.golang.org/grpc/internal/grpclog" ) // Info logs through grpclog.Info and adds a trace event if channelz is on. func Info(id int64, args ...interface{}) { if IsOn() { AddTraceEvent(id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtINFO, }) } else { grpclog.InfoDepth(1, args...) } } // Infof logs through grpclog.Infof and adds a trace event if channelz is on. func Infof(id int64, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) if IsOn() { AddTraceEvent(id, 1, &TraceEventDesc{ Desc: msg, Severity: CtINFO, }) } else { grpclog.InfoDepth(1, msg) } } // Warning logs through grpclog.Warning and adds a trace event if channelz is on. func Warning(id int64, args ...interface{}) { if IsOn() { AddTraceEvent(id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, }) } else { grpclog.WarningDepth(1, args...) } } // Warningf logs through grpclog.Warningf and adds a trace event if channelz is on. func Warningf(id int64, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) if IsOn() { AddTraceEvent(id, 1, &TraceEventDesc{ Desc: msg, Severity: CtWarning, }) } else { grpclog.WarningDepth(1, msg) } } // Error logs through grpclog.Error and adds a trace event if channelz is on. func Error(id int64, args ...interface{}) { if IsOn() { AddTraceEvent(id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, }) } else { grpclog.ErrorDepth(1, args...) } } // Errorf logs through grpclog.Errorf and adds a trace event if channelz is on. func Errorf(id int64, format string, args ...interface{}) { msg := fmt.Sprintf(format, args...) if IsOn() { AddTraceEvent(id, 1, &TraceEventDesc{ Desc: msg, Severity: CtError, }) } else { grpclog.ErrorDepth(1, msg) } } grpc-go-1.29.1/internal/channelz/types.go000066400000000000000000000553041365033716300202710ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package channelz import ( "net" "sync" "sync/atomic" "time" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" ) // entry represents a node in the channelz database. type entry interface { // addChild adds a child e, whose channelz id is id to child list addChild(id int64, e entry) // deleteChild deletes a child with channelz id to be id from child list deleteChild(id int64) // triggerDelete tries to delete self from channelz database. However, if child // list is not empty, then deletion from the database is on hold until the last // child is deleted from database. triggerDelete() // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child // list is now empty. If both conditions are met, then delete self from database. deleteSelfIfReady() // getParentID returns parent ID of the entry. 0 value parent ID means no parent. getParentID() int64 } // dummyEntry is a fake entry to handle entry not found case. type dummyEntry struct { idNotFound int64 } func (d *dummyEntry) addChild(id int64, e entry) { // Note: It is possible for a normal program to reach here under race condition. // For example, there could be a race between ClientConn.Close() info being propagated // to addrConn and http2Client. ClientConn.Close() cancel the context and result // in http2Client to error. 
The error info is then caught by transport monitor // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, // the addrConn will create a new transport. And when registering the new transport in // channelz, its parent addrConn could have already been torn down and deleted // from channelz tracking, and thus reach the code here. grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) } func (d *dummyEntry) deleteChild(id int64) { // It is possible for a normal program to reach here under race condition. // Refer to the example described in addChild(). grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) } func (d *dummyEntry) triggerDelete() { grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) } func (*dummyEntry) deleteSelfIfReady() { // code should not reach here. deleteSelfIfReady is always called on an existing entry. } func (*dummyEntry) getParentID() int64 { return 0 } // ChannelMetric defines the info channelz provides for a specific Channel, which // includes ChannelInternalMetric and channelz-specific data, such as channelz id, // child list, etc. type ChannelMetric struct { // ID is the channelz id of this channel. ID int64 // RefName is the human readable reference string of this channel. RefName string // ChannelData contains channel internal metric reported by the channel through // ChannelzMetric(). ChannelData *ChannelInternalMetric // NestedChans tracks the nested channel type children of this channel in the format of // a map from nested channel channelz id to corresponding reference string. NestedChans map[int64]string // SubChans tracks the subchannel type children of this channel in the format of a // map from subchannel channelz id to corresponding reference string. 
SubChans map[int64]string // Sockets tracks the socket type children of this channel in the format of a map // from socket channelz id to corresponding reference string. // Note current grpc implementation doesn't allow channel having sockets directly, // therefore, this is field is unused. Sockets map[int64]string // Trace contains the most recent traced events. Trace *ChannelTrace } // SubChannelMetric defines the info channelz provides for a specific SubChannel, // which includes ChannelInternalMetric and channelz-specific data, such as // channelz id, child list, etc. type SubChannelMetric struct { // ID is the channelz id of this subchannel. ID int64 // RefName is the human readable reference string of this subchannel. RefName string // ChannelData contains subchannel internal metric reported by the subchannel // through ChannelzMetric(). ChannelData *ChannelInternalMetric // NestedChans tracks the nested channel type children of this subchannel in the format of // a map from nested channel channelz id to corresponding reference string. // Note current grpc implementation doesn't allow subchannel to have nested channels // as children, therefore, this field is unused. NestedChans map[int64]string // SubChans tracks the subchannel type children of this subchannel in the format of a // map from subchannel channelz id to corresponding reference string. // Note current grpc implementation doesn't allow subchannel to have subchannels // as children, therefore, this field is unused. SubChans map[int64]string // Sockets tracks the socket type children of this subchannel in the format of a map // from socket channelz id to corresponding reference string. Sockets map[int64]string // Trace contains the most recent traced events. Trace *ChannelTrace } // ChannelInternalMetric defines the struct that the implementor of Channel interface // should return from ChannelzMetric(). type ChannelInternalMetric struct { // current connectivity state of the channel. 
State connectivity.State // The target this channel originally tried to connect to. May be absent Target string // The number of calls started on the channel. CallsStarted int64 // The number of calls that have completed with an OK status. CallsSucceeded int64 // The number of calls that have a completed with a non-OK status. CallsFailed int64 // The last time a call was started on the channel. LastCallStartedTimestamp time.Time } // ChannelTrace stores traced events on a channel/subchannel and related info. type ChannelTrace struct { // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) EventNum int64 // CreationTime is the creation time of the trace. CreationTime time.Time // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the // oldest one) Events []*TraceEvent } // TraceEvent represent a single trace event type TraceEvent struct { // Desc is a simple description of the trace event. Desc string // Severity states the severity of this trace event. Severity Severity // Timestamp is the event time. Timestamp time.Time // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is // involved in this event. // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) RefID int64 // RefName is the reference name for the entity that gets referenced in the event. RefName string // RefType indicates the referenced entity type, i.e Channel or SubChannel. RefType RefChannelType } // Channel is the interface that should be satisfied in order to be tracked by // channelz as Channel or SubChannel. 
type Channel interface { ChannelzMetric() *ChannelInternalMetric } type dummyChannel struct{} func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { return &ChannelInternalMetric{} } type channel struct { refName string c Channel closeCalled bool nestedChans map[int64]string subChans map[int64]string id int64 pid int64 cm *channelMap trace *channelTrace // traceRefCount is the number of trace events that reference this channel. // Non-zero traceRefCount means the trace of this channel cannot be deleted. traceRefCount int32 } func (c *channel) addChild(id int64, e entry) { switch v := e.(type) { case *subChannel: c.subChans[id] = v.refName case *channel: c.nestedChans[id] = v.refName default: grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) } } func (c *channel) deleteChild(id int64) { delete(c.subChans, id) delete(c.nestedChans, id) c.deleteSelfIfReady() } func (c *channel) triggerDelete() { c.closeCalled = true c.deleteSelfIfReady() } func (c *channel) getParentID() int64 { return c.pid } // deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means // deleting the channel reference from its parent's child list. // // In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the // corresponding grpc object has been invoked, and the channel does not have any children left. // // The returned boolean value indicates whether the channel has been successfully deleted from tree. func (c *channel) deleteSelfFromTree() (deleted bool) { if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { return false } // not top channel if c.pid != 0 { c.cm.findEntry(c.pid).deleteChild(c.id) } return true } // deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means // deleting the channel from channelz's tracking entirely. Users can no longer use id to query the // channel, and its memory will be garbage collected. 
// // The trace reference count of the channel must be 0 in order to be deleted from the map. This is // specified in the channel tracing gRFC that as long as some other trace has reference to an entity, // the trace of the referenced entity must not be deleted. In order to release the resource allocated // by grpc, the reference to the grpc object is reset to a dummy object. // // deleteSelfFromMap must be called after deleteSelfFromTree returns true. // // It returns a bool to indicate whether the channel can be safely deleted from map. func (c *channel) deleteSelfFromMap() (delete bool) { if c.getTraceRefCount() != 0 { c.c = &dummyChannel{} return false } return true } // deleteSelfIfReady tries to delete the channel itself from the channelz database. // The delete process includes two steps: // 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its // parent's child list. // 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id // will return entry not found error. 
func (c *channel) deleteSelfIfReady() { if !c.deleteSelfFromTree() { return } if !c.deleteSelfFromMap() { return } c.cm.deleteEntry(c.id) c.trace.clear() } func (c *channel) getChannelTrace() *channelTrace { return c.trace } func (c *channel) incrTraceRefCount() { atomic.AddInt32(&c.traceRefCount, 1) } func (c *channel) decrTraceRefCount() { atomic.AddInt32(&c.traceRefCount, -1) } func (c *channel) getTraceRefCount() int { i := atomic.LoadInt32(&c.traceRefCount) return int(i) } func (c *channel) getRefName() string { return c.refName } type subChannel struct { refName string c Channel closeCalled bool sockets map[int64]string id int64 pid int64 cm *channelMap trace *channelTrace traceRefCount int32 } func (sc *subChannel) addChild(id int64, e entry) { if v, ok := e.(*normalSocket); ok { sc.sockets[id] = v.refName } else { grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) } } func (sc *subChannel) deleteChild(id int64) { delete(sc.sockets, id) sc.deleteSelfIfReady() } func (sc *subChannel) triggerDelete() { sc.closeCalled = true sc.deleteSelfIfReady() } func (sc *subChannel) getParentID() int64 { return sc.pid } // deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which // means deleting the subchannel reference from its parent's child list. // // In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of // the corresponding grpc object has been invoked, and the subchannel does not have any children left. // // The returned boolean value indicates whether the channel has been successfully deleted from tree. func (sc *subChannel) deleteSelfFromTree() (deleted bool) { if !sc.closeCalled || len(sc.sockets) != 0 { return false } sc.cm.findEntry(sc.pid).deleteChild(sc.id) return true } // deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means // deleting the subchannel from channelz's tracking entirely. 
Users can no longer use id to query // the subchannel, and its memory will be garbage collected. // // The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is // specified in the channel tracing gRFC that as long as some other trace has reference to an entity, // the trace of the referenced entity must not be deleted. In order to release the resource allocated // by grpc, the reference to the grpc object is reset to a dummy object. // // deleteSelfFromMap must be called after deleteSelfFromTree returns true. // // It returns a bool to indicate whether the channel can be safely deleted from map. func (sc *subChannel) deleteSelfFromMap() (delete bool) { if sc.getTraceRefCount() != 0 { // free the grpc struct (i.e. addrConn) sc.c = &dummyChannel{} return false } return true } // deleteSelfIfReady tries to delete the subchannel itself from the channelz database. // The delete process includes two steps: // 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from // its parent's child list. // 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup // by id will return entry not found error. func (sc *subChannel) deleteSelfIfReady() { if !sc.deleteSelfFromTree() { return } if !sc.deleteSelfFromMap() { return } sc.cm.deleteEntry(sc.id) sc.trace.clear() } func (sc *subChannel) getChannelTrace() *channelTrace { return sc.trace } func (sc *subChannel) incrTraceRefCount() { atomic.AddInt32(&sc.traceRefCount, 1) } func (sc *subChannel) decrTraceRefCount() { atomic.AddInt32(&sc.traceRefCount, -1) } func (sc *subChannel) getTraceRefCount() int { i := atomic.LoadInt32(&sc.traceRefCount) return int(i) } func (sc *subChannel) getRefName() string { return sc.refName } // SocketMetric defines the info channelz provides for a specific Socket, which // includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. 
type SocketMetric struct { // ID is the channelz id of this socket. ID int64 // RefName is the human readable reference string of this socket. RefName string // SocketData contains socket internal metric reported by the socket through // ChannelzMetric(). SocketData *SocketInternalMetric } // SocketInternalMetric defines the struct that the implementor of Socket interface // should return from ChannelzMetric(). type SocketInternalMetric struct { // The number of streams that have been started. StreamsStarted int64 // The number of streams that have ended successfully: // On client side, receiving frame with eos bit set. // On server side, sending frame with eos bit set. StreamsSucceeded int64 // The number of streams that have ended unsuccessfully: // On client side, termination without receiving frame with eos bit set. // On server side, termination without sending frame with eos bit set. StreamsFailed int64 // The number of messages successfully sent on this socket. MessagesSent int64 MessagesReceived int64 // The number of keep alives sent. This is typically implemented with HTTP/2 // ping messages. KeepAlivesSent int64 // The last time a stream was created by this endpoint. Usually unset for // servers. LastLocalStreamCreatedTimestamp time.Time // The last time a stream was created by the remote endpoint. Usually unset // for clients. LastRemoteStreamCreatedTimestamp time.Time // The last time a message was sent by this endpoint. LastMessageSentTimestamp time.Time // The last time a message was received by this endpoint. LastMessageReceivedTimestamp time.Time // The amount of window, granted to the local endpoint by the remote endpoint. // This may be slightly out of date due to network latency. This does NOT // include stream level or TCP level flow control info. LocalFlowControlWindow int64 // The amount of window, granted to the remote endpoint by the local endpoint. // This may be slightly out of date due to network latency. 
This does NOT // include stream level or TCP level flow control info. RemoteFlowControlWindow int64 // The locally bound address. LocalAddr net.Addr // The remote bound address. May be absent. RemoteAddr net.Addr // Optional, represents the name of the remote endpoint, if different than // the original target name. RemoteName string SocketOptions *SocketOptionData Security credentials.ChannelzSecurityValue } // Socket is the interface that should be satisfied in order to be tracked by // channelz as Socket. type Socket interface { ChannelzMetric() *SocketInternalMetric } type listenSocket struct { refName string s Socket id int64 pid int64 cm *channelMap } func (ls *listenSocket) addChild(id int64, e entry) { grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) } func (ls *listenSocket) deleteChild(id int64) { grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) } func (ls *listenSocket) triggerDelete() { ls.cm.deleteEntry(ls.id) ls.cm.findEntry(ls.pid).deleteChild(ls.id) } func (ls *listenSocket) deleteSelfIfReady() { grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") } func (ls *listenSocket) getParentID() int64 { return ls.pid } type normalSocket struct { refName string s Socket id int64 pid int64 cm *channelMap } func (ns *normalSocket) addChild(id int64, e entry) { grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) } func (ns *normalSocket) deleteChild(id int64) { grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) } func (ns *normalSocket) triggerDelete() { ns.cm.deleteEntry(ns.id) ns.cm.findEntry(ns.pid).deleteChild(ns.id) } func (ns *normalSocket) deleteSelfIfReady() { grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") } func (ns *normalSocket) getParentID() int64 { return ns.pid } // ServerMetric defines the info channelz provides for a specific Server, which // includes ServerInternalMetric and channelz-specific 
data, such as channelz id, // child list, etc. type ServerMetric struct { // ID is the channelz id of this server. ID int64 // RefName is the human readable reference string of this server. RefName string // ServerData contains server internal metric reported by the server through // ChannelzMetric(). ServerData *ServerInternalMetric // ListenSockets tracks the listener socket type children of this server in the // format of a map from socket channelz id to corresponding reference string. ListenSockets map[int64]string } // ServerInternalMetric defines the struct that the implementor of Server interface // should return from ChannelzMetric(). type ServerInternalMetric struct { // The number of incoming calls started on the server. CallsStarted int64 // The number of incoming calls that have completed with an OK status. CallsSucceeded int64 // The number of incoming calls that have a completed with a non-OK status. CallsFailed int64 // The last time a call was started on the server. LastCallStartedTimestamp time.Time } // Server is the interface to be satisfied in order to be tracked by channelz as // Server. 
type Server interface { ChannelzMetric() *ServerInternalMetric } type server struct { refName string s Server closeCalled bool sockets map[int64]string listenSockets map[int64]string id int64 cm *channelMap } func (s *server) addChild(id int64, e entry) { switch v := e.(type) { case *normalSocket: s.sockets[id] = v.refName case *listenSocket: s.listenSockets[id] = v.refName default: grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) } } func (s *server) deleteChild(id int64) { delete(s.sockets, id) delete(s.listenSockets, id) s.deleteSelfIfReady() } func (s *server) triggerDelete() { s.closeCalled = true s.deleteSelfIfReady() } func (s *server) deleteSelfIfReady() { if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { return } s.cm.deleteEntry(s.id) } func (s *server) getParentID() int64 { return 0 } type tracedChannel interface { getChannelTrace() *channelTrace incrTraceRefCount() decrTraceRefCount() getRefName() string } type channelTrace struct { cm *channelMap createdTime time.Time eventCount int64 mu sync.Mutex events []*TraceEvent } func (c *channelTrace) append(e *TraceEvent) { c.mu.Lock() if len(c.events) == getMaxTraceEntry() { del := c.events[0] c.events = c.events[1:] if del.RefID != 0 { // start recursive cleanup in a goroutine to not block the call originated from grpc. go func() { // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. c.cm.mu.Lock() c.cm.decrTraceRefCount(del.RefID) c.cm.mu.Unlock() }() } } e.Timestamp = time.Now() c.events = append(c.events, e) c.eventCount++ c.mu.Unlock() } func (c *channelTrace) clear() { c.mu.Lock() for _, e := range c.events { if e.RefID != 0 { // caller should have already held the c.cm.mu lock. c.cm.decrTraceRefCount(e.RefID) } } c.mu.Unlock() } // Severity is the severity level of a trace event. 
// The canonical enumeration of all valid values is here: // https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. type Severity int const ( // CtUNKNOWN indicates unknown severity of a trace event. CtUNKNOWN Severity = iota // CtINFO indicates info level severity of a trace event. CtINFO // CtWarning indicates warning level severity of a trace event. CtWarning // CtError indicates error level severity of a trace event. CtError ) // RefChannelType is the type of the entity being referenced in a trace event. type RefChannelType int const ( // RefChannel indicates the referenced entity is a Channel. RefChannel RefChannelType = iota // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel ) func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} ct.Events = c.events[:len(c.events)] c.mu.Unlock() return ct } grpc-go-1.29.1/internal/channelz/types_linux.go000066400000000000000000000031241365033716300215010ustar00rootroot00000000000000// +build !appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package channelz import ( "syscall" "golang.org/x/sys/unix" ) // SocketOptionData defines the struct to hold socket option data, and related // getter function to obtain info from fd. 
type SocketOptionData struct { Linger *unix.Linger RecvTimeout *unix.Timeval SendTimeout *unix.Timeval TCPInfo *unix.TCPInfo } // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). func (s *SocketOptionData) Getsockopt(fd uintptr) { if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { s.Linger = v } if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { s.RecvTimeout = v } if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { s.SendTimeout = v } if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { s.TCPInfo = v } } grpc-go-1.29.1/internal/channelz/types_nonlinux.go000066400000000000000000000023721365033716300222200ustar00rootroot00000000000000// +build !linux appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package channelz import ( "sync" "google.golang.org/grpc/grpclog" ) var once sync.Once // SocketOptionData defines the struct to hold socket option data, and related // getter function to obtain info from fd. // Windows OS doesn't support Socket Option type SocketOptionData struct { } // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). 
// Windows OS doesn't support Socket Option func (s *SocketOptionData) Getsockopt(fd uintptr) { once.Do(func() { grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.") }) } grpc-go-1.29.1/internal/channelz/util_linux.go000066400000000000000000000017451365033716300213210ustar00rootroot00000000000000// +build linux,!appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package channelz import ( "syscall" ) // GetSocketOption gets the socket option info of the conn. func GetSocketOption(socket interface{}) *SocketOptionData { c, ok := socket.(syscall.Conn) if !ok { return nil } data := &SocketOptionData{} if rawConn, err := c.SyscallConn(); err == nil { rawConn.Control(data.Getsockopt) return data } return nil } grpc-go-1.29.1/internal/channelz/util_nonlinux.go000066400000000000000000000014141365033716300220250ustar00rootroot00000000000000// +build !linux appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package channelz // GetSocketOption gets the socket option info of the conn. func GetSocketOption(c interface{}) *SocketOptionData { return nil } grpc-go-1.29.1/internal/channelz/util_test.go000066400000000000000000000065051365033716300211400ustar00rootroot00000000000000// +build linux,go1.10,!appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // The test in this file should be run in an environment that has go1.10 or later, // as the function SyscallConn() (required to get socket option) was introduced // to net.TCPListener in go1.10. 
package channelz_test import ( "net" "reflect" "syscall" "testing" "golang.org/x/sys/unix" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func (s) TestGetSocketOpt(t *testing.T) { network, addr := "tcp", ":0" ln, err := net.Listen(network, addr) if err != nil { t.Fatalf("net.Listen(%s,%s) failed with err: %v", network, addr, err) } defer ln.Close() go func() { ln.Accept() }() conn, _ := net.Dial(network, ln.Addr().String()) defer conn.Close() tcpc := conn.(*net.TCPConn) raw, err := tcpc.SyscallConn() if err != nil { t.Fatalf("SyscallConn() failed due to %v", err) } l := &unix.Linger{Onoff: 1, Linger: 5} recvTimout := &unix.Timeval{Sec: 100} sendTimeout := &unix.Timeval{Sec: 8888} raw.Control(func(fd uintptr) { err := unix.SetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, l) if err != nil { t.Fatalf("failed to SetsockoptLinger(%v,%v,%v,%v) due to %v", int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, l, err) } err = unix.SetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, recvTimout) if err != nil { t.Fatalf("failed to SetsockoptTimeval(%v,%v,%v,%v) due to %v", int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, recvTimout, err) } err = unix.SetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, sendTimeout) if err != nil { t.Fatalf("failed to SetsockoptTimeval(%v,%v,%v,%v) due to %v", int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, sendTimeout, err) } }) sktopt := channelz.GetSocketOption(conn) if !reflect.DeepEqual(sktopt.Linger, l) { t.Fatalf("get socket option linger, want: %v, got %v", l, sktopt.Linger) } if !reflect.DeepEqual(sktopt.RecvTimeout, recvTimout) { t.Logf("get socket option recv timeout, want: %v, got %v, may be caused by system allowing non or partial setting of this value", recvTimout, sktopt.RecvTimeout) } if !reflect.DeepEqual(sktopt.SendTimeout, sendTimeout) { t.Logf("get 
socket option send timeout, want: %v, got %v, may be caused by system allowing non or partial setting of this value", sendTimeout, sktopt.SendTimeout) } if sktopt == nil || sktopt.TCPInfo != nil && sktopt.TCPInfo.State != 1 { t.Fatalf("TCPInfo.State want 1 (TCP_ESTABLISHED), got %v", sktopt) } sktopt = channelz.GetSocketOption(ln) if sktopt == nil || sktopt.TCPInfo == nil || sktopt.TCPInfo.State != 10 { t.Fatalf("TCPInfo.State want 10 (TCP_LISTEN), got %v", sktopt) } } grpc-go-1.29.1/internal/envconfig/000077500000000000000000000000001365033716300167435ustar00rootroot00000000000000grpc-go-1.29.1/internal/envconfig/envconfig.go000066400000000000000000000022071365033716300212510ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package envconfig contains grpc settings configured by environment variables. package envconfig import ( "os" "strings" ) const ( prefix = "GRPC_GO_" retryStr = prefix + "RETRY" txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" ) var ( // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". Retry = strings.EqualFold(os.Getenv(retryStr), "on") // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). 
TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) grpc-go-1.29.1/internal/grpclog/000077500000000000000000000000001365033716300164225ustar00rootroot00000000000000grpc-go-1.29.1/internal/grpclog/grpclog.go000066400000000000000000000113661365033716300204150ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package grpclog (internal) defines depth logging for grpc. package grpclog // Logger is the logger used for the non-depth log functions. var Logger LoggerV2 // DepthLogger is the logger used for the depth log functions. var DepthLogger DepthLoggerV2 // InfoDepth logs to the INFO log at the specified depth. func InfoDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { Logger.Info(args...) } } // WarningDepth logs to the WARNING log at the specified depth. func WarningDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { Logger.Warning(args...) } } // ErrorDepth logs to the ERROR log at the specified depth. func ErrorDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { Logger.Error(args...) } } // FatalDepth logs to the FATAL log at the specified depth. func FatalDepth(depth int, args ...interface{}) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { Logger.Fatal(args...)
} } // LoggerV2 does underlying logging work for grpclog. // This is a copy of the LoggerV2 defined in the external grpclog package. It // is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. Info(args ...interface{}) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. Infoln(args ...interface{}) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. Infof(format string, args ...interface{}) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. Warning(args ...interface{}) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. Warningln(args ...interface{}) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. Warningf(format string, args ...interface{}) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. Error(args ...interface{}) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. Errorln(args ...interface{}) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. Errorf(format string, args ...interface{}) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. Fatal(args ...interface{}) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. Fatalln(args ...interface{}) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. 
Fatalf(format string, args ...interface{}) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. // This is a copy of the DepthLoggerV2 defined in the external grpclog package. // It is defined here to avoid a circular dependency. // // This API is EXPERIMENTAL. type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. InfoDepth(depth int, args ...interface{}) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. WarningDepth(depth int, args ...interface{}) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. ErrorDepth(depth int, args ...interface{}) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. FatalDepth(depth int, args ...interface{}) } grpc-go-1.29.1/internal/grpclog/prefixLogger.go000066400000000000000000000032451365033716300214120ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpclog // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix.
type PrefixLogger struct { prefix string } // Infof does info logging. func (pl *PrefixLogger) Infof(format string, args ...interface{}) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. format = pl.prefix + format } Logger.Infof(format, args...) } // Warningf does warning logging. func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { if pl != nil { format = pl.prefix + format } Logger.Warningf(format, args...) } // Errorf does error logging. func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { if pl != nil { format = pl.prefix + format } Logger.Errorf(format, args...) } // Debugf does info logging at verbose level 2. func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { if Logger.V(2) { pl.Infof(format, args...) } } // NewPrefixLogger creates a prefix logger with the given prefix. func NewPrefixLogger(prefix string) *PrefixLogger { return &PrefixLogger{prefix: prefix} } grpc-go-1.29.1/internal/grpcrand/000077500000000000000000000000001365033716300165655ustar00rootroot00000000000000grpc-go-1.29.1/internal/grpcrand/grpcrand.go000066400000000000000000000024641365033716300207220ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package grpcrand implements math/rand functions in a concurrent-safe way // with a global random source, independent of math/rand's global source. 
package grpcrand import ( "math/rand" "sync" "time" ) var ( r = rand.New(rand.NewSource(time.Now().UnixNano())) mu sync.Mutex ) // Int63n implements rand.Int63n on the grpcrand global source. func Int63n(n int64) int64 { mu.Lock() res := r.Int63n(n) mu.Unlock() return res } // Intn implements rand.Intn on the grpcrand global source. func Intn(n int) int { mu.Lock() res := r.Intn(n) mu.Unlock() return res } // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() res := r.Float64() mu.Unlock() return res } grpc-go-1.29.1/internal/grpcsync/000077500000000000000000000000001365033716300166155ustar00rootroot00000000000000grpc-go-1.29.1/internal/grpcsync/event.go000066400000000000000000000030621365033716300202660ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package grpcsync implements additional synchronization primitives built upon // the sync package. package grpcsync import ( "sync" "sync/atomic" ) // Event represents a one-time event that may occur in the future. type Event struct { fired int32 c chan struct{} o sync.Once } // Fire causes e to complete. It is safe to call multiple times, and // concurrently. It returns true iff this call to Fire caused the signaling // channel returned by Done to close. 
func (e *Event) Fire() bool { ret := false e.o.Do(func() { atomic.StoreInt32(&e.fired, 1) close(e.c) ret = true }) return ret } // Done returns a channel that will be closed when Fire is called. func (e *Event) Done() <-chan struct{} { return e.c } // HasFired returns true if Fire has been called. func (e *Event) HasFired() bool { return atomic.LoadInt32(&e.fired) == 1 } // NewEvent returns a new, ready-to-use Event. func NewEvent() *Event { return &Event{c: make(chan struct{})} } grpc-go-1.29.1/internal/grpcsync/event_test.go000066400000000000000000000032411365033716300213240ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpcsync import ( "testing" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func (s) TestEventHasFired(t *testing.T) { e := NewEvent() if e.HasFired() { t.Fatal("e.HasFired() = true; want false") } if !e.Fire() { t.Fatal("e.Fire() = false; want true") } if !e.HasFired() { t.Fatal("e.HasFired() = false; want true") } } func (s) TestEventDoneChannel(t *testing.T) { e := NewEvent() select { case <-e.Done(): t.Fatal("e.HasFired() = true; want false") default: } if !e.Fire() { t.Fatal("e.Fire() = false; want true") } select { case <-e.Done(): default: t.Fatal("e.HasFired() = false; want true") } } func (s) TestEventMultipleFires(t *testing.T) { e := NewEvent() if e.HasFired() { t.Fatal("e.HasFired() = true; want false") } if !e.Fire() { t.Fatal("e.Fire() = false; want true") } for i := 0; i < 3; i++ { if !e.HasFired() { t.Fatal("e.HasFired() = false; want true") } if e.Fire() { t.Fatal("e.Fire() = true; want false") } } } grpc-go-1.29.1/internal/grpctest/000077500000000000000000000000001365033716300166205ustar00rootroot00000000000000grpc-go-1.29.1/internal/grpctest/example_test.go000066400000000000000000000024241365033716300216430ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpctest_test import ( "testing" "google.golang.org/grpc/internal/grpctest" ) type s struct { i int } func (s *s) Setup(t *testing.T) { t.Log("Per-test setup code") s.i = 5 } func (s *s) TestSomething(t *testing.T) { t.Log("TestSomething") if s.i != 5 { t.Errorf("s.i = %v; want 5", s.i) } s.i = 3 } func (s *s) TestSomethingElse(t *testing.T) { t.Log("TestSomethingElse") if got, want := s.i%4, 1; got != want { t.Errorf("s.i %% 4 = %v; want %v", got, want) } s.i = 3 } func (s *s) Teardown(t *testing.T) { t.Log("Per-test teardown code") if s.i != 3 { t.Fatalf("s.i = %v; want 3", s.i) } } func TestExample(t *testing.T) { grpctest.RunSubTests(t, &s{}) } grpc-go-1.29.1/internal/grpctest/grpctest.go000066400000000000000000000056141365033716300210100ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package grpctest implements testing helpers. package grpctest import ( "reflect" "strings" "sync/atomic" "testing" "google.golang.org/grpc/internal/leakcheck" ) var lcFailed uint32 type errorer struct { t *testing.T } func (e errorer) Errorf(format string, args ...interface{}) { atomic.StoreUint32(&lcFailed, 1) e.t.Errorf(format, args...) } // Tester is an implementation of the x interface parameter to // grpctest.RunSubTests with default Setup and Teardown behavior. Setup updates // the tlogger and Teardown performs a leak check. Embed in a struct with tests // defined to use. 
type Tester struct{} // Setup updates the tlogger. func (Tester) Setup(t *testing.T) { TLogger.Update(t) } // Teardown performs a leak check. func (Tester) Teardown(t *testing.T) { if atomic.LoadUint32(&lcFailed) == 1 { return } leakcheck.Check(errorer{t: t}) if atomic.LoadUint32(&lcFailed) == 1 { t.Log("Leak check disabled for future tests") } TLogger.EndTest(t) } func getTestFunc(t *testing.T, xv reflect.Value, name string) func(*testing.T) { if m := xv.MethodByName(name); m.IsValid() { if f, ok := m.Interface().(func(*testing.T)); ok { return f } // Method exists but has the wrong type signature. t.Fatalf("grpctest: function %v has unexpected signature (%T)", name, m.Interface()) } return func(*testing.T) {} } // RunSubTests runs all "Test___" functions that are methods of x as subtests // of the current test. If x contains methods "Setup(*testing.T)" or // "Teardown(*testing.T)", those are run before or after each of the test // functions, respectively. // // For example usage, see example_test.go. Run it using: // $ go test -v -run TestExample . // // To run a specific test/subtest: // $ go test -v -run 'TestExample/^Something$' . func RunSubTests(t *testing.T, x interface{}) { xt := reflect.TypeOf(x) xv := reflect.ValueOf(x) setup := getTestFunc(t, xv, "Setup") teardown := getTestFunc(t, xv, "Teardown") for i := 0; i < xt.NumMethod(); i++ { methodName := xt.Method(i).Name if !strings.HasPrefix(methodName, "Test") { continue } tfunc := getTestFunc(t, xv, methodName) t.Run(strings.TrimPrefix(methodName, "Test"), func(t *testing.T) { setup(t) // defer teardown to guarantee it is run even if tfunc uses t.Fatal() defer teardown(t) tfunc(t) }) } } grpc-go-1.29.1/internal/grpctest/grpctest_test.go000066400000000000000000000026511365033716300220450ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpctest import ( "reflect" "testing" ) type tRunST struct { setup, test, teardown bool } func (t *tRunST) Setup(*testing.T) { t.setup = true } func (t *tRunST) TestSubTest(*testing.T) { t.test = true } func (t *tRunST) Teardown(*testing.T) { t.teardown = true } func TestRunSubTests(t *testing.T) { x := &tRunST{} RunSubTests(t, x) if want := (&tRunST{setup: true, test: true, teardown: true}); !reflect.DeepEqual(x, want) { t.Fatalf("x = %v; want all fields true", x) } } type tNoST struct { test bool } func (t *tNoST) TestSubTest(*testing.T) { t.test = true } func TestNoSetupOrTeardown(t *testing.T) { // Ensures nothing panics or fails if Setup/Teardown are omitted. x := &tNoST{} RunSubTests(t, x) if want := (&tNoST{test: true}); !reflect.DeepEqual(x, want) { t.Fatalf("x = %v; want %v", x, want) } } grpc-go-1.29.1/internal/grpctest/tlogger.go000066400000000000000000000133331365033716300206150ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpctest import ( "errors" "fmt" "os" "regexp" "runtime/debug" "strconv" "strings" "sync" "testing" "google.golang.org/grpc/grpclog" ) // TLogger serves as the grpclog logger and is the interface through which // expected errors are declared in tests. var TLogger *tLogger const callingFrame = 4 type logType int const ( logLog logType = iota errorLog fatalLog ) type tLogger struct { v int t *testing.T initialized bool m sync.Mutex // protects errors errors map[*regexp.Regexp]int } func init() { TLogger = &tLogger{errors: map[*regexp.Regexp]int{}} vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") if vl, err := strconv.Atoi(vLevel); err == nil { TLogger.v = vl } } // getStackFrame gets, from the stack byte string, the appropriate stack frame. func getStackFrame(stack []byte, frame int) (string, error) { s := strings.Split(string(stack), "\n") if frame >= (len(s)-1)/2 { return "", errors.New("frame request out-of-bounds") } split := strings.Split(strings.Fields(s[(frame*2)+2][1:])[0], "/") return fmt.Sprintf("%v:", split[len(split)-1]), nil } // log logs the message with the specified parameters to the tLogger. func (g *tLogger) log(ltype logType, depth int, format string, args ...interface{}) { s := debug.Stack() prefix, err := getStackFrame(s, callingFrame+depth) args = append([]interface{}{prefix}, args...) if err != nil { g.t.Error(err) return } if format == "" { switch ltype { case errorLog: // fmt.Sprintln is used rather than fmt.Sprint because t.Log uses fmt.Sprintln behavior. if g.expected(fmt.Sprintln(args...)) { g.t.Log(args...) } else { g.t.Error(args...) } case fatalLog: panic(fmt.Sprint(args...)) default: g.t.Log(args...) } } else { format = "%v " + format switch ltype { case errorLog: if g.expected(fmt.Sprintf(format, args...)) { g.t.Logf(format, args...) } else { g.t.Errorf(format, args...) } case fatalLog: panic(fmt.Sprintf(format, args...)) default: g.t.Logf(format, args...) 
} } } // Update updates the testing.T that the testing logger logs to. Should be done // before every test. It also initializes the tLogger if it has not already. func (g *tLogger) Update(t *testing.T) { if !g.initialized { grpclog.SetLoggerV2(TLogger) g.initialized = true } g.t = t g.m.Lock() defer g.m.Unlock() g.errors = map[*regexp.Regexp]int{} } // ExpectError declares an error to be expected. For the next test, the first // error log matching the expression (using FindString) will not cause the test // to fail. "For the next test" includes all the time until the next call to // Update(). Note that if an expected error is not encountered, this will cause // the test to fail. func (g *tLogger) ExpectError(expr string) { g.ExpectErrorN(expr, 1) } // ExpectErrorN declares an error to be expected n times. func (g *tLogger) ExpectErrorN(expr string, n int) { re, err := regexp.Compile(expr) if err != nil { g.t.Error(err) return } g.m.Lock() defer g.m.Unlock() g.errors[re] += n } // EndTest checks if expected errors were not encountered. func (g *tLogger) EndTest(t *testing.T) { g.m.Lock() defer g.m.Unlock() for re, count := range g.errors { if count > 0 { t.Errorf("Expected error '%v' not encountered", re.String()) } } g.errors = map[*regexp.Regexp]int{} } // expected determines if the error string is protected or not. func (g *tLogger) expected(s string) bool { g.m.Lock() defer g.m.Unlock() for re, count := range g.errors { if re.FindStringIndex(s) != nil { g.errors[re]-- if count <= 1 { delete(g.errors, re) } return true } } return false } func (g *tLogger) Info(args ...interface{}) { g.log(logLog, 0, "", args...) } func (g *tLogger) Infoln(args ...interface{}) { g.log(logLog, 0, "", args...) } func (g *tLogger) Infof(format string, args ...interface{}) { g.log(logLog, 0, format, args...) } func (g *tLogger) InfoDepth(depth int, args ...interface{}) { g.log(logLog, depth, "", args...) } func (g *tLogger) Warning(args ...interface{}) { g.log(logLog, 0, "", args...) 
} func (g *tLogger) Warningln(args ...interface{}) { g.log(logLog, 0, "", args...) } func (g *tLogger) Warningf(format string, args ...interface{}) { g.log(logLog, 0, format, args...) } func (g *tLogger) WarningDepth(depth int, args ...interface{}) { g.log(logLog, depth, "", args...) } func (g *tLogger) Error(args ...interface{}) { g.log(errorLog, 0, "", args...) } func (g *tLogger) Errorln(args ...interface{}) { g.log(errorLog, 0, "", args...) } func (g *tLogger) Errorf(format string, args ...interface{}) { g.log(errorLog, 0, format, args...) } func (g *tLogger) ErrorDepth(depth int, args ...interface{}) { g.log(errorLog, depth, "", args...) } func (g *tLogger) Fatal(args ...interface{}) { g.log(fatalLog, 0, "", args...) } func (g *tLogger) Fatalln(args ...interface{}) { g.log(fatalLog, 0, "", args...) } func (g *tLogger) Fatalf(format string, args ...interface{}) { g.log(fatalLog, 0, format, args...) } func (g *tLogger) FatalDepth(depth int, args ...interface{}) { g.log(fatalLog, depth, "", args...) } func (g *tLogger) V(l int) bool { return l <= g.v } grpc-go-1.29.1/internal/grpctest/tlogger_test.go000066400000000000000000000036501365033716300216550ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpctest import ( "testing" "google.golang.org/grpc/grpclog" grpclogi "google.golang.org/grpc/internal/grpclog" ) type s struct { Tester } func Test(t *testing.T) { RunSubTests(t, s{}) } func (s) TestInfo(t *testing.T) { grpclog.Info("Info", "message.") } func (s) TestInfoln(t *testing.T) { grpclog.Infoln("Info", "message.") } func (s) TestInfof(t *testing.T) { grpclog.Infof("%v %v.", "Info", "message") } func (s) TestInfoDepth(t *testing.T) { grpclogi.InfoDepth(0, "Info", "depth", "message.") } func (s) TestWarning(t *testing.T) { grpclog.Warning("Warning", "message.") } func (s) TestWarningln(t *testing.T) { grpclog.Warningln("Warning", "message.") } func (s) TestWarningf(t *testing.T) { grpclog.Warningf("%v %v.", "Warning", "message") } func (s) TestWarningDepth(t *testing.T) { grpclogi.WarningDepth(0, "Warning", "depth", "message.") } func (s) TestError(t *testing.T) { const numErrors = 10 TLogger.ExpectError("Expected error") TLogger.ExpectError("Expected ln error") TLogger.ExpectError("Expected formatted error") TLogger.ExpectErrorN("Expected repeated error", numErrors) grpclog.Error("Expected", "error") grpclog.Errorln("Expected", "ln", "error") grpclog.Errorf("%v %v %v", "Expected", "formatted", "error") for i := 0; i < numErrors; i++ { grpclog.Error("Expected repeated error") } } grpc-go-1.29.1/internal/grpcutil/000077500000000000000000000000001365033716300166165ustar00rootroot00000000000000grpc-go-1.29.1/internal/grpcutil/target.go000066400000000000000000000030441365033716300204340ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package grpcutil provides a bunch of utility functions to be used across the // gRPC codebase. package grpcutil import ( "strings" "google.golang.org/grpc/resolver" ) // split2 returns the values from strings.SplitN(s, sep, 2). // If sep is not found, it returns ("", "", false) instead. func split2(s, sep string) (string, string, bool) { spl := strings.SplitN(s, sep, 2) if len(spl) < 2 { return "", "", false } return spl[0], spl[1], true } // ParseTarget splits target into a resolver.Target struct containing scheme, // authority and endpoint. // // If target is not a valid scheme://authority/endpoint, it returns {Endpoint: // target}. func ParseTarget(target string) (ret resolver.Target) { var ok bool ret.Scheme, ret.Endpoint, ok = split2(target, "://") if !ok { return resolver.Target{Endpoint: target} } ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") if !ok { return resolver.Target{Endpoint: target} } return ret } grpc-go-1.29.1/internal/grpcutil/target_test.go000066400000000000000000000073711365033716300215020ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package grpcutil import ( "testing" "google.golang.org/grpc/resolver" ) func TestParseTarget(t *testing.T) { for _, test := range []resolver.Target{ {Scheme: "dns", Authority: "", Endpoint: "google.com"}, {Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com"}, {Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com/?a=b"}, {Scheme: "passthrough", Authority: "", Endpoint: "/unix/socket/address"}, } { str := test.Scheme + "://" + test.Authority + "/" + test.Endpoint got := ParseTarget(str) if got != test { t.Errorf("ParseTarget(%q) = %+v, want %+v", str, got, test) } } } func TestParseTargetString(t *testing.T) { for _, test := range []struct { targetStr string want resolver.Target }{ {targetStr: "", want: resolver.Target{Scheme: "", Authority: "", Endpoint: ""}}, {targetStr: ":///", want: resolver.Target{Scheme: "", Authority: "", Endpoint: ""}}, {targetStr: "a:///", want: resolver.Target{Scheme: "a", Authority: "", Endpoint: ""}}, {targetStr: "://a/", want: resolver.Target{Scheme: "", Authority: "a", Endpoint: ""}}, {targetStr: ":///a", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a"}}, {targetStr: "a://b/", want: resolver.Target{Scheme: "a", Authority: "b", Endpoint: ""}}, {targetStr: "a:///b", want: resolver.Target{Scheme: "a", Authority: "", Endpoint: "b"}}, {targetStr: "://a/b", want: resolver.Target{Scheme: "", Authority: "a", Endpoint: "b"}}, {targetStr: "a://b/c", want: resolver.Target{Scheme: "a", Authority: "b", Endpoint: "c"}}, {targetStr: "dns:///google.com", want: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "google.com"}}, {targetStr: "dns://a.server.com/google.com", want: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com"}}, {targetStr: "dns://a.server.com/google.com/?a=b", want: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com/?a=b"}}, 
{targetStr: "/", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "/"}}, {targetStr: "google.com", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "google.com"}}, {targetStr: "google.com/?a=b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "google.com/?a=b"}}, {targetStr: "/unix/socket/address", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "/unix/socket/address"}}, // If we can only parse part of the target. {targetStr: "://", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "://"}}, {targetStr: "unix://domain", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "unix://domain"}}, {targetStr: "a:b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a:b"}}, {targetStr: "a/b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a/b"}}, {targetStr: "a:/b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a:/b"}}, {targetStr: "a//b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a//b"}}, {targetStr: "a://b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a://b"}}, } { got := ParseTarget(test.targetStr) if got != test.want { t.Errorf("ParseTarget(%q) = %+v, want %+v", test.targetStr, got, test.want) } } } grpc-go-1.29.1/internal/internal.go000066400000000000000000000054601365033716300171350ustar00rootroot00000000000000/* * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ // Package internal contains gRPC-internal code, to avoid polluting // the godoc of the top-level grpc package. It must not import any grpc // symbols to avoid circular dependencies. package internal import ( "context" "time" "google.golang.org/grpc/connectivity" ) var ( // WithHealthCheckFunc is set by dialoptions.go WithHealthCheckFunc interface{} // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. BalancerUnregister func(name string) // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second // NewRequestInfoContext creates a new context based on the argument context attaching // the passed in RequestInfo to the new context. NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context // ParseServiceConfigForTesting is for creating a fake // ClientConn for resolver testing only ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult ) // HealthChecker defines the signature of the client-side LB channel health checking function. // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report // it's health back by calling setConnectivityState(). // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. 
CredsBundleModeFallback = "fallback" // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer // mode. CredsBundleModeBalancer = "balancer" // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) grpc-go-1.29.1/internal/leakcheck/000077500000000000000000000000001365033716300166775ustar00rootroot00000000000000grpc-go-1.29.1/internal/leakcheck/leakcheck.go000066400000000000000000000057641365033716300211540ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package leakcheck contains functions to check leaked goroutines. // // Call "defer leakcheck.Check(t)" at the beginning of tests. package leakcheck import ( "runtime" "sort" "strings" "time" ) var goroutinesToIgnore = []string{ "testing.Main(", "testing.tRunner(", "testing.(*M).", "runtime.goexit", "created by runtime.gc", "created by runtime/trace.Start", "interestingGoroutines", "runtime.MHeap_Scavenger", "signal.signal_recv", "sigterm.handler", "runtime_mcall", "(*loggingT).flushDaemon", "goroutine in C code", } // RegisterIgnoreGoroutine appends s into the ignore goroutine list. The // goroutines whose stack trace contains s will not be identified as leaked // goroutines. Not thread-safe, only call this function in init(). 
func RegisterIgnoreGoroutine(s string) { goroutinesToIgnore = append(goroutinesToIgnore, s) } func ignore(g string) bool { sl := strings.SplitN(g, "\n", 2) if len(sl) != 2 { return true } stack := strings.TrimSpace(sl[1]) if strings.HasPrefix(stack, "testing.RunTests") { return true } if stack == "" { return true } for _, s := range goroutinesToIgnore { if strings.Contains(stack, s) { return true } } return false } // interestingGoroutines returns all goroutines we care about for the purpose of // leak checking. It excludes testing or runtime ones. func interestingGoroutines() (gs []string) { buf := make([]byte, 2<<20) buf = buf[:runtime.Stack(buf, true)] for _, g := range strings.Split(string(buf), "\n\n") { if !ignore(g) { gs = append(gs, g) } } sort.Strings(gs) return } // Errorfer is the interface that wraps the Errorf method. It's a subset of // testing.TB to make it easy to use Check. type Errorfer interface { Errorf(format string, args ...interface{}) } func check(efer Errorfer, timeout time.Duration) { // Loop, waiting for goroutines to shut down. // Wait up to timeout, but finish as quickly as possible. deadline := time.Now().Add(timeout) var leaked []string for time.Now().Before(deadline) { if leaked = interestingGoroutines(); len(leaked) == 0 { return } time.Sleep(50 * time.Millisecond) } for _, g := range leaked { efer.Errorf("Leaked goroutine: %v", g) } } // Check looks at the currently-running goroutines and checks if there are any // interesting (created by gRPC) goroutines leaked. It waits up to 10 seconds // in the error cases. func Check(efer Errorfer) { check(efer, 10*time.Second) } grpc-go-1.29.1/internal/leakcheck/leakcheck_test.go000066400000000000000000000036571365033716300222120ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package leakcheck import ( "fmt" "strings" "testing" "time" ) type testErrorfer struct { errorCount int errors []string } func (e *testErrorfer) Errorf(format string, args ...interface{}) { e.errors = append(e.errors, fmt.Sprintf(format, args...)) e.errorCount++ } func TestCheck(t *testing.T) { const leakCount = 3 for i := 0; i < leakCount; i++ { go func() { time.Sleep(2 * time.Second) }() } if ig := interestingGoroutines(); len(ig) == 0 { t.Error("blah") } e := &testErrorfer{} check(e, time.Second) if e.errorCount != leakCount { t.Errorf("check found %v leaks, want %v leaks", e.errorCount, leakCount) t.Logf("leaked goroutines:\n%v", strings.Join(e.errors, "\n")) } check(t, 3*time.Second) } func ignoredTestingLeak(d time.Duration) { time.Sleep(d) } func TestCheckRegisterIgnore(t *testing.T) { RegisterIgnoreGoroutine("ignoredTestingLeak") const leakCount = 3 for i := 0; i < leakCount; i++ { go func() { time.Sleep(2 * time.Second) }() } go func() { ignoredTestingLeak(3 * time.Second) }() if ig := interestingGoroutines(); len(ig) == 0 { t.Error("blah") } e := &testErrorfer{} check(e, time.Second) if e.errorCount != leakCount { t.Errorf("check found %v leaks, want %v leaks", e.errorCount, leakCount) t.Logf("leaked goroutines:\n%v", strings.Join(e.errors, "\n")) } check(t, 3*time.Second) } 
grpc-go-1.29.1/internal/profiling/000077500000000000000000000000001365033716300167565ustar00rootroot00000000000000grpc-go-1.29.1/internal/profiling/buffer/000077500000000000000000000000001365033716300202275ustar00rootroot00000000000000grpc-go-1.29.1/internal/profiling/buffer/buffer.go000066400000000000000000000231511365033716300220310ustar00rootroot00000000000000// +build !appengine /* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package buffer provides a high-performant lock free implementation of a // circular buffer used by the profiling code. package buffer import ( "errors" "math/bits" "runtime" "sync" "sync/atomic" "unsafe" ) type queue struct { // An array of pointers as references to the items stored in this queue. arr []unsafe.Pointer // The maximum number of elements this queue may store before it wraps around // and overwrites older values. Must be an exponent of 2. size uint32 // Always size - 1. A bitwise AND is performed with this mask in place of a // modulo operation by the Push operation. mask uint32 // Each Push operation into this queue increments the acquired counter before // proceeding forwarding with the actual write to arr. This counter is also // used by the Drain operation's drainWait subroutine to wait for all pushes // to complete. acquired uint32 // Accessed atomically. // After the completion of a Push operation, the written counter is // incremented. 
Also used by drainWait to wait for all pushes to complete. written uint32 } // Allocates and returns a new *queue. size needs to be a exponent of two. func newQueue(size uint32) *queue { return &queue{ arr: make([]unsafe.Pointer, size), size: size, mask: size - 1, } } // drainWait blocks the caller until all Pushes on this queue are complete. func (q *queue) drainWait() { for atomic.LoadUint32(&q.acquired) != atomic.LoadUint32(&q.written) { runtime.Gosched() } } // A queuePair has two queues. At any given time, Pushes go into the queue // referenced by queuePair.q. The active queue gets switched when there's a // drain operation on the circular buffer. type queuePair struct { q0 unsafe.Pointer q1 unsafe.Pointer q unsafe.Pointer } // Allocates and returns a new *queuePair with its internal queues allocated. func newQueuePair(size uint32) *queuePair { qp := &queuePair{} qp.q0 = unsafe.Pointer(newQueue(size)) qp.q1 = unsafe.Pointer(newQueue(size)) qp.q = qp.q0 return qp } // Switches the current queue for future Pushes to proceed to the other queue // so that there's no blocking in Push. Returns a pointer to the old queue that // was in place before the switch. func (qp *queuePair) switchQueues() *queue { // Even though we have mutual exclusion across drainers (thanks to mu.Lock in // drain), Push operations may access qp.q whilst we're writing to it. if atomic.CompareAndSwapPointer(&qp.q, qp.q0, qp.q1) { return (*queue)(qp.q0) } atomic.CompareAndSwapPointer(&qp.q, qp.q1, qp.q0) return (*queue)(qp.q1) } // In order to not have expensive modulo operations, we require the maximum // number of elements in the circular buffer (N) to be an exponent of two to // use a bitwise AND mask. Since a CircularBuffer is a collection of queuePairs // (see below), we need to divide N; since exponents of two are only divisible // by other exponents of two, we use floorCPUCount number of queuePairs within // each CircularBuffer. 
// // Floor of the number of CPUs (and not the ceiling) was found to the be the // optimal number through experiments. func floorCPUCount() uint32 { floorExponent := bits.Len32(uint32(runtime.NumCPU())) - 1 if floorExponent < 0 { floorExponent = 0 } return 1 << uint32(floorExponent) } var numCircularBufferPairs = floorCPUCount() // CircularBuffer is a lock-free data structure that supports Push and Drain // operations. // // Note that CircularBuffer is built for performance more than reliability. // That is, some Push operations may fail without retries in some situations // (such as during a Drain operation). Order of pushes is not maintained // either; that is, if A was pushed before B, the Drain operation may return an // array with B before A. These restrictions are acceptable within gRPC's // profiling, but if your use-case does not permit these relaxed constraints // or if performance is not a primary concern, you should probably use a // lock-based data structure such as internal/buffer.UnboundedBuffer. type CircularBuffer struct { drainMutex sync.Mutex qp []*queuePair // qpn is an monotonically incrementing counter that's used to determine // which queuePair a Push operation should write to. This approach's // performance was found to be better than writing to a random queue. qpn uint32 qpMask uint32 } var errInvalidCircularBufferSize = errors.New("buffer size is not an exponent of two") // NewCircularBuffer allocates a circular buffer of size size and returns a // reference to the struct. Only circular buffers of size 2^k are allowed // (saves us from having to do expensive modulo operations). func NewCircularBuffer(size uint32) (*CircularBuffer, error) { if size&(size-1) != 0 { return nil, errInvalidCircularBufferSize } n := numCircularBufferPairs if size/numCircularBufferPairs < 8 { // If each circular buffer is going to hold less than a very small number // of items (let's say 8), using multiple circular buffers is very likely // wasteful. 
Instead, fallback to one circular buffer holding everything. n = 1 } cb := &CircularBuffer{ qp: make([]*queuePair, n), qpMask: n - 1, } for i := uint32(0); i < n; i++ { cb.qp[i] = newQueuePair(size / n) } return cb, nil } // Push pushes an element in to the circular buffer. Guaranteed to complete in // a finite number of steps (also lock-free). Does not guarantee that push // order will be retained. Does not guarantee that the operation will succeed // if a Drain operation concurrently begins execution. func (cb *CircularBuffer) Push(x interface{}) { n := atomic.AddUint32(&cb.qpn, 1) & cb.qpMask qptr := atomic.LoadPointer(&cb.qp[n].q) q := (*queue)(qptr) acquired := atomic.AddUint32(&q.acquired, 1) - 1 // If true, it means that we have incremented acquired before any queuePair // was switched, and therefore before any drainWait completion. Therefore, it // is safe to proceed with the Push operation on this queue. Otherwise, it // means that a Drain operation has begun execution, but we don't know how // far along the process it is. If it is past the drainWait check, it is not // safe to proceed with the Push operation. We choose to drop this sample // entirely instead of retrying, as retrying may potentially send the Push // operation into a spin loop (we want to guarantee completion of the Push // operation within a finite time). Before exiting, we increment written so // that any existing drainWaits can proceed. if atomic.LoadPointer(&cb.qp[n].q) != qptr { atomic.AddUint32(&q.written, 1) return } // At this point, we're definitely writing to the right queue. That is, one // of the following is true: // 1. No drainer is in execution on this queue. // 2. A drainer is in execution on this queue and it is waiting at the // acquired == written barrier. // // Let's say two Pushes A and B happen on the same queue. Say A and B are // q.size apart; i.e. they get the same index. 
That is, // // index_A = index_B // acquired_A + q.size = acquired_B // // We say "B has wrapped around A" when this happens. In this case, since A // occurred before B, B's Push should be the final value. However, we // accommodate A being the final value because wrap-arounds are extremely // rare and accounting for them requires an additional counter and a // significant performance penalty. Note that the below approach never leads // to any data corruption. index := acquired & q.mask atomic.StorePointer(&q.arr[index], unsafe.Pointer(&x)) // Allows any drainWait checks to proceed. atomic.AddUint32(&q.written, 1) } // Dereferences non-nil pointers from arr into result. Range of elements from // arr that are copied is [from, to). Assumes that the result slice is already // allocated and is large enough to hold all the elements that might be copied. // Also assumes mutual exclusion on the array of pointers. func dereferenceAppend(result []interface{}, arr []unsafe.Pointer, from, to uint32) []interface{} { for i := from; i < to; i++ { // We have mutual exclusion on arr, there's no need for atomics. x := (*interface{})(arr[i]) if x != nil { result = append(result, *x) } } return result } // Drain allocates and returns an array of things Pushed in to the circular // buffer. Push order is not maintained; that is, if B was Pushed after A, // drain may return B at a lower index than A in the returned array. 
func (cb *CircularBuffer) Drain() []interface{} { cb.drainMutex.Lock() qs := make([]*queue, len(cb.qp)) for i := 0; i < len(cb.qp); i++ { qs[i] = cb.qp[i].switchQueues() } var wg sync.WaitGroup wg.Add(int(len(qs))) for i := 0; i < len(qs); i++ { go func(qi int) { qs[qi].drainWait() wg.Done() }(i) } wg.Wait() result := make([]interface{}, 0) for i := 0; i < len(qs); i++ { if acquired := atomic.LoadUint32(&qs[i].acquired); acquired < qs[i].size { result = dereferenceAppend(result, qs[i].arr, 0, acquired) } else { result = dereferenceAppend(result, qs[i].arr, 0, qs[i].size) } } for i := 0; i < len(qs); i++ { atomic.StoreUint32(&qs[i].acquired, 0) atomic.StoreUint32(&qs[i].written, 0) } cb.drainMutex.Unlock() return result } grpc-go-1.29.1/internal/profiling/buffer/buffer_appengine.go000066400000000000000000000025121365033716300240550ustar00rootroot00000000000000// +build appengine /* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package buffer // CircularBuffer is a no-op implementation for appengine builds. // // Appengine does not support stats because of lack of the support for unsafe // pointers, which are necessary to efficiently store and retrieve things into // and from a circular buffer. As a result, Push does not do anything and Drain // returns an empty slice. type CircularBuffer struct{} // NewCircularBuffer returns a no-op for appengine builds. 
func NewCircularBuffer(size uint32) (*CircularBuffer, error) { return nil, nil } // Push returns a no-op for appengine builds. func (cb *CircularBuffer) Push(x interface{}) { } // Drain returns a no-op for appengine builds. func (cb *CircularBuffer) Drain() []interface{} { return nil } grpc-go-1.29.1/internal/profiling/buffer/buffer_test.go000066400000000000000000000101501365033716300230630ustar00rootroot00000000000000// +build !appengine /* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package buffer import ( "fmt" "sync" "testing" "time" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func (s) TestCircularBufferSerial(t *testing.T) { var size, i uint32 var result []interface{} size = 1 << 15 cb, err := NewCircularBuffer(size) if err != nil { t.Fatalf("error allocating CircularBuffer: %v", err) } for i = 0; i < size/2; i++ { cb.Push(i) } result = cb.Drain() if uint32(len(result)) != size/2 { t.Fatalf("len(result) = %d; want %d", len(result), size/2) } // The returned result isn't necessarily sorted. 
seen := make(map[uint32]bool) for _, r := range result { seen[r.(uint32)] = true } for i = 0; i < uint32(len(result)); i++ { if !seen[i] { t.Fatalf("seen[%d] = false; want true", i) } } for i = 0; i < size; i++ { cb.Push(i) } result = cb.Drain() if uint32(len(result)) != size { t.Fatalf("len(result) = %d; want %d", len(result), size/2) } } func (s) TestCircularBufferOverflow(t *testing.T) { var size, i uint32 var result []interface{} size = 1 << 10 cb, err := NewCircularBuffer(size) if err != nil { t.Fatalf("error allocating CircularBuffer: %v", err) } for i = 0; i < 10*size; i++ { cb.Push(i) } result = cb.Drain() if uint32(len(result)) != size { t.Fatalf("len(result) = %d; want %d", len(result), size) } for idx, x := range result { if x.(uint32) < size { t.Fatalf("result[%d] = %d; want it to be >= %d", idx, x, size) } } } func (s) TestCircularBufferConcurrent(t *testing.T) { for tn := 0; tn < 2; tn++ { var size uint32 var result []interface{} size = 1 << 6 cb, err := NewCircularBuffer(size) if err != nil { t.Fatalf("error allocating CircularBuffer: %v", err) } type item struct { R uint32 N uint32 T time.Time } var wg sync.WaitGroup for r := uint32(0); r < 1024; r++ { wg.Add(1) go func(r uint32) { for n := uint32(0); n < size; n++ { cb.Push(item{R: r, N: n, T: time.Now()}) } wg.Done() }(r) } // Wait for all goroutines to finish only in one test. Draining // concurrently while Pushes are still happening will test for races in the // Draining lock. if tn == 0 { wg.Wait() } result = cb.Drain() // Can't expect the buffer to be full if the Pushes aren't necessarily done. if tn == 0 { if uint32(len(result)) != size { t.Fatalf("len(result) = %d; want %d", len(result), size) } } // There can be absolutely no expectation on the order of the data returned // by Drain because: (a) everything is happening concurrently (b) a // round-robin is used to write to different queues (and therefore // different cachelines) for less write contention. 
// Wait for all goroutines to complete before moving on to other tests. If // the benchmarks run after this, it might affect performance unfairly. wg.Wait() } } func BenchmarkCircularBuffer(b *testing.B) { x := 1 for size := 1 << 16; size <= 1<<20; size <<= 1 { for routines := 1; routines <= 1<<8; routines <<= 1 { b.Run(fmt.Sprintf("goroutines:%d/size:%d", routines, size), func(b *testing.B) { cb, err := NewCircularBuffer(uint32(size)) if err != nil { b.Fatalf("error allocating CircularBuffer: %v", err) } perRoutine := b.N / routines var wg sync.WaitGroup for r := 0; r < routines; r++ { wg.Add(1) go func() { for i := 0; i < perRoutine; i++ { cb.Push(&x) } wg.Done() }() } wg.Wait() }) } } } grpc-go-1.29.1/internal/profiling/goid_modified.go000066400000000000000000000064061365033716300220750ustar00rootroot00000000000000// +build grpcgoid /* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package profiling import ( "runtime" ) // This stubbed function usually returns zero (see goid_regular.go); however, // if grpc is built with `-tags 'grpcgoid'`, a runtime.Goid function, which // does not exist in the Go standard library, is expected. While not necessary, // sometimes, visualising grpc profiling data in trace-viewer is much nicer // with goroutines separated from each other. // // Several other approaches were considered before arriving at this: // // 1. Using a CGO module: CGO usually has access to some things that regular // Go does not. 
Till go1.4, CGO used to have access to the goroutine struct // because the Go runtime was written in C. However, 1.5+ uses a native Go // runtime; as a result, CGO does not have access to the goroutine structure // anymore in modern Go. Besides, CGO interop wasn't fast enough (estimated // to be ~170ns/op). This would also make building grpc require a C // compiler, which isn't a requirement currently, breaking a lot of stuff. // // 2. Using runtime.Stack stacktrace: While this would remove the need for a // modified Go runtime, this is ridiculously slow, thanks to the all the // string processing shenanigans required to extract the goroutine ID (about // ~2000ns/op). // // 3. Using Go version-specific build tags: For any given Go version, the // goroutine struct has a fixed structure. As a result, the goroutine ID // could be extracted if we know the offset using some assembly. This would // be faster then #1 and #2, but is harder to maintain. This would require // special Go code that's both architecture-specific and go version-specific // (a quadratic number of variants to maintain). // // 4. This approach, which requires a simple modification [1] to the Go runtime // to expose the current goroutine's ID. This is the chosen approach and it // takes about ~2 ns/op, which is negligible in the face of the tens of // microseconds that grpc takes to complete a RPC request. // // [1] To make the goroutine ID visible to Go programs apply the following // change to the runtime2.go file in your Go runtime installation: // // diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go // --- a/src/runtime/runtime2.go // +++ b/src/runtime/runtime2.go // @@ -392,6 +392,10 @@ type stack struct { // hi uintptr // } // // +func Goid() int64 { // + return getg().goid // +} // + // type g struct { // // Stack parameters. // // stack describes the actual stack memory: [stack.lo, stack.hi). // // The exposed runtime.Goid() function will return a int64 goroutine ID. 
func goid() int64 { return runtime.Goid() } grpc-go-1.29.1/internal/profiling/goid_regular.go000066400000000000000000000017221365033716300217520ustar00rootroot00000000000000// +build !grpcgoid /* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package profiling // This dummy function always returns 0. In some modified dev environments, // this may be replaced with a call to a function in a modified Go runtime that // retrieves the goroutine ID efficiently. See goid_modified.go for a different // version of goId that requires a grpcgoid build tag to compile. func goid() int64 { return 0 } grpc-go-1.29.1/internal/profiling/profiling.go000066400000000000000000000177131365033716300213070ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package profiling contains two logical components: buffer.go and // profiling.go. The former implements a circular buffer (a.k.a. 
ring buffer) // in a lock-free manner using atomics. This ring buffer is used by // profiling.go to store various statistics. For example, StreamStats is a // circular buffer of Stat objects, each of which is comprised of Timers. // // This abstraction is designed to accommodate more stats in the future; for // example, if one wants to profile the load balancing layer, which is // independent of RPC queries, a separate CircularBuffer can be used. // // Note that the circular buffer simply takes any interface{}. In the future, // more types of measurements (such as the number of memory allocations) could // be measured, which might require a different type of object being pushed // into the circular buffer. package profiling import ( "errors" "sync" "sync/atomic" "time" "google.golang.org/grpc/internal/profiling/buffer" ) // 0 or 1 representing profiling off and on, respectively. Use IsEnabled and // Enable to get and set this in a safe manner. var profilingEnabled uint32 // IsEnabled returns whether or not profiling is enabled. func IsEnabled() bool { return atomic.LoadUint32(&profilingEnabled) > 0 } // Enable turns profiling on and off. // // Note that it is impossible to enable profiling for one server and leave it // turned off for another. This is intentional and by design -- if the status // of profiling was server-specific, clients wouldn't be able to profile // themselves. As a result, Enable turns profiling on and off for all servers // and clients in the binary. Each stat will be, however, tagged with whether // it's a client stat or a server stat; so you should be able to filter for the // right type of stats in post-processing. func Enable(enabled bool) { if enabled { atomic.StoreUint32(&profilingEnabled, 1) } else { atomic.StoreUint32(&profilingEnabled, 0) } } // A Timer represents the wall-clock beginning and ending of a logical // operation. 
type Timer struct { // Tags is a comma-separated list of strings (usually forward-slash-separated // hierarchical strings) used to categorize a Timer. Tags string // Begin marks the beginning of this timer. The timezone is unspecified, but // must use the same timezone as End; this is so shave off the small, but // non-zero time required to convert to a standard timezone such as UTC. Begin time.Time // End marks the end of a timer. End time.Time // Each Timer must be started and ended within the same goroutine; GoID // captures this goroutine ID. The Go runtime does not typically expose this // information, so this is set to zero in the typical case. However, a // trivial patch to the runtime package can make this field useful. See // goid_modified.go in this package for more details. GoID int64 } // NewTimer creates and returns a new Timer object. This is useful when you // don't already have a Stat object to associate this Timer with; for example, // before the context of a new RPC query is created, a Timer may be needed to // measure transport-related operations. // // Use AppendTimer to append the returned Timer to a Stat. func NewTimer(tags string) *Timer { return &Timer{ Tags: tags, Begin: time.Now(), GoID: goid(), } } // Egress sets the End field of a timer to the current time. func (timer *Timer) Egress() { if timer == nil { return } timer.End = time.Now() } // A Stat is a collection of Timers that represent timing information for // different components within this Stat. For example, a Stat may be used to // reference the entire lifetime of an RPC request, with Timers within it // representing different components such as encoding, compression, and // transport. // // The user is expected to use the included helper functions to do operations // on the Stat such as creating or appending a new timer. Direct operations on // the Stat's exported fields (which are exported for encoding reasons) may // lead to data races. 
type Stat struct { // Tags is a comma-separated list of strings used to categorize a Stat. Tags string // Stats may also need to store other unstructured information specific to // this stat. For example, a StreamStat will use these bytes to encode the // connection ID and stream ID for each RPC to uniquely identify it. The // encoding that must be used is unspecified. Metadata []byte // A collection of *Timers and a mutex for append operations on the slice. mu sync.Mutex Timers []*Timer } // A power of two that's large enough to hold all timers within an average RPC // request (defined to be a unary request) without any reallocation. A typical // unary RPC creates 80-100 timers for various things. While this number is // purely anecdotal and may change in the future as the resolution of profiling // increases or decreases, it serves as a good estimate for what the initial // allocation size should be. const defaultStatAllocatedTimers int32 = 128 // NewStat creates and returns a new Stat object. func NewStat(tags string) *Stat { return &Stat{ Tags: tags, Timers: make([]*Timer, 0, defaultStatAllocatedTimers), } } // NewTimer creates a Timer object within the given stat if stat is non-nil. // The value passed in tags will be attached to the newly created Timer. // NewTimer also automatically sets the Begin value of the Timer to the current // time. The user is expected to call stat.Egress with the returned index as // argument to mark the end. func (stat *Stat) NewTimer(tags string) *Timer { if stat == nil { return nil } timer := &Timer{ Tags: tags, GoID: goid(), Begin: time.Now(), } stat.mu.Lock() stat.Timers = append(stat.Timers, timer) stat.mu.Unlock() return timer } // AppendTimer appends a given Timer object to the internal slice of timers. A // deep copy of the timer is made (i.e. no reference is retained to this // pointer) and the user is expected to lose their reference to the timer to // allow the Timer object to be garbage collected. 
func (stat *Stat) AppendTimer(timer *Timer) { if stat == nil || timer == nil { return } stat.mu.Lock() stat.Timers = append(stat.Timers, timer) stat.mu.Unlock() } // statsInitialized is 0 before InitStats has been called. Changed to 1 by // exactly one call to InitStats. var statsInitialized int32 // Stats for the last defaultStreamStatsBufsize RPCs will be stored in memory. // This is can be configured by the registering server at profiling service // initialization with google.golang.org/grpc/profiling/service.ProfilingConfig const defaultStreamStatsSize uint32 = 16 << 10 // StreamStats is a CircularBuffer containing data from the last N RPC calls // served, where N is set by the user. This will contain both server stats and // client stats (but each stat will be tagged with whether it's a server or a // client in its Tags). var StreamStats *buffer.CircularBuffer var errAlreadyInitialized = errors.New("profiling may be initialized at most once") // InitStats initializes all the relevant Stat objects. Must be called exactly // once per lifetime of a process; calls after the first one will return an // error. func InitStats(streamStatsSize uint32) error { var err error if !atomic.CompareAndSwapInt32(&statsInitialized, 0, 1) { return errAlreadyInitialized } if streamStatsSize == 0 { streamStatsSize = defaultStreamStatsSize } StreamStats, err = buffer.NewCircularBuffer(streamStatsSize) if err != nil { return err } return nil } grpc-go-1.29.1/internal/profiling/profiling_test.go000066400000000000000000000072241365033716300223420ustar00rootroot00000000000000// +build !appengine /* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package profiling import ( "fmt" "strconv" "sync" "testing" "time" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/profiling/buffer" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func (s) TestProfiling(t *testing.T) { cb, err := buffer.NewCircularBuffer(128) if err != nil { t.Fatalf("error creating circular buffer: %v", err) } stat := NewStat("foo") cb.Push(stat) bar := func(n int) { if n%2 == 0 { defer stat.NewTimer(strconv.Itoa(n)).Egress() } else { timer := NewTimer(strconv.Itoa(n)) stat.AppendTimer(timer) defer timer.Egress() } time.Sleep(1 * time.Microsecond) } numTimers := int(8 * defaultStatAllocatedTimers) for i := 0; i < numTimers; i++ { bar(i) } results := cb.Drain() if len(results) != 1 { t.Fatalf("len(results) = %d; want 1", len(results)) } statReturned := results[0].(*Stat) if stat.Tags != "foo" { t.Fatalf("stat.Tags = %s; want foo", stat.Tags) } if len(stat.Timers) != numTimers { t.Fatalf("len(stat.Timers) = %d; want %d", len(stat.Timers), numTimers) } lastIdx := 0 for i, timer := range statReturned.Timers { // Check that they're in the order of append. if n, err := strconv.Atoi(timer.Tags); err != nil && n != lastIdx { t.Fatalf("stat.Timers[%d].Tags = %s; wanted %d", i, timer.Tags, lastIdx) } // Check that the timestamps are consistent. 
if diff := timer.End.Sub(timer.Begin); diff.Nanoseconds() < 1000 { t.Fatalf("stat.Timers[%d].End - stat.Timers[%d].Begin = %v; want >= 1000ns", i, i, diff) } lastIdx++ } } func (s) TestProfilingRace(t *testing.T) { stat := NewStat("foo") var wg sync.WaitGroup numTimers := int(8 * defaultStatAllocatedTimers) // also tests the slice growth code path wg.Add(numTimers) for i := 0; i < numTimers; i++ { go func(n int) { defer wg.Done() if n%2 == 0 { defer stat.NewTimer(strconv.Itoa(n)).Egress() } else { timer := NewTimer(strconv.Itoa(n)) stat.AppendTimer(timer) defer timer.Egress() } }(i) } wg.Wait() if len(stat.Timers) != numTimers { t.Fatalf("len(stat.Timers) = %d; want %d", len(stat.Timers), numTimers) } // The timers need not be ordered, so we can't expect them to be consecutive // like above. seen := make(map[int]bool) for i, timer := range stat.Timers { n, err := strconv.Atoi(timer.Tags) if err != nil { t.Fatalf("stat.Timers[%d].Tags = %s; wanted integer", i, timer.Tags) } seen[n] = true } for i := 0; i < numTimers; i++ { if _, ok := seen[i]; !ok { t.Fatalf("seen[%d] = false or does not exist; want it to be true", i) } } } func BenchmarkProfiling(b *testing.B) { for routines := 1; routines <= 1<<8; routines <<= 1 { b.Run(fmt.Sprintf("goroutines:%d", routines), func(b *testing.B) { perRoutine := b.N / routines stat := NewStat("foo") var wg sync.WaitGroup wg.Add(routines) for r := 0; r < routines; r++ { go func() { for i := 0; i < perRoutine; i++ { stat.NewTimer("bar").Egress() } wg.Done() }() } wg.Wait() }) } } grpc-go-1.29.1/internal/proto/000077500000000000000000000000001365033716300161305ustar00rootroot00000000000000grpc-go-1.29.1/internal/proto/grpc_service_config/000077500000000000000000000000001365033716300221305ustar00rootroot00000000000000grpc-go-1.29.1/internal/proto/grpc_service_config/example_test.go000066400000000000000000000035341365033716300251560ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //go:generate ./regenerate.sh package grpc_service_config_test import ( "testing" "github.com/golang/protobuf/jsonpb" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "google.golang.org/grpc/internal/grpctest" scpb "google.golang.org/grpc/internal/proto/grpc_service_config" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } // TestXdsConfigMarshalToJSON is an example to print json format of xds_config. func (s) TestXdsConfigMarshalToJSON(t *testing.T) { c := &scpb.XdsConfig{ ChildPolicy: []*scpb.LoadBalancingConfig{ {Policy: &scpb.LoadBalancingConfig_Grpclb{ Grpclb: &scpb.GrpcLbConfig{}, }}, {Policy: &scpb.LoadBalancingConfig_RoundRobin{ RoundRobin: &scpb.RoundRobinConfig{}, }}, }, FallbackPolicy: []*scpb.LoadBalancingConfig{ {Policy: &scpb.LoadBalancingConfig_Grpclb{ Grpclb: &scpb.GrpcLbConfig{}, }}, {Policy: &scpb.LoadBalancingConfig_PickFirst{ PickFirst: &scpb.PickFirstConfig{}, }}, }, EdsServiceName: "eds.service.name", LrsLoadReportingServerName: &wrapperspb.StringValue{ Value: "lrs.server.name", }, } j, err := (&jsonpb.Marshaler{}).MarshalToString(c) if err != nil { t.Fatalf("failed to marshal proto to json: %v", err) } t.Logf(j) } grpc-go-1.29.1/internal/proto/grpc_service_config/regenerate.sh000077500000000000000000000022041365033716300246060ustar00rootroot00000000000000#!/bin/bash # Copyright 2019 gRPC authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -eux -o pipefail TMP=$(mktemp -d) function finish { rm -rf "$TMP" } trap finish EXIT pushd "$TMP" mkdir -p grpc/service_config curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/service_config/service_config.proto > grpc/service_config/service_config.proto mkdir -p google/rpc curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > google/rpc/code.proto protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/service_config/*.proto popd rm -f ./*.pb.go cp "$TMP"/grpc/service_config/*.pb.go ./ grpc-go-1.29.1/internal/proto/grpc_service_config/service_config.pb.go000066400000000000000000001373421365033716300260560ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc/service_config/service_config.proto package grpc_service_config import ( fmt "fmt" proto "github.com/golang/protobuf/proto" duration "github.com/golang/protobuf/ptypes/duration" wrappers "github.com/golang/protobuf/ptypes/wrappers" code "google.golang.org/genproto/googleapis/rpc/code" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // Load balancing policy. // // Note that load_balancing_policy is deprecated in favor of // load_balancing_config; the former will be used only if the latter // is unset. // // If no LB policy is configured here, then the default is pick_first. // If the policy name is set via the client API, that value overrides // the value specified here. // // If the deprecated load_balancing_policy field is used, note that if the // resolver returns at least one balancer address (as opposed to backend // addresses), gRPC will use grpclb (see // https://github.com/grpc/grpc/blob/master/doc/load-balancing.md), // regardless of what policy is configured here. However, if the resolver // returns at least one backend address in addition to the balancer // address(es), the client may fall back to the requested policy if it // is unable to reach any of the grpclb load balancers. type ServiceConfig_LoadBalancingPolicy int32 const ( ServiceConfig_UNSPECIFIED ServiceConfig_LoadBalancingPolicy = 0 ServiceConfig_ROUND_ROBIN ServiceConfig_LoadBalancingPolicy = 1 ) var ServiceConfig_LoadBalancingPolicy_name = map[int32]string{ 0: "UNSPECIFIED", 1: "ROUND_ROBIN", } var ServiceConfig_LoadBalancingPolicy_value = map[string]int32{ "UNSPECIFIED": 0, "ROUND_ROBIN": 1, } func (x ServiceConfig_LoadBalancingPolicy) String() string { return proto.EnumName(ServiceConfig_LoadBalancingPolicy_name, int32(x)) } func (ServiceConfig_LoadBalancingPolicy) EnumDescriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{7, 0} } // Configuration for a method. type MethodConfig struct { Name []*MethodConfig_Name `protobuf:"bytes,1,rep,name=name,proto3" json:"name,omitempty"` // Whether RPCs sent to this method should wait until the connection is // ready by default. If false, the RPC will abort immediately if there is // a transient failure connecting to the server. 
Otherwise, gRPC will // attempt to connect until the deadline is exceeded. // // The value specified via the gRPC client API will override the value // set here. However, note that setting the value in the client API will // also affect transient errors encountered during name resolution, which // cannot be caught by the value here, since the service config is // obtained by the gRPC client via name resolution. WaitForReady *wrappers.BoolValue `protobuf:"bytes,2,opt,name=wait_for_ready,json=waitForReady,proto3" json:"wait_for_ready,omitempty"` // The default timeout in seconds for RPCs sent to this method. This can be // overridden in code. If no reply is received in the specified amount of // time, the request is aborted and a DEADLINE_EXCEEDED error status // is returned to the caller. // // The actual deadline used will be the minimum of the value specified here // and the value set by the application via the gRPC client API. If either // one is not set, then the other will be used. If neither is set, then the // request has no deadline. Timeout *duration.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` // The maximum allowed payload size for an individual request or object in a // stream (client->server) in bytes. The size which is measured is the // serialized payload after per-message compression (but before stream // compression) in bytes. This applies both to streaming and non-streaming // requests. // // The actual value used is the minimum of the value specified here and the // value set by the application via the gRPC client API. If either one is // not set, then the other will be used. If neither is set, then the // built-in default is used. // // If a client attempts to send an object larger than this value, it will not // be sent and the client will see a ClientError. // Note that 0 is a valid value, meaning that the request message // must be empty. 
MaxRequestMessageBytes *wrappers.UInt32Value `protobuf:"bytes,4,opt,name=max_request_message_bytes,json=maxRequestMessageBytes,proto3" json:"max_request_message_bytes,omitempty"` // The maximum allowed payload size for an individual response or object in a // stream (server->client) in bytes. The size which is measured is the // serialized payload after per-message compression (but before stream // compression) in bytes. This applies both to streaming and non-streaming // requests. // // The actual value used is the minimum of the value specified here and the // value set by the application via the gRPC client API. If either one is // not set, then the other will be used. If neither is set, then the // built-in default is used. // // If a server attempts to send an object larger than this value, it will not // be sent, and a ServerError will be sent to the client instead. // Note that 0 is a valid value, meaning that the response message // must be empty. MaxResponseMessageBytes *wrappers.UInt32Value `protobuf:"bytes,5,opt,name=max_response_message_bytes,json=maxResponseMessageBytes,proto3" json:"max_response_message_bytes,omitempty"` // Only one of retry_policy or hedging_policy may be set. If neither is set, // RPCs will not be retried or hedged. 
// // Types that are valid to be assigned to RetryOrHedgingPolicy: // *MethodConfig_RetryPolicy_ // *MethodConfig_HedgingPolicy_ RetryOrHedgingPolicy isMethodConfig_RetryOrHedgingPolicy `protobuf_oneof:"retry_or_hedging_policy"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MethodConfig) Reset() { *m = MethodConfig{} } func (m *MethodConfig) String() string { return proto.CompactTextString(m) } func (*MethodConfig) ProtoMessage() {} func (*MethodConfig) Descriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{0} } func (m *MethodConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MethodConfig.Unmarshal(m, b) } func (m *MethodConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MethodConfig.Marshal(b, m, deterministic) } func (m *MethodConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_MethodConfig.Merge(m, src) } func (m *MethodConfig) XXX_Size() int { return xxx_messageInfo_MethodConfig.Size(m) } func (m *MethodConfig) XXX_DiscardUnknown() { xxx_messageInfo_MethodConfig.DiscardUnknown(m) } var xxx_messageInfo_MethodConfig proto.InternalMessageInfo func (m *MethodConfig) GetName() []*MethodConfig_Name { if m != nil { return m.Name } return nil } func (m *MethodConfig) GetWaitForReady() *wrappers.BoolValue { if m != nil { return m.WaitForReady } return nil } func (m *MethodConfig) GetTimeout() *duration.Duration { if m != nil { return m.Timeout } return nil } func (m *MethodConfig) GetMaxRequestMessageBytes() *wrappers.UInt32Value { if m != nil { return m.MaxRequestMessageBytes } return nil } func (m *MethodConfig) GetMaxResponseMessageBytes() *wrappers.UInt32Value { if m != nil { return m.MaxResponseMessageBytes } return nil } type isMethodConfig_RetryOrHedgingPolicy interface { isMethodConfig_RetryOrHedgingPolicy() } type MethodConfig_RetryPolicy_ struct { RetryPolicy *MethodConfig_RetryPolicy 
`protobuf:"bytes,6,opt,name=retry_policy,json=retryPolicy,proto3,oneof"` } type MethodConfig_HedgingPolicy_ struct { HedgingPolicy *MethodConfig_HedgingPolicy `protobuf:"bytes,7,opt,name=hedging_policy,json=hedgingPolicy,proto3,oneof"` } func (*MethodConfig_RetryPolicy_) isMethodConfig_RetryOrHedgingPolicy() {} func (*MethodConfig_HedgingPolicy_) isMethodConfig_RetryOrHedgingPolicy() {} func (m *MethodConfig) GetRetryOrHedgingPolicy() isMethodConfig_RetryOrHedgingPolicy { if m != nil { return m.RetryOrHedgingPolicy } return nil } func (m *MethodConfig) GetRetryPolicy() *MethodConfig_RetryPolicy { if x, ok := m.GetRetryOrHedgingPolicy().(*MethodConfig_RetryPolicy_); ok { return x.RetryPolicy } return nil } func (m *MethodConfig) GetHedgingPolicy() *MethodConfig_HedgingPolicy { if x, ok := m.GetRetryOrHedgingPolicy().(*MethodConfig_HedgingPolicy_); ok { return x.HedgingPolicy } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*MethodConfig) XXX_OneofWrappers() []interface{} { return []interface{}{ (*MethodConfig_RetryPolicy_)(nil), (*MethodConfig_HedgingPolicy_)(nil), } } // The names of the methods to which this configuration applies. // - MethodConfig without names (empty list) will be skipped. // - Each name entry must be unique across the entire ServiceConfig. // - If the 'method' field is empty, this MethodConfig specifies the defaults // for all methods for the specified service. // - If the 'service' field is empty, the 'method' field must be empty, and // this MethodConfig specifies the default for all methods (it's the default // config). // // When determining which MethodConfig to use for a given RPC, the most // specific match wins. For example, let's say that the service config // contains the following MethodConfig entries: // // method_config { name { } ... } // method_config { name { service: "MyService" } ... } // method_config { name { service: "MyService" method: "Foo" } ... 
} // // MyService/Foo will use the third entry, because it exactly matches the // service and method name. MyService/Bar will use the second entry, because // it provides the default for all methods of MyService. AnotherService/Baz // will use the first entry, because it doesn't match the other two. // // In JSON representation, value "", value `null`, and not present are the // same. The following are the same Name: // - { "service": "s" } // - { "service": "s", "method": null } // - { "service": "s", "method": "" } type MethodConfig_Name struct { Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MethodConfig_Name) Reset() { *m = MethodConfig_Name{} } func (m *MethodConfig_Name) String() string { return proto.CompactTextString(m) } func (*MethodConfig_Name) ProtoMessage() {} func (*MethodConfig_Name) Descriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{0, 0} } func (m *MethodConfig_Name) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MethodConfig_Name.Unmarshal(m, b) } func (m *MethodConfig_Name) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MethodConfig_Name.Marshal(b, m, deterministic) } func (m *MethodConfig_Name) XXX_Merge(src proto.Message) { xxx_messageInfo_MethodConfig_Name.Merge(m, src) } func (m *MethodConfig_Name) XXX_Size() int { return xxx_messageInfo_MethodConfig_Name.Size(m) } func (m *MethodConfig_Name) XXX_DiscardUnknown() { xxx_messageInfo_MethodConfig_Name.DiscardUnknown(m) } var xxx_messageInfo_MethodConfig_Name proto.InternalMessageInfo func (m *MethodConfig_Name) GetService() string { if m != nil { return m.Service } return "" } func (m *MethodConfig_Name) GetMethod() string { if m != nil { return m.Method } return "" } // The retry policy for 
outgoing RPCs. type MethodConfig_RetryPolicy struct { // The maximum number of RPC attempts, including the original attempt. // // This field is required and must be greater than 1. // Any value greater than 5 will be treated as if it were 5. MaxAttempts uint32 `protobuf:"varint,1,opt,name=max_attempts,json=maxAttempts,proto3" json:"max_attempts,omitempty"` // Exponential backoff parameters. The initial retry attempt will occur at // random(0, initial_backoff). In general, the nth attempt will occur at // random(0, // min(initial_backoff*backoff_multiplier**(n-1), max_backoff)). // Required. Must be greater than zero. InitialBackoff *duration.Duration `protobuf:"bytes,2,opt,name=initial_backoff,json=initialBackoff,proto3" json:"initial_backoff,omitempty"` // Required. Must be greater than zero. MaxBackoff *duration.Duration `protobuf:"bytes,3,opt,name=max_backoff,json=maxBackoff,proto3" json:"max_backoff,omitempty"` BackoffMultiplier float32 `protobuf:"fixed32,4,opt,name=backoff_multiplier,json=backoffMultiplier,proto3" json:"backoff_multiplier,omitempty"` // The set of status codes which may be retried. // // This field is required and must be non-empty. 
RetryableStatusCodes []code.Code `protobuf:"varint,5,rep,packed,name=retryable_status_codes,json=retryableStatusCodes,proto3,enum=google.rpc.Code" json:"retryable_status_codes,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MethodConfig_RetryPolicy) Reset() { *m = MethodConfig_RetryPolicy{} } func (m *MethodConfig_RetryPolicy) String() string { return proto.CompactTextString(m) } func (*MethodConfig_RetryPolicy) ProtoMessage() {} func (*MethodConfig_RetryPolicy) Descriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{0, 1} } func (m *MethodConfig_RetryPolicy) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MethodConfig_RetryPolicy.Unmarshal(m, b) } func (m *MethodConfig_RetryPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MethodConfig_RetryPolicy.Marshal(b, m, deterministic) } func (m *MethodConfig_RetryPolicy) XXX_Merge(src proto.Message) { xxx_messageInfo_MethodConfig_RetryPolicy.Merge(m, src) } func (m *MethodConfig_RetryPolicy) XXX_Size() int { return xxx_messageInfo_MethodConfig_RetryPolicy.Size(m) } func (m *MethodConfig_RetryPolicy) XXX_DiscardUnknown() { xxx_messageInfo_MethodConfig_RetryPolicy.DiscardUnknown(m) } var xxx_messageInfo_MethodConfig_RetryPolicy proto.InternalMessageInfo func (m *MethodConfig_RetryPolicy) GetMaxAttempts() uint32 { if m != nil { return m.MaxAttempts } return 0 } func (m *MethodConfig_RetryPolicy) GetInitialBackoff() *duration.Duration { if m != nil { return m.InitialBackoff } return nil } func (m *MethodConfig_RetryPolicy) GetMaxBackoff() *duration.Duration { if m != nil { return m.MaxBackoff } return nil } func (m *MethodConfig_RetryPolicy) GetBackoffMultiplier() float32 { if m != nil { return m.BackoffMultiplier } return 0 } func (m *MethodConfig_RetryPolicy) GetRetryableStatusCodes() []code.Code { if m != nil { return m.RetryableStatusCodes } return nil } // The hedging 
policy for outgoing RPCs. Hedged RPCs may execute more than // once on the server, so only idempotent methods should specify a hedging // policy. type MethodConfig_HedgingPolicy struct { // The hedging policy will send up to max_requests RPCs. // This number represents the total number of all attempts, including // the original attempt. // // This field is required and must be greater than 1. // Any value greater than 5 will be treated as if it were 5. MaxAttempts uint32 `protobuf:"varint,1,opt,name=max_attempts,json=maxAttempts,proto3" json:"max_attempts,omitempty"` // The first RPC will be sent immediately, but the max_requests-1 subsequent // hedged RPCs will be sent at intervals of every hedging_delay. Set this // to 0 to immediately send all max_requests RPCs. HedgingDelay *duration.Duration `protobuf:"bytes,2,opt,name=hedging_delay,json=hedgingDelay,proto3" json:"hedging_delay,omitempty"` // The set of status codes which indicate other hedged RPCs may still // succeed. If a non-fatal status code is returned by the server, hedged // RPCs will continue. Otherwise, outstanding requests will be canceled and // the error returned to the client application layer. // // This field is optional. 
NonFatalStatusCodes []code.Code `protobuf:"varint,3,rep,packed,name=non_fatal_status_codes,json=nonFatalStatusCodes,proto3,enum=google.rpc.Code" json:"non_fatal_status_codes,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *MethodConfig_HedgingPolicy) Reset() { *m = MethodConfig_HedgingPolicy{} } func (m *MethodConfig_HedgingPolicy) String() string { return proto.CompactTextString(m) } func (*MethodConfig_HedgingPolicy) ProtoMessage() {} func (*MethodConfig_HedgingPolicy) Descriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{0, 2} } func (m *MethodConfig_HedgingPolicy) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MethodConfig_HedgingPolicy.Unmarshal(m, b) } func (m *MethodConfig_HedgingPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MethodConfig_HedgingPolicy.Marshal(b, m, deterministic) } func (m *MethodConfig_HedgingPolicy) XXX_Merge(src proto.Message) { xxx_messageInfo_MethodConfig_HedgingPolicy.Merge(m, src) } func (m *MethodConfig_HedgingPolicy) XXX_Size() int { return xxx_messageInfo_MethodConfig_HedgingPolicy.Size(m) } func (m *MethodConfig_HedgingPolicy) XXX_DiscardUnknown() { xxx_messageInfo_MethodConfig_HedgingPolicy.DiscardUnknown(m) } var xxx_messageInfo_MethodConfig_HedgingPolicy proto.InternalMessageInfo func (m *MethodConfig_HedgingPolicy) GetMaxAttempts() uint32 { if m != nil { return m.MaxAttempts } return 0 } func (m *MethodConfig_HedgingPolicy) GetHedgingDelay() *duration.Duration { if m != nil { return m.HedgingDelay } return nil } func (m *MethodConfig_HedgingPolicy) GetNonFatalStatusCodes() []code.Code { if m != nil { return m.NonFatalStatusCodes } return nil } // Configuration for pick_first LB policy. 
type PickFirstConfig struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *PickFirstConfig) Reset() { *m = PickFirstConfig{} } func (m *PickFirstConfig) String() string { return proto.CompactTextString(m) } func (*PickFirstConfig) ProtoMessage() {} func (*PickFirstConfig) Descriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{1} } func (m *PickFirstConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PickFirstConfig.Unmarshal(m, b) } func (m *PickFirstConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PickFirstConfig.Marshal(b, m, deterministic) } func (m *PickFirstConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_PickFirstConfig.Merge(m, src) } func (m *PickFirstConfig) XXX_Size() int { return xxx_messageInfo_PickFirstConfig.Size(m) } func (m *PickFirstConfig) XXX_DiscardUnknown() { xxx_messageInfo_PickFirstConfig.DiscardUnknown(m) } var xxx_messageInfo_PickFirstConfig proto.InternalMessageInfo // Configuration for round_robin LB policy. 
type RoundRobinConfig struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *RoundRobinConfig) Reset() { *m = RoundRobinConfig{} } func (m *RoundRobinConfig) String() string { return proto.CompactTextString(m) } func (*RoundRobinConfig) ProtoMessage() {} func (*RoundRobinConfig) Descriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{2} } func (m *RoundRobinConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RoundRobinConfig.Unmarshal(m, b) } func (m *RoundRobinConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RoundRobinConfig.Marshal(b, m, deterministic) } func (m *RoundRobinConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_RoundRobinConfig.Merge(m, src) } func (m *RoundRobinConfig) XXX_Size() int { return xxx_messageInfo_RoundRobinConfig.Size(m) } func (m *RoundRobinConfig) XXX_DiscardUnknown() { xxx_messageInfo_RoundRobinConfig.DiscardUnknown(m) } var xxx_messageInfo_RoundRobinConfig proto.InternalMessageInfo // Configuration for grpclb LB policy. type GrpcLbConfig struct { // Optional. What LB policy to use for routing between the backend // addresses. If unset, defaults to round_robin. // Currently, the only supported values are round_robin and pick_first. // Note that this will be used both in balancer mode and in fallback mode. // Multiple LB policies can be specified; clients will iterate through // the list in order and stop at the first policy that they support. ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,1,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` // Optional. If specified, overrides the name of the service to be sent to // the balancer. 
ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GrpcLbConfig) Reset() { *m = GrpcLbConfig{} } func (m *GrpcLbConfig) String() string { return proto.CompactTextString(m) } func (*GrpcLbConfig) ProtoMessage() {} func (*GrpcLbConfig) Descriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{3} } func (m *GrpcLbConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GrpcLbConfig.Unmarshal(m, b) } func (m *GrpcLbConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GrpcLbConfig.Marshal(b, m, deterministic) } func (m *GrpcLbConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_GrpcLbConfig.Merge(m, src) } func (m *GrpcLbConfig) XXX_Size() int { return xxx_messageInfo_GrpcLbConfig.Size(m) } func (m *GrpcLbConfig) XXX_DiscardUnknown() { xxx_messageInfo_GrpcLbConfig.DiscardUnknown(m) } var xxx_messageInfo_GrpcLbConfig proto.InternalMessageInfo func (m *GrpcLbConfig) GetChildPolicy() []*LoadBalancingConfig { if m != nil { return m.ChildPolicy } return nil } func (m *GrpcLbConfig) GetServiceName() string { if m != nil { return m.ServiceName } return "" } // Configuration for the cds LB policy. 
type CdsConfig struct { Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CdsConfig) Reset() { *m = CdsConfig{} } func (m *CdsConfig) String() string { return proto.CompactTextString(m) } func (*CdsConfig) ProtoMessage() {} func (*CdsConfig) Descriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{4} } func (m *CdsConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CdsConfig.Unmarshal(m, b) } func (m *CdsConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CdsConfig.Marshal(b, m, deterministic) } func (m *CdsConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_CdsConfig.Merge(m, src) } func (m *CdsConfig) XXX_Size() int { return xxx_messageInfo_CdsConfig.Size(m) } func (m *CdsConfig) XXX_DiscardUnknown() { xxx_messageInfo_CdsConfig.DiscardUnknown(m) } var xxx_messageInfo_CdsConfig proto.InternalMessageInfo func (m *CdsConfig) GetCluster() string { if m != nil { return m.Cluster } return "" } // Configuration for xds LB policy. type XdsConfig struct { // Name of balancer to connect to. BalancerName string `protobuf:"bytes,1,opt,name=balancer_name,json=balancerName,proto3" json:"balancer_name,omitempty"` // Deprecated: Do not use. // Optional. What LB policy to use for intra-locality routing. // If unset, will use whatever algorithm is specified by the balancer. // Multiple LB policies can be specified; clients will iterate through // the list in order and stop at the first policy that they support. ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,2,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` // Optional. What LB policy to use in fallback mode. If not // specified, defaults to round_robin. 
// Multiple LB policies can be specified; clients will iterate through // the list in order and stop at the first policy that they support. FallbackPolicy []*LoadBalancingConfig `protobuf:"bytes,3,rep,name=fallback_policy,json=fallbackPolicy,proto3" json:"fallback_policy,omitempty"` // Optional. Name to use in EDS query. If not present, defaults to // the server name from the target URI. EdsServiceName string `protobuf:"bytes,4,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` // LRS server to send load reports to. // If not present, load reporting will be disabled. // If set to the empty string, load reporting will be sent to the same // server that we obtained CDS data from. LrsLoadReportingServerName *wrappers.StringValue `protobuf:"bytes,5,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *XdsConfig) Reset() { *m = XdsConfig{} } func (m *XdsConfig) String() string { return proto.CompactTextString(m) } func (*XdsConfig) ProtoMessage() {} func (*XdsConfig) Descriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{5} } func (m *XdsConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_XdsConfig.Unmarshal(m, b) } func (m *XdsConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_XdsConfig.Marshal(b, m, deterministic) } func (m *XdsConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_XdsConfig.Merge(m, src) } func (m *XdsConfig) XXX_Size() int { return xxx_messageInfo_XdsConfig.Size(m) } func (m *XdsConfig) XXX_DiscardUnknown() { xxx_messageInfo_XdsConfig.DiscardUnknown(m) } var xxx_messageInfo_XdsConfig proto.InternalMessageInfo // Deprecated: Do not use. 
func (m *XdsConfig) GetBalancerName() string { if m != nil { return m.BalancerName } return "" } func (m *XdsConfig) GetChildPolicy() []*LoadBalancingConfig { if m != nil { return m.ChildPolicy } return nil } func (m *XdsConfig) GetFallbackPolicy() []*LoadBalancingConfig { if m != nil { return m.FallbackPolicy } return nil } func (m *XdsConfig) GetEdsServiceName() string { if m != nil { return m.EdsServiceName } return "" } func (m *XdsConfig) GetLrsLoadReportingServerName() *wrappers.StringValue { if m != nil { return m.LrsLoadReportingServerName } return nil } // Selects LB policy and provides corresponding configuration. // // In general, all instances of this field should be repeated. Clients will // iterate through the list in order and stop at the first policy that they // support. This allows the service config to specify custom policies that may // not be known to all clients. // // - If the config for the first supported policy is invalid, the whole service // config is invalid. // - If the list doesn't contain any supported policy, the whole service config // is invalid. type LoadBalancingConfig struct { // Exactly one LB policy may be configured. 
// // Types that are valid to be assigned to Policy: // *LoadBalancingConfig_PickFirst // *LoadBalancingConfig_RoundRobin // *LoadBalancingConfig_Grpclb // *LoadBalancingConfig_Cds // *LoadBalancingConfig_Xds // *LoadBalancingConfig_XdsExperimental Policy isLoadBalancingConfig_Policy `protobuf_oneof:"policy"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LoadBalancingConfig) Reset() { *m = LoadBalancingConfig{} } func (m *LoadBalancingConfig) String() string { return proto.CompactTextString(m) } func (*LoadBalancingConfig) ProtoMessage() {} func (*LoadBalancingConfig) Descriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{6} } func (m *LoadBalancingConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LoadBalancingConfig.Unmarshal(m, b) } func (m *LoadBalancingConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LoadBalancingConfig.Marshal(b, m, deterministic) } func (m *LoadBalancingConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_LoadBalancingConfig.Merge(m, src) } func (m *LoadBalancingConfig) XXX_Size() int { return xxx_messageInfo_LoadBalancingConfig.Size(m) } func (m *LoadBalancingConfig) XXX_DiscardUnknown() { xxx_messageInfo_LoadBalancingConfig.DiscardUnknown(m) } var xxx_messageInfo_LoadBalancingConfig proto.InternalMessageInfo type isLoadBalancingConfig_Policy interface { isLoadBalancingConfig_Policy() } type LoadBalancingConfig_PickFirst struct { PickFirst *PickFirstConfig `protobuf:"bytes,4,opt,name=pick_first,proto3,oneof"` } type LoadBalancingConfig_RoundRobin struct { RoundRobin *RoundRobinConfig `protobuf:"bytes,1,opt,name=round_robin,proto3,oneof"` } type LoadBalancingConfig_Grpclb struct { Grpclb *GrpcLbConfig `protobuf:"bytes,3,opt,name=grpclb,proto3,oneof"` } type LoadBalancingConfig_Cds struct { Cds *CdsConfig `protobuf:"bytes,6,opt,name=cds,proto3,oneof"` } type LoadBalancingConfig_Xds struct { Xds 
*XdsConfig `protobuf:"bytes,2,opt,name=xds,proto3,oneof"` } type LoadBalancingConfig_XdsExperimental struct { XdsExperimental *XdsConfig `protobuf:"bytes,5,opt,name=xds_experimental,proto3,oneof"` } func (*LoadBalancingConfig_PickFirst) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_RoundRobin) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_Grpclb) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_Cds) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_Xds) isLoadBalancingConfig_Policy() {} func (*LoadBalancingConfig_XdsExperimental) isLoadBalancingConfig_Policy() {} func (m *LoadBalancingConfig) GetPolicy() isLoadBalancingConfig_Policy { if m != nil { return m.Policy } return nil } func (m *LoadBalancingConfig) GetPickFirst() *PickFirstConfig { if x, ok := m.GetPolicy().(*LoadBalancingConfig_PickFirst); ok { return x.PickFirst } return nil } func (m *LoadBalancingConfig) GetRoundRobin() *RoundRobinConfig { if x, ok := m.GetPolicy().(*LoadBalancingConfig_RoundRobin); ok { return x.RoundRobin } return nil } func (m *LoadBalancingConfig) GetGrpclb() *GrpcLbConfig { if x, ok := m.GetPolicy().(*LoadBalancingConfig_Grpclb); ok { return x.Grpclb } return nil } func (m *LoadBalancingConfig) GetCds() *CdsConfig { if x, ok := m.GetPolicy().(*LoadBalancingConfig_Cds); ok { return x.Cds } return nil } func (m *LoadBalancingConfig) GetXds() *XdsConfig { if x, ok := m.GetPolicy().(*LoadBalancingConfig_Xds); ok { return x.Xds } return nil } func (m *LoadBalancingConfig) GetXdsExperimental() *XdsConfig { if x, ok := m.GetPolicy().(*LoadBalancingConfig_XdsExperimental); ok { return x.XdsExperimental } return nil } // XXX_OneofWrappers is for the internal use of the proto package. 
func (*LoadBalancingConfig) XXX_OneofWrappers() []interface{} { return []interface{}{ (*LoadBalancingConfig_PickFirst)(nil), (*LoadBalancingConfig_RoundRobin)(nil), (*LoadBalancingConfig_Grpclb)(nil), (*LoadBalancingConfig_Cds)(nil), (*LoadBalancingConfig_Xds)(nil), (*LoadBalancingConfig_XdsExperimental)(nil), } } // A ServiceConfig represents information about a service but is not specific to // any name resolver. type ServiceConfig struct { LoadBalancingPolicy ServiceConfig_LoadBalancingPolicy `protobuf:"varint,1,opt,name=load_balancing_policy,json=loadBalancingPolicy,proto3,enum=grpc.service_config.ServiceConfig_LoadBalancingPolicy" json:"load_balancing_policy,omitempty"` // Deprecated: Do not use. // Multiple LB policies can be specified; clients will iterate through // the list in order and stop at the first policy that they support. If none // are supported, the service config is considered invalid. LoadBalancingConfig []*LoadBalancingConfig `protobuf:"bytes,4,rep,name=load_balancing_config,json=loadBalancingConfig,proto3" json:"load_balancing_config,omitempty"` // Per-method configuration. 
MethodConfig []*MethodConfig `protobuf:"bytes,2,rep,name=method_config,json=methodConfig,proto3" json:"method_config,omitempty"` RetryThrottling *ServiceConfig_RetryThrottlingPolicy `protobuf:"bytes,3,opt,name=retry_throttling,json=retryThrottling,proto3" json:"retry_throttling,omitempty"` HealthCheckConfig *ServiceConfig_HealthCheckConfig `protobuf:"bytes,5,opt,name=health_check_config,json=healthCheckConfig,proto3" json:"health_check_config,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ServiceConfig) Reset() { *m = ServiceConfig{} } func (m *ServiceConfig) String() string { return proto.CompactTextString(m) } func (*ServiceConfig) ProtoMessage() {} func (*ServiceConfig) Descriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{7} } func (m *ServiceConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServiceConfig.Unmarshal(m, b) } func (m *ServiceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServiceConfig.Marshal(b, m, deterministic) } func (m *ServiceConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_ServiceConfig.Merge(m, src) } func (m *ServiceConfig) XXX_Size() int { return xxx_messageInfo_ServiceConfig.Size(m) } func (m *ServiceConfig) XXX_DiscardUnknown() { xxx_messageInfo_ServiceConfig.DiscardUnknown(m) } var xxx_messageInfo_ServiceConfig proto.InternalMessageInfo // Deprecated: Do not use. 
func (m *ServiceConfig) GetLoadBalancingPolicy() ServiceConfig_LoadBalancingPolicy { if m != nil { return m.LoadBalancingPolicy } return ServiceConfig_UNSPECIFIED } func (m *ServiceConfig) GetLoadBalancingConfig() []*LoadBalancingConfig { if m != nil { return m.LoadBalancingConfig } return nil } func (m *ServiceConfig) GetMethodConfig() []*MethodConfig { if m != nil { return m.MethodConfig } return nil } func (m *ServiceConfig) GetRetryThrottling() *ServiceConfig_RetryThrottlingPolicy { if m != nil { return m.RetryThrottling } return nil } func (m *ServiceConfig) GetHealthCheckConfig() *ServiceConfig_HealthCheckConfig { if m != nil { return m.HealthCheckConfig } return nil } // If a RetryThrottlingPolicy is provided, gRPC will automatically throttle // retry attempts and hedged RPCs when the client's ratio of failures to // successes exceeds a threshold. // // For each server name, the gRPC client will maintain a token_count which is // initially set to max_tokens. Every outgoing RPC (regardless of service or // method invoked) will change token_count as follows: // // - Every failed RPC will decrement the token_count by 1. // - Every successful RPC will increment the token_count by token_ratio. // // If token_count is less than or equal to max_tokens / 2, then RPCs will not // be retried and hedged RPCs will not be sent. type ServiceConfig_RetryThrottlingPolicy struct { // The number of tokens starts at max_tokens. The token_count will always be // between 0 and max_tokens. // // This field is required and must be greater than zero. MaxTokens uint32 `protobuf:"varint,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"` // The amount of tokens to add on each successful RPC. Typically this will // be some number between 0 and 1, e.g., 0.1. // // This field is required and must be greater than zero. Up to 3 decimal // places are supported. 
TokenRatio float32 `protobuf:"fixed32,2,opt,name=token_ratio,json=tokenRatio,proto3" json:"token_ratio,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ServiceConfig_RetryThrottlingPolicy) Reset() { *m = ServiceConfig_RetryThrottlingPolicy{} } func (m *ServiceConfig_RetryThrottlingPolicy) String() string { return proto.CompactTextString(m) } func (*ServiceConfig_RetryThrottlingPolicy) ProtoMessage() {} func (*ServiceConfig_RetryThrottlingPolicy) Descriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{7, 0} } func (m *ServiceConfig_RetryThrottlingPolicy) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServiceConfig_RetryThrottlingPolicy.Unmarshal(m, b) } func (m *ServiceConfig_RetryThrottlingPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServiceConfig_RetryThrottlingPolicy.Marshal(b, m, deterministic) } func (m *ServiceConfig_RetryThrottlingPolicy) XXX_Merge(src proto.Message) { xxx_messageInfo_ServiceConfig_RetryThrottlingPolicy.Merge(m, src) } func (m *ServiceConfig_RetryThrottlingPolicy) XXX_Size() int { return xxx_messageInfo_ServiceConfig_RetryThrottlingPolicy.Size(m) } func (m *ServiceConfig_RetryThrottlingPolicy) XXX_DiscardUnknown() { xxx_messageInfo_ServiceConfig_RetryThrottlingPolicy.DiscardUnknown(m) } var xxx_messageInfo_ServiceConfig_RetryThrottlingPolicy proto.InternalMessageInfo func (m *ServiceConfig_RetryThrottlingPolicy) GetMaxTokens() uint32 { if m != nil { return m.MaxTokens } return 0 } func (m *ServiceConfig_RetryThrottlingPolicy) GetTokenRatio() float32 { if m != nil { return m.TokenRatio } return 0 } type ServiceConfig_HealthCheckConfig struct { // Service name to use in the health-checking request. 
ServiceName *wrappers.StringValue `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ServiceConfig_HealthCheckConfig) Reset() { *m = ServiceConfig_HealthCheckConfig{} } func (m *ServiceConfig_HealthCheckConfig) String() string { return proto.CompactTextString(m) } func (*ServiceConfig_HealthCheckConfig) ProtoMessage() {} func (*ServiceConfig_HealthCheckConfig) Descriptor() ([]byte, []int) { return fileDescriptor_e32d3cb2c41c77ce, []int{7, 1} } func (m *ServiceConfig_HealthCheckConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServiceConfig_HealthCheckConfig.Unmarshal(m, b) } func (m *ServiceConfig_HealthCheckConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServiceConfig_HealthCheckConfig.Marshal(b, m, deterministic) } func (m *ServiceConfig_HealthCheckConfig) XXX_Merge(src proto.Message) { xxx_messageInfo_ServiceConfig_HealthCheckConfig.Merge(m, src) } func (m *ServiceConfig_HealthCheckConfig) XXX_Size() int { return xxx_messageInfo_ServiceConfig_HealthCheckConfig.Size(m) } func (m *ServiceConfig_HealthCheckConfig) XXX_DiscardUnknown() { xxx_messageInfo_ServiceConfig_HealthCheckConfig.DiscardUnknown(m) } var xxx_messageInfo_ServiceConfig_HealthCheckConfig proto.InternalMessageInfo func (m *ServiceConfig_HealthCheckConfig) GetServiceName() *wrappers.StringValue { if m != nil { return m.ServiceName } return nil } func init() { proto.RegisterEnum("grpc.service_config.ServiceConfig_LoadBalancingPolicy", ServiceConfig_LoadBalancingPolicy_name, ServiceConfig_LoadBalancingPolicy_value) proto.RegisterType((*MethodConfig)(nil), "grpc.service_config.MethodConfig") proto.RegisterType((*MethodConfig_Name)(nil), "grpc.service_config.MethodConfig.Name") proto.RegisterType((*MethodConfig_RetryPolicy)(nil), "grpc.service_config.MethodConfig.RetryPolicy") 
proto.RegisterType((*MethodConfig_HedgingPolicy)(nil), "grpc.service_config.MethodConfig.HedgingPolicy") proto.RegisterType((*PickFirstConfig)(nil), "grpc.service_config.PickFirstConfig") proto.RegisterType((*RoundRobinConfig)(nil), "grpc.service_config.RoundRobinConfig") proto.RegisterType((*GrpcLbConfig)(nil), "grpc.service_config.GrpcLbConfig") proto.RegisterType((*CdsConfig)(nil), "grpc.service_config.CdsConfig") proto.RegisterType((*XdsConfig)(nil), "grpc.service_config.XdsConfig") proto.RegisterType((*LoadBalancingConfig)(nil), "grpc.service_config.LoadBalancingConfig") proto.RegisterType((*ServiceConfig)(nil), "grpc.service_config.ServiceConfig") proto.RegisterType((*ServiceConfig_RetryThrottlingPolicy)(nil), "grpc.service_config.ServiceConfig.RetryThrottlingPolicy") proto.RegisterType((*ServiceConfig_HealthCheckConfig)(nil), "grpc.service_config.ServiceConfig.HealthCheckConfig") } func init() { proto.RegisterFile("grpc/service_config/service_config.proto", fileDescriptor_e32d3cb2c41c77ce) } var fileDescriptor_e32d3cb2c41c77ce = []byte{ // 1161 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdb, 0x6e, 0x23, 0x45, 0x10, 0x8d, 0xed, 0x6c, 0xb2, 0x2e, 0x5f, 0xe2, 0x74, 0x48, 0xd6, 0x6b, 0xc1, 0x92, 0x44, 0x2c, 0xf8, 0x25, 0x8e, 0xe4, 0x20, 0x58, 0x2d, 0x12, 0x17, 0x27, 0x31, 0x8e, 0x48, 0xb2, 0xa1, 0x93, 0xb0, 0x41, 0x42, 0x6a, 0xda, 0x33, 0x6d, 0x7b, 0x94, 0x99, 0xe9, 0xa1, 0xa7, 0xcd, 0x3a, 0x2f, 0xfc, 0x0d, 0xe2, 0x1b, 0xf8, 0x13, 0x9e, 0x10, 0x9f, 0x82, 0xfa, 0x32, 0x8e, 0xc7, 0xf6, 0xe2, 0xac, 0x78, 0x9c, 0xaa, 0x3a, 0xa7, 0xab, 0xab, 0x4e, 0x55, 0x0f, 0xd4, 0xfb, 0x22, 0x72, 0xf6, 0x63, 0x26, 0x7e, 0xf5, 0x1c, 0x46, 0x1c, 0x1e, 0xf6, 0xbc, 0xfe, 0xd4, 0x67, 0x23, 0x12, 0x5c, 0x72, 0xb4, 0xa1, 0x22, 0x1b, 0x69, 0x57, 0xed, 0x59, 0x9f, 0xf3, 0xbe, 0xcf, 0xf6, 0x75, 0x48, 0x77, 0xd8, 0xdb, 0x77, 0x87, 0x82, 0x4a, 0x8f, 0x87, 0x06, 0x34, 0xeb, 0x7f, 0x23, 0x68, 0x14, 0x31, 0x11, 0x5b, 0xff, 
0xa6, 0xf5, 0xab, 0x24, 0x1c, 0xee, 0x32, 0x63, 0xde, 0xfd, 0xfb, 0x31, 0x14, 0xcf, 0x98, 0x1c, 0x70, 0xf7, 0x50, 0x9f, 0x83, 0x5e, 0xc2, 0x72, 0x48, 0x03, 0x56, 0xcd, 0x6c, 0xe7, 0xea, 0x85, 0xe6, 0xc7, 0x8d, 0x39, 0xb9, 0x34, 0x26, 0x01, 0x8d, 0x73, 0x1a, 0x30, 0xac, 0x31, 0xe8, 0x6b, 0x28, 0xbf, 0xa1, 0x9e, 0x24, 0x3d, 0x2e, 0x88, 0x60, 0xd4, 0xbd, 0xab, 0x66, 0xb7, 0x33, 0xf5, 0x42, 0xb3, 0xd6, 0x30, 0x87, 0x37, 0x92, 0xe4, 0x1a, 0x2d, 0xce, 0xfd, 0x1f, 0xa8, 0x3f, 0x64, 0xb8, 0xa8, 0x10, 0x6d, 0x2e, 0xb0, 0x8a, 0x47, 0x07, 0xb0, 0x2a, 0xbd, 0x80, 0xf1, 0xa1, 0xac, 0xe6, 0x34, 0xf4, 0xe9, 0x0c, 0xf4, 0xc8, 0xde, 0x1b, 0x27, 0x91, 0xe8, 0x35, 0x3c, 0x0d, 0xe8, 0x88, 0x08, 0xf6, 0xcb, 0x90, 0xc5, 0x92, 0x04, 0x2c, 0x8e, 0x69, 0x9f, 0x91, 0xee, 0x9d, 0x64, 0x71, 0x75, 0x59, 0xd3, 0xbc, 0x3f, 0x43, 0x73, 0x7d, 0x12, 0xca, 0x83, 0xa6, 0xc9, 0x61, 0x2b, 0xa0, 0x23, 0x6c, 0xd0, 0x67, 0x06, 0xdc, 0x52, 0x58, 0xf4, 0x23, 0xd4, 0x0c, 0x71, 0x1c, 0xf1, 0x30, 0x66, 0x53, 0xcc, 0x8f, 0x1e, 0xc0, 0xfc, 0x44, 0x33, 0x1b, 0x78, 0x8a, 0x1a, 0x43, 0x51, 0x30, 0x29, 0xee, 0x48, 0xc4, 0x7d, 0xcf, 0xb9, 0xab, 0xae, 0x68, 0xb2, 0xbd, 0xc5, 0xe5, 0xc6, 0x0a, 0x75, 0xa1, 0x41, 0x9d, 0x25, 0x5c, 0x10, 0xf7, 0x9f, 0xe8, 0x06, 0xca, 0x03, 0xe6, 0xf6, 0xbd, 0xb0, 0x9f, 0xb0, 0xae, 0x6a, 0xd6, 0xfd, 0xc5, 0xac, 0x1d, 0x83, 0x1b, 0xf3, 0x96, 0x06, 0x93, 0x86, 0xda, 0x0b, 0x58, 0x56, 0x6d, 0x46, 0x55, 0x58, 0xb5, 0x2c, 0xd5, 0xcc, 0x76, 0xa6, 0x9e, 0xc7, 0xc9, 0x27, 0xda, 0x82, 0x95, 0x40, 0x13, 0xea, 0x96, 0xe7, 0xb1, 0xfd, 0xaa, 0xfd, 0x91, 0x85, 0xc2, 0x44, 0xca, 0x68, 0x07, 0x8a, 0xaa, 0xa4, 0x54, 0x4a, 0x16, 0x44, 0x32, 0xd6, 0x34, 0x25, 0x5c, 0x08, 0xe8, 0xe8, 0x1b, 0x6b, 0x42, 0x2d, 0x58, 0xf3, 0x42, 0x4f, 0x7a, 0xd4, 0x27, 0x5d, 0xea, 0xdc, 0xf2, 0x5e, 0xcf, 0xca, 0xe8, 0x3f, 0xb4, 0x50, 0xb6, 0x88, 0x96, 0x01, 0xa0, 0x97, 0xa0, 0x28, 0xc7, 0xf8, 0x85, 0x5a, 0x82, 0x80, 0x8e, 0x12, 0xec, 0x1e, 0x20, 0x8b, 0x23, 0xc1, 0xd0, 0x97, 0x5e, 0xe4, 0x7b, 0x4c, 0x68, 0x1d, 0x65, 0xf1, 0xba, 
0xf5, 0x9c, 0x8d, 0x1d, 0xa8, 0x0d, 0x5b, 0xba, 0x09, 0xb4, 0xeb, 0x33, 0x12, 0x4b, 0x2a, 0x87, 0x31, 0x51, 0x03, 0xa6, 0x04, 0x92, 0xab, 0x97, 0x9b, 0x95, 0xe4, 0x54, 0xd5, 0x83, 0x43, 0xee, 0x32, 0xfc, 0xde, 0x38, 0xfe, 0x52, 0x87, 0x2b, 0x63, 0x5c, 0xfb, 0x33, 0x03, 0xa5, 0x54, 0x1b, 0x1e, 0x52, 0xab, 0x2f, 0x21, 0xe9, 0x14, 0x71, 0x99, 0x4f, 0xef, 0x16, 0x57, 0xaa, 0x68, 0xe3, 0x8f, 0x54, 0x38, 0x3a, 0x86, 0xad, 0x90, 0x87, 0xa4, 0x47, 0x25, 0xf5, 0xd3, 0xc9, 0xe7, 0xde, 0x92, 0xfc, 0x46, 0xc8, 0xc3, 0xb6, 0x0a, 0x9f, 0xc8, 0xbd, 0xf5, 0x14, 0x9e, 0x18, 0x35, 0x73, 0x41, 0xd2, 0x12, 0xdc, 0x5d, 0x87, 0xb5, 0x0b, 0xcf, 0xb9, 0x6d, 0x7b, 0x22, 0x96, 0x46, 0x6c, 0xbb, 0x08, 0x2a, 0x98, 0x0f, 0x43, 0x17, 0xf3, 0xae, 0x17, 0x5a, 0xdb, 0x6f, 0x50, 0xfc, 0x56, 0x44, 0xce, 0x69, 0xd7, 0xae, 0xa1, 0xef, 0xa0, 0xe8, 0x0c, 0x3c, 0xdf, 0x4d, 0x94, 0x6c, 0xd6, 0x51, 0x7d, 0xae, 0x92, 0x4f, 0x39, 0x75, 0x5b, 0xd4, 0xa7, 0xa1, 0xe3, 0x85, 0x7d, 0x83, 0xc7, 0x05, 0x8d, 0xbe, 0x2f, 0x64, 0x02, 0xd1, 0xbb, 0xcd, 0x48, 0xb4, 0x60, 0x6d, 0x4a, 0xd9, 0xbb, 0xcf, 0x21, 0x7f, 0xe8, 0xc6, 0xf6, 0xf0, 0x2a, 0xac, 0x3a, 0xfe, 0x30, 0x96, 0x4c, 0x24, 0x32, 0xb7, 0x9f, 0xbb, 0xff, 0x64, 0x21, 0x7f, 0x33, 0x8e, 0xfb, 0x04, 0x4a, 0x5d, 0x7d, 0x2e, 0x13, 0xc4, 0x2e, 0xcd, 0x4c, 0x3d, 0xdf, 0xca, 0x56, 0x33, 0xb8, 0x98, 0x38, 0xf4, 0xdc, 0x4c, 0xdf, 0x26, 0xfb, 0x7f, 0x6e, 0xf3, 0x3d, 0xac, 0xf5, 0xa8, 0xef, 0x2b, 0x25, 0x26, 0x7c, 0xb9, 0x77, 0xe4, 0x2b, 0x27, 0x04, 0x96, 0xb2, 0x0e, 0x15, 0xe6, 0xc6, 0x24, 0x55, 0xa4, 0x65, 0x7d, 0xf3, 0x32, 0x73, 0xe3, 0xcb, 0xfb, 0x3a, 0xa1, 0x9f, 0xe1, 0x99, 0x2f, 0x62, 0xe2, 0x73, 0xea, 0x12, 0xc1, 0x22, 0x2e, 0xa4, 0xea, 0xb5, 0x02, 0x26, 0x35, 0x78, 0xdb, 0x5a, 0xbc, 0x94, 0xc2, 0x0b, 0xfb, 0x66, 0x2d, 0xd6, 0x7c, 0x11, 0xab, 0xbc, 0x70, 0xc2, 0x70, 0xa9, 0x09, 0x74, 0x27, 0x7e, 0xcf, 0xc1, 0xc6, 0x9c, 0x9c, 0x51, 0x1b, 0x20, 0xf2, 0x9c, 0x5b, 0xd2, 0x53, 0x4a, 0xb2, 0x6b, 0xfd, 0xa3, 0xb9, 0x37, 0x9e, 0xd2, 0x5b, 0x67, 0x09, 0x4f, 0x20, 0xd1, 0x09, 
0x14, 0x84, 0x52, 0x1f, 0x11, 0x4a, 0x7e, 0xba, 0x65, 0x85, 0xe6, 0xf3, 0xb9, 0x44, 0xd3, 0x2a, 0xd5, 0x0b, 0xf7, 0x1e, 0x8b, 0xbe, 0x80, 0x15, 0x05, 0xf3, 0xbb, 0x76, 0xc1, 0xec, 0xcc, 0x65, 0x99, 0xd4, 0x75, 0x67, 0x09, 0x5b, 0x08, 0x6a, 0x42, 0xce, 0x71, 0x63, 0xbb, 0xf8, 0x9f, 0xcd, 0x45, 0x8e, 0x15, 0xd9, 0x59, 0xc2, 0x2a, 0x58, 0x61, 0x46, 0x6e, 0x6c, 0x87, 0x7c, 0x3e, 0xe6, 0x66, 0x12, 0x33, 0x72, 0x63, 0x74, 0x0a, 0x95, 0x91, 0x1b, 0x13, 0x36, 0x8a, 0x98, 0xf0, 0x02, 0x16, 0x4a, 0xea, 0xdb, 0x1e, 0x2d, 0x26, 0x98, 0x41, 0xb6, 0x1e, 0xc3, 0x8a, 0x1d, 0xec, 0xbf, 0x1e, 0x41, 0xc9, 0x2a, 0xc3, 0x76, 0x28, 0x84, 0x4d, 0xad, 0x8b, 0x6e, 0xd2, 0xb9, 0xfb, 0xe1, 0xcd, 0xd4, 0xcb, 0xcd, 0xcf, 0xe6, 0x1e, 0x97, 0xa2, 0x48, 0x8b, 0xd5, 0x88, 0x53, 0x8f, 0xd3, 0x86, 0x3f, 0xeb, 0x40, 0x3f, 0xcd, 0x9c, 0x67, 0x38, 0xab, 0xcb, 0xef, 0x38, 0x0e, 0x69, 0xf6, 0xb1, 0xde, 0x4a, 0xe6, 0x0d, 0x4b, 0x58, 0xcd, 0xd0, 0xee, 0x2c, 0x7c, 0x4c, 0x71, 0x31, 0x98, 0xfc, 0xa1, 0x72, 0xa0, 0x62, 0x76, 0xa3, 0x1c, 0x08, 0x2e, 0xa5, 0xef, 0x85, 0x7d, 0x2b, 0x97, 0x17, 0x0f, 0x28, 0x88, 0x7e, 0x3b, 0xaf, 0xc6, 0x48, 0x73, 0x73, 0xbc, 0x26, 0xd2, 0x66, 0xe4, 0xc2, 0xc6, 0x80, 0x51, 0x5f, 0x0e, 0x88, 0x33, 0x60, 0xce, 0x6d, 0x92, 0xb2, 0xe9, 0xf3, 0xa7, 0x0f, 0x38, 0xa7, 0xa3, 0xd1, 0x87, 0x0a, 0x6c, 0x6f, 0xb1, 0x3e, 0x98, 0x36, 0xd5, 0x5e, 0xc3, 0xe6, 0xdc, 0x7c, 0xd0, 0x07, 0xa0, 0x1e, 0x50, 0x22, 0xf9, 0x2d, 0x0b, 0x93, 0x77, 0x2a, 0x1f, 0xd0, 0xd1, 0x95, 0x36, 0xa0, 0x0f, 0xa1, 0xa0, 0x5d, 0x44, 0xbf, 0x41, 0x5a, 0xbe, 0x59, 0x0c, 0xda, 0x84, 0x95, 0xa5, 0x76, 0x05, 0xeb, 0x33, 0x09, 0xa0, 0xaf, 0xa6, 0xb6, 0x76, 0xe6, 0x01, 0x8b, 0x25, 0xb5, 0xd3, 0x3f, 0x9f, 0x5a, 0x24, 0x36, 0xd9, 0x35, 0x28, 0x5c, 0x9f, 0x5f, 0x5e, 0x1c, 0x1f, 0x9e, 0xb4, 0x4f, 0x8e, 0x8f, 0x2a, 0x4b, 0xca, 0x80, 0x5f, 0x5d, 0x9f, 0x1f, 0x11, 0xfc, 0xaa, 0x75, 0x72, 0x5e, 0xc9, 0xb4, 0xf6, 0x60, 0xd3, 0xe3, 0xa9, 0xa2, 0x99, 0x9a, 0xb5, 0x50, 0xaa, 0x68, 0x17, 0x2a, 0x83, 0x8b, 0x4c, 0x77, 0x45, 0xa7, 0x72, 0xf0, 
0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2d, 0xca, 0x96, 0x84, 0xe2, 0x0b, 0x00, 0x00, } grpc-go-1.29.1/internal/resolver/000077500000000000000000000000001365033716300166265ustar00rootroot00000000000000grpc-go-1.29.1/internal/resolver/dns/000077500000000000000000000000001365033716300174125ustar00rootroot00000000000000grpc-go-1.29.1/internal/resolver/dns/dns_resolver.go000066400000000000000000000310101365033716300224410ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package dns implements a dns resolver to be installed as the default resolver // in grpc. package dns import ( "context" "encoding/json" "errors" "fmt" "net" "os" "strconv" "strings" "sync" "time" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) // EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB // addresses from SRV records. Must not be changed after init time. var EnableSRVLookups = false func init() { resolver.Register(NewBuilder()) } const ( defaultPort = "443" defaultDNSSvrPort = "53" golang = "GO" // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. txtPrefix = "_grpc_config." // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. 
txtAttribute = "grpc_config=" ) var ( errMissingAddr = errors.New("dns resolver: missing address") // Addresses ending with a colon that is supposed to be the separator // between host and port is not allowed. E.g. "::" is a valid address as // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with // a colon as the host and port separator errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") ) var ( defaultResolver netResolver = net.DefaultResolver // To prevent excessive re-resolution, we enforce a rate limit on DNS // resolution requests. minDNSResRate = 30 * time.Second ) var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { return func(ctx context.Context, network, address string) (net.Conn, error) { var dialer net.Dialer return dialer.DialContext(ctx, network, authority) } } var customAuthorityResolver = func(authority string) (netResolver, error) { host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err } authorityWithPort := net.JoinHostPort(host, port) return &net.Resolver{ PreferGo: true, Dial: customAuthorityDialler(authorityWithPort), }, nil } // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. func NewBuilder() resolver.Builder { return &dnsBuilder{} } type dnsBuilder struct{} // Build creates and starts a DNS resolver that watches the name resolution of the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { host, port, err := parseTarget(target.Endpoint, defaultPort) if err != nil { return nil, err } // IP address. if ipAddr, ok := formatIP(host); ok { addr := []resolver.Address{{Addr: ipAddr + ":" + port}} cc.UpdateState(resolver.State{Addresses: addr}) return deadResolver{}, nil } // DNS address (non-IP). 
ctx, cancel := context.WithCancel(context.Background()) d := &dnsResolver{ host: host, port: port, ctx: ctx, cancel: cancel, cc: cc, rn: make(chan struct{}, 1), disableServiceConfig: opts.DisableServiceConfig, } if target.Authority == "" { d.resolver = defaultResolver } else { d.resolver, err = customAuthorityResolver(target.Authority) if err != nil { return nil, err } } d.wg.Add(1) go d.watcher() d.ResolveNow(resolver.ResolveNowOptions{}) return d, nil } // Scheme returns the naming scheme of this resolver builder, which is "dns". func (b *dnsBuilder) Scheme() string { return "dns" } type netResolver interface { LookupHost(ctx context.Context, host string) (addrs []string, err error) LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) LookupTXT(ctx context.Context, name string) (txts []string, err error) } // deadResolver is a resolver that does nothing. type deadResolver struct{} func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {} func (deadResolver) Close() {} // dnsResolver watches for the name resolution update for a non-IP target. type dnsResolver struct { host string port string resolver netResolver ctx context.Context cancel context.CancelFunc cc resolver.ClientConn // rn channel is used by ResolveNow() to force an immediate resolution of the target. rn chan struct{} // wg is used to enforce Close() to return after the watcher() goroutine has finished. // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we // replace the real lookup functions with mocked ones to facilitate testing. // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes // will warns lookup (READ the lookup function pointers) inside watcher() goroutine // has data race with replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup disableServiceConfig bool } // ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. 
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { select { case d.rn <- struct{}{}: default: } } // Close closes the dnsResolver. func (d *dnsResolver) Close() { d.cancel() d.wg.Wait() } func (d *dnsResolver) watcher() { defer d.wg.Done() for { select { case <-d.ctx.Done(): return case <-d.rn: } state, err := d.lookup() if err != nil { d.cc.ReportError(err) } else { d.cc.UpdateState(*state) } // Sleep to prevent excessive re-resolutions. Incoming resolution requests // will be queued in d.rn. t := time.NewTimer(minDNSResRate) select { case <-t.C: case <-d.ctx.Done(): t.Stop() return } } } func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { if !EnableSRVLookups { return nil, nil } var newAddrs []resolver.Address _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) if err != nil { err = handleDNSError(err, "SRV") // may become nil return nil, err } for _, s := range srvs { lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) if err != nil { err = handleDNSError(err, "A") // may become nil if err == nil { // If there are other SRV records, look them up and ignore this // one that does not exist. continue } return nil, err } for _, a := range lbAddrs { ip, ok := formatIP(a) if !ok { return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) } addr := ip + ":" + strconv.Itoa(int(s.Port)) newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) } } return newAddrs, nil } var filterError = func(err error) error { if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). 
return nil } return err } func handleDNSError(err error, lookupType string) error { err = filterError(err) if err != nil { err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) grpclog.Infoln(err) } return err } func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) if err != nil { if envconfig.TXTErrIgnore { return nil } if err = handleDNSError(err, "TXT"); err != nil { return &serviceconfig.ParseResult{Err: err} } return nil } var res string for _, s := range ss { res += s } // TXT record must have "grpc_config=" attribute in order to be used as service config. if !strings.HasPrefix(res, txtAttribute) { grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) // This is not an error; it is the equivalent of not having a service config. return nil } sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) return d.cc.ParseServiceConfig(sc) } func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { var newAddrs []resolver.Address addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { err = handleDNSError(err, "A") return nil, err } for _, a := range addrs { ip, ok := formatIP(a) if !ok { return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) } addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) } return newAddrs, nil } func (d *dnsResolver) lookup() (*resolver.State, error) { srv, srvErr := d.lookupSRV() addrs, hostErr := d.lookupHost() if hostErr != nil && (srvErr != nil || len(srv) == 0) { return nil, hostErr } state := &resolver.State{ Addresses: append(addrs, srv...), } if !d.disableServiceConfig { state.ServiceConfig = d.lookupTXT() } return state, nil } // formatIP returns ok = false if addr is not a valid textual representation of an IP address. // If addr is an IPv4 address, return the addr and ok = true. 
// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { return "", false } if ip.To4() != nil { return addr, true } return "[" + addr + "]", true } // parseTarget takes the user input target string and default port, returns formatted host and port info. // If target doesn't specify a port, set the port to be the defaultPort. // If target is in IPv6 format and host-name is enclosed in square brackets, brackets // are stripped when setting the host. // examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" // target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" // target: ":80" defaultPort: "443" returns host: "localhost", port: "80" func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { return "", "", errMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address return target, defaultPort, nil } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. return "", "", errEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. 
host = "localhost" } return host, port, nil } if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { // target doesn't have port return host, port, nil } return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) } type rawChoice struct { ClientLanguage *[]string `json:"clientLanguage,omitempty"` Percentage *int `json:"percentage,omitempty"` ClientHostName *[]string `json:"clientHostName,omitempty"` ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` } func containsString(a *[]string, b string) bool { if a == nil { return true } for _, c := range *a { if c == b { return true } } return false } func chosenByPercentage(a *int) bool { if a == nil { return true } return grpcrand.Intn(100)+1 <= *a } func canaryingSC(js string) string { if js == "" { return "" } var rcs []rawChoice err := json.Unmarshal([]byte(js), &rcs) if err != nil { grpclog.Warningf("dns: error parsing service config json: %v", err) return "" } cliHostname, err := os.Hostname() if err != nil { grpclog.Warningf("dns: error getting client hostname: %v", err) return "" } var sc string for _, c := range rcs { if !containsString(c.ClientLanguage, golang) || !chosenByPercentage(c.Percentage) || !containsString(c.ClientHostName, cliHostname) || c.ServiceConfig == nil { continue } sc = string(*c.ServiceConfig) break } return sc } grpc-go-1.29.1/internal/resolver/dns/dns_resolver_test.go000066400000000000000000000762661365033716300235260ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package dns import ( "context" "errors" "fmt" "net" "os" "reflect" "strings" "sync" "testing" "time" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/leakcheck" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) func TestMain(m *testing.M) { // Set a non-zero duration only for tests which are actually testing that // feature. replaceDNSResRate(time.Duration(0)) // No nead to clean up since we os.Exit replaceNetFunc(nil) // No nead to clean up since we os.Exit code := m.Run() os.Exit(code) } const ( txtBytesLimit = 255 ) type testClientConn struct { resolver.ClientConn // For unimplemented functions target string m1 sync.Mutex state resolver.State updateStateCalls int errChan chan error } func (t *testClientConn) UpdateState(s resolver.State) { t.m1.Lock() defer t.m1.Unlock() t.state = s t.updateStateCalls++ } func (t *testClientConn) getState() (resolver.State, int) { t.m1.Lock() defer t.m1.Unlock() return t.state, t.updateStateCalls } func scFromState(s resolver.State) string { if s.ServiceConfig != nil { if s.ServiceConfig.Err != nil { return "" } return s.ServiceConfig.Config.(unparsedServiceConfig).config } return "" } type unparsedServiceConfig struct { serviceconfig.Config config string } func (t *testClientConn) ParseServiceConfig(s string) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: unparsedServiceConfig{config: s}} } func (t *testClientConn) ReportError(err error) { t.errChan <- err } type testResolver struct { // A write to this channel is made when this resolver receives a resolution // request. Tests can rely on reading from this channel to be notified about // resolution requests instead of sleeping for a predefined period of time. 
ch chan struct{} } func (tr *testResolver) LookupHost(ctx context.Context, host string) ([]string, error) { if tr.ch != nil { tr.ch <- struct{}{} } return hostLookup(host) } func (*testResolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { return srvLookup(service, proto, name) } func (*testResolver) LookupTXT(ctx context.Context, host string) ([]string, error) { return txtLookup(host) } func replaceNetFunc(ch chan struct{}) func() { oldResolver := defaultResolver defaultResolver = &testResolver{ch: ch} return func() { defaultResolver = oldResolver } } func replaceDNSResRate(d time.Duration) func() { oldMinDNSResRate := minDNSResRate minDNSResRate = d return func() { minDNSResRate = oldMinDNSResRate } } var hostLookupTbl = struct { sync.Mutex tbl map[string][]string }{ tbl: map[string][]string{ "foo.bar.com": {"1.2.3.4", "5.6.7.8"}, "ipv4.single.fake": {"1.2.3.4"}, "srv.ipv4.single.fake": {"2.4.6.8"}, "srv.ipv4.multi.fake": {}, "srv.ipv6.single.fake": {}, "srv.ipv6.multi.fake": {}, "ipv4.multi.fake": {"1.2.3.4", "5.6.7.8", "9.10.11.12"}, "ipv6.single.fake": {"2607:f8b0:400a:801::1001"}, "ipv6.multi.fake": {"2607:f8b0:400a:801::1001", "2607:f8b0:400a:801::1002", "2607:f8b0:400a:801::1003"}, }, } func hostLookup(host string) ([]string, error) { hostLookupTbl.Lock() defer hostLookupTbl.Unlock() if addrs, ok := hostLookupTbl.tbl[host]; ok { return addrs, nil } return nil, &net.DNSError{ Err: "hostLookup error", Name: host, Server: "fake", IsTemporary: true, } } var srvLookupTbl = struct { sync.Mutex tbl map[string][]*net.SRV }{ tbl: map[string][]*net.SRV{ "_grpclb._tcp.srv.ipv4.single.fake": {&net.SRV{Target: "ipv4.single.fake", Port: 1234}}, "_grpclb._tcp.srv.ipv4.multi.fake": {&net.SRV{Target: "ipv4.multi.fake", Port: 1234}}, "_grpclb._tcp.srv.ipv6.single.fake": {&net.SRV{Target: "ipv6.single.fake", Port: 1234}}, "_grpclb._tcp.srv.ipv6.multi.fake": {&net.SRV{Target: "ipv6.multi.fake", Port: 1234}}, }, } func 
srvLookup(service, proto, name string) (string, []*net.SRV, error) { cname := "_" + service + "._" + proto + "." + name srvLookupTbl.Lock() defer srvLookupTbl.Unlock() if srvs, cnt := srvLookupTbl.tbl[cname]; cnt { return cname, srvs, nil } return "", nil, &net.DNSError{ Err: "srvLookup error", Name: cname, Server: "fake", IsTemporary: true, } } // scfs contains an array of service config file string in JSON format. // Notes about the scfs contents and usage: // scfs contains 4 service config file JSON strings for testing. Inside each // service config file, there are multiple choices. scfs[0:3] each contains 5 // choices, and first 3 choices are nonmatching choices based on canarying rule, // while the last two are matched choices. scfs[3] only contains 3 choices, and // all of them are nonmatching based on canarying rule. For each of scfs[0:3], // the eventually returned service config, which is from the first of the two // matched choices, is stored in the corresponding scs element (e.g. // scfs[0]->scs[0]). scfs and scs elements are used in pair to test the dns // resolver functionality, with scfs as the input and scs used for validation of // the output. For scfs[3], it corresponds to empty service config, since there // isn't a matched choice. 
var scfs = []string{ `[ { "clientLanguage": [ "CPP", "JAVA" ], "serviceConfig": { "loadBalancingPolicy": "grpclb", "methodConfig": [ { "name": [ { "service": "all" } ], "timeout": "1s" } ] } }, { "percentage": 0, "serviceConfig": { "loadBalancingPolicy": "grpclb", "methodConfig": [ { "name": [ { "service": "all" } ], "timeout": "1s" } ] } }, { "clientHostName": [ "localhost" ], "serviceConfig": { "loadBalancingPolicy": "grpclb", "methodConfig": [ { "name": [ { "service": "all" } ], "timeout": "1s" } ] } }, { "clientLanguage": [ "GO" ], "percentage": 100, "serviceConfig": { "methodConfig": [ { "name": [ { "method": "bar" } ], "maxRequestMessageBytes": 1024, "maxResponseMessageBytes": 1024 } ] } }, { "serviceConfig": { "loadBalancingPolicy": "round_robin", "methodConfig": [ { "name": [ { "service": "foo", "method": "bar" } ], "waitForReady": true } ] } } ]`, `[ { "clientLanguage": [ "CPP", "JAVA" ], "serviceConfig": { "loadBalancingPolicy": "grpclb", "methodConfig": [ { "name": [ { "service": "all" } ], "timeout": "1s" } ] } }, { "percentage": 0, "serviceConfig": { "loadBalancingPolicy": "grpclb", "methodConfig": [ { "name": [ { "service": "all" } ], "timeout": "1s" } ] } }, { "clientHostName": [ "localhost" ], "serviceConfig": { "loadBalancingPolicy": "grpclb", "methodConfig": [ { "name": [ { "service": "all" } ], "timeout": "1s" } ] } }, { "clientLanguage": [ "GO" ], "percentage": 100, "serviceConfig": { "methodConfig": [ { "name": [ { "service": "foo", "method": "bar" } ], "waitForReady": true, "timeout": "1s", "maxRequestMessageBytes": 1024, "maxResponseMessageBytes": 1024 } ] } }, { "serviceConfig": { "loadBalancingPolicy": "round_robin", "methodConfig": [ { "name": [ { "service": "foo", "method": "bar" } ], "waitForReady": true } ] } } ]`, `[ { "clientLanguage": [ "CPP", "JAVA" ], "serviceConfig": { "loadBalancingPolicy": "grpclb", "methodConfig": [ { "name": [ { "service": "all" } ], "timeout": "1s" } ] } }, { "percentage": 0, "serviceConfig": { 
"loadBalancingPolicy": "grpclb", "methodConfig": [ { "name": [ { "service": "all" } ], "timeout": "1s" } ] } }, { "clientHostName": [ "localhost" ], "serviceConfig": { "loadBalancingPolicy": "grpclb", "methodConfig": [ { "name": [ { "service": "all" } ], "timeout": "1s" } ] } }, { "clientLanguage": [ "GO" ], "percentage": 100, "serviceConfig": { "loadBalancingPolicy": "round_robin", "methodConfig": [ { "name": [ { "service": "foo" } ], "waitForReady": true, "timeout": "1s" }, { "name": [ { "service": "bar" } ], "waitForReady": false } ] } }, { "serviceConfig": { "loadBalancingPolicy": "round_robin", "methodConfig": [ { "name": [ { "service": "foo", "method": "bar" } ], "waitForReady": true } ] } } ]`, `[ { "clientLanguage": [ "CPP", "JAVA" ], "serviceConfig": { "loadBalancingPolicy": "grpclb", "methodConfig": [ { "name": [ { "service": "all" } ], "timeout": "1s" } ] } }, { "percentage": 0, "serviceConfig": { "loadBalancingPolicy": "grpclb", "methodConfig": [ { "name": [ { "service": "all" } ], "timeout": "1s" } ] } }, { "clientHostName": [ "localhost" ], "serviceConfig": { "loadBalancingPolicy": "grpclb", "methodConfig": [ { "name": [ { "service": "all" } ], "timeout": "1s" } ] } } ]`, } // scs contains an array of service config string in JSON format. var scs = []string{ `{ "methodConfig": [ { "name": [ { "method": "bar" } ], "maxRequestMessageBytes": 1024, "maxResponseMessageBytes": 1024 } ] }`, `{ "methodConfig": [ { "name": [ { "service": "foo", "method": "bar" } ], "waitForReady": true, "timeout": "1s", "maxRequestMessageBytes": 1024, "maxResponseMessageBytes": 1024 } ] }`, `{ "loadBalancingPolicy": "round_robin", "methodConfig": [ { "name": [ { "service": "foo" } ], "waitForReady": true, "timeout": "1s" }, { "name": [ { "service": "bar" } ], "waitForReady": false } ] }`, } // scLookupTbl is a map, which contains targets that have service config to // their configs. Targets not in this set should not have service config. 
var scLookupTbl = map[string]string{ "foo.bar.com": scs[0], "srv.ipv4.single.fake": scs[1], "srv.ipv4.multi.fake": scs[2], } // generateSC returns a service config string in JSON format for the input name. func generateSC(name string) string { return scLookupTbl[name] } // generateSCF generates a slice of strings (aggregately representing a single // service config file) for the input config string, which mocks the result // from a real DNS TXT record lookup. func generateSCF(cfg string) []string { b := append([]byte(txtAttribute), []byte(cfg)...) // Split b into multiple strings, each with a max of 255 bytes, which is // the DNS TXT record limit. var r []string for i := 0; i < len(b); i += txtBytesLimit { if i+txtBytesLimit > len(b) { r = append(r, string(b[i:])) } else { r = append(r, string(b[i:i+txtBytesLimit])) } } return r } var txtLookupTbl = struct { sync.Mutex tbl map[string][]string }{ tbl: map[string][]string{ txtPrefix + "foo.bar.com": generateSCF(scfs[0]), txtPrefix + "srv.ipv4.single.fake": generateSCF(scfs[1]), txtPrefix + "srv.ipv4.multi.fake": generateSCF(scfs[2]), txtPrefix + "srv.ipv6.single.fake": generateSCF(scfs[3]), txtPrefix + "srv.ipv6.multi.fake": generateSCF(scfs[3]), }, } func txtLookup(host string) ([]string, error) { txtLookupTbl.Lock() defer txtLookupTbl.Unlock() if scs, cnt := txtLookupTbl.tbl[host]; cnt { return scs, nil } return nil, &net.DNSError{ Err: "txtLookup error", Name: host, Server: "fake", IsTemporary: true, } } func TestResolve(t *testing.T) { testDNSResolver(t) testDNSResolverWithSRV(t) testDNSResolveNow(t) testIPResolver(t) } func testDNSResolver(t *testing.T) { defer leakcheck.Check(t) tests := []struct { target string addrWant []resolver.Address scWant string }{ { "foo.bar.com", []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}, {Addr: "5.6.7.8" + colonDefaultPort}}, generateSC("foo.bar.com"), }, { "foo.bar.com:1234", []resolver.Address{{Addr: "1.2.3.4:1234"}, {Addr: "5.6.7.8:1234"}}, generateSC("foo.bar.com"), 
}, { "srv.ipv4.single.fake", []resolver.Address{{Addr: "2.4.6.8" + colonDefaultPort}}, generateSC("srv.ipv4.single.fake"), }, { "srv.ipv4.multi.fake", nil, generateSC("srv.ipv4.multi.fake"), }, { "srv.ipv6.single.fake", nil, generateSC("srv.ipv6.single.fake"), }, { "srv.ipv6.multi.fake", nil, generateSC("srv.ipv6.multi.fake"), }, } for _, a := range tests { b := NewBuilder() cc := &testClientConn{target: a.target} r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } var state resolver.State var cnt int for i := 0; i < 2000; i++ { state, cnt = cc.getState() if cnt > 0 { break } time.Sleep(time.Millisecond) } if cnt == 0 { t.Fatalf("UpdateState not called after 2s; aborting") } if !reflect.DeepEqual(a.addrWant, state.Addresses) { t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", a.target, state.Addresses, a.addrWant) } sc := scFromState(state) if a.scWant != sc { t.Errorf("Resolved service config of target: %q = %+v, want %+v\n", a.target, sc, a.scWant) } r.Close() } } func testDNSResolverWithSRV(t *testing.T) { EnableSRVLookups = true defer func() { EnableSRVLookups = false }() defer leakcheck.Check(t) tests := []struct { target string addrWant []resolver.Address scWant string }{ { "foo.bar.com", []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}, {Addr: "5.6.7.8" + colonDefaultPort}}, generateSC("foo.bar.com"), }, { "foo.bar.com:1234", []resolver.Address{{Addr: "1.2.3.4:1234"}, {Addr: "5.6.7.8:1234"}}, generateSC("foo.bar.com"), }, { "srv.ipv4.single.fake", []resolver.Address{{Addr: "2.4.6.8" + colonDefaultPort}, {Addr: "1.2.3.4:1234", Type: resolver.GRPCLB, ServerName: "ipv4.single.fake"}}, generateSC("srv.ipv4.single.fake"), }, { "srv.ipv4.multi.fake", []resolver.Address{ {Addr: "1.2.3.4:1234", Type: resolver.GRPCLB, ServerName: "ipv4.multi.fake"}, {Addr: "5.6.7.8:1234", Type: resolver.GRPCLB, ServerName: "ipv4.multi.fake"}, {Addr: "9.10.11.12:1234", Type: resolver.GRPCLB, 
ServerName: "ipv4.multi.fake"}, }, generateSC("srv.ipv4.multi.fake"), }, { "srv.ipv6.single.fake", []resolver.Address{{Addr: "[2607:f8b0:400a:801::1001]:1234", Type: resolver.GRPCLB, ServerName: "ipv6.single.fake"}}, generateSC("srv.ipv6.single.fake"), }, { "srv.ipv6.multi.fake", []resolver.Address{ {Addr: "[2607:f8b0:400a:801::1001]:1234", Type: resolver.GRPCLB, ServerName: "ipv6.multi.fake"}, {Addr: "[2607:f8b0:400a:801::1002]:1234", Type: resolver.GRPCLB, ServerName: "ipv6.multi.fake"}, {Addr: "[2607:f8b0:400a:801::1003]:1234", Type: resolver.GRPCLB, ServerName: "ipv6.multi.fake"}, }, generateSC("srv.ipv6.multi.fake"), }, } for _, a := range tests { b := NewBuilder() cc := &testClientConn{target: a.target} r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } defer r.Close() var state resolver.State var cnt int for i := 0; i < 2000; i++ { state, cnt = cc.getState() if cnt > 0 { break } time.Sleep(time.Millisecond) } if cnt == 0 { t.Fatalf("UpdateState not called after 2s; aborting") } if !reflect.DeepEqual(a.addrWant, state.Addresses) { t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", a.target, state.Addresses, a.addrWant) } sc := scFromState(state) if a.scWant != sc { t.Errorf("Resolved service config of target: %q = %+v, want %+v\n", a.target, sc, a.scWant) } } } func mutateTbl(target string) func() { hostLookupTbl.Lock() oldHostTblEntry := hostLookupTbl.tbl[target] hostLookupTbl.tbl[target] = hostLookupTbl.tbl[target][:len(oldHostTblEntry)-1] hostLookupTbl.Unlock() txtLookupTbl.Lock() oldTxtTblEntry := txtLookupTbl.tbl[txtPrefix+target] txtLookupTbl.tbl[txtPrefix+target] = []string{txtAttribute + `[{"serviceConfig":{"loadBalancingPolicy": "grpclb"}}]`} txtLookupTbl.Unlock() return func() { hostLookupTbl.Lock() hostLookupTbl.tbl[target] = oldHostTblEntry hostLookupTbl.Unlock() txtLookupTbl.Lock() if len(oldTxtTblEntry) == 0 { delete(txtLookupTbl.tbl, txtPrefix+target) } else { 
txtLookupTbl.tbl[txtPrefix+target] = oldTxtTblEntry } txtLookupTbl.Unlock() } } func testDNSResolveNow(t *testing.T) { defer leakcheck.Check(t) tests := []struct { target string addrWant []resolver.Address addrNext []resolver.Address scWant string scNext string }{ { "foo.bar.com", []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}, {Addr: "5.6.7.8" + colonDefaultPort}}, []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}}, generateSC("foo.bar.com"), `{"loadBalancingPolicy": "grpclb"}`, }, } for _, a := range tests { b := NewBuilder() cc := &testClientConn{target: a.target} r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } defer r.Close() var state resolver.State var cnt int for i := 0; i < 2000; i++ { state, cnt = cc.getState() if cnt > 0 { break } time.Sleep(time.Millisecond) } if cnt == 0 { t.Fatalf("UpdateState not called after 2s; aborting. state=%v", state) } if !reflect.DeepEqual(a.addrWant, state.Addresses) { t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", a.target, state.Addresses, a.addrWant) } sc := scFromState(state) if a.scWant != sc { t.Errorf("Resolved service config of target: %q = %+v, want %+v\n", a.target, sc, a.scWant) } revertTbl := mutateTbl(a.target) r.ResolveNow(resolver.ResolveNowOptions{}) for i := 0; i < 2000; i++ { state, cnt = cc.getState() if cnt == 2 { break } time.Sleep(time.Millisecond) } if cnt != 2 { t.Fatalf("UpdateState not called after 2s; aborting. 
state=%v", state) } sc = scFromState(state) if !reflect.DeepEqual(a.addrNext, state.Addresses) { t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", a.target, state.Addresses, a.addrNext) } if a.scNext != sc { t.Errorf("Resolved service config of target: %q = %+v, want %+v\n", a.target, sc, a.scNext) } revertTbl() } } const colonDefaultPort = ":" + defaultPort func testIPResolver(t *testing.T) { defer leakcheck.Check(t) tests := []struct { target string want []resolver.Address }{ {"127.0.0.1", []resolver.Address{{Addr: "127.0.0.1" + colonDefaultPort}}}, {"127.0.0.1:12345", []resolver.Address{{Addr: "127.0.0.1:12345"}}}, {"::1", []resolver.Address{{Addr: "[::1]" + colonDefaultPort}}}, {"[::1]:12345", []resolver.Address{{Addr: "[::1]:12345"}}}, {"[::1]", []resolver.Address{{Addr: "[::1]:443"}}}, {"2001:db8:85a3::8a2e:370:7334", []resolver.Address{{Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}}, {"[2001:db8:85a3::8a2e:370:7334]", []resolver.Address{{Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}}, {"[2001:db8:85a3::8a2e:370:7334]:12345", []resolver.Address{{Addr: "[2001:db8:85a3::8a2e:370:7334]:12345"}}}, {"[2001:db8::1]:http", []resolver.Address{{Addr: "[2001:db8::1]:http"}}}, // TODO(yuxuanli): zone support? } for _, v := range tests { b := NewBuilder() cc := &testClientConn{target: v.target} r, err := b.Build(resolver.Target{Endpoint: v.target}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } var state resolver.State var cnt int for { state, cnt = cc.getState() if cnt > 0 { break } time.Sleep(time.Millisecond) } if !reflect.DeepEqual(v.want, state.Addresses) { t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", v.target, state.Addresses, v.want) } r.ResolveNow(resolver.ResolveNowOptions{}) for i := 0; i < 50; i++ { state, cnt = cc.getState() if cnt > 1 { t.Fatalf("Unexpected second call by resolver to UpdateState. 
state: %v", state) } time.Sleep(time.Millisecond) } r.Close() } } func TestResolveFunc(t *testing.T) { defer leakcheck.Check(t) tests := []struct { addr string want error }{ // TODO(yuxuanli): More false cases? {"www.google.com", nil}, {"foo.bar:12345", nil}, {"127.0.0.1", nil}, {"::", nil}, {"127.0.0.1:12345", nil}, {"[::1]:80", nil}, {"[2001:db8:a0b:12f0::1]:21", nil}, {":80", nil}, {"127.0.0...1:12345", nil}, {"[fe80::1%lo0]:80", nil}, {"golang.org:http", nil}, {"[2001:db8::1]:http", nil}, {"[2001:db8::1]:", errEndsWithColon}, {":", errEndsWithColon}, {"", errMissingAddr}, {"[2001:db8:a0b:12f0::1", fmt.Errorf("invalid target address [2001:db8:a0b:12f0::1, error info: address [2001:db8:a0b:12f0::1:443: missing ']' in address")}, } b := NewBuilder() for _, v := range tests { cc := &testClientConn{target: v.addr, errChan: make(chan error, 1)} r, err := b.Build(resolver.Target{Endpoint: v.addr}, cc, resolver.BuildOptions{}) if err == nil { r.Close() } if !reflect.DeepEqual(err, v.want) { t.Errorf("Build(%q, cc, _) = %v, want %v", v.addr, err, v.want) } } } func TestDisableServiceConfig(t *testing.T) { defer leakcheck.Check(t) tests := []struct { target string scWant string disableServiceConfig bool }{ { "foo.bar.com", generateSC("foo.bar.com"), false, }, { "foo.bar.com", "", true, }, } for _, a := range tests { b := NewBuilder() cc := &testClientConn{target: a.target} r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOptions{DisableServiceConfig: a.disableServiceConfig}) if err != nil { t.Fatalf("%v\n", err) } defer r.Close() var cnt int var state resolver.State for i := 0; i < 2000; i++ { state, cnt = cc.getState() if cnt > 0 { break } time.Sleep(time.Millisecond) } if cnt == 0 { t.Fatalf("UpdateState not called after 2s; aborting") } sc := scFromState(state) if a.scWant != sc { t.Errorf("Resolved service config of target: %q = %+v, want %+v\n", a.target, sc, a.scWant) } } } func TestTXTError(t *testing.T) { defer leakcheck.Check(t) defer 
func(v bool) { envconfig.TXTErrIgnore = v }(envconfig.TXTErrIgnore) for _, ignore := range []bool{false, true} { envconfig.TXTErrIgnore = ignore b := NewBuilder() cc := &testClientConn{target: "ipv4.single.fake"} // has A records but not TXT records. r, err := b.Build(resolver.Target{Endpoint: "ipv4.single.fake"}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } defer r.Close() var cnt int var state resolver.State for i := 0; i < 2000; i++ { state, cnt = cc.getState() if cnt > 0 { break } time.Sleep(time.Millisecond) } if cnt == 0 { t.Fatalf("UpdateState not called after 2s; aborting") } if !ignore && (state.ServiceConfig == nil || state.ServiceConfig.Err == nil) { t.Errorf("state.ServiceConfig = %v; want non-nil error", state.ServiceConfig) } else if ignore && state.ServiceConfig != nil { t.Errorf("state.ServiceConfig = %v; want nil", state.ServiceConfig) } } } func TestDNSResolverRetry(t *testing.T) { b := NewBuilder() target := "ipv4.single.fake" cc := &testClientConn{target: target} r, err := b.Build(resolver.Target{Endpoint: target}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } defer r.Close() var state resolver.State for i := 0; i < 2000; i++ { state, _ = cc.getState() if len(state.Addresses) == 1 { break } time.Sleep(time.Millisecond) } if len(state.Addresses) != 1 { t.Fatalf("UpdateState not called with 1 address after 2s; aborting. state=%v", state) } want := []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}} if !reflect.DeepEqual(want, state.Addresses) { t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", target, state.Addresses, want) } // mutate the host lookup table so the target has 0 address returned. 
revertTbl := mutateTbl(target) // trigger a resolve that will get empty address list r.ResolveNow(resolver.ResolveNowOptions{}) for i := 0; i < 2000; i++ { state, _ = cc.getState() if len(state.Addresses) == 0 { break } time.Sleep(time.Millisecond) } if len(state.Addresses) != 0 { t.Fatalf("UpdateState not called with 0 address after 2s; aborting. state=%v", state) } revertTbl() // wait for the retry to happen in two seconds. r.ResolveNow(resolver.ResolveNowOptions{}) for i := 0; i < 2000; i++ { state, _ = cc.getState() if len(state.Addresses) == 1 { break } time.Sleep(time.Millisecond) } if !reflect.DeepEqual(want, state.Addresses) { t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", target, state.Addresses, want) } } func TestCustomAuthority(t *testing.T) { defer leakcheck.Check(t) tests := []struct { authority string authorityWant string expectError bool }{ { "4.3.2.1:" + defaultDNSSvrPort, "4.3.2.1:" + defaultDNSSvrPort, false, }, { "4.3.2.1:123", "4.3.2.1:123", false, }, { "4.3.2.1", "4.3.2.1:" + defaultDNSSvrPort, false, }, { "::1", "[::1]:" + defaultDNSSvrPort, false, }, { "[::1]", "[::1]:" + defaultDNSSvrPort, false, }, { "[::1]:123", "[::1]:123", false, }, { "dnsserver.com", "dnsserver.com:" + defaultDNSSvrPort, false, }, { ":123", "localhost:123", false, }, { ":", "", true, }, { "[::1]:", "", true, }, { "dnsserver.com:", "", true, }, } oldCustomAuthorityDialler := customAuthorityDialler defer func() { customAuthorityDialler = oldCustomAuthorityDialler }() for _, a := range tests { errChan := make(chan error, 1) customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { if authority != a.authorityWant { errChan <- fmt.Errorf("wrong custom authority passed to resolver. 
input: %s expected: %s actual: %s", a.authority, a.authorityWant, authority) } else { errChan <- nil } return func(ctx context.Context, network, address string) (net.Conn, error) { return nil, errors.New("no need to dial") } } b := NewBuilder() cc := &testClientConn{target: "foo.bar.com", errChan: make(chan error, 1)} r, err := b.Build(resolver.Target{Endpoint: "foo.bar.com", Authority: a.authority}, cc, resolver.BuildOptions{}) if err == nil { r.Close() err = <-errChan if err != nil { t.Errorf(err.Error()) } if a.expectError { t.Errorf("custom authority should have caused an error: %s", a.authority) } } else if !a.expectError { t.Errorf("unexpected error using custom authority %s: %s", a.authority, err) } } } // TestRateLimitedResolve exercises the rate limit enforced on re-resolution // requests. It sets the re-resolution rate to a small value and repeatedly // calls ResolveNow() and ensures only the expected number of resolution // requests are made. func TestRateLimitedResolve(t *testing.T) { defer leakcheck.Check(t) const dnsResRate = 10 * time.Millisecond dc := replaceDNSResRate(dnsResRate) defer dc() // Create a new testResolver{} for this test because we want the exact count // of the number of times the resolver was invoked. nc := replaceNetFunc(make(chan struct{})) defer nc() target := "foo.bar.com" b := NewBuilder() cc := &testClientConn{target: target} r, err := b.Build(resolver.Target{Endpoint: target}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("resolver.Build() returned error: %v\n", err) } defer r.Close() dnsR, ok := r.(*dnsResolver) if !ok { t.Fatalf("resolver.Build() returned unexpected type: %T\n", dnsR) } tr, ok := dnsR.resolver.(*testResolver) if !ok { t.Fatalf("delegate resolver returned unexpected type: %T\n", tr) } // Observe the time before unblocking the lookupHost call. The 100ms rate // limiting timer will begin immediately after that. 
This means the next // resolution could happen less than 100ms if we read the time *after* // receiving from tr.ch start := time.Now() // Wait for the first resolution request to be done. This happens as part // of the first iteration of the for loop in watcher() because we call // ResolveNow in Build. <-tr.ch // Here we start a couple of goroutines. One repeatedly calls ResolveNow() // until asked to stop, and the other waits for two resolution requests to be // made to our testResolver and stops the former. We measure the start and // end times, and expect the duration elapsed to be in the interval // {wantCalls*dnsResRate, wantCalls*dnsResRate} done := make(chan struct{}) go func() { for { select { case <-done: return default: r.ResolveNow(resolver.ResolveNowOptions{}) time.Sleep(1 * time.Millisecond) } } }() gotCalls := 0 const wantCalls = 3 min, max := wantCalls*dnsResRate, (wantCalls+1)*dnsResRate tMax := time.NewTimer(max) for gotCalls != wantCalls { select { case <-tr.ch: gotCalls++ case <-tMax.C: t.Fatalf("Timed out waiting for %v calls after %v; got %v", wantCalls, max, gotCalls) } } close(done) elapsed := time.Since(start) if gotCalls != wantCalls { t.Fatalf("resolve count mismatch for target: %q = %+v, want %+v\n", target, gotCalls, wantCalls) } if elapsed < min { t.Fatalf("elapsed time: %v, wanted it to be between {%v and %v}", elapsed, min, max) } wantAddrs := []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}, {Addr: "5.6.7.8" + colonDefaultPort}} var state resolver.State for { var cnt int state, cnt = cc.getState() if cnt > 0 { break } time.Sleep(time.Millisecond) } if !reflect.DeepEqual(state.Addresses, wantAddrs) { t.Errorf("Resolved addresses of target: %q = %+v, want %+v\n", target, state.Addresses, wantAddrs) } } func TestReportError(t *testing.T) { const target = "notfoundaddress" cc := &testClientConn{target: target, errChan: make(chan error)} b := NewBuilder() r, err := b.Build(resolver.Target{Endpoint: target}, cc, 
resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } defer r.Close() select { case err := <-cc.errChan: if !strings.Contains(err.Error(), "hostLookup error") { t.Fatalf(`ReportError(err=%v) called; want err contains "hostLookupError"`, err) } case <-time.After(time.Second): t.Fatalf("did not receive error after 1s") } } grpc-go-1.29.1/internal/resolver/dns/go113.go000066400000000000000000000015211365033716300205720ustar00rootroot00000000000000// +build go1.13 /* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package dns import "net" func init() { filterError = func(err error) error { if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { // The name does not exist; not an error. return nil } return err } } grpc-go-1.29.1/internal/resolver/passthrough/000077500000000000000000000000001365033716300211755ustar00rootroot00000000000000grpc-go-1.29.1/internal/resolver/passthrough/passthrough.go000066400000000000000000000030231365033716300240710ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package passthrough implements a pass-through resolver. It sends the target // name without scheme back to gRPC as resolved address. package passthrough import "google.golang.org/grpc/resolver" const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { r := &passthroughResolver{ target: target, cc: cc, } r.start() return r, nil } func (*passthroughBuilder) Scheme() string { return scheme } type passthroughResolver struct { target resolver.Target cc resolver.ClientConn } func (r *passthroughResolver) start() { r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) } func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} func (*passthroughResolver) Close() {} func init() { resolver.Register(&passthroughBuilder{}) } grpc-go-1.29.1/internal/status/000077500000000000000000000000001365033716300163105ustar00rootroot00000000000000grpc-go-1.29.1/internal/status/status.go000066400000000000000000000113251365033716300201640ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package status implements errors returned by gRPC. These errors are // serialized and transmitted on the wire between server and client, and allow // for additional data to be transmitted via the Details field in the status // proto. gRPC service handlers should return an error created by this // package, and gRPC clients should expect a corresponding error to be // returned from the RPC call. // // This package upholds the invariants that a non-nil error may not // contain an OK code, and an OK code must result in a nil error. package status import ( "errors" "fmt" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" ) // Status represents an RPC status code, message, and details. It is immutable // and should be created with New, Newf, or FromProto. type Status struct { s *spb.Status } // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} } // Newf returns New(c, fmt.Sprintf(format, a...)). func Newf(c codes.Code, format string, a ...interface{}) *Status { return New(c, fmt.Sprintf(format, a...)) } // FromProto returns a Status representing s. func FromProto(s *spb.Status) *Status { return &Status{s: proto.Clone(s).(*spb.Status)} } // Err returns an error representing c and msg. If c is OK, returns nil. func Err(c codes.Code, msg string) error { return New(c, msg).Err() } // Errorf returns Error(c, fmt.Sprintf(format, a...)). 
func Errorf(c codes.Code, format string, a ...interface{}) error { return Err(c, fmt.Sprintf(format, a...)) } // Code returns the status code contained in s. func (s *Status) Code() codes.Code { if s == nil || s.s == nil { return codes.OK } return codes.Code(s.s.Code) } // Message returns the message contained in s. func (s *Status) Message() string { if s == nil || s.s == nil { return "" } return s.s.Message } // Proto returns s's status as an spb.Status proto message. func (s *Status) Proto() *spb.Status { if s == nil { return nil } return proto.Clone(s.s).(*spb.Status) } // Err returns an immutable error representing s; returns nil if s.Code() is OK. func (s *Status) Err() error { if s.Code() == codes.OK { return nil } return (*Error)(s.Proto()) } // WithDetails returns a new status with the provided details messages appended to the status. // If any errors are encountered, it returns nil and the first error encountered. func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { if s.Code() == codes.OK { return nil, errors.New("no error details for status with code OK") } // s.Code() != OK implies that s.Proto() != nil. p := s.Proto() for _, detail := range details { any, err := ptypes.MarshalAny(detail) if err != nil { return nil, err } p.Details = append(p.Details, any) } return &Status{s: p}, nil } // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. func (s *Status) Details() []interface{} { if s == nil || s.s == nil { return nil } details := make([]interface{}, 0, len(s.s.Details)) for _, any := range s.s.Details { detail := &ptypes.DynamicAny{} if err := ptypes.UnmarshalAny(any, detail); err != nil { details = append(details, err) continue } details = append(details, detail.Message) } return details } // Error is an alias of a status proto. It implements error and Status, // and a nil Error should never be returned by this package. 
type Error spb.Status func (se *Error) Error() string { p := (*spb.Status)(se) return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) } // GRPCStatus returns the Status represented by se. func (se *Error) GRPCStatus() *Status { return FromProto((*spb.Status)(se)) } // Is implements future error.Is functionality. // A Error is equivalent if the code and message are identical. func (se *Error) Is(target error) bool { tse, ok := target.(*Error) if !ok { return false } return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) } grpc-go-1.29.1/internal/syscall/000077500000000000000000000000001365033716300164375ustar00rootroot00000000000000grpc-go-1.29.1/internal/syscall/syscall_linux.go000066400000000000000000000062701365033716300216640ustar00rootroot00000000000000// +build !appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package syscall provides functionalities that grpc uses to get low-level operating system // stats/info. package syscall import ( "fmt" "net" "syscall" "time" "golang.org/x/sys/unix" "google.golang.org/grpc/grpclog" ) // GetCPUTime returns the how much CPU time has passed since the start of this process. func GetCPUTime() int64 { var ts unix.Timespec if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { grpclog.Fatal(err) } return ts.Nano() } // Rusage is an alias for syscall.Rusage under linux non-appengine environment. 
type Rusage syscall.Rusage // GetRusage returns the resource usage of current process. func GetRusage() (rusage *Rusage) { rusage = new(Rusage) syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage)) return } // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { f := (*syscall.Rusage)(first) l := (*syscall.Rusage)(latest) var ( utimeDiffs = l.Utime.Sec - f.Utime.Sec utimeDiffus = l.Utime.Usec - f.Utime.Usec stimeDiffs = l.Stime.Sec - f.Stime.Sec stimeDiffus = l.Stime.Usec - f.Stime.Usec ) uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 return uTimeElapsed, sTimeElapsed } // SetTCPUserTimeout sets the TCP user timeout on a connection's socket func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { tcpconn, ok := conn.(*net.TCPConn) if !ok { // not a TCP connection. exit early return nil } rawConn, err := tcpconn.SyscallConn() if err != nil { return fmt.Errorf("error getting raw connection: %v", err) } err = rawConn.Control(func(fd uintptr) { err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) }) if err != nil { return fmt.Errorf("error setting option on socket: %v", err) } return nil } // GetTCPUserTimeout gets the TCP user timeout on a connection's socket func GetTCPUserTimeout(conn net.Conn) (opt int, err error) { tcpconn, ok := conn.(*net.TCPConn) if !ok { err = fmt.Errorf("conn is not *net.TCPConn. 
got %T", conn) return } rawConn, err := tcpconn.SyscallConn() if err != nil { err = fmt.Errorf("error getting raw connection: %v", err) return } err = rawConn.Control(func(fd uintptr) { opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT) }) if err != nil { err = fmt.Errorf("error getting option on socket: %v", err) return } return } grpc-go-1.29.1/internal/syscall/syscall_nonlinux.go000066400000000000000000000036431365033716300224000ustar00rootroot00000000000000// +build !linux appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package syscall import ( "net" "sync" "time" "google.golang.org/grpc/grpclog" ) var once sync.Once func log() { once.Do(func() { grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.") }) } // GetCPUTime returns the how much CPU time has passed since the start of this process. // It always returns 0 under non-linux or appengine environment. func GetCPUTime() int64 { log() return 0 } // Rusage is an empty struct under non-linux or appengine environment. type Rusage struct{} // GetRusage is a no-op function under non-linux or appengine environment. func GetRusage() (rusage *Rusage) { log() return nil } // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. It a no-op function for non-linux or appengine environment. 
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { log() return 0, 0 } // SetTCPUserTimeout is a no-op function under non-linux or appengine environments func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { log() return nil } // GetTCPUserTimeout is a no-op function under non-linux or appengine environments // a negative return value indicates the operation is not supported func GetTCPUserTimeout(conn net.Conn) (int, error) { log() return -1, nil } grpc-go-1.29.1/internal/testutils/000077500000000000000000000000001365033716300170255ustar00rootroot00000000000000grpc-go-1.29.1/internal/testutils/pipe_listener.go000066400000000000000000000042431365033716300222210ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package testutils contains testing helpers. package testutils import ( "errors" "net" "time" ) var errClosed = errors.New("closed") type pipeAddr struct{} func (p pipeAddr) Network() string { return "pipe" } func (p pipeAddr) String() string { return "pipe" } // PipeListener is a listener with an unbuffered pipe. Each write will complete only once the other side reads. It // should only be created using NewPipeListener. type PipeListener struct { c chan chan<- net.Conn done chan struct{} } // NewPipeListener creates a new pipe listener. 
func NewPipeListener() *PipeListener { return &PipeListener{ c: make(chan chan<- net.Conn), done: make(chan struct{}), } } // Accept accepts a connection. func (p *PipeListener) Accept() (net.Conn, error) { var connChan chan<- net.Conn select { case <-p.done: return nil, errClosed case connChan = <-p.c: select { case <-p.done: close(connChan) return nil, errClosed default: } } c1, c2 := net.Pipe() connChan <- c1 close(connChan) return c2, nil } // Close closes the listener. func (p *PipeListener) Close() error { close(p.done) return nil } // Addr returns a pipe addr. func (p *PipeListener) Addr() net.Addr { return pipeAddr{} } // Dialer dials a connection. func (p *PipeListener) Dialer() func(string, time.Duration) (net.Conn, error) { return func(string, time.Duration) (net.Conn, error) { connChan := make(chan net.Conn) select { case p.c <- connChan: case <-p.done: return nil, errClosed } conn, ok := <-connChan if !ok { return nil, errClosed } return conn, nil } } grpc-go-1.29.1/internal/testutils/pipe_listener_test.go000066400000000000000000000101161365033716300232540ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package testutils_test import ( "testing" "time" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func (s) TestPipeListener(t *testing.T) { pl := testutils.NewPipeListener() recvdBytes := make(chan []byte, 1) const want = "hello world" go func() { c, err := pl.Accept() if err != nil { t.Error(err) } read := make([]byte, len(want)) _, err = c.Read(read) if err != nil { t.Error(err) } recvdBytes <- read }() dl := pl.Dialer() conn, err := dl("", time.Duration(0)) if err != nil { t.Fatal(err) } _, err = conn.Write([]byte(want)) if err != nil { t.Fatal(err) } select { case gotBytes := <-recvdBytes: got := string(gotBytes) if got != want { t.Fatalf("expected to get %s, got %s", got, want) } case <-time.After(100 * time.Millisecond): t.Fatal("timed out waiting for server to receive bytes") } } func (s) TestUnblocking(t *testing.T) { for _, test := range []struct { desc string blockFuncShouldError bool blockFunc func(*testutils.PipeListener, chan struct{}) error unblockFunc func(*testutils.PipeListener) error }{ { desc: "Accept unblocks Dial", blockFunc: func(pl *testutils.PipeListener, done chan struct{}) error { dl := pl.Dialer() _, err := dl("", time.Duration(0)) close(done) return err }, unblockFunc: func(pl *testutils.PipeListener) error { _, err := pl.Accept() return err }, }, { desc: "Close unblocks Dial", blockFuncShouldError: true, // because pl.Close will be called blockFunc: func(pl *testutils.PipeListener, done chan struct{}) error { dl := pl.Dialer() _, err := dl("", time.Duration(0)) close(done) return err }, unblockFunc: func(pl *testutils.PipeListener) error { return pl.Close() }, }, { desc: "Dial unblocks Accept", blockFunc: func(pl *testutils.PipeListener, done chan struct{}) error { _, err := pl.Accept() close(done) return err }, unblockFunc: func(pl *testutils.PipeListener) error { dl := pl.Dialer() _, err := dl("", 
time.Duration(0)) return err }, }, { desc: "Close unblocks Accept", blockFuncShouldError: true, // because pl.Close will be called blockFunc: func(pl *testutils.PipeListener, done chan struct{}) error { _, err := pl.Accept() close(done) return err }, unblockFunc: func(pl *testutils.PipeListener) error { return pl.Close() }, }, } { t.Log(test.desc) testUnblocking(t, test.blockFunc, test.unblockFunc, test.blockFuncShouldError) } } func testUnblocking(t *testing.T, blockFunc func(*testutils.PipeListener, chan struct{}) error, unblockFunc func(*testutils.PipeListener) error, blockFuncShouldError bool) { pl := testutils.NewPipeListener() dialFinished := make(chan struct{}) go func() { err := blockFunc(pl, dialFinished) if blockFuncShouldError && err == nil { t.Error("expected blocking func to return error because pl.Close was called, but got nil") } if !blockFuncShouldError && err != nil { t.Error(err) } }() select { case <-dialFinished: t.Fatal("expected Dial to block until pl.Close or pl.Accept") default: } if err := unblockFunc(pl); err != nil { t.Fatal(err) } select { case <-dialFinished: case <-time.After(100 * time.Millisecond): t.Fatal("expected Accept to unblock after pl.Accept was called") } } grpc-go-1.29.1/internal/testutils/status_equal.go000066400000000000000000000020541365033716300220670ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package testutils import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc/status" ) // StatusErrEqual returns true iff both err1 and err2 wrap status.Status errors // and their underlying status protos are equal. func StatusErrEqual(err1, err2 error) bool { status1, ok := status.FromError(err1) if !ok { return false } status2, ok := status.FromError(err2) if !ok { return false } return proto.Equal(status1.Proto(), status2.Proto()) } grpc-go-1.29.1/internal/testutils/status_equal_test.go000066400000000000000000000034101365033716300231230ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package testutils import ( "testing" anypb "github.com/golang/protobuf/ptypes/any" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/status" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } var statusErr = status.ErrorProto(&spb.Status{ Code: int32(codes.DataLoss), Message: "error for testing", Details: []*anypb.Any{{ TypeUrl: "url", Value: []byte{6, 0, 0, 6, 1, 3}, }}, }) func (s) TestStatusErrEqual(t *testing.T) { tests := []struct { name string err1 error err2 error wantEqual bool }{ {"nil errors", nil, nil, true}, {"equal OK status", status.New(codes.OK, "").Err(), status.New(codes.OK, "").Err(), true}, {"equal status errors", statusErr, statusErr, true}, {"different status errors", statusErr, status.New(codes.OK, "").Err(), false}, } for _, test := range tests { if gotEqual := StatusErrEqual(test.err1, test.err2); gotEqual != test.wantEqual { t.Errorf("%v: StatusErrEqual(%v, %v) = %v, want %v", test.name, test.err1, test.err2, gotEqual, test.wantEqual) } } } grpc-go-1.29.1/internal/transport/000077500000000000000000000000001365033716300170215ustar00rootroot00000000000000grpc-go-1.29.1/internal/transport/bdp_estimator.go000066400000000000000000000104731365033716300222110ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package transport import ( "sync" "time" ) const ( // bdpLimit is the maximum value the flow control windows will be increased // to. TCP typically limits this to 4MB, but some systems go up to 16MB. // Since this is only a limit, it is safe to make it optimistic. bdpLimit = (1 << 20) * 16 // alpha is a constant factor used to keep a moving average // of RTTs. alpha = 0.9 // If the current bdp sample is greater than or equal to // our beta * our estimated bdp and the current bandwidth // sample is the maximum bandwidth observed so far, we // increase our bbp estimate by a factor of gamma. beta = 0.66 // To put our bdp to be smaller than or equal to twice the real BDP, // we should multiply our current sample with 4/3, however to round things out // we use 2 as the multiplication factor. gamma = 2 ) // Adding arbitrary data to ping so that its ack can be identified. // Easter-egg: what does the ping message say? var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} type bdpEstimator struct { // sentAt is the time when the ping was sent. sentAt time.Time mu sync.Mutex // bdp is the current bdp estimate. bdp uint32 // sample is the number of bytes received in one measurement cycle. sample uint32 // bwMax is the maximum bandwidth noted so far (bytes/sec). bwMax float64 // bool to keep track of the beginning of a new measurement cycle. isSent bool // Callback to update the window sizes. updateFlowControl func(n uint32) // sampleCount is the number of samples taken so far. sampleCount uint64 // round trip time (seconds) rtt float64 } // timesnap registers the time bdp ping was sent out so that // network rtt can be calculated when its ack is received. // It is called (by controller) when the bdpPing is // being written on the wire. func (b *bdpEstimator) timesnap(d [8]byte) { if bdpPing.data != d { return } b.sentAt = time.Now() } // add adds bytes to the current sample for calculating bdp. // It returns true only if a ping must be sent. 
This can be used // by the caller (handleData) to make decision about batching // a window update with it. func (b *bdpEstimator) add(n uint32) bool { b.mu.Lock() defer b.mu.Unlock() if b.bdp == bdpLimit { return false } if !b.isSent { b.isSent = true b.sample = n b.sentAt = time.Time{} b.sampleCount++ return true } b.sample += n return false } // calculate is called when an ack for a bdp ping is received. // Here we calculate the current bdp and bandwidth sample and // decide if the flow control windows should go up. func (b *bdpEstimator) calculate(d [8]byte) { // Check if the ping acked for was the bdp ping. if bdpPing.data != d { return } b.mu.Lock() rttSample := time.Since(b.sentAt).Seconds() if b.sampleCount < 10 { // Bootstrap rtt with an average of first 10 rtt samples. b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) } else { // Heed to the recent past more. b.rtt += (rttSample - b.rtt) * float64(alpha) } b.isSent = false // The number of bytes accumulated so far in the sample is smaller // than or equal to 1.5 times the real BDP on a saturated connection. bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) if bwCurrent > b.bwMax { b.bwMax = bwCurrent } // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we // should update our perception of the network BDP. if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { sampleFloat := float64(b.sample) b.bdp = uint32(gamma * sampleFloat) if b.bdp > bdpLimit { b.bdp = bdpLimit } bdp := b.bdp b.mu.Unlock() b.updateFlowControl(bdp) return } b.mu.Unlock() } grpc-go-1.29.1/internal/transport/controlbuf.go000066400000000000000000000573601365033716300215400ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package transport import ( "bytes" "fmt" "runtime" "sync" "sync/atomic" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" ) var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { e.SetMaxDynamicTableSizeLimit(v) } type itemNode struct { it interface{} next *itemNode } type itemList struct { head *itemNode tail *itemNode } func (il *itemList) enqueue(i interface{}) { n := &itemNode{it: i} if il.tail == nil { il.head, il.tail = n, n return } il.tail.next = n il.tail = n } // peek returns the first item in the list without removing it from the // list. func (il *itemList) peek() interface{} { return il.head.it } func (il *itemList) dequeue() interface{} { if il.head == nil { return nil } i := il.head.it il.head = il.head.next if il.head == nil { il.tail = nil } return i } func (il *itemList) dequeueAll() *itemNode { h := il.head il.head, il.tail = nil, nil return h } func (il *itemList) isEmpty() bool { return il.head == nil } // The following defines various control items which could flow through // the control buffer of transport. They represent different aspects of // control tasks, e.g., flow control, settings, streaming resetting, etc. // maxQueuedTransportResponseFrames is the most queued "transport response" // frames we will buffer before preventing new reads from occurring on the // transport. 
These are control frames sent in response to client requests, // such as RST_STREAM due to bad headers or settings acks. const maxQueuedTransportResponseFrames = 50 type cbItem interface { isTransportResponseFrame() bool } // registerStream is used to register an incoming stream with loopy writer. type registerStream struct { streamID uint32 wq *writeQuota } func (*registerStream) isTransportResponseFrame() bool { return false } // headerFrame is also used to register stream on the client-side. type headerFrame struct { streamID uint32 hf []hpack.HeaderField endStream bool // Valid on server side. initStream func(uint32) error // Used only on the client side. onWrite func() wq *writeQuota // write quota for the stream created. cleanup *cleanupStream // Valid on the server side. onOrphaned func(error) // Valid on client-side } func (h *headerFrame) isTransportResponseFrame() bool { return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM } type cleanupStream struct { streamID uint32 rst bool rstCode http2.ErrCode onWrite func() } func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM type dataFrame struct { streamID uint32 endStream bool h []byte d []byte // onEachWrite is called every time // a part of d is written out. 
onEachWrite func() } func (*dataFrame) isTransportResponseFrame() bool { return false } type incomingWindowUpdate struct { streamID uint32 increment uint32 } func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } type outgoingWindowUpdate struct { streamID uint32 increment uint32 } func (*outgoingWindowUpdate) isTransportResponseFrame() bool { return false // window updates are throttled by thresholds } type incomingSettings struct { ss []http2.Setting } func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK type outgoingSettings struct { ss []http2.Setting } func (*outgoingSettings) isTransportResponseFrame() bool { return false } type incomingGoAway struct { } func (*incomingGoAway) isTransportResponseFrame() bool { return false } type goAway struct { code http2.ErrCode debugData []byte headsUp bool closeConn bool } func (*goAway) isTransportResponseFrame() bool { return false } type ping struct { ack bool data [8]byte } func (*ping) isTransportResponseFrame() bool { return true } type outFlowControlSizeRequest struct { resp chan uint32 } func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } type outStreamState int const ( active outStreamState = iota empty waitingOnStreamQuota ) type outStream struct { id uint32 state outStreamState itl *itemList bytesOutStanding int wq *writeQuota next *outStream prev *outStream } func (s *outStream) deleteSelf() { if s.prev != nil { s.prev.next = s.next } if s.next != nil { s.next.prev = s.prev } s.next, s.prev = nil, nil } type outStreamList struct { // Following are sentinel objects that mark the // beginning and end of the list. They do not // contain any item lists. All valid objects are // inserted in between them. // This is needed so that an outStream object can // deleteSelf() in O(1) time without knowing which // list it belongs to. 
head *outStream tail *outStream } func newOutStreamList() *outStreamList { head, tail := new(outStream), new(outStream) head.next = tail tail.prev = head return &outStreamList{ head: head, tail: tail, } } func (l *outStreamList) enqueue(s *outStream) { e := l.tail.prev e.next = s s.prev = e s.next = l.tail l.tail.prev = s } // remove from the beginning of the list. func (l *outStreamList) dequeue() *outStream { b := l.head.next if b == l.tail { return nil } b.deleteSelf() return b } // controlBuffer is a way to pass information to loopy. // Information is passed as specific struct types called control frames. // A control frame not only represents data, messages or headers to be sent out // but can also be used to instruct loopy to update its internal state. // It shouldn't be confused with an HTTP2 frame, although some of the control frames // like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { ch chan struct{} done <-chan struct{} mu sync.Mutex consumerWaiting bool list *itemList err error // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. trfChan is created // when transportResponseFrames >= maxQueuedTransportResponseFrames and is // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int trfChan atomic.Value // *chan struct{} } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ ch: make(chan struct{}, 1), list: &itemList{}, done: done, } } // throttle blocks if there are too many incomingSettings/cleanupStreams in the // controlbuf. 
func (c *controlBuffer) throttle() { ch, _ := c.trfChan.Load().(*chan struct{}) if ch != nil { select { case <-*ch: case <-c.done: } } } func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { c.mu.Unlock() return false, c.err } if f != nil { if !f(it) { // f wasn't successful c.mu.Unlock() return false, nil } } if c.consumerWaiting { wakeUp = true c.consumerWaiting = false } c.list.enqueue(it) if it.isTransportResponseFrame() { c.transportResponseFrames++ if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. ch := make(chan struct{}) c.trfChan.Store(&ch) } } c.mu.Unlock() if wakeUp { select { case c.ch <- struct{}{}: default: } } return true, nil } // Note argument f should never be nil. func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { c.mu.Lock() if c.err != nil { c.mu.Unlock() return false, c.err } if !f(it) { // f wasn't successful c.mu.Unlock() return false, nil } c.mu.Unlock() return true, nil } func (c *controlBuffer) get(block bool) (interface{}, error) { for { c.mu.Lock() if c.err != nil { c.mu.Unlock() return nil, c.err } if !c.list.isEmpty() { h := c.list.dequeue().(cbItem) if h.isTransportResponseFrame() { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are removing the frame that put us over the // threshold; close and clear the throttling channel. 
ch := c.trfChan.Load().(*chan struct{}) close(*ch) c.trfChan.Store((*chan struct{})(nil)) } c.transportResponseFrames-- } c.mu.Unlock() return h, nil } if !block { c.mu.Unlock() return nil, nil } c.consumerWaiting = true c.mu.Unlock() select { case <-c.ch: case <-c.done: c.finish() return nil, ErrConnClosing } } } func (c *controlBuffer) finish() { c.mu.Lock() if c.err != nil { c.mu.Unlock() return } c.err = ErrConnClosing // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { hdr, ok := head.it.(*headerFrame) if !ok { continue } if hdr.onOrphaned != nil { // It will be nil on the server-side. hdr.onOrphaned(ErrConnClosing) } } c.mu.Unlock() } type side int const ( clientSide side = iota serverSide ) // Loopy receives frames from the control buffer. // Each frame is handled individually; most of the work done by loopy goes // into handling data frames. Loopy maintains a queue of active streams, and each // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, // thereby closely resemebling to a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { side side cbuf *controlBuffer sendQuota uint32 oiws uint32 // outbound initial window size. // estdStreams is map of all established streams that are not cleaned-up yet. // On client-side, this is all streams whose headers were sent out. // On server-side, this is all streams whose headers were received. estdStreams map[uint32]*outStream // Established streams. 
// activeStreams is a linked-list of all streams that have data to send and some // stream-level flow control quota. // Each of these streams internally have a list of data items(and perhaps trailers // on the server-side) to be sent out. activeStreams *outStreamList framer *framer hBuf *bytes.Buffer // The buffer for HPACK encoding. hEnc *hpack.Encoder // HPACK encoder. bdpEst *bdpEstimator draining bool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, cbuf: cbuf, sendQuota: defaultWindowSize, oiws: defaultWindowSize, estdStreams: make(map[uint32]*outStream), activeStreams: newOutStreamList(), framer: fr, hBuf: &buf, hEnc: hpack.NewEncoder(&buf), bdpEst: bdpEst, } return l } const minBatchSize = 1000 // run should be run in a separate goroutine. // It reads control frames from controlBuf and processes them by: // 1. Updating loopy's internal state, or/and // 2. Writing out HTTP2 frames on the wire. // // Loopy keeps all active streams with data to send in a linked-list. // All streams in the activeStreams linked-list must have both: // 1. Data to send, and // 2. Stream level flow control quota available. // // In each iteration of run loop, other than processing the incoming control // frame, loopy calls processData, which processes one node from the activeStreams linked-list. // This results in writing of HTTP2 frames into an underlying write buffer. // When there's no more control frames to read from controlBuf, loopy flushes the write buffer. // As an optimization, to increase the batch size for each flush, loopy yields the processor, once // if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { defer func() { if err == ErrConnClosing { // Don't log ErrConnClosing as error since it happens // 1. 
When the connection is closed by some other known issue. // 2. User closed the connection. // 3. A graceful close of connection. infof("transport: loopyWriter.run returning. %v", err) err = nil } }() for { it, err := l.cbuf.get(true) if err != nil { return err } if err = l.handle(it); err != nil { return err } if _, err = l.processData(); err != nil { return err } gosched := true hasdata: for { it, err := l.cbuf.get(false) if err != nil { return err } if it != nil { if err = l.handle(it); err != nil { return err } if _, err = l.processData(); err != nil { return err } continue hasdata } isEmpty, err := l.processData() if err != nil { return err } if !isEmpty { continue hasdata } if gosched { gosched = false if l.framer.writer.offset < minBatchSize { runtime.Gosched() continue hasdata } } l.framer.writer.Flush() break hasdata } } } func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) } func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { // Otherwise update the quota. if w.streamID == 0 { l.sendQuota += w.increment return nil } // Find the stream and update it. if str, ok := l.estdStreams[w.streamID]; ok { str.bytesOutStanding -= int(w.increment) if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { str.state = active l.activeStreams.enqueue(str) return nil } } return nil } func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { return l.framer.fr.WriteSettings(s.ss...) 
} func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { if err := l.applySettings(s.ss); err != nil { return err } return l.framer.fr.WriteSettingsAck() } func (l *loopyWriter) registerStreamHandler(h *registerStream) error { str := &outStream{ id: h.streamID, state: empty, itl: &itemList{}, wq: h.wq, } l.estdStreams[h.streamID] = str return nil } func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) return nil } // Case 1.A: Server is responding back with headers. if !h.endStream { return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) } // else: Case 1.B: Server wants to close stream. if str.state != empty { // either active or waiting on stream quota. // add it str's list of items. str.itl.enqueue(h) return nil } if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { return err } return l.cleanupStreamHandler(h.cleanup) } // Case 2: Client wants to originate stream. str := &outStream{ id: h.streamID, state: empty, itl: &itemList{}, wq: h.wq, } str.itl.enqueue(h) return l.originateStream(str) } func (l *loopyWriter) originateStream(str *outStream) error { hdr := str.itl.dequeue().(*headerFrame) if err := hdr.initStream(str.id); err != nil { if err == ErrConnClosing { return err } // Other errors(errStreamDrain) need not close transport. 
return nil } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err } l.estdStreams[str.id] = str return nil } func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { if onWrite != nil { onWrite() } l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err) } } var ( err error endHeaders, first bool ) first = true for !endHeaders { size := l.hBuf.Len() if size > http2MaxFrameLen { size = http2MaxFrameLen } else { endHeaders = true } if first { first = false err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ StreamID: streamID, BlockFragment: l.hBuf.Next(size), EndStream: endStream, EndHeaders: endHeaders, }) } else { err = l.framer.fr.WriteContinuation( streamID, endHeaders, l.hBuf.Next(size), ) } if err != nil { return err } } return nil } func (l *loopyWriter) preprocessData(df *dataFrame) error { str, ok := l.estdStreams[df.streamID] if !ok { return nil } // If we got data for a stream it means that // stream was originated and the headers were sent out. str.itl.enqueue(df) if str.state == empty { str.state = active l.activeStreams.enqueue(str) } return nil } func (l *loopyWriter) pingHandler(p *ping) error { if !p.ack { l.bdpEst.timesnap(p.data) } return l.framer.fr.WritePing(p.ack, p.data) } func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { o.resp <- l.sendQuota return nil } func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { c.onWrite() if str, ok := l.estdStreams[c.streamID]; ok { // On the server side it could be a trailers-only response or // a RST_STREAM before stream initialization thus the stream might // not be established yet. delete(l.estdStreams, c.streamID) str.deleteSelf() } if c.rst { // If RST_STREAM needs to be sent. 
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { return err } } if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { return ErrConnClosing } return nil } func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { return ErrConnClosing } } return nil } func (l *loopyWriter) goAwayHandler(g *goAway) error { // Handling of outgoing GoAway is very specific to side. if l.ssGoAwayHandler != nil { draining, err := l.ssGoAwayHandler(g) if err != nil { return err } l.draining = draining } return nil } func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: return l.incomingWindowUpdateHandler(i) case *outgoingWindowUpdate: return l.outgoingWindowUpdateHandler(i) case *incomingSettings: return l.incomingSettingsHandler(i) case *outgoingSettings: return l.outgoingSettingsHandler(i) case *headerFrame: return l.headerHandler(i) case *registerStream: return l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: return l.preprocessData(i) case *ping: return l.pingHandler(i) case *goAway: return l.goAwayHandler(i) case *outFlowControlSizeRequest: return l.outFlowControlSizeRequestHandler(i) default: return fmt.Errorf("transport: unknown control message type %T", i) } } func (l *loopyWriter) applySettings(ss []http2.Setting) error { for _, s := range ss { switch s.ID { case http2.SettingInitialWindowSize: o := l.oiws l.oiws = s.Val if o < l.oiws { // If the new limit is greater make all depleted streams active. 
for _, stream := range l.estdStreams { if stream.state == waitingOnStreamQuota { stream.state = active l.activeStreams.enqueue(stream) } } } case http2.SettingHeaderTableSize: updateHeaderTblSize(l.hEnc, s.Val) } } return nil } // processData removes the first stream from active streams, writes out at most 16KB // of its data and then puts it at the end of activeStreams if there's still more data // to be sent and stream has some stream-level flow control. func (l *loopyWriter) processData() (bool, error) { if l.sendQuota == 0 { return true, nil } str := l.activeStreams.dequeue() // Remove the first stream. if str == nil { return true, nil } dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the // maximum possilbe HTTP2 frame size. if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { return false, err } if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { return false, nil } } else { l.activeStreams.enqueue(str) } return false, nil } var ( idx int buf []byte ) if len(dataItem.h) != 0 { // data header has not been written out yet. 
buf = dataItem.h } else { idx = 1 buf = dataItem.d } size := http2MaxFrameLen if len(buf) < size { size = len(buf) } if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. str.state = waitingOnStreamQuota return false, nil } else if strQuota < size { size = strQuota } if l.sendQuota < uint32(size) { // connection-level flow control. size = int(l.sendQuota) } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. if dataItem.endStream && size == len(buf) { // buf contains either data or it contains header but data is empty. if idx == 1 || len(dataItem.d) == 0 { endStream = true } } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { return false, err } buf = buf[size:] str.bytesOutStanding += size l.sendQuota -= uint32(size) if idx == 0 { dataItem.h = buf } else { dataItem.d = buf } if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. str.itl.dequeue() } if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { return false, err } if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { return false, err } } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. str.state = waitingOnStreamQuota } else { // Otherwise add it back to the list of active streams. l.activeStreams.enqueue(str) } return false, nil } grpc-go-1.29.1/internal/transport/defaults.go000066400000000000000000000032301365033716300211550ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package transport import ( "math" "time" ) const ( // The default value of flow control window size in HTTP2 spec. defaultWindowSize = 65535 // The initial window size for flow control. initialWindowSize = defaultWindowSize // for an RPC infinity = time.Duration(math.MaxInt64) defaultClientKeepaliveTime = infinity defaultClientKeepaliveTimeout = 20 * time.Second defaultMaxStreamsClient = 100 defaultMaxConnectionIdle = infinity defaultMaxConnectionAge = infinity defaultMaxConnectionAgeGrace = infinity defaultServerKeepaliveTime = 2 * time.Hour defaultServerKeepaliveTimeout = 20 * time.Second defaultKeepalivePolicyMinTime = 5 * time.Minute // max window limit set by HTTP2 Specs. maxWindowSize = math.MaxInt32 // defaultWriteQuota is the default value for number of data // bytes that each stream can schedule before some of it being // flushed out. defaultWriteQuota = 64 * 1024 defaultClientMaxHeaderListSize = uint32(16 << 20) defaultServerMaxHeaderListSize = uint32(16 << 20) ) grpc-go-1.29.1/internal/transport/flowcontrol.go000066400000000000000000000131541365033716300217240ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package transport import ( "fmt" "math" "sync" "sync/atomic" ) // writeQuota is a soft limit on the amount of data a stream can // schedule before some of it is written out. type writeQuota struct { quota int32 // get waits on read from when quota goes less than or equal to zero. // replenish writes on it when quota goes positive again. ch chan struct{} // done is triggered in error case. done <-chan struct{} // replenish is called by loopyWriter to give quota back to. // It is implemented as a field so that it can be updated // by tests. replenish func(n int) } func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { w := &writeQuota{ quota: sz, ch: make(chan struct{}, 1), done: done, } w.replenish = w.realReplenish return w } func (w *writeQuota) get(sz int32) error { for { if atomic.LoadInt32(&w.quota) > 0 { atomic.AddInt32(&w.quota, -sz) return nil } select { case <-w.ch: continue case <-w.done: return errStreamDone } } } func (w *writeQuota) realReplenish(n int) { sz := int32(n) a := atomic.AddInt32(&w.quota, sz) b := a - sz if b <= 0 && a > 0 { select { case w.ch <- struct{}{}: default: } } } type trInFlow struct { limit uint32 unacked uint32 effectiveWindowSize uint32 } func (f *trInFlow) newLimit(n uint32) uint32 { d := n - f.limit f.limit = n f.updateEffectiveWindowSize() return d } func (f *trInFlow) onData(n uint32) uint32 { f.unacked += n if f.unacked >= f.limit/4 { w := f.unacked f.unacked = 0 f.updateEffectiveWindowSize() return w } f.updateEffectiveWindowSize() return 0 } func (f *trInFlow) reset() uint32 { w := f.unacked f.unacked = 0 
f.updateEffectiveWindowSize() return w } func (f *trInFlow) updateEffectiveWindowSize() { atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked) } func (f *trInFlow) getSize() uint32 { return atomic.LoadUint32(&f.effectiveWindowSize) } // TODO(mmukhi): Simplify this code. // inFlow deals with inbound flow control type inFlow struct { mu sync.Mutex // The inbound flow control limit for pending data. limit uint32 // pendingData is the overall data which have been received but not been // consumed by applications. pendingData uint32 // The amount of data the application has consumed but grpc has not sent // window update for them. Used to reduce window update frequency. pendingUpdate uint32 // delta is the extra window update given by receiver when an application // is reading data bigger in size than the inFlow limit. delta uint32 } // newLimit updates the inflow window to a new value n. // It assumes that n is always greater than the old limit. func (f *inFlow) newLimit(n uint32) uint32 { f.mu.Lock() d := n - f.limit f.limit = n f.mu.Unlock() return d } func (f *inFlow) maybeAdjust(n uint32) uint32 { if n > uint32(math.MaxInt32) { n = uint32(math.MaxInt32) } f.mu.Lock() defer f.mu.Unlock() // estSenderQuota is the receiver's view of the maximum number of bytes the sender // can send without a window update. estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) // estUntransmittedData is the maximum number of bytes the sends might not have put // on the wire yet. A value of 0 or less means that we have already received all or // more bytes than the application is requesting to read. estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. // This implies that unless we send a window update, the sender won't be able to send all the bytes // for this message. Therefore we must send an update over the limit since there's an active read // request from the application. 
if estUntransmittedData > estSenderQuota { // Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec. if f.limit+n > maxWindowSize { f.delta = maxWindowSize - f.limit } else { // Send a window update for the whole message and not just the difference between // estUntransmittedData and estSenderQuota. This will be helpful in case the message // is padded; We will fallback on the current available window(at least a 1/4th of the limit). f.delta = n } return f.delta } return 0 } // onData is invoked when some data frame is received. It updates pendingData. func (f *inFlow) onData(n uint32) error { f.mu.Lock() f.pendingData += n if f.pendingData+f.pendingUpdate > f.limit+f.delta { limit := f.limit rcvd := f.pendingData + f.pendingUpdate f.mu.Unlock() return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) } f.mu.Unlock() return nil } // onRead is invoked when the application reads the data. It returns the window size // to be sent to the peer. func (f *inFlow) onRead(n uint32) uint32 { f.mu.Lock() if f.pendingData == 0 { f.mu.Unlock() return 0 } f.pendingData -= n if n > f.delta { n -= f.delta f.delta = 0 } else { f.delta -= n n = 0 } f.pendingUpdate += n if f.pendingUpdate >= f.limit/4 { wu := f.pendingUpdate f.pendingUpdate = 0 f.mu.Unlock() return wu } f.mu.Unlock() return 0 } grpc-go-1.29.1/internal/transport/handler_server.go000066400000000000000000000314071365033716300223600ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ // This file is the implementation of a gRPC server using HTTP/2 which // uses the standard Go http2 Server implementation (via the // http.Handler interface), rather than speaking low-level HTTP/2 // frames itself. It is the implementation of *grpc.Server.ServeHTTP. package transport import ( "bytes" "context" "errors" "fmt" "io" "net" "net/http" "strings" "sync" "time" "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // NewServerHandlerTransport returns a ServerTransport handling gRPC // from inside an http.Handler. It requires that the http Server // supports HTTP/2. func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { return nil, errors.New("gRPC requires HTTP/2") } if r.Method != "POST" { return nil, errors.New("invalid gRPC request method") } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? 
we did before contentSubtype, validContentType := contentSubtype(contentType) if !validContentType { return nil, errors.New("invalid gRPC request content-type") } if _, ok := w.(http.Flusher); !ok { return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), contentType: contentType, contentSubtype: contentSubtype, stats: stats, } if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) } st.timeoutSet = true st.timeout = to } metakv := []string{"content-type", contentType} if r.Host != "" { metakv = append(metakv, ":authority", r.Host) } for k, vv := range r.Header { k = strings.ToLower(k) if isReservedHeader(k) && !isWhitelistedHeader(k) { continue } for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) } metakv = append(metakv, k, v) } } st.headerMD = metadata.Pairs(metakv...) return st, nil } // serverHandlerTransport is an implementation of ServerTransport // which replies to exactly one gRPC request (exactly one HTTP request), // using the net/http.Handler interface. This http.Handler is guaranteed // at this point to be speaking over HTTP/2, so it's able to speak valid // gRPC. type serverHandlerTransport struct { rw http.ResponseWriter req *http.Request timeoutSet bool timeout time.Duration headerMD metadata.MD closeOnce sync.Once closedCh chan struct{} // closed on Close // writes is a channel of code to run serialized in the // ServeHTTP (HandleStreams) goroutine. The channel is closed // when WriteStatus is called. writes chan func() // block concurrent WriteStatus calls // e.g. 
grpc/(*serverStream).SendMsg/RecvMsg writeStatusMu sync.Mutex // we just mirror the request content-type contentType string // we store both contentType and contentSubtype so we don't keep recreating them // TODO make sure this is consistent across handler_server and http2_server contentSubtype string stats stats.Handler } func (ht *serverHandlerTransport) Close() error { ht.closeOnce.Do(ht.closeCloseChanOnce) return nil } func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown. type strAddr string func (a strAddr) Network() string { if a != "" { // Per the documentation on net/http.Request.RemoteAddr, if this is // set, it's set to the IP:port of the peer (hence, TCP): // https://golang.org/pkg/net/http/#Request // // If we want to support Unix sockets later, we can // add our own grpc-specific convention within the // grpc codebase to set RemoteAddr to a different // format, or probably better: we can attach it to the // context and use that from serverHandlerTransport.RemoteAddr. return "tcp" } return "" } func (a strAddr) String() string { return string(a) } // do runs fn in the ServeHTTP goroutine. func (ht *serverHandlerTransport) do(fn func()) error { select { case <-ht.closedCh: return ErrConnClosing case ht.writes <- fn: return nil } } func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() headersWritten := s.updateHeaderSent() err := ht.do(func() { if !headersWritten { ht.writePendingHeaders(s) } // And flush, in case no header or body has been sent yet. // This forces a separation of headers and trailers if this is the // first call (for example, in end2end tests's TestNoService). 
ht.rw.(http.Flusher).Flush() h := ht.rw.Header() h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) if m := st.Message(); m != "" { h.Set("Grpc-Message", encodeGrpcMessage(m)) } if p := st.Proto(); p != nil && len(p.Details) > 0 { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. panic(err) } h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) } if md := s.Trailer(); len(md) > 0 { for k, vv := range md { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. if isReservedHeader(k) { continue } for _, v := range vv { // http2 ResponseWriter mechanism to send undeclared Trailers after // the headers have possibly been written. h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) } } } }) if err == nil { // transport has not been closed if ht.stats != nil { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } } ht.Close() return err } // writePendingHeaders sets common and custom headers on the first // write call (Write, WriteHeader, or WriteStatus) func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { ht.writeCommonHeaders(s) ht.writeCustomHeaders(s) } // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) // Predeclare trailers we'll set later in WriteStatus (after the body). // This is a SHOULD in the HTTP RFC, and the way you add (known) // Trailers per the net/http.ResponseWriter contract. 
// See https://golang.org/pkg/net/http/#ResponseWriter // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers h.Add("Trailer", "Grpc-Status") h.Add("Trailer", "Grpc-Message") h.Add("Trailer", "Grpc-Status-Details-Bin") if s.sendCompress != "" { h.Set("Grpc-Encoding", s.sendCompress) } } // writeCustomHeaders sets custom headers set on the stream via SetHeader // on the first write call (Write, WriteHeader, or WriteStatus). func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() s.hdrMu.Lock() for k, vv := range s.header { if isReservedHeader(k) { continue } for _, v := range vv { h.Add(k, encodeMetadataHeader(k, v)) } } s.hdrMu.Unlock() } func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { headersWritten := s.updateHeaderSent() return ht.do(func() { if !headersWritten { ht.writePendingHeaders(s) } ht.rw.Write(hdr) ht.rw.Write(data) ht.rw.(http.Flusher).Flush() }) } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { if err := s.SetHeader(md); err != nil { return err } headersWritten := s.updateHeaderSent() err := ht.do(func() { if !headersWritten { ht.writePendingHeaders(s) } ht.rw.WriteHeader(200) ht.rw.(http.Flusher).Flush() }) if err == nil { if ht.stats != nil { // Note: The header fields are compressed with hpack after this call returns. // No WireLength field is set here. ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ Header: md.Copy(), Compression: s.sendCompress, }) } } return err } func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { // With this transport type there will be exactly 1 stream: this HTTP request. ctx := ht.req.Context() var cancel context.CancelFunc if ht.timeoutSet { ctx, cancel = context.WithTimeout(ctx, ht.timeout) } else { ctx, cancel = context.WithCancel(ctx) } // requestOver is closed when the status has been written via WriteStatus. 
requestOver := make(chan struct{}) go func() { select { case <-requestOver: case <-ht.closedCh: case <-ht.req.Context().Done(): } cancel() ht.Close() }() req := ht.req s := &Stream{ id: 0, // irrelevant requestRead: func(int) {}, cancel: cancel, buf: newRecvBuffer(), st: ht, method: req.URL.Path, recvCompress: req.Header.Get("grpc-encoding"), contentSubtype: ht.contentSubtype, } pr := &peer.Peer{ Addr: ht.RemoteAddr(), } if req.TLS != nil { pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) if ht.stats != nil { s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: ht.RemoteAddr(), Compression: s.recvCompress, } ht.stats.HandleRPC(s.ctx, inHeader) } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, windowHandler: func(int) {}, } // readerDone is closed when the Body.Read-ing goroutine exits. readerDone := make(chan struct{}) go func() { defer close(readerDone) // TODO: minimize garbage, optimize recvBuffer code/ownership const readSize = 8196 for buf := make([]byte, readSize); ; { n, err := req.Body.Read(buf) if n > 0 { s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) buf = buf[n:] } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } if len(buf) == 0 { buf = make([]byte, readSize) } } }() // startStream is provided by the *grpc.Server's serveStreams. // It starts a goroutine serving s and exits immediately. // The goroutine that is started is the one that then calls // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. startStream(s) ht.runStream() close(requestOver) // Wait for reading goroutine to finish. 
req.Body.Close() <-readerDone } func (ht *serverHandlerTransport) runStream() { for { select { case fn := <-ht.writes: fn() case <-ht.closedCh: return } } } func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} func (ht *serverHandlerTransport) Drain() { panic("Drain() is not implemented") } // mapRecvMsgError returns the non-nil err into the appropriate // error value as expected by callers of *grpc.parser.recvMsg. // In particular, in can only be: // * io.EOF // * io.ErrUnexpectedEOF // * of type transport.ConnectionError // * an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err } if se, ok := err.(http2.StreamError); ok { if code, ok := http2ErrConvTab[se.Code]; ok { return status.Error(code, se.Error()) } } if strings.Contains(err.Error(), "body closed by handler") { return status.Error(codes.Canceled, err.Error()) } return connectionErrorf(true, err, err.Error()) } grpc-go-1.29.1/internal/transport/handler_server_test.go000066400000000000000000000345011365033716300234150ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package transport import ( "context" "errors" "fmt" "io" "net/http" "net/http/httptest" "net/url" "reflect" "sync" "testing" "time" "github.com/golang/protobuf/proto" dpb "github.com/golang/protobuf/ptypes/duration" epb "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { type testCase struct { name string req *http.Request wantErr string modrw func(http.ResponseWriter) http.ResponseWriter check func(*serverHandlerTransport, *testCase) error } tests := []testCase{ { name: "http/1.1", req: &http.Request{ ProtoMajor: 1, ProtoMinor: 1, }, wantErr: "gRPC requires HTTP/2", }, { name: "bad method", req: &http.Request{ ProtoMajor: 2, Method: "GET", Header: http.Header{}, RequestURI: "/", }, wantErr: "invalid gRPC request method", }, { name: "bad content type", req: &http.Request{ ProtoMajor: 2, Method: "POST", Header: http.Header{ "Content-Type": {"application/foo"}, }, RequestURI: "/service/foo.bar", }, wantErr: "invalid gRPC request content-type", }, { name: "not flusher", req: &http.Request{ ProtoMajor: 2, Method: "POST", Header: http.Header{ "Content-Type": {"application/grpc"}, }, RequestURI: "/service/foo.bar", }, modrw: func(w http.ResponseWriter) http.ResponseWriter { // Return w without its Flush method type onlyCloseNotifier interface { http.ResponseWriter http.CloseNotifier } return struct{ onlyCloseNotifier }{w.(onlyCloseNotifier)} }, wantErr: "gRPC requires a ResponseWriter supporting http.Flusher", }, { name: "valid", req: &http.Request{ ProtoMajor: 2, Method: "POST", Header: http.Header{ "Content-Type": {"application/grpc"}, }, URL: &url.URL{ Path: "/service/foo.bar", }, RequestURI: "/service/foo.bar", }, check: func(t *serverHandlerTransport, tt *testCase) error { if t.req != tt.req { return fmt.Errorf("t.req = %p; want %p", t.req, tt.req) } if t.rw == nil { return errors.New("t.rw 
= nil; want non-nil") } return nil }, }, { name: "with timeout", req: &http.Request{ ProtoMajor: 2, Method: "POST", Header: http.Header{ "Content-Type": []string{"application/grpc"}, "Grpc-Timeout": {"200m"}, }, URL: &url.URL{ Path: "/service/foo.bar", }, RequestURI: "/service/foo.bar", }, check: func(t *serverHandlerTransport, tt *testCase) error { if !t.timeoutSet { return errors.New("timeout not set") } if want := 200 * time.Millisecond; t.timeout != want { return fmt.Errorf("timeout = %v; want %v", t.timeout, want) } return nil }, }, { name: "with bad timeout", req: &http.Request{ ProtoMajor: 2, Method: "POST", Header: http.Header{ "Content-Type": []string{"application/grpc"}, "Grpc-Timeout": {"tomorrow"}, }, URL: &url.URL{ Path: "/service/foo.bar", }, RequestURI: "/service/foo.bar", }, wantErr: `rpc error: code = Internal desc = malformed time-out: transport: timeout unit is not recognized: "tomorrow"`, }, { name: "with metadata", req: &http.Request{ ProtoMajor: 2, Method: "POST", Header: http.Header{ "Content-Type": []string{"application/grpc"}, "meta-foo": {"foo-val"}, "meta-bar": {"bar-val1", "bar-val2"}, "user-agent": {"x/y a/b"}, }, URL: &url.URL{ Path: "/service/foo.bar", }, RequestURI: "/service/foo.bar", }, check: func(ht *serverHandlerTransport, tt *testCase) error { want := metadata.MD{ "meta-bar": {"bar-val1", "bar-val2"}, "user-agent": {"x/y a/b"}, "meta-foo": {"foo-val"}, "content-type": {"application/grpc"}, } if !reflect.DeepEqual(ht.headerMD, want) { return fmt.Errorf("metdata = %#v; want %#v", ht.headerMD, want) } return nil }, }, } for _, tt := range tests { rw := newTestHandlerResponseWriter() if tt.modrw != nil { rw = tt.modrw(rw) } got, gotErr := NewServerHandlerTransport(rw, tt.req, nil) if (gotErr != nil) != (tt.wantErr != "") || (gotErr != nil && gotErr.Error() != tt.wantErr) { t.Errorf("%s: error = %q; want %q", tt.name, gotErr.Error(), tt.wantErr) continue } if gotErr != nil { continue } if tt.check != nil { if err := 
tt.check(got.(*serverHandlerTransport), &tt); err != nil { t.Errorf("%s: %v", tt.name, err) } } } } type testHandlerResponseWriter struct { *httptest.ResponseRecorder closeNotify chan bool } func (w testHandlerResponseWriter) CloseNotify() <-chan bool { return w.closeNotify } func (w testHandlerResponseWriter) Flush() {} func newTestHandlerResponseWriter() http.ResponseWriter { return testHandlerResponseWriter{ ResponseRecorder: httptest.NewRecorder(), closeNotify: make(chan bool, 1), } } type handleStreamTest struct { t *testing.T bodyw *io.PipeWriter rw testHandlerResponseWriter ht *serverHandlerTransport } func newHandleStreamTest(t *testing.T) *handleStreamTest { bodyr, bodyw := io.Pipe() req := &http.Request{ ProtoMajor: 2, Method: "POST", Header: http.Header{ "Content-Type": {"application/grpc"}, }, URL: &url.URL{ Path: "/service/foo.bar", }, RequestURI: "/service/foo.bar", Body: bodyr, } rw := newTestHandlerResponseWriter().(testHandlerResponseWriter) ht, err := NewServerHandlerTransport(rw, req, nil) if err != nil { t.Fatal(err) } return &handleStreamTest{ t: t, bodyw: bodyw, ht: ht.(*serverHandlerTransport), rw: rw, } } func (s) TestHandlerTransport_HandleStreams(t *testing.T) { st := newHandleStreamTest(t) handleStream := func(s *Stream) { if want := "/service/foo.bar"; s.method != want { t.Errorf("stream method = %q; want %q", s.method, want) } err := s.SetHeader(metadata.Pairs("custom-header", "Custom header value")) if err != nil { t.Error(err) } err = s.SetTrailer(metadata.Pairs("custom-trailer", "Custom trailer value")) if err != nil { t.Error(err) } md := metadata.Pairs("custom-header", "Another custom header value") err = s.SendHeader(md) delete(md, "custom-header") if err != nil { t.Error(err) } err = s.SetHeader(metadata.Pairs("too-late", "Header value that should be ignored")) if err == nil { t.Error("expected SetHeader call after SendHeader to fail") } err = s.SendHeader(metadata.Pairs("too-late", "This header value should be ignored as well")) 
if err == nil { t.Error("expected second SendHeader call to fail") } st.bodyw.Close() // no body st.ht.WriteStatus(s, status.New(codes.OK, "")) } st.ht.HandleStreams( func(s *Stream) { go handleStream(s) }, func(ctx context.Context, method string) context.Context { return ctx }, ) wantHeader := http.Header{ "Date": {}, "Content-Type": {"application/grpc"}, "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, "Custom-Header": {"Custom header value", "Another custom header value"}, } wantTrailer := http.Header{ "Grpc-Status": {"0"}, "Custom-Trailer": {"Custom trailer value"}, } checkHeaderAndTrailer(t, st.rw, wantHeader, wantTrailer) } // Tests that codes.Unimplemented will close the body, per comment in handler_server.go. func (s) TestHandlerTransport_HandleStreams_Unimplemented(t *testing.T) { handleStreamCloseBodyTest(t, codes.Unimplemented, "thingy is unimplemented") } // Tests that codes.InvalidArgument will close the body, per comment in handler_server.go. func (s) TestHandlerTransport_HandleStreams_InvalidArgument(t *testing.T) { handleStreamCloseBodyTest(t, codes.InvalidArgument, "bad arg") } func handleStreamCloseBodyTest(t *testing.T, statusCode codes.Code, msg string) { st := newHandleStreamTest(t) handleStream := func(s *Stream) { st.ht.WriteStatus(s, status.New(statusCode, msg)) } st.ht.HandleStreams( func(s *Stream) { go handleStream(s) }, func(ctx context.Context, method string) context.Context { return ctx }, ) wantHeader := http.Header{ "Date": {}, "Content-Type": {"application/grpc"}, "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, } wantTrailer := http.Header{ "Grpc-Status": {fmt.Sprint(uint32(statusCode))}, "Grpc-Message": {encodeGrpcMessage(msg)}, } checkHeaderAndTrailer(t, st.rw, wantHeader, wantTrailer) } func (s) TestHandlerTransport_HandleStreams_Timeout(t *testing.T) { bodyr, bodyw := io.Pipe() req := &http.Request{ ProtoMajor: 2, Method: "POST", Header: http.Header{ "Content-Type": 
{"application/grpc"}, "Grpc-Timeout": {"200m"}, }, URL: &url.URL{ Path: "/service/foo.bar", }, RequestURI: "/service/foo.bar", Body: bodyr, } rw := newTestHandlerResponseWriter().(testHandlerResponseWriter) ht, err := NewServerHandlerTransport(rw, req, nil) if err != nil { t.Fatal(err) } runStream := func(s *Stream) { defer bodyw.Close() select { case <-s.ctx.Done(): case <-time.After(5 * time.Second): t.Errorf("timeout waiting for ctx.Done") return } err := s.ctx.Err() if err != context.DeadlineExceeded { t.Errorf("ctx.Err = %v; want %v", err, context.DeadlineExceeded) return } ht.WriteStatus(s, status.New(codes.DeadlineExceeded, "too slow")) } ht.HandleStreams( func(s *Stream) { go runStream(s) }, func(ctx context.Context, method string) context.Context { return ctx }, ) wantHeader := http.Header{ "Date": {}, "Content-Type": {"application/grpc"}, "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, } wantTrailer := http.Header{ "Grpc-Status": {"4"}, "Grpc-Message": {encodeGrpcMessage("too slow")}, } checkHeaderAndTrailer(t, rw, wantHeader, wantTrailer) } // TestHandlerTransport_HandleStreams_MultiWriteStatus ensures that // concurrent "WriteStatus"s do not panic writing to closed "writes" channel. func (s) TestHandlerTransport_HandleStreams_MultiWriteStatus(t *testing.T) { testHandlerTransportHandleStreams(t, func(st *handleStreamTest, s *Stream) { if want := "/service/foo.bar"; s.method != want { t.Errorf("stream method = %q; want %q", s.method, want) } st.bodyw.Close() // no body var wg sync.WaitGroup wg.Add(5) for i := 0; i < 5; i++ { go func() { defer wg.Done() st.ht.WriteStatus(s, status.New(codes.OK, "")) }() } wg.Wait() }) } // TestHandlerTransport_HandleStreams_WriteStatusWrite ensures that "Write" // following "WriteStatus" does not panic writing to closed "writes" channel. 
func (s) TestHandlerTransport_HandleStreams_WriteStatusWrite(t *testing.T) { testHandlerTransportHandleStreams(t, func(st *handleStreamTest, s *Stream) { if want := "/service/foo.bar"; s.method != want { t.Errorf("stream method = %q; want %q", s.method, want) } st.bodyw.Close() // no body st.ht.WriteStatus(s, status.New(codes.OK, "")) st.ht.Write(s, []byte("hdr"), []byte("data"), &Options{}) }) } func testHandlerTransportHandleStreams(t *testing.T, handleStream func(st *handleStreamTest, s *Stream)) { st := newHandleStreamTest(t) st.ht.HandleStreams( func(s *Stream) { go handleStream(st, s) }, func(ctx context.Context, method string) context.Context { return ctx }, ) } func (s) TestHandlerTransport_HandleStreams_ErrDetails(t *testing.T) { errDetails := []proto.Message{ &epb.RetryInfo{ RetryDelay: &dpb.Duration{Seconds: 60}, }, &epb.ResourceInfo{ ResourceType: "foo bar", ResourceName: "service.foo.bar", Owner: "User", }, } statusCode := codes.ResourceExhausted msg := "you are being throttled" st, err := status.New(statusCode, msg).WithDetails(errDetails...) if err != nil { t.Fatal(err) } stBytes, err := proto.Marshal(st.Proto()) if err != nil { t.Fatal(err) } hst := newHandleStreamTest(t) handleStream := func(s *Stream) { hst.ht.WriteStatus(s, st) } hst.ht.HandleStreams( func(s *Stream) { go handleStream(s) }, func(ctx context.Context, method string) context.Context { return ctx }, ) wantHeader := http.Header{ "Date": {}, "Content-Type": {"application/grpc"}, "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, } wantTrailer := http.Header{ "Grpc-Status": {fmt.Sprint(uint32(statusCode))}, "Grpc-Message": {encodeGrpcMessage(msg)}, "Grpc-Status-Details-Bin": {encodeBinHeader(stBytes)}, } checkHeaderAndTrailer(t, hst.rw, wantHeader, wantTrailer) } // checkHeaderAndTrailer checks that the resulting header and trailer matches the expectation. 
func checkHeaderAndTrailer(t *testing.T, rw testHandlerResponseWriter, wantHeader, wantTrailer http.Header) { // For trailer-only responses, the trailer values might be reported as part of the Header. They will however // be present in Trailer in either case. Hence, normalize the header by removing all trailer values. actualHeader := cloneHeader(rw.Result().Header) for _, trailerKey := range actualHeader["Trailer"] { actualHeader.Del(trailerKey) } if !reflect.DeepEqual(actualHeader, wantHeader) { t.Errorf("Header mismatch.\n got: %#v\n want: %#v", actualHeader, wantHeader) } if actualTrailer := rw.Result().Trailer; !reflect.DeepEqual(actualTrailer, wantTrailer) { t.Errorf("Trailer mismatch.\n got: %#v\n want: %#v", actualTrailer, wantTrailer) } } // cloneHeader performs a deep clone of an http.Header, since the (http.Header).Clone() method was only added in // Go 1.13. func cloneHeader(hdr http.Header) http.Header { if hdr == nil { return nil } hdrClone := make(http.Header, len(hdr)) for k, vv := range hdr { vvClone := make([]string, len(vv)) copy(vvClone, vv) hdrClone[k] = vvClone } return hdrClone } grpc-go-1.29.1/internal/transport/http2_client.go000066400000000000000000001315041365033716300217530ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package transport import ( "context" "fmt" "io" "math" "net" "strconv" "strings" "sync" "sync/atomic" "time" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // clientConnectionCounter counts the number of connections a client has // initiated (equal to the number of http2Clients created). Must be accessed // atomically. var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. ctx context.Context cancel context.CancelFunc ctxDone <-chan struct{} // Cache the ctx.Done() chan. userAgent string md interface{} conn net.Conn // underlying communication channel loopy *loopyWriter remoteAddr net.Addr localAddr net.Addr authInfo credentials.AuthInfo // auth info about the connection readerDone chan struct{} // sync point to enable testing. writerDone chan struct{} // sync point to enable testing. // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. goAway chan struct{} framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow // The scheme used: https if TLS is on, http otherwise. 
scheme string isSecure bool perRPCCreds []credentials.PerRPCCredentials kp keepalive.ClientParameters keepaliveEnabled bool statsHandler stats.Handler initialWindowSize int32 // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE maxSendHeaderListSize *uint32 bdpEst *bdpEstimator // onPrefaceReceipt is a callback that client transport calls upon // receiving server preface to signal that a succefull HTTP2 // connection was established. onPrefaceReceipt func() maxConcurrentStreams uint32 streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 nextID uint32 mu sync.Mutex // guard the following variables state transportState activeStreams map[uint32]*Stream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the // GoAway frame. goAwayReason GoAwayReason // A condition variable used to signal when the keepalive goroutine should // go dormant. The condition for dormancy is based on the number of active // streams and the `PermitWithoutStream` keepalive client parameter. And // since the number of active streams is guarded by the above mutex, we use // the same for this condition variable as well. kpDormancyCond *sync.Cond // A boolean to track whether the keepalive goroutine is dormant or not. // This is checked before attempting to signal the above condition // variable. kpDormant bool // Fields below are for channelz metric collection. 
channelzID int64 // channelz unique identification number czData *channelzData onGoAway func(GoAwayReason) onClose func() bufferPool *bufferPool connectionID uint64 } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { if fn != nil { return fn(ctx, addr) } return (&net.Dialer{}).DialContext(ctx, "tcp", addr) } func isTemporary(err error) bool { switch err := err.(type) { case interface { Temporary() bool }: return err.Temporary() case interface { Timeout() bool }: // Timeouts may be resolved upon retry, and are thus treated as // temporary. return err.Timeout() } return true } // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { if err != nil { cancel() } }() conn, err := dial(connectCtx, opts.Dialer, addr.Addr) if err != nil { if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) } // Any further errors will close the underlying connection defer func(conn net.Conn) { if err != nil { conn.Close() } }(conn) kp := opts.KeepaliveParams // Validate keepalive parameters. 
if kp.Time == 0 { kp.Time = defaultClientKeepaliveTime } if kp.Timeout == 0 { kp.Timeout = defaultClientKeepaliveTimeout } keepaliveEnabled := false if kp.Time != infinity { if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } keepaliveEnabled = true } var ( isSecure bool authInfo credentials.AuthInfo ) transportCreds := opts.TransportCredentials perRPCCreds := opts.PerRPCCredentials if b := opts.CredsBundle; b != nil { if t := b.TransportCredentials(); t != nil { transportCreds = t } if t := b.PerRPCCredentials(); t != nil { perRPCCreds = append(perRPCCreds, t) } } if transportCreds != nil { scheme = "https" conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } isSecure = true } dynamicWindow := true icwz := int32(initialWindowSize) if opts.InitialConnWindowSize >= defaultWindowSize { icwz = opts.InitialConnWindowSize dynamicWindow = false } writeBufSize := opts.WriteBufferSize readBufSize := opts.ReadBufferSize maxHeaderListSize := defaultClientMaxHeaderListSize if opts.MaxHeaderListSize != nil { maxHeaderListSize = *opts.MaxHeaderListSize } t := &http2Client{ ctx: ctx, ctxDone: ctx.Done(), // Cache Done chan. 
cancel: cancel, userAgent: opts.UserAgent, md: addr.Metadata, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), authInfo: authInfo, readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, statsHandler: opts.StatsHandler, initialWindowSize: initialWindowSize, onPrefaceReceipt: onPrefaceReceipt, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), onGoAway: onGoAway, onClose: onClose, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), } t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { t.initialWindowSize = opts.InitialWindowSize dynamicWindow = false } if dynamicWindow { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, updateFlowControl: t.updateFlowControl, } } if t.statsHandler != nil { t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{ Client: true, } t.statsHandler.HandleConn(t.ctx, connBegin) } if channelz.IsOn() { t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } // Start the reader goroutine for incoming message. Each transport has // a dedicated goroutine which reads HTTP2 frame from network. Then it // dispatches the frame to the corresponding stream entity. go t.reader() // Send connection preface to server. 
n, err := t.conn.Write(clientPreface) if err != nil { t.Close() return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) } if n != len(clientPreface) { t.Close() return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) } var ss []http2.Setting if t.initialWindowSize != defaultWindowSize { ss = append(ss, http2.Setting{ ID: http2.SettingInitialWindowSize, Val: uint32(t.initialWindowSize), }) } if opts.MaxHeaderListSize != nil { ss = append(ss, http2.Setting{ ID: http2.SettingMaxHeaderListSize, Val: *opts.MaxHeaderListSize, }) } err = t.framer.fr.WriteSettings(ss...) if err != nil { t.Close() return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) } // Adjust the connection flow control window if needed. if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { t.Close() return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) } } t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) if err := t.framer.writer.Flush(); err != nil { return nil, err } go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() if err != nil { errorf("transport: loopyWriter.run returning. Err: %v", err) } // If it's a connection error, let reader goroutine handle it // since there might be data in the buffers. if _, ok := err.(net.Error); !ok { t.conn.Close() } close(t.writerDone) }() return t, nil } func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
s := &Stream{ ct: t, done: make(chan struct{}), method: callHdr.Method, sendCompress: callHdr.SendCompress, buf: newRecvBuffer(), headerChan: make(chan struct{}), contentSubtype: callHdr.ContentSubtype, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } // The client side stream context should have exactly the same life cycle with the user provided context. // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. // So we use the original context here instead of creating a copy. s.ctx = ctx s.trReader = &transportReader{ reader: &recvBufferReader{ ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, closeStream: func(err error) { t.CloseStream(s, err) }, freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) }, } return s } func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ Addr: t.remoteAddr, AuthInfo: t.authInfo, } } func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { aud := t.createAudience(callHdr) ri := credentials.RequestInfo{ Method: callHdr.Method, AuthInfo: t.authInfo, } ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err } callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) if err != nil { return nil, err } // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. // Make the slice of certain predictable size to reduce allocations made by append. 
hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te hfLen += len(authData) + len(callAuthData) headerFields := make([]hpack.HeaderField, 0, hfLen) headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) if callHdr.PreviousAttempts > 0 { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. 
timeout := time.Until(dl) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) } for k, v := range authData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } for k, v := range callAuthData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } if b := stats.OutgoingTags(ctx); b != nil { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) } if b := stats.OutgoingTrace(ctx); b != nil { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { var k string for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. if isReservedHeader(k) { continue } for _, v := range vv { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } for _, vv := range added { for i, v := range vv { if i%2 == 0 { k = v continue } // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. if isReservedHeader(k) { continue } headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) } } } if md, ok := t.md.(*metadata.MD); ok { for k, vv := range *md { if isReservedHeader(k) { continue } for _, v := range vv { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } } return headerFields, nil } func (t *http2Client) createAudience(callHdr *CallHdr) string { // Create an audience string only if needed. if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { return "" } // Construct URI required to get auth request metadata. // Omit port if it is the default one. 
host := strings.TrimSuffix(callHdr.Host, ":443") pos := strings.LastIndex(callHdr.Method, "/") if pos == -1 { pos = len(callHdr.Method) } return "https://" + host + callHdr.Method[:pos] } func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { if len(t.perRPCCreds) == 0 { return nil, nil } authData := map[string]string{} for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { if _, ok := status.FromError(err); ok { return nil, err } return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) } for k, v := range data { // Capital header names are illegal in HTTP/2. k = strings.ToLower(k) authData[k] = v } } return authData, nil } func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { var callAuthData map[string]string // Check if credentials.PerRPCCredentials were provided via call options. // Note: if these credentials are provided both via dial options and call // options, then both sets of credentials will be applied. if callCreds := callHdr.Creds; callCreds != nil { if !t.isSecure && callCreds.RequireTransportSecurity() { return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { return nil, status.Errorf(codes.Internal, "transport: %v", err) } callAuthData = make(map[string]string, len(data)) for k, v := range data { // Capital header names are illegal in HTTP/2 k = strings.ToLower(k) callAuthData[k] = v } } return callAuthData, nil } // NewStream creates a stream and registers it into the transport as "active" // streams. 
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
	ctx = peer.NewContext(ctx, t.getPeer())
	headerFields, err := t.createHeaderFields(ctx, callHdr)
	if err != nil {
		return nil, err
	}
	s := t.newStream(ctx, callHdr)
	// cleanup tears down a stream that never made it into activeStreams
	// (transport draining/closing, or the headerFrame was orphaned).
	cleanup := func(err error) {
		if s.swapState(streamDone) == streamDone {
			// If it was already done, return.
			return
		}
		// The stream was unprocessed by the server.
		atomic.StoreUint32(&s.unprocessed, 1)
		s.write(recvMsg{err: err})
		close(s.done)
		// If headerChan isn't closed, then close it.
		if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
			close(s.headerChan)
		}
	}
	hdr := &headerFrame{
		hf:        headerFields,
		endStream: false,
		// initStream registers the stream under its assigned ID while the
		// transport is still reachable; loopyWriter invokes it before the
		// HEADERS frame is written out.
		initStream: func(id uint32) error {
			t.mu.Lock()
			if state := t.state; state != reachable {
				t.mu.Unlock()
				// Do a quick cleanup.
				err := error(errStreamDrain)
				if state == closing {
					err = ErrConnClosing
				}
				cleanup(err)
				return err
			}
			t.activeStreams[id] = s
			if channelz.IsOn() {
				atomic.AddInt64(&t.czData.streamsStarted, 1)
				atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
			}
			// If the keepalive goroutine has gone dormant, wake it up.
			if t.kpDormant {
				t.kpDormancyCond.Signal()
			}
			t.mu.Unlock()
			return nil
		},
		onOrphaned: cleanup,
		wq:         s.wq,
	}
	firstTry := true
	var ch chan struct{}
	// checkForStreamQuota runs inside controlBuf's lock: it either claims a
	// slot (assigning the next odd stream ID) or records this caller as
	// waiting and captures the channel to block on.
	checkForStreamQuota := func(it interface{}) bool {
		if t.streamQuota <= 0 { // Can go negative if server decreases it.
			if firstTry {
				t.waitingStreams++
			}
			ch = t.streamsQuotaAvailable
			return false
		}
		if !firstTry {
			t.waitingStreams--
		}
		t.streamQuota--
		h := it.(*headerFrame)
		h.streamID = t.nextID
		t.nextID += 2
		s.id = h.streamID
		s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
		if t.streamQuota > 0 && t.waitingStreams > 0 {
			select {
			case t.streamsQuotaAvailable <- struct{}{}:
			default:
			}
		}
		return true
	}
	var hdrListSizeErr error
	// checkForHeaderListSize rejects the stream up front if the encoded
	// header list would exceed the server-advertised limit.
	checkForHeaderListSize := func(it interface{}) bool {
		if t.maxSendHeaderListSize == nil {
			return true
		}
		hdrFrame := it.(*headerFrame)
		var sz int64
		for _, f := range hdrFrame.hf {
			if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
				hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
				return false
			}
		}
		return true
	}
	// Retry loop: on quota exhaustion block until a slot frees up (or the
	// stream/transport is cancelled), then try to enqueue the HEADERS again.
	for {
		success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
			if !checkForStreamQuota(it) {
				return false
			}
			if !checkForHeaderListSize(it) {
				return false
			}
			return true
		}, hdr)
		if err != nil {
			return nil, err
		}
		if success {
			break
		}
		if hdrListSizeErr != nil {
			return nil, hdrListSizeErr
		}
		firstTry = false
		select {
		case <-ch:
		case <-s.ctx.Done():
			return nil, ContextErr(s.ctx.Err())
		case <-t.goAway:
			return nil, errStreamDrain
		case <-t.ctx.Done():
			return nil, ErrConnClosing
		}
	}
	if t.statsHandler != nil {
		header, ok := metadata.FromOutgoingContext(ctx)
		if ok {
			header.Set("user-agent", t.userAgent)
		} else {
			header = metadata.Pairs("user-agent", t.userAgent)
		}
		// Note: The header fields are compressed with hpack after this call returns.
		// No WireLength field is set here.
		outHeader := &stats.OutHeader{
			Client:      true,
			FullMethod:  callHdr.Method,
			RemoteAddr:  t.remoteAddr,
			LocalAddr:   t.localAddr,
			Compression: callHdr.SendCompress,
			Header:      header,
		}
		t.statsHandler.HandleRPC(s.ctx, outHeader)
	}
	return s, nil
}

// CloseStream clears the footprint of a stream when the stream is not needed any more.
// This must not be executed in reader's goroutine. func (t *http2Client) CloseStream(s *Stream, err error) { var ( rst bool rstCode http2.ErrCode ) if err != nil { rst = true rstCode = http2.ErrCodeCancel } t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) } func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { // If it was already done, return. If multiple closeStream calls // happen simultaneously, wait for the first to finish. <-s.done return } // status and trailers can be updated here without any synchronization because the stream goroutine will // only read it after it sees an io.EOF error from read or write and we'll write those errors // only after updating this. s.status = st if len(mdata) > 0 { s.trailer = mdata } if err != nil { // This will unblock reads eventually. s.write(recvMsg{err: err}) } // If headerChan isn't closed, then close it. if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { s.noHeaders = true close(s.headerChan) } cleanup := &cleanupStream{ streamID: s.id, onWrite: func() { t.mu.Lock() if t.activeStreams != nil { delete(t.activeStreams, s.id) } t.mu.Unlock() if channelz.IsOn() { if eosReceived { atomic.AddInt64(&t.czData.streamsSucceeded, 1) } else { atomic.AddInt64(&t.czData.streamsFailed, 1) } } }, rst: rst, rstCode: rstCode, } addBackStreamQuota := func(interface{}) bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: default: } } return true } t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) // This will unblock write. close(s.done) } // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. 
// // This method blocks until the addrConn that initiated this transport is // re-connected. This happens because t.onClose() begins reconnect logic at the // addrConn level and blocks until the addrConn is successfully connected. func (t *http2Client) Close() error { t.mu.Lock() // Make sure we only Close once. if t.state == closing { t.mu.Unlock() return nil } // Call t.onClose before setting the state to closing to prevent the client // from attempting to create new streams ASAP. t.onClose() t.state = closing streams := t.activeStreams t.activeStreams = nil if t.kpDormant { // If the keepalive goroutine is blocked on this condition variable, we // should unblock it so that the goroutine eventually exits. t.kpDormancyCond.Signal() } t.mu.Unlock() t.controlBuf.finish() t.cancel() err := t.conn.Close() if channelz.IsOn() { channelz.RemoveEntry(t.channelzID) } // Notify all active streams. for _, s := range streams { t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) } if t.statsHandler != nil { connEnd := &stats.ConnEnd{ Client: true, } t.statsHandler.HandleConn(t.ctx, connEnd) } return err } // GracefulClose sets the state to draining, which prevents new streams from // being created and causes the transport to be closed when the last active // stream is closed. If there are no active streams, the transport is closed // immediately. This does nothing if the transport is already draining or // closing. func (t *http2Client) GracefulClose() { t.mu.Lock() // Make sure we move to draining only from active. if t.state == draining || t.state == closing { t.mu.Unlock() return } t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { t.Close() return } t.controlBuf.put(&incomingGoAway{}) } // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. 
func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if opts.Last { // If it's the last message, update stream state. if !s.compareAndSwapState(streamActive, streamWriteDone) { return errStreamDone } } else if s.getState() != streamActive { return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, } if hdr != nil || data != nil { // If it's not an empty data frame. // Add some data to grpc message header so that we can equally // distribute bytes across frames. emptyLen := http2MaxFrameLen - len(hdr) if emptyLen > len(data) { emptyLen = len(data) } hdr = append(hdr, data[:emptyLen]...) data = data[emptyLen:] df.h, df.d = hdr, data // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { return err } } return t.controlBuf.put(df) } func (t *http2Client) getStream(f http2.Frame) *Stream { t.mu.Lock() s := t.activeStreams[f.Header().StreamID] t.mu.Unlock() return s } // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. func (t *http2Client) adjustWindow(s *Stream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } } // updateWindow adjusts the inbound quota for the stream. // Window updates will be sent out when the cumulative quota // exceeds the corresponding threshold. func (t *http2Client) updateWindow(s *Stream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } } // updateFlowControl updates the incoming flow control windows // for the transport and the stream based on the current bdp // estimation. 
func (t *http2Client) updateFlowControl(n uint32) { t.mu.Lock() for _, s := range t.activeStreams { s.fc.newLimit(n) } t.mu.Unlock() updateIWS := func(interface{}) bool { t.initialWindowSize = int32(n) return true } t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) t.controlBuf.put(&outgoingSettings{ ss: []http2.Setting{ { ID: http2.SettingInitialWindowSize, Val: n, }, }, }) } func (t *http2Client) handleData(f *http2.DataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { sendBDPPing = t.bdpEst.add(size) } // Decouple connection's flow control from application's read. // An update on connection's flow control should not depend on // whether user application has read the data or not. Such a // restriction is already imposed on the stream's flow control, // and therefore the sender will be blocked anyways. // Decoupling the connection flow control will prevent other // active(fast) streams from starving in presence of slow or // inactive streams. // if w := t.fc.onData(size); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{ streamID: 0, increment: w, }) } if sendBDPPing { // Avoid excessive ping detection (e.g. in an L7 proxy) // by sending a window update prior to the BDP ping. if w := t.fc.reset(); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{ streamID: 0, increment: w, }) } t.controlBuf.put(bdpPing) } // Select the right stream to dispatch. s := t.getStream(f) if s == nil { return } if size > 0 { if err := s.fc.onData(size); err != nil { t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) return } if f.Header().Flags.Has(http2.FlagDataPadded) { if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? 
if len(f.Data()) > 0 { buffer := t.bufferPool.get() buffer.Reset() buffer.Write(f.Data()) s.write(recvMsg{buffer: buffer}) } } // The server has closed the stream without sending trailers. Record that // the read direction is closed, and set the status appropriately. if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) } } func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { s := t.getStream(f) if s == nil { return } if f.ErrCode == http2.ErrCodeRefusedStream { // The stream was unprocessed by the server. atomic.StoreUint32(&s.unprocessed, 1) } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) statusCode = codes.Unknown } if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause // of this cancelation. Alter the status code accordingly. 
statusCode = codes.DeadlineExceeded } } t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) } func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { if f.IsAck() { return } var maxStreams *uint32 var ss []http2.Setting var updateFuncs []func() f.ForeachSetting(func(s http2.Setting) error { switch s.ID { case http2.SettingMaxConcurrentStreams: maxStreams = new(uint32) *maxStreams = s.Val case http2.SettingMaxHeaderListSize: updateFuncs = append(updateFuncs, func() { t.maxSendHeaderListSize = new(uint32) *t.maxSendHeaderListSize = s.Val }) default: ss = append(ss, s) } return nil }) if isFirst && maxStreams == nil { maxStreams = new(uint32) *maxStreams = math.MaxUint32 } sf := &incomingSettings{ ss: ss, } if maxStreams != nil { updateStreamQuota := func() { delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) t.maxConcurrentStreams = *maxStreams t.streamQuota += delta if delta > 0 && t.waitingStreams > 0 { close(t.streamsQuotaAvailable) // wake all of them up. t.streamsQuotaAvailable = make(chan struct{}, 1) } } updateFuncs = append(updateFuncs, updateStreamQuota) } t.controlBuf.executeAndPut(func(interface{}) bool { for _, f := range updateFuncs { f() } return true }, sf) } func (t *http2Client) handlePing(f *http2.PingFrame) { if f.IsAck() { // Maybe it's a BDP ping. if t.bdpEst != nil { t.bdpEst.calculate(f.Data) } return } pingAck := &ping{ack: true} copy(pingAck.data[:], f.Data[:]) t.controlBuf.put(pingAck) } func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } if f.ErrCode == http2.ErrCodeEnhanceYourCalm { infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") } id := f.LastStreamID if id > 0 && id%2 != 1 { t.mu.Unlock() t.Close() return } // A client can receive multiple GoAways from the server (see // https://github.com/grpc/grpc-go/issues/1387). 
The idea is that the first // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be // sent after an RTT delay with the ID of the last stream the server will // process. // // Therefore, when we get the first GoAway we don't necessarily close any // streams. While in case of second GoAway we close all streams created after // the GoAwayId. This way streams that were in-flight while the GoAway from // server was being sent don't get killed. select { case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). // If there are multiple GoAways the first one should always have an ID greater than the following ones. if id > t.prevGoAwayID { t.mu.Unlock() t.Close() return } default: t.setGoAwayReason(f) close(t.goAway) t.controlBuf.put(&incomingGoAway{}) // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. t.onGoAway(t.goAwayReason) t.state = draining } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. upperLimit := t.prevGoAwayID if upperLimit == 0 { // This is the first GoAway Frame. upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. } for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. atomic.StoreUint32(&stream.unprocessed, 1) t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } } t.prevGoAwayID = id active := len(t.activeStreams) t.mu.Unlock() if active == 0 { t.Close() } } // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. // It expects a lock on transport's mutext to be held by // the caller. 
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason switch f.ErrCode { case http2.ErrCodeEnhanceYourCalm: if string(f.DebugData()) == "too_many_pings" { t.goAwayReason = GoAwayTooManyPings } } } func (t *http2Client) GetGoAwayReason() GoAwayReason { t.mu.Lock() defer t.mu.Unlock() return t.goAwayReason } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { t.controlBuf.put(&incomingWindowUpdate{ streamID: f.Header().StreamID, increment: f.Increment, }) } // operateHeaders takes action on the decoded headers. func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { s := t.getStream(frame) if s == nil { return } endStream := frame.StreamEnded() atomic.StoreUint32(&s.bytesReceived, 1) initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 if !initialHeader && !endStream { // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) return } state := &decodeState{} // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. 
state.data.isGRPC = !initialHeader if err := state.decodeHeader(frame); err != nil { t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream) return } isHeader := false defer func() { if t.statsHandler != nil { if isHeader { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), Header: s.header.Copy(), Compression: s.recvCompress, } t.statsHandler.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: s.trailer.Copy(), } t.statsHandler.HandleRPC(s.ctx, inTrailer) } } }() // If headerChan hasn't been closed yet if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { s.headerValid = true if !endStream { // HEADERS frame block carries a Response-Headers. isHeader = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. s.recvCompress = state.data.encoding if len(state.data.mdata) > 0 { s.header = state.data.mdata } } else { // HEADERS frame block carries a Trailers-Only. s.noHeaders = true } close(s.headerChan) } if !endStream { return } // if client received END_STREAM from server while stream was still active, send RST_STREAM rst := s.getState() == streamActive t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) } // reader runs as a separate goroutine in charge of reading data from network // connection. // // TODO(zhaoq): currently one reader per transport. Investigate whether this is // optimal. // TODO(zhaoq): Check the validity of the incoming frame sequence. func (t *http2Client) reader() { defer close(t.readerDone) // Check the validity of server preface. 
frame, err := t.framer.fr.ReadFrame() if err != nil { t.Close() // this kicks off resetTransport, so must be last before return return } t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) if t.keepaliveEnabled { atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) } sf, ok := frame.(*http2.SettingsFrame) if !ok { t.Close() // this kicks off resetTransport, so must be last before return return } t.onPrefaceReceipt() t.handleSettings(sf, true) // loop to keep reading incoming messages on this transport. for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() if t.keepaliveEnabled { atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) } if err != nil { // Abort an active stream if the http2.Framer returns a // http2.StreamError. This can happen only if the server's response // is malformed http2. if se, ok := err.(http2.StreamError); ok { t.mu.Lock() s := t.activeStreams[se.StreamID] t.mu.Unlock() if s != nil { // use error detail to provide better err message code := http2ErrConvTab[se.Code] msg := t.framer.fr.ErrorDetail().Error() t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue } else { // Transport error. t.Close() return } } switch frame := frame.(type) { case *http2.MetaHeadersFrame: t.operateHeaders(frame) case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: t.handleSettings(frame, false) case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: t.handleGoAway(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: errorf("transport: http2Client.reader got unhandled frame type %v.", frame) } } } func minTime(a, b time.Duration) time.Duration { if a < b { return a } return b } // keepalive running in a separate goroutune makes sure the connection is alive by sending pings. 
func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. outstandingPing := false // Amount of time remaining before which we should receive an ACK for the // last sent ping. timeoutLeft := time.Duration(0) // Records the last value of t.lastRead before we go block on the timer. // This is required to check for read activity since then. prevNano := time.Now().UnixNano() timer := time.NewTimer(t.kp.Time) for { select { case <-timer.C: lastRead := atomic.LoadInt64(&t.lastRead) if lastRead > prevNano { // There has been read activity since the last time we were here. outstandingPing = false // Next timer should fire at kp.Time seconds from lastRead time. timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) prevNano = lastRead continue } if outstandingPing && timeoutLeft <= 0 { t.Close() return } t.mu.Lock() if t.state == closing { // If the transport is closing, we should exit from the // keepalive goroutine here. If not, we could have a race // between the call to Signal() from Close() and the call to // Wait() here, whereby the keepalive goroutine ends up // blocking on the condition variable which will never be // signalled again. t.mu.Unlock() return } if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { // If a ping was sent out previously (because there were active // streams at that point) which wasn't acked and its timeout // hadn't fired, but we got here and are about to go dormant, // we should make sure that we unconditionally send a ping once // we awaken. outstandingPing = false t.kpDormant = true t.kpDormancyCond.Wait() } t.kpDormant = false t.mu.Unlock() // We get here either because we were dormant and a new stream was // created which unblocked the Wait() call, or because the // keepalive timer expired. In both cases, we need to send a ping. 
if !outstandingPing { if channelz.IsOn() { atomic.AddInt64(&t.czData.kpCount, 1) } t.controlBuf.put(p) timeoutLeft = t.kp.Timeout outstandingPing = true } // The amount of time to sleep here is the minimum of kp.Time and // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). sleepDuration := minTime(t.kp.Time, timeoutLeft) timeoutLeft -= sleepDuration timer.Reset(sleepDuration) case <-t.ctx.Done(): if !timer.Stop() { <-timer.C } return } } } func (t *http2Client) Error() <-chan struct{} { return t.ctx.Done() } func (t *http2Client) GoAway() <-chan struct{} { return t.goAway } func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { s := channelz.SocketInternalMetric{ StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), MessagesSent: atomic.LoadInt64(&t.czData.msgSent), MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), SocketOptions: channelz.GetSocketOption(t.conn), LocalAddr: t.localAddr, RemoteAddr: t.remoteAddr, // RemoteName : } if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { s.Security = au.GetSecurityValue() } s.RemoteFlowControlWindow = t.getOutFlowWindow() return &s } func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } func (t *http2Client) IncrMsgSent() { atomic.AddInt64(&t.czData.msgSent, 1) atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) } func (t *http2Client) IncrMsgRecv() { 
atomic.AddInt64(&t.czData.msgRecv, 1) atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) } func (t *http2Client) getOutFlowWindow() int64 { resp := make(chan uint32, 1) timer := time.NewTimer(time.Second) defer timer.Stop() t.controlBuf.put(&outFlowControlSizeRequest{resp}) select { case sz := <-resp: return int64(sz) case <-t.ctxDone: return -1 case <-timer.C: return -2 } } grpc-go-1.29.1/internal/transport/http2_server.go000066400000000000000000001133621365033716300220050ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package transport import ( "bytes" "context" "errors" "fmt" "io" "math" "net" "strconv" "sync" "sync/atomic" "time" "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. 
ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen // (equal to the number of http2Servers created). Must be accessed atomically. var serverConnectionCounter uint64 // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. ctx context.Context done chan struct{} conn net.Conn loopy *loopyWriter readerDone chan struct{} // sync point to enable testing. writerDone chan struct{} // sync point to enable testing. remoteAddr net.Addr localAddr net.Addr maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle framer *framer // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow stats stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. kep keepalive.EnforcementPolicy // The time instance last ping was received. lastPingAt time.Time // Number of times the client has violated keepalive ping policy so far. pingStrikes uint8 // Flag to signify that number of ping strikes should be reset to 0. // This is set whenever data or header frames are sent. // 1 means yes. resetPingStrikes uint32 // Accessed atomically. initialWindowSize int32 bdpEst *bdpEstimator maxSendHeaderListSize *uint32 mu sync.Mutex // guard the following // drainChan is initialized when drain(...) 
is called the first time. // After which the server writes out the first GoAway(with ID 2^31-1) frame. // Then an independent goroutine will be launched to later send the second GoAway. // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is // already underway. drainChan chan struct{} state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. // This is either the beginning of the connection or when the number of // RPCs go down to 0. // When the connection is busy, this value is set to 0. idle time.Time // Fields below are for channelz metric collection. channelzID int64 // channelz unique identification number czData *channelzData bufferPool *bufferPool connectionID uint64 } // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is // returned if something goes wrong. func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { writeBufSize := config.WriteBufferSize readBufSize := config.ReadBufferSize maxHeaderListSize := defaultServerMaxHeaderListSize if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen, }} // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. 
maxStreams := config.MaxStreams if maxStreams == 0 { maxStreams = math.MaxUint32 } else { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, Val: maxStreams, }) } dynamicWindow := true iwz := int32(initialWindowSize) if config.InitialWindowSize >= defaultWindowSize { iwz = config.InitialWindowSize dynamicWindow = false } icwz := int32(initialWindowSize) if config.InitialConnWindowSize >= defaultWindowSize { icwz = config.InitialConnWindowSize dynamicWindow = false } if iwz != defaultWindowSize { isettings = append(isettings, http2.Setting{ ID: http2.SettingInitialWindowSize, Val: uint32(iwz)}) } if config.MaxHeaderListSize != nil { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxHeaderListSize, Val: *config.MaxHeaderListSize, }) } if config.HeaderTableSize != nil { isettings = append(isettings, http2.Setting{ ID: http2.SettingHeaderTableSize, Val: *config.HeaderTableSize, }) } if err := framer.fr.WriteSettings(isettings...); err != nil { return nil, connectionErrorf(false, err, "transport: %v", err) } // Adjust the connection flow control window if needed. if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { return nil, connectionErrorf(false, err, "transport: %v", err) } } kp := config.KeepaliveParams if kp.MaxConnectionIdle == 0 { kp.MaxConnectionIdle = defaultMaxConnectionIdle } if kp.MaxConnectionAge == 0 { kp.MaxConnectionAge = defaultMaxConnectionAge } // Add a jitter to MaxConnectionAge. 
kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) if kp.MaxConnectionAgeGrace == 0 { kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace } if kp.Time == 0 { kp.Time = defaultServerKeepaliveTime } if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } done := make(chan struct{}) t := &http2Server{ ctx: context.Background(), done: done, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), authInfo: config.AuthInfo, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), maxStreams: maxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), stats: config.StatsHandler, kp: kp, idle: time.Now(), kep: kep, initialWindowSize: iwz, czData: new(channelzData), bufferPool: newBufferPool(), } t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, updateFlowControl: t.updateFlowControl, } } if t.stats != nil { t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{} t.stats.HandleConn(t.ctx, connBegin) } if channelz.IsOn() { t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) t.framer.writer.Flush() defer func() { if err != nil { t.Close() } }() // Check the validity of client preface. 
preface := make([]byte, len(clientPreface)) if _, err := io.ReadFull(t.conn, preface); err != nil { return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } if !bytes.Equal(preface, clientPreface) { return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) } frame, err := t.framer.fr.ReadFrame() if err == io.EOF || err == io.ErrUnexpectedEOF { return nil, err } if err != nil { return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) } atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) sf, ok := frame.(*http2.SettingsFrame) if !ok { return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) } t.handleSettings(sf) go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler if err := t.loopy.run(); err != nil { errorf("transport: loopyWriter.run returning. Err: %v", err) } t.conn.Close() close(t.writerDone) }() go t.keepalive() return t, nil } // operateHeader takes action on the decoded headers. 
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { streamID := frame.Header().StreamID state := &decodeState{ serverSide: true, } if err := state.decodeHeader(frame); err != nil { if se, ok := status.FromError(err); ok { t.controlBuf.put(&cleanupStream{ streamID: streamID, rst: true, rstCode: statusCodeConvTab[se.Code()], onWrite: func() {}, }) } return false } buf := newRecvBuffer() s := &Stream{ id: streamID, st: t, buf: buf, fc: &inFlow{limit: uint32(t.initialWindowSize)}, recvCompress: state.data.encoding, method: state.data.method, contentSubtype: state.data.contentSubtype, } if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } if state.data.timeoutSet { s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } pr := &peer.Peer{ Addr: t.remoteAddr, } // Attach Auth info if there is any. if t.authInfo != nil { pr.AuthInfo = t.authInfo } s.ctx = peer.NewContext(s.ctx, pr) // Attach the received metadata to the context. 
if len(state.data.mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) } if state.data.statsTags != nil { s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) } if state.data.statsTrace != nil { s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) } if t.inTapHandle != nil { var err error info := &tap.Info{ FullMethodName: state.data.method, } s.ctx, err = t.inTapHandle(s.ctx, info) if err != nil { warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) t.controlBuf.put(&cleanupStream{ streamID: s.id, rst: true, rstCode: http2.ErrCodeRefusedStream, onWrite: func() {}, }) s.cancel() return false } } t.mu.Lock() if t.state != reachable { t.mu.Unlock() s.cancel() return false } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() t.controlBuf.put(&cleanupStream{ streamID: streamID, rst: true, rstCode: http2.ErrCodeRefusedStream, onWrite: func() {}, }) s.cancel() return false } if streamID%2 != 1 || streamID <= t.maxStreamID { t.mu.Unlock() // illegal gRPC stream id. 
errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) s.cancel() return true } t.maxStreamID = streamID t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { t.idle = time.Time{} } t.mu.Unlock() if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) } s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) if t.stats != nil { s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), Header: metadata.MD(state.data.mdata).Copy(), } t.stats.HandleRPC(s.ctx, inHeader) } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ ctx: s.ctx, ctxDone: s.ctxDone, recv: s.buf, freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) }, } // Register the stream with loopy. t.controlBuf.put(®isterStream{ streamID: s.id, wq: s.wq, }) handle(s) return false } // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. 
func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { defer close(t.readerDone) for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) t.mu.Lock() s := t.activeStreams[se.StreamID] t.mu.Unlock() if s != nil { t.closeStream(s, true, se.Code, false) } else { t.controlBuf.put(&cleanupStream{ streamID: se.StreamID, rst: true, rstCode: se.Code, onWrite: func() {}, }) } continue } if err == io.EOF || err == io.ErrUnexpectedEOF { t.Close() return } warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) t.Close() return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: if t.operateHeaders(frame, handle, traceCtx) { t.Close() break } case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: t.handleSettings(frame) case *http2.PingFrame: t.handlePing(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. default: errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) } } } func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { // The transport is closing. return nil, false } s, ok := t.activeStreams[f.Header().StreamID] if !ok { // The stream is already done. return nil, false } return s, true } // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. 
func (t *http2Server) adjustWindow(s *Stream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } } // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. func (t *http2Server) updateWindow(s *Stream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w, }) } } // updateFlowControl updates the incoming flow control windows // for the transport and the stream based on the current bdp // estimation. func (t *http2Server) updateFlowControl(n uint32) { t.mu.Lock() for _, s := range t.activeStreams { s.fc.newLimit(n) } t.initialWindowSize = int32(n) t.mu.Unlock() t.controlBuf.put(&outgoingWindowUpdate{ streamID: 0, increment: t.fc.newLimit(n), }) t.controlBuf.put(&outgoingSettings{ ss: []http2.Setting{ { ID: http2.SettingInitialWindowSize, Val: n, }, }, }) } func (t *http2Server) handleData(f *http2.DataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { sendBDPPing = t.bdpEst.add(size) } // Decouple connection's flow control from application's read. // An update on connection's flow control should not depend on // whether user application has read the data or not. Such a // restriction is already imposed on the stream's flow control, // and therefore the sender will be blocked anyways. // Decoupling the connection flow control will prevent other // active(fast) streams from starving in presence of slow or // inactive streams. if w := t.fc.onData(size); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{ streamID: 0, increment: w, }) } if sendBDPPing { // Avoid excessive ping detection (e.g. in an L7 proxy) // by sending a window update prior to the BDP ping. 
if w := t.fc.reset(); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{ streamID: 0, increment: w, }) } t.controlBuf.put(bdpPing) } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { return } if size > 0 { if err := s.fc.onData(size); err != nil { t.closeStream(s, true, http2.ErrCodeFlowControl, false) return } if f.Header().Flags.Has(http2.FlagDataPadded) { if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { buffer := t.bufferPool.get() buffer.Reset() buffer.Write(f.Data()) s.write(recvMsg{buffer: buffer}) } } if f.Header().Flags.Has(http2.FlagDataEndStream) { // Received the end of stream from the client. s.compareAndSwapState(streamActive, streamReadDone) s.write(recvMsg{err: io.EOF}) } } func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { // If the stream is not deleted from the transport's active streams map, then do a regular close stream. if s, ok := t.getStream(f); ok { t.closeStream(s, false, 0, false) return } // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. 
t.controlBuf.put(&cleanupStream{ streamID: f.Header().StreamID, rst: false, rstCode: 0, onWrite: func() {}, }) } func (t *http2Server) handleSettings(f *http2.SettingsFrame) { if f.IsAck() { return } var ss []http2.Setting var updateFuncs []func() f.ForeachSetting(func(s http2.Setting) error { switch s.ID { case http2.SettingMaxHeaderListSize: updateFuncs = append(updateFuncs, func() { t.maxSendHeaderListSize = new(uint32) *t.maxSendHeaderListSize = s.Val }) default: ss = append(ss, s) } return nil }) t.controlBuf.executeAndPut(func(interface{}) bool { for _, f := range updateFuncs { f() } return true }, &incomingSettings{ ss: ss, }) } const ( maxPingStrikes = 2 defaultPingTimeout = 2 * time.Hour ) func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { if f.Data == goAwayPing.data && t.drainChan != nil { close(t.drainChan) return } // Maybe it's a BDP ping. if t.bdpEst != nil { t.bdpEst.calculate(f.Data) } return } pingAck := &ping{ack: true} copy(pingAck.data[:], f.Data[:]) t.controlBuf.put(pingAck) now := time.Now() defer func() { t.lastPingAt = now }() // A reset ping strikes means that we don't need to check for policy // violation for this ping and the pingStrikes counter should be set // to 0. if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { t.pingStrikes = 0 return } t.mu.Lock() ns := len(t.activeStreams) t.mu.Unlock() if ns < 1 && !t.kep.PermitWithoutStream { // Keepalive shouldn't be active thus, this new ping should // have come after at least defaultPingTimeout. if t.lastPingAt.Add(defaultPingTimeout).After(now) { t.pingStrikes++ } } else { // Check if keepalive policy is respected. if t.lastPingAt.Add(t.kep.MinTime).After(now) { t.pingStrikes++ } } if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. 
errorf("transport: Got too many pings from the client, closing the connection.") t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) } } func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { t.controlBuf.put(&incomingWindowUpdate{ streamID: f.Header().StreamID, increment: f.Increment, }) } func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { for k, vv := range md { if isReservedHeader(k) { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. continue } for _, v := range vv { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } } return headerFields } func (t *http2Server) checkForHeaderListSize(it interface{}) bool { if t.maxSendHeaderListSize == nil { return true } hdrFrame := it.(*headerFrame) var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) return false } } return true } // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { if s.updateHeaderSent() || s.getState() == streamDone { return ErrIllegalHeaderWrite } s.hdrMu.Lock() if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) } else { s.header = md } } if err := t.writeHeaderLocked(s); err != nil { s.hdrMu.Unlock() return err } s.hdrMu.Unlock() return nil } func (t *http2Server) setResetPingStrikes() { atomic.StoreUint32(&t.resetPingStrikes, 1) } func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. 
headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) if s.sendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } headerFields = appendHeaderFieldsFromMD(headerFields, s.header) success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ streamID: s.id, hf: headerFields, endStream: false, onWrite: t.setResetPingStrikes, }) if !success { if err != nil { return err } t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } if t.stats != nil { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. outHeader := &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, } t.stats.HandleRPC(s.Context(), outHeader) } return nil } // WriteStatus sends stream status to the client and terminates the stream. // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { if s.getState() == streamDone { return nil } s.hdrMu.Lock() // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !s.updateHeaderSent() { // No headers have been sent. if len(s.header) > 0 { // Send a separate header frame. if err := t.writeHeaderLocked(s); err != nil { s.hdrMu.Unlock() return err } } else { // Send a trailer only response. 
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) } } headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) } else { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } } // Attach the trailer metadata. headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) trailingHeader := &headerFrame{ streamID: s.id, hf: headerFields, endStream: true, onWrite: t.setResetPingStrikes, } s.hdrMu.Unlock() success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) if !success { if err != nil { return err } t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) if t.stats != nil { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } return nil } // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. 
if err := t.WriteHeader(s, nil); err != nil { if _, ok := err.(ConnectionError); ok { return err } // TODO(mmukhi, dfawley): Make sure this is the right code to return. return status.Errorf(codes.Internal, "transport: %v", err) } } else { // Writing headers checks for this condition. if s.getState() == streamDone { // TODO(mmukhi, dfawley): Should the server write also return io.EOF? s.cancel() select { case <-t.done: return ErrConnClosing default: } return ContextErr(s.ctx.Err()) } } // Add some data to header frame so that we can equally distribute bytes across frames. emptyLen := http2MaxFrameLen - len(hdr) if emptyLen > len(data) { emptyLen = len(data) } hdr = append(hdr, data[:emptyLen]...) data = data[emptyLen:] df := &dataFrame{ streamID: s.id, h: hdr, d: data, onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { select { case <-t.done: return ErrConnClosing default: } return ContextErr(s.ctx.Err()) } return t.controlBuf.put(df) } // keepalive running in a separate goroutine does the following: // 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. // 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. // 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. // 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection // after an additional duration of keepalive.Timeout. func (t *http2Server) keepalive() { p := &ping{} // True iff a ping has been sent, and no data has been received since then. outstandingPing := false // Amount of time remaining before which we should receive an ACK for the // last sent ping. kpTimeoutLeft := time.Duration(0) // Records the last value of t.lastRead before we go block on the timer. // This is required to check for read activity since then. 
prevNano := time.Now().UnixNano() // Initialize the different timers to their default values. idleTimer := time.NewTimer(t.kp.MaxConnectionIdle) ageTimer := time.NewTimer(t.kp.MaxConnectionAge) kpTimer := time.NewTimer(t.kp.Time) defer func() { // We need to drain the underlying channel in these timers after a call // to Stop(), only if we are interested in resetting them. Clearly we // are not interested in resetting them here. idleTimer.Stop() ageTimer.Stop() kpTimer.Stop() }() for { select { case <-idleTimer.C: t.mu.Lock() idle := t.idle if idle.IsZero() { // The connection is non-idle. t.mu.Unlock() idleTimer.Reset(t.kp.MaxConnectionIdle) continue } val := t.kp.MaxConnectionIdle - time.Since(idle) t.mu.Unlock() if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. t.drain(http2.ErrCodeNo, []byte{}) return } idleTimer.Reset(val) case <-ageTimer.C: t.drain(http2.ErrCodeNo, []byte{}) ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: // Close the connection after grace period. infof("transport: closing server transport due to maximum connection age.") t.Close() case <-t.done: } return case <-kpTimer.C: lastRead := atomic.LoadInt64(&t.lastRead) if lastRead > prevNano { // There has been read activity since the last time we were // here. Setup the timer to fire at kp.Time seconds from // lastRead time and continue. outstandingPing = false kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) prevNano = lastRead continue } if outstandingPing && kpTimeoutLeft <= 0 { infof("transport: closing server transport due to idleness.") t.Close() return } if !outstandingPing { if channelz.IsOn() { atomic.AddInt64(&t.czData.kpCount, 1) } t.controlBuf.put(p) kpTimeoutLeft = t.kp.Timeout outstandingPing = true } // The amount of time to sleep here is the minimum of kp.Time and // timeoutLeft. 
This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) kpTimeoutLeft -= sleepDuration kpTimer.Reset(sleepDuration) case <-t.done: return } } } // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. func (t *http2Server) Close() error { t.mu.Lock() if t.state == closing { t.mu.Unlock() return errors.New("transport: Close() was already called") } t.state = closing streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() t.controlBuf.finish() close(t.done) err := t.conn.Close() if channelz.IsOn() { channelz.RemoveEntry(t.channelzID) } // Cancel all active streams. for _, s := range streams { s.cancel() } if t.stats != nil { connEnd := &stats.ConnEnd{} t.stats.HandleConn(t.ctx, connEnd) } return err } // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { delete(t.activeStreams, s.id) if len(t.activeStreams) == 0 { t.idle = time.Now() } } t.mu.Unlock() if channelz.IsOn() { if eosReceived { atomic.AddInt64(&t.czData.streamsSucceeded, 1) } else { atomic.AddInt64(&t.czData.streamsFailed, 1) } } } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. 
return } hdr.cleanup = &cleanupStream{ streamID: s.id, rst: rst, rstCode: rstCode, onWrite: func() { t.deleteStream(s, eosReceived) }, } t.controlBuf.put(hdr) } // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { s.swapState(streamDone) t.deleteStream(s, eosReceived) t.controlBuf.put(&cleanupStream{ streamID: s.id, rst: rst, rstCode: rstCode, onWrite: func() {}, }) } func (t *http2Server) RemoteAddr() net.Addr { return t.remoteAddr } func (t *http2Server) Drain() { t.drain(http2.ErrCodeNo, []byte{}) } func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { t.mu.Lock() defer t.mu.Unlock() if t.drainChan != nil { return } t.drainChan = make(chan struct{}) t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} // Handles outgoing GoAway and returns true if loopy needs to put itself // in draining mode. func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { t.mu.Lock() if t.state == closing { // TODO(mmukhi): This seems unnecessary. t.mu.Unlock() // The transport is closing. return false, ErrConnClosing } sid := t.maxStreamID if !g.headsUp { // Stop accepting more streams now. t.state = draining if len(t.activeStreams) == 0 { g.closeConn = true } t.mu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } if g.closeConn { // Abruptly close the connection following the GoAway (via // loopywriter). But flush out what's inside the buffer first. t.framer.writer.Flush() return false, fmt.Errorf("transport: Connection closing") } return true, nil } t.mu.Unlock() // For a graceful close, send out a GoAway with stream ID of MaxUInt32, // Follow that with a ping and wait for the ack to come back or a timer // to expire. 
During this time accept new streams since they might have // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { return false, err } go func() { timer := time.NewTimer(time.Minute) defer timer.Stop() select { case <-t.drainChan: case <-timer.C: case <-t.done: return } t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) }() return false, nil } func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { s := channelz.SocketInternalMetric{ StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), MessagesSent: atomic.LoadInt64(&t.czData.msgSent), MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), SocketOptions: channelz.GetSocketOption(t.conn), LocalAddr: t.localAddr, RemoteAddr: t.remoteAddr, // RemoteName : } if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { s.Security = au.GetSecurityValue() } s.RemoteFlowControlWindow = t.getOutFlowWindow() return &s } func (t *http2Server) IncrMsgSent() { atomic.AddInt64(&t.czData.msgSent, 1) atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) } func (t *http2Server) IncrMsgRecv() { atomic.AddInt64(&t.czData.msgRecv, 1) atomic.StoreInt64(&t.czData.lastMsgRecvTime, 
time.Now().UnixNano()) } func (t *http2Server) getOutFlowWindow() int64 { resp := make(chan uint32, 1) timer := time.NewTimer(time.Second) defer timer.Stop() t.controlBuf.put(&outFlowControlSizeRequest{resp}) select { case sz := <-resp: return int64(sz) case <-t.done: return -1 case <-timer.C: return -2 } } func getJitter(v time.Duration) time.Duration { if v == infinity { return 0 } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) j := grpcrand.Int63n(2*r) - r return time.Duration(j) } grpc-go-1.29.1/internal/transport/http_util.go000066400000000000000000000462041365033716300213720ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package transport import ( "bufio" "bytes" "encoding/base64" "fmt" "io" "math" "net" "net/http" "strconv" "strings" "time" "unicode/utf8" "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) const ( // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame // http://http2.github.io/http2-spec/#SettingValues http2InitHeaderTableSize = 4096 // baseContentType is the base content-type for gRPC. This is a valid // content-type on it's own, but can also include a content-subtype such as // "proto" as a suffix after "+" or ";". 
See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. baseContentType = "application/grpc" ) var ( clientPreface = []byte(http2.ClientPreface) http2ErrConvTab = map[http2.ErrCode]codes.Code{ http2.ErrCodeNo: codes.Internal, http2.ErrCodeProtocol: codes.Internal, http2.ErrCodeInternal: codes.Internal, http2.ErrCodeFlowControl: codes.ResourceExhausted, http2.ErrCodeSettingsTimeout: codes.Internal, http2.ErrCodeStreamClosed: codes.Internal, http2.ErrCodeFrameSize: codes.Internal, http2.ErrCodeRefusedStream: codes.Unavailable, http2.ErrCodeCancel: codes.Canceled, http2.ErrCodeCompression: codes.Internal, http2.ErrCodeConnect: codes.Internal, http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, http2.ErrCodeInadequateSecurity: codes.PermissionDenied, http2.ErrCodeHTTP11Required: codes.Internal, } statusCodeConvTab = map[codes.Code]http2.ErrCode{ codes.Internal: http2.ErrCodeInternal, codes.Canceled: http2.ErrCodeCancel, codes.Unavailable: http2.ErrCodeRefusedStream, codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, codes.PermissionDenied: http2.ErrCodeInadequateSecurity, } // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. HTTPStatusConvTab = map[int]codes.Code{ // 400 Bad Request - INTERNAL. http.StatusBadRequest: codes.Internal, // 401 Unauthorized - UNAUTHENTICATED. http.StatusUnauthorized: codes.Unauthenticated, // 403 Forbidden - PERMISSION_DENIED. http.StatusForbidden: codes.PermissionDenied, // 404 Not Found - UNIMPLEMENTED. http.StatusNotFound: codes.Unimplemented, // 429 Too Many Requests - UNAVAILABLE. http.StatusTooManyRequests: codes.Unavailable, // 502 Bad Gateway - UNAVAILABLE. http.StatusBadGateway: codes.Unavailable, // 503 Service Unavailable - UNAVAILABLE. http.StatusServiceUnavailable: codes.Unavailable, // 504 Gateway timeout - UNAVAILABLE. 
http.StatusGatewayTimeout: codes.Unavailable, } ) type parsedHeaderData struct { encoding string // statusGen caches the stream status received from the trailer the server // sent. Client side only. Do not access directly. After all trailers are // parsed, use the status method to retrieve the status. statusGen *status.Status // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not // intended for direct access outside of parsing. rawStatusCode *int rawStatusMsg string httpStatus *int // Server side only fields. timeoutSet bool timeout time.Duration method string // key-value metadata map from the peer. mdata map[string][]string statsTags []byte statsTrace []byte contentSubtype string // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). // // We are in gRPC mode (peer speaking gRPC) if: // * We are client side and have already received a HEADER frame that indicates gRPC peer. // * The header contains valid a content-type, i.e. a string starts with "application/grpc" // And we should handle error specific to gRPC. // // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we // are in HTTP fallback mode, and should handle error specific to HTTP. isGRPC bool grpcErr error httpErr error contentTypeErr string } // decodeState configures decoding criteria and records the decoded data. type decodeState struct { // whether decoding on server side or not serverSide bool // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS // frame once decodeHeader function has been invoked and returned. data parsedHeaderData } // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. 
func isReservedHeader(hdr string) bool { if hdr != "" && hdr[0] == ':' { return true } switch hdr { case "content-type", "user-agent", "grpc-message-type", "grpc-encoding", "grpc-message", "grpc-status", "grpc-timeout", "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. "te": return true default: return false } } // isWhitelistedHeader checks whether hdr should be propagated into metadata // visible to users, even though it is classified as "reserved", above. func isWhitelistedHeader(hdr string) bool { switch hdr { case ":authority", "user-agent": return true default: return false } } // contentSubtype returns the content-subtype for the given content-type. The // given content-type must be a valid content-type that starts with // "application/grpc". A content-subtype will follow "application/grpc" after a // "+" or ";". See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for // more details. // // If contentType is not a valid content-type for gRPC, the boolean // will be false, otherwise true. If content-type == "application/grpc", // "application/grpc+", or "application/grpc;", the boolean will be true, // but no content-subtype will be returned. // // contentType is assumed to be lowercase already. 
func contentSubtype(contentType string) (string, bool) {
	if contentType == baseContentType {
		return "", true
	}
	if !strings.HasPrefix(contentType, baseContentType) {
		return "", false
	}
	// guaranteed since != baseContentType and has baseContentType prefix
	switch contentType[len(baseContentType)] {
	case '+', ';':
		// this will return true for "application/grpc+" or "application/grpc;"
		// which the previous validContentType function tested to be valid, so we
		// just say that no content-subtype is specified in this case
		return contentType[len(baseContentType)+1:], true
	default:
		return "", false
	}
}

// contentSubtype is assumed to be lowercase
func contentType(contentSubtype string) string {
	if contentSubtype == "" {
		return baseContentType
	}
	return baseContentType + "+" + contentSubtype
}

// status returns the stream status, building it from the raw grpc-status /
// grpc-message trailers when no grpc-status-details-bin status was received.
// NOTE(review): assumes rawStatusCode is non-nil whenever statusGen is nil —
// decodeHeader fills in codes.Unknown on the client side to guarantee this;
// confirm before calling from any other path.
func (d *decodeState) status() *status.Status {
	if d.data.statusGen == nil {
		// No status-details were provided; generate status using code/msg.
		d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg)
	}
	return d.data.statusGen
}

const binHdrSuffix = "-bin"

// encodeBinHeader base64-encodes a binary metadata value without padding, per
// the gRPC wire spec for "-bin" headers.
func encodeBinHeader(v []byte) string {
	return base64.RawStdEncoding.EncodeToString(v)
}

// decodeBinHeader accepts both padded and unpadded base64: a length divisible
// by 4 is treated as padded (or not needing padding), anything else as raw.
func decodeBinHeader(v string) ([]byte, error) {
	if len(v)%4 == 0 {
		// Input was padded, or padding was not necessary.
		return base64.StdEncoding.DecodeString(v)
	}
	return base64.RawStdEncoding.DecodeString(v)
}

// encodeMetadataHeader encodes v for the wire: "-bin" keys get base64, all
// other values pass through unchanged.
func encodeMetadataHeader(k, v string) string {
	if strings.HasSuffix(k, binHdrSuffix) {
		return encodeBinHeader(([]byte)(v))
	}
	return v
}

// decodeMetadataHeader is the inverse of encodeMetadataHeader.
func decodeMetadataHeader(k, v string) (string, error) {
	if strings.HasSuffix(k, binHdrSuffix) {
		b, err := decodeBinHeader(v)
		return string(b), err
	}
	return v, nil
}

// decodeHeader processes every field of a decoded HEADERS frame and converts
// the accumulated state into an error (or nil) for the stream.
func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
	// frame.Truncated is set to true when framer detects that the current header
	// list size hits MaxHeaderListSize limit.
	if frame.Truncated {
		return status.Error(codes.Internal, "peer header list size exceeded limit")
	}

	for _, hf := range frame.Fields {
		d.processHeaderField(hf)
	}

	if d.data.isGRPC {
		if d.data.grpcErr != nil {
			return d.data.grpcErr
		}
		if d.serverSide {
			return nil
		}
		if d.data.rawStatusCode == nil && d.data.statusGen == nil {
			// gRPC status doesn't exist.
			// Set rawStatusCode to be unknown and return nil error.
			// So that, if the stream has ended this Unknown status
			// will be propagated to the user.
			// Otherwise, it will be ignored. In which case, status from
			// a later trailer, that has StreamEnded flag set, is propagated.
			code := int(codes.Unknown)
			d.data.rawStatusCode = &code
		}
		return nil
	}

	// HTTP fallback mode
	if d.data.httpErr != nil {
		return d.data.httpErr
	}

	var (
		code = codes.Internal // when header does not include HTTP status, return INTERNAL
		ok   bool
	)

	if d.data.httpStatus != nil {
		code, ok = HTTPStatusConvTab[*(d.data.httpStatus)]
		if !ok {
			code = codes.Unknown
		}
	}

	return status.Error(code, d.constructHTTPErrMsg())
}

// constructErrMsg constructs error message to be returned in HTTP fallback mode.
// Format: HTTP status code and its corresponding message + content-type error message.
func (d *decodeState) constructHTTPErrMsg() string {
	var errMsgs []string

	if d.data.httpStatus == nil {
		errMsgs = append(errMsgs, "malformed header: missing HTTP status")
	} else {
		errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus))
	}

	if d.data.contentTypeErr == "" {
		errMsgs = append(errMsgs, "transport: missing content-type field")
	} else {
		errMsgs = append(errMsgs, d.data.contentTypeErr)
	}

	return strings.Join(errMsgs, "; ")
}

// addMetadata appends v to the metadata values for key k, lazily allocating
// the map on first use.
func (d *decodeState) addMetadata(k, v string) {
	if d.data.mdata == nil {
		d.data.mdata = make(map[string][]string)
	}
	d.data.mdata[k] = append(d.data.mdata[k], v)
}

// processHeaderField dispatches a single decoded HPACK field into the
// appropriate parsedHeaderData slot; malformed values record grpcErr/httpErr
// instead of failing immediately so all fields are still consumed.
func (d *decodeState) processHeaderField(f hpack.HeaderField) {
	switch f.Name {
	case "content-type":
		contentSubtype, validContentType := contentSubtype(f.Value)
		if !validContentType {
			d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
			return
		}
		d.data.contentSubtype = contentSubtype
		// TODO: do we want to propagate the whole content-type in the metadata,
		// or come up with a way to just propagate the content-subtype if it was set?
		// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
		// in the metadata?
		d.addMetadata(f.Name, f.Value)
		d.data.isGRPC = true
	case "grpc-encoding":
		d.data.encoding = f.Value
	case "grpc-status":
		code, err := strconv.Atoi(f.Value)
		if err != nil {
			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
			return
		}
		d.data.rawStatusCode = &code
	case "grpc-message":
		d.data.rawStatusMsg = decodeGrpcMessage(f.Value)
	case "grpc-status-details-bin":
		v, err := decodeBinHeader(f.Value)
		if err != nil {
			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
			return
		}
		s := &spb.Status{}
		if err := proto.Unmarshal(v, s); err != nil {
			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
			return
		}
		d.data.statusGen = status.FromProto(s)
	case "grpc-timeout":
		d.data.timeoutSet = true
		var err error
		if d.data.timeout, err = decodeTimeout(f.Value); err != nil {
			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
		}
	case ":path":
		d.data.method = f.Value
	case ":status":
		code, err := strconv.Atoi(f.Value)
		if err != nil {
			d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
			return
		}
		d.data.httpStatus = &code
	case "grpc-tags-bin":
		v, err := decodeBinHeader(f.Value)
		if err != nil {
			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
			return
		}
		d.data.statsTags = v
		d.addMetadata(f.Name, string(v))
	case "grpc-trace-bin":
		v, err := decodeBinHeader(f.Value)
		if err != nil {
			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
			return
		}
		d.data.statsTrace = v
		d.addMetadata(f.Name, string(v))
	default:
		// Reserved (non-whitelisted) headers never surface as user metadata.
		if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
			break
		}
		v, err := decodeMetadataHeader(f.Name, f.Value)
		if err != nil {
			errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
			return
		}
		d.addMetadata(f.Name, v)
	}
}

// timeoutUnit is the single-character suffix of a grpc-timeout header value.
type timeoutUnit uint8

const (
	hour        timeoutUnit = 'H'
	minute      timeoutUnit = 'M'
	second      timeoutUnit = 'S'
	millisecond timeoutUnit = 'm'
	microsecond timeoutUnit = 'u'
	nanosecond  timeoutUnit = 'n'
)

// timeoutUnitToDuration maps a timeoutUnit suffix to its time.Duration scale;
// ok is false for an unrecognized unit.
func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
	switch u {
	case hour:
		return time.Hour, true
	case minute:
		return time.Minute, true
	case second:
		return time.Second, true
	case millisecond:
		return time.Millisecond, true
	case microsecond:
		return time.Microsecond, true
	case nanosecond:
		return time.Nanosecond, true
	default:
	}
	return
}

// maxTimeoutValue is the largest value (8 decimal digits) the grpc-timeout
// header may carry for any one unit.
const maxTimeoutValue int64 = 100000000 - 1

// div does integer division and round-up the result. Note that this is
// equivalent to (d+r-1)/r but has less chance to overflow.
func div(d, r time.Duration) int64 {
	if m := d % r; m > 0 {
		return int64(d/r + 1)
	}
	return int64(d / r)
}

// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it.
// encodeTimeout renders t as a grpc-timeout header value, choosing the finest
// unit whose value still fits in maxTimeoutValue and rounding up.
func encodeTimeout(t time.Duration) string {
	if t <= 0 {
		return "0n"
	}
	if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
		return strconv.FormatInt(d, 10) + "n"
	}
	if d := div(t, time.Microsecond); d <= maxTimeoutValue {
		return strconv.FormatInt(d, 10) + "u"
	}
	if d := div(t, time.Millisecond); d <= maxTimeoutValue {
		return strconv.FormatInt(d, 10) + "m"
	}
	if d := div(t, time.Second); d <= maxTimeoutValue {
		return strconv.FormatInt(d, 10) + "S"
	}
	if d := div(t, time.Minute); d <= maxTimeoutValue {
		return strconv.FormatInt(d, 10) + "M"
	}
	// Note that maxTimeoutValue * time.Hour > MaxInt64.
	return strconv.FormatInt(div(t, time.Hour), 10) + "H"
}

// decodeTimeout parses a grpc-timeout header value (digits plus a unit
// suffix) into a time.Duration, clamping at math.MaxInt64 on overflow.
func decodeTimeout(s string) (time.Duration, error) {
	size := len(s)
	if size < 2 {
		return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
	}
	if size > 9 {
		// Spec allows for 8 digits plus the unit.
		return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
	}
	unit := timeoutUnit(s[size-1])
	d, ok := timeoutUnitToDuration(unit)
	if !ok {
		return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
	}
	t, err := strconv.ParseInt(s[:size-1], 10, 64)
	if err != nil {
		return 0, err
	}
	const maxHours = math.MaxInt64 / int64(time.Hour)
	if d == time.Hour && t > maxHours {
		// This timeout would overflow math.MaxInt64; clamp it.
		return time.Duration(math.MaxInt64), nil
	}
	return d * time.Duration(t), nil
}

const (
	spaceByte   = ' '
	tildeByte   = '~'
	percentByte = '%'
)

// encodeGrpcMessage is used to encode status code in header field
// "grpc-message". It does percent encoding and also replaces invalid utf-8
// characters with Unicode replacement character.
//
// It checks to see if each individual byte in msg is an allowable byte, and
// then either percent encoding or passing it through. When percent encoding,
// the byte is converted into hexadecimal notation with a '%' prepended.
func encodeGrpcMessage(msg string) string {
	if msg == "" {
		return ""
	}
	// Fast path: scan first so the common all-ASCII message allocates nothing.
	lenMsg := len(msg)
	for i := 0; i < lenMsg; i++ {
		c := msg[i]
		if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
			return encodeGrpcMessageUnchecked(msg)
		}
	}
	return msg
}

// encodeGrpcMessageUnchecked does the actual percent-encoding; only called
// when encodeGrpcMessage found at least one byte needing escaping.
func encodeGrpcMessageUnchecked(msg string) string {
	var buf bytes.Buffer
	for len(msg) > 0 {
		r, size := utf8.DecodeRuneInString(msg)
		for _, b := range []byte(string(r)) {
			if size > 1 {
				// If size > 1, r is not ascii. Always do percent encoding.
				buf.WriteString(fmt.Sprintf("%%%02X", b))
				continue
			}

			// The for loop is necessary even if size == 1. r could be
			// utf8.RuneError.
			//
			// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
			if b >= spaceByte && b <= tildeByte && b != percentByte {
				buf.WriteByte(b)
			} else {
				buf.WriteString(fmt.Sprintf("%%%02X", b))
			}
		}
		msg = msg[size:]
	}
	return buf.String()
}

// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
func decodeGrpcMessage(msg string) string {
	if msg == "" {
		return ""
	}
	// Fast path: only fall into the allocating decoder when a decodable
	// "%XX" sequence (percent with two bytes following) is present.
	lenMsg := len(msg)
	for i := 0; i < lenMsg; i++ {
		if msg[i] == percentByte && i+2 < lenMsg {
			return decodeGrpcMessageUnchecked(msg)
		}
	}
	return msg
}

// decodeGrpcMessageUnchecked percent-decodes msg; malformed escapes (bad hex
// or truncated "%X") are passed through verbatim rather than rejected.
func decodeGrpcMessageUnchecked(msg string) string {
	var buf bytes.Buffer
	lenMsg := len(msg)
	for i := 0; i < lenMsg; i++ {
		c := msg[i]
		if c == percentByte && i+2 < lenMsg {
			parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
			if err != nil {
				buf.WriteByte(c)
			} else {
				buf.WriteByte(byte(parsed))
				i += 2
			}
		} else {
			buf.WriteByte(c)
		}
	}
	return buf.String()
}

// bufWriter batches small writes to conn, flushing whenever batchSize bytes
// have accumulated.  A batchSize of 0 disables buffering entirely.  Once a
// write to conn fails, err is sticky and all later calls fail with it.
type bufWriter struct {
	buf       []byte
	offset    int
	batchSize int
	conn      net.Conn
	err       error

	// onFlush, if set, is invoked just before each flush to conn.
	onFlush func()
}

func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
	return &bufWriter{
		buf:       make([]byte, batchSize*2),
		batchSize: batchSize,
		conn:      conn,
	}
}

func (w *bufWriter) Write(b []byte) (n int, err error) {
	if w.err != nil {
		return 0, w.err
	}
	if w.batchSize == 0 { // Buffer has been disabled.
		return w.conn.Write(b)
	}
	// NOTE(review): if Flush fails mid-loop the remaining bytes are still
	// copied into buf and subsequent Flush calls return the sticky error;
	// the final err reflects the last Flush attempt.
	for len(b) > 0 {
		nn := copy(w.buf[w.offset:], b)
		b = b[nn:]
		w.offset += nn
		n += nn
		if w.offset >= w.batchSize {
			err = w.Flush()
		}
	}
	return n, err
}

func (w *bufWriter) Flush() error {
	if w.err != nil {
		return w.err
	}
	if w.offset == 0 {
		return nil
	}
	if w.onFlush != nil {
		w.onFlush()
	}
	_, w.err = w.conn.Write(w.buf[:w.offset])
	w.offset = 0
	return w.err
}

// framer couples the buffered writer with an http2.Framer reading from and
// writing to the same connection.
type framer struct {
	writer *bufWriter
	fr     *http2.Framer
}

// newFramer builds a framer over conn with optional read/write buffering and
// the given max header list size (0 means the http2 package default).
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
	if writeBufferSize < 0 {
		writeBufferSize = 0
	}
	var r io.Reader = conn
	if readBufferSize > 0 {
		r = bufio.NewReaderSize(r, readBufferSize)
	}
	w := newBufWriter(conn, writeBufferSize)
	f := &framer{
		writer: w,
		fr:     http2.NewFramer(w, r),
	}
	f.fr.SetMaxReadFrameSize(http2MaxFrameLen)
	// Opt-in to Frame reuse API on framer to reduce garbage.
	// Frames aren't safe to read from after a subsequent call to ReadFrame.
	f.fr.SetReuseFrames()
	f.fr.MaxHeaderListSize = maxHeaderListSize
	f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
	return f
}
grpc-go-1.29.1/internal/transport/http_util_test.go000066400000000000000000000150331365033716300224250ustar00rootroot00000000000000/*
 *
 * Copyright 2014 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package transport

import (
	"fmt"
	"reflect"
	"testing"
	"time"
)

// TestTimeoutEncode verifies that encodeTimeout picks the finest unit that
// fits in 8 digits and rounds up when coarsening units.
func (s) TestTimeoutEncode(t *testing.T) {
	for _, test := range []struct {
		in  string
		out string
	}{
		{"12345678ns", "12345678n"},
		{"123456789ns", "123457u"},
		{"12345678us", "12345678u"},
		{"123456789us", "123457m"},
		{"12345678ms", "12345678m"},
		{"123456789ms", "123457S"},
		{"12345678s", "12345678S"},
		{"123456789s", "2057614M"},
		{"12345678m", "12345678M"},
		{"123456789m", "2057614H"},
	} {
		d, err := time.ParseDuration(test.in)
		if err != nil {
			t.Fatalf("failed to parse duration string %s: %v", test.in, err)
		}
		out := encodeTimeout(d)
		if out != test.out {
			t.Fatalf("timeoutEncode(%s) = %s, want %s", test.in, out, test.out)
		}
	}
}

// TestTimeoutDecode checks both valid values and the too-short / bad-unit
// error messages of decodeTimeout.
func (s) TestTimeoutDecode(t *testing.T) {
	for _, test := range []struct {
		// input
		s string
		// output
		d   time.Duration
		err error
	}{
		{"1234S", time.Second * 1234, nil},
		{"1234x", 0, fmt.Errorf("transport: timeout unit is not recognized: %q", "1234x")},
		{"1", 0, fmt.Errorf("transport: timeout string is too short: %q", "1")},
		{"", 0, fmt.Errorf("transport: timeout string is too short: %q", "")},
	} {
		d, err := decodeTimeout(test.s)
		if d != test.d ||
			// Errors are compared by message text since fmt.Errorf values
			// never compare equal directly.
			fmt.Sprint(err) != fmt.Sprint(test.err) {
			t.Fatalf("timeoutDecode(%q) = %d, %v, want %d, %v", test.s, int64(d), err, int64(test.d), test.err)
		}
	}
}

// TestContentSubtype covers the base type, "+"/";" subtype separators, and
// near-miss prefixes that must be rejected.
func (s) TestContentSubtype(t *testing.T) {
	tests := []struct {
		contentType string
		want        string
		wantValid   bool
	}{
		{"application/grpc", "", true},
		{"application/grpc+", "", true},
		{"application/grpc+blah", "blah", true},
		{"application/grpc;", "", true},
		{"application/grpc;blah", "blah", true},
		{"application/grpcd", "", false},
		{"application/grpd", "", false},
		{"application/grp", "", false},
	}
	for _, tt := range tests {
		got, gotValid := contentSubtype(tt.contentType)
		if got != tt.want || gotValid != tt.wantValid {
			t.Errorf("contentSubtype(%q) = (%v, %v); want (%v, %v)", tt.contentType, got, gotValid, tt.want, tt.wantValid)
		}
	}
}

func (s) TestEncodeGrpcMessage(t *testing.T) {
	for _, tt := range []struct {
		input    string
		expected string
	}{
		{"", ""},
		{"Hello", "Hello"},
		{"\u0000", "%00"},
		{"%", "%25"},
		{"系统", "%E7%B3%BB%E7%BB%9F"},
		// Invalid UTF-8 bytes are replaced by U+FFFD before encoding.
		{string([]byte{0xff, 0xfe, 0xfd}), "%EF%BF%BD%EF%BF%BD%EF%BF%BD"},
	} {
		actual := encodeGrpcMessage(tt.input)
		if tt.expected != actual {
			t.Errorf("encodeGrpcMessage(%q) = %q, want %q", tt.input, actual, tt.expected)
		}
	}

	// make sure that all the visible ASCII chars except '%' are not percent encoded.
	for i := ' '; i <= '~' && i != '%'; i++ {
		output := encodeGrpcMessage(string(i))
		if output != string(i) {
			t.Errorf("encodeGrpcMessage(%v) = %v, want %v", string(i), output, string(i))
		}
	}

	// make sure that all the invisible ASCII chars and '%' are percent encoded.
	for i := rune(0); i == '%' || (i >= rune(0) && i < ' ') || (i > '~' && i <= rune(127)); i++ {
		output := encodeGrpcMessage(string(i))
		expected := fmt.Sprintf("%%%02X", i)
		if output != expected {
			t.Errorf("encodeGrpcMessage(%v) = %v, want %v", string(i), output, expected)
		}
	}
}

// TestDecodeGrpcMessage checks percent-decoding, including that malformed
// escapes ("H%6", "%G0") pass through unchanged.
func (s) TestDecodeGrpcMessage(t *testing.T) {
	for _, tt := range []struct {
		input    string
		expected string
	}{
		{"", ""},
		{"Hello", "Hello"},
		{"H%61o", "Hao"},
		{"H%6", "H%6"},
		{"%G0", "%G0"},
		{"%E7%B3%BB%E7%BB%9F", "系统"},
		{"%EF%BF%BD", "�"},
	} {
		actual := decodeGrpcMessage(tt.input)
		if tt.expected != actual {
			t.Errorf("decodeGrpcMessage(%q) = %q, want %q", tt.input, actual, tt.expected)
		}
	}

	// make sure that all the visible ASCII chars except '%' are not percent decoded.
	for i := ' '; i <= '~' && i != '%'; i++ {
		output := decodeGrpcMessage(string(i))
		if output != string(i) {
			t.Errorf("decodeGrpcMessage(%v) = %v, want %v", string(i), output, string(i))
		}
	}

	// make sure that all the invisible ASCII chars and '%' are percent decoded.
	for i := rune(0); i == '%' || (i >= rune(0) && i < ' ') || (i > '~' && i <= rune(127)); i++ {
		output := decodeGrpcMessage(fmt.Sprintf("%%%02X", i))
		if output != string(i) {
			t.Errorf("decodeGrpcMessage(%v) = %v, want %v", fmt.Sprintf("%%%02X", i), output, string(i))
		}
	}
}

// Decode an encoded string should get the same thing back, except for invalid
// utf8 chars.
func (s) TestDecodeEncodeGrpcMessage(t *testing.T) { testCases := []struct { orig string want string }{ {"", ""}, {"hello", "hello"}, {"h%6", "h%6"}, {"%G0", "%G0"}, {"系统", "系统"}, {"Hello, 世界", "Hello, 世界"}, {string([]byte{0xff, 0xfe, 0xfd}), "���"}, {string([]byte{0xff}) + "Hello" + string([]byte{0xfe}) + "世界" + string([]byte{0xfd}), "�Hello�世界�"}, } for _, tC := range testCases { got := decodeGrpcMessage(encodeGrpcMessage(tC.orig)) if got != tC.want { t.Errorf("decodeGrpcMessage(encodeGrpcMessage(%q)) = %q, want %q", tC.orig, got, tC.want) } } } const binaryValue = string(128) func (s) TestEncodeMetadataHeader(t *testing.T) { for _, test := range []struct { // input kin string vin string // output vout string }{ {"key", "abc", "abc"}, {"KEY", "abc", "abc"}, {"key-bin", "abc", "YWJj"}, {"key-bin", binaryValue, "woA"}, } { v := encodeMetadataHeader(test.kin, test.vin) if !reflect.DeepEqual(v, test.vout) { t.Fatalf("encodeMetadataHeader(%q, %q) = %q, want %q", test.kin, test.vin, v, test.vout) } } } func (s) TestDecodeMetadataHeader(t *testing.T) { for _, test := range []struct { // input kin string vin string // output vout string err error }{ {"a", "abc", "abc", nil}, {"key-bin", "Zm9vAGJhcg==", "foo\x00bar", nil}, {"key-bin", "Zm9vAGJhcg", "foo\x00bar", nil}, {"key-bin", "woA=", binaryValue, nil}, {"a", "abc,efg", "abc,efg", nil}, } { v, err := decodeMetadataHeader(test.kin, test.vin) if !reflect.DeepEqual(v, test.vout) || !reflect.DeepEqual(err, test.err) { t.Fatalf("decodeMetadataHeader(%q, %q) = %q, %v, want %q, %v", test.kin, test.vin, v, err, test.vout, test.err) } } } grpc-go-1.29.1/internal/transport/keepalive_test.go000066400000000000000000000474471365033716300223740ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// This file contains tests related to the following proposals:
// https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
// https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md
// https://github.com/grpc/proposal/blob/master/A18-tcp-user-timeout.md
package transport

import (
	"context"
	"io"
	"net"
	"testing"
	"time"

	"golang.org/x/net/http2"
	"google.golang.org/grpc/internal/syscall"
	"google.golang.org/grpc/keepalive"
)

// TestMaxConnectionIdle tests that a server will send GoAway to an idle
// client. An idle client is one who doesn't make any RPC calls for a duration
// of MaxConnectionIdle time.
func (s) TestMaxConnectionIdle(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepaliveParams: keepalive.ServerParameters{
			MaxConnectionIdle: 2 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer func() {
		client.Close()
		server.stop()
		cancel()
	}()

	// Open and immediately close a stream so the connection becomes idle.
	stream, err := client.NewStream(context.Background(), &CallHdr{})
	if err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}
	client.CloseStream(stream, io.EOF)

	// Wait for the server's MaxConnectionIdle timeout to kick in, and for it
	// to send a GoAway.
	timeout := time.NewTimer(time.Second * 4)
	select {
	case <-client.Error():
		if !timeout.Stop() {
			<-timeout.C
		}
		if reason := client.GetGoAwayReason(); reason != GoAwayNoReason {
			t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason)
		}
	case <-timeout.C:
		t.Fatalf("MaxConnectionIdle timeout expired, expected a GoAway from the server.")
	}
}

// TestMaxConnectionIdleBusyClient tests that a server will not send GoAway to
// a busy client.
func (s) TestMaxConnectionIdleBusyClient(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepaliveParams: keepalive.ServerParameters{
			MaxConnectionIdle: 2 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer func() {
		client.Close()
		server.stop()
		cancel()
	}()

	// Leave the stream open for the entire test: the connection is never idle.
	_, err := client.NewStream(context.Background(), &CallHdr{})
	if err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}

	// Wait for double the MaxConnectionIdle time to make sure the server does
	// not send a GoAway, as the client has an open stream.
	timeout := time.NewTimer(time.Second * 4)
	select {
	case <-client.GoAway():
		if !timeout.Stop() {
			<-timeout.C
		}
		t.Fatalf("A non-idle client received a GoAway.")
	case <-timeout.C:
	}
}

// TestMaxConnectionAge tests that a server will send GoAway after a duration
// of MaxConnectionAge.
func (s) TestMaxConnectionAge(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepaliveParams: keepalive.ServerParameters{
			MaxConnectionAge:      1 * time.Second,
			MaxConnectionAgeGrace: 1 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer func() {
		client.Close()
		server.stop()
		cancel()
	}()

	_, err := client.NewStream(context.Background(), &CallHdr{})
	if err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}

	// Wait for the server's MaxConnectionAge timeout to kick in, and for it
	// to send a GoAway.
	timeout := time.NewTimer(4 * time.Second)
	select {
	case <-client.Error():
		if !timeout.Stop() {
			<-timeout.C
		}
		if reason := client.GetGoAwayReason(); reason != GoAwayNoReason {
			t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason)
		}
	case <-timeout.C:
		t.Fatalf("MaxConnectionAge timeout expired, expected a GoAway from the server.")
	}
}

const (
	defaultWriteBufSize = 32 * 1024
	defaultReadBufSize  = 32 * 1024
)

// TestKeepaliveServerClosesUnresponsiveClient tests that a server closes
// the connection with a client that doesn't respond to keepalive pings.
//
// This test creates a regular net.Conn connection to the server and sends the
// clientPreface and the initial Settings frame, and then remains unresponsive.
func (s) TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepaliveParams: keepalive.ServerParameters{
			Time:    1 * time.Second,
			Timeout: 1 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer func() {
		client.Close()
		server.stop()
		cancel()
	}()

	// Dial a raw TCP connection: this "client" will never answer pings.
	addr := server.addr()
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		t.Fatalf("net.Dial(tcp, %v) failed: %v", addr, err)
	}
	defer conn.Close()

	if n, err := conn.Write(clientPreface); err != nil || n != len(clientPreface) {
		t.Fatalf("conn.Write(clientPreface) failed: n=%v, err=%v", n, err)
	}
	framer := newFramer(conn, defaultWriteBufSize, defaultReadBufSize, 0)
	if err := framer.fr.WriteSettings(http2.Setting{}); err != nil {
		t.Fatal("framer.WriteSettings(http2.Setting{}) failed:", err)
	}
	framer.writer.Flush()

	// We read from the net.Conn till we get an error, which is expected when
	// the server closes the connection as part of the keepalive logic.
	errCh := make(chan error)
	go func() {
		b := make([]byte, 24)
		for {
			// NOTE(review): this reuses the enclosing function's err
			// variable rather than declaring a fresh one in the goroutine.
			if _, err = conn.Read(b); err != nil {
				errCh <- err
				return
			}
		}
	}()

	// Server waits for KeepaliveParams.Time seconds before sending out a ping,
	// and then waits for KeepaliveParams.Timeout for a ping ack.
	timeout := time.NewTimer(4 * time.Second)
	select {
	case err := <-errCh:
		if err != io.EOF {
			t.Fatalf("client.Read(_) = _,%v, want io.EOF", err)
		}
	case <-timeout.C:
		t.Fatalf("keepalive timeout expired, server should have closed the connection.")
	}
}

// TestKeepaliveServerWithResponsiveClient tests that a server doesn't close
// the connection with a client that responds to keepalive pings.
func (s) TestKeepaliveServerWithResponsiveClient(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepaliveParams: keepalive.ServerParameters{
			Time:    1 * time.Second,
			Timeout: 1 * time.Second,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{})
	defer func() {
		client.Close()
		server.stop()
		cancel()
	}()

	// Give keepalive logic some time by sleeping.
	time.Sleep(4 * time.Second)

	// Make sure the client transport is healthy.
	if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}
}

// TestKeepaliveClientClosesUnresponsiveServer creates a server which does not
// respond to keepalive pings, and makes sure that the client closes the
// transport once the keepalive logic kicks in. Here, we set the
// `PermitWithoutStream` parameter to true which ensures that the keepalive
// logic is running even without any active streams.
func (s) TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) {
	connCh := make(chan net.Conn, 1)
	client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
		Time:                1 * time.Second,
		Timeout:             1 * time.Second,
		PermitWithoutStream: true,
	}}, connCh)
	defer cancel()
	defer client.Close()

	conn, ok := <-connCh
	if !ok {
		t.Fatalf("Server didn't return connection object")
	}
	defer conn.Close()

	// Sleep for keepalive to close the connection.
	time.Sleep(4 * time.Second)

	// Make sure the client transport is not healthy.
	if _, err := client.NewStream(context.Background(), &CallHdr{}); err == nil {
		t.Fatal("client.NewStream() should have failed, but succeeded")
	}
}

// TestKeepaliveClientOpenWithUnresponsiveServer creates a server which does
// not respond to keepalive pings, and makes sure that the client does not
// close the transport. Here, we do not set the `PermitWithoutStream` parameter
// to true which ensures that the keepalive logic is turned off without any
// active streams, and therefore the transport stays open.
func (s) TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) {
	connCh := make(chan net.Conn, 1)
	client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
		Time:    1 * time.Second,
		Timeout: 1 * time.Second,
	}}, connCh)
	defer cancel()
	defer client.Close()

	conn, ok := <-connCh
	if !ok {
		t.Fatalf("Server didn't return connection object")
	}
	defer conn.Close()

	// Give keepalive some time.
	time.Sleep(4 * time.Second)

	// Make sure the client transport is healthy.
	if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}
}

// TestKeepaliveClientClosesWithActiveStreams creates a server which does not
// respond to keepalive pings, and makes sure that the client closes the
// transport even when there is an active stream.
func (s) TestKeepaliveClientClosesWithActiveStreams(t *testing.T) {
	connCh := make(chan net.Conn, 1)
	client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{
		Time:    1 * time.Second,
		Timeout: 1 * time.Second,
	}}, connCh)
	defer cancel()
	defer client.Close()

	conn, ok := <-connCh
	if !ok {
		t.Fatalf("Server didn't return connection object")
	}
	defer conn.Close()

	// Create a stream, but send no data on it.
	if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}

	// Give keepalive some time.
	time.Sleep(4 * time.Second)

	// Make sure the client transport is not healthy.
	if _, err := client.NewStream(context.Background(), &CallHdr{}); err == nil {
		t.Fatal("client.NewStream() should have failed, but succeeded")
	}
}

// TestKeepaliveClientStaysHealthyWithResponsiveServer creates a server which
// responds to keepalive pings, and makes sure than a client transport stays
// healthy without any active streams.
func (s) TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) {
	server, client, cancel := setUpWithOptions(t, 0, &ServerConfig{}, normal, ConnectOptions{
		KeepaliveParams: keepalive.ClientParameters{
			Time:                1 * time.Second,
			Timeout:             1 * time.Second,
			PermitWithoutStream: true,
		}})
	defer func() {
		client.Close()
		server.stop()
		cancel()
	}()

	// Give keepalive some time.
	time.Sleep(4 * time.Second)

	// Make sure the client transport is healthy.
	if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil {
		t.Fatalf("client.NewStream() failed: %v", err)
	}
}

// TestKeepaliveClientFrequency creates a server which expects at most 1 client
// ping for every 1.2 seconds, while the client is configured to send a ping
// every 1 second. So, this configuration should end up with the client
// transport being closed. But we had a bug wherein the client was sending one
// ping every [Time+Timeout] instead of every [Time] period, and this test
// explicitly makes sure the fix works and the client sends a ping every [Time]
// period.
func (s) TestKeepaliveClientFrequency(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepalivePolicy: keepalive.EnforcementPolicy{
			MinTime:             1200 * time.Millisecond, // 1.2 seconds
			PermitWithoutStream: true,
		},
	}
	clientOptions := ConnectOptions{
		KeepaliveParams: keepalive.ClientParameters{
			Time:                1 * time.Second,
			Timeout:             2 * time.Second,
			PermitWithoutStream: true,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions)
	defer func() {
		client.Close()
		server.stop()
		cancel()
	}()

	timeout := time.NewTimer(6 * time.Second)
	select {
	case <-client.Error():
		if !timeout.Stop() {
			<-timeout.C
		}
		if reason := client.GetGoAwayReason(); reason != GoAwayTooManyPings {
			t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings)
		}
	case <-timeout.C:
		t.Fatalf("client transport still healthy; expected GoAway from the server.")
	}

	// Make sure the client transport is not healthy.
	if _, err := client.NewStream(context.Background(), &CallHdr{}); err == nil {
		t.Fatal("client.NewStream() should have failed, but succeeded")
	}
}

// TestKeepaliveServerEnforcementWithAbusiveClientNoRPC verifies that the
// server closes a client transport when it sends too many keepalive pings
// (when there are no active streams), based on the configured
// EnforcementPolicy.
func (s) TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) {
	serverConfig := &ServerConfig{
		KeepalivePolicy: keepalive.EnforcementPolicy{
			MinTime: 2 * time.Second,
		},
	}
	// Client pings every 50ms, far more often than the server's 2s minimum.
	clientOptions := ConnectOptions{
		KeepaliveParams: keepalive.ClientParameters{
			Time:                50 * time.Millisecond,
			Timeout:             1 * time.Second,
			PermitWithoutStream: true,
		},
	}
	server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions)
	defer func() {
		client.Close()
		server.stop()
		cancel()
	}()

	timeout := time.NewTimer(4 * time.Second)
	select {
	case <-client.Error():
		if !timeout.Stop() {
			<-timeout.C
		}
		if reason := client.GetGoAwayReason(); reason != GoAwayTooManyPings {
			t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings)
		}
	case <-timeout.C:
		t.Fatalf("client transport still healthy; expected GoAway from the server.")
	}

	// Make sure the client transport is not healthy.
	if _, err := client.NewStream(context.Background(), &CallHdr{}); err == nil {
		t.Fatal("client.NewStream() should have failed, but succeeded")
	}
}

// TestKeepaliveServerEnforcementWithAbusiveClientWithRPC verifies that the
// server closes a client transport when it sends too many keepalive pings
// (even when there is an active stream), based on the configured
// EnforcementPolicy.
func (s) TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) { serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ MinTime: 2 * time.Second, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ Time: 50 * time.Millisecond, Timeout: 1 * time.Second, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions) defer func() { client.Close() server.stop() cancel() }() if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { t.Fatalf("client.NewStream() failed: %v", err) } timeout := time.NewTimer(4 * time.Second) select { case <-client.Error(): if !timeout.Stop() { <-timeout.C } if reason := client.GetGoAwayReason(); reason != GoAwayTooManyPings { t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings) } case <-timeout.C: t.Fatalf("client transport still healthy; expected GoAway from the server.") } // Make sure the client transport is not healthy. if _, err := client.NewStream(context.Background(), &CallHdr{}); err == nil { t.Fatal("client.NewStream() should have failed, but succeeded") } } // TestKeepaliveServerEnforcementWithObeyingClientNoRPC verifies that the // server does not close a client transport (with no active streams) which // sends keepalive pings in accordance to the configured keepalive // EnforcementPolicy. func (s) TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) { serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ MinTime: 100 * time.Millisecond, PermitWithoutStream: true, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ Time: 101 * time.Millisecond, Timeout: 1 * time.Second, PermitWithoutStream: true, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) defer func() { client.Close() server.stop() cancel() }() // Give keepalive enough time. 
time.Sleep(3 * time.Second) // Make sure the client transport is healthy. if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { t.Fatalf("client.NewStream() failed: %v", err) } } // TestKeepaliveServerEnforcementWithObeyingClientWithRPC verifies that the // server does not close a client transport (with active streams) which // sends keepalive pings in accordance to the configured keepalive // EnforcementPolicy. func (s) TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) { serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ MinTime: 100 * time.Millisecond, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ Time: 101 * time.Millisecond, Timeout: 1 * time.Second, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions) defer func() { client.Close() server.stop() cancel() }() if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { t.Fatalf("client.NewStream() failed: %v", err) } // Give keepalive enough time. time.Sleep(3 * time.Second) // Make sure the client transport is healthy. if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { t.Fatalf("client.NewStream() failed: %v", err) } } // TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient verifies that the // server does not closes a client transport, which has been configured to send // more pings than allowed by the server's EnforcementPolicy. This client // transport does not have any active streams and `PermitWithoutStream` is set // to false. This should ensure that the keepalive functionality on the client // side enters a dormant state. 
func (s) TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T) { serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ MinTime: 2 * time.Second, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ Time: 50 * time.Millisecond, Timeout: 1 * time.Second, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) defer func() { client.Close() server.stop() cancel() }() // No active streams on the client. Give keepalive enough time. time.Sleep(5 * time.Second) // Make sure the client transport is healthy. if _, err := client.NewStream(context.Background(), &CallHdr{}); err != nil { t.Fatalf("client.NewStream() failed: %v", err) } } // TestTCPUserTimeout tests that the TCP_USER_TIMEOUT socket option is set to // the keepalive timeout, as detailed in proposal A18. func (s) TestTCPUserTimeout(t *testing.T) { tests := []struct { time time.Duration timeout time.Duration wantTimeout time.Duration }{ { 10 * time.Second, 10 * time.Second, 10 * 1000 * time.Millisecond, }, { 0, 0, 0, }, } for _, tt := range tests { server, client, cancel := setUpWithOptions( t, 0, &ServerConfig{ KeepaliveParams: keepalive.ServerParameters{ Time: tt.timeout, Timeout: tt.timeout, }, }, normal, ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ Time: tt.time, Timeout: tt.timeout, }, }, ) defer func() { client.Close() server.stop() cancel() }() stream, err := client.NewStream(context.Background(), &CallHdr{}) if err != nil { t.Fatalf("client.NewStream() failed: %v", err) } client.CloseStream(stream, io.EOF) opt, err := syscall.GetTCPUserTimeout(client.conn) if err != nil { t.Fatalf("syscall.GetTCPUserTimeout() failed: %v", err) } if opt < 0 { t.Skipf("skipping test on unsupported environment") } if gotTimeout := time.Duration(opt) * time.Millisecond; gotTimeout != tt.wantTimeout { t.Fatalf("syscall.GetTCPUserTimeout() = %d, want %d", gotTimeout, tt.wantTimeout) } } } 
grpc-go-1.29.1/internal/transport/log.go000066400000000000000000000022021365033716300201250ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // This file contains wrappers for grpclog functions. // The transport package only logs to verbose level 2 by default. package transport import "google.golang.org/grpc/grpclog" const logLevel = 2 func infof(format string, args ...interface{}) { if grpclog.V(logLevel) { grpclog.Infof(format, args...) } } func warningf(format string, args ...interface{}) { if grpclog.V(logLevel) { grpclog.Warningf(format, args...) } } func errorf(format string, args ...interface{}) { if grpclog.V(logLevel) { grpclog.Errorf(format, args...) } } grpc-go-1.29.1/internal/transport/transport.go000066400000000000000000000645321365033716300214160ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ // Package transport defines and implements message oriented communication // channel to complete various transactions (e.g., an RPC). It is meant for // grpc-internal usage and is not intended to be imported directly by users. package transport import ( "bytes" "context" "errors" "fmt" "io" "net" "sync" "sync/atomic" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) type bufferPool struct { pool sync.Pool } func newBufferPool() *bufferPool { return &bufferPool{ pool: sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, }, } } func (p *bufferPool) get() *bytes.Buffer { return p.pool.Get().(*bytes.Buffer) } func (p *bufferPool) put(b *bytes.Buffer) { p.pool.Put(b) } // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { buffer *bytes.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. err error } // recvBuffer is an unbounded channel of recvMsg structs. // // Note: recvBuffer differs from buffer.Unbounded only in the fact that it // holds a channel of recvMsg structs instead of objects implementing "item" // interface. recvBuffer is written to much more often and using strict recvMsg // structs helps avoid allocation in "recvBuffer.put" type recvBuffer struct { c chan recvMsg mu sync.Mutex backlog []recvMsg err error } func newRecvBuffer() *recvBuffer { b := &recvBuffer{ c: make(chan recvMsg, 1), } return b } func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() if b.err != nil { b.mu.Unlock() // An error had occurred earlier, don't accept more // data or errors. 
return } b.err = r.err if len(b.backlog) == 0 { select { case b.c <- r: b.mu.Unlock() return default: } } b.backlog = append(b.backlog, r) b.mu.Unlock() } func (b *recvBuffer) load() { b.mu.Lock() if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: b.backlog[0] = recvMsg{} b.backlog = b.backlog[1:] default: } } b.mu.Unlock() } // get returns the channel that receives a recvMsg in the buffer. // // Upon receipt of a recvMsg, the caller should call load to send another // recvMsg onto the channel if there is any. func (b *recvBuffer) get() <-chan recvMsg { return b.c } // recvBufferReader implements io.Reader interface to read the data from // recvBuffer. type recvBufferReader struct { closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. ctx context.Context ctxDone <-chan struct{} // cache of ctx.Done() (for performance). recv *recvBuffer last *bytes.Buffer // Stores the remaining data in the previous calls. err error freeBuffer func(*bytes.Buffer) } // Read reads the next len(p) bytes from last. If last is drained, it tries to // read additional data from recv. It blocks if there no additional data available // in recv. If Read returns any non-nil error, it will continue to return that error. func (r *recvBufferReader) Read(p []byte) (n int, err error) { if r.err != nil { return 0, r.err } if r.last != nil { // Read remaining data left in last call. copied, _ := r.last.Read(p) if r.last.Len() == 0 { r.freeBuffer(r.last) r.last = nil } return copied, nil } if r.closeStream != nil { n, r.err = r.readClient(p) } else { n, r.err = r.read(p) } return n, r.err } func (r *recvBufferReader) read(p []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): return r.readAdditional(m, p) } } func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // If the context is canceled, then closes the stream with nil metadata. 
// closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. select { case <-r.ctxDone: // Note that this adds the ctx error to the end of recv buffer, and // reads from the head. This will delay the error until recv buffer is // empty, thus will delay ctx cancellation in Recv(). // // It's done this way to fix a race between ctx cancel and trailer. The // race was, stream.Recv() may return ctx error if ctxDone wins the // race, but stream.Trailer() may return a non-nil md because the stream // was not marked as done when trailer is received. This closeStream // call will mark stream as done, thus fix the race. // // TODO: delaying ctx error seems like a unnecessary side effect. What // we really want is to mark the stream as done, and return ctx error // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readAdditional(m, p) case m := <-r.recv.get(): return r.readAdditional(m, p) } } func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { r.recv.load() if m.err != nil { return 0, m.err } copied, _ := m.buffer.Read(p) if m.buffer.Len() == 0 { r.freeBuffer(m.buffer) r.last = nil } else { r.last = m.buffer } return copied, nil } type streamState uint32 const ( streamActive streamState = iota streamWriteDone // EndStream sent streamReadDone // EndStream received streamDone // the entire stream is finished. ) // Stream represents an RPC in the transport layer. type Stream struct { id uint32 st ServerTransport // nil for client side Stream ct *http2Client // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. ctxDone <-chan struct{} // same as done chan but for server side. 
Cache of ctx.Done() (for performance) method string // the associated RPC method of the stream recvCompress string sendCompress string buf *recvBuffer trReader io.Reader fc *inFlow wq *writeQuota // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) headerChan chan struct{} // closed to indicate the end of header metadata. headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). Not valid on server side. headerValid bool // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex // On client side, header keeps the received header metadata. // // On server side, header keeps the header set by SetHeader(). The complete // header will merged into this after t.WriteHeader() is called. header metadata.MD trailer metadata.MD // the key-value map of trailer metadata. noHeaders bool // set if the client never received headers (set only after the stream is done). // On the server-side, headerSent is atomically set to 1 when the headers are sent out. headerSent uint32 state streamState // On client-side it is the status error received from the server. // On server-side it is unused. status *status.Status bytesReceived uint32 // indicates whether any bytes have been received on this stream unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string } // isHeaderSent is only valid on the server-side. func (s *Stream) isHeaderSent() bool { return atomic.LoadUint32(&s.headerSent) == 1 } // updateHeaderSent updates headerSent and returns true // if it was alreay set. It is valid only on server-side. 
func (s *Stream) updateHeaderSent() bool { return atomic.SwapUint32(&s.headerSent, 1) == 1 } func (s *Stream) swapState(st streamState) streamState { return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) } func (s *Stream) compareAndSwapState(oldState, newState streamState) bool { return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState)) } func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } func (s *Stream) waitOnHeader() { if s.headerChan == nil { // On the server headerChan is always nil since a stream originates // only after having received headers. return } select { case <-s.ctx.Done(): // Close the stream to prevent headers/trailers from changing after // this function returns. s.ct.CloseStream(s, ContextErr(s.ctx.Err())) // headerChan could possibly not be closed yet if closeStream raced // with operateHeaders; wait until it is closed explicitly here. <-s.headerChan case <-s.headerChan: } } // RecvCompress returns the compression algorithm applied to the inbound // message. It is empty string if there is no compression applied. func (s *Stream) RecvCompress() string { s.waitOnHeader() return s.recvCompress } // SetSendCompress sets the compression algorithm to the stream. func (s *Stream) SetSendCompress(str string) { s.sendCompress = str } // Done returns a channel which is closed when it receives the final status // from the server. func (s *Stream) Done() <-chan struct{} { return s.done } // Header returns the header metadata of the stream. // // On client side, it acquires the key-value pairs of header metadata once it is // available. It blocks until i) the metadata is ready or ii) there is no header // metadata or iii) the stream is canceled/expired. // // On server side, it returns the out header after t.WriteHeader is called. It // does not block and must not be called until after WriteHeader. 
func (s *Stream) Header() (metadata.MD, error) { if s.headerChan == nil { // On server side, return the header in stream. It will be the out // header after t.WriteHeader is called. return s.header.Copy(), nil } s.waitOnHeader() if !s.headerValid { return nil, s.status.Err() } return s.header.Copy(), nil } // TrailersOnly blocks until a header or trailers-only frame is received and // then returns true if the stream was trailers-only. If the stream ends // before headers are received, returns true, nil. Client-side only. func (s *Stream) TrailersOnly() bool { s.waitOnHeader() return s.noHeaders } // Trailer returns the cached trailer metedata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. // It can be safely read only after stream has ended that is either read // or write have returned io.EOF. func (s *Stream) Trailer() metadata.MD { c := s.trailer.Copy() return c } // ContentSubtype returns the content-subtype for a request. For example, a // content-subtype of "proto" will result in a content-type of // "application/grpc+proto". This will always be lowercase. See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for // more details. func (s *Stream) ContentSubtype() string { return s.contentSubtype } // Context returns the context of the stream. func (s *Stream) Context() context.Context { return s.ctx } // Method returns the method for the stream. func (s *Stream) Method() string { return s.method } // Status returns the status received from the server. // Status can be read safely only after the stream has ended, // that is, after Done() is closed. func (s *Stream) Status() *status.Status { return s.status } // SetHeader sets the header metadata. This can be called multiple times. // Server side only. // This should not be called in parallel to other data writes. 
func (s *Stream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } if s.isHeaderSent() || s.getState() == streamDone { return ErrIllegalHeaderWrite } s.hdrMu.Lock() s.header = metadata.Join(s.header, md) s.hdrMu.Unlock() return nil } // SendHeader sends the given header metadata. The given metadata is // combined with any metadata set by previous calls to SetHeader and // then written to the transport stream. func (s *Stream) SendHeader(md metadata.MD) error { return s.st.WriteHeader(s, md) } // SetTrailer sets the trailer metadata which will be sent with the RPC status // by the server. This can be called multiple times. Server side only. // This should not be called parallel to other data writes. func (s *Stream) SetTrailer(md metadata.MD) error { if md.Len() == 0 { return nil } if s.getState() == streamDone { return ErrIllegalHeaderWrite } s.hdrMu.Lock() s.trailer = metadata.Join(s.trailer, md) s.hdrMu.Unlock() return nil } func (s *Stream) write(m recvMsg) { s.buf.put(m) } // Read reads all p bytes from the wire for this stream. func (s *Stream) Read(p []byte) (n int, err error) { // Don't request a read if there was an error earlier if er := s.trReader.(*transportReader).er; er != nil { return 0, er } s.requestRead(len(p)) return io.ReadFull(s.trReader, p) } // tranportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { reader io.Reader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } func (t *transportReader) Read(p []byte) (n int, err error) { n, err = t.reader.Read(p) if err != nil { t.er = err return } t.windowHandler(n) return } // BytesReceived indicates whether any bytes have been received on this stream. 
func (s *Stream) BytesReceived() bool { return atomic.LoadUint32(&s.bytesReceived) == 1 } // Unprocessed indicates whether the server did not process this stream -- // i.e. it sent a refused stream or GOAWAY including this stream ID. func (s *Stream) Unprocessed() bool { return atomic.LoadUint32(&s.unprocessed) == 1 } // GoString is implemented by Stream so context.String() won't // race when printing %#v. func (s *Stream) GoString() string { return fmt.Sprintf("", s, s.method) } // state of transport type transportState int const ( reachable transportState = iota closing draining ) // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { MaxStreams uint32 AuthInfo credentials.AuthInfo InTapHandle tap.ServerInHandle StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int ChannelzParentID int64 MaxHeaderListSize *uint32 HeaderTableSize *uint32 } // NewServerTransport creates a ServerTransport with conn or non-nil error // if it fails. func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { return newHTTP2Server(conn, config) } // ConnectOptions covers all relevant options for communicating with the server. type ConnectOptions struct { // UserAgent is the application user agent. UserAgent string // Dialer specifies how to dial a network address. Dialer func(context.Context, string) (net.Conn, error) // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. FailOnNonTempDialError bool // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. PerRPCCredentials []credentials.PerRPCCredentials // TransportCredentials stores the Authenticator required to setup a client // connection. Only one of TransportCredentials and CredsBundle is non-nil. 
TransportCredentials credentials.TransportCredentials // CredsBundle is the credentials bundle to be used. Only one of // TransportCredentials and CredsBundle is non-nil. CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters // StatsHandler stores the handler for stats. StatsHandler stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. InitialConnWindowSize int32 // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID int64 // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 } // TargetInfo contains the information of the target such as network address and metadata. type TargetInfo struct { Addr string Metadata interface{} Authority string } // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose) } // Options provides additional hints and information for message // transmission. type Options struct { // Last indicates whether this write is the last piece for // this stream. Last bool } // CallHdr carries the information of a particular RPC. type CallHdr struct { // Host specifies the peer's host. 
Host string // Method specifies the operation to perform. Method string // SendCompress specifies the compression algorithm applied on // outbound message. SendCompress string // Creds specifies credentials.PerRPCCredentials for a call. Creds credentials.PerRPCCredentials // ContentSubtype specifies the content-subtype for a request. For example, a // content-subtype of "proto" will result in a content-type of // "application/grpc+proto". The value of ContentSubtype must be all // lowercase, otherwise the behavior is undefined. See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. ContentSubtype string PreviousAttempts int // value of grpc-previous-rpc-attempts header to set } // ClientTransport is the common interface for all gRPC client-side transport // implementations. type ClientTransport interface { // Close tears down this transport. Once it returns, the transport // should not be accessed any more. The caller must make sure this // is called only once. Close() error // GracefulClose starts to tear down the transport: the transport will stop // accepting new RPCs and NewStream will return error. Once all streams are // finished, the transport will close. // // It does not block. GracefulClose() // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. Write(s *Stream, hdr []byte, data []byte, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) // CloseStream clears the footprint of a stream when the stream is // not needed any more. The err indicates the error incurred when // CloseStream is called. Must be called when a stream is finished // unless the associated transport is closing. CloseStream(stream *Stream, err error) // Error returns a channel that is closed when some I/O error // happens. 
Typically the caller should have a goroutine to monitor // this in order to take action (e.g., close the current transport // and create a new one) in error case. It should not return nil // once the transport is initiated. Error() <-chan struct{} // GoAway returns a channel that is closed when ClientTransport // receives the draining signal from the server (e.g., GOAWAY frame in // HTTP/2). GoAway() <-chan struct{} // GetGoAwayReason returns the reason why GoAway frame was received. GetGoAwayReason() GoAwayReason // RemoteAddr returns the remote network address. RemoteAddr() net.Addr // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() // IncrMsgRecv increments the number of message received through this transport. IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport // implementations. // // Methods may be called concurrently from multiple goroutines, but // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. HandleStreams(func(*Stream), func(context.Context, string) context.Context) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. WriteHeader(s *Stream, md metadata.MD) error // Write sends the data for the given stream. // Write may not be called on all streams. Write(s *Stream, hdr []byte, data []byte, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. WriteStatus(s *Stream, st *status.Status) error // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. Close() error // RemoteAddr returns the remote network address. 
RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain() // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() // IncrMsgRecv increments the number of message received through this transport. IncrMsgRecv() } // connectionErrorf creates an ConnectionError with the specified error description. func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, err: e, } } // ConnectionError is an error that results in the termination of the // entire connection and the retry of all the active streams. type ConnectionError struct { Desc string temp bool err error } func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: desc = %q", e.Desc) } // Temporary indicates if this connection error is temporary or fatal. func (e ConnectionError) Temporary() bool { return e.temp } // Origin returns the original error of this connection error. func (e ConnectionError) Origin() error { // Never return nil error here. // If the original error is nil, return itself. if e.err == nil { return e } return e.err } var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") // errStreamDrain indicates that the stream is rejected because the // connection is draining. This could be caused by goaway or balancer // removing the address. errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") // errStreamDone is returned from write at the client side to indiacte application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this // stream's ID in unprocessed RPCs. 
statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") ) // GoAwayReason contains the reason for the GoAway frame received. type GoAwayReason uint8 const ( // GoAwayInvalid indicates that no GoAway frame is received. GoAwayInvalid GoAwayReason = 0 // GoAwayNoReason is the default value when GoAway frame is received. GoAwayNoReason GoAwayReason = 1 // GoAwayTooManyPings indicates that a GoAway frame with // ErrCodeEnhanceYourCalm was received and that the debug data said // "too_many_pings". GoAwayTooManyPings GoAwayReason = 2 ) // channelzData is used to store channelz related data for http2Client and http2Server. // These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic // operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. // Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. type channelzData struct { kpCount int64 // The number of streams that have started, including already finished ones. streamsStarted int64 // Client side: The number of streams that have ended successfully by receiving // EoS bit set frame from server. // Server side: The number of streams that have ended successfully by sending // frame with EoS bit set. streamsSucceeded int64 streamsFailed int64 // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type // instead of time.Time since it's more costly to atomically update time.Time variable than int64 // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. lastStreamCreatedTime int64 msgSent int64 msgRecv int64 lastMsgSentTime int64 lastMsgRecvTime int64 } // ContextErr converts the error from context package into a status error. 
func ContextErr(err error) error {
	switch err {
	case context.DeadlineExceeded:
		// A deadline expiry on the context maps to the DeadlineExceeded RPC code.
		return status.Error(codes.DeadlineExceeded, err.Error())
	case context.Canceled:
		// An explicit cancellation maps to the Canceled RPC code.
		return status.Error(codes.Canceled, err.Error())
	}
	// context.Context only documents DeadlineExceeded and Canceled as its
	// error values, so anything else reaching here is unexpected and is
	// surfaced as an Internal error.
	// NOTE(review): "context packet" looks like a typo for "context package",
	// but this is a runtime error string that callers or tests may match on,
	// so it is deliberately left unchanged here.
	return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err)
}
grpc-go-1.29.1/internal/transport/transport_test.go000066400000000000000000001461531365033716300224550ustar00rootroot00000000000000/*
 *
 * Copyright 2014 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
* */ package transport import ( "bytes" "context" "encoding/binary" "errors" "fmt" "io" "math" "net" "runtime" "strconv" "strings" "sync" "testing" "time" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/leakcheck" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/status" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } type server struct { lis net.Listener port string startedErr chan error // error (or nil) with server start value mu sync.Mutex conns map[ServerTransport]bool h *testStreamHandler ready chan struct{} } var ( expectedRequest = []byte("ping") expectedResponse = []byte("pong") expectedRequestLarge = make([]byte, initialWindowSize*2) expectedResponseLarge = make([]byte, initialWindowSize*2) expectedInvalidHeaderField = "invalid/content-type" ) func init() { expectedRequestLarge[0] = 'g' expectedRequestLarge[len(expectedRequestLarge)-1] = 'r' expectedResponseLarge[0] = 'p' expectedResponseLarge[len(expectedResponseLarge)-1] = 'c' } type testStreamHandler struct { t *http2Server notify chan struct{} getNotified chan struct{} } type hType int const ( normal hType = iota suspended notifyCall misbehaved encodingRequiredStatus invalidHeaderField delayRead pingpong ) func (h *testStreamHandler) handleStreamAndNotify(s *Stream) { if h.notify == nil { return } go func() { select { case <-h.notify: default: close(h.notify) } }() } func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) { req := expectedRequest resp := expectedResponse if s.Method() == "foo.Large" { req = expectedRequestLarge resp = expectedResponseLarge } p := make([]byte, len(req)) _, err := s.Read(p) if err != nil { return } if !bytes.Equal(p, req) { t.Errorf("handleStream got %v, want %v", p, req) h.t.WriteStatus(s, status.New(codes.Internal, "panic")) return } // send a response back to the client. 
h.t.Write(s, nil, resp, &Options{}) // send the trailer to end the stream. h.t.WriteStatus(s, status.New(codes.OK, "")) } func (h *testStreamHandler) handleStreamPingPong(t *testing.T, s *Stream) { header := make([]byte, 5) for { if _, err := s.Read(header); err != nil { if err == io.EOF { h.t.WriteStatus(s, status.New(codes.OK, "")) return } t.Errorf("Error on server while reading data header: %v", err) h.t.WriteStatus(s, status.New(codes.Internal, "panic")) return } sz := binary.BigEndian.Uint32(header[1:]) msg := make([]byte, int(sz)) if _, err := s.Read(msg); err != nil { t.Errorf("Error on server while reading message: %v", err) h.t.WriteStatus(s, status.New(codes.Internal, "panic")) return } buf := make([]byte, sz+5) buf[0] = byte(0) binary.BigEndian.PutUint32(buf[1:], uint32(sz)) copy(buf[5:], msg) h.t.Write(s, nil, buf, &Options{}) } } func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) { conn, ok := s.st.(*http2Server) if !ok { t.Errorf("Failed to convert %v to *http2Server", s.st) h.t.WriteStatus(s, status.New(codes.Internal, "")) return } var sent int p := make([]byte, http2MaxFrameLen) for sent < initialWindowSize { n := initialWindowSize - sent // The last message may be smaller than http2MaxFrameLen if n <= http2MaxFrameLen { if s.Method() == "foo.Connection" { // Violate connection level flow control window of client but do not // violate any stream level windows. p = make([]byte, n) } else { // Violate stream level flow control window of client. p = make([]byte, n+1) } } conn.controlBuf.put(&dataFrame{ streamID: s.id, h: nil, d: p, onEachWrite: func() {}, }) sent += len(p) } } func (h *testStreamHandler) handleStreamEncodingRequiredStatus(t *testing.T, s *Stream) { // raw newline is not accepted by http2 framer so it must be encoded. 
h.t.WriteStatus(s, encodingTestStatus) } func (h *testStreamHandler) handleStreamInvalidHeaderField(t *testing.T, s *Stream) { headerFields := []hpack.HeaderField{} headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: expectedInvalidHeaderField}) h.t.controlBuf.put(&headerFrame{ streamID: s.id, hf: headerFields, endStream: false, }) } // handleStreamDelayRead delays reads so that the other side has to halt on // stream-level flow control. // This handler assumes dynamic flow control is turned off and assumes window // sizes to be set to defaultWindowSize. func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) { req := expectedRequest resp := expectedResponse if s.Method() == "foo.Large" { req = expectedRequestLarge resp = expectedResponseLarge } var ( mu sync.Mutex total int ) s.wq.replenish = func(n int) { mu.Lock() total += n mu.Unlock() s.wq.realReplenish(n) } getTotal := func() int { mu.Lock() defer mu.Unlock() return total } done := make(chan struct{}) defer close(done) go func() { for { select { // Prevent goroutine from leaking. case <-done: return default: } if getTotal() == defaultWindowSize { // Signal the client to start reading and // thereby send window update. close(h.notify) return } runtime.Gosched() } }() p := make([]byte, len(req)) // Let the other side run out of stream-level window before // starting to read and thereby sending a window update. timer := time.NewTimer(time.Second * 10) select { case <-h.getNotified: timer.Stop() case <-timer.C: t.Errorf("Server timed-out.") return } _, err := s.Read(p) if err != nil { t.Errorf("s.Read(_) = _, %v, want _, ", err) return } if !bytes.Equal(p, req) { t.Errorf("handleStream got %v, want %v", p, req) return } // This write will cause server to run out of stream level, // flow control and the other side won't send a window update // until that happens. 
if err := h.t.Write(s, nil, resp, &Options{}); err != nil { t.Errorf("server Write got %v, want ", err) return } // Read one more time to ensure that everything remains fine and // that the goroutine, that we launched earlier to signal client // to read, gets enough time to process. _, err = s.Read(p) if err != nil { t.Errorf("s.Read(_) = _, %v, want _, nil", err) return } // send the trailer to end the stream. if err := h.t.WriteStatus(s, status.New(codes.OK, "")); err != nil { t.Errorf("server WriteStatus got %v, want ", err) return } } // start starts server. Other goroutines should block on s.readyChan for further operations. func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hType) { var err error if port == 0 { s.lis, err = net.Listen("tcp", "localhost:0") } else { s.lis, err = net.Listen("tcp", "localhost:"+strconv.Itoa(port)) } if err != nil { s.startedErr <- fmt.Errorf("failed to listen: %v", err) return } _, p, err := net.SplitHostPort(s.lis.Addr().String()) if err != nil { s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err) return } s.port = p s.conns = make(map[ServerTransport]bool) s.startedErr <- nil for { conn, err := s.lis.Accept() if err != nil { return } transport, err := NewServerTransport("http2", conn, serverConfig) if err != nil { return } s.mu.Lock() if s.conns == nil { s.mu.Unlock() transport.Close() return } s.conns[transport] = true h := &testStreamHandler{t: transport.(*http2Server)} s.h = h s.mu.Unlock() switch ht { case notifyCall: go transport.HandleStreams(h.handleStreamAndNotify, func(ctx context.Context, _ string) context.Context { return ctx }) case suspended: go transport.HandleStreams(func(*Stream) {}, // Do nothing to handle the stream. 
func(ctx context.Context, method string) context.Context { return ctx }) case misbehaved: go transport.HandleStreams(func(s *Stream) { go h.handleStreamMisbehave(t, s) }, func(ctx context.Context, method string) context.Context { return ctx }) case encodingRequiredStatus: go transport.HandleStreams(func(s *Stream) { go h.handleStreamEncodingRequiredStatus(t, s) }, func(ctx context.Context, method string) context.Context { return ctx }) case invalidHeaderField: go transport.HandleStreams(func(s *Stream) { go h.handleStreamInvalidHeaderField(t, s) }, func(ctx context.Context, method string) context.Context { return ctx }) case delayRead: h.notify = make(chan struct{}) h.getNotified = make(chan struct{}) s.mu.Lock() close(s.ready) s.mu.Unlock() go transport.HandleStreams(func(s *Stream) { go h.handleStreamDelayRead(t, s) }, func(ctx context.Context, method string) context.Context { return ctx }) case pingpong: go transport.HandleStreams(func(s *Stream) { go h.handleStreamPingPong(t, s) }, func(ctx context.Context, method string) context.Context { return ctx }) default: go transport.HandleStreams(func(s *Stream) { go h.handleStream(t, s) }, func(ctx context.Context, method string) context.Context { return ctx }) } } } func (s *server) wait(t *testing.T, timeout time.Duration) { select { case err := <-s.startedErr: if err != nil { t.Fatal(err) } case <-time.After(timeout): t.Fatalf("Timed out after %v waiting for server to be ready", timeout) } } func (s *server) stop() { s.lis.Close() s.mu.Lock() for c := range s.conns { c.Close() } s.conns = nil s.mu.Unlock() } func (s *server) addr() string { if s.lis == nil { return "" } return s.lis.Addr().String() } func setUpServerOnly(t *testing.T, port int, serverConfig *ServerConfig, ht hType) *server { server := &server{startedErr: make(chan error, 1), ready: make(chan struct{})} go server.start(t, port, serverConfig, ht) server.wait(t, 2*time.Second) return server } func setUp(t *testing.T, port int, maxStreams uint32, ht 
hType) (*server, *http2Client, func()) { return setUpWithOptions(t, port, &ServerConfig{MaxStreams: maxStreams}, ht, ConnectOptions{}) } func setUpWithOptions(t *testing.T, port int, serverConfig *ServerConfig, ht hType, copts ConnectOptions) (*server, *http2Client, func()) { server := setUpServerOnly(t, port, serverConfig, ht) addr := "localhost:" + server.port target := TargetInfo{ Addr: addr, } connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) ct, connErr := NewClientTransport(connectCtx, context.Background(), target, copts, func() {}, func(GoAwayReason) {}, func() {}) if connErr != nil { cancel() // Do not cancel in success path. t.Fatalf("failed to create transport: %v", connErr) } return server, ct.(*http2Client), cancel } func setUpWithNoPingServer(t *testing.T, copts ConnectOptions, connCh chan net.Conn) (*http2Client, func()) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen: %v", err) } // Launch a non responsive server. go func() { defer lis.Close() conn, err := lis.Accept() if err != nil { t.Errorf("Error at server-side while accepting: %v", err) close(connCh) return } connCh <- conn }() connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) tr, err := NewClientTransport(connectCtx, context.Background(), TargetInfo{Addr: lis.Addr().String()}, copts, func() {}, func(GoAwayReason) {}, func() {}) if err != nil { cancel() // Do not cancel in success path. // Server clean-up. lis.Close() if conn, ok := <-connCh; ok { conn.Close() } t.Fatalf("Failed to dial: %v", err) } return tr.(*http2Client), cancel } // TestInflightStreamClosing ensures that closing in-flight stream // sends status error to concurrent stream reader. 
func (s) TestInflightStreamClosing(t *testing.T) { serverConfig := &ServerConfig{} server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer cancel() defer server.stop() defer client.Close() stream, err := client.NewStream(context.Background(), &CallHdr{}) if err != nil { t.Fatalf("Client failed to create RPC request: %v", err) } donec := make(chan struct{}) serr := status.Error(codes.Internal, "client connection is closing") go func() { defer close(donec) if _, err := stream.Read(make([]byte, defaultWindowSize)); err != serr { t.Errorf("unexpected Stream error %v, expected %v", err, serr) } }() // should unblock concurrent stream.Read client.CloseStream(stream, serr) // wait for stream.Read error timeout := time.NewTimer(5 * time.Second) select { case <-donec: if !timeout.Stop() { <-timeout.C } case <-timeout.C: t.Fatalf("Test timed out, expected a status error.") } } func (s) TestClientSendAndReceive(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, normal) defer cancel() callHdr := &CallHdr{ Host: "localhost", Method: "foo.Small", } s1, err1 := ct.NewStream(context.Background(), callHdr) if err1 != nil { t.Fatalf("failed to open stream: %v", err1) } if s1.id != 1 { t.Fatalf("wrong stream id: %d", s1.id) } s2, err2 := ct.NewStream(context.Background(), callHdr) if err2 != nil { t.Fatalf("failed to open stream: %v", err2) } if s2.id != 3 { t.Fatalf("wrong stream id: %d", s2.id) } opts := Options{Last: true} if err := ct.Write(s1, nil, expectedRequest, &opts); err != nil && err != io.EOF { t.Fatalf("failed to send data: %v", err) } p := make([]byte, len(expectedResponse)) _, recvErr := s1.Read(p) if recvErr != nil || !bytes.Equal(p, expectedResponse) { t.Fatalf("Error: %v, want ; Result: %v, want %v", recvErr, p, expectedResponse) } _, recvErr = s1.Read(p) if recvErr != io.EOF { t.Fatalf("Error: %v; want ", recvErr) } ct.Close() server.stop() } func (s) TestClientErrorNotify(t *testing.T) { server, ct, cancel 
:= setUp(t, 0, math.MaxUint32, normal) defer cancel() go server.stop() // ct.reader should detect the error and activate ct.Error(). <-ct.Error() ct.Close() } func performOneRPC(ct ClientTransport) { callHdr := &CallHdr{ Host: "localhost", Method: "foo.Small", } s, err := ct.NewStream(context.Background(), callHdr) if err != nil { return } opts := Options{Last: true} if err := ct.Write(s, []byte{}, expectedRequest, &opts); err == nil || err == io.EOF { time.Sleep(5 * time.Millisecond) // The following s.Recv()'s could error out because the // underlying transport is gone. // // Read response p := make([]byte, len(expectedResponse)) s.Read(p) // Read io.EOF s.Read(p) } } func (s) TestClientMix(t *testing.T) { s, ct, cancel := setUp(t, 0, math.MaxUint32, normal) defer cancel() go func(s *server) { time.Sleep(5 * time.Second) s.stop() }(s) go func(ct ClientTransport) { <-ct.Error() ct.Close() }(ct) for i := 0; i < 1000; i++ { time.Sleep(10 * time.Millisecond) go performOneRPC(ct) } } func (s) TestLargeMessage(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, normal) defer cancel() callHdr := &CallHdr{ Host: "localhost", Method: "foo.Large", } var wg sync.WaitGroup for i := 0; i < 2; i++ { wg.Add(1) go func() { defer wg.Done() s, err := ct.NewStream(context.Background(), callHdr) if err != nil { t.Errorf("%v.NewStream(_, _) = _, %v, want _, ", ct, err) } if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true}); err != nil && err != io.EOF { t.Errorf("%v.Write(_, _, _) = %v, want ", ct, err) } p := make([]byte, len(expectedResponseLarge)) if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) { t.Errorf("s.Read(%v) = _, %v, want %v, ", err, p, expectedResponse) } if _, err = s.Read(p); err != io.EOF { t.Errorf("Failed to complete the stream %v; want ", err) } }() } wg.Wait() ct.Close() server.stop() } func (s) TestLargeMessageWithDelayRead(t *testing.T) { // Disable dynamic flow control. 
sc := &ServerConfig{ InitialWindowSize: defaultWindowSize, InitialConnWindowSize: defaultWindowSize, } co := ConnectOptions{ InitialWindowSize: defaultWindowSize, InitialConnWindowSize: defaultWindowSize, } server, ct, cancel := setUpWithOptions(t, 0, sc, delayRead, co) defer cancel() defer server.stop() defer ct.Close() server.mu.Lock() ready := server.ready server.mu.Unlock() callHdr := &CallHdr{ Host: "localhost", Method: "foo.Large", } ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10)) defer cancel() s, err := ct.NewStream(ctx, callHdr) if err != nil { t.Fatalf("%v.NewStream(_, _) = _, %v, want _, ", ct, err) return } // Wait for server's handerler to be initialized select { case <-ready: case <-ctx.Done(): t.Fatalf("Client timed out waiting for server handler to be initialized.") } server.mu.Lock() serviceHandler := server.h server.mu.Unlock() var ( mu sync.Mutex total int ) s.wq.replenish = func(n int) { mu.Lock() total += n mu.Unlock() s.wq.realReplenish(n) } getTotal := func() int { mu.Lock() defer mu.Unlock() return total } done := make(chan struct{}) defer close(done) go func() { for { select { // Prevent goroutine from leaking in case of error. case <-done: return default: } if getTotal() == defaultWindowSize { // unblock server to be able to read and // thereby send stream level window update. close(serviceHandler.getNotified) return } runtime.Gosched() } }() // This write will cause client to run out of stream level, // flow control and the other side won't send a window update // until that happens. if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{}); err != nil { t.Fatalf("write(_, _, _) = %v, want ", err) } p := make([]byte, len(expectedResponseLarge)) // Wait for the other side to run out of stream level flow control before // reading and thereby sending a window update. 
select { case <-serviceHandler.notify: case <-ctx.Done(): t.Fatalf("Client timed out") } if _, err := s.Read(p); err != nil || !bytes.Equal(p, expectedResponseLarge) { t.Fatalf("s.Read(_) = _, %v, want _, ", err) } if err := ct.Write(s, []byte{}, expectedRequestLarge, &Options{Last: true}); err != nil { t.Fatalf("Write(_, _, _) = %v, want ", err) } if _, err = s.Read(p); err != io.EOF { t.Fatalf("Failed to complete the stream %v; want ", err) } } func (s) TestGracefulClose(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, pingpong) defer cancel() defer func() { // Stop the server's listener to make the server's goroutines terminate // (after the last active stream is done). server.lis.Close() // Check for goroutine leaks (i.e. GracefulClose with an active stream // doesn't eventually close the connection when that stream completes). leakcheck.Check(t) // Correctly clean up the server server.stop() }() ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10)) defer cancel() s, err := ct.NewStream(ctx, &CallHdr{}) if err != nil { t.Fatalf("NewStream(_, _) = _, %v, want _, ", err) } msg := make([]byte, 1024) outgoingHeader := make([]byte, 5) outgoingHeader[0] = byte(0) binary.BigEndian.PutUint32(outgoingHeader[1:], uint32(len(msg))) incomingHeader := make([]byte, 5) if err := ct.Write(s, outgoingHeader, msg, &Options{}); err != nil { t.Fatalf("Error while writing: %v", err) } if _, err := s.Read(incomingHeader); err != nil { t.Fatalf("Error while reading: %v", err) } sz := binary.BigEndian.Uint32(incomingHeader[1:]) recvMsg := make([]byte, int(sz)) if _, err := s.Read(recvMsg); err != nil { t.Fatalf("Error while reading: %v", err) } ct.GracefulClose() var wg sync.WaitGroup // Expect the failure for all the follow-up streams because ct has been closed gracefully. 
for i := 0; i < 200; i++ { wg.Add(1) go func() { defer wg.Done() str, err := ct.NewStream(context.Background(), &CallHdr{}) if err == ErrConnClosing { return } else if err != nil { t.Errorf("_.NewStream(_, _) = _, %v, want _, %v", err, ErrConnClosing) return } ct.Write(str, nil, nil, &Options{Last: true}) if _, err := str.Read(make([]byte, 8)); err != errStreamDrain && err != ErrConnClosing { t.Errorf("_.Read(_) = _, %v, want _, %v or %v", err, errStreamDrain, ErrConnClosing) } }() } ct.Write(s, nil, nil, &Options{Last: true}) if _, err := s.Read(incomingHeader); err != io.EOF { t.Fatalf("Client expected EOF from the server. Got: %v", err) } // The stream which was created before graceful close can still proceed. wg.Wait() } func (s) TestLargeMessageSuspension(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, suspended) defer cancel() callHdr := &CallHdr{ Host: "localhost", Method: "foo.Large", } // Set a long enough timeout for writing a large message out. ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() s, err := ct.NewStream(ctx, callHdr) if err != nil { t.Fatalf("failed to open stream: %v", err) } // Launch a goroutine simillar to the stream monitoring goroutine in // stream.go to keep track of context timeout and call CloseStream. go func() { <-ctx.Done() ct.CloseStream(s, ContextErr(ctx.Err())) }() // Write should not be done successfully due to flow control. 
msg := make([]byte, initialWindowSize*8) ct.Write(s, nil, msg, &Options{}) err = ct.Write(s, nil, msg, &Options{Last: true}) if err != errStreamDone { t.Fatalf("Write got %v, want io.EOF", err) } expectedErr := status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) if _, err := s.Read(make([]byte, 8)); err.Error() != expectedErr.Error() { t.Fatalf("Read got %v of type %T, want %v", err, err, expectedErr) } ct.Close() server.stop() } func (s) TestMaxStreams(t *testing.T) { serverConfig := &ServerConfig{ MaxStreams: 1, } server, ct, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer cancel() defer ct.Close() defer server.stop() callHdr := &CallHdr{ Host: "localhost", Method: "foo.Large", } s, err := ct.NewStream(context.Background(), callHdr) if err != nil { t.Fatalf("Failed to open stream: %v", err) } // Keep creating streams until one fails with deadline exceeded, marking the application // of server settings on client. slist := []*Stream{} pctx, cancel := context.WithCancel(context.Background()) defer cancel() timer := time.NewTimer(time.Second * 10) expectedErr := status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) for { select { case <-timer.C: t.Fatalf("Test timeout: client didn't receive server settings.") default: } ctx, cancel := context.WithDeadline(pctx, time.Now().Add(time.Second)) // This is only to get rid of govet. All these context are based on a base // context which is canceled at the end of the test. defer cancel() if str, err := ct.NewStream(ctx, callHdr); err == nil { slist = append(slist, str) continue } else if err.Error() != expectedErr.Error() { t.Fatalf("ct.NewStream(_,_) = _, %v, want _, %v", err, expectedErr) } timer.Stop() break } done := make(chan struct{}) // Try and create a new stream. 
go func() { defer close(done) ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10)) defer cancel() if _, err := ct.NewStream(ctx, callHdr); err != nil { t.Errorf("Failed to open stream: %v", err) } }() // Close all the extra streams created and make sure the new stream is not created. for _, str := range slist { ct.CloseStream(str, nil) } select { case <-done: t.Fatalf("Test failed: didn't expect new stream to be created just yet.") default: } // Close the first stream created so that the new stream can finally be created. ct.CloseStream(s, nil) <-done ct.Close() <-ct.writerDone if ct.maxConcurrentStreams != 1 { t.Fatalf("ct.maxConcurrentStreams: %d, want 1", ct.maxConcurrentStreams) } } func (s) TestServerContextCanceledOnClosedConnection(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, suspended) defer cancel() callHdr := &CallHdr{ Host: "localhost", Method: "foo", } var sc *http2Server // Wait until the server transport is setup. for { server.mu.Lock() if len(server.conns) == 0 { server.mu.Unlock() time.Sleep(time.Millisecond) continue } for k := range server.conns { var ok bool sc, ok = k.(*http2Server) if !ok { t.Fatalf("Failed to convert %v to *http2Server", k) } } server.mu.Unlock() break } s, err := ct.NewStream(context.Background(), callHdr) if err != nil { t.Fatalf("Failed to open stream: %v", err) } ct.controlBuf.put(&dataFrame{ streamID: s.id, endStream: false, h: nil, d: make([]byte, http2MaxFrameLen), onEachWrite: func() {}, }) // Loop until the server side stream is created. 
var ss *Stream for { time.Sleep(time.Second) sc.mu.Lock() if len(sc.activeStreams) == 0 { sc.mu.Unlock() continue } ss = sc.activeStreams[s.id] sc.mu.Unlock() break } ct.Close() select { case <-ss.Context().Done(): if ss.Context().Err() != context.Canceled { t.Fatalf("ss.Context().Err() got %v, want %v", ss.Context().Err(), context.Canceled) } case <-time.After(5 * time.Second): t.Fatalf("Failed to cancel the context of the sever side stream.") } server.stop() } func (s) TestClientConnDecoupledFromApplicationRead(t *testing.T) { connectOptions := ConnectOptions{ InitialWindowSize: defaultWindowSize, InitialConnWindowSize: defaultWindowSize, } server, client, cancel := setUpWithOptions(t, 0, &ServerConfig{}, notifyCall, connectOptions) defer cancel() defer server.stop() defer client.Close() waitWhileTrue(t, func() (bool, error) { server.mu.Lock() defer server.mu.Unlock() if len(server.conns) == 0 { return true, fmt.Errorf("timed-out while waiting for connection to be created on the server") } return false, nil }) var st *http2Server server.mu.Lock() for k := range server.conns { st = k.(*http2Server) } notifyChan := make(chan struct{}) server.h.notify = notifyChan server.mu.Unlock() cstream1, err := client.NewStream(context.Background(), &CallHdr{}) if err != nil { t.Fatalf("Client failed to create first stream. Err: %v", err) } <-notifyChan var sstream1 *Stream // Access stream on the server. st.mu.Lock() for _, v := range st.activeStreams { if v.id == cstream1.id { sstream1 = v } } st.mu.Unlock() if sstream1 == nil { t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream1.id) } // Exhaust client's connection window. if err := st.Write(sstream1, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil { t.Fatalf("Server failed to write data. Err: %v", err) } notifyChan = make(chan struct{}) server.mu.Lock() server.h.notify = notifyChan server.mu.Unlock() // Create another stream on client. 
cstream2, err := client.NewStream(context.Background(), &CallHdr{}) if err != nil { t.Fatalf("Client failed to create second stream. Err: %v", err) } <-notifyChan var sstream2 *Stream st.mu.Lock() for _, v := range st.activeStreams { if v.id == cstream2.id { sstream2 = v } } st.mu.Unlock() if sstream2 == nil { t.Fatalf("Didn't find stream corresponding to client cstream.id: %v on the server", cstream2.id) } // Server should be able to send data on the new stream, even though the client hasn't read anything on the first stream. if err := st.Write(sstream2, []byte{}, make([]byte, defaultWindowSize), &Options{}); err != nil { t.Fatalf("Server failed to write data. Err: %v", err) } // Client should be able to read data on second stream. if _, err := cstream2.Read(make([]byte, defaultWindowSize)); err != nil { t.Fatalf("_.Read(_) = _, %v, want _, ", err) } // Client should be able to read data on first stream. if _, err := cstream1.Read(make([]byte, defaultWindowSize)); err != nil { t.Fatalf("_.Read(_) = _, %v, want _, ", err) } } func (s) TestServerConnDecoupledFromApplicationRead(t *testing.T) { serverConfig := &ServerConfig{ InitialWindowSize: defaultWindowSize, InitialConnWindowSize: defaultWindowSize, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer cancel() defer server.stop() defer client.Close() waitWhileTrue(t, func() (bool, error) { server.mu.Lock() defer server.mu.Unlock() if len(server.conns) == 0 { return true, fmt.Errorf("timed-out while waiting for connection to be created on the server") } return false, nil }) var st *http2Server server.mu.Lock() for k := range server.conns { st = k.(*http2Server) } server.mu.Unlock() cstream1, err := client.NewStream(context.Background(), &CallHdr{}) if err != nil { t.Fatalf("Failed to create 1st stream. Err: %v", err) } // Exhaust server's connection window. 
if err := client.Write(cstream1, nil, make([]byte, defaultWindowSize), &Options{Last: true}); err != nil { t.Fatalf("Client failed to write data. Err: %v", err) } //Client should be able to create another stream and send data on it. cstream2, err := client.NewStream(context.Background(), &CallHdr{}) if err != nil { t.Fatalf("Failed to create 2nd stream. Err: %v", err) } if err := client.Write(cstream2, nil, make([]byte, defaultWindowSize), &Options{}); err != nil { t.Fatalf("Client failed to write data. Err: %v", err) } // Get the streams on server. waitWhileTrue(t, func() (bool, error) { st.mu.Lock() defer st.mu.Unlock() if len(st.activeStreams) != 2 { return true, fmt.Errorf("timed-out while waiting for server to have created the streams") } return false, nil }) var sstream1 *Stream st.mu.Lock() for _, v := range st.activeStreams { if v.id == 1 { sstream1 = v } } st.mu.Unlock() // Reading from the stream on server should succeed. if _, err := sstream1.Read(make([]byte, defaultWindowSize)); err != nil { t.Fatalf("_.Read(_) = %v, want ", err) } if _, err := sstream1.Read(make([]byte, 1)); err != io.EOF { t.Fatalf("_.Read(_) = %v, want io.EOF", err) } } func (s) TestServerWithMisbehavedClient(t *testing.T) { server := setUpServerOnly(t, 0, &ServerConfig{}, suspended) defer server.stop() // Create a client that can override server stream quota. mconn, err := net.Dial("tcp", server.lis.Addr().String()) if err != nil { t.Fatalf("Clent failed to dial:%v", err) } defer mconn.Close() if err := mconn.SetWriteDeadline(time.Now().Add(time.Second * 10)); err != nil { t.Fatalf("Failed to set write deadline: %v", err) } if n, err := mconn.Write(clientPreface); err != nil || n != len(clientPreface) { t.Fatalf("mconn.Write(clientPreface) = %d, %v, want %d, ", n, err, len(clientPreface)) } // success chan indicates that reader received a RSTStream from server. 
success := make(chan struct{}) var mu sync.Mutex framer := http2.NewFramer(mconn, mconn) if err := framer.WriteSettings(); err != nil { t.Fatalf("Error while writing settings: %v", err) } go func() { // Launch a reader for this misbehaving client. for { frame, err := framer.ReadFrame() if err != nil { return } switch frame := frame.(type) { case *http2.PingFrame: // Write ping ack back so that server's BDP estimation works right. mu.Lock() framer.WritePing(true, frame.Data) mu.Unlock() case *http2.RSTStreamFrame: if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeFlowControl { t.Errorf("RST stream received with streamID: %d and code: %v, want streamID: 1 and code: http2.ErrCodeFlowControl", frame.Header().StreamID, http2.ErrCode(frame.ErrCode)) } close(success) return default: // Do nothing. } } }() // Create a stream. var buf bytes.Buffer henc := hpack.NewEncoder(&buf) // TODO(mmukhi): Remove unnecessary fields. if err := henc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}); err != nil { t.Fatalf("Error while encoding header: %v", err) } if err := henc.WriteField(hpack.HeaderField{Name: ":path", Value: "foo"}); err != nil { t.Fatalf("Error while encoding header: %v", err) } if err := henc.WriteField(hpack.HeaderField{Name: ":authority", Value: "localhost"}); err != nil { t.Fatalf("Error while encoding header: %v", err) } if err := henc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}); err != nil { t.Fatalf("Error while encoding header: %v", err) } mu.Lock() if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil { mu.Unlock() t.Fatalf("Error while writing headers: %v", err) } mu.Unlock() // Test server behavior for violation of stream flow control window size restriction. 
timer := time.NewTimer(time.Second * 5) dbuf := make([]byte, http2MaxFrameLen) for { select { case <-timer.C: t.Fatalf("Test timed out.") case <-success: return default: } mu.Lock() if err := framer.WriteData(1, false, dbuf); err != nil { mu.Unlock() // Error here means the server could have closed the connection due to flow control // violation. Make sure that is the case by waiting for success chan to be closed. select { case <-timer.C: t.Fatalf("Error while writing data: %v", err) case <-success: return } } mu.Unlock() // This for loop is capable of hogging the CPU and cause starvation // in Go versions prior to 1.9, // in single CPU environment. Explicitly relinquish processor. runtime.Gosched() } } func (s) TestClientWithMisbehavedServer(t *testing.T) { // Create a misbehaving server. lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening: %v", err) } defer lis.Close() // success chan indicates that the server received // RSTStream from the client. success := make(chan struct{}) go func() { // Launch the misbehaving server. sconn, err := lis.Accept() if err != nil { t.Errorf("Error while accepting: %v", err) return } defer sconn.Close() if _, err := io.ReadFull(sconn, make([]byte, len(clientPreface))); err != nil { t.Errorf("Error while reading clieng preface: %v", err) return } sfr := http2.NewFramer(sconn, sconn) if err := sfr.WriteSettingsAck(); err != nil { t.Errorf("Error while writing settings: %v", err) return } var mu sync.Mutex for { frame, err := sfr.ReadFrame() if err != nil { return } switch frame := frame.(type) { case *http2.HeadersFrame: // When the client creates a stream, violate the stream flow control. go func() { buf := make([]byte, http2MaxFrameLen) for { mu.Lock() if err := sfr.WriteData(1, false, buf); err != nil { mu.Unlock() return } mu.Unlock() // This for loop is capable of hogging the CPU and cause starvation // in Go versions prior to 1.9, // in single CPU environment. 
Explicitly relinquish processor. runtime.Gosched() } }() case *http2.RSTStreamFrame: if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeFlowControl { t.Errorf("RST stream received with streamID: %d and code: %v, want streamID: 1 and code: http2.ErrCodeFlowControl", frame.Header().StreamID, http2.ErrCode(frame.ErrCode)) } close(success) return case *http2.PingFrame: mu.Lock() sfr.WritePing(true, frame.Data) mu.Unlock() default: } } }() connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) defer cancel() ct, err := NewClientTransport(connectCtx, context.Background(), TargetInfo{Addr: lis.Addr().String()}, ConnectOptions{}, func() {}, func(GoAwayReason) {}, func() {}) if err != nil { t.Fatalf("Error while creating client transport: %v", err) } defer ct.Close() str, err := ct.NewStream(context.Background(), &CallHdr{}) if err != nil { t.Fatalf("Error while creating stream: %v", err) } timer := time.NewTimer(time.Second * 5) go func() { // This go routine mimics the one in stream.go to call CloseStream. 
<-str.Done() ct.CloseStream(str, nil) }() select { case <-timer.C: t.Fatalf("Test timed-out.") case <-success: } } var encodingTestStatus = status.New(codes.Internal, "\n") func (s) TestEncodingRequiredStatus(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, encodingRequiredStatus) defer cancel() callHdr := &CallHdr{ Host: "localhost", Method: "foo", } s, err := ct.NewStream(context.Background(), callHdr) if err != nil { return } opts := Options{Last: true} if err := ct.Write(s, nil, expectedRequest, &opts); err != nil && err != errStreamDone { t.Fatalf("Failed to write the request: %v", err) } p := make([]byte, http2MaxFrameLen) if _, err := s.trReader.(*transportReader).Read(p); err != io.EOF { t.Fatalf("Read got error %v, want %v", err, io.EOF) } if !testutils.StatusErrEqual(s.Status().Err(), encodingTestStatus.Err()) { t.Fatalf("stream with status %v, want %v", s.Status(), encodingTestStatus) } ct.Close() server.stop() } func (s) TestInvalidHeaderField(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, invalidHeaderField) defer cancel() callHdr := &CallHdr{ Host: "localhost", Method: "foo", } s, err := ct.NewStream(context.Background(), callHdr) if err != nil { return } p := make([]byte, http2MaxFrameLen) _, err = s.trReader.(*transportReader).Read(p) if se, ok := status.FromError(err); !ok || se.Code() != codes.Internal || !strings.Contains(err.Error(), expectedInvalidHeaderField) { t.Fatalf("Read got error %v, want error with code %s and contains %q", err, codes.Internal, expectedInvalidHeaderField) } ct.Close() server.stop() } func (s) TestHeaderChanClosedAfterReceivingAnInvalidHeader(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, invalidHeaderField) defer cancel() defer server.stop() defer ct.Close() s, err := ct.NewStream(context.Background(), &CallHdr{Host: "localhost", Method: "foo"}) if err != nil { t.Fatalf("failed to create the stream") } timer := time.NewTimer(time.Second) defer timer.Stop() select { 
case <-s.headerChan: case <-timer.C: t.Errorf("s.headerChan: got open, want closed") } } func (s) TestIsReservedHeader(t *testing.T) { tests := []struct { h string want bool }{ {"", false}, // but should be rejected earlier {"foo", false}, {"content-type", true}, {"user-agent", true}, {":anything", true}, {"grpc-message-type", true}, {"grpc-encoding", true}, {"grpc-message", true}, {"grpc-status", true}, {"grpc-timeout", true}, {"te", true}, } for _, tt := range tests { got := isReservedHeader(tt.h) if got != tt.want { t.Errorf("isReservedHeader(%q) = %v; want %v", tt.h, got, tt.want) } } } func (s) TestContextErr(t *testing.T) { for _, test := range []struct { // input errIn error // outputs errOut error }{ {context.DeadlineExceeded, status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())}, {context.Canceled, status.Error(codes.Canceled, context.Canceled.Error())}, } { err := ContextErr(test.errIn) if err.Error() != test.errOut.Error() { t.Fatalf("ContextErr{%v} = %v \nwant %v", test.errIn, err, test.errOut) } } } type windowSizeConfig struct { serverStream int32 serverConn int32 clientStream int32 clientConn int32 } func (s) TestAccountCheckWindowSizeWithLargeWindow(t *testing.T) { wc := windowSizeConfig{ serverStream: 10 * 1024 * 1024, serverConn: 12 * 1024 * 1024, clientStream: 6 * 1024 * 1024, clientConn: 8 * 1024 * 1024, } testFlowControlAccountCheck(t, 1024*1024, wc) } func (s) TestAccountCheckWindowSizeWithSmallWindow(t *testing.T) { wc := windowSizeConfig{ serverStream: defaultWindowSize, // Note this is smaller than initialConnWindowSize which is the current default. 
serverConn: defaultWindowSize, clientStream: defaultWindowSize, clientConn: defaultWindowSize, } testFlowControlAccountCheck(t, 1024*1024, wc) } func (s) TestAccountCheckDynamicWindowSmallMessage(t *testing.T) { testFlowControlAccountCheck(t, 1024, windowSizeConfig{}) } func (s) TestAccountCheckDynamicWindowLargeMessage(t *testing.T) { testFlowControlAccountCheck(t, 1024*1024, windowSizeConfig{}) } func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) { sc := &ServerConfig{ InitialWindowSize: wc.serverStream, InitialConnWindowSize: wc.serverConn, } co := ConnectOptions{ InitialWindowSize: wc.clientStream, InitialConnWindowSize: wc.clientConn, } server, client, cancel := setUpWithOptions(t, 0, sc, pingpong, co) defer cancel() defer server.stop() defer client.Close() waitWhileTrue(t, func() (bool, error) { server.mu.Lock() defer server.mu.Unlock() if len(server.conns) == 0 { return true, fmt.Errorf("timed out while waiting for server transport to be created") } return false, nil }) var st *http2Server server.mu.Lock() for k := range server.conns { st = k.(*http2Server) } server.mu.Unlock() const numStreams = 10 clientStreams := make([]*Stream, numStreams) for i := 0; i < numStreams; i++ { var err error clientStreams[i], err = client.NewStream(context.Background(), &CallHdr{}) if err != nil { t.Fatalf("Failed to create stream. Err: %v", err) } } var wg sync.WaitGroup // For each stream send pingpong messages to the server. 
for _, stream := range clientStreams { wg.Add(1) go func(stream *Stream) { defer wg.Done() buf := make([]byte, msgSize+5) buf[0] = byte(0) binary.BigEndian.PutUint32(buf[1:], uint32(msgSize)) opts := Options{} header := make([]byte, 5) for i := 1; i <= 10; i++ { if err := client.Write(stream, nil, buf, &opts); err != nil { t.Errorf("Error on client while writing message: %v", err) return } if _, err := stream.Read(header); err != nil { t.Errorf("Error on client while reading data frame header: %v", err) return } sz := binary.BigEndian.Uint32(header[1:]) recvMsg := make([]byte, int(sz)) if _, err := stream.Read(recvMsg); err != nil { t.Errorf("Error on client while reading data: %v", err) return } if len(recvMsg) != msgSize { t.Errorf("Length of message received by client: %v, want: %v", len(recvMsg), msgSize) return } } }(stream) } wg.Wait() serverStreams := map[uint32]*Stream{} loopyClientStreams := map[uint32]*outStream{} loopyServerStreams := map[uint32]*outStream{} // Get all the streams from server reader and writer and client writer. st.mu.Lock() for _, stream := range clientStreams { id := stream.id serverStreams[id] = st.activeStreams[id] loopyServerStreams[id] = st.loopy.estdStreams[id] loopyClientStreams[id] = client.loopy.estdStreams[id] } st.mu.Unlock() // Close all streams for _, stream := range clientStreams { client.Write(stream, nil, nil, &Options{Last: true}) if _, err := stream.Read(make([]byte, 5)); err != io.EOF { t.Fatalf("Client expected an EOF from the server. Got: %v", err) } } // Close down both server and client so that their internals can be read without data // races. client.Close() st.Close() <-st.readerDone <-st.writerDone <-client.readerDone <-client.writerDone for _, cstream := range clientStreams { id := cstream.id sstream := serverStreams[id] loopyServerStream := loopyServerStreams[id] loopyClientStream := loopyClientStreams[id] // Check stream flow control. 
if int(cstream.fc.limit+cstream.fc.delta-cstream.fc.pendingData-cstream.fc.pendingUpdate) != int(st.loopy.oiws)-loopyServerStream.bytesOutStanding { t.Fatalf("Account mismatch: client stream inflow limit(%d) + delta(%d) - pendingData(%d) - pendingUpdate(%d) != server outgoing InitialWindowSize(%d) - outgoingStream.bytesOutStanding(%d)", cstream.fc.limit, cstream.fc.delta, cstream.fc.pendingData, cstream.fc.pendingUpdate, st.loopy.oiws, loopyServerStream.bytesOutStanding) } if int(sstream.fc.limit+sstream.fc.delta-sstream.fc.pendingData-sstream.fc.pendingUpdate) != int(client.loopy.oiws)-loopyClientStream.bytesOutStanding { t.Fatalf("Account mismatch: server stream inflow limit(%d) + delta(%d) - pendingData(%d) - pendingUpdate(%d) != client outgoing InitialWindowSize(%d) - outgoingStream.bytesOutStanding(%d)", sstream.fc.limit, sstream.fc.delta, sstream.fc.pendingData, sstream.fc.pendingUpdate, client.loopy.oiws, loopyClientStream.bytesOutStanding) } } // Check transport flow control. if client.fc.limit != client.fc.unacked+st.loopy.sendQuota { t.Fatalf("Account mismatch: client transport inflow(%d) != client unacked(%d) + server sendQuota(%d)", client.fc.limit, client.fc.unacked, st.loopy.sendQuota) } if st.fc.limit != st.fc.unacked+client.loopy.sendQuota { t.Fatalf("Account mismatch: server transport inflow(%d) != server unacked(%d) + client sendQuota(%d)", st.fc.limit, st.fc.unacked, client.loopy.sendQuota) } } func waitWhileTrue(t *testing.T, condition func() (bool, error)) { var ( wait bool err error ) timer := time.NewTimer(time.Second * 5) for { wait, err = condition() if wait { select { case <-timer.C: t.Fatalf(err.Error()) default: time.Sleep(50 * time.Millisecond) continue } } if !timer.Stop() { <-timer.C } break } } // If any error occurs on a call to Stream.Read, future calls // should continue to return that same error. 
func (s) TestReadGivesSameErrorAfterAnyErrorOccurs(t *testing.T) { testRecvBuffer := newRecvBuffer() s := &Stream{ ctx: context.Background(), buf: testRecvBuffer, requestRead: func(int) {}, } s.trReader = &transportReader{ reader: &recvBufferReader{ ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}, }, windowHandler: func(int) {}, } testData := make([]byte, 1) testData[0] = 5 testBuffer := bytes.NewBuffer(testData) testErr := errors.New("test error") s.write(recvMsg{buffer: testBuffer, err: testErr}) inBuf := make([]byte, 1) actualCount, actualErr := s.Read(inBuf) if actualCount != 0 { t.Errorf("actualCount, _ := s.Read(_) differs; want 0; got %v", actualCount) } if actualErr.Error() != testErr.Error() { t.Errorf("_ , actualErr := s.Read(_) differs; want actualErr.Error() to be %v; got %v", testErr.Error(), actualErr.Error()) } s.write(recvMsg{buffer: testBuffer, err: nil}) s.write(recvMsg{buffer: testBuffer, err: errors.New("different error from first")}) for i := 0; i < 2; i++ { inBuf := make([]byte, 1) actualCount, actualErr := s.Read(inBuf) if actualCount != 0 { t.Errorf("actualCount, _ := s.Read(_) differs; want %v; got %v", 0, actualCount) } if actualErr.Error() != testErr.Error() { t.Errorf("_ , actualErr := s.Read(_) differs; want actualErr.Error() to be %v; got %v", testErr.Error(), actualErr.Error()) } } } func (s) TestPingPong1B(t *testing.T) { runPingPongTest(t, 1) } func (s) TestPingPong1KB(t *testing.T) { runPingPongTest(t, 1024) } func (s) TestPingPong64KB(t *testing.T) { runPingPongTest(t, 65536) } func (s) TestPingPong1MB(t *testing.T) { runPingPongTest(t, 1048576) } //This is a stress-test of flow control logic. 
func runPingPongTest(t *testing.T, msgSize int) { server, client, cancel := setUp(t, 0, 0, pingpong) defer cancel() defer server.stop() defer client.Close() waitWhileTrue(t, func() (bool, error) { server.mu.Lock() defer server.mu.Unlock() if len(server.conns) == 0 { return true, fmt.Errorf("timed out while waiting for server transport to be created") } return false, nil }) stream, err := client.NewStream(context.Background(), &CallHdr{}) if err != nil { t.Fatalf("Failed to create stream. Err: %v", err) } msg := make([]byte, msgSize) outgoingHeader := make([]byte, 5) outgoingHeader[0] = byte(0) binary.BigEndian.PutUint32(outgoingHeader[1:], uint32(msgSize)) opts := &Options{} incomingHeader := make([]byte, 5) done := make(chan struct{}) go func() { timer := time.NewTimer(time.Second * 5) <-timer.C close(done) }() for { select { case <-done: client.Write(stream, nil, nil, &Options{Last: true}) if _, err := stream.Read(incomingHeader); err != io.EOF { t.Fatalf("Client expected EOF from the server. Got: %v", err) } return default: if err := client.Write(stream, outgoingHeader, msg, opts); err != nil { t.Fatalf("Error on client while writing message. Err: %v", err) } if _, err := stream.Read(incomingHeader); err != nil { t.Fatalf("Error on client while reading data header. Err: %v", err) } sz := binary.BigEndian.Uint32(incomingHeader[1:]) recvMsg := make([]byte, int(sz)) if _, err := stream.Read(recvMsg); err != nil { t.Fatalf("Error on client while reading data. 
Err: %v", err) } } } } type tableSizeLimit struct { mu sync.Mutex limits []uint32 } func (t *tableSizeLimit) add(limit uint32) { t.mu.Lock() t.limits = append(t.limits, limit) t.mu.Unlock() } func (t *tableSizeLimit) getLen() int { t.mu.Lock() defer t.mu.Unlock() return len(t.limits) } func (t *tableSizeLimit) getIndex(i int) uint32 { t.mu.Lock() defer t.mu.Unlock() return t.limits[i] } func (s) TestHeaderTblSize(t *testing.T) { limits := &tableSizeLimit{} updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { e.SetMaxDynamicTableSizeLimit(v) limits.add(v) } defer func() { updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { e.SetMaxDynamicTableSizeLimit(v) } }() server, ct, cancel := setUp(t, 0, math.MaxUint32, normal) defer cancel() defer ct.Close() defer server.stop() _, err := ct.NewStream(context.Background(), &CallHdr{}) if err != nil { t.Fatalf("failed to open stream: %v", err) } var svrTransport ServerTransport var i int for i = 0; i < 1000; i++ { server.mu.Lock() if len(server.conns) != 0 { server.mu.Unlock() break } server.mu.Unlock() time.Sleep(10 * time.Millisecond) continue } if i == 1000 { t.Fatalf("unable to create any server transport after 10s") } for st := range server.conns { svrTransport = st break } svrTransport.(*http2Server).controlBuf.put(&outgoingSettings{ ss: []http2.Setting{ { ID: http2.SettingHeaderTableSize, Val: uint32(100), }, }, }) for i = 0; i < 1000; i++ { if limits.getLen() != 1 { time.Sleep(10 * time.Millisecond) continue } if val := limits.getIndex(0); val != uint32(100) { t.Fatalf("expected limits[0] = 100, got %d", val) } break } if i == 1000 { t.Fatalf("expected len(limits) = 1 within 10s, got != 1") } ct.controlBuf.put(&outgoingSettings{ ss: []http2.Setting{ { ID: http2.SettingHeaderTableSize, Val: uint32(200), }, }, }) for i := 0; i < 1000; i++ { if limits.getLen() != 2 { time.Sleep(10 * time.Millisecond) continue } if val := limits.getIndex(1); val != uint32(200) { t.Fatalf("expected limits[1] = 200, got %d", val) } 
break } if i == 1000 { t.Fatalf("expected len(limits) = 2 within 10s, got != 2") } } grpc-go-1.29.1/internal/wrr/000077500000000000000000000000001365033716300155775ustar00rootroot00000000000000grpc-go-1.29.1/internal/wrr/edf.go000066400000000000000000000050131365033716300166630ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package wrr import ( "container/heap" "sync" ) // edfWrr is a struct for EDF weighted round robin implementation. type edfWrr struct { lock sync.Mutex items edfPriorityQueue } // NewEDF creates Earliest Deadline First (EDF) // (https://en.wikipedia.org/wiki/Earliest_deadline_first_scheduling) implementation for weighted round robin. // Each pick from the schedule has the earliest deadline entry selected. Entries have deadlines set // at current time + 1 / weight, providing weighted round robin behavior with O(log n) pick time. func NewEDF() WRR { return &edfWrr{} } // edfEntry is an internal wrapper for item that also stores weight and relative position in the queue. type edfEntry struct { deadline float64 weight int64 item interface{} } // edfPriorityQueue is a heap.Interface implementation for edfEntry elements. 
type edfPriorityQueue []*edfEntry func (pq edfPriorityQueue) Len() int { return len(pq) } func (pq edfPriorityQueue) Less(i, j int) bool { return pq[i].deadline < pq[j].deadline } func (pq edfPriorityQueue) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] } func (pq *edfPriorityQueue) Push(x interface{}) { *pq = append(*pq, x.(*edfEntry)) } func (pq *edfPriorityQueue) Pop() interface{} { old := *pq *pq = old[0 : len(old)-1] return old[len(old)-1] } // Current time in EDF scheduler. func (edf *edfWrr) currentTime() float64 { if len(edf.items) == 0 { return 0.0 } return edf.items[0].deadline } func (edf *edfWrr) Add(item interface{}, weight int64) { edf.lock.Lock() defer edf.lock.Unlock() entry := edfEntry{ deadline: edf.currentTime() + 1.0/float64(weight), weight: weight, item: item, } heap.Push(&edf.items, &entry) } func (edf *edfWrr) Next() interface{} { edf.lock.Lock() defer edf.lock.Unlock() if len(edf.items) == 0 { return nil } item := edf.items[0] item.deadline = edf.currentTime() + 1.0/float64(item.weight) heap.Fix(&edf.items, 0) return item.item } grpc-go-1.29.1/internal/wrr/random.go000066400000000000000000000033341365033716300174110ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package wrr import ( "sync" "google.golang.org/grpc/internal/grpcrand" ) // weightedItem is a wrapped weighted item that is used to implement weighted random algorithm. 
type weightedItem struct { Item interface{} Weight int64 } // randomWRR is a struct that contains weighted items implement weighted random algorithm. type randomWRR struct { mu sync.RWMutex items []*weightedItem sumOfWeights int64 } // NewRandom creates a new WRR with random. func NewRandom() WRR { return &randomWRR{} } var grpcrandInt63n = grpcrand.Int63n func (rw *randomWRR) Next() (item interface{}) { rw.mu.RLock() defer rw.mu.RUnlock() if rw.sumOfWeights == 0 { return nil } // Random number in [0, sum). randomWeight := grpcrandInt63n(rw.sumOfWeights) for _, item := range rw.items { randomWeight = randomWeight - item.Weight if randomWeight < 0 { return item.Item } } return rw.items[len(rw.items)-1].Item } func (rw *randomWRR) Add(item interface{}, weight int64) { rw.mu.Lock() defer rw.mu.Unlock() rItem := &weightedItem{Item: item, Weight: weight} rw.items = append(rw.items, rItem) rw.sumOfWeights += weight } grpc-go-1.29.1/internal/wrr/wrr.go000066400000000000000000000016521365033716300167440ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package wrr // WRR defines an interface that implements weighted round robin. type WRR interface { // Add adds an item with weight to the WRR set. // // Add and Next need to be thread safe. Add(item interface{}, weight int64) // Next returns the next picked item. // // Add and Next need to be thread safe. 
Next() interface{} } grpc-go-1.29.1/internal/wrr/wrr_test.go000066400000000000000000000046361365033716300200100ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package wrr import ( "errors" "math" "math/rand" "testing" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } const iterCount = 10000 func equalApproximate(a, b float64) error { opt := cmp.Comparer(func(x, y float64) bool { delta := math.Abs(x - y) mean := math.Abs(x+y) / 2.0 return delta/mean < 0.05 }) if !cmp.Equal(a, b, opt) { return errors.New(cmp.Diff(a, b)) } return nil } func testWRRNext(t *testing.T, newWRR func() WRR) { tests := []struct { name string weights []int64 }{ { name: "1-1-1", weights: []int64{1, 1, 1}, }, { name: "1-2-3", weights: []int64{1, 2, 3}, }, { name: "5-3-2", weights: []int64{5, 3, 2}, }, { name: "17-23-37", weights: []int64{17, 23, 37}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var sumOfWeights int64 w := newWRR() for i, weight := range tt.weights { w.Add(i, weight) sumOfWeights += weight } results := make(map[int]int) for i := 0; i < iterCount; i++ { results[w.Next().(int)]++ } wantRatio := make([]float64, len(tt.weights)) for i, weight := range tt.weights { wantRatio[i] = float64(weight) / float64(sumOfWeights) } gotRatio := make([]float64, len(tt.weights)) for i, count := range results 
{ gotRatio[i] = float64(count) / iterCount } for i := range wantRatio { if err := equalApproximate(gotRatio[i], wantRatio[i]); err != nil { t.Errorf("%v not equal %v", i, err) } } }) } } func (s) TestRandomWRRNext(t *testing.T) { testWRRNext(t, NewRandom) } func (s) TestEdfWrrNext(t *testing.T) { testWRRNext(t, NewEDF) } func init() { r := rand.New(rand.NewSource(0)) grpcrandInt63n = r.Int63n } grpc-go-1.29.1/interop/000077500000000000000000000000001365033716300146315ustar00rootroot00000000000000grpc-go-1.29.1/interop/alts/000077500000000000000000000000001365033716300155745ustar00rootroot00000000000000grpc-go-1.29.1/interop/alts/client/000077500000000000000000000000001365033716300170525ustar00rootroot00000000000000grpc-go-1.29.1/interop/alts/client/client.go000066400000000000000000000037471365033716300206720ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // This binary can only run on Google Cloud Platform (GCP). 
package main import ( "context" "flag" "time" grpc "google.golang.org/grpc" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/grpclog" testpb "google.golang.org/grpc/interop/grpc_testing" ) var ( hsAddr = flag.String("alts_handshaker_service_address", "", "ALTS handshaker gRPC service address") serverAddr = flag.String("server_address", ":8080", "The port on which the server is listening") ) func main() { flag.Parse() opts := alts.DefaultClientOptions() if *hsAddr != "" { opts.HandshakerServiceAddress = *hsAddr } altsTC := alts.NewClientCreds(opts) // Block until the server is ready. conn, err := grpc.Dial(*serverAddr, grpc.WithTransportCredentials(altsTC), grpc.WithBlock()) if err != nil { grpclog.Fatalf("gRPC Client: failed to dial the server at %v: %v", *serverAddr, err) } defer conn.Close() grpcClient := testpb.NewTestServiceClient(conn) // Call the EmptyCall API. ctx := context.Background() request := &testpb.Empty{} if _, err := grpcClient.EmptyCall(ctx, request); err != nil { grpclog.Fatalf("grpc Client: EmptyCall(_, %v) failed: %v", request, err) } grpclog.Info("grpc Client: empty call succeeded") // This sleep prevents the connection from being abruptly disconnected // when running this binary (along with grpc_server) on GCP dev cluster. time.Sleep(1 * time.Second) } grpc-go-1.29.1/interop/alts/server/000077500000000000000000000000001365033716300171025ustar00rootroot00000000000000grpc-go-1.29.1/interop/alts/server/server.go000066400000000000000000000055131365033716300207430ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // This binary can only run on Google Cloud Platform (GCP). package main import ( "context" "flag" "net" "strings" grpc "google.golang.org/grpc" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/tap" ) const ( udsAddrPrefix = "unix:" ) var ( hsAddr = flag.String("alts_handshaker_service_address", "", "ALTS handshaker gRPC service address") serverAddr = flag.String("server_address", ":8080", "The address on which the server is listening. Only two types of addresses are supported, 'host:port' and 'unix:/path'.") ) func main() { flag.Parse() // If the server address starts with `unix:`, then we have a UDS address. network := "tcp" address := *serverAddr if strings.HasPrefix(address, udsAddrPrefix) { network = "unix" address = strings.TrimPrefix(address, udsAddrPrefix) } lis, err := net.Listen(network, address) if err != nil { grpclog.Fatalf("gRPC Server: failed to start the server at %v: %v", address, err) } opts := alts.DefaultServerOptions() if *hsAddr != "" { opts.HandshakerServiceAddress = *hsAddr } altsTC := alts.NewServerCreds(opts) grpcServer := grpc.NewServer(grpc.Creds(altsTC), grpc.InTapHandle(authz)) testpb.RegisterTestServiceServer(grpcServer, interop.NewTestServer()) grpcServer.Serve(lis) } // authz shows how to access client information at the server side to perform // application-layer authorization checks. 
func authz(ctx context.Context, info *tap.Info) (context.Context, error) { authInfo, err := alts.AuthInfoFromContext(ctx) if err != nil { return nil, err } // Access all alts.AuthInfo data: grpclog.Infof("authInfo.ApplicationProtocol() = %v", authInfo.ApplicationProtocol()) grpclog.Infof("authInfo.RecordProtocol() = %v", authInfo.RecordProtocol()) grpclog.Infof("authInfo.SecurityLevel() = %v", authInfo.SecurityLevel()) grpclog.Infof("authInfo.PeerServiceAccount() = %v", authInfo.PeerServiceAccount()) grpclog.Infof("authInfo.LocalServiceAccount() = %v", authInfo.LocalServiceAccount()) grpclog.Infof("authInfo.PeerRPCVersions() = %v", authInfo.PeerRPCVersions()) grpclog.Infof("info.FullMethodName = %v", info.FullMethodName) return ctx, nil } grpc-go-1.29.1/interop/client/000077500000000000000000000000001365033716300161075ustar00rootroot00000000000000grpc-go-1.29.1/interop/client/client.go000066400000000000000000000257311365033716300177240ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package main import ( "flag" "net" "strconv" "google.golang.org/grpc" _ "google.golang.org/grpc/balancer/grpclb" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/credentials/google" "google.golang.org/grpc/credentials/oauth" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/resolver" "google.golang.org/grpc/testdata" ) const ( googleDefaultCredsName = "google_default_credentials" computeEngineCredsName = "compute_engine_channel_creds" ) var ( caFile = flag.String("ca_file", "", "The file containning the CA root cert file") useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true") useALTS = flag.Bool("use_alts", false, "Connection uses ALTS if true (this option can only be used on GCP)") customCredentialsType = flag.String("custom_credentials_type", "", "Custom creds to use, excluding TLS or ALTS") altsHSAddr = flag.String("alts_handshaker_service_address", "", "ALTS handshaker gRPC service address") testCA = flag.Bool("use_test_ca", false, "Whether to replace platform root CAs with test CA as the CA root") serviceAccountKeyFile = flag.String("service_account_key_file", "", "Path to service account json key file") oauthScope = flag.String("oauth_scope", "", "The scope for OAuth2 tokens") defaultServiceAccount = flag.String("default_service_account", "", "Email of GCE default service account") serverHost = flag.String("server_host", "localhost", "The server host name") serverPort = flag.Int("server_port", 10000, "The server port number") tlsServerName = flag.String("server_host_override", "", "The server name use to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") testCase = flag.String("test_case", "large_unary", `Configure different test cases. 
Valid options are: empty_unary : empty (zero bytes) request and response; large_unary : single request and (large) response; client_streaming : request streaming with single response; server_streaming : single request with response streaming; ping_pong : full-duplex streaming; empty_stream : full-duplex streaming with zero message; timeout_on_sleeping_server: fullduplex streaming on a sleeping server; compute_engine_creds: large_unary with compute engine auth; service_account_creds: large_unary with service account auth; jwt_token_creds: large_unary with jwt token auth; per_rpc_creds: large_unary with per rpc token; oauth2_auth_token: large_unary with oauth2 token auth; google_default_credentials: large_unary with google default credentials compute_engine_channel_credentials: large_unary with compute engine creds cancel_after_begin: cancellation after metadata has been sent but before payloads are sent; cancel_after_first_response: cancellation after receiving 1st message from the server; status_code_and_message: status code propagated back to client; special_status_message: Unicode and whitespace is correctly processed in status message; custom_metadata: server will echo custom metadata; unimplemented_method: client attempts to call unimplemented method; unimplemented_service: client attempts to call unimplemented service; pick_first_unary: all requests are sent to one server despite multiple servers are resolved.`) ) type credsMode uint8 const ( credsNone credsMode = iota credsTLS credsALTS credsGoogleDefaultCreds credsComputeEngineCreds ) func main() { flag.Parse() var useGDC bool // use google default creds var useCEC bool // use compute engine creds if *customCredentialsType != "" { switch *customCredentialsType { case googleDefaultCredsName: useGDC = true case computeEngineCredsName: useCEC = true default: grpclog.Fatalf("If set, custom_credentials_type can only be set to one of %v or %v", googleDefaultCredsName, computeEngineCredsName) } } if (*useTLS && 
*useALTS) || (*useTLS && useGDC) || (*useALTS && useGDC) || (*useTLS && useCEC) || (*useALTS && useCEC) { grpclog.Fatalf("only one of TLS, ALTS, google default creds, or compute engine creds can be used") } var credsChosen credsMode switch { case *useTLS: credsChosen = credsTLS case *useALTS: credsChosen = credsALTS case useGDC: credsChosen = credsGoogleDefaultCreds case useCEC: credsChosen = credsComputeEngineCreds } resolver.SetDefaultScheme("dns") serverAddr := net.JoinHostPort(*serverHost, strconv.Itoa(*serverPort)) var opts []grpc.DialOption switch credsChosen { case credsTLS: var sn string if *tlsServerName != "" { sn = *tlsServerName } var creds credentials.TransportCredentials if *testCA { var err error if *caFile == "" { *caFile = testdata.Path("ca.pem") } creds, err = credentials.NewClientTLSFromFile(*caFile, sn) if err != nil { grpclog.Fatalf("Failed to create TLS credentials %v", err) } } else { creds = credentials.NewClientTLSFromCert(nil, sn) } opts = append(opts, grpc.WithTransportCredentials(creds)) case credsALTS: altsOpts := alts.DefaultClientOptions() if *altsHSAddr != "" { altsOpts.HandshakerServiceAddress = *altsHSAddr } altsTC := alts.NewClientCreds(altsOpts) opts = append(opts, grpc.WithTransportCredentials(altsTC)) case credsGoogleDefaultCreds: opts = append(opts, grpc.WithCredentialsBundle(google.NewDefaultCredentials())) case credsComputeEngineCreds: opts = append(opts, grpc.WithCredentialsBundle(google.NewComputeEngineCredentials())) case credsNone: opts = append(opts, grpc.WithInsecure()) default: grpclog.Fatal("Invalid creds") } if credsChosen == credsTLS { if *testCase == "compute_engine_creds" { opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewComputeEngine())) } else if *testCase == "service_account_creds" { jwtCreds, err := oauth.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope) if err != nil { grpclog.Fatalf("Failed to create JWT credentials: %v", err) } opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) 
} else if *testCase == "jwt_token_creds" { jwtCreds, err := oauth.NewJWTAccessFromFile(*serviceAccountKeyFile) if err != nil { grpclog.Fatalf("Failed to create JWT credentials: %v", err) } opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) } else if *testCase == "oauth2_auth_token" { opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewOauthAccess(interop.GetToken(*serviceAccountKeyFile, *oauthScope)))) } } opts = append(opts, grpc.WithBlock()) conn, err := grpc.Dial(serverAddr, opts...) if err != nil { grpclog.Fatalf("Fail to dial: %v", err) } defer conn.Close() tc := testpb.NewTestServiceClient(conn) switch *testCase { case "empty_unary": interop.DoEmptyUnaryCall(tc) grpclog.Infoln("EmptyUnaryCall done") case "large_unary": interop.DoLargeUnaryCall(tc) grpclog.Infoln("LargeUnaryCall done") case "client_streaming": interop.DoClientStreaming(tc) grpclog.Infoln("ClientStreaming done") case "server_streaming": interop.DoServerStreaming(tc) grpclog.Infoln("ServerStreaming done") case "ping_pong": interop.DoPingPong(tc) grpclog.Infoln("Pingpong done") case "empty_stream": interop.DoEmptyStream(tc) grpclog.Infoln("Emptystream done") case "timeout_on_sleeping_server": interop.DoTimeoutOnSleepingServer(tc) grpclog.Infoln("TimeoutOnSleepingServer done") case "compute_engine_creds": if credsChosen != credsTLS { grpclog.Fatalf("TLS credentials need to be set for compute_engine_creds test case.") } interop.DoComputeEngineCreds(tc, *defaultServiceAccount, *oauthScope) grpclog.Infoln("ComputeEngineCreds done") case "service_account_creds": if credsChosen != credsTLS { grpclog.Fatalf("TLS credentials need to be set for service_account_creds test case.") } interop.DoServiceAccountCreds(tc, *serviceAccountKeyFile, *oauthScope) grpclog.Infoln("ServiceAccountCreds done") case "jwt_token_creds": if credsChosen != credsTLS { grpclog.Fatalf("TLS credentials need to be set for jwt_token_creds test case.") } interop.DoJWTTokenCreds(tc, *serviceAccountKeyFile) 
grpclog.Infoln("JWTtokenCreds done") case "per_rpc_creds": if credsChosen != credsTLS { grpclog.Fatalf("TLS credentials need to be set for per_rpc_creds test case.") } interop.DoPerRPCCreds(tc, *serviceAccountKeyFile, *oauthScope) grpclog.Infoln("PerRPCCreds done") case "oauth2_auth_token": if credsChosen != credsTLS { grpclog.Fatalf("TLS credentials need to be set for oauth2_auth_token test case.") } interop.DoOauth2TokenCreds(tc, *serviceAccountKeyFile, *oauthScope) grpclog.Infoln("Oauth2TokenCreds done") case "google_default_credentials": if credsChosen != credsGoogleDefaultCreds { grpclog.Fatalf("GoogleDefaultCredentials need to be set for google_default_credentials test case.") } interop.DoGoogleDefaultCredentials(tc, *defaultServiceAccount) grpclog.Infoln("GoogleDefaultCredentials done") case "compute_engine_channel_credentials": if credsChosen != credsComputeEngineCreds { grpclog.Fatalf("ComputeEngineCreds need to be set for compute_engine_channel_credentials test case.") } interop.DoComputeEngineChannelCredentials(tc, *defaultServiceAccount) grpclog.Infoln("ComputeEngineChannelCredentials done") case "cancel_after_begin": interop.DoCancelAfterBegin(tc) grpclog.Infoln("CancelAfterBegin done") case "cancel_after_first_response": interop.DoCancelAfterFirstResponse(tc) grpclog.Infoln("CancelAfterFirstResponse done") case "status_code_and_message": interop.DoStatusCodeAndMessage(tc) grpclog.Infoln("StatusCodeAndMessage done") case "special_status_message": interop.DoSpecialStatusMessage(tc) grpclog.Infoln("SpecialStatusMessage done") case "custom_metadata": interop.DoCustomMetadata(tc) grpclog.Infoln("CustomMetadata done") case "unimplemented_method": interop.DoUnimplementedMethod(conn) grpclog.Infoln("UnimplementedMethod done") case "unimplemented_service": interop.DoUnimplementedService(testpb.NewUnimplementedServiceClient(conn)) grpclog.Infoln("UnimplementedService done") case "pick_first_unary": interop.DoPickFirstUnary(tc) grpclog.Infoln("PickFirstUnary 
done") default: grpclog.Fatal("Unsupported test case: ", *testCase) } } grpc-go-1.29.1/interop/fake_grpclb/000077500000000000000000000000001365033716300170705ustar00rootroot00000000000000grpc-go-1.29.1/interop/fake_grpclb/fake_grpclb.go000066400000000000000000000135701365033716300216640ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // This file is for testing only. Runs a fake grpclb balancer server. // The name of the service to load balance for and the addresses // of that service are provided by command line flags. 
package main import ( "flag" "net" "strconv" "strings" "time" "google.golang.org/grpc" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" "google.golang.org/grpc/testdata" ) var ( port = flag.Int("port", 10000, "Port to listen on.") backendAddrs = flag.String("backend_addrs", "", "Comma separated list of backend IP/port addresses.") useALTS = flag.Bool("use_alts", false, "Listen on ALTS credentials.") useTLS = flag.Bool("use_tls", false, "Listen on TLS credentials, using a test certificate.") shortStream = flag.Bool("short_stream", false, "End the balancer stream immediately after sending the first server list.") serviceName = flag.String("service_name", "UNSET", "Name of the service being load balanced for.") ) type loadBalancerServer struct { serverListResponse *lbpb.LoadBalanceResponse } func (l *loadBalancerServer) BalanceLoad(stream lbpb.LoadBalancer_BalanceLoadServer) error { grpclog.Info("Begin handling new BalancerLoad request.") var lbReq *lbpb.LoadBalanceRequest var err error if lbReq, err = stream.Recv(); err != nil { grpclog.Errorf("Error receiving LoadBalanceRequest: %v", err) return err } grpclog.Info("LoadBalancerRequest received.") initialReq := lbReq.GetInitialRequest() if initialReq == nil { grpclog.Info("Expected first request to be an InitialRequest. Got: %v", lbReq) return status.Error(codes.Unknown, "First request not an InitialRequest") } // gRPC clients targeting foo.bar.com:443 can sometimes include the ":443" suffix in // their requested names; handle this case. TODO: make 443 configurable? 
var cleanedName string var requestedNamePortNumber string if cleanedName, requestedNamePortNumber, err = net.SplitHostPort(initialReq.Name); err != nil { cleanedName = initialReq.Name } else { if requestedNamePortNumber != "443" { grpclog.Info("Bad requested service name port number: %v.", requestedNamePortNumber) return status.Error(codes.Unknown, "Bad requested service name port number") } } if cleanedName != *serviceName { grpclog.Info("Expected requested service name: %v. Got: %v", *serviceName, initialReq.Name) return status.Error(codes.NotFound, "Bad requested service name") } if err := stream.Send(&lbpb.LoadBalanceResponse{ LoadBalanceResponseType: &lbpb.LoadBalanceResponse_InitialResponse{ InitialResponse: &lbpb.InitialLoadBalanceResponse{}, }, }); err != nil { grpclog.Errorf("Error sending initial LB response: %v", err) return status.Error(codes.Unknown, "Error sending initial response") } grpclog.Info("Send LoadBalanceResponse: %v", l.serverListResponse) if err := stream.Send(l.serverListResponse); err != nil { grpclog.Errorf("Error sending LB response: %v", err) return status.Error(codes.Unknown, "Error sending response") } if *shortStream { return nil } for { grpclog.Info("Send LoadBalanceResponse: %v", l.serverListResponse) if err := stream.Send(l.serverListResponse); err != nil { grpclog.Errorf("Error sending LB response: %v", err) return status.Error(codes.Unknown, "Error sending response") } time.Sleep(10 * time.Second) } } func main() { flag.Parse() var opts []grpc.ServerOption if *useTLS { certFile := testdata.Path("server1.pem") keyFile := testdata.Path("server1.key") creds, err := credentials.NewServerTLSFromFile(certFile, keyFile) if err != nil { grpclog.Fatalf("Failed to generate credentials %v", err) } opts = append(opts, grpc.Creds(creds)) } else if *useALTS { altsOpts := alts.DefaultServerOptions() altsTC := alts.NewServerCreds(altsOpts) opts = append(opts, grpc.Creds(altsTC)) } var serverList []*lbpb.Server if len(*backendAddrs) == 0 { 
serverList = make([]*lbpb.Server, 0) } else { rawBackendAddrs := strings.Split(*backendAddrs, ",") serverList = make([]*lbpb.Server, len(rawBackendAddrs)) for i := range rawBackendAddrs { rawIP, rawPort, err := net.SplitHostPort(rawBackendAddrs[i]) if err != nil { grpclog.Fatalf("Failed to parse --backend_addrs[%d]=%v, error: %v", i, rawBackendAddrs[i], err) } ip := net.ParseIP(rawIP) if ip == nil { grpclog.Fatalf("Failed to parse ip: %v", rawIP) } numericPort, err := strconv.Atoi(rawPort) if err != nil { grpclog.Fatalf("Failed to convert port %v to int", rawPort) } grpclog.Infof("Adding backend ip: %v, port: %d", ip.String(), numericPort) serverList[i] = &lbpb.Server{ IpAddress: ip, Port: int32(numericPort), } } } serverListResponse := &lbpb.LoadBalanceResponse{ LoadBalanceResponseType: &lbpb.LoadBalanceResponse_ServerList{ ServerList: &lbpb.ServerList{ Servers: serverList, }, }, } server := grpc.NewServer(opts...) grpclog.Infof("Begin listening on %d.", *port) lis, err := net.Listen("tcp", ":"+strconv.Itoa(*port)) if err != nil { grpclog.Fatalf("Failed to listen on port %v: %v", *port, err) } lbpb.RegisterLoadBalancerServer(server, &loadBalancerServer{ serverListResponse: serverListResponse, }) server.Serve(lis) } grpc-go-1.29.1/interop/grpc_testing/000077500000000000000000000000001365033716300173215ustar00rootroot00000000000000grpc-go-1.29.1/interop/grpc_testing/test.pb.go000066400000000000000000001524241365033716300212370ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc_testing/test.proto package grpc_testing import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // The type of payload that should be returned. type PayloadType int32 const ( // Compressable text format. PayloadType_COMPRESSABLE PayloadType = 0 // Uncompressable binary format. PayloadType_UNCOMPRESSABLE PayloadType = 1 // Randomly chosen from all other formats defined in this enum. PayloadType_RANDOM PayloadType = 2 ) var PayloadType_name = map[int32]string{ 0: "COMPRESSABLE", 1: "UNCOMPRESSABLE", 2: "RANDOM", } var PayloadType_value = map[string]int32{ "COMPRESSABLE": 0, "UNCOMPRESSABLE": 1, "RANDOM": 2, } func (x PayloadType) String() string { return proto.EnumName(PayloadType_name, int32(x)) } func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{0} } // The type of route that a client took to reach a server w.r.t. gRPCLB. // The server must fill in "fallback" if it detects that the RPC reached // the server via the "gRPCLB fallback" path, and "backend" if it detects // that the RPC reached the server via "gRPCLB backend" path (i.e. if it got // the address of this server from the gRPCLB server BalanceLoad RPC). Exactly // how this detection is done is context and server dependant. type GrpclbRouteType int32 const ( // Server didn't detect the route that a client took to reach it. GrpclbRouteType_GRPCLB_ROUTE_TYPE_UNKNOWN GrpclbRouteType = 0 // Indicates that a client reached a server via gRPCLB fallback. GrpclbRouteType_GRPCLB_ROUTE_TYPE_FALLBACK GrpclbRouteType = 1 // Indicates that a client reached a server as a gRPCLB-given backend. 
GrpclbRouteType_GRPCLB_ROUTE_TYPE_BACKEND GrpclbRouteType = 2 ) var GrpclbRouteType_name = map[int32]string{ 0: "GRPCLB_ROUTE_TYPE_UNKNOWN", 1: "GRPCLB_ROUTE_TYPE_FALLBACK", 2: "GRPCLB_ROUTE_TYPE_BACKEND", } var GrpclbRouteType_value = map[string]int32{ "GRPCLB_ROUTE_TYPE_UNKNOWN": 0, "GRPCLB_ROUTE_TYPE_FALLBACK": 1, "GRPCLB_ROUTE_TYPE_BACKEND": 2, } func (x GrpclbRouteType) String() string { return proto.EnumName(GrpclbRouteType_name, int32(x)) } func (GrpclbRouteType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{1} } type Empty struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{0} } func (m *Empty) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Empty.Unmarshal(m, b) } func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Empty.Marshal(b, m, deterministic) } func (m *Empty) XXX_Merge(src proto.Message) { xxx_messageInfo_Empty.Merge(m, src) } func (m *Empty) XXX_Size() int { return xxx_messageInfo_Empty.Size(m) } func (m *Empty) XXX_DiscardUnknown() { xxx_messageInfo_Empty.DiscardUnknown(m) } var xxx_messageInfo_Empty proto.InternalMessageInfo // A block of data, to simply increase gRPC message size. type Payload struct { // The type of data in body. Type PayloadType `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.testing.PayloadType" json:"type,omitempty"` // Primary contents of payload. 
Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Payload) Reset() { *m = Payload{} } func (m *Payload) String() string { return proto.CompactTextString(m) } func (*Payload) ProtoMessage() {} func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{1} } func (m *Payload) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Payload.Unmarshal(m, b) } func (m *Payload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Payload.Marshal(b, m, deterministic) } func (m *Payload) XXX_Merge(src proto.Message) { xxx_messageInfo_Payload.Merge(m, src) } func (m *Payload) XXX_Size() int { return xxx_messageInfo_Payload.Size(m) } func (m *Payload) XXX_DiscardUnknown() { xxx_messageInfo_Payload.DiscardUnknown(m) } var xxx_messageInfo_Payload proto.InternalMessageInfo func (m *Payload) GetType() PayloadType { if m != nil { return m.Type } return PayloadType_COMPRESSABLE } func (m *Payload) GetBody() []byte { if m != nil { return m.Body } return nil } // A protobuf representation for grpc status. This is used by test // clients to specify a status that the server should attempt to return. 
type EchoStatus struct { Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EchoStatus) Reset() { *m = EchoStatus{} } func (m *EchoStatus) String() string { return proto.CompactTextString(m) } func (*EchoStatus) ProtoMessage() {} func (*EchoStatus) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{2} } func (m *EchoStatus) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EchoStatus.Unmarshal(m, b) } func (m *EchoStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EchoStatus.Marshal(b, m, deterministic) } func (m *EchoStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_EchoStatus.Merge(m, src) } func (m *EchoStatus) XXX_Size() int { return xxx_messageInfo_EchoStatus.Size(m) } func (m *EchoStatus) XXX_DiscardUnknown() { xxx_messageInfo_EchoStatus.DiscardUnknown(m) } var xxx_messageInfo_EchoStatus proto.InternalMessageInfo func (m *EchoStatus) GetCode() int32 { if m != nil { return m.Code } return 0 } func (m *EchoStatus) GetMessage() string { if m != nil { return m.Message } return "" } // Unary request. type SimpleRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. ResponseSize int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize,proto3" json:"response_size,omitempty"` // Optional input payload sent along with the request. 
Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` // Whether SimpleResponse should include username. FillUsername bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername,proto3" json:"fill_username,omitempty"` // Whether SimpleResponse should include OAuth scope. FillOauthScope bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope,proto3" json:"fill_oauth_scope,omitempty"` // Whether server should return a given status ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus,proto3" json:"response_status,omitempty"` // Whether SimpleResponse should include server_id. FillServerId bool `protobuf:"varint,9,opt,name=fill_server_id,json=fillServerId,proto3" json:"fill_server_id,omitempty"` // Whether SimpleResponse should include grpclb_route_type. FillGrpclbRouteType bool `protobuf:"varint,10,opt,name=fill_grpclb_route_type,json=fillGrpclbRouteType,proto3" json:"fill_grpclb_route_type,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } func (*SimpleRequest) ProtoMessage() {} func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{3} } func (m *SimpleRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SimpleRequest.Unmarshal(m, b) } func (m *SimpleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SimpleRequest.Marshal(b, m, deterministic) } func (m *SimpleRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SimpleRequest.Merge(m, src) } func (m *SimpleRequest) XXX_Size() int { return xxx_messageInfo_SimpleRequest.Size(m) } func (m *SimpleRequest) XXX_DiscardUnknown() { xxx_messageInfo_SimpleRequest.DiscardUnknown(m) } var xxx_messageInfo_SimpleRequest 
proto.InternalMessageInfo func (m *SimpleRequest) GetResponseType() PayloadType { if m != nil { return m.ResponseType } return PayloadType_COMPRESSABLE } func (m *SimpleRequest) GetResponseSize() int32 { if m != nil { return m.ResponseSize } return 0 } func (m *SimpleRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func (m *SimpleRequest) GetFillUsername() bool { if m != nil { return m.FillUsername } return false } func (m *SimpleRequest) GetFillOauthScope() bool { if m != nil { return m.FillOauthScope } return false } func (m *SimpleRequest) GetResponseStatus() *EchoStatus { if m != nil { return m.ResponseStatus } return nil } func (m *SimpleRequest) GetFillServerId() bool { if m != nil { return m.FillServerId } return false } func (m *SimpleRequest) GetFillGrpclbRouteType() bool { if m != nil { return m.FillGrpclbRouteType } return false } // Unary response, as configured by the request. type SimpleResponse struct { // Payload to increase message size. Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` // The user the request came from, for verifying authentication was // successful when the client expected it. Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` // OAuth scope. OauthScope string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope,proto3" json:"oauth_scope,omitempty"` // Server ID. This must be unique among different server instances, // but the same across all RPC's made to a particular server instance. ServerId string `protobuf:"bytes,4,opt,name=server_id,json=serverId,proto3" json:"server_id,omitempty"` // gRPCLB Path. GrpclbRouteType GrpclbRouteType `protobuf:"varint,5,opt,name=grpclb_route_type,json=grpclbRouteType,proto3,enum=grpc.testing.GrpclbRouteType" json:"grpclb_route_type,omitempty"` // Server hostname. 
Hostname string `protobuf:"bytes,6,opt,name=hostname,proto3" json:"hostname,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } func (*SimpleResponse) ProtoMessage() {} func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{4} } func (m *SimpleResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SimpleResponse.Unmarshal(m, b) } func (m *SimpleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SimpleResponse.Marshal(b, m, deterministic) } func (m *SimpleResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_SimpleResponse.Merge(m, src) } func (m *SimpleResponse) XXX_Size() int { return xxx_messageInfo_SimpleResponse.Size(m) } func (m *SimpleResponse) XXX_DiscardUnknown() { xxx_messageInfo_SimpleResponse.DiscardUnknown(m) } var xxx_messageInfo_SimpleResponse proto.InternalMessageInfo func (m *SimpleResponse) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func (m *SimpleResponse) GetUsername() string { if m != nil { return m.Username } return "" } func (m *SimpleResponse) GetOauthScope() string { if m != nil { return m.OauthScope } return "" } func (m *SimpleResponse) GetServerId() string { if m != nil { return m.ServerId } return "" } func (m *SimpleResponse) GetGrpclbRouteType() GrpclbRouteType { if m != nil { return m.GrpclbRouteType } return GrpclbRouteType_GRPCLB_ROUTE_TYPE_UNKNOWN } func (m *SimpleResponse) GetHostname() string { if m != nil { return m.Hostname } return "" } // Client-streaming request. type StreamingInputCallRequest struct { // Optional input payload sent along with the request. 
Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } func (*StreamingInputCallRequest) ProtoMessage() {} func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{5} } func (m *StreamingInputCallRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamingInputCallRequest.Unmarshal(m, b) } func (m *StreamingInputCallRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamingInputCallRequest.Marshal(b, m, deterministic) } func (m *StreamingInputCallRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamingInputCallRequest.Merge(m, src) } func (m *StreamingInputCallRequest) XXX_Size() int { return xxx_messageInfo_StreamingInputCallRequest.Size(m) } func (m *StreamingInputCallRequest) XXX_DiscardUnknown() { xxx_messageInfo_StreamingInputCallRequest.DiscardUnknown(m) } var xxx_messageInfo_StreamingInputCallRequest proto.InternalMessageInfo func (m *StreamingInputCallRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } // Client-streaming response. type StreamingInputCallResponse struct { // Aggregated size of payloads received from the client. 
AggregatedPayloadSize int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize,proto3" json:"aggregated_payload_size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } func (*StreamingInputCallResponse) ProtoMessage() {} func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{6} } func (m *StreamingInputCallResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamingInputCallResponse.Unmarshal(m, b) } func (m *StreamingInputCallResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamingInputCallResponse.Marshal(b, m, deterministic) } func (m *StreamingInputCallResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamingInputCallResponse.Merge(m, src) } func (m *StreamingInputCallResponse) XXX_Size() int { return xxx_messageInfo_StreamingInputCallResponse.Size(m) } func (m *StreamingInputCallResponse) XXX_DiscardUnknown() { xxx_messageInfo_StreamingInputCallResponse.DiscardUnknown(m) } var xxx_messageInfo_StreamingInputCallResponse proto.InternalMessageInfo func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { if m != nil { return m.AggregatedPayloadSize } return 0 } // Configuration for a particular response. type ResponseParameters struct { // Desired payload sizes in responses from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. Size int32 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` // Desired interval between consecutive responses in the response stream in // microseconds. 
IntervalUs int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs,proto3" json:"interval_us,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } func (*ResponseParameters) ProtoMessage() {} func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{7} } func (m *ResponseParameters) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ResponseParameters.Unmarshal(m, b) } func (m *ResponseParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ResponseParameters.Marshal(b, m, deterministic) } func (m *ResponseParameters) XXX_Merge(src proto.Message) { xxx_messageInfo_ResponseParameters.Merge(m, src) } func (m *ResponseParameters) XXX_Size() int { return xxx_messageInfo_ResponseParameters.Size(m) } func (m *ResponseParameters) XXX_DiscardUnknown() { xxx_messageInfo_ResponseParameters.DiscardUnknown(m) } var xxx_messageInfo_ResponseParameters proto.InternalMessageInfo func (m *ResponseParameters) GetSize() int32 { if m != nil { return m.Size } return 0 } func (m *ResponseParameters) GetIntervalUs() int32 { if m != nil { return m.IntervalUs } return 0 } // Server-streaming request. type StreamingOutputCallRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, the payload from each response in the stream // might be of different types. This is to simulate a mixed type of payload // stream. ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Configuration for each expected response message. 
ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters,proto3" json:"response_parameters,omitempty"` // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` // Whether server should return a given status ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus,proto3" json:"response_status,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } func (*StreamingOutputCallRequest) ProtoMessage() {} func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{8} } func (m *StreamingOutputCallRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamingOutputCallRequest.Unmarshal(m, b) } func (m *StreamingOutputCallRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamingOutputCallRequest.Marshal(b, m, deterministic) } func (m *StreamingOutputCallRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamingOutputCallRequest.Merge(m, src) } func (m *StreamingOutputCallRequest) XXX_Size() int { return xxx_messageInfo_StreamingOutputCallRequest.Size(m) } func (m *StreamingOutputCallRequest) XXX_DiscardUnknown() { xxx_messageInfo_StreamingOutputCallRequest.DiscardUnknown(m) } var xxx_messageInfo_StreamingOutputCallRequest proto.InternalMessageInfo func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { if m != nil { return m.ResponseType } return PayloadType_COMPRESSABLE } func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { if m != nil { return m.ResponseParameters } return nil } func (m 
*StreamingOutputCallRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func (m *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus { if m != nil { return m.ResponseStatus } return nil } // Server-streaming response, as configured by the request and parameters. type StreamingOutputCallResponse struct { // Payload to increase response size. Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } func (*StreamingOutputCallResponse) ProtoMessage() {} func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{9} } func (m *StreamingOutputCallResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamingOutputCallResponse.Unmarshal(m, b) } func (m *StreamingOutputCallResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamingOutputCallResponse.Marshal(b, m, deterministic) } func (m *StreamingOutputCallResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamingOutputCallResponse.Merge(m, src) } func (m *StreamingOutputCallResponse) XXX_Size() int { return xxx_messageInfo_StreamingOutputCallResponse.Size(m) } func (m *StreamingOutputCallResponse) XXX_DiscardUnknown() { xxx_messageInfo_StreamingOutputCallResponse.DiscardUnknown(m) } var xxx_messageInfo_StreamingOutputCallResponse proto.InternalMessageInfo func (m *StreamingOutputCallResponse) GetPayload() *Payload { if m != nil { return m.Payload } return nil } type LoadBalancerStatsRequest struct { // Request stats for the next num_rpcs sent by client. 
NumRpcs int32 `protobuf:"varint,1,opt,name=num_rpcs,json=numRpcs,proto3" json:"num_rpcs,omitempty"` // If num_rpcs have not completed within timeout_sec, return partial results. TimeoutSec int32 `protobuf:"varint,2,opt,name=timeout_sec,json=timeoutSec,proto3" json:"timeout_sec,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LoadBalancerStatsRequest) Reset() { *m = LoadBalancerStatsRequest{} } func (m *LoadBalancerStatsRequest) String() string { return proto.CompactTextString(m) } func (*LoadBalancerStatsRequest) ProtoMessage() {} func (*LoadBalancerStatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{10} } func (m *LoadBalancerStatsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LoadBalancerStatsRequest.Unmarshal(m, b) } func (m *LoadBalancerStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LoadBalancerStatsRequest.Marshal(b, m, deterministic) } func (m *LoadBalancerStatsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_LoadBalancerStatsRequest.Merge(m, src) } func (m *LoadBalancerStatsRequest) XXX_Size() int { return xxx_messageInfo_LoadBalancerStatsRequest.Size(m) } func (m *LoadBalancerStatsRequest) XXX_DiscardUnknown() { xxx_messageInfo_LoadBalancerStatsRequest.DiscardUnknown(m) } var xxx_messageInfo_LoadBalancerStatsRequest proto.InternalMessageInfo func (m *LoadBalancerStatsRequest) GetNumRpcs() int32 { if m != nil { return m.NumRpcs } return 0 } func (m *LoadBalancerStatsRequest) GetTimeoutSec() int32 { if m != nil { return m.TimeoutSec } return 0 } type LoadBalancerStatsResponse struct { // The number of completed RPCs for each peer. 
RpcsByPeer map[string]int32 `protobuf:"bytes,1,rep,name=rpcs_by_peer,json=rpcsByPeer,proto3" json:"rpcs_by_peer,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // The number of RPCs that failed to record a remote peer. NumFailures int32 `protobuf:"varint,2,opt,name=num_failures,json=numFailures,proto3" json:"num_failures,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LoadBalancerStatsResponse) Reset() { *m = LoadBalancerStatsResponse{} } func (m *LoadBalancerStatsResponse) String() string { return proto.CompactTextString(m) } func (*LoadBalancerStatsResponse) ProtoMessage() {} func (*LoadBalancerStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{11} } func (m *LoadBalancerStatsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LoadBalancerStatsResponse.Unmarshal(m, b) } func (m *LoadBalancerStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_LoadBalancerStatsResponse.Marshal(b, m, deterministic) } func (m *LoadBalancerStatsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_LoadBalancerStatsResponse.Merge(m, src) } func (m *LoadBalancerStatsResponse) XXX_Size() int { return xxx_messageInfo_LoadBalancerStatsResponse.Size(m) } func (m *LoadBalancerStatsResponse) XXX_DiscardUnknown() { xxx_messageInfo_LoadBalancerStatsResponse.DiscardUnknown(m) } var xxx_messageInfo_LoadBalancerStatsResponse proto.InternalMessageInfo func (m *LoadBalancerStatsResponse) GetRpcsByPeer() map[string]int32 { if m != nil { return m.RpcsByPeer } return nil } func (m *LoadBalancerStatsResponse) GetNumFailures() int32 { if m != nil { return m.NumFailures } return 0 } func init() { proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) proto.RegisterEnum("grpc.testing.GrpclbRouteType", GrpclbRouteType_name, 
GrpclbRouteType_value) proto.RegisterType((*Empty)(nil), "grpc.testing.Empty") proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") proto.RegisterType((*EchoStatus)(nil), "grpc.testing.EchoStatus") proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest") proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse") proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters") proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest") proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse") proto.RegisterType((*LoadBalancerStatsRequest)(nil), "grpc.testing.LoadBalancerStatsRequest") proto.RegisterType((*LoadBalancerStatsResponse)(nil), "grpc.testing.LoadBalancerStatsResponse") proto.RegisterMapType((map[string]int32)(nil), "grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry") } func init() { proto.RegisterFile("grpc_testing/test.proto", fileDescriptor_e1cda82041fed8bf) } var fileDescriptor_e1cda82041fed8bf = []byte{ // 989 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xfd, 0x6e, 0x1b, 0x45, 0x10, 0xef, 0x39, 0x71, 0x1c, 0x8f, 0x5d, 0xc7, 0xdd, 0xb4, 0xf4, 0xec, 0x50, 0x30, 0x07, 0xa2, 0x47, 0x25, 0x5c, 0xe4, 0x8a, 0x0f, 0x55, 0x2a, 0xc8, 0x76, 0x9c, 0x10, 0xd5, 0xb5, 0xcd, 0xd9, 0x06, 0xf5, 0xaf, 0xd3, 0xe6, 0x3c, 0x71, 0x4e, 0xdc, 0x17, 0x7b, 0x7b, 0x11, 0xee, 0x3f, 0x48, 0x3c, 0x02, 0xaf, 0xc0, 0x63, 0xf0, 0x22, 0x3c, 0x0e, 0xda, 0xbd, 0x3b, 0x7f, 0x47, 0x4d, 0xa8, 0xe0, 0x2f, 0xef, 0xce, 0xc7, 0x6f, 0x66, 0x7e, 0x33, 0x3b, 0x67, 0x78, 0x38, 0x65, 0x81, 0x65, 0x72, 0x0c, 0xb9, 0xed, 0x4d, 0x9f, 0x8a, 0xdf, 0x7a, 0xc0, 0x7c, 0xee, 0x93, 0xa2, 0x50, 0xd4, 0x13, 0x85, 0x96, 
0x83, 0x6c, 0xc7, 0x0d, 0xf8, 0x4c, 0xeb, 0x42, 0x6e, 0x40, 0x67, 0x8e, 0x4f, 0x27, 0xe4, 0x73, 0xd8, 0xe5, 0xb3, 0x00, 0x55, 0xa5, 0xa6, 0xe8, 0xa5, 0x46, 0xa5, 0xbe, 0xec, 0x50, 0x4f, 0x8c, 0x46, 0xb3, 0x00, 0x0d, 0x69, 0x46, 0x08, 0xec, 0x9e, 0xfb, 0x93, 0x99, 0x9a, 0xa9, 0x29, 0x7a, 0xd1, 0x90, 0x67, 0xed, 0x39, 0x40, 0xc7, 0xba, 0xf4, 0x87, 0x9c, 0xf2, 0x28, 0x14, 0x16, 0x96, 0x3f, 0x89, 0x01, 0xb3, 0x86, 0x3c, 0x13, 0x15, 0x72, 0x2e, 0x86, 0x21, 0x9d, 0xa2, 0x74, 0xcc, 0x1b, 0xe9, 0x55, 0xfb, 0x63, 0x07, 0xee, 0x0e, 0x6d, 0x37, 0x70, 0xd0, 0xc0, 0x5f, 0x22, 0x0c, 0x39, 0xf9, 0x16, 0xee, 0x32, 0x0c, 0x03, 0xdf, 0x0b, 0xd1, 0xbc, 0x59, 0x66, 0xc5, 0xd4, 0x5e, 0xdc, 0xc8, 0xc7, 0x4b, 0xfe, 0xa1, 0xfd, 0x26, 0x8e, 0x98, 0x5d, 0x18, 0x0d, 0xed, 0x37, 0x48, 0x9e, 0x42, 0x2e, 0x88, 0x11, 0xd4, 0x9d, 0x9a, 0xa2, 0x17, 0x1a, 0x0f, 0xb6, 0xc2, 0x1b, 0xa9, 0x95, 0x40, 0xbd, 0xb0, 0x1d, 0xc7, 0x8c, 0x42, 0x64, 0x1e, 0x75, 0x51, 0xdd, 0xad, 0x29, 0xfa, 0xbe, 0x51, 0x14, 0xc2, 0x71, 0x22, 0x23, 0x3a, 0x94, 0xa5, 0x91, 0x4f, 0x23, 0x7e, 0x69, 0x86, 0x96, 0x1f, 0xa0, 0x9a, 0x95, 0x76, 0x25, 0x21, 0xef, 0x0b, 0xf1, 0x50, 0x48, 0x49, 0x13, 0x0e, 0x16, 0x49, 0x4a, 0xde, 0xd4, 0x9c, 0xcc, 0x43, 0x5d, 0xcd, 0x63, 0xc1, 0xab, 0x51, 0x9a, 0x17, 0x10, 0xf3, 0xfc, 0x09, 0x48, 0x50, 0x33, 0x44, 0x76, 0x85, 0xcc, 0xb4, 0x27, 0x6a, 0x7e, 0x91, 0xd2, 0x50, 0x0a, 0xcf, 0x26, 0xe4, 0x19, 0xbc, 0x27, 0xad, 0x04, 0xaa, 0x73, 0x6e, 0x32, 0x3f, 0xe2, 0x09, 0xad, 0x20, 0xad, 0x0f, 0x85, 0xf6, 0x54, 0x2a, 0x0d, 0xa1, 0x13, 0x14, 0x6a, 0xbf, 0x67, 0xa0, 0x94, 0x36, 0x25, 0x8e, 0xb9, 0x4c, 0x98, 0x72, 0x23, 0xc2, 0xaa, 0xb0, 0x3f, 0xe7, 0x2a, 0xee, 0xf9, 0xfc, 0x4e, 0x3e, 0x84, 0xc2, 0x32, 0x45, 0x3b, 0x52, 0x0d, 0xfe, 0x82, 0x9e, 0x23, 0xc8, 0x2f, 0xca, 0xda, 0x8d, 0xbd, 0xc3, 0xb4, 0xa4, 0x33, 0xb8, 0xb7, 0x59, 0x4d, 0x56, 0x0e, 0xc9, 0xa3, 0xd5, 0xa4, 0xd6, 0xea, 0x32, 0x0e, 0xa6, 0xab, 0x02, 0x91, 0xe4, 0xa5, 0x1f, 0x72, 0x99, 0xe4, 0x5e, 0x1c, 0x26, 0xbd, 0x6b, 0x5d, 0xa8, 0x0c, 0x39, 0x43, 
0xea, 0xda, 0xde, 0xf4, 0xcc, 0x0b, 0x22, 0xde, 0xa6, 0x8e, 0x93, 0x0e, 0xe9, 0x6d, 0xe9, 0xd0, 0x46, 0x50, 0xdd, 0x86, 0x96, 0xb0, 0xfb, 0x15, 0x3c, 0xa4, 0xd3, 0x29, 0xc3, 0x29, 0xe5, 0x38, 0x31, 0x13, 0x9f, 0x78, 0x7a, 0xe3, 0x67, 0xf4, 0x60, 0xa1, 0x4e, 0xa0, 0xc5, 0x18, 0x6b, 0x67, 0x40, 0x52, 0x8c, 0x01, 0x65, 0xd4, 0x45, 0x8e, 0x4c, 0xbe, 0xc0, 0x25, 0x57, 0x79, 0x16, 0x94, 0xdb, 0x1e, 0x47, 0x76, 0x45, 0xc5, 0x0c, 0x27, 0x6f, 0x02, 0x52, 0xd1, 0x38, 0xd4, 0xfe, 0xcc, 0x2c, 0x65, 0xd8, 0x8f, 0xf8, 0x5a, 0xc1, 0xef, 0xfa, 0x2a, 0x7f, 0x80, 0xc3, 0xb9, 0x7f, 0x30, 0x4f, 0x55, 0xcd, 0xd4, 0x76, 0xf4, 0x42, 0xa3, 0xb6, 0x8a, 0xb2, 0x59, 0x92, 0x41, 0xd8, 0x66, 0x99, 0xb7, 0x7e, 0xc3, 0xef, 0xfe, 0xe8, 0xb4, 0x1e, 0x1c, 0x6d, 0x25, 0xe9, 0x5f, 0xbe, 0x12, 0xed, 0x47, 0x50, 0xbb, 0x3e, 0x9d, 0xb4, 0xa8, 0x43, 0x3d, 0x0b, 0x99, 0x88, 0x12, 0xa6, 0x94, 0x57, 0x60, 0xdf, 0x8b, 0x5c, 0x93, 0x05, 0x56, 0x98, 0xb4, 0x32, 0xe7, 0x45, 0xae, 0x11, 0x58, 0xa1, 0xe8, 0x26, 0xb7, 0x5d, 0xf4, 0x23, 0x6e, 0x86, 0x68, 0xa5, 0xdd, 0x4c, 0x44, 0x43, 0xb4, 0xb4, 0xbf, 0x15, 0xa8, 0x6c, 0x01, 0x4e, 0xd2, 0x7c, 0x0d, 0x45, 0x81, 0x6a, 0x9e, 0xcf, 0xcc, 0x00, 0x91, 0xa9, 0x8a, 0xec, 0xc2, 0xd7, 0xab, 0xb9, 0x5e, 0xeb, 0x5e, 0x17, 0x29, 0xb4, 0x66, 0x03, 0x44, 0xd6, 0xf1, 0x38, 0x9b, 0x19, 0xc0, 0xe6, 0x02, 0xf2, 0x11, 0x14, 0x45, 0xd2, 0x17, 0xd4, 0x76, 0x22, 0x86, 0xe9, 0xa0, 0x15, 0xbc, 0xc8, 0x3d, 0x49, 0x44, 0xd5, 0x17, 0x70, 0xb0, 0x86, 0x40, 0xca, 0xb0, 0xf3, 0x33, 0xce, 0x64, 0x95, 0x79, 0x43, 0x1c, 0xc9, 0x7d, 0xc8, 0x5e, 0x51, 0x27, 0x4a, 0xb7, 0x77, 0x7c, 0x79, 0x9e, 0xf9, 0x46, 0x79, 0xf2, 0x1d, 0x14, 0x96, 0xc6, 0x8c, 0x94, 0xa1, 0xd8, 0xee, 0xbf, 0x1a, 0x18, 0x9d, 0xe1, 0xb0, 0xd9, 0xea, 0x76, 0xca, 0x77, 0x08, 0x81, 0xd2, 0xb8, 0xb7, 0x22, 0x53, 0x08, 0xc0, 0x9e, 0xd1, 0xec, 0x1d, 0xf7, 0x5f, 0x95, 0x33, 0x4f, 0x7c, 0x38, 0x58, 0x5b, 0x0c, 0xe4, 0x11, 0x54, 0x4e, 0x8d, 0x41, 0xbb, 0xdb, 0x32, 0x8d, 0xfe, 0x78, 0xd4, 0x31, 0x47, 0xaf, 0x07, 0x1d, 0x73, 0xdc, 0x7b, 0xd9, 
0xeb, 0xff, 0xd4, 0x2b, 0xdf, 0x21, 0x1f, 0x40, 0x75, 0x53, 0x7d, 0xd2, 0xec, 0x76, 0x5b, 0xcd, 0xf6, 0xcb, 0xb2, 0xb2, 0xdd, 0x5d, 0xe8, 0x3a, 0xbd, 0xe3, 0x72, 0xa6, 0xf1, 0xd7, 0x2e, 0x14, 0x46, 0x18, 0x72, 0xb1, 0x94, 0x6d, 0x0b, 0xc9, 0x97, 0x90, 0x97, 0x9f, 0x61, 0x31, 0x3a, 0xe4, 0x70, 0x6d, 0xf6, 0x84, 0xa2, 0xba, 0x4d, 0x48, 0x4e, 0x20, 0x3f, 0xf6, 0x28, 0x8b, 0xdd, 0x8e, 0x56, 0x2d, 0x56, 0x3e, 0xa1, 0xd5, 0xf7, 0xb7, 0x2b, 0x93, 0xee, 0x3b, 0x70, 0xb8, 0x65, 0x86, 0x89, 0xbe, 0xe6, 0x74, 0xed, 0x2e, 0xa8, 0x7e, 0x76, 0x03, 0xcb, 0x38, 0xd6, 0x17, 0x0a, 0xb1, 0x81, 0x6c, 0x2e, 0x3e, 0xf2, 0xf8, 0x1a, 0x88, 0xf5, 0x45, 0x5b, 0xd5, 0xdf, 0x6e, 0x18, 0x87, 0xd2, 0x45, 0xa8, 0xd2, 0x49, 0xe4, 0x38, 0xc7, 0x51, 0xe0, 0xe0, 0xaf, 0xff, 0x59, 0x4d, 0xba, 0x22, 0xab, 0x2a, 0x7d, 0x4f, 0x9d, 0x8b, 0xff, 0x21, 0x54, 0x63, 0x0c, 0xf7, 0xc7, 0x9e, 0xec, 0xa0, 0x8b, 0x1e, 0xc7, 0x49, 0x3a, 0x45, 0x2f, 0xe0, 0xde, 0x8a, 0xfc, 0x76, 0xd3, 0xd4, 0xf8, 0x6d, 0xcb, 0xe6, 0x49, 0xa1, 0x2d, 0x28, 0x9d, 0x22, 0x6f, 0x3b, 0x36, 0x7a, 0x5c, 0x2a, 0xc8, 0xa7, 0x6f, 0xdd, 0x0d, 0x71, 0x6d, 0x8f, 0x6f, 0xb8, 0x43, 0xb4, 0x3b, 0xe7, 0x7b, 0xf2, 0x1f, 0xea, 0xb3, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x13, 0xfb, 0x86, 0xc3, 0xbc, 0x0a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // TestServiceClient is the client API for TestService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type TestServiceClient interface { // One empty request followed by one empty response. EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) // One request followed by one response. 
// The server returns the client payload as-is. UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) // One request followed by a sequence of responses (streamed download). // The server returns the payload with client desired type and sizes. StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. A // stream of responses are returned to the client when the server starts with // first request. HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) } type testServiceClient struct { cc grpc.ClientConnInterface } func NewTestServiceClient(cc grpc.ClientConnInterface) TestServiceClient { return &testServiceClient{cc} } func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := c.cc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) err := c.cc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { stream, err := c.cc.NewStream(ctx, &_TestService_serviceDesc.Streams[0], "/grpc.testing.TestService/StreamingOutputCall", opts...) if err != nil { return nil, err } x := &testServiceStreamingOutputCallClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type TestService_StreamingOutputCallClient interface { Recv() (*StreamingOutputCallResponse, error) grpc.ClientStream } type testServiceStreamingOutputCallClient struct { grpc.ClientStream } func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { m := new(StreamingOutputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { stream, err := c.cc.NewStream(ctx, &_TestService_serviceDesc.Streams[1], "/grpc.testing.TestService/StreamingInputCall", opts...) 
if err != nil { return nil, err } x := &testServiceStreamingInputCallClient{stream} return x, nil } type TestService_StreamingInputCallClient interface { Send(*StreamingInputCallRequest) error CloseAndRecv() (*StreamingInputCallResponse, error) grpc.ClientStream } type testServiceStreamingInputCallClient struct { grpc.ClientStream } func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(StreamingInputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { stream, err := c.cc.NewStream(ctx, &_TestService_serviceDesc.Streams[2], "/grpc.testing.TestService/FullDuplexCall", opts...) if err != nil { return nil, err } x := &testServiceFullDuplexCallClient{stream} return x, nil } type TestService_FullDuplexCallClient interface { Send(*StreamingOutputCallRequest) error Recv() (*StreamingOutputCallResponse, error) grpc.ClientStream } type testServiceFullDuplexCallClient struct { grpc.ClientStream } func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { m := new(StreamingOutputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { stream, err := c.cc.NewStream(ctx, &_TestService_serviceDesc.Streams[3], "/grpc.testing.TestService/HalfDuplexCall", opts...) 
if err != nil { return nil, err } x := &testServiceHalfDuplexCallClient{stream} return x, nil } type TestService_HalfDuplexCallClient interface { Send(*StreamingOutputCallRequest) error Recv() (*StreamingOutputCallResponse, error) grpc.ClientStream } type testServiceHalfDuplexCallClient struct { grpc.ClientStream } func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { m := new(StreamingOutputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // TestServiceServer is the server API for TestService service. type TestServiceServer interface { // One empty request followed by one empty response. EmptyCall(context.Context, *Empty) (*Empty, error) // One request followed by one response. // The server returns the client payload as-is. UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) // One request followed by a sequence of responses (streamed download). // The server returns the payload with client desired type and sizes. StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. StreamingInputCall(TestService_StreamingInputCallServer) error // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. FullDuplexCall(TestService_FullDuplexCallServer) error // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. A // stream of responses are returned to the client when the server starts with // first request. 
HalfDuplexCall(TestService_HalfDuplexCallServer) error } // UnimplementedTestServiceServer can be embedded to have forward compatible implementations. type UnimplementedTestServiceServer struct { } func (*UnimplementedTestServiceServer) EmptyCall(ctx context.Context, req *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method EmptyCall not implemented") } func (*UnimplementedTestServiceServer) UnaryCall(ctx context.Context, req *SimpleRequest) (*SimpleResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UnaryCall not implemented") } func (*UnimplementedTestServiceServer) StreamingOutputCall(req *StreamingOutputCallRequest, srv TestService_StreamingOutputCallServer) error { return status.Errorf(codes.Unimplemented, "method StreamingOutputCall not implemented") } func (*UnimplementedTestServiceServer) StreamingInputCall(srv TestService_StreamingInputCallServer) error { return status.Errorf(codes.Unimplemented, "method StreamingInputCall not implemented") } func (*UnimplementedTestServiceServer) FullDuplexCall(srv TestService_FullDuplexCallServer) error { return status.Errorf(codes.Unimplemented, "method FullDuplexCall not implemented") } func (*UnimplementedTestServiceServer) HalfDuplexCall(srv TestService_HalfDuplexCallServer) error { return status.Errorf(codes.Unimplemented, "method HalfDuplexCall not implemented") } func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { s.RegisterService(&_TestService_serviceDesc, srv) } func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(TestServiceServer).EmptyCall(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.testing.TestService/EmptyCall", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return 
srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) } return interceptor(ctx, in, info, handler) } func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SimpleRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(TestServiceServer).UnaryCall(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.testing.TestService/UnaryCall", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) } return interceptor(ctx, in, info, handler) } func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(StreamingOutputCallRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) } type TestService_StreamingOutputCallServer interface { Send(*StreamingOutputCallResponse) error grpc.ServerStream } type testServiceStreamingOutputCallServer struct { grpc.ServerStream } func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { return x.ServerStream.SendMsg(m) } func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) } type TestService_StreamingInputCallServer interface { SendAndClose(*StreamingInputCallResponse) error Recv() (*StreamingInputCallRequest, error) grpc.ServerStream } type testServiceStreamingInputCallServer struct { grpc.ServerStream } func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { m := 
new(StreamingInputCallRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) } type TestService_FullDuplexCallServer interface { Send(*StreamingOutputCallResponse) error Recv() (*StreamingOutputCallRequest, error) grpc.ServerStream } type testServiceFullDuplexCallServer struct { grpc.ServerStream } func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { m := new(StreamingOutputCallRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) } type TestService_HalfDuplexCallServer interface { Send(*StreamingOutputCallResponse) error Recv() (*StreamingOutputCallRequest, error) grpc.ServerStream } type testServiceHalfDuplexCallServer struct { grpc.ServerStream } func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { m := new(StreamingOutputCallRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _TestService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.TestService", HandlerType: (*TestServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "EmptyCall", Handler: _TestService_EmptyCall_Handler, }, { MethodName: "UnaryCall", Handler: _TestService_UnaryCall_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "StreamingOutputCall", Handler: 
_TestService_StreamingOutputCall_Handler, ServerStreams: true, }, { StreamName: "StreamingInputCall", Handler: _TestService_StreamingInputCall_Handler, ClientStreams: true, }, { StreamName: "FullDuplexCall", Handler: _TestService_FullDuplexCall_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "HalfDuplexCall", Handler: _TestService_HalfDuplexCall_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "grpc_testing/test.proto", } // UnimplementedServiceClient is the client API for UnimplementedService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type UnimplementedServiceClient interface { // A call that no server should implement UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) } type unimplementedServiceClient struct { cc grpc.ClientConnInterface } func NewUnimplementedServiceClient(cc grpc.ClientConnInterface) UnimplementedServiceClient { return &unimplementedServiceClient{cc} } func (c *unimplementedServiceClient) UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := c.cc.Invoke(ctx, "/grpc.testing.UnimplementedService/UnimplementedCall", in, out, opts...) if err != nil { return nil, err } return out, nil } // UnimplementedServiceServer is the server API for UnimplementedService service. type UnimplementedServiceServer interface { // A call that no server should implement UnimplementedCall(context.Context, *Empty) (*Empty, error) } // UnimplementedUnimplementedServiceServer can be embedded to have forward compatible implementations. 
type UnimplementedUnimplementedServiceServer struct { } func (*UnimplementedUnimplementedServiceServer) UnimplementedCall(ctx context.Context, req *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method UnimplementedCall not implemented") } func RegisterUnimplementedServiceServer(s *grpc.Server, srv UnimplementedServiceServer) { s.RegisterService(&_UnimplementedService_serviceDesc, srv) } func _UnimplementedService_UnimplementedCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(UnimplementedServiceServer).UnimplementedCall(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.testing.UnimplementedService/UnimplementedCall", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(UnimplementedServiceServer).UnimplementedCall(ctx, req.(*Empty)) } return interceptor(ctx, in, info, handler) } var _UnimplementedService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.UnimplementedService", HandlerType: (*UnimplementedServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "UnimplementedCall", Handler: _UnimplementedService_UnimplementedCall_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "grpc_testing/test.proto", } // LoadBalancerStatsServiceClient is the client API for LoadBalancerStatsService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type LoadBalancerStatsServiceClient interface { // Gets the backend distribution for RPCs sent by a test client. 
GetClientStats(ctx context.Context, in *LoadBalancerStatsRequest, opts ...grpc.CallOption) (*LoadBalancerStatsResponse, error) } type loadBalancerStatsServiceClient struct { cc grpc.ClientConnInterface } func NewLoadBalancerStatsServiceClient(cc grpc.ClientConnInterface) LoadBalancerStatsServiceClient { return &loadBalancerStatsServiceClient{cc} } func (c *loadBalancerStatsServiceClient) GetClientStats(ctx context.Context, in *LoadBalancerStatsRequest, opts ...grpc.CallOption) (*LoadBalancerStatsResponse, error) { out := new(LoadBalancerStatsResponse) err := c.cc.Invoke(ctx, "/grpc.testing.LoadBalancerStatsService/GetClientStats", in, out, opts...) if err != nil { return nil, err } return out, nil } // LoadBalancerStatsServiceServer is the server API for LoadBalancerStatsService service. type LoadBalancerStatsServiceServer interface { // Gets the backend distribution for RPCs sent by a test client. GetClientStats(context.Context, *LoadBalancerStatsRequest) (*LoadBalancerStatsResponse, error) } // UnimplementedLoadBalancerStatsServiceServer can be embedded to have forward compatible implementations. 
type UnimplementedLoadBalancerStatsServiceServer struct { } func (*UnimplementedLoadBalancerStatsServiceServer) GetClientStats(ctx context.Context, req *LoadBalancerStatsRequest) (*LoadBalancerStatsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetClientStats not implemented") } func RegisterLoadBalancerStatsServiceServer(s *grpc.Server, srv LoadBalancerStatsServiceServer) { s.RegisterService(&_LoadBalancerStatsService_serviceDesc, srv) } func _LoadBalancerStatsService_GetClientStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(LoadBalancerStatsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(LoadBalancerStatsServiceServer).GetClientStats(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.testing.LoadBalancerStatsService/GetClientStats", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(LoadBalancerStatsServiceServer).GetClientStats(ctx, req.(*LoadBalancerStatsRequest)) } return interceptor(ctx, in, info, handler) } var _LoadBalancerStatsService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.LoadBalancerStatsService", HandlerType: (*LoadBalancerStatsServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "GetClientStats", Handler: _LoadBalancerStatsService_GetClientStats_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "grpc_testing/test.proto", } grpc-go-1.29.1/interop/grpc_testing/test.proto000066400000000000000000000170041365033716300213670ustar00rootroot00000000000000// Copyright 2017 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // An integration test service that covers all the method signature permutations // of unary/streaming requests/responses. syntax = "proto3"; package grpc.testing; message Empty {} // The type of payload that should be returned. enum PayloadType { // Compressable text format. COMPRESSABLE = 0; // Uncompressable binary format. UNCOMPRESSABLE = 1; // Randomly chosen from all other formats defined in this enum. RANDOM = 2; } // A block of data, to simply increase gRPC message size. message Payload { // The type of data in body. PayloadType type = 1; // Primary contents of payload. bytes body = 2; } // A protobuf representation for grpc status. This is used by test // clients to specify a status that the server should attempt to return. message EchoStatus { int32 code = 1; string message = 2; } // The type of route that a client took to reach a server w.r.t. gRPCLB. // The server must fill in "fallback" if it detects that the RPC reached // the server via the "gRPCLB fallback" path, and "backend" if it detects // that the RPC reached the server via "gRPCLB backend" path (i.e. if it got // the address of this server from the gRPCLB server BalanceLoad RPC). Exactly // how this detection is done is context and server dependant. enum GrpclbRouteType { // Server didn't detect the route that a client took to reach it. GRPCLB_ROUTE_TYPE_UNKNOWN = 0; // Indicates that a client reached a server via gRPCLB fallback. GRPCLB_ROUTE_TYPE_FALLBACK = 1; // Indicates that a client reached a server as a gRPCLB-given backend. GRPCLB_ROUTE_TYPE_BACKEND = 2; } // Unary request. 
message SimpleRequest { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. PayloadType response_type = 1; // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. int32 response_size = 2; // Optional input payload sent along with the request. Payload payload = 3; // Whether SimpleResponse should include username. bool fill_username = 4; // Whether SimpleResponse should include OAuth scope. bool fill_oauth_scope = 5; // Whether server should return a given status EchoStatus response_status = 7; // Whether SimpleResponse should include server_id. bool fill_server_id = 9; // Whether SimpleResponse should include grpclb_route_type. bool fill_grpclb_route_type = 10; } // Unary response, as configured by the request. message SimpleResponse { // Payload to increase message size. Payload payload = 1; // The user the request came from, for verifying authentication was // successful when the client expected it. string username = 2; // OAuth scope. string oauth_scope = 3; // Server ID. This must be unique among different server instances, // but the same across all RPC's made to a particular server instance. string server_id = 4; // gRPCLB Path. GrpclbRouteType grpclb_route_type = 5; // Server hostname. string hostname = 6; } // Client-streaming request. message StreamingInputCallRequest { // Optional input payload sent along with the request. Payload payload = 1; // Not expecting any payload from the response. } // Client-streaming response. message StreamingInputCallResponse { // Aggregated size of payloads received from the client. int32 aggregated_payload_size = 1; } // Configuration for a particular response. message ResponseParameters { // Desired payload sizes in responses from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. 
int32 size = 1; // Desired interval between consecutive responses in the response stream in // microseconds. int32 interval_us = 2; } // Server-streaming request. message StreamingOutputCallRequest { // Desired payload type in the response from the server. // If response_type is RANDOM, the payload from each response in the stream // might be of different types. This is to simulate a mixed type of payload // stream. PayloadType response_type = 1; // Configuration for each expected response message. repeated ResponseParameters response_parameters = 2; // Optional input payload sent along with the request. Payload payload = 3; // Whether server should return a given status EchoStatus response_status = 7; } // Server-streaming response, as configured by the request and parameters. message StreamingOutputCallResponse { // Payload to increase response size. Payload payload = 1; } // A simple service to test the various types of RPCs and experiment with // performance with various types of payload. service TestService { // One empty request followed by one empty response. rpc EmptyCall(Empty) returns (Empty); // One request followed by one response. // The server returns the client payload as-is. rpc UnaryCall(SimpleRequest) returns (SimpleResponse); // One request followed by a sequence of responses (streamed download). // The server returns the payload with client desired type and sizes. rpc StreamingOutputCall(StreamingOutputCallRequest) returns (stream StreamingOutputCallResponse); // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. rpc StreamingInputCall(stream StreamingInputCallRequest) returns (StreamingInputCallResponse); // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. 
rpc FullDuplexCall(stream StreamingOutputCallRequest) returns (stream StreamingOutputCallResponse); // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. A // stream of responses are returned to the client when the server starts with // first request. rpc HalfDuplexCall(stream StreamingOutputCallRequest) returns (stream StreamingOutputCallResponse); } // A simple service NOT implemented at servers so clients can test for // that case. service UnimplementedService { // A call that no server should implement rpc UnimplementedCall(grpc.testing.Empty) returns (grpc.testing.Empty); } message LoadBalancerStatsRequest { // Request stats for the next num_rpcs sent by client. int32 num_rpcs = 1; // If num_rpcs have not completed within timeout_sec, return partial results. int32 timeout_sec = 2; } message LoadBalancerStatsResponse { // The number of completed RPCs for each peer. map rpcs_by_peer = 1; // The number of RPCs that failed to record a remote peer. int32 num_failures = 2; } // A service used to obtain stats for verifying LB behavior. service LoadBalancerStatsService { // Gets the backend distribution for RPCs sent by a test client. rpc GetClientStats(LoadBalancerStatsRequest) returns (LoadBalancerStatsResponse) {} } grpc-go-1.29.1/interop/grpclb_fallback/000077500000000000000000000000001365033716300177215ustar00rootroot00000000000000grpc-go-1.29.1/interop/grpclb_fallback/client.go000066400000000000000000000177061365033716300215410ustar00rootroot00000000000000// +build linux // +build !appengine // +build go1.11 /* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package main import ( "context" "flag" "log" "net" "os" "os/exec" "syscall" "time" "golang.org/x/sys/unix" "google.golang.org/grpc" _ "google.golang.org/grpc/balancer/grpclb" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/credentials/google" testpb "google.golang.org/grpc/interop/grpc_testing" ) var ( customCredentialsType = flag.String("custom_credentials_type", "", "Client creds to use") serverURI = flag.String("server_uri", "dns:///staging-grpc-directpath-fallback-test.googleapis.com:443", "The server host name") unrouteLBAndBackendAddrsCmd = flag.String("unroute_lb_and_backend_addrs_cmd", "", "Command to make LB and backend address unroutable") blackholeLBAndBackendAddrsCmd = flag.String("blackhole_lb_and_backend_addrs_cmd", "", "Command to make LB and backend addresses blackholed") testCase = flag.String("test_case", "", `Configure different test cases. 
Valid options are: fast_fallback_before_startup : LB/backend connections fail fast before RPC's have been made; fast_fallback_after_startup : LB/backend connections fail fast after RPC's have been made; slow_fallback_before_startup : LB/backend connections black hole before RPC's have been made; slow_fallback_after_startup : LB/backend connections black hole after RPC's have been made;`) infoLog = log.New(os.Stderr, "INFO: ", log.Ldate|log.Ltime|log.Lshortfile) errorLog = log.New(os.Stderr, "ERROR: ", log.Ldate|log.Ltime|log.Lshortfile) ) func doRPCAndGetPath(client testpb.TestServiceClient, timeout time.Duration) testpb.GrpclbRouteType { infoLog.Printf("doRPCAndGetPath timeout:%v\n", timeout) ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() req := &testpb.SimpleRequest{ FillGrpclbRouteType: true, } reply, err := client.UnaryCall(ctx, req) if err != nil { infoLog.Printf("doRPCAndGetPath error:%v\n", err) return testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_UNKNOWN } g := reply.GetGrpclbRouteType() infoLog.Printf("doRPCAndGetPath got grpclb route type: %v\n", g) if g != testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_FALLBACK && g != testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_BACKEND { errorLog.Fatalf("Expected grpclb route type to be either backend or fallback; got: %d", g) } return g } func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { control := func(network, address string, c syscall.RawConn) error { var syscallErr error controlErr := c.Control(func(fd uintptr) { syscallErr = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, 20000) }) if syscallErr != nil { errorLog.Fatalf("syscall error setting sockopt TCP_USER_TIMEOUT: %v", syscallErr) } if controlErr != nil { errorLog.Fatalf("control error setting sockopt TCP_USER_TIMEOUT: %v", syscallErr) } return nil } d := &net.Dialer{ Control: control, } return d.DialContext(ctx, "tcp", addr) } func createTestConn() *grpc.ClientConn { opts := 
[]grpc.DialOption{ grpc.WithContextDialer(dialTCPUserTimeout), grpc.WithBlock(), } switch *customCredentialsType { case "tls": creds := credentials.NewClientTLSFromCert(nil, "") opts = append(opts, grpc.WithTransportCredentials(creds)) case "alts": creds := alts.NewClientCreds(alts.DefaultClientOptions()) opts = append(opts, grpc.WithTransportCredentials(creds)) case "google_default_credentials": opts = append(opts, grpc.WithCredentialsBundle(google.NewDefaultCredentials())) case "compute_engine_channel_creds": opts = append(opts, grpc.WithCredentialsBundle(google.NewComputeEngineCredentials())) default: errorLog.Fatalf("Invalid --custom_credentials_type:%v", *customCredentialsType) } conn, err := grpc.Dial(*serverURI, opts...) if err != nil { errorLog.Fatalf("Fail to dial: %v", err) } return conn } func runCmd(command string) { infoLog.Printf("Running cmd:|%v|\n", command) if err := exec.Command("bash", "-c", command).Run(); err != nil { errorLog.Fatalf("error running cmd:|%v| : %v", command, err) } } func waitForFallbackAndDoRPCs(client testpb.TestServiceClient, fallbackDeadline time.Time) { fallbackRetryCount := 0 fellBack := false for time.Now().Before(fallbackDeadline) { g := doRPCAndGetPath(client, 1*time.Second) if g == testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_FALLBACK { infoLog.Println("Made one successul RPC to a fallback. Now expect the same for the rest.") fellBack = true break } else if g == testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_BACKEND { errorLog.Fatalf("Got RPC type backend. This suggests an error in test implementation") } else { infoLog.Println("Retryable RPC failure on iteration:", fallbackRetryCount) } fallbackRetryCount++ } if !fellBack { infoLog.Fatalf("Didn't fall back before deadline: %v\n", fallbackDeadline) } for i := 0; i < 30; i++ { if g := doRPCAndGetPath(client, 20*time.Second); g != testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_FALLBACK { errorLog.Fatalf("Expected RPC to take grpclb route type FALLBACK. 
Got: %v", g) } time.Sleep(time.Second) } } func doFastFallbackBeforeStartup() { runCmd(*unrouteLBAndBackendAddrsCmd) fallbackDeadline := time.Now().Add(5 * time.Second) conn := createTestConn() defer conn.Close() client := testpb.NewTestServiceClient(conn) waitForFallbackAndDoRPCs(client, fallbackDeadline) } func doSlowFallbackBeforeStartup() { runCmd(*blackholeLBAndBackendAddrsCmd) fallbackDeadline := time.Now().Add(20 * time.Second) conn := createTestConn() defer conn.Close() client := testpb.NewTestServiceClient(conn) waitForFallbackAndDoRPCs(client, fallbackDeadline) } func doFastFallbackAfterStartup() { conn := createTestConn() defer conn.Close() client := testpb.NewTestServiceClient(conn) if g := doRPCAndGetPath(client, 20*time.Second); g != testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_BACKEND { errorLog.Fatalf("Expected RPC to take grpclb route type BACKEND. Got: %v", g) } runCmd(*unrouteLBAndBackendAddrsCmd) fallbackDeadline := time.Now().Add(40 * time.Second) waitForFallbackAndDoRPCs(client, fallbackDeadline) } func doSlowFallbackAfterStartup() { conn := createTestConn() defer conn.Close() client := testpb.NewTestServiceClient(conn) if g := doRPCAndGetPath(client, 20*time.Second); g != testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_BACKEND { errorLog.Fatalf("Expected RPC to take grpclb route type BACKEND. 
Got: %v", g) } runCmd(*blackholeLBAndBackendAddrsCmd) fallbackDeadline := time.Now().Add(40 * time.Second) waitForFallbackAndDoRPCs(client, fallbackDeadline) } func main() { flag.Parse() if len(*unrouteLBAndBackendAddrsCmd) == 0 { errorLog.Fatalf("--unroute_lb_and_backend_addrs_cmd unset") } if len(*blackholeLBAndBackendAddrsCmd) == 0 { errorLog.Fatalf("--blackhole_lb_and_backend_addrs_cmd unset") } switch *testCase { case "fast_fallback_before_startup": doFastFallbackBeforeStartup() log.Printf("FastFallbackBeforeStartup done!\n") case "fast_fallback_after_startup": doFastFallbackAfterStartup() log.Printf("FastFallbackAfterStartup done!\n") case "slow_fallback_before_startup": doSlowFallbackBeforeStartup() log.Printf("SlowFallbackBeforeStartup done!\n") case "slow_fallback_after_startup": doSlowFallbackAfterStartup() log.Printf("SlowFallbackAfterStartup done!\n") default: errorLog.Fatalf("Unsupported test case: %v", *testCase) } } grpc-go-1.29.1/interop/http2/000077500000000000000000000000001365033716300156725ustar00rootroot00000000000000grpc-go-1.29.1/interop/http2/negative_http2_client.go000066400000000000000000000114101365033716300224770ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* * * Client used to test http2 error edge cases like GOAWAYs and RST_STREAMs * * Documentation: * https://github.com/grpc/grpc/blob/master/doc/negative-http2-interop-test-descriptions.md */ package main import ( "context" "flag" "net" "strconv" "sync" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/status" ) var ( serverHost = flag.String("server_host", "localhost", "The server host name") serverPort = flag.Int("server_port", 8080, "The server port number") testCase = flag.String("test_case", "goaway", `Configure different test cases. Valid options are: goaway : client sends two requests, the server will send a goaway in between; rst_after_header : server will send rst_stream after it sends headers; rst_during_data : server will send rst_stream while sending data; rst_after_data : server will send rst_stream after sending data; ping : server will send pings between each http2 frame; max_streams : server will ensure that the max_concurrent_streams limit is upheld;`) largeReqSize = 271828 largeRespSize = 314159 ) func largeSimpleRequest() *testpb.SimpleRequest { pl := interop.ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) return &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(largeRespSize), Payload: pl, } } // sends two unary calls. The server asserts that the calls use different connections. func goaway(tc testpb.TestServiceClient) { interop.DoLargeUnaryCall(tc) // sleep to ensure that the client has time to recv the GOAWAY. // TODO(ncteisen): make this less hacky. 
time.Sleep(1 * time.Second) interop.DoLargeUnaryCall(tc) } func rstAfterHeader(tc testpb.TestServiceClient) { req := largeSimpleRequest() reply, err := tc.UnaryCall(context.Background(), req) if reply != nil { grpclog.Fatalf("Client received reply despite server sending rst stream after header") } if status.Code(err) != codes.Internal { grpclog.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Internal) } } func rstDuringData(tc testpb.TestServiceClient) { req := largeSimpleRequest() reply, err := tc.UnaryCall(context.Background(), req) if reply != nil { grpclog.Fatalf("Client received reply despite server sending rst stream during data") } if status.Code(err) != codes.Unknown { grpclog.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Unknown) } } func rstAfterData(tc testpb.TestServiceClient) { req := largeSimpleRequest() reply, err := tc.UnaryCall(context.Background(), req) if reply != nil { grpclog.Fatalf("Client received reply despite server sending rst stream after data") } if status.Code(err) != codes.Internal { grpclog.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Internal) } } func ping(tc testpb.TestServiceClient) { // The server will assert that every ping it sends was ACK-ed by the client. interop.DoLargeUnaryCall(tc) } func maxStreams(tc testpb.TestServiceClient) { interop.DoLargeUnaryCall(tc) var wg sync.WaitGroup for i := 0; i < 15; i++ { wg.Add(1) go func() { defer wg.Done() interop.DoLargeUnaryCall(tc) }() } wg.Wait() } func main() { flag.Parse() serverAddr := net.JoinHostPort(*serverHost, strconv.Itoa(*serverPort)) var opts []grpc.DialOption opts = append(opts, grpc.WithInsecure()) conn, err := grpc.Dial(serverAddr, opts...) 
if err != nil { grpclog.Fatalf("Fail to dial: %v", err) } defer conn.Close() tc := testpb.NewTestServiceClient(conn) switch *testCase { case "goaway": goaway(tc) grpclog.Infoln("goaway done") case "rst_after_header": rstAfterHeader(tc) grpclog.Infoln("rst_after_header done") case "rst_during_data": rstDuringData(tc) grpclog.Infoln("rst_during_data done") case "rst_after_data": rstAfterData(tc) grpclog.Infoln("rst_after_data done") case "ping": ping(tc) grpclog.Infoln("ping done") case "max_streams": maxStreams(tc) grpclog.Infoln("max_streams done") default: grpclog.Fatal("Unsupported test case: ", *testCase) } } grpc-go-1.29.1/interop/interop_test.sh000077500000000000000000000045621365033716300177160ustar00rootroot00000000000000#!/bin/bash # # Copyright 2019 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # set -e +x export TMPDIR=$(mktemp -d) trap "rm -rf ${TMPDIR}" EXIT clean () { for i in {1..10}; do jobs -p | xargs -n1 pkill -P # A simple "wait" just hangs sometimes. Running `jobs` seems to help. 
sleep 1 if jobs | read; then return fi done echo "$(tput setaf 1) clean failed to kill tests $(tput sgr 0)" jobs pstree exit 1 } fail () { echo "$(tput setaf 1) $1 $(tput sgr 0)" clean exit 1 } pass () { echo "$(tput setaf 2) $1 $(tput sgr 0)" } # Don't run some tests that need a special environment: # "google_default_credentials" # "compute_engine_channel_credentials" # "compute_engine_creds" # "service_account_creds" # "jwt_token_creds" # "oauth2_auth_token" # "per_rpc_creds" # "pick_first_unary" CASES=( "empty_unary" "large_unary" "client_streaming" "server_streaming" "ping_pong" "empty_stream" "timeout_on_sleeping_server" "cancel_after_begin" "cancel_after_first_response" "status_code_and_message" "special_status_message" "custom_metadata" "unimplemented_method" "unimplemented_service" ) # Build server if ! go build -o /dev/null ./interop/server; then fail "failed to build server" else pass "successfully built server" fi # Start server SERVER_LOG="$(mktemp)" go run ./interop/server --use_tls &> $SERVER_LOG & for case in ${CASES[@]}; do echo "$(tput setaf 4) testing: ${case} $(tput sgr 0)" CLIENT_LOG="$(mktemp)" if ! timeout 20 go run ./interop/client --use_tls --server_host_override=foo.test.google.fr --use_test_ca --test_case="${case}" &> $CLIENT_LOG; then fail "FAIL: test case ${case} got server log: $(cat $SERVER_LOG) got client log: $(cat $CLIENT_LOG) " else pass "PASS: test case ${case}" fi done clean grpc-go-1.29.1/interop/server/000077500000000000000000000000001365033716300161375ustar00rootroot00000000000000grpc-go-1.29.1/interop/server/server.go000066400000000000000000000045561365033716300200060ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package main import ( "flag" "net" "strconv" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/testdata" ) var ( useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") useALTS = flag.Bool("use_alts", false, "Connection uses ALTS if true (this option can only be used on GCP)") altsHSAddr = flag.String("alts_handshaker_service_address", "", "ALTS handshaker gRPC service address") certFile = flag.String("tls_cert_file", "", "The TLS cert file") keyFile = flag.String("tls_key_file", "", "The TLS key file") port = flag.Int("port", 10000, "The server port") ) func main() { flag.Parse() if *useTLS && *useALTS { grpclog.Fatalf("use_tls and use_alts cannot be both set to true") } p := strconv.Itoa(*port) lis, err := net.Listen("tcp", ":"+p) if err != nil { grpclog.Fatalf("failed to listen: %v", err) } var opts []grpc.ServerOption if *useTLS { if *certFile == "" { *certFile = testdata.Path("server1.pem") } if *keyFile == "" { *keyFile = testdata.Path("server1.key") } creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { grpclog.Fatalf("Failed to generate credentials %v", err) } opts = append(opts, grpc.Creds(creds)) } else if *useALTS { altsOpts := alts.DefaultServerOptions() if *altsHSAddr != "" { altsOpts.HandshakerServiceAddress = *altsHSAddr } altsTC := alts.NewServerCreds(altsOpts) opts = append(opts, 
grpc.Creds(altsTC)) } server := grpc.NewServer(opts...) testpb.RegisterTestServiceServer(server, interop.NewTestServer()) server.Serve(lis) } grpc-go-1.29.1/interop/test_utils.go000066400000000000000000000665331365033716300173740ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto package interop import ( "context" "fmt" "io" "io/ioutil" "strings" "time" "github.com/golang/protobuf/proto" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) var ( reqSizes = []int{27182, 8, 1828, 45904} respSizes = []int{31415, 9, 2653, 58979} largeReqSize = 271828 largeRespSize = 314159 initialMetadataKey = "x-grpc-test-echo-initial" trailingMetadataKey = "x-grpc-test-echo-trailing-bin" ) // ClientNewPayload returns a payload of the given type and size. 
func ClientNewPayload(t testpb.PayloadType, size int) *testpb.Payload { if size < 0 { grpclog.Fatalf("Requested a response with invalid length %d", size) } body := make([]byte, size) switch t { case testpb.PayloadType_COMPRESSABLE: case testpb.PayloadType_UNCOMPRESSABLE: grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported") default: grpclog.Fatalf("Unsupported payload type: %d", t) } return &testpb.Payload{ Type: t, Body: body, } } // DoEmptyUnaryCall performs a unary RPC with empty request and response messages. func DoEmptyUnaryCall(tc testpb.TestServiceClient, args ...grpc.CallOption) { reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, args...) if err != nil { grpclog.Fatal("/TestService/EmptyCall RPC failed: ", err) } if !proto.Equal(&testpb.Empty{}, reply) { grpclog.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{}) } } // DoLargeUnaryCall performs a unary RPC with large payload in the request and response. func DoLargeUnaryCall(tc testpb.TestServiceClient, args ...grpc.CallOption) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(largeRespSize), Payload: pl, } reply, err := tc.UnaryCall(context.Background(), req, args...) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } t := reply.GetPayload().GetType() s := len(reply.GetPayload().GetBody()) if t != testpb.PayloadType_COMPRESSABLE || s != largeRespSize { grpclog.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize) } } // DoClientStreaming performs a client streaming RPC. func DoClientStreaming(tc testpb.TestServiceClient, args ...grpc.CallOption) { stream, err := tc.StreamingInputCall(context.Background(), args...) 
if err != nil { grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) } var sum int for _, s := range reqSizes { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, s) req := &testpb.StreamingInputCallRequest{ Payload: pl, } if err := stream.Send(req); err != nil { grpclog.Fatalf("%v has error %v while sending %v", stream, err, req) } sum += s } reply, err := stream.CloseAndRecv() if err != nil { grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) } if reply.GetAggregatedPayloadSize() != int32(sum) { grpclog.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum) } } // DoServerStreaming performs a server streaming RPC. func DoServerStreaming(tc testpb.TestServiceClient, args ...grpc.CallOption) { respParam := make([]*testpb.ResponseParameters, len(respSizes)) for i, s := range respSizes { respParam[i] = &testpb.ResponseParameters{ Size: int32(s), } } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, } stream, err := tc.StreamingOutputCall(context.Background(), req, args...) if err != nil { grpclog.Fatalf("%v.StreamingOutputCall(_) = _, %v", tc, err) } var rpcStatus error var respCnt int var index int for { reply, err := stream.Recv() if err != nil { rpcStatus = err break } t := reply.GetPayload().GetType() if t != testpb.PayloadType_COMPRESSABLE { grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE) } size := len(reply.GetPayload().GetBody()) if size != respSizes[index] { grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) } index++ respCnt++ } if rpcStatus != io.EOF { grpclog.Fatalf("Failed to finish the server streaming rpc: %v", rpcStatus) } if respCnt != len(respSizes) { grpclog.Fatalf("Got %d reply, want %d", len(respSizes), respCnt) } } // DoPingPong performs ping-pong style bi-directional streaming RPC. 
func DoPingPong(tc testpb.TestServiceClient, args ...grpc.CallOption) { stream, err := tc.FullDuplexCall(context.Background(), args...) if err != nil { grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) } var index int for index < len(reqSizes) { respParam := []*testpb.ResponseParameters{ { Size: int32(respSizes[index]), }, } pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, reqSizes[index]) req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: pl, } if err := stream.Send(req); err != nil { grpclog.Fatalf("%v has error %v while sending %v", stream, err, req) } reply, err := stream.Recv() if err != nil { grpclog.Fatalf("%v.Recv() = %v", stream, err) } t := reply.GetPayload().GetType() if t != testpb.PayloadType_COMPRESSABLE { grpclog.Fatalf("Got the reply of type %d, want %d", t, testpb.PayloadType_COMPRESSABLE) } size := len(reply.GetPayload().GetBody()) if size != respSizes[index] { grpclog.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) } index++ } if err := stream.CloseSend(); err != nil { grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) } if _, err := stream.Recv(); err != io.EOF { grpclog.Fatalf("%v failed to complele the ping pong test: %v", stream, err) } } // DoEmptyStream sets up a bi-directional streaming with zero message. func DoEmptyStream(tc testpb.TestServiceClient, args ...grpc.CallOption) { stream, err := tc.FullDuplexCall(context.Background(), args...) if err != nil { grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) } if err := stream.CloseSend(); err != nil { grpclog.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) } if _, err := stream.Recv(); err != io.EOF { grpclog.Fatalf("%v failed to complete the empty stream test: %v", stream, err) } } // DoTimeoutOnSleepingServer performs an RPC on a sleep server which causes RPC timeout. 
func DoTimeoutOnSleepingServer(tc testpb.TestServiceClient, args ...grpc.CallOption) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) defer cancel() stream, err := tc.FullDuplexCall(ctx, args...) if err != nil { if status.Code(err) == codes.DeadlineExceeded { return } grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) } pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182) req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, Payload: pl, } if err := stream.Send(req); err != nil && err != io.EOF { grpclog.Fatalf("%v.Send(_) = %v", stream, err) } if _, err := stream.Recv(); status.Code(err) != codes.DeadlineExceeded { grpclog.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded) } } // DoComputeEngineCreds performs a unary RPC with compute engine auth. func DoComputeEngineCreds(tc testpb.TestServiceClient, serviceAccount, oauthScope string) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(largeRespSize), Payload: pl, FillUsername: true, FillOauthScope: true, } reply, err := tc.UnaryCall(context.Background(), req) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } user := reply.GetUsername() scope := reply.GetOauthScope() if user != serviceAccount { grpclog.Fatalf("Got user name %q, want %q.", user, serviceAccount) } if !strings.Contains(oauthScope, scope) { grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) } } func getServiceAccountJSONKey(keyFile string) []byte { jsonKey, err := ioutil.ReadFile(keyFile) if err != nil { grpclog.Fatalf("Failed to read the service account key file: %v", err) } return jsonKey } // DoServiceAccountCreds performs a unary RPC with service account auth. 
func DoServiceAccountCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScope string) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(largeRespSize), Payload: pl, FillUsername: true, FillOauthScope: true, } reply, err := tc.UnaryCall(context.Background(), req) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) user := reply.GetUsername() scope := reply.GetOauthScope() if !strings.Contains(string(jsonKey), user) { grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) } if !strings.Contains(oauthScope, scope) { grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) } } // DoJWTTokenCreds performs a unary RPC with JWT token auth. func DoJWTTokenCreds(tc testpb.TestServiceClient, serviceAccountKeyFile string) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(largeRespSize), Payload: pl, FillUsername: true, } reply, err := tc.UnaryCall(context.Background(), req) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) user := reply.GetUsername() if !strings.Contains(string(jsonKey), user) { grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) } } // GetToken obtains an OAUTH token from the input. 
func GetToken(serviceAccountKeyFile string, oauthScope string) *oauth2.Token { jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) config, err := google.JWTConfigFromJSON(jsonKey, oauthScope) if err != nil { grpclog.Fatalf("Failed to get the config: %v", err) } token, err := config.TokenSource(context.Background()).Token() if err != nil { grpclog.Fatalf("Failed to get the token: %v", err) } return token } // DoOauth2TokenCreds performs a unary RPC with OAUTH2 token auth. func DoOauth2TokenCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScope string) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(largeRespSize), Payload: pl, FillUsername: true, FillOauthScope: true, } reply, err := tc.UnaryCall(context.Background(), req) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) user := reply.GetUsername() scope := reply.GetOauthScope() if !strings.Contains(string(jsonKey), user) { grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) } if !strings.Contains(oauthScope, scope) { grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) } } // DoPerRPCCreds performs a unary RPC with per RPC OAUTH2 token. 
func DoPerRPCCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScope string) { jsonKey := getServiceAccountJSONKey(serviceAccountKeyFile) pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(largeRespSize), Payload: pl, FillUsername: true, FillOauthScope: true, } token := GetToken(serviceAccountKeyFile, oauthScope) kv := map[string]string{"authorization": token.Type() + " " + token.AccessToken} ctx := metadata.NewOutgoingContext(context.Background(), metadata.MD{"authorization": []string{kv["authorization"]}}) reply, err := tc.UnaryCall(ctx, req) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } user := reply.GetUsername() scope := reply.GetOauthScope() if !strings.Contains(string(jsonKey), user) { grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) } if !strings.Contains(oauthScope, scope) { grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) } } // DoGoogleDefaultCredentials performs an unary RPC with google default credentials func DoGoogleDefaultCredentials(tc testpb.TestServiceClient, defaultServiceAccount string) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(largeRespSize), Payload: pl, FillUsername: true, FillOauthScope: true, } reply, err := tc.UnaryCall(context.Background(), req) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } if reply.GetUsername() != defaultServiceAccount { grpclog.Fatalf("Got user name %q; wanted %q. 
", reply.GetUsername(), defaultServiceAccount) } } // DoComputeEngineChannelCredentials performs an unary RPC with compute engine channel credentials func DoComputeEngineChannelCredentials(tc testpb.TestServiceClient, defaultServiceAccount string) { pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(largeRespSize), Payload: pl, FillUsername: true, FillOauthScope: true, } reply, err := tc.UnaryCall(context.Background(), req) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } if reply.GetUsername() != defaultServiceAccount { grpclog.Fatalf("Got user name %q; wanted %q. ", reply.GetUsername(), defaultServiceAccount) } } var testMetadata = metadata.MD{ "key1": []string{"value1"}, "key2": []string{"value2"}, } // DoCancelAfterBegin cancels the RPC after metadata has been sent but before payloads are sent. func DoCancelAfterBegin(tc testpb.TestServiceClient, args ...grpc.CallOption) { ctx, cancel := context.WithCancel(metadata.NewOutgoingContext(context.Background(), testMetadata)) stream, err := tc.StreamingInputCall(ctx, args...) if err != nil { grpclog.Fatalf("%v.StreamingInputCall(_) = _, %v", tc, err) } cancel() _, err = stream.CloseAndRecv() if status.Code(err) != codes.Canceled { grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, status.Code(err), codes.Canceled) } } // DoCancelAfterFirstResponse cancels the RPC after receiving the first message from the server. func DoCancelAfterFirstResponse(tc testpb.TestServiceClient, args ...grpc.CallOption) { ctx, cancel := context.WithCancel(context.Background()) stream, err := tc.FullDuplexCall(ctx, args...) 
if err != nil { grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) } respParam := []*testpb.ResponseParameters{ { Size: 31415, }, } pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, 27182) req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: pl, } if err := stream.Send(req); err != nil { grpclog.Fatalf("%v has error %v while sending %v", stream, err, req) } if _, err := stream.Recv(); err != nil { grpclog.Fatalf("%v.Recv() = %v", stream, err) } cancel() if _, err := stream.Recv(); status.Code(err) != codes.Canceled { grpclog.Fatalf("%v compleled with error code %d, want %d", stream, status.Code(err), codes.Canceled) } } var ( initialMetadataValue = "test_initial_metadata_value" trailingMetadataValue = "\x0a\x0b\x0a\x0b\x0a\x0b" customMetadata = metadata.Pairs( initialMetadataKey, initialMetadataValue, trailingMetadataKey, trailingMetadataValue, ) ) func validateMetadata(header, trailer metadata.MD) { if len(header[initialMetadataKey]) != 1 { grpclog.Fatalf("Expected exactly one header from server. Received %d", len(header[initialMetadataKey])) } if header[initialMetadataKey][0] != initialMetadataValue { grpclog.Fatalf("Got header %s; want %s", header[initialMetadataKey][0], initialMetadataValue) } if len(trailer[trailingMetadataKey]) != 1 { grpclog.Fatalf("Expected exactly one trailer from server. Received %d", len(trailer[trailingMetadataKey])) } if trailer[trailingMetadataKey][0] != trailingMetadataValue { grpclog.Fatalf("Got trailer %s; want %s", trailer[trailingMetadataKey][0], trailingMetadataValue) } } // DoCustomMetadata checks that metadata is echoed back to the client. func DoCustomMetadata(tc testpb.TestServiceClient, args ...grpc.CallOption) { // Testing with UnaryCall. 
pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, 1) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(1), Payload: pl, } ctx := metadata.NewOutgoingContext(context.Background(), customMetadata) var header, trailer metadata.MD args = append(args, grpc.Header(&header), grpc.Trailer(&trailer)) reply, err := tc.UnaryCall( ctx, req, args..., ) if err != nil { grpclog.Fatal("/TestService/UnaryCall RPC failed: ", err) } t := reply.GetPayload().GetType() s := len(reply.GetPayload().GetBody()) if t != testpb.PayloadType_COMPRESSABLE || s != 1 { grpclog.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, 1) } validateMetadata(header, trailer) // Testing with FullDuplex. stream, err := tc.FullDuplexCall(ctx, args...) if err != nil { grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } respParam := []*testpb.ResponseParameters{ { Size: 1, }, } streamReq := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: pl, } if err := stream.Send(streamReq); err != nil { grpclog.Fatalf("%v has error %v while sending %v", stream, err, streamReq) } streamHeader, err := stream.Header() if err != nil { grpclog.Fatalf("%v.Header() = %v", stream, err) } if _, err := stream.Recv(); err != nil { grpclog.Fatalf("%v.Recv() = %v", stream, err) } if err := stream.CloseSend(); err != nil { grpclog.Fatalf("%v.CloseSend() = %v, want ", stream, err) } if _, err := stream.Recv(); err != io.EOF { grpclog.Fatalf("%v failed to complete the custom metadata test: %v", stream, err) } streamTrailer := stream.Trailer() validateMetadata(streamHeader, streamTrailer) } // DoStatusCodeAndMessage checks that the status code is propagated back to the client. 
func DoStatusCodeAndMessage(tc testpb.TestServiceClient, args ...grpc.CallOption) { var code int32 = 2 msg := "test status message" expectedErr := status.Error(codes.Code(code), msg) respStatus := &testpb.EchoStatus{ Code: code, Message: msg, } // Test UnaryCall. req := &testpb.SimpleRequest{ ResponseStatus: respStatus, } if _, err := tc.UnaryCall(context.Background(), req, args...); err == nil || err.Error() != expectedErr.Error() { grpclog.Fatalf("%v.UnaryCall(_, %v) = _, %v, want _, %v", tc, req, err, expectedErr) } // Test FullDuplexCall. stream, err := tc.FullDuplexCall(context.Background(), args...) if err != nil { grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } streamReq := &testpb.StreamingOutputCallRequest{ ResponseStatus: respStatus, } if err := stream.Send(streamReq); err != nil { grpclog.Fatalf("%v has error %v while sending %v, want ", stream, err, streamReq) } if err := stream.CloseSend(); err != nil { grpclog.Fatalf("%v.CloseSend() = %v, want ", stream, err) } if _, err = stream.Recv(); err.Error() != expectedErr.Error() { grpclog.Fatalf("%v.Recv() returned error %v, want %v", stream, err, expectedErr) } } // DoSpecialStatusMessage verifies Unicode and whitespace is correctly processed // in status message. func DoSpecialStatusMessage(tc testpb.TestServiceClient, args ...grpc.CallOption) { const ( code int32 = 2 msg string = "\t\ntest with whitespace\r\nand Unicode BMP ☺ and non-BMP 😈\t\n" ) expectedErr := status.Error(codes.Code(code), msg) req := &testpb.SimpleRequest{ ResponseStatus: &testpb.EchoStatus{ Code: code, Message: msg, }, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() if _, err := tc.UnaryCall(ctx, req, args...); err == nil || err.Error() != expectedErr.Error() { grpclog.Fatalf("%v.UnaryCall(_, %v) = _, %v, want _, %v", tc, req, err, expectedErr) } } // DoUnimplementedService attempts to call a method from an unimplemented service. 
func DoUnimplementedService(tc testpb.UnimplementedServiceClient) { _, err := tc.UnimplementedCall(context.Background(), &testpb.Empty{}) if status.Code(err) != codes.Unimplemented { grpclog.Fatalf("%v.UnimplementedCall() = _, %v, want _, %v", tc, status.Code(err), codes.Unimplemented) } } // DoUnimplementedMethod attempts to call an unimplemented method. func DoUnimplementedMethod(cc *grpc.ClientConn) { var req, reply proto.Message if err := cc.Invoke(context.Background(), "/grpc.testing.TestService/UnimplementedCall", req, reply); err == nil || status.Code(err) != codes.Unimplemented { grpclog.Fatalf("ClientConn.Invoke(_, _, _, _, _) = %v, want error code %s", err, codes.Unimplemented) } } // DoPickFirstUnary runs multiple RPCs (rpcCount) and checks that all requests // are sent to the same backend. func DoPickFirstUnary(tc testpb.TestServiceClient) { const rpcCount = 100 pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, 1) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(1), Payload: pl, FillServerId: true, } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() var serverID string for i := 0; i < rpcCount; i++ { resp, err := tc.UnaryCall(ctx, req) if err != nil { grpclog.Fatalf("iteration %d, failed to do UnaryCall: %v", i, err) } id := resp.ServerId if id == "" { grpclog.Fatalf("iteration %d, got empty server ID", i) } if i == 0 { serverID = id continue } if serverID != id { grpclog.Fatalf("iteration %d, got different server ids: %q vs %q", i, serverID, id) } } } type testServer struct { } // NewTestServer creates a test server for test service. 
func NewTestServer() testpb.TestServiceServer { return &testServer{} } func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return new(testpb.Empty), nil } func serverNewPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) { if size < 0 { return nil, fmt.Errorf("requested a response with invalid length %d", size) } body := make([]byte, size) switch t { case testpb.PayloadType_COMPRESSABLE: case testpb.PayloadType_UNCOMPRESSABLE: return nil, fmt.Errorf("payloadType UNCOMPRESSABLE is not supported") default: return nil, fmt.Errorf("unsupported payload type: %d", t) } return &testpb.Payload{ Type: t, Body: body, }, nil } func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { st := in.GetResponseStatus() if md, ok := metadata.FromIncomingContext(ctx); ok { if initialMetadata, ok := md[initialMetadataKey]; ok { header := metadata.Pairs(initialMetadataKey, initialMetadata[0]) grpc.SendHeader(ctx, header) } if trailingMetadata, ok := md[trailingMetadataKey]; ok { trailer := metadata.Pairs(trailingMetadataKey, trailingMetadata[0]) grpc.SetTrailer(ctx, trailer) } } if st != nil && st.Code != 0 { return nil, status.Error(codes.Code(st.Code), st.Message) } pl, err := serverNewPayload(in.GetResponseType(), in.GetResponseSize()) if err != nil { return nil, err } return &testpb.SimpleResponse{ Payload: pl, }, nil } func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { cs := args.GetResponseParameters() for _, c := range cs { if us := c.GetIntervalUs(); us > 0 { time.Sleep(time.Duration(us) * time.Microsecond) } pl, err := serverNewPayload(args.GetResponseType(), c.GetSize()) if err != nil { return err } if err := stream.Send(&testpb.StreamingOutputCallResponse{ Payload: pl, }); err != nil { return err } } return nil } func (s *testServer) StreamingInputCall(stream 
testpb.TestService_StreamingInputCallServer) error { var sum int for { in, err := stream.Recv() if err == io.EOF { return stream.SendAndClose(&testpb.StreamingInputCallResponse{ AggregatedPayloadSize: int32(sum), }) } if err != nil { return err } p := in.GetPayload().GetBody() sum += len(p) } } func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { if md, ok := metadata.FromIncomingContext(stream.Context()); ok { if initialMetadata, ok := md[initialMetadataKey]; ok { header := metadata.Pairs(initialMetadataKey, initialMetadata[0]) stream.SendHeader(header) } if trailingMetadata, ok := md[trailingMetadataKey]; ok { trailer := metadata.Pairs(trailingMetadataKey, trailingMetadata[0]) stream.SetTrailer(trailer) } } for { in, err := stream.Recv() if err == io.EOF { // read done. return nil } if err != nil { return err } st := in.GetResponseStatus() if st != nil && st.Code != 0 { return status.Error(codes.Code(st.Code), st.Message) } cs := in.GetResponseParameters() for _, c := range cs { if us := c.GetIntervalUs(); us > 0 { time.Sleep(time.Duration(us) * time.Microsecond) } pl, err := serverNewPayload(in.GetResponseType(), c.GetSize()) if err != nil { return err } if err := stream.Send(&testpb.StreamingOutputCallResponse{ Payload: pl, }); err != nil { return err } } } } func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { var msgBuf []*testpb.StreamingOutputCallRequest for { in, err := stream.Recv() if err == io.EOF { // read done. 
break } if err != nil { return err } msgBuf = append(msgBuf, in) } for _, m := range msgBuf { cs := m.GetResponseParameters() for _, c := range cs { if us := c.GetIntervalUs(); us > 0 { time.Sleep(time.Duration(us) * time.Microsecond) } pl, err := serverNewPayload(m.GetResponseType(), c.GetSize()) if err != nil { return err } if err := stream.Send(&testpb.StreamingOutputCallResponse{ Payload: pl, }); err != nil { return err } } } return nil } grpc-go-1.29.1/interop/xds/000077500000000000000000000000001365033716300154275ustar00rootroot00000000000000grpc-go-1.29.1/interop/xds/client/000077500000000000000000000000001365033716300167055ustar00rootroot00000000000000grpc-go-1.29.1/interop/xds/client/client.go000066400000000000000000000116721365033716300205210ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary client for xDS interop tests. 
package main import ( "context" "flag" "fmt" "net" "sync" "time" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/peer" _ "google.golang.org/grpc/xds/experimental" ) type statsWatcherKey struct { startID int32 endID int32 } type statsWatcher struct { rpcsByPeer map[string]int32 numFailures int32 remainingRpcs int32 c chan *testpb.SimpleResponse } var ( numChannels = flag.Int("num_channels", 1, "Num of channels") printResponse = flag.Bool("print_response", false, "Write RPC response to stdout") qps = flag.Int("qps", 1, "QPS per channel") rpcTimeout = flag.Duration("rpc_timeout", 10*time.Second, "Per RPC timeout") server = flag.String("server", "localhost:8080", "Address of server to connect to") statsPort = flag.Int("stats_port", 8081, "Port to expose peer distribution stats service") mu sync.Mutex currentRequestID int32 watchers = make(map[statsWatcherKey]*statsWatcher) ) type statsService struct{} // Wait for the next LoadBalancerStatsRequest.GetNumRpcs to start and complete, // and return the distribution of remote peers. This is essentially a clientside // LB reporting mechanism that is designed to be queried by an external test // driver when verifying that the client is distributing RPCs as expected. 
func (s *statsService) GetClientStats(ctx context.Context, in *testpb.LoadBalancerStatsRequest) (*testpb.LoadBalancerStatsResponse, error) { mu.Lock() watcherKey := statsWatcherKey{currentRequestID, currentRequestID + in.GetNumRpcs()} watcher, ok := watchers[watcherKey] if !ok { watcher = &statsWatcher{ rpcsByPeer: make(map[string]int32), numFailures: 0, remainingRpcs: in.GetNumRpcs(), c: make(chan *testpb.SimpleResponse), } watchers[watcherKey] = watcher } mu.Unlock() ctx, cancel := context.WithTimeout(ctx, time.Duration(in.GetTimeoutSec())*time.Second) defer cancel() defer func() { mu.Lock() delete(watchers, watcherKey) mu.Unlock() }() // Wait until the requested RPCs have all been recorded or timeout occurs. for { select { case r := <-watcher.c: if r != nil { watcher.rpcsByPeer[(*r).GetHostname()]++ } else { watcher.numFailures++ } watcher.remainingRpcs-- if watcher.remainingRpcs == 0 { return &testpb.LoadBalancerStatsResponse{NumFailures: watcher.numFailures + watcher.remainingRpcs, RpcsByPeer: watcher.rpcsByPeer}, nil } case <-ctx.Done(): grpclog.Info("Timed out, returning partial stats") return &testpb.LoadBalancerStatsResponse{NumFailures: watcher.numFailures + watcher.remainingRpcs, RpcsByPeer: watcher.rpcsByPeer}, nil } } } func main() { flag.Parse() lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *statsPort)) if err != nil { grpclog.Fatalf("failed to listen: %v", err) } s := grpc.NewServer() defer s.Stop() testpb.RegisterLoadBalancerStatsServiceServer(s, &statsService{}) go s.Serve(lis) clients := make([]testpb.TestServiceClient, *numChannels) for i := 0; i < *numChannels; i++ { conn, err := grpc.DialContext(context.Background(), *server, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { grpclog.Fatalf("Fail to dial: %v", err) } defer conn.Close() clients[i] = testpb.NewTestServiceClient(conn) } ticker := time.NewTicker(time.Second / time.Duration(*qps**numChannels)) defer ticker.Stop() sendRPCs(clients, ticker) } func sendRPCs(clients 
[]testpb.TestServiceClient, ticker *time.Ticker) { var i int for range ticker.C { go func(i int) { c := clients[i] ctx, cancel := context.WithTimeout(context.Background(), *rpcTimeout) p := new(peer.Peer) mu.Lock() savedRequestID := currentRequestID currentRequestID++ savedWatchers := []*statsWatcher{} for key, value := range watchers { if key.startID <= savedRequestID && savedRequestID < key.endID { savedWatchers = append(savedWatchers, value) } } mu.Unlock() r, err := c.UnaryCall(ctx, &testpb.SimpleRequest{FillServerId: true}, grpc.Peer(p)) success := err == nil cancel() for _, watcher := range savedWatchers { watcher.c <- r } if success && *printResponse { fmt.Printf("Greeting: Hello world, this is %s, from %v\n", r.GetHostname(), p.Addr) } }(i) i = (i + 1) % len(clients) } } grpc-go-1.29.1/interop/xds/server/000077500000000000000000000000001365033716300167355ustar00rootroot00000000000000grpc-go-1.29.1/interop/xds/server/server.go000066400000000000000000000031641365033716300205760ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary server for xDS interop tests. 
package main import ( "context" "flag" "log" "net" "os" "strconv" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" testpb "google.golang.org/grpc/interop/grpc_testing" ) var ( port = flag.Int("port", 8080, "The server port") serverID = flag.String("server_id", "go_server", "Server ID included in response") hostname = getHostname() ) func getHostname() string { hostname, err := os.Hostname() if err != nil { log.Fatalf("failed to get hostname: %v", err) } return hostname } type server struct { testpb.UnimplementedTestServiceServer } func (s *server) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{ServerId: *serverID, Hostname: hostname}, nil } func main() { flag.Parse() p := strconv.Itoa(*port) lis, err := net.Listen("tcp", ":"+p) if err != nil { grpclog.Fatalf("failed to listen: %v", err) } s := grpc.NewServer() testpb.RegisterTestServiceServer(s, &server{}) s.Serve(lis) } grpc-go-1.29.1/keepalive/000077500000000000000000000000001365033716300151165ustar00rootroot00000000000000grpc-go-1.29.1/keepalive/keepalive.go000066400000000000000000000076631365033716300174260ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package keepalive defines configurable parameters for point-to-point // healthcheck. package keepalive import ( "time" ) // ClientParameters is used to set keepalive parameters on the client-side. 
// These configure how the client will actively probe to notice when a // connection is broken and send pings so intermediaries will be aware of the // liveness of the connection. Make sure these parameters are set in // coordination with the keepalive policy on the server, as incompatible // settings can result in closing of connection. type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. Time time.Duration // The current default value is infinity. // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. Timeout time.Duration // The current default value is 20 seconds. // If true, client sends keepalive pings even with no active RPCs. If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. PermitWithoutStream bool // false by default. } // ServerParameters is used to set keepalive and max-age parameters on the // server-side. type ServerParameters struct { // MaxConnectionIdle is a duration for the amount of time after which an // idle connection would be closed by sending a GoAway. Idleness duration is // defined since the most recent time the number of outstanding RPCs became // zero or the connection establishment. MaxConnectionIdle time.Duration // The current default value is infinity. // MaxConnectionAge is a duration for the maximum amount of time a // connection may exist before it will be closed by sending a GoAway. A // random jitter of +/-10% will be added to MaxConnectionAge to spread out // connection storms. MaxConnectionAge time.Duration // The current default value is infinity. // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after // which the connection will be forcibly closed. 
MaxConnectionAgeGrace time.Duration // The current default value is infinity. // After a duration of this time if the server doesn't see any activity it // pings the client to see if the transport is still alive. // If set below 1s, a minimum value of 1s will be used instead. Time time.Duration // The current default value is 2 hours. // After having pinged for keepalive check, the server waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. Timeout time.Duration // The current default value is 20 seconds. } // EnforcementPolicy is used to set keepalive enforcement policy on the // server-side. Server will close connection with a client that violates this // policy. type EnforcementPolicy struct { // MinTime is the minimum amount of time a client should wait before sending // a keepalive ping. MinTime time.Duration // The current default value is 5 minutes. // If true, server allows keepalive pings even when there are no active // streams(RPCs). If false, and client sends ping when there are no active // streams, server will send GOAWAY and close the connection. PermitWithoutStream bool // false by default. } grpc-go-1.29.1/metadata/000077500000000000000000000000001365033716300147315ustar00rootroot00000000000000grpc-go-1.29.1/metadata/metadata.go000066400000000000000000000142541365033716300170460ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ // Package metadata define the structure of the metadata supported by gRPC library. // Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md // for more information about custom-metadata. package metadata // import "google.golang.org/grpc/metadata" import ( "context" "fmt" "strings" ) // DecodeKeyValue returns k, v, nil. // // Deprecated: use k and v directly instead. func DecodeKeyValue(k, v string) (string, string, error) { return k, v, nil } // MD is a mapping from metadata keys to values. Users should use the following // two convenience functions New and Pairs to generate MD. type MD map[string][]string // New creates an MD from a given key-value map. // // Only the following ASCII characters are allowed in keys: // - digits: 0-9 // - uppercase letters: A-Z (normalized to lower) // - lowercase letters: a-z // - special characters: -_. // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may // result in errors if set in metadata. func New(m map[string]string) MD { md := MD{} for k, val := range m { key := strings.ToLower(k) md[key] = append(md[key], val) } return md } // Pairs returns an MD formed by the mapping of key, value ... // Pairs panics if len(kv) is odd. // // Only the following ASCII characters are allowed in keys: // - digits: 0-9 // - uppercase letters: A-Z (normalized to lower) // - lowercase letters: a-z // - special characters: -_. // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may // result in errors if set in metadata. 
func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } md := MD{} var key string for i, s := range kv { if i%2 == 0 { key = strings.ToLower(s) continue } md[key] = append(md[key], s) } return md } // Len returns the number of items in md. func (md MD) Len() int { return len(md) } // Copy returns a copy of md. func (md MD) Copy() MD { return Join(md) } // Get obtains the values for a given key. func (md MD) Get(k string) []string { k = strings.ToLower(k) return md[k] } // Set sets the value of a given key with a slice of values. func (md MD) Set(k string, vals ...string) { if len(vals) == 0 { return } k = strings.ToLower(k) md[k] = vals } // Append adds the values to key k, not overwriting what was already stored at that key. func (md MD) Append(k string, vals ...string) { if len(vals) == 0 { return } k = strings.ToLower(k) md[k] = append(md[k], vals...) } // Join joins any number of mds into a single MD. // The order of values for each key is determined by the order in which // the mds containing those values are presented to Join. func Join(mds ...MD) MD { out := MD{} for _, md := range mds { for k, v := range md { out[k] = append(out[k], v...) } } return out } type mdIncomingKey struct{} type mdOutgoingKey struct{} // NewIncomingContext creates a new context with incoming md attached. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will // overwrite any previously-appended metadata. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } // AppendToOutgoingContext returns a new context with the provided kv merged // with any existing metadata in the context. 
Please refer to the // documentation of Pairs for a description of kv. func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) } md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) added := make([][]string, len(md.added)+1) copy(added, md.added) added[len(added)-1] = make([]string, len(kv)) copy(added[len(added)-1], kv) return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } // FromIncomingContext returns the incoming metadata in ctx if it exists. The // returned MD should not be modified. Writing to it may cause races. // Modification should be made to copies of the returned MD. func FromIncomingContext(ctx context.Context) (md MD, ok bool) { md, ok = ctx.Value(mdIncomingKey{}).(MD) return } // FromOutgoingContextRaw returns the un-merged, intermediary contents // of rawMD. Remember to perform strings.ToLower on the keys. The returned // MD should not be modified. Writing to it may cause races. Modification // should be made to copies of the returned MD. // // This is intended for gRPC-internal use ONLY. func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, nil, false } return raw.md, raw.added, true } // FromOutgoingContext returns the outgoing metadata in ctx if it exists. The // returned MD should not be modified. Writing to it may cause races. // Modification should be made to copies of the returned MD. 
func FromOutgoingContext(ctx context.Context) (MD, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, false } mds := make([]MD, 0, len(raw.added)+1) mds = append(mds, raw.md) for _, vv := range raw.added { mds = append(mds, Pairs(vv...)) } return Join(mds...), ok } type rawMD struct { md MD added [][]string } grpc-go-1.29.1/metadata/metadata_test.go000066400000000000000000000157071365033716300201110ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package metadata import ( "context" "reflect" "strconv" "testing" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func (s) TestPairsMD(t *testing.T) { for _, test := range []struct { // input kv []string // output md MD }{ {[]string{}, MD{}}, {[]string{"k1", "v1", "k1", "v2"}, MD{"k1": []string{"v1", "v2"}}}, } { md := Pairs(test.kv...) 
if !reflect.DeepEqual(md, test.md) { t.Fatalf("Pairs(%v) = %v, want %v", test.kv, md, test.md) } } } func (s) TestCopy(t *testing.T) { const key, val = "key", "val" orig := Pairs(key, val) cpy := orig.Copy() if !reflect.DeepEqual(orig, cpy) { t.Errorf("copied value not equal to the original, got %v, want %v", cpy, orig) } orig[key][0] = "foo" if v := cpy[key][0]; v != val { t.Errorf("change in original should not affect copy, got %q, want %q", v, val) } } func (s) TestJoin(t *testing.T) { for _, test := range []struct { mds []MD want MD }{ {[]MD{}, MD{}}, {[]MD{Pairs("foo", "bar")}, Pairs("foo", "bar")}, {[]MD{Pairs("foo", "bar"), Pairs("foo", "baz")}, Pairs("foo", "bar", "foo", "baz")}, {[]MD{Pairs("foo", "bar"), Pairs("foo", "baz"), Pairs("zip", "zap")}, Pairs("foo", "bar", "foo", "baz", "zip", "zap")}, } { md := Join(test.mds...) if !reflect.DeepEqual(md, test.want) { t.Errorf("context's metadata is %v, want %v", md, test.want) } } } func (s) TestGet(t *testing.T) { for _, test := range []struct { md MD key string wantVals []string }{ {md: Pairs("My-Optional-Header", "42"), key: "My-Optional-Header", wantVals: []string{"42"}}, {md: Pairs("Header", "42", "Header", "43", "Header", "44", "other", "1"), key: "HEADER", wantVals: []string{"42", "43", "44"}}, {md: Pairs("HEADER", "10"), key: "HEADER", wantVals: []string{"10"}}, } { vals := test.md.Get(test.key) if !reflect.DeepEqual(vals, test.wantVals) { t.Errorf("value of metadata %v is %v, want %v", test.key, vals, test.wantVals) } } } func (s) TestSet(t *testing.T) { for _, test := range []struct { md MD setKey string setVals []string want MD }{ { md: Pairs("My-Optional-Header", "42", "other-key", "999"), setKey: "Other-Key", setVals: []string{"1"}, want: Pairs("my-optional-header", "42", "other-key", "1"), }, { md: Pairs("My-Optional-Header", "42"), setKey: "Other-Key", setVals: []string{"1", "2", "3"}, want: Pairs("my-optional-header", "42", "other-key", "1", "other-key", "2", "other-key", "3"), }, { md: 
Pairs("My-Optional-Header", "42"), setKey: "Other-Key", setVals: []string{}, want: Pairs("my-optional-header", "42"), }, } { test.md.Set(test.setKey, test.setVals...) if !reflect.DeepEqual(test.md, test.want) { t.Errorf("value of metadata is %v, want %v", test.md, test.want) } } } func (s) TestAppend(t *testing.T) { for _, test := range []struct { md MD appendKey string appendVals []string want MD }{ { md: Pairs("My-Optional-Header", "42"), appendKey: "Other-Key", appendVals: []string{"1"}, want: Pairs("my-optional-header", "42", "other-key", "1"), }, { md: Pairs("My-Optional-Header", "42"), appendKey: "my-OptIoNal-HeAder", appendVals: []string{"1", "2", "3"}, want: Pairs("my-optional-header", "42", "my-optional-header", "1", "my-optional-header", "2", "my-optional-header", "3"), }, { md: Pairs("My-Optional-Header", "42"), appendKey: "my-OptIoNal-HeAder", appendVals: []string{}, want: Pairs("my-optional-header", "42"), }, } { test.md.Append(test.appendKey, test.appendVals...) if !reflect.DeepEqual(test.md, test.want) { t.Errorf("value of metadata is %v, want %v", test.md, test.want) } } } func (s) TestAppendToOutgoingContext(t *testing.T) { // Pre-existing metadata ctx := NewOutgoingContext(context.Background(), Pairs("k1", "v1", "k2", "v2")) ctx = AppendToOutgoingContext(ctx, "k1", "v3") ctx = AppendToOutgoingContext(ctx, "k1", "v4") md, ok := FromOutgoingContext(ctx) if !ok { t.Errorf("Expected MD to exist in ctx, but got none") } want := Pairs("k1", "v1", "k1", "v3", "k1", "v4", "k2", "v2") if !reflect.DeepEqual(md, want) { t.Errorf("context's metadata is %v, want %v", md, want) } // No existing metadata ctx = AppendToOutgoingContext(context.Background(), "k1", "v1") md, ok = FromOutgoingContext(ctx) if !ok { t.Errorf("Expected MD to exist in ctx, but got none") } want = Pairs("k1", "v1") if !reflect.DeepEqual(md, want) { t.Errorf("context's metadata is %v, want %v", md, want) } } func (s) TestAppendToOutgoingContext_Repeated(t *testing.T) { ctx := 
context.Background() for i := 0; i < 100; i = i + 2 { ctx1 := AppendToOutgoingContext(ctx, "k", strconv.Itoa(i)) ctx2 := AppendToOutgoingContext(ctx, "k", strconv.Itoa(i+1)) md1, _ := FromOutgoingContext(ctx1) md2, _ := FromOutgoingContext(ctx2) if reflect.DeepEqual(md1, md2) { t.Fatalf("md1, md2 = %v, %v; should not be equal", md1, md2) } ctx = ctx1 } } func (s) TestAppendToOutgoingContext_FromKVSlice(t *testing.T) { const k, v = "a", "b" kv := []string{k, v} ctx := AppendToOutgoingContext(context.Background(), kv...) md, _ := FromOutgoingContext(ctx) if md[k][0] != v { t.Fatalf("md[%q] = %q; want %q", k, md[k], v) } kv[1] = "xxx" md, _ = FromOutgoingContext(ctx) if md[k][0] != v { t.Fatalf("md[%q] = %q; want %q", k, md[k], v) } } // Old/slow approach to adding metadata to context func Benchmark_AddingMetadata_ContextManipulationApproach(b *testing.B) { // TODO: Add in N=1-100 tests once Go1.6 support is removed. const num = 10 for n := 0; n < b.N; n++ { ctx := context.Background() for i := 0; i < num; i++ { md, _ := FromOutgoingContext(ctx) NewOutgoingContext(ctx, Join(Pairs("k1", "v1", "k2", "v2"), md)) } } } // Newer/faster approach to adding metadata to context func BenchmarkAppendToOutgoingContext(b *testing.B) { const num = 10 for n := 0; n < b.N; n++ { ctx := context.Background() for i := 0; i < num; i++ { ctx = AppendToOutgoingContext(ctx, "k1", "v1", "k2", "v2") } } } func BenchmarkFromOutgoingContext(b *testing.B) { ctx := context.Background() ctx = NewOutgoingContext(ctx, MD{"k3": {"v3", "v4"}}) ctx = AppendToOutgoingContext(ctx, "k1", "v1", "k2", "v2") for n := 0; n < b.N; n++ { FromOutgoingContext(ctx) } } grpc-go-1.29.1/naming/000077500000000000000000000000001365033716300144225ustar00rootroot00000000000000grpc-go-1.29.1/naming/dns_resolver.go000066400000000000000000000206151365033716300174620ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package naming import ( "context" "errors" "fmt" "net" "strconv" "time" "google.golang.org/grpc/grpclog" ) const ( defaultPort = "443" defaultFreq = time.Minute * 30 ) var ( errMissingAddr = errors.New("missing address") errWatcherClose = errors.New("watcher has been closed") lookupHost = net.DefaultResolver.LookupHost lookupSRV = net.DefaultResolver.LookupSRV ) // NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and // create watchers that poll the DNS server using the frequency set by freq. func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { return &dnsResolver{freq: freq}, nil } // NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create // watchers that poll the DNS server using the default frequency defined by defaultFreq. func NewDNSResolver() (Resolver, error) { return NewDNSResolverWithFreq(defaultFreq) } // dnsResolver handles name resolution for names following the DNS scheme type dnsResolver struct { // frequency of polling the DNS server that the watchers created by this resolver will use. freq time.Duration } // formatIP returns ok = false if addr is not a valid textual representation of an IP address. // If addr is an IPv4 address, return the addr and ok = true. // If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. 
func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { return "", false } if ip.To4() != nil { return addr, true } return "[" + addr + "]", true } // parseTarget takes the user input target string, returns formatted host and port info. // If target doesn't specify a port, set the port to be the defaultPort. // If target is in IPv6 format and host-name is enclosed in square brackets, brackets // are stripped when setting the host. // examples: // target: "www.google.com" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" returns host: "ipv4-host", port: "80" // target: "[ipv6-host]" returns host: "ipv6-host", port: "443" // target: ":80" returns host: "localhost", port: "80" // target: ":" returns host: "localhost", port: "443" func parseTarget(target string) (host, port string, err error) { if target == "" { return "", "", errMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address return target, defaultPort, nil } if host, port, err := net.SplitHostPort(target); err == nil { // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. host = "localhost" } if port == "" { // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. port = defaultPort } return host, port, nil } if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { // target doesn't have port return host, port, nil } return "", "", fmt.Errorf("invalid target address %v", target) } // Resolve creates a watcher that watches the name resolution of the target. 
func (r *dnsResolver) Resolve(target string) (Watcher, error) { host, port, err := parseTarget(target) if err != nil { return nil, err } if net.ParseIP(host) != nil { ipWatcher := &ipWatcher{ updateChan: make(chan *Update, 1), } host, _ = formatIP(host) ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} return ipWatcher, nil } ctx, cancel := context.WithCancel(context.Background()) return &dnsWatcher{ r: r, host: host, port: port, ctx: ctx, cancel: cancel, t: time.NewTimer(0), }, nil } // dnsWatcher watches for the name resolution update for a specific target type dnsWatcher struct { r *dnsResolver host string port string // The latest resolved address set curAddrs map[string]*Update ctx context.Context cancel context.CancelFunc t *time.Timer } // ipWatcher watches for the name resolution update for an IP address. type ipWatcher struct { updateChan chan *Update } // Next returns the address resolution Update for the target. For IP address, // the resolution is itself, thus polling name server is unnecessary. Therefore, // Next() will return an Update the first time it is called, and will be blocked // for all following calls as no Update exists until watcher is closed. func (i *ipWatcher) Next() ([]*Update, error) { u, ok := <-i.updateChan if !ok { return nil, errWatcherClose } return []*Update{u}, nil } // Close closes the ipWatcher. func (i *ipWatcher) Close() { close(i.updateChan) } // AddressType indicates the address type returned by name resolution. type AddressType uint8 const ( // Backend indicates the server is a backend server. Backend AddressType = iota // GRPCLB indicates the server is a grpclb load balancer. GRPCLB ) // AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The // name resolver used by the grpclb balancer is required to provide this type of metadata in // its address updates. type AddrMetadataGRPCLB struct { // AddrType is the type of server (grpc load balancer or backend). 
AddrType AddressType // ServerName is the name of the grpc load balancer. Used for authentication. ServerName string } // compileUpdate compares the old resolved addresses and newly resolved addresses, // and generates an update list func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { var res []*Update for a, u := range w.curAddrs { if _, ok := newAddrs[a]; !ok { u.Op = Delete res = append(res, u) } } for a, u := range newAddrs { if _, ok := w.curAddrs[a]; !ok { res = append(res, u) } } return res } func (w *dnsWatcher) lookupSRV() map[string]*Update { newAddrs := make(map[string]*Update) _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) if err != nil { grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) return nil } for _, s := range srvs { lbAddrs, err := lookupHost(w.ctx, s.Target) if err != nil { grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) continue } for _, a := range lbAddrs { a, ok := formatIP(a) if !ok { grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) continue } addr := a + ":" + strconv.Itoa(int(s.Port)) newAddrs[addr] = &Update{Addr: addr, Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} } } return newAddrs } func (w *dnsWatcher) lookupHost() map[string]*Update { newAddrs := make(map[string]*Update) addrs, err := lookupHost(w.ctx, w.host) if err != nil { grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) return nil } for _, a := range addrs { a, ok := formatIP(a) if !ok { grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) continue } addr := a + ":" + w.port newAddrs[addr] = &Update{Addr: addr} } return newAddrs } func (w *dnsWatcher) lookup() []*Update { newAddrs := w.lookupSRV() if newAddrs == nil { // If failed to get any balancer address (either no corresponding SRV for the // target, or caused by failure during resolution/parsing of the balancer target), // return any A record info available. 
newAddrs = w.lookupHost() } result := w.compileUpdate(newAddrs) w.curAddrs = newAddrs return result } // Next returns the resolved address update(delta) for the target. If there's no // change, it will sleep for 30 mins and try to resolve again after that. func (w *dnsWatcher) Next() ([]*Update, error) { for { select { case <-w.ctx.Done(): return nil, errWatcherClose case <-w.t.C: } result := w.lookup() // Next lookup should happen after an interval defined by w.r.freq. w.t.Reset(w.r.freq) if len(result) > 0 { return result, nil } } } func (w *dnsWatcher) Close() { w.cancel() } grpc-go-1.29.1/naming/dns_resolver_test.go000066400000000000000000000221551365033716300205220ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package naming import ( "context" "fmt" "net" "reflect" "sync" "testing" "time" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func newUpdateWithMD(op Operation, addr, lb string) *Update { return &Update{ Op: op, Addr: addr, Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: lb}, } } func toMap(u []*Update) map[string]*Update { m := make(map[string]*Update) for _, v := range u { m[v.Addr] = v } return m } func (s) TestCompileUpdate(t *testing.T) { tests := []struct { oldAddrs []string newAddrs []string want []*Update }{ { []string{}, []string{"1.0.0.1"}, []*Update{{Op: Add, Addr: "1.0.0.1"}}, }, { []string{"1.0.0.1"}, []string{"1.0.0.1"}, []*Update{}, }, { []string{"1.0.0.0"}, []string{"1.0.0.1"}, []*Update{{Op: Delete, Addr: "1.0.0.0"}, {Op: Add, Addr: "1.0.0.1"}}, }, { []string{"1.0.0.1"}, []string{"1.0.0.0"}, []*Update{{Op: Add, Addr: "1.0.0.0"}, {Op: Delete, Addr: "1.0.0.1"}}, }, { []string{"1.0.0.1"}, []string{"1.0.0.1", "1.0.0.2", "1.0.0.3"}, []*Update{{Op: Add, Addr: "1.0.0.2"}, {Op: Add, Addr: "1.0.0.3"}}, }, { []string{"1.0.0.1", "1.0.0.2", "1.0.0.3"}, []string{"1.0.0.0"}, []*Update{{Op: Add, Addr: "1.0.0.0"}, {Op: Delete, Addr: "1.0.0.1"}, {Op: Delete, Addr: "1.0.0.2"}, {Op: Delete, Addr: "1.0.0.3"}}, }, { []string{"1.0.0.1", "1.0.0.3", "1.0.0.5"}, []string{"1.0.0.2", "1.0.0.3", "1.0.0.6"}, []*Update{{Op: Delete, Addr: "1.0.0.1"}, {Op: Add, Addr: "1.0.0.2"}, {Op: Delete, Addr: "1.0.0.5"}, {Op: Add, Addr: "1.0.0.6"}}, }, { []string{"1.0.0.1", "1.0.0.1", "1.0.0.2"}, []string{"1.0.0.1"}, []*Update{{Op: Delete, Addr: "1.0.0.2"}}, }, } var w dnsWatcher for _, c := range tests { w.curAddrs = make(map[string]*Update) newUpdates := make(map[string]*Update) for _, a := range c.oldAddrs { w.curAddrs[a] = &Update{Addr: a} } for _, a := range c.newAddrs { newUpdates[a] = &Update{Addr: a} } r := w.compileUpdate(newUpdates) if !reflect.DeepEqual(toMap(c.want), 
toMap(r)) { t.Errorf("w(%+v).compileUpdate(%+v) = %+v, want %+v", c.oldAddrs, c.newAddrs, updatesToSlice(r), updatesToSlice(c.want)) } } } func (s) TestResolveFunc(t *testing.T) { tests := []struct { addr string want error }{ // TODO(yuxuanli): More false cases? {"www.google.com", nil}, {"foo.bar:12345", nil}, {"127.0.0.1", nil}, {"127.0.0.1:12345", nil}, {"[::1]:80", nil}, {"[2001:db8:a0b:12f0::1]:21", nil}, {":80", nil}, {"127.0.0...1:12345", nil}, {"[fe80::1%lo0]:80", nil}, {"golang.org:http", nil}, {"[2001:db8::1]:http", nil}, {":", nil}, {"", errMissingAddr}, {"[2001:db8:a0b:12f0::1", fmt.Errorf("invalid target address %v", "[2001:db8:a0b:12f0::1")}, } r, err := NewDNSResolver() if err != nil { t.Errorf("%v", err) } for _, v := range tests { _, err := r.Resolve(v.addr) if !reflect.DeepEqual(err, v.want) { t.Errorf("Resolve(%q) = %v, want %v", v.addr, err, v.want) } } } var hostLookupTbl = map[string][]string{ "foo.bar.com": {"1.2.3.4", "5.6.7.8"}, "ipv4.single.fake": {"1.2.3.4"}, "ipv4.multi.fake": {"1.2.3.4", "5.6.7.8", "9.10.11.12"}, "ipv6.single.fake": {"2607:f8b0:400a:801::1001"}, "ipv6.multi.fake": {"2607:f8b0:400a:801::1001", "2607:f8b0:400a:801::1002", "2607:f8b0:400a:801::1003"}, } func hostLookup(host string) ([]string, error) { if addrs, ok := hostLookupTbl[host]; ok { return addrs, nil } return nil, fmt.Errorf("failed to lookup host:%s resolution in hostLookupTbl", host) } var srvLookupTbl = map[string][]*net.SRV{ "_grpclb._tcp.srv.ipv4.single.fake": {&net.SRV{Target: "ipv4.single.fake", Port: 1234}}, "_grpclb._tcp.srv.ipv4.multi.fake": {&net.SRV{Target: "ipv4.multi.fake", Port: 1234}}, "_grpclb._tcp.srv.ipv6.single.fake": {&net.SRV{Target: "ipv6.single.fake", Port: 1234}}, "_grpclb._tcp.srv.ipv6.multi.fake": {&net.SRV{Target: "ipv6.multi.fake", Port: 1234}}, } func srvLookup(service, proto, name string) (string, []*net.SRV, error) { cname := "_" + service + "._" + proto + "." 
+ name if srvs, ok := srvLookupTbl[cname]; ok { return cname, srvs, nil } return "", nil, fmt.Errorf("failed to lookup srv record for %s in srvLookupTbl", cname) } func updatesToSlice(updates []*Update) []Update { res := make([]Update, len(updates)) for i, u := range updates { res[i] = *u } return res } func testResolver(t *testing.T, freq time.Duration, slp time.Duration) { tests := []struct { target string want []*Update }{ { "foo.bar.com", []*Update{{Op: Add, Addr: "1.2.3.4" + colonDefaultPort}, {Op: Add, Addr: "5.6.7.8" + colonDefaultPort}}, }, { "foo.bar.com:1234", []*Update{{Op: Add, Addr: "1.2.3.4:1234"}, {Op: Add, Addr: "5.6.7.8:1234"}}, }, { "srv.ipv4.single.fake", []*Update{newUpdateWithMD(Add, "1.2.3.4:1234", "ipv4.single.fake")}, }, { "srv.ipv4.multi.fake", []*Update{ newUpdateWithMD(Add, "1.2.3.4:1234", "ipv4.multi.fake"), newUpdateWithMD(Add, "5.6.7.8:1234", "ipv4.multi.fake"), newUpdateWithMD(Add, "9.10.11.12:1234", "ipv4.multi.fake")}, }, { "srv.ipv6.single.fake", []*Update{newUpdateWithMD(Add, "[2607:f8b0:400a:801::1001]:1234", "ipv6.single.fake")}, }, { "srv.ipv6.multi.fake", []*Update{ newUpdateWithMD(Add, "[2607:f8b0:400a:801::1001]:1234", "ipv6.multi.fake"), newUpdateWithMD(Add, "[2607:f8b0:400a:801::1002]:1234", "ipv6.multi.fake"), newUpdateWithMD(Add, "[2607:f8b0:400a:801::1003]:1234", "ipv6.multi.fake"), }, }, } for _, a := range tests { r, err := NewDNSResolverWithFreq(freq) if err != nil { t.Fatalf("%v\n", err) } w, err := r.Resolve(a.target) if err != nil { t.Fatalf("%v\n", err) } updates, err := w.Next() if err != nil { t.Fatalf("%v\n", err) } if !reflect.DeepEqual(toMap(a.want), toMap(updates)) { t.Errorf("Resolve(%q) = %+v, want %+v\n", a.target, updatesToSlice(updates), updatesToSlice(a.want)) } var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() for { _, err := w.Next() if err != nil { return } t.Error("Execution shouldn't reach here, since w.Next() should be blocked until close happen.") } }() // Sleep for sometime to let 
watcher do more than one lookup time.Sleep(slp) w.Close() wg.Wait() } } func replaceNetFunc() func() { oldLookupHost := lookupHost oldLookupSRV := lookupSRV lookupHost = func(ctx context.Context, host string) ([]string, error) { return hostLookup(host) } lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { return srvLookup(service, proto, name) } return func() { lookupHost = oldLookupHost lookupSRV = oldLookupSRV } } func (s) TestResolve(t *testing.T) { defer replaceNetFunc()() testResolver(t, time.Millisecond*5, time.Millisecond*10) } const colonDefaultPort = ":" + defaultPort func (s) TestIPWatcher(t *testing.T) { tests := []struct { target string want []*Update }{ {"127.0.0.1", []*Update{{Op: Add, Addr: "127.0.0.1" + colonDefaultPort}}}, {"127.0.0.1:12345", []*Update{{Op: Add, Addr: "127.0.0.1:12345"}}}, {"::1", []*Update{{Op: Add, Addr: "[::1]" + colonDefaultPort}}}, {"[::1]:12345", []*Update{{Op: Add, Addr: "[::1]:12345"}}}, {"[::1]:", []*Update{{Op: Add, Addr: "[::1]:443"}}}, {"2001:db8:85a3::8a2e:370:7334", []*Update{{Op: Add, Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}}, {"[2001:db8:85a3::8a2e:370:7334]", []*Update{{Op: Add, Addr: "[2001:db8:85a3::8a2e:370:7334]" + colonDefaultPort}}}, {"[2001:db8:85a3::8a2e:370:7334]:12345", []*Update{{Op: Add, Addr: "[2001:db8:85a3::8a2e:370:7334]:12345"}}}, {"[2001:db8::1]:http", []*Update{{Op: Add, Addr: "[2001:db8::1]:http"}}}, // TODO(yuxuanli): zone support? 
} for _, v := range tests { r, err := NewDNSResolverWithFreq(time.Millisecond * 5) if err != nil { t.Fatalf("%v\n", err) } w, err := r.Resolve(v.target) if err != nil { t.Fatalf("%v\n", err) } var updates []*Update var wg sync.WaitGroup wg.Add(1) count := 0 go func() { defer wg.Done() for { u, err := w.Next() if err != nil { return } updates = u count++ } }() // Sleep for sometime to let watcher do more than one lookup time.Sleep(time.Millisecond * 10) w.Close() wg.Wait() if !reflect.DeepEqual(v.want, updates) { t.Errorf("Resolve(%q) = %v, want %+v\n", v.target, updatesToSlice(updates), updatesToSlice(v.want)) } if count != 1 { t.Errorf("IPWatcher Next() should return only once, not %d times\n", count) } } } grpc-go-1.29.1/naming/naming.go000066400000000000000000000042711365033716300162260ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package naming defines the naming API and related data structures for gRPC. // // This package is deprecated: please use package resolver instead. package naming // Operation defines the corresponding operations for a name resolution change. // // Deprecated: please use package resolver. type Operation uint8 const ( // Add indicates a new address is added. Add Operation = iota // Delete indicates an existing address is deleted. Delete ) // Update defines a name resolution update. Notice that it is not valid having both // empty string Addr and nil Metadata in an Update. 
// // Deprecated: please use package resolver. type Update struct { // Op indicates the operation of the update. Op Operation // Addr is the updated address. It is empty string if there is no address update. Addr string // Metadata is the updated metadata. It is nil if there is no metadata update. // Metadata is not required for a custom naming implementation. Metadata interface{} } // Resolver creates a Watcher for a target to track its resolution changes. // // Deprecated: please use package resolver. type Resolver interface { // Resolve creates a Watcher for target. Resolve(target string) (Watcher, error) } // Watcher watches for the updates on the specified target. // // Deprecated: please use package resolver. type Watcher interface { // Next blocks until an update or error happens. It may return one or more // updates. The first call should get the full set of the results. It should // return an error if and only if Watcher cannot recover. Next() ([]*Update, error) // Close closes the Watcher. Close() } grpc-go-1.29.1/peer/000077500000000000000000000000001365033716300141045ustar00rootroot00000000000000grpc-go-1.29.1/peer/peer.go000066400000000000000000000027271365033716300153760ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package peer defines various peer information associated with RPCs and // corresponding utils. 
package peer import ( "context" "net" "google.golang.org/grpc/credentials" ) // Peer contains the information of the peer for an RPC, such as the address // and authentication information. type Peer struct { // Addr is the peer address. Addr net.Addr // AuthInfo is the authentication information of the transport. // It is nil if there is no transport security being used. AuthInfo credentials.AuthInfo } type peerKey struct{} // NewContext creates a new context with peer information attached. func NewContext(ctx context.Context, p *Peer) context.Context { return context.WithValue(ctx, peerKey{}, p) } // FromContext returns the peer information in ctx if it exists. func FromContext(ctx context.Context) (p *Peer, ok bool) { p, ok = ctx.Value(peerKey{}).(*Peer) return } grpc-go-1.29.1/picker_wrapper.go000066400000000000000000000144301365033716300165170ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "context" "fmt" "io" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) // v2PickerWrapper wraps a balancer.Picker while providing the // balancer.V2Picker API. It requires a pickerWrapper to generate errors // including the latest connectionError. To be deleted when balancer.Picker is // updated to the balancer.V2Picker API. 
type v2PickerWrapper struct { picker balancer.Picker connErr *connErr } func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) { sc, done, err := v.picker.Pick(info.Ctx, info) if err != nil { if err == balancer.ErrTransientFailure { return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError())) } return balancer.PickResult{}, err } return balancer.PickResult{SubConn: sc, Done: done}, nil } // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { mu sync.Mutex done bool blockingCh chan struct{} picker balancer.V2Picker // The latest connection error. TODO: remove when V1 picker is deprecated; // balancer should be responsible for providing the error. *connErr } type connErr struct { mu sync.Mutex err error } func (c *connErr) updateConnectionError(err error) { c.mu.Lock() c.err = err c.mu.Unlock() } func (c *connErr) connectionError() error { c.mu.Lock() err := c.err c.mu.Unlock() return err } func newPickerWrapper() *pickerWrapper { return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}} } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr}) } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) { pw.mu.Lock() if pw.done { pw.mu.Unlock() return } pw.picker = p // pw.blockingCh should never be nil. 
close(pw.blockingCh) pw.blockingCh = make(chan struct{}) pw.mu.Unlock() } func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { acw.mu.Lock() ac := acw.ac acw.mu.Unlock() ac.incrCallsStarted() return func(b balancer.DoneInfo) { if b.Err != nil && b.Err != io.EOF { ac.incrCallsFailed() } else { ac.incrCallsSucceeded() } if done != nil { done(b) } } } // pick returns the transport that will be used for the RPC. // It may block in the following cases: // - there's no picker // - the current picker returns ErrNoSubConnAvailable // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { var ch chan struct{} var lastPickErr error for { pw.mu.Lock() if pw.done { pw.mu.Unlock() return nil, nil, ErrClientConnClosing } if pw.picker == nil { ch = pw.blockingCh } if ch == pw.blockingCh { // This could happen when either: // - pw.picker is nil (the previous if condition), or // - has called pick on the current picker. 
pw.mu.Unlock() select { case <-ctx.Done(): var errStr string if lastPickErr != nil { errStr = "latest balancer error: " + lastPickErr.Error() } else if connectionErr := pw.connectionError(); connectionErr != nil { errStr = "latest connection error: " + connectionErr.Error() } else { errStr = ctx.Err().Error() } switch ctx.Err() { case context.DeadlineExceeded: return nil, nil, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: return nil, nil, status.Error(codes.Canceled, errStr) } case <-ch: } continue } ch = pw.blockingCh p := pw.picker pw.mu.Unlock() pickResult, err := p.Pick(info) if err != nil { if err == balancer.ErrNoSubConnAvailable { continue } if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() { if !failfast { lastPickErr = err continue } return nil, nil, status.Error(codes.Unavailable, err.Error()) } if _, ok := status.FromError(err); ok { return nil, nil, err } // err is some other error. return nil, nil, status.Error(codes.Unknown, err.Error()) } acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { grpclog.Error("subconn returned from pick is not *acBalancerWrapper") continue } if t, ok := acw.getAddrConn().getReadyTransport(); ok { if channelz.IsOn() { return t, doneChannelzWrapper(acw, pickResult.Done), nil } return t, pickResult.Done, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. // DoneInfo with default value works. pickResult.Done(balancer.DoneInfo{}) } grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") // If ok == false, ac.state is not READY. // A valid picker always returns READY subConn. This means the state of ac // just changed, and picker will be updated shortly. // continue back to the beginning of the for loop to repick. 
} } func (pw *pickerWrapper) close() { pw.mu.Lock() defer pw.mu.Unlock() if pw.done { return } pw.done = true close(pw.blockingCh) } grpc-go-1.29.1/picker_wrapper_test.go000066400000000000000000000114171365033716300175600ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "context" "fmt" "sync/atomic" "testing" "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) const goroutineCount = 5 var ( testT = &testTransport{} testSC = &acBalancerWrapper{ac: &addrConn{ state: connectivity.Ready, transport: testT, }} testSCNotReady = &acBalancerWrapper{ac: &addrConn{ state: connectivity.TransientFailure, }} ) type testTransport struct { transport.ClientTransport } type testingPicker struct { err error sc balancer.SubConn maxCalled int64 } func (p *testingPicker) Pick(ctx context.Context, info balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { if atomic.AddInt64(&p.maxCalled, -1) < 0 { return nil, nil, fmt.Errorf("pick called to many times (> goroutineCount)") } if p.err != nil { return nil, nil, p.err } return p.sc, nil, nil } func (s) TestBlockingPickTimeout(t *testing.T) { bp := newPickerWrapper() ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() if _, _, err := bp.pick(ctx, true, balancer.PickInfo{}); 
status.Code(err) != codes.DeadlineExceeded { t.Errorf("bp.pick returned error %v, want DeadlineExceeded", err) } } func (s) TestBlockingPick(t *testing.T) { bp := newPickerWrapper() // All goroutines should block because picker is nil in bp. var finishedCount uint64 for i := goroutineCount; i > 0; i-- { go func() { if tr, _, err := bp.pick(context.Background(), true, balancer.PickInfo{}); err != nil || tr != testT { t.Errorf("bp.pick returned non-nil error: %v", err) } atomic.AddUint64(&finishedCount, 1) }() } time.Sleep(50 * time.Millisecond) if c := atomic.LoadUint64(&finishedCount); c != 0 { t.Errorf("finished goroutines count: %v, want 0", c) } bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount}) } func (s) TestBlockingPickNoSubAvailable(t *testing.T) { bp := newPickerWrapper() var finishedCount uint64 bp.updatePicker(&testingPicker{err: balancer.ErrNoSubConnAvailable, maxCalled: goroutineCount}) // All goroutines should block because picker returns no sc available. for i := goroutineCount; i > 0; i-- { go func() { if tr, _, err := bp.pick(context.Background(), true, balancer.PickInfo{}); err != nil || tr != testT { t.Errorf("bp.pick returned non-nil error: %v", err) } atomic.AddUint64(&finishedCount, 1) }() } time.Sleep(50 * time.Millisecond) if c := atomic.LoadUint64(&finishedCount); c != 0 { t.Errorf("finished goroutines count: %v, want 0", c) } bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount}) } func (s) TestBlockingPickTransientWaitforready(t *testing.T) { bp := newPickerWrapper() bp.updatePicker(&testingPicker{err: balancer.ErrTransientFailure, maxCalled: goroutineCount}) var finishedCount uint64 // All goroutines should block because picker returns transientFailure and // picks are not failfast. 
for i := goroutineCount; i > 0; i-- { go func() { if tr, _, err := bp.pick(context.Background(), false, balancer.PickInfo{}); err != nil || tr != testT { t.Errorf("bp.pick returned non-nil error: %v", err) } atomic.AddUint64(&finishedCount, 1) }() } time.Sleep(time.Millisecond) if c := atomic.LoadUint64(&finishedCount); c != 0 { t.Errorf("finished goroutines count: %v, want 0", c) } bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount}) } func (s) TestBlockingPickSCNotReady(t *testing.T) { bp := newPickerWrapper() bp.updatePicker(&testingPicker{sc: testSCNotReady, maxCalled: goroutineCount}) var finishedCount uint64 // All goroutines should block because sc is not ready. for i := goroutineCount; i > 0; i-- { go func() { if tr, _, err := bp.pick(context.Background(), true, balancer.PickInfo{}); err != nil || tr != testT { t.Errorf("bp.pick returned non-nil error: %v", err) } atomic.AddUint64(&finishedCount, 1) }() } time.Sleep(time.Millisecond) if c := atomic.LoadUint64(&finishedCount); c != 0 { t.Errorf("finished goroutines count: %v, want 0", c) } bp.updatePicker(&testingPicker{sc: testSC, maxCalled: goroutineCount}) } grpc-go-1.29.1/pickfirst.go000066400000000000000000000114741365033716300155050ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpc import ( "errors" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/resolver" "google.golang.org/grpc/status" ) // PickFirstBalancerName is the name of the pick_first balancer. const PickFirstBalancerName = "pick_first" func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} } type pickfirstBuilder struct{} func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { return &pickfirstBalancer{cc: cc} } func (*pickfirstBuilder) Name() string { return PickFirstBalancerName } type pickfirstBalancer struct { state connectivity.State cc balancer.ClientConn sc balancer.SubConn } var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2 func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { if err != nil { b.ResolverError(err) return } b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error } func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s}) } func (b *pickfirstBalancer) ResolverError(err error) { switch b.state { case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: // Set a failing picker if we don't have a good picker. 
b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}}, ) } if grpclog.V(2) { grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err) } } func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { if len(cs.ResolverState.Addresses) == 0 { b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } if b.sc == nil { var err error b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) if err != nil { if grpclog.V(2) { grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}}, ) return balancer.ErrBadResolverState } b.state = connectivity.Idle b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) b.sc.Connect() } else { b.sc.UpdateAddresses(cs.ResolverState.Addresses) b.sc.Connect() } return nil } func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { if grpclog.V(2) { grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) } if b.sc != sc { if grpclog.V(2) { grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") } return } b.state = s.ConnectivityState if s.ConnectivityState == connectivity.Shutdown { b.sc = nil return } switch s.ConnectivityState { case connectivity.Ready, connectivity.Idle: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) case connectivity.Connecting: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: 
balancer.ErrNoSubConnAvailable}}) case connectivity.TransientFailure: err := balancer.ErrTransientFailure // TODO: this can be unconditional after the V1 API is removed, as // SubConnState will always contain a connection error. if s.ConnectionError != nil { err = balancer.TransientFailureError(s.ConnectionError) } b.cc.UpdateState(balancer.State{ ConnectivityState: s.ConnectivityState, Picker: &picker{err: err}, }) } } func (b *pickfirstBalancer) Close() { } type picker struct { result balancer.PickResult err error } func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } func init() { balancer.Register(newPickfirstBuilder()) } grpc-go-1.29.1/pickfirst_test.go000066400000000000000000000272411365033716300165430ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpc import ( "context" "math" "sync" "testing" "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" ) func errorDesc(err error) string { if s, ok := status.FromError(err); ok { return s.Message() } return err.Error() } func (s) TestOneBackendPickfirst(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() numServers := 1 servers, _, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() // The first RPC should fail because there's no address. ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() req := "port" var reply string if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}}}) // The second RPC should succeed. for i := 0; i < 1000; i++ { if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { return } time.Sleep(time.Millisecond) } t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) } func (s) TestBackendsPickfirst(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() numServers := 2 servers, _, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() // The first RPC should fail because there's no address. 
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() req := "port" var reply string if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}}) // The second RPC should succeed with the first server. for i := 0; i < 1000; i++ { if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { return } time.Sleep(time.Millisecond) } t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) } func (s) TestNewAddressWhileBlockingPickfirst(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() numServers := 1 servers, _, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() // The first RPC should fail because there's no address. ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() req := "port" var reply string if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } var wg sync.WaitGroup for i := 0; i < 3; i++ { wg.Add(1) go func() { defer wg.Done() // This RPC blocks until NewAddress is called. 
cc.Invoke(context.Background(), "/foo/bar", &req, &reply) }() } time.Sleep(50 * time.Millisecond) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}}}) wg.Wait() } func (s) TestCloseWithPendingRPCPickfirst(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() numServers := 1 _, _, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() // The first RPC should fail because there's no address. ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() req := "port" var reply string if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } var wg sync.WaitGroup for i := 0; i < 3; i++ { wg.Add(1) go func() { defer wg.Done() // This RPC blocks until NewAddress is called. cc.Invoke(context.Background(), "/foo/bar", &req, &reply) }() } time.Sleep(50 * time.Millisecond) cc.Close() wg.Wait() } func (s) TestOneServerDownPickfirst(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() numServers := 2 servers, _, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() // The first RPC should fail because there's no address. 
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() req := "port" var reply string if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}}) // The second RPC should succeed with the first server. for i := 0; i < 1000; i++ { if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { break } time.Sleep(time.Millisecond) } servers[0].stop() for i := 0; i < 1000; i++ { if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port { return } time.Sleep(time.Millisecond) } t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) } func (s) TestAllServersDownPickfirst(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() numServers := 2 servers, _, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() // The first RPC should fail because there's no address. ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() req := "port" var reply string if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}}) // The second RPC should succeed with the first server. 
for i := 0; i < 1000; i++ { if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { break } time.Sleep(time.Millisecond) } for i := 0; i < numServers; i++ { servers[i].stop() } for i := 0; i < 1000; i++ { if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); status.Code(err) == codes.Unavailable { return } time.Sleep(time.Millisecond) } t.Fatalf("EmptyCall() = _, %v, want _, error with code unavailable", err) } func (s) TestAddressesRemovedPickfirst(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() numServers := 3 servers, _, scleanup := startServers(t, numServers, math.MaxInt32) defer scleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithCodec(testCodec{})) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() // The first RPC should fail because there's no address. ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) defer cancel() req := "port" var reply string if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}, {Addr: servers[2].addr}}}) for i := 0; i < 1000; i++ { if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { break } time.Sleep(time.Millisecond) } for i := 0; i < 20; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) } time.Sleep(10 * time.Millisecond) } // Remove server[0]. 
r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}}}) for i := 0; i < 1000; i++ { if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port { break } time.Sleep(time.Millisecond) } for i := 0; i < 20; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) } time.Sleep(10 * time.Millisecond) } // Append server[0], nothing should change. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}, {Addr: servers[0].addr}}}) for i := 0; i < 20; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) } time.Sleep(10 * time.Millisecond) } // Remove server[1]. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[2].addr}, {Addr: servers[0].addr}}}) for i := 0; i < 1000; i++ { if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[2].port { break } time.Sleep(time.Millisecond) } for i := 0; i < 20; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[2].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 2, err, servers[2].port) } time.Sleep(10 * time.Millisecond) } // Remove server[2]. 
r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}}}) for i := 0; i < 1000; i++ { if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { break } time.Sleep(time.Millisecond) } for i := 0; i < 20; i++ { if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port { t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) } time.Sleep(10 * time.Millisecond) } } grpc-go-1.29.1/preloader.go000066400000000000000000000035131365033716300154570ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) // PreparedMsg is responsible for creating a Marshalled and Compressed object. // // This API is EXPERIMENTAL. type PreparedMsg struct { // Struct for preparing msg before sending them encodedData []byte hdr []byte payload []byte } // Encode marshalls and compresses the message using the codec and compressor for the stream. 
func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo") } // check if the context has the relevant information to prepareMsg if rpcInfo.preloaderInfo == nil { return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") } if rpcInfo.preloaderInfo.codec == nil { return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") } // prepare the msg data, err := encode(rpcInfo.preloaderInfo.codec, msg) if err != nil { return err } p.encodedData = data compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) if err != nil { return err } p.hdr, p.payload = msgHeader(data, compData) return nil } grpc-go-1.29.1/profiling/000077500000000000000000000000001365033716300151425ustar00rootroot00000000000000grpc-go-1.29.1/profiling/cmd/000077500000000000000000000000001365033716300157055ustar00rootroot00000000000000grpc-go-1.29.1/profiling/cmd/catapult.go000066400000000000000000000312131365033716300200510ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package main import ( "encoding/binary" "encoding/json" "fmt" "os" "sort" "strings" "google.golang.org/grpc/grpclog" ppb "google.golang.org/grpc/profiling/proto" ) type jsonNode struct { Name string `json:"name"` Cat string `json:"cat"` ID string `json:"id"` Cname string `json:"cname"` Phase string `json:"ph"` Timestamp float64 `json:"ts"` PID string `json:"pid"` TID string `json:"tid"` } // Catapult does not allow specifying colours manually; a 20-odd predefined // labels are used (that don't make much sense outside the context of // Chromium). See this for more details: // // https://github.com/catapult-project/catapult/blob/bef344f7017fc9e04f7049d0f58af6d9ce9f4ab6/tracing/tracing/base/color_scheme.html#L29 func hashCname(tag string) string { if strings.Contains(tag, "encoding") { return "rail_response" } if strings.Contains(tag, "compression") { return "cq_build_passed" } if strings.Contains(tag, "transport") { if strings.Contains(tag, "blocking") { return "rail_animation" } return "good" } if strings.Contains(tag, "header") { return "cq_build_attempt_failed" } if tag == "/" { return "heap_dump_stack_frame" } if strings.Contains(tag, "flow") || strings.Contains(tag, "tmp") { return "heap_dump_stack_frame" } return "" } // filterCounter identifies the counter-th instance of a timer of the type // `filter` within a Stat. This, in conjunction with the counter data structure // defined below, is used to draw flows between linked loopy writer/reader // events with application goroutine events in trace-viewer. This is possible // because enqueues and dequeues are ordered -- that is, the first dequeue must // be dequeueing the first enqueue operation. 
func filterCounter(stat *ppb.Stat, filter string, counter int) int { localCounter := 0 for i := 0; i < len(stat.Timers); i++ { if stat.Timers[i].Tags == filter { if localCounter == counter { return i } localCounter++ } } return -1 } // counter is state object used to store and retrieve the number of timers of a // particular type that have been seen. type counter struct { c map[string]int } func newCounter() *counter { return &counter{c: make(map[string]int)} } func (c *counter) GetAndInc(s string) int { ret := c.c[s] c.c[s]++ return ret } func catapultNs(sec int64, nsec int32) float64 { return float64((sec * 1000000000) + int64(nsec)) } // streamStatsCatapultJSONSingle processes a single proto Stat object to return // an array of jsonNodes in trace-viewer's format. func streamStatsCatapultJSONSingle(stat *ppb.Stat, baseSec int64, baseNsec int32) []jsonNode { if len(stat.Timers) == 0 { return nil } connectionCounter := binary.BigEndian.Uint64(stat.Metadata[0:8]) streamID := binary.BigEndian.Uint32(stat.Metadata[8:12]) opid := fmt.Sprintf("/%s/%d/%d", stat.Tags, connectionCounter, streamID) var loopyReaderGoID, loopyWriterGoID int64 for i := 0; i < len(stat.Timers) && (loopyReaderGoID == 0 || loopyWriterGoID == 0); i++ { if strings.Contains(stat.Timers[i].Tags, "/loopyReader") { loopyReaderGoID = stat.Timers[i].GoId } else if strings.Contains(stat.Timers[i].Tags, "/loopyWriter") { loopyWriterGoID = stat.Timers[i].GoId } } lrc, lwc := newCounter(), newCounter() var result []jsonNode result = append(result, jsonNode{ Name: "loopyReaderTmp", ID: opid, Cname: hashCname("tmp"), Phase: "i", Timestamp: 0, PID: fmt.Sprintf("/%s/%d/loopyReader", stat.Tags, connectionCounter), TID: fmt.Sprintf("%d", loopyReaderGoID), }, jsonNode{ Name: "loopyWriterTmp", ID: opid, Cname: hashCname("tmp"), Phase: "i", Timestamp: 0, PID: fmt.Sprintf("/%s/%d/loopyWriter", stat.Tags, connectionCounter), TID: fmt.Sprintf("%d", loopyWriterGoID), }, ) for i := 0; i < len(stat.Timers); i++ { 
categories := stat.Tags pid, tid := opid, fmt.Sprintf("%d", stat.Timers[i].GoId) if stat.Timers[i].GoId == loopyReaderGoID { pid, tid = fmt.Sprintf("/%s/%d/loopyReader", stat.Tags, connectionCounter), fmt.Sprintf("%d", stat.Timers[i].GoId) var flowEndID int var flowEndPID, flowEndTID string switch stat.Timers[i].Tags { case "/http2/recv/header": flowEndID = filterCounter(stat, "/grpc/stream/recv/header", lrc.GetAndInc("/http2/recv/header")) if flowEndID != -1 { flowEndPID = opid flowEndTID = fmt.Sprintf("%d", stat.Timers[flowEndID].GoId) } else { grpclog.Infof("cannot find %s/grpc/stream/recv/header for %s/http2/recv/header", opid, opid) } case "/http2/recv/dataFrame/loopyReader": flowEndID = filterCounter(stat, "/recvAndDecompress", lrc.GetAndInc("/http2/recv/dataFrame/loopyReader")) if flowEndID != -1 { flowEndPID = opid flowEndTID = fmt.Sprintf("%d", stat.Timers[flowEndID].GoId) } else { grpclog.Infof("cannot find %s/recvAndDecompress for %s/http2/recv/dataFrame/loopyReader", opid, opid) } default: flowEndID = -1 } if flowEndID != -1 { flowID := fmt.Sprintf("lrc begin:/%d%s end:/%d%s begin:(%d, %s, %s) end:(%d, %s, %s)", connectionCounter, stat.Timers[i].Tags, connectionCounter, stat.Timers[flowEndID].Tags, i, pid, tid, flowEndID, flowEndPID, flowEndTID) result = append(result, jsonNode{ Name: fmt.Sprintf("%s/flow", opid), Cat: categories + ",flow", ID: flowID, Cname: hashCname("flow"), Phase: "s", Timestamp: catapultNs(stat.Timers[i].EndSec-baseSec, stat.Timers[i].EndNsec-baseNsec), PID: pid, TID: tid, }, jsonNode{ Name: fmt.Sprintf("%s/flow", opid), Cat: categories + ",flow", ID: flowID, Cname: hashCname("flow"), Phase: "f", Timestamp: catapultNs(stat.Timers[flowEndID].BeginSec-baseSec, stat.Timers[flowEndID].BeginNsec-baseNsec), PID: flowEndPID, TID: flowEndTID, }, ) } } else if stat.Timers[i].GoId == loopyWriterGoID { pid, tid = fmt.Sprintf("/%s/%d/loopyWriter", stat.Tags, connectionCounter), fmt.Sprintf("%d", stat.Timers[i].GoId) var flowBeginID int var 
flowBeginPID, flowBeginTID string switch stat.Timers[i].Tags { case "/http2/recv/header/loopyWriter/registerOutStream": flowBeginID = filterCounter(stat, "/http2/recv/header", lwc.GetAndInc("/http2/recv/header/loopyWriter/registerOutStream")) flowBeginPID = fmt.Sprintf("/%s/%d/loopyReader", stat.Tags, connectionCounter) flowBeginTID = fmt.Sprintf("%d", loopyReaderGoID) case "/http2/send/dataFrame/loopyWriter/preprocess": flowBeginID = filterCounter(stat, "/transport/enqueue", lwc.GetAndInc("/http2/send/dataFrame/loopyWriter/preprocess")) if flowBeginID != -1 { flowBeginPID = opid flowBeginTID = fmt.Sprintf("%d", stat.Timers[flowBeginID].GoId) } else { grpclog.Infof("cannot find /%d/transport/enqueue for /%d/http2/send/dataFrame/loopyWriter/preprocess", connectionCounter, connectionCounter) } default: flowBeginID = -1 } if flowBeginID != -1 { flowID := fmt.Sprintf("lwc begin:/%d%s end:/%d%s begin:(%d, %s, %s) end:(%d, %s, %s)", connectionCounter, stat.Timers[flowBeginID].Tags, connectionCounter, stat.Timers[i].Tags, flowBeginID, flowBeginPID, flowBeginTID, i, pid, tid) result = append(result, jsonNode{ Name: fmt.Sprintf("/%s/%d/%d/flow", stat.Tags, connectionCounter, streamID), Cat: categories + ",flow", ID: flowID, Cname: hashCname("flow"), Phase: "s", Timestamp: catapultNs(stat.Timers[flowBeginID].EndSec-baseSec, stat.Timers[flowBeginID].EndNsec-baseNsec), PID: flowBeginPID, TID: flowBeginTID, }, jsonNode{ Name: fmt.Sprintf("/%s/%d/%d/flow", stat.Tags, connectionCounter, streamID), Cat: categories + ",flow", ID: flowID, Cname: hashCname("flow"), Phase: "f", Timestamp: catapultNs(stat.Timers[i].BeginSec-baseSec, stat.Timers[i].BeginNsec-baseNsec), PID: pid, TID: tid, }, ) } } result = append(result, jsonNode{ Name: fmt.Sprintf("%s%s", opid, stat.Timers[i].Tags), Cat: categories, ID: opid, Cname: hashCname(stat.Timers[i].Tags), Phase: "B", Timestamp: catapultNs(stat.Timers[i].BeginSec-baseSec, stat.Timers[i].BeginNsec-baseNsec), PID: pid, TID: tid, }, jsonNode{ 
Name: fmt.Sprintf("%s%s", opid, stat.Timers[i].Tags), Cat: categories, ID: opid, Cname: hashCname(stat.Timers[i].Tags), Phase: "E", Timestamp: catapultNs(stat.Timers[i].EndSec-baseSec, stat.Timers[i].EndNsec-baseNsec), PID: pid, TID: tid, }, ) } return result } // timerBeginIsBefore compares two proto Timer objects to determine if the // first comes before the second chronologically. func timerBeginIsBefore(ti *ppb.Timer, tj *ppb.Timer) bool { if ti.BeginSec == tj.BeginSec { return ti.BeginNsec < tj.BeginNsec } return ti.BeginSec < tj.BeginSec } // streamStatsCatapulJSON receives a *snapshot and the name of a JSON file to // write to. The grpc-go profiling snapshot is processed and converted to a // JSON format that can be understood by trace-viewer. func streamStatsCatapultJSON(s *snapshot, streamStatsCatapultJSONFileName string) (err error) { grpclog.Infof("calculating stream stats filters") filterArray := strings.Split(*flagStreamStatsFilter, ",") filter := make(map[string]bool) for _, f := range filterArray { filter[f] = true } grpclog.Infof("filter stream stats for %s", *flagStreamStatsFilter) var streamStats []*ppb.Stat for _, stat := range s.StreamStats { if _, ok := filter[stat.Tags]; ok { streamStats = append(streamStats, stat) } } grpclog.Infof("sorting timers within all stats") for id := range streamStats { sort.Slice(streamStats[id].Timers, func(i, j int) bool { return timerBeginIsBefore(streamStats[id].Timers[i], streamStats[id].Timers[j]) }) } grpclog.Infof("sorting stream stats") sort.Slice(streamStats, func(i, j int) bool { if len(streamStats[j].Timers) == 0 { return true } else if len(streamStats[i].Timers) == 0 { return false } pi := binary.BigEndian.Uint64(streamStats[i].Metadata[0:8]) pj := binary.BigEndian.Uint64(streamStats[j].Metadata[0:8]) if pi == pj { return timerBeginIsBefore(streamStats[i].Timers[0], streamStats[j].Timers[0]) } return pi < pj }) // Clip the last stat as it's from the /Profiling/GetStreamStats call that we // made to 
retrieve the stats themselves. This likely happened millions of // nanoseconds after the last stream we want to profile, so it'd just make // the catapult graph less readable. if len(streamStats) > 0 { streamStats = streamStats[:len(streamStats)-1] } // All timestamps use the earliest timestamp available as the reference. grpclog.Infof("calculating the earliest timestamp across all timers") var base *ppb.Timer for _, stat := range streamStats { for _, timer := range stat.Timers { if base == nil || timerBeginIsBefore(base, timer) { base = timer } } } grpclog.Infof("converting %d stats to catapult JSON format", len(streamStats)) var jsonNodes []jsonNode for _, stat := range streamStats { jsonNodes = append(jsonNodes, streamStatsCatapultJSONSingle(stat, base.BeginSec, base.BeginNsec)...) } grpclog.Infof("marshalling catapult JSON") b, err := json.Marshal(jsonNodes) if err != nil { grpclog.Errorf("cannot marshal JSON: %v", err) return err } grpclog.Infof("creating catapult JSON file") streamStatsCatapultJSONFile, err := os.Create(streamStatsCatapultJSONFileName) if err != nil { grpclog.Errorf("cannot create file %s: %v", streamStatsCatapultJSONFileName, err) return err } defer streamStatsCatapultJSONFile.Close() grpclog.Infof("writing catapult JSON to disk") _, err = streamStatsCatapultJSONFile.Write(b) if err != nil { grpclog.Errorf("cannot write marshalled JSON: %v", err) return err } grpclog.Infof("successfully wrote catapult JSON file %s", streamStatsCatapultJSONFileName) return nil } grpc-go-1.29.1/profiling/cmd/flags.go000066400000000000000000000053211365033716300173310ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package main import ( "flag" "fmt" ) var flagAddress = flag.String("address", "", "address of a remote gRPC server with profiling turned on to retrieve stats from") var flagTimeout = flag.Int("timeout", 0, "network operations timeout in seconds to remote target (0 indicates unlimited)") var flagRetrieveSnapshot = flag.Bool("retrieve-snapshot", false, "connect to remote target and retrieve a profiling snapshot locally for processing") var flagSnapshot = flag.String("snapshot", "", "snapshot file to write to when retrieving profiling data or snapshot file to read from when processing profiling data") var flagEnableProfiling = flag.Bool("enable-profiling", false, "enable profiling in remote target") var flagDisableProfiling = flag.Bool("disable-profiling", false, "disable profiling in remote target") var flagStreamStatsCatapultJSON = flag.String("stream-stats-catapult-json", "", "path to a file to write to after transforming a snapshot into catapult's JSON format") var flagStreamStatsFilter = flag.String("stream-stats-filter", "server,client", "comma-separated list of stat tags to filter for") func exactlyOneOf(opts ...bool) bool { first := true for _, o := range opts { if !o { continue } if first { first = false } else { return false } } return !first } func parseArgs() error { flag.Parse() if *flagAddress != "" { if !exactlyOneOf(*flagEnableProfiling, *flagDisableProfiling, *flagRetrieveSnapshot) { return fmt.Errorf("when -address is specified, you must include exactly only one of -enable-profiling, -disable-profiling, and -retrieve-snapshot") } if 
*flagStreamStatsCatapultJSON != "" { return fmt.Errorf("when -address is specified, you must not include -stream-stats-catapult-json") } } else { if *flagEnableProfiling || *flagDisableProfiling || *flagRetrieveSnapshot { return fmt.Errorf("when -address isn't specified, you must not include any of -enable-profiling, -disable-profiling, and -retrieve-snapshot") } if *flagStreamStatsCatapultJSON == "" { return fmt.Errorf("when -address isn't specified, you must include -stream-stats-catapult-json") } } return nil } grpc-go-1.29.1/profiling/cmd/local.go000066400000000000000000000032701365033716300173300ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package main import ( "encoding/gob" "fmt" "os" "google.golang.org/grpc/grpclog" ) func loadSnapshot(snapshotFileName string) (*snapshot, error) { grpclog.Infof("opening snapshot file %s", snapshotFileName) snapshotFile, err := os.Open(snapshotFileName) if err != nil { grpclog.Errorf("cannot open %s: %v", snapshotFileName, err) return nil, err } defer snapshotFile.Close() grpclog.Infof("decoding snapshot file %s", snapshotFileName) s := &snapshot{} decoder := gob.NewDecoder(snapshotFile) if err = decoder.Decode(s); err != nil { grpclog.Errorf("cannot decode %s: %v", snapshotFileName, err) return nil, err } return s, nil } func localCommand() error { if *flagSnapshot == "" { return fmt.Errorf("-snapshot flag missing") } s, err := loadSnapshot(*flagSnapshot) if err != nil { return err } if *flagStreamStatsCatapultJSON == "" { return fmt.Errorf("snapshot file specified without an action to perform") } if *flagStreamStatsCatapultJSON != "" { if err = streamStatsCatapultJSON(s, *flagStreamStatsCatapultJSON); err != nil { return err } } return nil } grpc-go-1.29.1/profiling/cmd/main.go000066400000000000000000000023321365033716300171600ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Binary cmd is a command-line tool for profiling management. It retrieves and // processes data from the profiling service. 
package main import ( "os" "google.golang.org/grpc/grpclog" ppb "google.golang.org/grpc/profiling/proto" ) type snapshot struct { StreamStats []*ppb.Stat } func main() { if err := parseArgs(); err != nil { grpclog.Errorf("error parsing flags: %v", err) os.Exit(1) } if *flagAddress != "" { if err := remoteCommand(); err != nil { grpclog.Errorf("error: %v", err) os.Exit(1) } } else { if err := localCommand(); err != nil { grpclog.Errorf("error: %v", err) os.Exit(1) } } } grpc-go-1.29.1/profiling/cmd/remote.go000066400000000000000000000050561365033716300175350ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package main import ( "context" "encoding/gob" "fmt" "os" "time" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" ppb "google.golang.org/grpc/profiling/proto" ) func setEnabled(ctx context.Context, c ppb.ProfilingClient, enabled bool) error { _, err := c.Enable(ctx, &ppb.EnableRequest{Enabled: enabled}) if err != nil { grpclog.Infof("error calling Enable: %v\n", err) return err } grpclog.Infof("successfully set enabled = %v", enabled) return nil } func retrieveSnapshot(ctx context.Context, c ppb.ProfilingClient, f string) error { grpclog.Infof("getting stream stats") resp, err := c.GetStreamStats(ctx, &ppb.GetStreamStatsRequest{}) if err != nil { grpclog.Errorf("error calling GetStreamStats: %v\n", err) return err } s := &snapshot{StreamStats: resp.StreamStats} grpclog.Infof("creating snapshot file %s", f) file, err := os.Create(f) if err != nil { grpclog.Errorf("cannot create %s: %v", f, err) return err } defer file.Close() grpclog.Infof("encoding data and writing to snapshot file %s", f) encoder := gob.NewEncoder(file) err = encoder.Encode(s) if err != nil { grpclog.Infof("error encoding: %v", err) return err } grpclog.Infof("successfully wrote profiling snapshot to %s", f) return nil } func remoteCommand() error { ctx := context.Background() if *flagTimeout > 0 { var cancel func() ctx, cancel = context.WithTimeout(context.Background(), time.Duration(*flagTimeout)*time.Second) defer cancel() } grpclog.Infof("dialing %s", *flagAddress) cc, err := grpc.Dial(*flagAddress, grpc.WithInsecure()) if err != nil { grpclog.Errorf("cannot dial %s: %v", *flagAddress, err) return err } defer cc.Close() c := ppb.NewProfilingClient(cc) if *flagEnableProfiling || *flagDisableProfiling { return setEnabled(ctx, c, *flagEnableProfiling) } else if *flagRetrieveSnapshot { return retrieveSnapshot(ctx, c, *flagSnapshot) } else { return fmt.Errorf("what should I do with the remote target?") } } 
grpc-go-1.29.1/profiling/profiling.go000066400000000000000000000024031365033716300174610ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package profiling exposes methods to manage profiling within gRPC. // // This package and all its methods are EXPERIMENTAL. package profiling import ( internal "google.golang.org/grpc/internal/profiling" ) // Enable turns profiling on and off. This operation is safe for concurrent // access from different goroutines. // // Note that this is the only operation that's accessible through the publicly // exposed profiling package. Everything else (such as retrieving stats) must // be done through the profiling service. This is allowed so that users can use // heuristics to turn profiling on and off automatically. func Enable(enabled bool) { internal.Enable(enabled) } grpc-go-1.29.1/profiling/proto/000077500000000000000000000000001365033716300163055ustar00rootroot00000000000000grpc-go-1.29.1/profiling/proto/service.pb.go000066400000000000000000000443221365033716300207010ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: service.proto package proto import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // EnableRequest defines the fields in a /Profiling/Enable method request to // toggle profiling on and off within a gRPC program. type EnableRequest struct { // Setting this to true will enable profiling. Setting this to false will // disable profiling. Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EnableRequest) Reset() { *m = EnableRequest{} } func (m *EnableRequest) String() string { return proto.CompactTextString(m) } func (*EnableRequest) ProtoMessage() {} func (*EnableRequest) Descriptor() ([]byte, []int) { return fileDescriptor_a0b84a42fa06f626, []int{0} } func (m *EnableRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnableRequest.Unmarshal(m, b) } func (m *EnableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnableRequest.Marshal(b, m, deterministic) } func (m *EnableRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_EnableRequest.Merge(m, src) } func (m *EnableRequest) XXX_Size() int { return xxx_messageInfo_EnableRequest.Size(m) } func (m *EnableRequest) XXX_DiscardUnknown() { xxx_messageInfo_EnableRequest.DiscardUnknown(m) } var xxx_messageInfo_EnableRequest proto.InternalMessageInfo func (m *EnableRequest) GetEnabled() bool { if m != nil { return m.Enabled } return false } // EnableResponse defines the fields in a /Profiling/Enable method response. 
type EnableResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EnableResponse) Reset() { *m = EnableResponse{} } func (m *EnableResponse) String() string { return proto.CompactTextString(m) } func (*EnableResponse) ProtoMessage() {} func (*EnableResponse) Descriptor() ([]byte, []int) { return fileDescriptor_a0b84a42fa06f626, []int{1} } func (m *EnableResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EnableResponse.Unmarshal(m, b) } func (m *EnableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EnableResponse.Marshal(b, m, deterministic) } func (m *EnableResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_EnableResponse.Merge(m, src) } func (m *EnableResponse) XXX_Size() int { return xxx_messageInfo_EnableResponse.Size(m) } func (m *EnableResponse) XXX_DiscardUnknown() { xxx_messageInfo_EnableResponse.DiscardUnknown(m) } var xxx_messageInfo_EnableResponse proto.InternalMessageInfo // GetStreamStatsRequest defines the fields in a /Profiling/GetStreamStats // method request to retrieve stream-level stats in a gRPC client/server. 
type GetStreamStatsRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetStreamStatsRequest) Reset() { *m = GetStreamStatsRequest{} } func (m *GetStreamStatsRequest) String() string { return proto.CompactTextString(m) } func (*GetStreamStatsRequest) ProtoMessage() {} func (*GetStreamStatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_a0b84a42fa06f626, []int{2} } func (m *GetStreamStatsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetStreamStatsRequest.Unmarshal(m, b) } func (m *GetStreamStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetStreamStatsRequest.Marshal(b, m, deterministic) } func (m *GetStreamStatsRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetStreamStatsRequest.Merge(m, src) } func (m *GetStreamStatsRequest) XXX_Size() int { return xxx_messageInfo_GetStreamStatsRequest.Size(m) } func (m *GetStreamStatsRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetStreamStatsRequest.DiscardUnknown(m) } var xxx_messageInfo_GetStreamStatsRequest proto.InternalMessageInfo // GetStreamStatsResponse defines the fields in a /Profiling/GetStreamStats // method response. 
type GetStreamStatsResponse struct { StreamStats []*Stat `protobuf:"bytes,1,rep,name=stream_stats,json=streamStats,proto3" json:"stream_stats,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GetStreamStatsResponse) Reset() { *m = GetStreamStatsResponse{} } func (m *GetStreamStatsResponse) String() string { return proto.CompactTextString(m) } func (*GetStreamStatsResponse) ProtoMessage() {} func (*GetStreamStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_a0b84a42fa06f626, []int{3} } func (m *GetStreamStatsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetStreamStatsResponse.Unmarshal(m, b) } func (m *GetStreamStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetStreamStatsResponse.Marshal(b, m, deterministic) } func (m *GetStreamStatsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetStreamStatsResponse.Merge(m, src) } func (m *GetStreamStatsResponse) XXX_Size() int { return xxx_messageInfo_GetStreamStatsResponse.Size(m) } func (m *GetStreamStatsResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetStreamStatsResponse.DiscardUnknown(m) } var xxx_messageInfo_GetStreamStatsResponse proto.InternalMessageInfo func (m *GetStreamStatsResponse) GetStreamStats() []*Stat { if m != nil { return m.StreamStats } return nil } // A Timer measures the start and end of execution of a component within // gRPC that's being profiled. It includes a tag and some additional metadata // to identify itself. type Timer struct { // tags is a comma-separated list of strings used to tag a timer. Tags string `protobuf:"bytes,1,opt,name=tags,proto3" json:"tags,omitempty"` // begin_sec and begin_nsec are the start epoch second and nanosecond, // respectively, of the component profiled by this timer in UTC. begin_nsec // must be a non-negative integer. 
BeginSec int64 `protobuf:"varint,2,opt,name=begin_sec,json=beginSec,proto3" json:"begin_sec,omitempty"` BeginNsec int32 `protobuf:"varint,3,opt,name=begin_nsec,json=beginNsec,proto3" json:"begin_nsec,omitempty"` // end_sec and end_nsec are the end epoch second and nanosecond, // respectively, of the component profiled by this timer in UTC. end_nsec // must be a non-negative integer. EndSec int64 `protobuf:"varint,4,opt,name=end_sec,json=endSec,proto3" json:"end_sec,omitempty"` EndNsec int32 `protobuf:"varint,5,opt,name=end_nsec,json=endNsec,proto3" json:"end_nsec,omitempty"` // go_id is the goroutine ID of the component being profiled. GoId int64 `protobuf:"varint,6,opt,name=go_id,json=goId,proto3" json:"go_id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Timer) Reset() { *m = Timer{} } func (m *Timer) String() string { return proto.CompactTextString(m) } func (*Timer) ProtoMessage() {} func (*Timer) Descriptor() ([]byte, []int) { return fileDescriptor_a0b84a42fa06f626, []int{4} } func (m *Timer) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Timer.Unmarshal(m, b) } func (m *Timer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Timer.Marshal(b, m, deterministic) } func (m *Timer) XXX_Merge(src proto.Message) { xxx_messageInfo_Timer.Merge(m, src) } func (m *Timer) XXX_Size() int { return xxx_messageInfo_Timer.Size(m) } func (m *Timer) XXX_DiscardUnknown() { xxx_messageInfo_Timer.DiscardUnknown(m) } var xxx_messageInfo_Timer proto.InternalMessageInfo func (m *Timer) GetTags() string { if m != nil { return m.Tags } return "" } func (m *Timer) GetBeginSec() int64 { if m != nil { return m.BeginSec } return 0 } func (m *Timer) GetBeginNsec() int32 { if m != nil { return m.BeginNsec } return 0 } func (m *Timer) GetEndSec() int64 { if m != nil { return m.EndSec } return 0 } func (m *Timer) GetEndNsec() int32 { if m != nil { return m.EndNsec } 
return 0 } func (m *Timer) GetGoId() int64 { if m != nil { return m.GoId } return 0 } // A Stat is a collection of Timers along with some additional // metadata to tag and identify itself. type Stat struct { // tags is a comma-separated list of strings used to categorize a stat. Tags string `protobuf:"bytes,1,opt,name=tags,proto3" json:"tags,omitempty"` // timers is an array of Timers, each representing a different // (but possibly overlapping) component within this stat. Timers []*Timer `protobuf:"bytes,2,rep,name=timers,proto3" json:"timers,omitempty"` // metadata is an array of bytes used to uniquely identify a stat with an // undefined encoding format. For example, the Stats returned by the // /Profiling/GetStreamStats service use the metadata field to encode the // connection ID and the stream ID of each query. Metadata []byte `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Stat) Reset() { *m = Stat{} } func (m *Stat) String() string { return proto.CompactTextString(m) } func (*Stat) ProtoMessage() {} func (*Stat) Descriptor() ([]byte, []int) { return fileDescriptor_a0b84a42fa06f626, []int{5} } func (m *Stat) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Stat.Unmarshal(m, b) } func (m *Stat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Stat.Marshal(b, m, deterministic) } func (m *Stat) XXX_Merge(src proto.Message) { xxx_messageInfo_Stat.Merge(m, src) } func (m *Stat) XXX_Size() int { return xxx_messageInfo_Stat.Size(m) } func (m *Stat) XXX_DiscardUnknown() { xxx_messageInfo_Stat.DiscardUnknown(m) } var xxx_messageInfo_Stat proto.InternalMessageInfo func (m *Stat) GetTags() string { if m != nil { return m.Tags } return "" } func (m *Stat) GetTimers() []*Timer { if m != nil { return m.Timers } return nil } func (m *Stat) GetMetadata() []byte { if m != nil { return m.Metadata } 
return nil } func init() { proto.RegisterType((*EnableRequest)(nil), "grpc.go.profiling.v1alpha.EnableRequest") proto.RegisterType((*EnableResponse)(nil), "grpc.go.profiling.v1alpha.EnableResponse") proto.RegisterType((*GetStreamStatsRequest)(nil), "grpc.go.profiling.v1alpha.GetStreamStatsRequest") proto.RegisterType((*GetStreamStatsResponse)(nil), "grpc.go.profiling.v1alpha.GetStreamStatsResponse") proto.RegisterType((*Timer)(nil), "grpc.go.profiling.v1alpha.Timer") proto.RegisterType((*Stat)(nil), "grpc.go.profiling.v1alpha.Stat") } func init() { proto.RegisterFile("service.proto", fileDescriptor_a0b84a42fa06f626) } var fileDescriptor_a0b84a42fa06f626 = []byte{ // 388 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0x4f, 0xcf, 0xd2, 0x40, 0x10, 0xc6, 0x53, 0x68, 0x6b, 0x19, 0xfe, 0xc4, 0xac, 0x51, 0x0a, 0xc6, 0xd8, 0xf4, 0x60, 0xca, 0xa5, 0x08, 0x5e, 0x3c, 0x93, 0x18, 0xe3, 0xc5, 0x98, 0xc5, 0x93, 0xd1, 0x90, 0xa5, 0x1d, 0xd7, 0x26, 0xa5, 0x5b, 0xbb, 0x0b, 0x9f, 0xc7, 0xaf, 0xe6, 0x37, 0x31, 0x3b, 0x05, 0x0c, 0x6f, 0x78, 0xc9, 0xfb, 0x9e, 0x60, 0x66, 0x9e, 0xdf, 0xd3, 0x67, 0x32, 0x0b, 0x43, 0x8d, 0xcd, 0xa1, 0xc8, 0x30, 0xad, 0x1b, 0x65, 0x14, 0x9b, 0xc8, 0xa6, 0xce, 0x52, 0xa9, 0x6c, 0xf9, 0xb3, 0x28, 0x8b, 0x4a, 0xa6, 0x87, 0x85, 0x28, 0xeb, 0x5f, 0x22, 0x9e, 0xc1, 0xf0, 0x43, 0x25, 0xb6, 0x25, 0x72, 0xfc, 0xbd, 0x47, 0x6d, 0x58, 0x08, 0x4f, 0x90, 0x1a, 0x79, 0xe8, 0x44, 0x4e, 0x12, 0xf0, 0x53, 0x19, 0x3f, 0x85, 0xd1, 0x49, 0xaa, 0x6b, 0x55, 0x69, 0x8c, 0xc7, 0xf0, 0xfc, 0x23, 0x9a, 0xb5, 0x69, 0x50, 0xec, 0xd6, 0x46, 0x18, 0x7d, 0x34, 0x89, 0xbf, 0xc3, 0x8b, 0xbb, 0x83, 0x16, 0x61, 0x2b, 0x18, 0x68, 0x6a, 0x6f, 0xb4, 0xed, 0x87, 0x4e, 0xd4, 0x4d, 0xfa, 0xcb, 0xd7, 0xe9, 0xbd, 0x09, 0x53, 0xcb, 0xf3, 0xbe, 0xfe, 0xef, 0x15, 0xff, 0x71, 0xc0, 0xfb, 0x5a, 0xec, 0xb0, 0x61, 0x0c, 0x5c, 0x23, 0xa4, 0xa6, 0xa4, 0x3d, 0x4e, 0xff, 0xd9, 0x4b, 0xe8, 0x6d, 0x51, 0x16, 0xd5, 0x46, 0x63, 0x16, 0x76, 0x22, 
0x27, 0xe9, 0xf2, 0x80, 0x1a, 0x6b, 0xcc, 0xd8, 0x2b, 0x80, 0x76, 0x58, 0xd9, 0x69, 0x37, 0x72, 0x12, 0x8f, 0xb7, 0xf2, 0xcf, 0x1a, 0x33, 0x36, 0xb6, 0xcb, 0xe7, 0x44, 0xba, 0x44, 0xfa, 0x58, 0xe5, 0x96, 0x9b, 0x40, 0x60, 0x07, 0x44, 0x79, 0x44, 0x59, 0x21, 0x31, 0xcf, 0xc0, 0x93, 0x6a, 0x53, 0xe4, 0xa1, 0x4f, 0x84, 0x2b, 0xd5, 0xa7, 0x3c, 0xae, 0xc1, 0xb5, 0x59, 0xaf, 0x06, 0x7c, 0x0f, 0xbe, 0xb1, 0xe9, 0x75, 0xd8, 0xa1, 0xe5, 0xa3, 0x1b, 0xcb, 0xd3, 0x9a, 0xfc, 0xa8, 0x67, 0x53, 0x08, 0x76, 0x68, 0x44, 0x2e, 0x8c, 0xa0, 0xec, 0x03, 0x7e, 0xae, 0x97, 0x7f, 0x1d, 0xe8, 0x7d, 0x39, 0xf1, 0xec, 0x07, 0xf8, 0xed, 0xad, 0x58, 0x72, 0xc3, 0xfd, 0xe2, 0xf2, 0xd3, 0xd9, 0x03, 0x94, 0xc7, 0x2b, 0xee, 0x61, 0x74, 0x79, 0x5f, 0xf6, 0xf6, 0x06, 0x7c, 0xf5, 0x8d, 0x4c, 0x17, 0x8f, 0x20, 0xda, 0xcf, 0xae, 0x92, 0x6f, 0x6f, 0xa4, 0x52, 0xb2, 0xc4, 0x54, 0xaa, 0x52, 0x54, 0x32, 0x55, 0x8d, 0x9c, 0x5b, 0x97, 0xf9, 0xd9, 0x62, 0x4e, 0x2f, 0x7e, 0xeb, 0xd3, 0xcf, 0xbb, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x13, 0x63, 0x69, 0xce, 0x09, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // ProfilingClient is the client API for Profiling service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type ProfilingClient interface { // Enable allows users to toggle profiling on and off remotely. Enable(ctx context.Context, in *EnableRequest, opts ...grpc.CallOption) (*EnableResponse, error) // GetStreamStats is used to retrieve an array of stream-level stats from a // gRPC client/server. 
GetStreamStats(ctx context.Context, in *GetStreamStatsRequest, opts ...grpc.CallOption) (*GetStreamStatsResponse, error) } type profilingClient struct { cc grpc.ClientConnInterface } func NewProfilingClient(cc grpc.ClientConnInterface) ProfilingClient { return &profilingClient{cc} } func (c *profilingClient) Enable(ctx context.Context, in *EnableRequest, opts ...grpc.CallOption) (*EnableResponse, error) { out := new(EnableResponse) err := c.cc.Invoke(ctx, "/grpc.go.profiling.v1alpha.Profiling/Enable", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *profilingClient) GetStreamStats(ctx context.Context, in *GetStreamStatsRequest, opts ...grpc.CallOption) (*GetStreamStatsResponse, error) { out := new(GetStreamStatsResponse) err := c.cc.Invoke(ctx, "/grpc.go.profiling.v1alpha.Profiling/GetStreamStats", in, out, opts...) if err != nil { return nil, err } return out, nil } // ProfilingServer is the server API for Profiling service. type ProfilingServer interface { // Enable allows users to toggle profiling on and off remotely. Enable(context.Context, *EnableRequest) (*EnableResponse, error) // GetStreamStats is used to retrieve an array of stream-level stats from a // gRPC client/server. GetStreamStats(context.Context, *GetStreamStatsRequest) (*GetStreamStatsResponse, error) } // UnimplementedProfilingServer can be embedded to have forward compatible implementations. 
type UnimplementedProfilingServer struct { } func (*UnimplementedProfilingServer) Enable(ctx context.Context, req *EnableRequest) (*EnableResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Enable not implemented") } func (*UnimplementedProfilingServer) GetStreamStats(ctx context.Context, req *GetStreamStatsRequest) (*GetStreamStatsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetStreamStats not implemented") } func RegisterProfilingServer(s *grpc.Server, srv ProfilingServer) { s.RegisterService(&_Profiling_serviceDesc, srv) } func _Profiling_Enable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(EnableRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ProfilingServer).Enable(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.go.profiling.v1alpha.Profiling/Enable", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProfilingServer).Enable(ctx, req.(*EnableRequest)) } return interceptor(ctx, in, info, handler) } func _Profiling_GetStreamStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetStreamStatsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ProfilingServer).GetStreamStats(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.go.profiling.v1alpha.Profiling/GetStreamStats", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProfilingServer).GetStreamStats(ctx, req.(*GetStreamStatsRequest)) } return interceptor(ctx, in, info, handler) } var _Profiling_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.go.profiling.v1alpha.Profiling", HandlerType: (*ProfilingServer)(nil), Methods: []grpc.MethodDesc{ { 
MethodName: "Enable", Handler: _Profiling_Enable_Handler, }, { MethodName: "GetStreamStats", Handler: _Profiling_GetStreamStats_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "service.proto", } grpc-go-1.29.1/profiling/proto/service.proto000066400000000000000000000071431365033716300210370ustar00rootroot00000000000000// Copyright 2019 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package grpc.go.profiling.v1alpha; // This package defines the proto messages and RPC services exposed by gRPC for // profiling management. A reference client implementation to interact with // this service is provided as a command-line application. This service can be // used to toggle profiling on and off and retrieve stats from a gRPC // application. option go_package = "google.golang.org/grpc/profiling/proto"; // EnableRequest defines the fields in a /Profiling/Enable method request to // toggle profiling on and off within a gRPC program. message EnableRequest { // Setting this to true will enable profiling. Setting this to false will // disable profiling. bool enabled = 1; } // EnableResponse defines the fields in a /Profiling/Enable method response. message EnableResponse { } // GetStreamStatsRequest defines the fields in a /Profiling/GetStreamStats // method request to retrieve stream-level stats in a gRPC client/server. 
message GetStreamStatsRequest { } // GetStreamStatsResponse defines the fields in a /Profiling/GetStreamStats // method response. message GetStreamStatsResponse { repeated Stat stream_stats = 1; } // A Timer measures the start and end of execution of a component within // gRPC that's being profiled. It includes a tag and some additional metadata // to identify itself. message Timer { // tags is a comma-separated list of strings used to tag a timer. string tags = 1; // begin_sec and begin_nsec are the start epoch second and nanosecond, // respectively, of the component profiled by this timer in UTC. begin_nsec // must be a non-negative integer. int64 begin_sec = 2; int32 begin_nsec = 3; // end_sec and end_nsec are the end epoch second and nanosecond, // respectively, of the component profiled by this timer in UTC. end_nsec // must be a non-negative integer. int64 end_sec = 4; int32 end_nsec = 5; // go_id is the goroutine ID of the component being profiled. int64 go_id = 6; } // A Stat is a collection of Timers along with some additional // metadata to tag and identify itself. message Stat { // tags is a comma-separated list of strings used to categorize a stat. string tags = 1; // timers is an array of Timers, each representing a different // (but possibly overlapping) component within this stat. repeated Timer timers = 2; // metadata is an array of bytes used to uniquely identify a stat with an // undefined encoding format. For example, the Stats returned by the // /Profiling/GetStreamStats service use the metadata field to encode the // connection ID and the stream ID of each query. bytes metadata = 3; } // The Profiling service exposes functions to remotely manage the gRPC // profiling behaviour in a program. service Profiling { // Enable allows users to toggle profiling on and off remotely. rpc Enable (EnableRequest) returns (EnableResponse); // GetStreamStats is used to retrieve an array of stream-level stats from a // gRPC client/server. 
rpc GetStreamStats (GetStreamStatsRequest) returns (GetStreamStatsResponse); } grpc-go-1.29.1/profiling/service/000077500000000000000000000000001365033716300166025ustar00rootroot00000000000000grpc-go-1.29.1/profiling/service/service.go000066400000000000000000000112141365033716300205700ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package service defines methods to register a gRPC client/service for a // profiling service that is exposed in the same server. This service can be // queried by a client to remotely manage the gRPC profiling behaviour of an // application. // // This package and all its methods are EXPERIMENTAL. package service //go:generate protoc --go_out=plugins=grpc,paths=source_relative:../proto -I../proto ../proto/service.proto import ( "context" "errors" "sync" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/profiling" ppb "google.golang.org/grpc/profiling/proto" ) // ProfilingConfig defines configuration options for the Init method. type ProfilingConfig struct { // Setting this to true will enable profiling. Enabled bool // Profiling uses a circular buffer (ring buffer) to store statistics for // only the last few RPCs so that profiling stats do not grow unbounded. This // parameter defines the upper limit on the number of RPCs for which // statistics should be stored at any given time. 
An average RPC requires // approximately 2-3 KiB of memory for profiling-related statistics, so // choose an appropriate number based on the amount of memory you can afford. StreamStatsSize uint32 // To expose the profiling service and its methods, a *grpc.Server must be // provided. Server *grpc.Server } var errorNilServer = errors.New("profiling: no grpc.Server provided") // Init takes a *ProfilingConfig to initialize profiling (turned on/off // depending on the value set in pc.Enabled) and register the profiling service // in the server provided in pc.Server. func Init(pc *ProfilingConfig) error { if pc.Server == nil { return errorNilServer } if err := profiling.InitStats(pc.StreamStatsSize); err != nil { return err } ppb.RegisterProfilingServer(pc.Server, getProfilingServerInstance()) // Do this last after everything has been initialized and allocated. profiling.Enable(pc.Enabled) return nil } type profilingServer struct { drainMutex sync.Mutex } var profilingServerInstance *profilingServer var profilingServerOnce sync.Once // getProfilingServerInstance creates and returns a singleton instance of // profilingServer. Only one instance of profilingServer is created to use a // shared mutex across all profilingServer instances. 
func getProfilingServerInstance() *profilingServer { profilingServerOnce.Do(func() { profilingServerInstance = &profilingServer{} }) return profilingServerInstance } func (s *profilingServer) Enable(ctx context.Context, req *ppb.EnableRequest) (*ppb.EnableResponse, error) { if req.Enabled { grpclog.Infof("profilingServer: Enable: enabling profiling") } else { grpclog.Infof("profilingServer: Enable: disabling profiling") } profiling.Enable(req.Enabled) return &ppb.EnableResponse{}, nil } func timerToProtoTimer(timer *profiling.Timer) *ppb.Timer { return &ppb.Timer{ Tags: timer.Tags, BeginSec: timer.Begin.Unix(), BeginNsec: int32(timer.Begin.Nanosecond()), EndSec: timer.End.Unix(), EndNsec: int32(timer.End.Nanosecond()), GoId: timer.GoID, } } func statToProtoStat(stat *profiling.Stat) *ppb.Stat { protoStat := &ppb.Stat{ Tags: stat.Tags, Timers: make([]*ppb.Timer, 0, len(stat.Timers)), Metadata: stat.Metadata, } for _, t := range stat.Timers { protoStat.Timers = append(protoStat.Timers, timerToProtoTimer(t)) } return protoStat } func (s *profilingServer) GetStreamStats(ctx context.Context, req *ppb.GetStreamStatsRequest) (*ppb.GetStreamStatsResponse, error) { // Since the drain operation is destructive, only one client request should // be served at a time. grpclog.Infof("profilingServer: GetStreamStats: processing request") s.drainMutex.Lock() results := profiling.StreamStats.Drain() s.drainMutex.Unlock() grpclog.Infof("profilingServer: GetStreamStats: returning %v records", len(results)) streamStats := make([]*ppb.Stat, 0) for _, stat := range results { streamStats = append(streamStats, statToProtoStat(stat.(*profiling.Stat))) } return &ppb.GetStreamStatsResponse{StreamStats: streamStats}, nil } grpc-go-1.29.1/proxy.go000066400000000000000000000100521365033716300146570ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "bufio" "context" "encoding/base64" "errors" "fmt" "io" "net" "net/http" "net/http/httputil" "net/url" ) const proxyAuthHeaderKey = "Proxy-Authorization" var ( // errDisabled indicates that proxy is disabled for the address. errDisabled = errors.New("proxy is disabled for the address") // The following variable will be overwritten in the tests. httpProxyFromEnvironment = http.ProxyFromEnvironment ) func mapAddress(ctx context.Context, address string) (*url.URL, error) { req := &http.Request{ URL: &url.URL{ Scheme: "https", Host: address, }, } url, err := httpProxyFromEnvironment(req) if err != nil { return nil, err } if url == nil { return nil, errDisabled } return url, nil } // To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. // It's possible that this reader reads more than what's need for the response and stores // those bytes in the buffer. // bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the // bytes in the buffer. 
type bufConn struct { net.Conn r io.Reader } func (c *bufConn) Read(b []byte) (int, error) { return c.r.Read(b) } func basicAuth(username, password string) string { auth := username + ":" + password return base64.StdEncoding.EncodeToString([]byte(auth)) } func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) { defer func() { if err != nil { conn.Close() } }() req := &http.Request{ Method: http.MethodConnect, URL: &url.URL{Host: backendAddr}, Header: map[string][]string{"User-Agent": {grpcUA}}, } if t := proxyURL.User; t != nil { u := t.Username() p, _ := t.Password() req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) } if err := sendHTTPRequest(ctx, req, conn); err != nil { return nil, fmt.Errorf("failed to write the HTTP request: %v", err) } r := bufio.NewReader(conn) resp, err := http.ReadResponse(r, req) if err != nil { return nil, fmt.Errorf("reading server HTTP response: %v", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { dump, err := httputil.DumpResponse(resp, true) if err != nil { return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } return &bufConn{Conn: conn, r: r}, nil } // newProxyDialer returns a dialer that connects to proxy first if necessary. // The returned dialer checks if a proxy is necessary, dial to the proxy with the // provided dialer, does HTTP CONNECT handshake and returns the connection. 
func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { return func(ctx context.Context, addr string) (conn net.Conn, err error) { var newAddr string proxyURL, err := mapAddress(ctx, addr) if err != nil { if err != errDisabled { return nil, err } newAddr = addr } else { newAddr = proxyURL.Host } conn, err = dialer(ctx, newAddr) if err != nil { return } if proxyURL != nil { // proxy is disabled if proxyURL is nil. conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL) } return } } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { req = req.WithContext(ctx) if err := req.Write(conn); err != nil { return fmt.Errorf("failed to write the HTTP request: %v", err) } return nil } grpc-go-1.29.1/proxy_test.go000066400000000000000000000130751365033716300157260ustar00rootroot00000000000000// +build !race /* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "bufio" "context" "encoding/base64" "fmt" "io" "net" "net/http" "net/url" "testing" "time" ) const ( envTestAddr = "1.2.3.4:8080" envProxyAddr = "2.3.4.5:7687" ) // overwriteAndRestore overwrite function httpProxyFromEnvironment and // returns a function to restore the default values. 
func overwrite(hpfe func(req *http.Request) (*url.URL, error)) func() { backHPFE := httpProxyFromEnvironment httpProxyFromEnvironment = hpfe return func() { httpProxyFromEnvironment = backHPFE } } type proxyServer struct { t *testing.T lis net.Listener in net.Conn out net.Conn requestCheck func(*http.Request) error } func (p *proxyServer) run() { in, err := p.lis.Accept() if err != nil { return } p.in = in req, err := http.ReadRequest(bufio.NewReader(in)) if err != nil { p.t.Errorf("failed to read CONNECT req: %v", err) return } if err := p.requestCheck(req); err != nil { resp := http.Response{StatusCode: http.StatusMethodNotAllowed} resp.Write(p.in) p.in.Close() p.t.Errorf("get wrong CONNECT req: %+v, error: %v", req, err) return } out, err := net.Dial("tcp", req.URL.Host) if err != nil { p.t.Errorf("failed to dial to server: %v", err) return } resp := http.Response{StatusCode: http.StatusOK, Proto: "HTTP/1.0"} resp.Write(p.in) p.out = out go io.Copy(p.in, p.out) go io.Copy(p.out, p.in) } func (p *proxyServer) stop() { p.lis.Close() if p.in != nil { p.in.Close() } if p.out != nil { p.out.Close() } } func testHTTPConnect(t *testing.T, proxyURLModify func(*url.URL) *url.URL, proxyReqCheck func(*http.Request) error) { plis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("failed to listen: %v", err) } p := &proxyServer{ t: t, lis: plis, requestCheck: proxyReqCheck, } go p.run() defer p.stop() blis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("failed to listen: %v", err) } msg := []byte{4, 3, 5, 2} recvBuf := make([]byte, len(msg)) done := make(chan error) go func() { in, err := blis.Accept() if err != nil { done <- err return } defer in.Close() in.Read(recvBuf) done <- nil }() // Overwrite the function in the test and restore them in defer. hpfe := func(req *http.Request) (*url.URL, error) { return proxyURLModify(&url.URL{Host: plis.Addr().String()}), nil } defer overwrite(hpfe)() // Dial to proxy server. 
dialer := newProxyDialer(func(ctx context.Context, addr string) (net.Conn, error) { if deadline, ok := ctx.Deadline(); ok { return net.DialTimeout("tcp", addr, time.Until(deadline)) } return net.Dial("tcp", addr) }) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() c, err := dialer(ctx, blis.Addr().String()) if err != nil { t.Fatalf("http connect Dial failed: %v", err) } defer c.Close() // Send msg on the connection. c.Write(msg) if err := <-done; err != nil { t.Fatalf("failed to accept: %v", err) } // Check received msg. if string(recvBuf) != string(msg) { t.Fatalf("received msg: %v, want %v", recvBuf, msg) } } func (s) TestHTTPConnect(t *testing.T) { testHTTPConnect(t, func(in *url.URL) *url.URL { return in }, func(req *http.Request) error { if req.Method != http.MethodConnect { return fmt.Errorf("unexpected Method %q, want %q", req.Method, http.MethodConnect) } if req.UserAgent() != grpcUA { return fmt.Errorf("unexpect user agent %q, want %q", req.UserAgent(), grpcUA) } return nil }, ) } func (s) TestHTTPConnectBasicAuth(t *testing.T) { const ( user = "notAUser" password = "notAPassword" ) testHTTPConnect(t, func(in *url.URL) *url.URL { in.User = url.UserPassword(user, password) return in }, func(req *http.Request) error { if req.Method != http.MethodConnect { return fmt.Errorf("unexpected Method %q, want %q", req.Method, http.MethodConnect) } if req.UserAgent() != grpcUA { return fmt.Errorf("unexpect user agent %q, want %q", req.UserAgent(), grpcUA) } wantProxyAuthStr := "Basic " + base64.StdEncoding.EncodeToString([]byte(user+":"+password)) if got := req.Header.Get(proxyAuthHeaderKey); got != wantProxyAuthStr { gotDecoded, _ := base64.StdEncoding.DecodeString(got) wantDecoded, _ := base64.StdEncoding.DecodeString(wantProxyAuthStr) return fmt.Errorf("unexpected auth %q (%q), want %q (%q)", got, gotDecoded, wantProxyAuthStr, wantDecoded) } return nil }, ) } func (s) TestMapAddressEnv(t *testing.T) { // Overwrite the function in 
the test and restore them in defer. hpfe := func(req *http.Request) (*url.URL, error) { if req.URL.Host == envTestAddr { return &url.URL{ Scheme: "https", Host: envProxyAddr, }, nil } return nil, nil } defer overwrite(hpfe)() // envTestAddr should be handled by ProxyFromEnvironment. got, err := mapAddress(context.Background(), envTestAddr) if err != nil { t.Error(err) } if got.Host != envProxyAddr { t.Errorf("want %v, got %v", envProxyAddr, got) } } grpc-go-1.29.1/reflection/000077500000000000000000000000001365033716300153035ustar00rootroot00000000000000grpc-go-1.29.1/reflection/README.md000066400000000000000000000007051365033716300165640ustar00rootroot00000000000000# Reflection Package reflection implements server reflection service. The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. To register server reflection on a gRPC server: ```go import "google.golang.org/grpc/reflection" s := grpc.NewServer() pb.RegisterYourOwnServer(s, &server{}) // Register reflection service on gRPC server. reflection.Register(s) s.Serve(lis) ``` grpc-go-1.29.1/reflection/grpc_reflection_v1alpha/000077500000000000000000000000001365033716300220645ustar00rootroot00000000000000grpc-go-1.29.1/reflection/grpc_reflection_v1alpha/reflection.pb.go000066400000000000000000000741471365033716300251620ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc_reflection_v1alpha/reflection.proto package grpc_reflection_v1alpha import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. 
// A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // The message sent by the client when calling ServerReflectionInfo method. type ServerReflectionRequest struct { Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` // To use reflection service, the client should set one of the following // fields in message_request. The server distinguishes requests by their // defined field and then handles them using corresponding methods. // // Types that are valid to be assigned to MessageRequest: // *ServerReflectionRequest_FileByFilename // *ServerReflectionRequest_FileContainingSymbol // *ServerReflectionRequest_FileContainingExtension // *ServerReflectionRequest_AllExtensionNumbersOfType // *ServerReflectionRequest_ListServices MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ServerReflectionRequest) Reset() { *m = ServerReflectionRequest{} } func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) } func (*ServerReflectionRequest) ProtoMessage() {} func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor_42a8ac412db3cb03, []int{0} } func (m *ServerReflectionRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServerReflectionRequest.Unmarshal(m, b) } func (m *ServerReflectionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServerReflectionRequest.Marshal(b, m, deterministic) } func (m *ServerReflectionRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ServerReflectionRequest.Merge(m, src) } func (m *ServerReflectionRequest) XXX_Size() int { return xxx_messageInfo_ServerReflectionRequest.Size(m) } func (m *ServerReflectionRequest) XXX_DiscardUnknown() 
{ xxx_messageInfo_ServerReflectionRequest.DiscardUnknown(m) } var xxx_messageInfo_ServerReflectionRequest proto.InternalMessageInfo func (m *ServerReflectionRequest) GetHost() string { if m != nil { return m.Host } return "" } type isServerReflectionRequest_MessageRequest interface { isServerReflectionRequest_MessageRequest() } type ServerReflectionRequest_FileByFilename struct { FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` } type ServerReflectionRequest_FileContainingSymbol struct { FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` } type ServerReflectionRequest_FileContainingExtension struct { FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` } type ServerReflectionRequest_AllExtensionNumbersOfType struct { AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` } type ServerReflectionRequest_ListServices struct { ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` } func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {} func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { if m != nil { return m.MessageRequest } return nil } func (m *ServerReflectionRequest) GetFileByFilename() string { if x, ok := 
m.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { return x.FileByFilename } return "" } func (m *ServerReflectionRequest) GetFileContainingSymbol() string { if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { return x.FileContainingSymbol } return "" } func (m *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { return x.FileContainingExtension } return nil } func (m *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { return x.AllExtensionNumbersOfType } return "" } func (m *ServerReflectionRequest) GetListServices() string { if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { return x.ListServices } return "" } // XXX_OneofWrappers is for the internal use of the proto package. func (*ServerReflectionRequest) XXX_OneofWrappers() []interface{} { return []interface{}{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), (*ServerReflectionRequest_FileContainingExtension)(nil), (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), (*ServerReflectionRequest_ListServices)(nil), } } // The type name and extension number sent by the client when requesting // file_containing_extension. type ExtensionRequest struct { // Fully-qualified type name. The format should be . 
ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ExtensionRequest) Reset() { *m = ExtensionRequest{} } func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) } func (*ExtensionRequest) ProtoMessage() {} func (*ExtensionRequest) Descriptor() ([]byte, []int) { return fileDescriptor_42a8ac412db3cb03, []int{1} } func (m *ExtensionRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExtensionRequest.Unmarshal(m, b) } func (m *ExtensionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ExtensionRequest.Marshal(b, m, deterministic) } func (m *ExtensionRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ExtensionRequest.Merge(m, src) } func (m *ExtensionRequest) XXX_Size() int { return xxx_messageInfo_ExtensionRequest.Size(m) } func (m *ExtensionRequest) XXX_DiscardUnknown() { xxx_messageInfo_ExtensionRequest.DiscardUnknown(m) } var xxx_messageInfo_ExtensionRequest proto.InternalMessageInfo func (m *ExtensionRequest) GetContainingType() string { if m != nil { return m.ContainingType } return "" } func (m *ExtensionRequest) GetExtensionNumber() int32 { if m != nil { return m.ExtensionNumber } return 0 } // The message sent by the server to answer ServerReflectionInfo method. type ServerReflectionResponse struct { ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` // The server sets one of the following fields according to the // message_request in the request. 
// // Types that are valid to be assigned to MessageResponse: // *ServerReflectionResponse_FileDescriptorResponse // *ServerReflectionResponse_AllExtensionNumbersResponse // *ServerReflectionResponse_ListServicesResponse // *ServerReflectionResponse_ErrorResponse MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ServerReflectionResponse) Reset() { *m = ServerReflectionResponse{} } func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) } func (*ServerReflectionResponse) ProtoMessage() {} func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor_42a8ac412db3cb03, []int{2} } func (m *ServerReflectionResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServerReflectionResponse.Unmarshal(m, b) } func (m *ServerReflectionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServerReflectionResponse.Marshal(b, m, deterministic) } func (m *ServerReflectionResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ServerReflectionResponse.Merge(m, src) } func (m *ServerReflectionResponse) XXX_Size() int { return xxx_messageInfo_ServerReflectionResponse.Size(m) } func (m *ServerReflectionResponse) XXX_DiscardUnknown() { xxx_messageInfo_ServerReflectionResponse.DiscardUnknown(m) } var xxx_messageInfo_ServerReflectionResponse proto.InternalMessageInfo func (m *ServerReflectionResponse) GetValidHost() string { if m != nil { return m.ValidHost } return "" } func (m *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { if m != nil { return m.OriginalRequest } return nil } type isServerReflectionResponse_MessageResponse interface { isServerReflectionResponse_MessageResponse() } type ServerReflectionResponse_FileDescriptorResponse struct { FileDescriptorResponse *FileDescriptorResponse 
`protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` } type ServerReflectionResponse_AllExtensionNumbersResponse struct { AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` } type ServerReflectionResponse_ListServicesResponse struct { ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` } type ServerReflectionResponse_ErrorResponse struct { ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` } func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {} func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { } func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { if m != nil { return m.MessageResponse } return nil } func (m *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { return x.FileDescriptorResponse } return nil } func (m *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { return x.AllExtensionNumbersResponse } return nil } func (m *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { return x.ListServicesResponse } return nil } func (m *ServerReflectionResponse) GetErrorResponse() 
*ErrorResponse { if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { return x.ErrorResponse } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*ServerReflectionResponse) XXX_OneofWrappers() []interface{} { return []interface{}{ (*ServerReflectionResponse_FileDescriptorResponse)(nil), (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), (*ServerReflectionResponse_ListServicesResponse)(nil), (*ServerReflectionResponse_ErrorResponse)(nil), } } // Serialized FileDescriptorProto messages sent by the server answering // a file_by_filename, file_containing_symbol, or file_containing_extension // request. type FileDescriptorResponse struct { // Serialized FileDescriptorProto messages. We avoid taking a dependency on // descriptor.proto, which uses proto2 only features, by making them opaque // bytes instead. FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *FileDescriptorResponse) Reset() { *m = FileDescriptorResponse{} } func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) } func (*FileDescriptorResponse) ProtoMessage() {} func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { return fileDescriptor_42a8ac412db3cb03, []int{3} } func (m *FileDescriptorResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FileDescriptorResponse.Unmarshal(m, b) } func (m *FileDescriptorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FileDescriptorResponse.Marshal(b, m, deterministic) } func (m *FileDescriptorResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_FileDescriptorResponse.Merge(m, src) } func (m *FileDescriptorResponse) XXX_Size() int { return xxx_messageInfo_FileDescriptorResponse.Size(m) } 
func (m *FileDescriptorResponse) XXX_DiscardUnknown() { xxx_messageInfo_FileDescriptorResponse.DiscardUnknown(m) } var xxx_messageInfo_FileDescriptorResponse proto.InternalMessageInfo func (m *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { if m != nil { return m.FileDescriptorProto } return nil } // A list of extension numbers sent by the server answering // all_extension_numbers_of_type request. type ExtensionNumberResponse struct { // Full name of the base type, including the package name. The format // is . BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ExtensionNumberResponse) Reset() { *m = ExtensionNumberResponse{} } func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) } func (*ExtensionNumberResponse) ProtoMessage() {} func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { return fileDescriptor_42a8ac412db3cb03, []int{4} } func (m *ExtensionNumberResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ExtensionNumberResponse.Unmarshal(m, b) } func (m *ExtensionNumberResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ExtensionNumberResponse.Marshal(b, m, deterministic) } func (m *ExtensionNumberResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ExtensionNumberResponse.Merge(m, src) } func (m *ExtensionNumberResponse) XXX_Size() int { return xxx_messageInfo_ExtensionNumberResponse.Size(m) } func (m *ExtensionNumberResponse) XXX_DiscardUnknown() { xxx_messageInfo_ExtensionNumberResponse.DiscardUnknown(m) } var xxx_messageInfo_ExtensionNumberResponse proto.InternalMessageInfo func (m *ExtensionNumberResponse) 
GetBaseTypeName() string { if m != nil { return m.BaseTypeName } return "" } func (m *ExtensionNumberResponse) GetExtensionNumber() []int32 { if m != nil { return m.ExtensionNumber } return nil } // A list of ServiceResponse sent by the server answering list_services request. type ListServiceResponse struct { // The information of each service may be expanded in the future, so we use // ServiceResponse message to encapsulate it. Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ListServiceResponse) Reset() { *m = ListServiceResponse{} } func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) } func (*ListServiceResponse) ProtoMessage() {} func (*ListServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptor_42a8ac412db3cb03, []int{5} } func (m *ListServiceResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListServiceResponse.Unmarshal(m, b) } func (m *ListServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ListServiceResponse.Marshal(b, m, deterministic) } func (m *ListServiceResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ListServiceResponse.Merge(m, src) } func (m *ListServiceResponse) XXX_Size() int { return xxx_messageInfo_ListServiceResponse.Size(m) } func (m *ListServiceResponse) XXX_DiscardUnknown() { xxx_messageInfo_ListServiceResponse.DiscardUnknown(m) } var xxx_messageInfo_ListServiceResponse proto.InternalMessageInfo func (m *ListServiceResponse) GetService() []*ServiceResponse { if m != nil { return m.Service } return nil } // The information of a single service used by ListServiceResponse to answer // list_services request. type ServiceResponse struct { // Full name of a registered service, including its package name. The format // is . 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ServiceResponse) Reset() { *m = ServiceResponse{} } func (m *ServiceResponse) String() string { return proto.CompactTextString(m) } func (*ServiceResponse) ProtoMessage() {} func (*ServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptor_42a8ac412db3cb03, []int{6} } func (m *ServiceResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServiceResponse.Unmarshal(m, b) } func (m *ServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServiceResponse.Marshal(b, m, deterministic) } func (m *ServiceResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ServiceResponse.Merge(m, src) } func (m *ServiceResponse) XXX_Size() int { return xxx_messageInfo_ServiceResponse.Size(m) } func (m *ServiceResponse) XXX_DiscardUnknown() { xxx_messageInfo_ServiceResponse.DiscardUnknown(m) } var xxx_messageInfo_ServiceResponse proto.InternalMessageInfo func (m *ServiceResponse) GetName() string { if m != nil { return m.Name } return "" } // The error code and error message sent by the server when an error occurs. type ErrorResponse struct { // This field uses the error codes defined in grpc::StatusCode. 
ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ErrorResponse) Reset() { *m = ErrorResponse{} } func (m *ErrorResponse) String() string { return proto.CompactTextString(m) } func (*ErrorResponse) ProtoMessage() {} func (*ErrorResponse) Descriptor() ([]byte, []int) { return fileDescriptor_42a8ac412db3cb03, []int{7} } func (m *ErrorResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ErrorResponse.Unmarshal(m, b) } func (m *ErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ErrorResponse.Marshal(b, m, deterministic) } func (m *ErrorResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ErrorResponse.Merge(m, src) } func (m *ErrorResponse) XXX_Size() int { return xxx_messageInfo_ErrorResponse.Size(m) } func (m *ErrorResponse) XXX_DiscardUnknown() { xxx_messageInfo_ErrorResponse.DiscardUnknown(m) } var xxx_messageInfo_ErrorResponse proto.InternalMessageInfo func (m *ErrorResponse) GetErrorCode() int32 { if m != nil { return m.ErrorCode } return 0 } func (m *ErrorResponse) GetErrorMessage() string { if m != nil { return m.ErrorMessage } return "" } func init() { proto.RegisterType((*ServerReflectionRequest)(nil), "grpc.reflection.v1alpha.ServerReflectionRequest") proto.RegisterType((*ExtensionRequest)(nil), "grpc.reflection.v1alpha.ExtensionRequest") proto.RegisterType((*ServerReflectionResponse)(nil), "grpc.reflection.v1alpha.ServerReflectionResponse") proto.RegisterType((*FileDescriptorResponse)(nil), "grpc.reflection.v1alpha.FileDescriptorResponse") proto.RegisterType((*ExtensionNumberResponse)(nil), "grpc.reflection.v1alpha.ExtensionNumberResponse") proto.RegisterType((*ListServiceResponse)(nil), 
"grpc.reflection.v1alpha.ListServiceResponse") proto.RegisterType((*ServiceResponse)(nil), "grpc.reflection.v1alpha.ServiceResponse") proto.RegisterType((*ErrorResponse)(nil), "grpc.reflection.v1alpha.ErrorResponse") } func init() { proto.RegisterFile("grpc_reflection_v1alpha/reflection.proto", fileDescriptor_42a8ac412db3cb03) } var fileDescriptor_42a8ac412db3cb03 = []byte{ // 656 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x73, 0xd2, 0x40, 0x10, 0x6e, 0x5a, 0x68, 0x87, 0x85, 0x02, 0x5e, 0x2b, 0xa4, 0x3a, 0x75, 0x98, 0x68, 0x35, 0x75, 0x1c, 0xda, 0xe2, 0x8c, 0x3f, 0x80, 0xaa, 0x83, 0x33, 0xb5, 0x75, 0x0e, 0x5f, 0x1c, 0x1f, 0x6e, 0x02, 0x2c, 0x34, 0x1a, 0x72, 0xf1, 0x2e, 0x45, 0x79, 0xf2, 0x47, 0xf8, 0xa3, 0xfc, 0x4b, 0x3e, 0x3a, 0x77, 0x09, 0x21, 0xa4, 0x44, 0xa7, 0x4f, 0x30, 0xdf, 0xee, 0xde, 0xb7, 0xbb, 0xdf, 0xb7, 0x01, 0x7b, 0x22, 0x82, 0x21, 0x13, 0x38, 0xf6, 0x70, 0x18, 0xba, 0xdc, 0x67, 0xb3, 0x33, 0xc7, 0x0b, 0xae, 0x9d, 0x93, 0x25, 0xd4, 0x0e, 0x04, 0x0f, 0x39, 0x69, 0xaa, 0xcc, 0x76, 0x0a, 0x8e, 0x33, 0xad, 0x3f, 0x9b, 0xd0, 0xec, 0xa3, 0x98, 0xa1, 0xa0, 0x49, 0x90, 0xe2, 0xb7, 0x1b, 0x94, 0x21, 0x21, 0x50, 0xb8, 0xe6, 0x32, 0x34, 0x8d, 0x96, 0x61, 0x97, 0xa8, 0xfe, 0x4f, 0x9e, 0x43, 0x7d, 0xec, 0x7a, 0xc8, 0x06, 0x73, 0xa6, 0x7e, 0x7d, 0x67, 0x8a, 0xe6, 0x96, 0x8a, 0xf7, 0x36, 0x68, 0x55, 0x21, 0xdd, 0xf9, 0xdb, 0x18, 0x27, 0xaf, 0xa0, 0xa1, 0x73, 0x87, 0xdc, 0x0f, 0x1d, 0xd7, 0x77, 0xfd, 0x09, 0x93, 0xf3, 0xe9, 0x80, 0x7b, 0x66, 0x21, 0xae, 0xd8, 0x57, 0xf1, 0xf3, 0x24, 0xdc, 0xd7, 0x51, 0x32, 0x81, 0x83, 0x6c, 0x1d, 0xfe, 0x08, 0xd1, 0x97, 0x2e, 0xf7, 0xcd, 0x62, 0xcb, 0xb0, 0xcb, 0x9d, 0xe3, 0x76, 0xce, 0x40, 0xed, 0x37, 0x8b, 0xcc, 0x78, 0x8a, 0xde, 0x06, 0x6d, 0xae, 0xb2, 0x24, 0x19, 0xa4, 0x0b, 0x87, 0x8e, 0xe7, 0x2d, 0x1f, 0x67, 0xfe, 0xcd, 0x74, 0x80, 0x42, 0x32, 0x3e, 0x66, 0xe1, 0x3c, 0x40, 0x73, 0x3b, 0xee, 0xf3, 0xc0, 0xf1, 0xbc, 0xa4, 0xec, 0x32, 0x4a, 0xba, 0x1a, 
0x7f, 0x9c, 0x07, 0x48, 0x8e, 0x60, 0xd7, 0x73, 0x65, 0xc8, 0x24, 0x8a, 0x99, 0x3b, 0x44, 0x69, 0xee, 0xc4, 0x35, 0x15, 0x05, 0xf7, 0x63, 0xb4, 0x7b, 0x0f, 0x6a, 0x53, 0x94, 0xd2, 0x99, 0x20, 0x13, 0x51, 0x63, 0xd6, 0x18, 0xea, 0xd9, 0x66, 0xc9, 0x33, 0xa8, 0xa5, 0xa6, 0xd6, 0x3d, 0x44, 0xdb, 0xaf, 0x2e, 0x61, 0x4d, 0x7b, 0x0c, 0xf5, 0x6c, 0xdb, 0xe6, 0x66, 0xcb, 0xb0, 0x8b, 0xb4, 0x86, 0xab, 0x8d, 0x5a, 0xbf, 0x0b, 0x60, 0xde, 0x96, 0x58, 0x06, 0xdc, 0x97, 0x48, 0x0e, 0x01, 0x66, 0x8e, 0xe7, 0x8e, 0x58, 0x4a, 0xe9, 0x92, 0x46, 0x7a, 0x4a, 0xee, 0xcf, 0x50, 0xe7, 0xc2, 0x9d, 0xb8, 0xbe, 0xe3, 0x2d, 0xfa, 0xd6, 0x34, 0xe5, 0xce, 0x69, 0xae, 0x02, 0x39, 0x76, 0xa2, 0xb5, 0xc5, 0x4b, 0x8b, 0x61, 0xbf, 0x82, 0xa9, 0x75, 0x1e, 0xa1, 0x1c, 0x0a, 0x37, 0x08, 0xb9, 0x60, 0x22, 0xee, 0x4b, 0x3b, 0xa4, 0xdc, 0x39, 0xc9, 0x25, 0x51, 0x26, 0x7b, 0x9d, 0xd4, 0x2d, 0xc6, 0xe9, 0x6d, 0x50, 0x6d, 0xb9, 0xdb, 0x11, 0xf2, 0x1d, 0x1e, 0xad, 0xd7, 0x3a, 0xa1, 0x2c, 0xfe, 0x67, 0xae, 0x8c, 0x01, 0x52, 0x9c, 0x0f, 0xd7, 0xd8, 0x23, 0x21, 0x1e, 0x41, 0x63, 0xc5, 0x20, 0x4b, 0xc2, 0x6d, 0x4d, 0xf8, 0x22, 0x97, 0xf0, 0x62, 0x69, 0xa0, 0x14, 0xd9, 0x7e, 0xda, 0x57, 0x09, 0xcb, 0x15, 0x54, 0x51, 0x88, 0xf4, 0x06, 0x77, 0xf4, 0xeb, 0x4f, 0xf3, 0xc7, 0x51, 0xe9, 0xa9, 0x77, 0x77, 0x31, 0x0d, 0x74, 0x09, 0xd4, 0x97, 0x86, 0x8d, 0x30, 0xeb, 0x02, 0x1a, 0xeb, 0xf7, 0x4e, 0x3a, 0x70, 0x3f, 0x2b, 0xa5, 0xfe, 0xf0, 0x98, 0x46, 0x6b, 0xcb, 0xae, 0xd0, 0xbd, 0x55, 0x51, 0x3e, 0xa8, 0x90, 0xf5, 0x05, 0x9a, 0x39, 0x2b, 0x25, 0x4f, 0xa0, 0x3a, 0x70, 0x24, 0xea, 0x03, 0x60, 0xfa, 0x1b, 0x13, 0x39, 0xb3, 0xa2, 0x50, 0xe5, 0xff, 0x4b, 0xf5, 0x7d, 0x59, 0x7f, 0x03, 0x5b, 0xeb, 0x6e, 0xe0, 0x13, 0xec, 0xad, 0xd9, 0x26, 0xe9, 0xc2, 0x4e, 0x2c, 0x8b, 0x6e, 0xb4, 0xdc, 0xb1, 0xff, 0xe9, 0xea, 0x54, 0x29, 0x5d, 0x14, 0x5a, 0x47, 0x50, 0xcb, 0x3e, 0x4b, 0xa0, 0x90, 0x6a, 0x5a, 0xff, 0xb7, 0xfa, 0xb0, 0xbb, 0xb2, 0x71, 0x75, 0x79, 0x91, 0x62, 0x43, 0x3e, 0x8a, 0x52, 0x8b, 0xb4, 0xa4, 0x91, 0x73, 0x3e, 0x42, 0xf2, 
0x18, 0x22, 0x41, 0x58, 0xac, 0x82, 0x3e, 0xbb, 0x12, 0xad, 0x68, 0xf0, 0x7d, 0x84, 0x75, 0x7e, 0x19, 0x50, 0xcf, 0x9e, 0x1b, 0xf9, 0x09, 0xfb, 0x59, 0xec, 0x9d, 0x3f, 0xe6, 0xe4, 0xce, 0x17, 0xfb, 0xe0, 0xec, 0x0e, 0x15, 0xd1, 0x54, 0xb6, 0x71, 0x6a, 0x0c, 0xb6, 0xb5, 0xf4, 0x2f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x85, 0x02, 0x09, 0x9d, 0x9f, 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // ServerReflectionClient is the client API for ServerReflection service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type ServerReflectionClient interface { // The reflection service is structured as a bidirectional stream, ensuring // all related requests go to a single server. ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) } type serverReflectionClient struct { cc grpc.ClientConnInterface } func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { return &serverReflectionClient{cc} } func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { stream, err := c.cc.NewStream(ctx, &_ServerReflection_serviceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) 
if err != nil { return nil, err } x := &serverReflectionServerReflectionInfoClient{stream} return x, nil } type ServerReflection_ServerReflectionInfoClient interface { Send(*ServerReflectionRequest) error Recv() (*ServerReflectionResponse, error) grpc.ClientStream } type serverReflectionServerReflectionInfoClient struct { grpc.ClientStream } func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { return x.ClientStream.SendMsg(m) } func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { m := new(ServerReflectionResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // ServerReflectionServer is the server API for ServerReflection service. type ServerReflectionServer interface { // The reflection service is structured as a bidirectional stream, ensuring // all related requests go to a single server. ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error } // UnimplementedServerReflectionServer can be embedded to have forward compatible implementations. 
type UnimplementedServerReflectionServer struct { } func (*UnimplementedServerReflectionServer) ServerReflectionInfo(srv ServerReflection_ServerReflectionInfoServer) error { return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") } func RegisterServerReflectionServer(s *grpc.Server, srv ServerReflectionServer) { s.RegisterService(&_ServerReflection_serviceDesc, srv) } func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) } type ServerReflection_ServerReflectionInfoServer interface { Send(*ServerReflectionResponse) error Recv() (*ServerReflectionRequest, error) grpc.ServerStream } type serverReflectionServerReflectionInfoServer struct { grpc.ServerStream } func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { return x.ServerStream.SendMsg(m) } func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { m := new(ServerReflectionRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _ServerReflection_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.reflection.v1alpha.ServerReflection", HandlerType: (*ServerReflectionServer)(nil), Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "ServerReflectionInfo", Handler: _ServerReflection_ServerReflectionInfo_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "grpc_reflection_v1alpha/reflection.proto", } grpc-go-1.29.1/reflection/grpc_reflection_v1alpha/reflection.proto000066400000000000000000000124751365033716300253140ustar00rootroot00000000000000// Copyright 2016 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Service exported by server reflection syntax = "proto3"; package grpc.reflection.v1alpha; service ServerReflection { // The reflection service is structured as a bidirectional stream, ensuring // all related requests go to a single server. rpc ServerReflectionInfo(stream ServerReflectionRequest) returns (stream ServerReflectionResponse); } // The message sent by the client when calling ServerReflectionInfo method. message ServerReflectionRequest { string host = 1; // To use reflection service, the client should set one of the following // fields in message_request. The server distinguishes requests by their // defined field and then handles them using corresponding methods. oneof message_request { // Find a proto file by the file name. string file_by_filename = 3; // Find the proto file that declares the given fully-qualified symbol name. // This field should be a fully-qualified symbol name // (e.g. .[.] or .). string file_containing_symbol = 4; // Find the proto file which defines an extension extending the given // message type with the given field number. ExtensionRequest file_containing_extension = 5; // Finds the tag numbers used by all known extensions of extendee_type, and // appends them to ExtensionNumberResponse in an undefined order. // Its corresponding method is best-effort: it's not guaranteed that the // reflection service will implement this method, and it's not guaranteed // that this method will provide all extensions. Returns // StatusCode::UNIMPLEMENTED if it's not implemented. // This field should be a fully-qualified type name. 
The format is // . string all_extension_numbers_of_type = 6; // List the full names of registered services. The content will not be // checked. string list_services = 7; } } // The type name and extension number sent by the client when requesting // file_containing_extension. message ExtensionRequest { // Fully-qualified type name. The format should be . string containing_type = 1; int32 extension_number = 2; } // The message sent by the server to answer ServerReflectionInfo method. message ServerReflectionResponse { string valid_host = 1; ServerReflectionRequest original_request = 2; // The server sets one of the following fields according to the // message_request in the request. oneof message_response { // This message is used to answer file_by_filename, file_containing_symbol, // file_containing_extension requests with transitive dependencies. // As the repeated label is not allowed in oneof fields, we use a // FileDescriptorResponse message to encapsulate the repeated fields. // The reflection service is allowed to avoid sending FileDescriptorProtos // that were previously sent in response to earlier requests in the stream. FileDescriptorResponse file_descriptor_response = 4; // This message is used to answer all_extension_numbers_of_type requests. ExtensionNumberResponse all_extension_numbers_response = 5; // This message is used to answer list_services requests. ListServiceResponse list_services_response = 6; // This message is used when an error occurs. ErrorResponse error_response = 7; } } // Serialized FileDescriptorProto messages sent by the server answering // a file_by_filename, file_containing_symbol, or file_containing_extension // request. message FileDescriptorResponse { // Serialized FileDescriptorProto messages. We avoid taking a dependency on // descriptor.proto, which uses proto2 only features, by making them opaque // bytes instead. 
repeated bytes file_descriptor_proto = 1; } // A list of extension numbers sent by the server answering // all_extension_numbers_of_type request. message ExtensionNumberResponse { // Full name of the base type, including the package name. The format // is . string base_type_name = 1; repeated int32 extension_number = 2; } // A list of ServiceResponse sent by the server answering list_services request. message ListServiceResponse { // The information of each service may be expanded in the future, so we use // ServiceResponse message to encapsulate it. repeated ServiceResponse service = 1; } // The information of a single service used by ListServiceResponse to answer // list_services request. message ServiceResponse { // Full name of a registered service, including its package name. The format // is . string name = 1; } // The error code and error message sent by the server when an error occurs. message ErrorResponse { // This field uses the error codes defined in grpc::StatusCode. int32 error_code = 1; string error_message = 2; } grpc-go-1.29.1/reflection/grpc_testing/000077500000000000000000000000001365033716300177735ustar00rootroot00000000000000grpc-go-1.29.1/reflection/grpc_testing/proto2.pb.go000066400000000000000000000055251365033716300221560ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto2.proto package grpc_testing import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type ToBeExtended struct { Foo *int32 `protobuf:"varint,1,req,name=foo" json:"foo,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ToBeExtended) Reset() { *m = ToBeExtended{} } func (m *ToBeExtended) String() string { return proto.CompactTextString(m) } func (*ToBeExtended) ProtoMessage() {} func (*ToBeExtended) Descriptor() ([]byte, []int) { return fileDescriptor_1f509089572db8e7, []int{0} } var extRange_ToBeExtended = []proto.ExtensionRange{ {Start: 10, End: 30}, } func (*ToBeExtended) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ToBeExtended } func (m *ToBeExtended) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ToBeExtended.Unmarshal(m, b) } func (m *ToBeExtended) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ToBeExtended.Marshal(b, m, deterministic) } func (m *ToBeExtended) XXX_Merge(src proto.Message) { xxx_messageInfo_ToBeExtended.Merge(m, src) } func (m *ToBeExtended) XXX_Size() int { return xxx_messageInfo_ToBeExtended.Size(m) } func (m *ToBeExtended) XXX_DiscardUnknown() { xxx_messageInfo_ToBeExtended.DiscardUnknown(m) } var xxx_messageInfo_ToBeExtended proto.InternalMessageInfo func (m *ToBeExtended) GetFoo() int32 { if m != nil && m.Foo != nil { return *m.Foo } return 0 } func init() { proto.RegisterType((*ToBeExtended)(nil), "grpc.testing.ToBeExtended") } func init() { proto.RegisterFile("proto2.proto", fileDescriptor_1f509089572db8e7) } var fileDescriptor_1f509089572db8e7 = []byte{ // 86 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x28, 0xca, 0x2f, 0xc9, 0x37, 0xd2, 0x03, 0x53, 0x42, 0x3c, 0xe9, 0x45, 0x05, 0xc9, 0x7a, 0x25, 0xa9, 0xc5, 0x25, 0x99, 0x79, 0xe9, 0x4a, 0x6a, 0x5c, 0x3c, 0x21, 0xf9, 0x4e, 0xa9, 0xae, 0x15, 0x25, 
0xa9, 0x79, 0x29, 0xa9, 0x29, 0x42, 0x02, 0x5c, 0xcc, 0x69, 0xf9, 0xf9, 0x12, 0x8c, 0x0a, 0x4c, 0x1a, 0xac, 0x41, 0x20, 0xa6, 0x16, 0x0b, 0x07, 0x97, 0x80, 0x3c, 0x20, 0x00, 0x00, 0xff, 0xff, 0x74, 0x86, 0x9c, 0x08, 0x44, 0x00, 0x00, 0x00, } grpc-go-1.29.1/reflection/grpc_testing/proto2.proto000066400000000000000000000013041365033716300223030ustar00rootroot00000000000000// Copyright 2017 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto2"; package grpc.testing; message ToBeExtended { required int32 foo = 1; extensions 10 to 30; } grpc-go-1.29.1/reflection/grpc_testing/proto2_ext.pb.go000066400000000000000000000076541365033716300230430ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto2_ext.proto package grpc_testing import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type Extension struct { Whatzit *int32 `protobuf:"varint,1,opt,name=whatzit" json:"whatzit,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Extension) Reset() { *m = Extension{} } func (m *Extension) String() string { return proto.CompactTextString(m) } func (*Extension) ProtoMessage() {} func (*Extension) Descriptor() ([]byte, []int) { return fileDescriptor_85b2817ade17959b, []int{0} } func (m *Extension) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Extension.Unmarshal(m, b) } func (m *Extension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Extension.Marshal(b, m, deterministic) } func (m *Extension) XXX_Merge(src proto.Message) { xxx_messageInfo_Extension.Merge(m, src) } func (m *Extension) XXX_Size() int { return xxx_messageInfo_Extension.Size(m) } func (m *Extension) XXX_DiscardUnknown() { xxx_messageInfo_Extension.DiscardUnknown(m) } var xxx_messageInfo_Extension proto.InternalMessageInfo func (m *Extension) GetWhatzit() int32 { if m != nil && m.Whatzit != nil { return *m.Whatzit } return 0 } var E_Foo = &proto.ExtensionDesc{ ExtendedType: (*ToBeExtended)(nil), ExtensionType: (*int32)(nil), Field: 13, Name: "grpc.testing.foo", Tag: "varint,13,opt,name=foo", Filename: "proto2_ext.proto", } var E_Bar = &proto.ExtensionDesc{ ExtendedType: (*ToBeExtended)(nil), ExtensionType: (*Extension)(nil), Field: 17, Name: "grpc.testing.bar", Tag: "bytes,17,opt,name=bar", Filename: "proto2_ext.proto", } var E_Baz = &proto.ExtensionDesc{ ExtendedType: (*ToBeExtended)(nil), ExtensionType: (*SearchRequest)(nil), Field: 19, Name: "grpc.testing.baz", Tag: "bytes,19,opt,name=baz", Filename: "proto2_ext.proto", } func init() { proto.RegisterType((*Extension)(nil), "grpc.testing.Extension") proto.RegisterExtension(E_Foo) proto.RegisterExtension(E_Bar) proto.RegisterExtension(E_Baz) } 
func init() { proto.RegisterFile("proto2_ext.proto", fileDescriptor_85b2817ade17959b) } var fileDescriptor_85b2817ade17959b = []byte{ // 179 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x28, 0x28, 0xca, 0x2f, 0xc9, 0x37, 0x8a, 0x4f, 0xad, 0x28, 0xd1, 0x03, 0x33, 0x85, 0x78, 0xd2, 0x8b, 0x0a, 0x92, 0xf5, 0x4a, 0x52, 0x8b, 0x4b, 0x32, 0xf3, 0xd2, 0xa5, 0x78, 0x20, 0xf2, 0x10, 0x39, 0x29, 0x2e, 0x90, 0x30, 0x84, 0xad, 0xa4, 0xca, 0xc5, 0xe9, 0x5a, 0x51, 0x92, 0x9a, 0x57, 0x9c, 0x99, 0x9f, 0x27, 0x24, 0xc1, 0xc5, 0x5e, 0x9e, 0x91, 0x58, 0x52, 0x95, 0x59, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x1a, 0x04, 0xe3, 0x5a, 0xe9, 0x70, 0x31, 0xa7, 0xe5, 0xe7, 0x0b, 0x49, 0xe9, 0x21, 0x1b, 0xab, 0x17, 0x92, 0xef, 0x94, 0x0a, 0xd6, 0x9d, 0x92, 0x9a, 0x22, 0xc1, 0x0b, 0xd6, 0x01, 0x52, 0x66, 0xe5, 0xca, 0xc5, 0x9c, 0x94, 0x58, 0x84, 0x57, 0xb5, 0xa0, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x38, 0xaa, 0x0a, 0xb8, 0x4b, 0x82, 0x40, 0xfa, 0xad, 0x3c, 0x41, 0xc6, 0x54, 0xe1, 0x35, 0x46, 0x18, 0x6c, 0x8c, 0x34, 0xaa, 0x8a, 0xe0, 0xd4, 0xc4, 0xa2, 0xe4, 0x8c, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0x90, 0x51, 0x55, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x71, 0x6b, 0x94, 0x9f, 0x21, 0x01, 0x00, 0x00, } grpc-go-1.29.1/reflection/grpc_testing/proto2_ext.proto000066400000000000000000000015211365033716300231640ustar00rootroot00000000000000// Copyright 2017 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
syntax = "proto2"; package grpc.testing; import "proto2.proto"; import "test.proto"; extend ToBeExtended { optional int32 foo = 13; optional Extension bar = 17; optional SearchRequest baz = 19; } message Extension { optional int32 whatzit = 1; } grpc-go-1.29.1/reflection/grpc_testing/proto2_ext2.pb.go000066400000000000000000000074071365033716300231210ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto2_ext2.proto package grpc_testing import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type AnotherExtension struct { Whatchamacallit *int32 `protobuf:"varint,1,opt,name=whatchamacallit" json:"whatchamacallit,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *AnotherExtension) Reset() { *m = AnotherExtension{} } func (m *AnotherExtension) String() string { return proto.CompactTextString(m) } func (*AnotherExtension) ProtoMessage() {} func (*AnotherExtension) Descriptor() ([]byte, []int) { return fileDescriptor_21d110045b8a354c, []int{0} } func (m *AnotherExtension) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AnotherExtension.Unmarshal(m, b) } func (m *AnotherExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AnotherExtension.Marshal(b, m, deterministic) } func (m *AnotherExtension) XXX_Merge(src proto.Message) { xxx_messageInfo_AnotherExtension.Merge(m, src) } func (m *AnotherExtension) XXX_Size() int { return 
xxx_messageInfo_AnotherExtension.Size(m) } func (m *AnotherExtension) XXX_DiscardUnknown() { xxx_messageInfo_AnotherExtension.DiscardUnknown(m) } var xxx_messageInfo_AnotherExtension proto.InternalMessageInfo func (m *AnotherExtension) GetWhatchamacallit() int32 { if m != nil && m.Whatchamacallit != nil { return *m.Whatchamacallit } return 0 } var E_Frob = &proto.ExtensionDesc{ ExtendedType: (*ToBeExtended)(nil), ExtensionType: (*string)(nil), Field: 23, Name: "grpc.testing.frob", Tag: "bytes,23,opt,name=frob", Filename: "proto2_ext2.proto", } var E_Nitz = &proto.ExtensionDesc{ ExtendedType: (*ToBeExtended)(nil), ExtensionType: (*AnotherExtension)(nil), Field: 29, Name: "grpc.testing.nitz", Tag: "bytes,29,opt,name=nitz", Filename: "proto2_ext2.proto", } func init() { proto.RegisterType((*AnotherExtension)(nil), "grpc.testing.AnotherExtension") proto.RegisterExtension(E_Frob) proto.RegisterExtension(E_Nitz) } func init() { proto.RegisterFile("proto2_ext2.proto", fileDescriptor_21d110045b8a354c) } var fileDescriptor_21d110045b8a354c = []byte{ // 165 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2c, 0x28, 0xca, 0x2f, 0xc9, 0x37, 0x8a, 0x4f, 0xad, 0x28, 0x31, 0xd2, 0x03, 0xb3, 0x85, 0x78, 0xd2, 0x8b, 0x0a, 0x92, 0xf5, 0x4a, 0x52, 0x8b, 0x4b, 0x32, 0xf3, 0xd2, 0xa5, 0x78, 0x20, 0x0a, 0x20, 0x72, 0x4a, 0x36, 0x5c, 0x02, 0x8e, 0x79, 0xf9, 0x25, 0x19, 0xa9, 0x45, 0xae, 0x15, 0x25, 0xa9, 0x79, 0xc5, 0x99, 0xf9, 0x79, 0x42, 0x1a, 0x5c, 0xfc, 0xe5, 0x19, 0x89, 0x25, 0xc9, 0x19, 0x89, 0xb9, 0x89, 0xc9, 0x89, 0x39, 0x39, 0x99, 0x25, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0xe8, 0xc2, 0x56, 0x7a, 0x5c, 0x2c, 0x69, 0x45, 0xf9, 0x49, 0x42, 0x52, 0x7a, 0xc8, 0x56, 0xe8, 0x85, 0xe4, 0x3b, 0xa5, 0x82, 0x8d, 0x4b, 0x49, 0x4d, 0x91, 0x10, 0x57, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xab, 0xb3, 0xf2, 0xe3, 0x62, 0xc9, 0xcb, 0x2c, 0xa9, 0xc2, 0xab, 0x5e, 0x56, 0x81, 0x51, 0x83, 0xdb, 0x48, 0x0e, 0x55, 0x05, 0xba, 0x1b, 0x83, 
0xc0, 0xe6, 0x00, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf0, 0x7e, 0x0d, 0x26, 0xed, 0x00, 0x00, 0x00, } grpc-go-1.29.1/reflection/grpc_testing/proto2_ext2.proto000066400000000000000000000014621365033716300232520ustar00rootroot00000000000000// Copyright 2017 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto2"; package grpc.testing; import "proto2.proto"; extend ToBeExtended { optional string frob = 23; optional AnotherExtension nitz = 29; } message AnotherExtension { optional int32 whatchamacallit = 1; } grpc-go-1.29.1/reflection/grpc_testing/test.pb.go000066400000000000000000000271141365033716300217060ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: test.proto package grpc_testing import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type SearchResponse struct { Results []*SearchResponse_Result `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SearchResponse) Reset() { *m = SearchResponse{} } func (m *SearchResponse) String() string { return proto.CompactTextString(m) } func (*SearchResponse) ProtoMessage() {} func (*SearchResponse) Descriptor() ([]byte, []int) { return fileDescriptor_c161fcfdc0c3ff1e, []int{0} } func (m *SearchResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SearchResponse.Unmarshal(m, b) } func (m *SearchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SearchResponse.Marshal(b, m, deterministic) } func (m *SearchResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_SearchResponse.Merge(m, src) } func (m *SearchResponse) XXX_Size() int { return xxx_messageInfo_SearchResponse.Size(m) } func (m *SearchResponse) XXX_DiscardUnknown() { xxx_messageInfo_SearchResponse.DiscardUnknown(m) } var xxx_messageInfo_SearchResponse proto.InternalMessageInfo func (m *SearchResponse) GetResults() []*SearchResponse_Result { if m != nil { return m.Results } return nil } type SearchResponse_Result struct { Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` Snippets []string `protobuf:"bytes,3,rep,name=snippets,proto3" json:"snippets,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SearchResponse_Result) Reset() { *m = SearchResponse_Result{} } func (m *SearchResponse_Result) String() string { return proto.CompactTextString(m) } func (*SearchResponse_Result) ProtoMessage() {} func (*SearchResponse_Result) Descriptor() ([]byte, []int) { return 
fileDescriptor_c161fcfdc0c3ff1e, []int{0, 0} } func (m *SearchResponse_Result) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SearchResponse_Result.Unmarshal(m, b) } func (m *SearchResponse_Result) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SearchResponse_Result.Marshal(b, m, deterministic) } func (m *SearchResponse_Result) XXX_Merge(src proto.Message) { xxx_messageInfo_SearchResponse_Result.Merge(m, src) } func (m *SearchResponse_Result) XXX_Size() int { return xxx_messageInfo_SearchResponse_Result.Size(m) } func (m *SearchResponse_Result) XXX_DiscardUnknown() { xxx_messageInfo_SearchResponse_Result.DiscardUnknown(m) } var xxx_messageInfo_SearchResponse_Result proto.InternalMessageInfo func (m *SearchResponse_Result) GetUrl() string { if m != nil { return m.Url } return "" } func (m *SearchResponse_Result) GetTitle() string { if m != nil { return m.Title } return "" } func (m *SearchResponse_Result) GetSnippets() []string { if m != nil { return m.Snippets } return nil } type SearchRequest struct { Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SearchRequest) Reset() { *m = SearchRequest{} } func (m *SearchRequest) String() string { return proto.CompactTextString(m) } func (*SearchRequest) ProtoMessage() {} func (*SearchRequest) Descriptor() ([]byte, []int) { return fileDescriptor_c161fcfdc0c3ff1e, []int{1} } func (m *SearchRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SearchRequest.Unmarshal(m, b) } func (m *SearchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SearchRequest.Marshal(b, m, deterministic) } func (m *SearchRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SearchRequest.Merge(m, src) } func (m *SearchRequest) XXX_Size() int { return xxx_messageInfo_SearchRequest.Size(m) } func (m 
*SearchRequest) XXX_DiscardUnknown() { xxx_messageInfo_SearchRequest.DiscardUnknown(m) } var xxx_messageInfo_SearchRequest proto.InternalMessageInfo func (m *SearchRequest) GetQuery() string { if m != nil { return m.Query } return "" } func init() { proto.RegisterType((*SearchResponse)(nil), "grpc.testing.SearchResponse") proto.RegisterType((*SearchResponse_Result)(nil), "grpc.testing.SearchResponse.Result") proto.RegisterType((*SearchRequest)(nil), "grpc.testing.SearchRequest") } func init() { proto.RegisterFile("test.proto", fileDescriptor_c161fcfdc0c3ff1e) } var fileDescriptor_c161fcfdc0c3ff1e = []byte{ // 231 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0xbd, 0x4a, 0xc5, 0x40, 0x10, 0x85, 0x59, 0x83, 0xd1, 0x3b, 0xfe, 0x32, 0x58, 0x84, 0x68, 0x11, 0xae, 0x08, 0xa9, 0x16, 0xb9, 0xd6, 0x56, 0xb6, 0x16, 0xb2, 0x79, 0x82, 0x6b, 0x18, 0xe2, 0x42, 0x4c, 0x36, 0x33, 0x13, 0xc1, 0x87, 0xb1, 0xf5, 0x39, 0x25, 0x59, 0x23, 0x0a, 0x62, 0x63, 0xb7, 0xe7, 0xe3, 0xcc, 0xb7, 0xbb, 0x0c, 0x80, 0x92, 0xa8, 0x0d, 0xdc, 0x6b, 0x8f, 0x87, 0x0d, 0x87, 0xda, 0x4e, 0xc0, 0x77, 0xcd, 0xfa, 0xcd, 0xc0, 0x71, 0x45, 0x5b, 0xae, 0x9f, 0x1c, 0x49, 0xe8, 0x3b, 0x21, 0xbc, 0x85, 0x3d, 0x26, 0x19, 0x5b, 0x95, 0xcc, 0x14, 0x49, 0x79, 0xb0, 0xb9, 0xb4, 0xdf, 0x47, 0xec, 0xcf, 0xba, 0x75, 0x73, 0xd7, 0x2d, 0x33, 0xf9, 0x3d, 0xa4, 0x11, 0xe1, 0x29, 0x24, 0x23, 0xb7, 0x99, 0x29, 0x4c, 0xb9, 0x72, 0xd3, 0x11, 0xcf, 0x60, 0x57, 0xbd, 0xb6, 0x94, 0xed, 0xcc, 0x2c, 0x06, 0xcc, 0x61, 0x5f, 0x3a, 0x1f, 0x02, 0xa9, 0x64, 0x49, 0x91, 0x94, 0x2b, 0xf7, 0x95, 0xd7, 0x57, 0x70, 0xb4, 0xdc, 0x37, 0x8c, 0x24, 0x3a, 0x29, 0x86, 0x91, 0xf8, 0xf5, 0x53, 0x1b, 0xc3, 0xe6, 0xdd, 0x2c, 0xbd, 0x8a, 0xf8, 0xc5, 0xd7, 0x84, 0x77, 0x90, 0x46, 0x80, 0xe7, 0xbf, 0x3f, 0x7f, 0xd6, 0xe5, 0x17, 0x7f, 0xfd, 0x0d, 0x1f, 0xe0, 0xa4, 0x52, 0xa6, 0xed, 0xb3, 0xef, 0x9a, 0x7f, 0xdb, 0x4a, 0x73, 0x6d, 0x1e, 0xd3, 0x79, 0x09, 0x37, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, 
0x20, 0xd6, 0x09, 0xb8, 0x92, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // SearchServiceClient is the client API for SearchService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type SearchServiceClient interface { Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchService_StreamingSearchClient, error) } type searchServiceClient struct { cc grpc.ClientConnInterface } func NewSearchServiceClient(cc grpc.ClientConnInterface) SearchServiceClient { return &searchServiceClient{cc} } func (c *searchServiceClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { out := new(SearchResponse) err := c.cc.Invoke(ctx, "/grpc.testing.SearchService/Search", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *searchServiceClient) StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchService_StreamingSearchClient, error) { stream, err := c.cc.NewStream(ctx, &_SearchService_serviceDesc.Streams[0], "/grpc.testing.SearchService/StreamingSearch", opts...) 
if err != nil { return nil, err } x := &searchServiceStreamingSearchClient{stream} return x, nil } type SearchService_StreamingSearchClient interface { Send(*SearchRequest) error Recv() (*SearchResponse, error) grpc.ClientStream } type searchServiceStreamingSearchClient struct { grpc.ClientStream } func (x *searchServiceStreamingSearchClient) Send(m *SearchRequest) error { return x.ClientStream.SendMsg(m) } func (x *searchServiceStreamingSearchClient) Recv() (*SearchResponse, error) { m := new(SearchResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // SearchServiceServer is the server API for SearchService service. type SearchServiceServer interface { Search(context.Context, *SearchRequest) (*SearchResponse, error) StreamingSearch(SearchService_StreamingSearchServer) error } // UnimplementedSearchServiceServer can be embedded to have forward compatible implementations. type UnimplementedSearchServiceServer struct { } func (*UnimplementedSearchServiceServer) Search(ctx context.Context, req *SearchRequest) (*SearchResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Search not implemented") } func (*UnimplementedSearchServiceServer) StreamingSearch(srv SearchService_StreamingSearchServer) error { return status.Errorf(codes.Unimplemented, "method StreamingSearch not implemented") } func RegisterSearchServiceServer(s *grpc.Server, srv SearchServiceServer) { s.RegisterService(&_SearchService_serviceDesc, srv) } func _SearchService_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SearchRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SearchServiceServer).Search(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.testing.SearchService/Search", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return 
srv.(SearchServiceServer).Search(ctx, req.(*SearchRequest)) } return interceptor(ctx, in, info, handler) } func _SearchService_StreamingSearch_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(SearchServiceServer).StreamingSearch(&searchServiceStreamingSearchServer{stream}) } type SearchService_StreamingSearchServer interface { Send(*SearchResponse) error Recv() (*SearchRequest, error) grpc.ServerStream } type searchServiceStreamingSearchServer struct { grpc.ServerStream } func (x *searchServiceStreamingSearchServer) Send(m *SearchResponse) error { return x.ServerStream.SendMsg(m) } func (x *searchServiceStreamingSearchServer) Recv() (*SearchRequest, error) { m := new(SearchRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _SearchService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.SearchService", HandlerType: (*SearchServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Search", Handler: _SearchService_Search_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "StreamingSearch", Handler: _SearchService_StreamingSearch_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "test.proto", } grpc-go-1.29.1/reflection/grpc_testing/test.proto000066400000000000000000000017441365033716300220450ustar00rootroot00000000000000// Copyright 2017 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
syntax = "proto3"; package grpc.testing; message SearchResponse { message Result { string url = 1; string title = 2; repeated string snippets = 3; } repeated Result results = 1; } message SearchRequest { string query = 1; } service SearchService { rpc Search(SearchRequest) returns (SearchResponse); rpc StreamingSearch(stream SearchRequest) returns (stream SearchResponse); } grpc-go-1.29.1/reflection/grpc_testingv3/000077500000000000000000000000001365033716300202445ustar00rootroot00000000000000grpc-go-1.29.1/reflection/grpc_testingv3/README.md000066400000000000000000000003421365033716300215220ustar00rootroot00000000000000The pb.go is genenated with an older version of codegen, to test reflection behavior with `grpc.SupportPackageIsVersion3`. DO NOT REGENERATE! pb.go is manually edited to replace `"golang.org/x/net/context"` with `"context"`. grpc-go-1.29.1/reflection/grpc_testingv3/testv3.pb.go000066400000000000000000000376401365033716300224350ustar00rootroot00000000000000// Code generated by protoc-gen-go. // source: testv3.proto // DO NOT EDIT! /* Package grpc_testingv3 is a generated protocol buffer package. It is generated from these files: testv3.proto It has these top-level messages: SearchResponseV3 SearchRequestV3 */ package grpc_testingv3 import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type SearchResponseV3_State int32 const ( SearchResponseV3_UNKNOWN SearchResponseV3_State = 0 SearchResponseV3_FRESH SearchResponseV3_State = 1 SearchResponseV3_STALE SearchResponseV3_State = 2 ) var SearchResponseV3_State_name = map[int32]string{ 0: "UNKNOWN", 1: "FRESH", 2: "STALE", } var SearchResponseV3_State_value = map[string]int32{ "UNKNOWN": 0, "FRESH": 1, "STALE": 2, } func (x SearchResponseV3_State) String() string { return proto.EnumName(SearchResponseV3_State_name, int32(x)) } func (SearchResponseV3_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } type SearchResponseV3 struct { Results []*SearchResponseV3_Result `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` State SearchResponseV3_State `protobuf:"varint,2,opt,name=state,enum=grpc.testingv3.SearchResponseV3_State" json:"state,omitempty"` } func (m *SearchResponseV3) Reset() { *m = SearchResponseV3{} } func (m *SearchResponseV3) String() string { return proto.CompactTextString(m) } func (*SearchResponseV3) ProtoMessage() {} func (*SearchResponseV3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *SearchResponseV3) GetResults() []*SearchResponseV3_Result { if m != nil { return m.Results } return nil } func (m *SearchResponseV3) GetState() SearchResponseV3_State { if m != nil { return m.State } return SearchResponseV3_UNKNOWN } type SearchResponseV3_Result struct { Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` Snippets []string `protobuf:"bytes,3,rep,name=snippets" json:"snippets,omitempty"` Metadata map[string]*SearchResponseV3_Result_Value `protobuf:"bytes,4,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` } func (m *SearchResponseV3_Result) Reset() { *m = SearchResponseV3_Result{} } func (m 
*SearchResponseV3_Result) String() string { return proto.CompactTextString(m) } func (*SearchResponseV3_Result) ProtoMessage() {} func (*SearchResponseV3_Result) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } func (m *SearchResponseV3_Result) GetUrl() string { if m != nil { return m.Url } return "" } func (m *SearchResponseV3_Result) GetTitle() string { if m != nil { return m.Title } return "" } func (m *SearchResponseV3_Result) GetSnippets() []string { if m != nil { return m.Snippets } return nil } func (m *SearchResponseV3_Result) GetMetadata() map[string]*SearchResponseV3_Result_Value { if m != nil { return m.Metadata } return nil } type SearchResponseV3_Result_Value struct { // Types that are valid to be assigned to Val: // *SearchResponseV3_Result_Value_Str // *SearchResponseV3_Result_Value_Int // *SearchResponseV3_Result_Value_Real Val isSearchResponseV3_Result_Value_Val `protobuf_oneof:"val"` } func (m *SearchResponseV3_Result_Value) Reset() { *m = SearchResponseV3_Result_Value{} } func (m *SearchResponseV3_Result_Value) String() string { return proto.CompactTextString(m) } func (*SearchResponseV3_Result_Value) ProtoMessage() {} func (*SearchResponseV3_Result_Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0, 0} } type isSearchResponseV3_Result_Value_Val interface { isSearchResponseV3_Result_Value_Val() } type SearchResponseV3_Result_Value_Str struct { Str string `protobuf:"bytes,1,opt,name=str,oneof"` } type SearchResponseV3_Result_Value_Int struct { Int int64 `protobuf:"varint,2,opt,name=int,oneof"` } type SearchResponseV3_Result_Value_Real struct { Real float64 `protobuf:"fixed64,3,opt,name=real,oneof"` } func (*SearchResponseV3_Result_Value_Str) isSearchResponseV3_Result_Value_Val() {} func (*SearchResponseV3_Result_Value_Int) isSearchResponseV3_Result_Value_Val() {} func (*SearchResponseV3_Result_Value_Real) isSearchResponseV3_Result_Value_Val() {} func (m *SearchResponseV3_Result_Value) GetVal() 
isSearchResponseV3_Result_Value_Val { if m != nil { return m.Val } return nil } func (m *SearchResponseV3_Result_Value) GetStr() string { if x, ok := m.GetVal().(*SearchResponseV3_Result_Value_Str); ok { return x.Str } return "" } func (m *SearchResponseV3_Result_Value) GetInt() int64 { if x, ok := m.GetVal().(*SearchResponseV3_Result_Value_Int); ok { return x.Int } return 0 } func (m *SearchResponseV3_Result_Value) GetReal() float64 { if x, ok := m.GetVal().(*SearchResponseV3_Result_Value_Real); ok { return x.Real } return 0 } // XXX_OneofFuncs is for the internal use of the proto package. func (*SearchResponseV3_Result_Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _SearchResponseV3_Result_Value_OneofMarshaler, _SearchResponseV3_Result_Value_OneofUnmarshaler, _SearchResponseV3_Result_Value_OneofSizer, []interface{}{ (*SearchResponseV3_Result_Value_Str)(nil), (*SearchResponseV3_Result_Value_Int)(nil), (*SearchResponseV3_Result_Value_Real)(nil), } } func _SearchResponseV3_Result_Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { m := msg.(*SearchResponseV3_Result_Value) // val switch x := m.Val.(type) { case *SearchResponseV3_Result_Value_Str: b.EncodeVarint(1<<3 | proto.WireBytes) b.EncodeStringBytes(x.Str) case *SearchResponseV3_Result_Value_Int: b.EncodeVarint(2<<3 | proto.WireVarint) b.EncodeVarint(uint64(x.Int)) case *SearchResponseV3_Result_Value_Real: b.EncodeVarint(3<<3 | proto.WireFixed64) b.EncodeFixed64(math.Float64bits(x.Real)) case nil: default: return fmt.Errorf("SearchResponseV3_Result_Value.Val has unexpected type %T", x) } return nil } func _SearchResponseV3_Result_Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { m := msg.(*SearchResponseV3_Result_Value) switch tag { case 1: // val.str if wire != proto.WireBytes { return true, 
proto.ErrInternalBadWireType } x, err := b.DecodeStringBytes() m.Val = &SearchResponseV3_Result_Value_Str{x} return true, err case 2: // val.int if wire != proto.WireVarint { return true, proto.ErrInternalBadWireType } x, err := b.DecodeVarint() m.Val = &SearchResponseV3_Result_Value_Int{int64(x)} return true, err case 3: // val.real if wire != proto.WireFixed64 { return true, proto.ErrInternalBadWireType } x, err := b.DecodeFixed64() m.Val = &SearchResponseV3_Result_Value_Real{math.Float64frombits(x)} return true, err default: return false, nil } } func _SearchResponseV3_Result_Value_OneofSizer(msg proto.Message) (n int) { m := msg.(*SearchResponseV3_Result_Value) // val switch x := m.Val.(type) { case *SearchResponseV3_Result_Value_Str: n += proto.SizeVarint(1<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.Str))) n += len(x.Str) case *SearchResponseV3_Result_Value_Int: n += proto.SizeVarint(2<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.Int)) case *SearchResponseV3_Result_Value_Real: n += proto.SizeVarint(3<<3 | proto.WireFixed64) n += 8 case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } return n } type SearchRequestV3 struct { Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` } func (m *SearchRequestV3) Reset() { *m = SearchRequestV3{} } func (m *SearchRequestV3) String() string { return proto.CompactTextString(m) } func (*SearchRequestV3) ProtoMessage() {} func (*SearchRequestV3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *SearchRequestV3) GetQuery() string { if m != nil { return m.Query } return "" } func init() { proto.RegisterType((*SearchResponseV3)(nil), "grpc.testingv3.SearchResponseV3") proto.RegisterType((*SearchResponseV3_Result)(nil), "grpc.testingv3.SearchResponseV3.Result") proto.RegisterType((*SearchResponseV3_Result_Value)(nil), "grpc.testingv3.SearchResponseV3.Result.Value") proto.RegisterType((*SearchRequestV3)(nil), 
"grpc.testingv3.SearchRequestV3") proto.RegisterEnum("grpc.testingv3.SearchResponseV3_State", SearchResponseV3_State_name, SearchResponseV3_State_value) } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion3 // Client API for SearchServiceV3 service type SearchServiceV3Client interface { Search(ctx context.Context, in *SearchRequestV3, opts ...grpc.CallOption) (*SearchResponseV3, error) StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchServiceV3_StreamingSearchClient, error) } type searchServiceV3Client struct { cc *grpc.ClientConn } func NewSearchServiceV3Client(cc *grpc.ClientConn) SearchServiceV3Client { return &searchServiceV3Client{cc} } func (c *searchServiceV3Client) Search(ctx context.Context, in *SearchRequestV3, opts ...grpc.CallOption) (*SearchResponseV3, error) { out := new(SearchResponseV3) err := grpc.Invoke(ctx, "/grpc.testingv3.SearchServiceV3/Search", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *searchServiceV3Client) StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchServiceV3_StreamingSearchClient, error) { stream, err := grpc.NewClientStream(ctx, &_SearchServiceV3_serviceDesc.Streams[0], c.cc, "/grpc.testingv3.SearchServiceV3/StreamingSearch", opts...) 
if err != nil { return nil, err } x := &searchServiceV3StreamingSearchClient{stream} return x, nil } type SearchServiceV3_StreamingSearchClient interface { Send(*SearchRequestV3) error Recv() (*SearchResponseV3, error) grpc.ClientStream } type searchServiceV3StreamingSearchClient struct { grpc.ClientStream } func (x *searchServiceV3StreamingSearchClient) Send(m *SearchRequestV3) error { return x.ClientStream.SendMsg(m) } func (x *searchServiceV3StreamingSearchClient) Recv() (*SearchResponseV3, error) { m := new(SearchResponseV3) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for SearchServiceV3 service type SearchServiceV3Server interface { Search(context.Context, *SearchRequestV3) (*SearchResponseV3, error) StreamingSearch(SearchServiceV3_StreamingSearchServer) error } func RegisterSearchServiceV3Server(s *grpc.Server, srv SearchServiceV3Server) { s.RegisterService(&_SearchServiceV3_serviceDesc, srv) } func _SearchServiceV3_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SearchRequestV3) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(SearchServiceV3Server).Search(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.testingv3.SearchServiceV3/Search", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SearchServiceV3Server).Search(ctx, req.(*SearchRequestV3)) } return interceptor(ctx, in, info, handler) } func _SearchServiceV3_StreamingSearch_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(SearchServiceV3Server).StreamingSearch(&searchServiceV3StreamingSearchServer{stream}) } type SearchServiceV3_StreamingSearchServer interface { Send(*SearchResponseV3) error Recv() (*SearchRequestV3, error) grpc.ServerStream } type searchServiceV3StreamingSearchServer struct { grpc.ServerStream } func (x 
*searchServiceV3StreamingSearchServer) Send(m *SearchResponseV3) error { return x.ServerStream.SendMsg(m) } func (x *searchServiceV3StreamingSearchServer) Recv() (*SearchRequestV3, error) { m := new(SearchRequestV3) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _SearchServiceV3_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testingv3.SearchServiceV3", HandlerType: (*SearchServiceV3Server)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Search", Handler: _SearchServiceV3_Search_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "StreamingSearch", Handler: _SearchServiceV3_StreamingSearch_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: fileDescriptor0, } func init() { proto.RegisterFile("testv3.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 416 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xd1, 0x6a, 0xd4, 0x40, 0x14, 0x86, 0x77, 0x36, 0x9b, 0x6d, 0xf7, 0xac, 0xb6, 0x61, 0xe8, 0x45, 0xc8, 0x8d, 0x61, 0x2f, 0x6c, 0x10, 0x0c, 0x92, 0x20, 0x88, 0x78, 0x53, 0x65, 0x65, 0xa1, 0x75, 0xc5, 0x89, 0xae, 0xde, 0x8e, 0xeb, 0x61, 0x8d, 0x4d, 0xb3, 0xe9, 0xcc, 0x49, 0x60, 0x9f, 0xc5, 0x17, 0xf1, 0x55, 0x7c, 0x1b, 0x99, 0x99, 0xa6, 0x50, 0x41, 0xba, 0x17, 0xde, 0xcd, 0x7f, 0x38, 0xff, 0x37, 0xff, 0x3f, 0x24, 0xf0, 0x80, 0x50, 0x53, 0x97, 0xa7, 0x8d, 0xda, 0xd2, 0x96, 0x1f, 0x6d, 0x54, 0xb3, 0x4e, 0xcd, 0xa8, 0xac, 0x37, 0x5d, 0x3e, 0xfb, 0x39, 0x82, 0xa0, 0x40, 0xa9, 0xd6, 0xdf, 0x05, 0xea, 0x66, 0x5b, 0x6b, 0x5c, 0xe5, 0xfc, 0x0c, 0x0e, 0x14, 0xea, 0xb6, 0x22, 0x1d, 0xb2, 0xd8, 0x4b, 0xa6, 0xd9, 0x69, 0x7a, 0xd7, 0x96, 0xfe, 0x6d, 0x49, 0x85, 0xdd, 0x17, 0xbd, 0x8f, 0xbf, 0x02, 0x5f, 0x93, 0x24, 0x0c, 0x87, 0x31, 0x4b, 0x8e, 0xb2, 0xc7, 0xf7, 0x02, 0x0a, 0xb3, 0x2d, 0x9c, 0x29, 0xfa, 0x3d, 0x84, 0xb1, 0x23, 0xf2, 0x00, 0xbc, 0x56, 0x55, 0x21, 0x8b, 0x59, 0x32, 0x11, 0xe6, 0xc8, 0x4f, 0xc0, 0xa7, 0x92, 0x2a, 0x87, 0x9e, 0x08, 0x27, 
0x78, 0x04, 0x87, 0xba, 0x2e, 0x9b, 0x06, 0x49, 0x87, 0x5e, 0xec, 0x25, 0x13, 0x71, 0xab, 0xf9, 0x07, 0x38, 0xbc, 0x42, 0x92, 0xdf, 0x24, 0xc9, 0x70, 0x64, 0x0b, 0x3d, 0xdf, 0xb3, 0x50, 0xfa, 0xee, 0xc6, 0x37, 0xaf, 0x49, 0xed, 0xc4, 0x2d, 0x26, 0xba, 0x00, 0x7f, 0x25, 0xab, 0x16, 0x39, 0x07, 0x4f, 0x93, 0x72, 0xf9, 0x16, 0x03, 0x61, 0x84, 0x99, 0x95, 0x35, 0xd9, 0x7c, 0x9e, 0x99, 0x95, 0x35, 0xf1, 0x13, 0x18, 0x29, 0x94, 0x55, 0xe8, 0xc5, 0x2c, 0x61, 0x8b, 0x81, 0xb0, 0xea, 0xb5, 0x0f, 0x5e, 0x27, 0xab, 0xe8, 0x07, 0x3c, 0xbc, 0x73, 0x91, 0x69, 0x7d, 0x89, 0xbb, 0xbe, 0xf5, 0x25, 0xee, 0xf8, 0x1b, 0xf0, 0x3b, 0x73, 0xa1, 0xa5, 0x4e, 0xb3, 0xa7, 0xfb, 0x16, 0xb0, 0x29, 0x85, 0xf3, 0xbe, 0x1c, 0xbe, 0x60, 0xb3, 0x27, 0xe0, 0xdb, 0xb7, 0xe6, 0x53, 0x38, 0xf8, 0xb4, 0x3c, 0x5f, 0xbe, 0xff, 0xbc, 0x0c, 0x06, 0x7c, 0x02, 0xfe, 0x5b, 0x31, 0x2f, 0x16, 0x01, 0x33, 0xc7, 0xe2, 0xe3, 0xd9, 0xc5, 0x3c, 0x18, 0xce, 0x4e, 0xe1, 0xb8, 0xe7, 0x5e, 0xb7, 0xa8, 0x69, 0x95, 0x9b, 0xd7, 0xbf, 0x6e, 0x51, 0xf5, 0xd9, 0x9c, 0xc8, 0x7e, 0xb1, 0x7e, 0xb3, 0x40, 0xd5, 0x95, 0x6b, 0xf3, 0x15, 0x9d, 0xc3, 0xd8, 0x8d, 0xf8, 0xa3, 0x7f, 0x85, 0xbd, 0x81, 0x46, 0xf1, 0x7d, 0x6d, 0xf8, 0x17, 0x38, 0x2e, 0x48, 0xa1, 0xbc, 0x2a, 0xeb, 0xcd, 0x7f, 0xa3, 0x26, 0xec, 0x19, 0xfb, 0x3a, 0xb6, 0x3f, 0x46, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0xed, 0xa2, 0x8d, 0x75, 0x28, 0x03, 0x00, 0x00, } grpc-go-1.29.1/reflection/grpc_testingv3/testv3.proto000066400000000000000000000012331365033716300225600ustar00rootroot00000000000000syntax = "proto3"; package grpc.testingv3; message SearchResponseV3 { message Result { string url = 1; string title = 2; repeated string snippets = 3; message Value { oneof val { string str = 1; int64 int = 2; double real = 3; } } map metadata = 4; } enum State { UNKNOWN = 0; FRESH = 1; STALE = 2; } repeated Result results = 1; State state = 2; } message SearchRequestV3 { string query = 1; } service SearchServiceV3 { rpc Search(SearchRequestV3) returns (SearchResponseV3); rpc 
StreamingSearch(stream SearchRequestV3) returns (stream SearchResponseV3); } grpc-go-1.29.1/reflection/serverreflection.go000066400000000000000000000316761365033716300212300ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate protoc --go_out=plugins=grpc:. grpc_reflection_v1alpha/reflection.proto /* Package reflection implements server reflection service. The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. To register server reflection on a gRPC server: import "google.golang.org/grpc/reflection" s := grpc.NewServer() pb.RegisterYourOwnServer(s, &server{}) // Register reflection service on gRPC server. reflection.Register(s) s.Serve(lis) */ package reflection // import "google.golang.org/grpc/reflection" import ( "bytes" "compress/gzip" "fmt" "io" "io/ioutil" "reflect" "sort" "sync" "github.com/golang/protobuf/proto" dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" "google.golang.org/grpc" "google.golang.org/grpc/codes" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" ) type serverReflectionServer struct { s *grpc.Server initSymbols sync.Once serviceNames []string symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files } // Register registers the server reflection service on the given gRPC server. 
func Register(s *grpc.Server) { rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ s: s, }) } // protoMessage is used for type assertion on proto messages. // Generated proto message implements function Descriptor(), but Descriptor() // is not part of interface proto.Message. This interface is needed to // call Descriptor(). type protoMessage interface { Descriptor() ([]byte, []int) } func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) { s.initSymbols.Do(func() { serviceInfo := s.s.GetServiceInfo() s.symbols = map[string]*dpb.FileDescriptorProto{} s.serviceNames = make([]string, 0, len(serviceInfo)) processed := map[string]struct{}{} for svc, info := range serviceInfo { s.serviceNames = append(s.serviceNames, svc) fdenc, ok := parseMetadata(info.Metadata) if !ok { continue } fd, err := decodeFileDesc(fdenc) if err != nil { continue } s.processFile(fd, processed) } sort.Strings(s.serviceNames) }) return s.serviceNames, s.symbols } func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) { filename := fd.GetName() if _, ok := processed[filename]; ok { return } processed[filename] = struct{}{} prefix := fd.GetPackage() for _, msg := range fd.MessageType { s.processMessage(fd, prefix, msg) } for _, en := range fd.EnumType { s.processEnum(fd, prefix, en) } for _, ext := range fd.Extension { s.processField(fd, prefix, ext) } for _, svc := range fd.Service { svcName := fqn(prefix, svc.GetName()) s.symbols[svcName] = fd for _, meth := range svc.Method { name := fqn(svcName, meth.GetName()) s.symbols[name] = fd } } for _, dep := range fd.Dependency { fdenc := proto.FileDescriptor(dep) fdDep, err := decodeFileDesc(fdenc) if err != nil { continue } s.processFile(fdDep, processed) } } func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) { msgName := fqn(prefix, msg.GetName()) 
s.symbols[msgName] = fd for _, nested := range msg.NestedType { s.processMessage(fd, msgName, nested) } for _, en := range msg.EnumType { s.processEnum(fd, msgName, en) } for _, ext := range msg.Extension { s.processField(fd, msgName, ext) } for _, fld := range msg.Field { s.processField(fd, msgName, fld) } for _, oneof := range msg.OneofDecl { oneofName := fqn(msgName, oneof.GetName()) s.symbols[oneofName] = fd } } func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) { enName := fqn(prefix, en.GetName()) s.symbols[enName] = fd for _, val := range en.Value { valName := fqn(enName, val.GetName()) s.symbols[valName] = fd } } func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) { fldName := fqn(prefix, fld.GetName()) s.symbols[fldName] = fd } func fqn(prefix, name string) string { if prefix == "" { return name } return prefix + "." + name } // fileDescForType gets the file descriptor for the given type. // The given type should be a proto message. func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) if !ok { return nil, fmt.Errorf("failed to create message from type: %v", st) } enc, _ := m.Descriptor() return decodeFileDesc(enc) } // decodeFileDesc does decompression and unmarshalling on the given // file descriptor byte slice. func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { raw, err := decompress(enc) if err != nil { return nil, fmt.Errorf("failed to decompress enc: %v", err) } fd := new(dpb.FileDescriptorProto) if err := proto.Unmarshal(raw, fd); err != nil { return nil, fmt.Errorf("bad descriptor: %v", err) } return fd, nil } // decompress does gzip decompression. 
func decompress(b []byte) ([]byte, error) { r, err := gzip.NewReader(bytes.NewReader(b)) if err != nil { return nil, fmt.Errorf("bad gzipped descriptor: %v", err) } out, err := ioutil.ReadAll(r) if err != nil { return nil, fmt.Errorf("bad gzipped descriptor: %v", err) } return out, nil } func typeForName(name string) (reflect.Type, error) { pt := proto.MessageType(name) if pt == nil { return nil, fmt.Errorf("unknown type: %q", name) } st := pt.Elem() return st, nil } func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) if !ok { return nil, fmt.Errorf("failed to create message from type: %v", st) } var extDesc *proto.ExtensionDesc for id, desc := range proto.RegisteredExtensions(m) { if id == ext { extDesc = desc break } } if extDesc == nil { return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) } return decodeFileDesc(proto.FileDescriptor(extDesc.Filename)) } func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) if !ok { return nil, fmt.Errorf("failed to create message from type: %v", st) } exts := proto.RegisteredExtensions(m) out := make([]int32, 0, len(exts)) for id := range exts { out = append(out, id) } return out, nil } // fileDescEncodingByFilename finds the file descriptor for given filename, // does marshalling on it and returns the marshalled result. func (s *serverReflectionServer) fileDescEncodingByFilename(name string) ([]byte, error) { enc := proto.FileDescriptor(name) if enc == nil { return nil, fmt.Errorf("unknown file: %v", name) } fd, err := decodeFileDesc(enc) if err != nil { return nil, err } return proto.Marshal(fd) } // parseMetadata finds the file descriptor bytes specified meta. 
// For SupportPackageIsVersion4, m is the name of the proto file, we // call proto.FileDescriptor to get the byte slice. // For SupportPackageIsVersion3, m is a byte slice itself. func parseMetadata(meta interface{}) ([]byte, bool) { // Check if meta is the file name. if fileNameForMeta, ok := meta.(string); ok { return proto.FileDescriptor(fileNameForMeta), true } // Check if meta is the byte slice. if enc, ok := meta.([]byte); ok { return enc, true } return nil, false } // fileDescEncodingContainingSymbol finds the file descriptor containing the given symbol, // does marshalling on it and returns the marshalled result. // The given symbol can be a type, a service or a method. func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string) ([]byte, error) { _, symbols := s.getSymbols() fd := symbols[name] if fd == nil { // Check if it's a type name that was not present in the // transitive dependencies of the registered services. if st, err := typeForName(name); err == nil { fd, err = s.fileDescForType(st) if err != nil { return nil, err } } } if fd == nil { return nil, fmt.Errorf("unknown symbol: %v", name) } return proto.Marshal(fd) } // fileDescEncodingContainingExtension finds the file descriptor containing given extension, // does marshalling on it and returns the marshalled result. func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32) ([]byte, error) { st, err := typeForName(typeName) if err != nil { return nil, err } fd, err := fileDescContainingExtension(st, extNum) if err != nil { return nil, err } return proto.Marshal(fd) } // allExtensionNumbersForTypeName returns all extension numbers for the given type. 
func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { st, err := typeForName(name) if err != nil { return nil, err } extNums, err := s.allExtensionNumbersForType(st) if err != nil { return nil, err } return extNums, nil } // ServerReflectionInfo is the reflection service handler. func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { for { in, err := stream.Recv() if err == io.EOF { return nil } if err != nil { return err } out := &rpb.ServerReflectionResponse{ ValidHost: in.Host, OriginalRequest: in, } switch req := in.MessageRequest.(type) { case *rpb.ServerReflectionRequest_FileByFilename: b, err := s.fileDescEncodingByFilename(req.FileByFilename) if err != nil { out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ ErrorResponse: &rpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, } } case *rpb.ServerReflectionRequest_FileContainingSymbol: b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol) if err != nil { out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ ErrorResponse: &rpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, } } case *rpb.ServerReflectionRequest_FileContainingExtension: typeName := req.FileContainingExtension.ContainingType extNum := req.FileContainingExtension.ExtensionNumber b, err := s.fileDescEncodingContainingExtension(typeName, extNum) if err != nil { out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ ErrorResponse: &rpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), 
ErrorMessage: err.Error(), }, } } else { out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, } } case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType: extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) if err != nil { out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ ErrorResponse: &rpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ BaseTypeName: req.AllExtensionNumbersOfType, ExtensionNumber: extNums, }, } } case *rpb.ServerReflectionRequest_ListServices: svcNames, _ := s.getSymbols() serviceResponses := make([]*rpb.ServiceResponse, len(svcNames)) for i, n := range svcNames { serviceResponses[i] = &rpb.ServiceResponse{ Name: n, } } out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ ListServicesResponse: &rpb.ListServiceResponse{ Service: serviceResponses, }, } default: return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) } if err := stream.Send(out); err != nil { return err } } } grpc-go-1.29.1/reflection/serverreflection_test.go000066400000000000000000000412531365033716300222570ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate protoc -I grpc_testing --go_out=plugins=grpc:grpc_testing/ grpc_testing/proto2.proto grpc_testing/proto2_ext.proto grpc_testing/proto2_ext2.proto grpc_testing/test.proto // Note: grpc_testingv3/testv3.pb.go is not re-generated because it was // intentionally generated by an older version of protoc-gen-go. package reflection import ( "context" "fmt" "net" "reflect" "sort" "testing" "github.com/golang/protobuf/proto" dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" "google.golang.org/grpc" "google.golang.org/grpc/internal/grpctest" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" pb "google.golang.org/grpc/reflection/grpc_testing" pbv3 "google.golang.org/grpc/reflection/grpc_testingv3" ) var ( s = &serverReflectionServer{} // fileDescriptor of each test proto file. fdTest *dpb.FileDescriptorProto fdTestv3 *dpb.FileDescriptorProto fdProto2 *dpb.FileDescriptorProto fdProto2Ext *dpb.FileDescriptorProto fdProto2Ext2 *dpb.FileDescriptorProto // fileDescriptor marshalled. 
fdTestByte []byte fdTestv3Byte []byte fdProto2Byte []byte fdProto2ExtByte []byte fdProto2Ext2Byte []byte ) type x struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, x{}) } func loadFileDesc(filename string) (*dpb.FileDescriptorProto, []byte) { enc := proto.FileDescriptor(filename) if enc == nil { panic(fmt.Sprintf("failed to find fd for file: %v", filename)) } fd, err := decodeFileDesc(enc) if err != nil { panic(fmt.Sprintf("failed to decode enc: %v", err)) } b, err := proto.Marshal(fd) if err != nil { panic(fmt.Sprintf("failed to marshal fd: %v", err)) } return fd, b } func init() { fdTest, fdTestByte = loadFileDesc("test.proto") fdTestv3, fdTestv3Byte = loadFileDesc("testv3.proto") fdProto2, fdProto2Byte = loadFileDesc("proto2.proto") fdProto2Ext, fdProto2ExtByte = loadFileDesc("proto2_ext.proto") fdProto2Ext2, fdProto2Ext2Byte = loadFileDesc("proto2_ext2.proto") } func (x) TestFileDescForType(t *testing.T) { for _, test := range []struct { st reflect.Type wantFd *dpb.FileDescriptorProto }{ {reflect.TypeOf(pb.SearchResponse_Result{}), fdTest}, {reflect.TypeOf(pb.ToBeExtended{}), fdProto2}, } { fd, err := s.fileDescForType(test.st) if err != nil || !proto.Equal(fd, test.wantFd) { t.Errorf("fileDescForType(%q) = %q, %v, want %q, ", test.st, fd, err, test.wantFd) } } } func (x) TestTypeForName(t *testing.T) { for _, test := range []struct { name string want reflect.Type }{ {"grpc.testing.SearchResponse", reflect.TypeOf(pb.SearchResponse{})}, } { r, err := typeForName(test.name) if err != nil || r != test.want { t.Errorf("typeForName(%q) = %q, %v, want %q, ", test.name, r, err, test.want) } } } func (x) TestTypeForNameNotFound(t *testing.T) { for _, test := range []string{ "grpc.testing.not_exiting", } { _, err := typeForName(test) if err == nil { t.Errorf("typeForName(%q) = _, %v, want _, ", test, err) } } } func (x) TestFileDescContainingExtension(t *testing.T) { for _, test := range []struct { st reflect.Type extNum int32 want 
*dpb.FileDescriptorProto }{ {reflect.TypeOf(pb.ToBeExtended{}), 13, fdProto2Ext}, {reflect.TypeOf(pb.ToBeExtended{}), 17, fdProto2Ext}, {reflect.TypeOf(pb.ToBeExtended{}), 19, fdProto2Ext}, {reflect.TypeOf(pb.ToBeExtended{}), 23, fdProto2Ext2}, {reflect.TypeOf(pb.ToBeExtended{}), 29, fdProto2Ext2}, } { fd, err := fileDescContainingExtension(test.st, test.extNum) if err != nil || !proto.Equal(fd, test.want) { t.Errorf("fileDescContainingExtension(%q) = %q, %v, want %q, ", test.st, fd, err, test.want) } } } // intArray is used to sort []int32 type intArray []int32 func (s intArray) Len() int { return len(s) } func (s intArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s intArray) Less(i, j int) bool { return s[i] < s[j] } func (x) TestAllExtensionNumbersForType(t *testing.T) { for _, test := range []struct { st reflect.Type want []int32 }{ {reflect.TypeOf(pb.ToBeExtended{}), []int32{13, 17, 19, 23, 29}}, } { r, err := s.allExtensionNumbersForType(test.st) sort.Sort(intArray(r)) if err != nil || !reflect.DeepEqual(r, test.want) { t.Errorf("allExtensionNumbersForType(%q) = %v, %v, want %v, ", test.st, r, err, test.want) } } } // Do end2end tests. type server struct { pb.UnimplementedSearchServiceServer } func (s *server) Search(ctx context.Context, in *pb.SearchRequest) (*pb.SearchResponse, error) { return &pb.SearchResponse{}, nil } func (s *server) StreamingSearch(stream pb.SearchService_StreamingSearchServer) error { return nil } type serverV3 struct{} func (s *serverV3) Search(ctx context.Context, in *pbv3.SearchRequestV3) (*pbv3.SearchResponseV3, error) { return &pbv3.SearchResponseV3{}, nil } func (s *serverV3) StreamingSearch(stream pbv3.SearchServiceV3_StreamingSearchServer) error { return nil } func (x) TestReflectionEnd2end(t *testing.T) { // Start server. 
lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("failed to listen: %v", err) } s := grpc.NewServer() pb.RegisterSearchServiceServer(s, &server{}) pbv3.RegisterSearchServiceV3Server(s, &serverV3{}) // Register reflection service on s. Register(s) go s.Serve(lis) // Create client. conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { t.Fatalf("cannot connect to server: %v", err) } defer conn.Close() c := rpb.NewServerReflectionClient(conn) stream, err := c.ServerReflectionInfo(context.Background(), grpc.WaitForReady(true)) if err != nil { t.Fatalf("cannot get ServerReflectionInfo: %v", err) } testFileByFilename(t, stream) testFileByFilenameError(t, stream) testFileContainingSymbol(t, stream) testFileContainingSymbolError(t, stream) testFileContainingExtension(t, stream) testFileContainingExtensionError(t, stream) testAllExtensionNumbersOfType(t, stream) testAllExtensionNumbersOfTypeError(t, stream) testListServices(t, stream) s.Stop() } func testFileByFilename(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { filename string want []byte }{ {"test.proto", fdTestByte}, {"proto2.proto", fdProto2Byte}, {"proto2_ext.proto", fdProto2ExtByte}, } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{ FileByFilename: test.filename, }, }); err != nil { t.Fatalf("failed to send request: %v", err) } r, err := stream.Recv() if err != nil { // io.EOF is not ok. 
t.Fatalf("failed to recv response: %v", err) } switch r.MessageResponse.(type) { case *rpb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileByFilename(%v)\nreceived: %q,\nwant: %q", test.filename, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } default: t.Errorf("FileByFilename(%v) = %v, want type ", test.filename, r.MessageResponse) } } } func testFileByFilenameError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "test.poto", "proo2.proto", "proto2_et.proto", } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{ FileByFilename: test, }, }); err != nil { t.Fatalf("failed to send request: %v", err) } r, err := stream.Recv() if err != nil { // io.EOF is not ok. t.Fatalf("failed to recv response: %v", err) } switch r.MessageResponse.(type) { case *rpb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileByFilename(%v) = %v, want type ", test, r.MessageResponse) } } } func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { symbol string want []byte }{ {"grpc.testing.SearchService", fdTestByte}, {"grpc.testing.SearchService.Search", fdTestByte}, {"grpc.testing.SearchService.StreamingSearch", fdTestByte}, {"grpc.testing.SearchResponse", fdTestByte}, {"grpc.testing.ToBeExtended", fdProto2Byte}, // Test support package v3. 
{"grpc.testingv3.SearchServiceV3", fdTestv3Byte}, {"grpc.testingv3.SearchServiceV3.Search", fdTestv3Byte}, {"grpc.testingv3.SearchServiceV3.StreamingSearch", fdTestv3Byte}, {"grpc.testingv3.SearchResponseV3", fdTestv3Byte}, // search for field, oneof, enum, and enum value symbols, too {"grpc.testingv3.SearchResponseV3.Result.snippets", fdTestv3Byte}, {"grpc.testingv3.SearchResponseV3.Result.Value.val", fdTestv3Byte}, {"grpc.testingv3.SearchResponseV3.Result.Value.str", fdTestv3Byte}, {"grpc.testingv3.SearchResponseV3.State", fdTestv3Byte}, {"grpc.testingv3.SearchResponseV3.State.FRESH", fdTestv3Byte}, } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ FileContainingSymbol: test.symbol, }, }); err != nil { t.Fatalf("failed to send request: %v", err) } r, err := stream.Recv() if err != nil { // io.EOF is not ok. t.Fatalf("failed to recv response: %v", err) } switch r.MessageResponse.(type) { case *rpb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileContainingSymbol(%v)\nreceived: %q,\nwant: %q", test.symbol, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } default: t.Errorf("FileContainingSymbol(%v) = %v, want type ", test.symbol, r.MessageResponse) } } } func testFileContainingSymbolError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "grpc.testing.SerchService", "grpc.testing.SearchService.SearchE", "grpc.tesing.SearchResponse", "gpc.testing.ToBeExtended", } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ FileContainingSymbol: test, }, }); err != nil { t.Fatalf("failed to send request: %v", err) } r, err := stream.Recv() if err != nil { // io.EOF is not ok. 
t.Fatalf("failed to recv response: %v", err) } switch r.MessageResponse.(type) { case *rpb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileContainingSymbol(%v) = %v, want type ", test, r.MessageResponse) } } } func testFileContainingExtension(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string extNum int32 want []byte }{ {"grpc.testing.ToBeExtended", 13, fdProto2ExtByte}, {"grpc.testing.ToBeExtended", 17, fdProto2ExtByte}, {"grpc.testing.ToBeExtended", 19, fdProto2ExtByte}, {"grpc.testing.ToBeExtended", 23, fdProto2Ext2Byte}, {"grpc.testing.ToBeExtended", 29, fdProto2Ext2Byte}, } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{ FileContainingExtension: &rpb.ExtensionRequest{ ContainingType: test.typeName, ExtensionNumber: test.extNum, }, }, }); err != nil { t.Fatalf("failed to send request: %v", err) } r, err := stream.Recv() if err != nil { // io.EOF is not ok. 
t.Fatalf("failed to recv response: %v", err) } switch r.MessageResponse.(type) { case *rpb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileContainingExtension(%v, %v)\nreceived: %q,\nwant: %q", test.typeName, test.extNum, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } default: t.Errorf("FileContainingExtension(%v, %v) = %v, want type ", test.typeName, test.extNum, r.MessageResponse) } } } func testFileContainingExtensionError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string extNum int32 }{ {"grpc.testing.ToBExtended", 17}, {"grpc.testing.ToBeExtended", 15}, } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{ FileContainingExtension: &rpb.ExtensionRequest{ ContainingType: test.typeName, ExtensionNumber: test.extNum, }, }, }); err != nil { t.Fatalf("failed to send request: %v", err) } r, err := stream.Recv() if err != nil { // io.EOF is not ok. t.Fatalf("failed to recv response: %v", err) } switch r.MessageResponse.(type) { case *rpb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileContainingExtension(%v, %v) = %v, want type ", test.typeName, test.extNum, r.MessageResponse) } } } func testAllExtensionNumbersOfType(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string want []int32 }{ {"grpc.testing.ToBeExtended", []int32{13, 17, 19, 23, 29}}, } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{ AllExtensionNumbersOfType: test.typeName, }, }); err != nil { t.Fatalf("failed to send request: %v", err) } r, err := stream.Recv() if err != nil { // io.EOF is not ok. 
t.Fatalf("failed to recv response: %v", err) } switch r.MessageResponse.(type) { case *rpb.ServerReflectionResponse_AllExtensionNumbersResponse: extNum := r.GetAllExtensionNumbersResponse().ExtensionNumber sort.Sort(intArray(extNum)) if r.GetAllExtensionNumbersResponse().BaseTypeName != test.typeName || !reflect.DeepEqual(extNum, test.want) { t.Errorf("AllExtensionNumbersOfType(%v)\nreceived: %v,\nwant: {%q %v}", r.GetAllExtensionNumbersResponse(), test.typeName, test.typeName, test.want) } default: t.Errorf("AllExtensionNumbersOfType(%v) = %v, want type ", test.typeName, r.MessageResponse) } } } func testAllExtensionNumbersOfTypeError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "grpc.testing.ToBeExtendedE", } { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{ AllExtensionNumbersOfType: test, }, }); err != nil { t.Fatalf("failed to send request: %v", err) } r, err := stream.Recv() if err != nil { // io.EOF is not ok. t.Fatalf("failed to recv response: %v", err) } switch r.MessageResponse.(type) { case *rpb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("AllExtensionNumbersOfType(%v) = %v, want type ", test, r.MessageResponse) } } } func testListServices(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { if err := stream.Send(&rpb.ServerReflectionRequest{ MessageRequest: &rpb.ServerReflectionRequest_ListServices{}, }); err != nil { t.Fatalf("failed to send request: %v", err) } r, err := stream.Recv() if err != nil { // io.EOF is not ok. t.Fatalf("failed to recv response: %v", err) } switch r.MessageResponse.(type) { case *rpb.ServerReflectionResponse_ListServicesResponse: services := r.GetListServicesResponse().Service want := []string{ "grpc.testingv3.SearchServiceV3", "grpc.testing.SearchService", "grpc.reflection.v1alpha.ServerReflection", } // Compare service names in response with want. 
if len(services) != len(want) { t.Errorf("= %v, want service names: %v", services, want) } m := make(map[string]int) for _, e := range services { m[e.Name]++ } for _, e := range want { if m[e] > 0 { m[e]-- continue } t.Errorf("ListService\nreceived: %v,\nwant: %q", services, want) } default: t.Errorf("ListServices = %v, want type ", r.MessageResponse) } } grpc-go-1.29.1/resolver/000077500000000000000000000000001365033716300150125ustar00rootroot00000000000000grpc-go-1.29.1/resolver/dns/000077500000000000000000000000001365033716300155765ustar00rootroot00000000000000grpc-go-1.29.1/resolver/dns/dns_resolver.go000066400000000000000000000021371365033716300206350ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package dns implements a dns resolver to be installed as the default resolver // in grpc. // // Deprecated: this package is imported by grpc and should not need to be // imported directly by users. package dns import ( "google.golang.org/grpc/internal/resolver/dns" "google.golang.org/grpc/resolver" ) // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. // // Deprecated: import grpc and use resolver.Get("dns") instead. 
func NewBuilder() resolver.Builder { return dns.NewBuilder() } grpc-go-1.29.1/resolver/manual/000077500000000000000000000000001365033716300162675ustar00rootroot00000000000000grpc-go-1.29.1/resolver/manual/manual.go000066400000000000000000000054441365033716300201020ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package manual defines a resolver that can be used to manually send resolved // addresses to ClientConn. package manual import ( "strconv" "time" "google.golang.org/grpc/resolver" ) // NewBuilderWithScheme creates a new test resolver builder with the given scheme. func NewBuilderWithScheme(scheme string) *Resolver { return &Resolver{ ResolveNowCallback: func(resolver.ResolveNowOptions) {}, scheme: scheme, } } // Resolver is also a resolver builder. // It's build() function always returns itself. type Resolver struct { // ResolveNowCallback is called when the ResolveNow method is called on the // resolver. Must not be nil. Must not be changed after the resolver may // be built. ResolveNowCallback func(resolver.ResolveNowOptions) scheme string // Fields actually belong to the resolver. CC resolver.ClientConn bootstrapState *resolver.State } // InitialState adds initial state to the resolver so that UpdateState doesn't // need to be explicitly called after Dial. 
func (r *Resolver) InitialState(s resolver.State) { r.bootstrapState = &s } // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { r.CC = cc if r.bootstrapState != nil { r.UpdateState(*r.bootstrapState) } return r, nil } // Scheme returns the test scheme. func (r *Resolver) Scheme() string { return r.scheme } // ResolveNow is a noop for Resolver. func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) { r.ResolveNowCallback(o) } // Close is a noop for Resolver. func (*Resolver) Close() {} // UpdateState calls CC.UpdateState. func (r *Resolver) UpdateState(s resolver.State) { r.CC.UpdateState(s) } // GenerateAndRegisterManualResolver generates a random scheme and a Resolver // with it. It also registers this Resolver. // It returns the Resolver and a cleanup function to unregister it. func GenerateAndRegisterManualResolver() (*Resolver, func()) { scheme := strconv.FormatInt(time.Now().UnixNano(), 36) r := NewBuilderWithScheme(scheme) resolver.Register(r) return r, func() { resolver.UnregisterForTesting(scheme) } } grpc-go-1.29.1/resolver/passthrough/000077500000000000000000000000001365033716300173615ustar00rootroot00000000000000grpc-go-1.29.1/resolver/passthrough/passthrough.go000066400000000000000000000017301365033716300222600ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ // Package passthrough implements a pass-through resolver. It sends the target // name without scheme back to gRPC as resolved address. // // Deprecated: this package is imported by grpc and should not need to be // imported directly by users. package passthrough import _ "google.golang.org/grpc/internal/resolver/passthrough" // import for side effects after package was moved grpc-go-1.29.1/resolver/resolver.go000066400000000000000000000230171365033716300172050ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package resolver defines APIs for name resolution in gRPC. // All APIs in this package are experimental. package resolver import ( "context" "net" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/serviceconfig" ) var ( // m is a map from scheme to resolver builder. m = make(map[string]Builder) // defaultScheme is the default scheme to use. defaultScheme = "passthrough" ) // TODO(bar) install dns resolver in init(){}. // Register registers the resolver builder to the resolver map. b.Scheme will be // used as the scheme registered with this builder. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. 
If multiple Resolvers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { m[b.Scheme()] = b } // Get returns the resolver builder registered with the given scheme. // // If no builder is register with the scheme, nil will be returned. func Get(scheme string) Builder { if b, ok := m[scheme]; ok { return b } return nil } // SetDefaultScheme sets the default scheme that will be used. The default // default scheme is "passthrough". // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. The scheme set last overrides // previously set values. func SetDefaultScheme(scheme string) { defaultScheme = scheme } // GetDefaultScheme gets the default scheme that will be used. func GetDefaultScheme() string { return defaultScheme } // AddressType indicates the address type returned by name resolution. // // Deprecated: use Attributes in Address instead. type AddressType uint8 const ( // Backend indicates the address is for a backend server. // // Deprecated: use Attributes in Address instead. Backend AddressType = iota // GRPCLB indicates the address is for a grpclb load balancer. // // Deprecated: use Attributes in Address instead. GRPCLB ) // Address represents a server the client connects to. // This is the EXPERIMENTAL API and may be changed or extended in the future. type Address struct { // Addr is the server address on which a connection will be established. Addr string // ServerName is the name of this address. // If non-empty, the ServerName is used as the transport certification authority for // the address, instead of the hostname from the Dial target string. In most cases, // this should not be set. // // If Type is GRPCLB, ServerName should be the name of the remote load // balancer, not the name of the backend. // // WARNING: ServerName must only be populated with trusted values. 
It // is insecure to populate it with data from untrusted inputs since untrusted // values could be used to bypass the authority checks performed by TLS. ServerName string // Attributes contains arbitrary data about this address intended for // consumption by the load balancing policy. Attributes *attributes.Attributes // Type is the type of this address. // // Deprecated: use Attributes instead. Type AddressType // Metadata is the information associated with Addr, which may be used // to make load balancing decision. // // Deprecated: use Attributes instead. Metadata interface{} } // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { // DisableServiceConfig indicates whether a resolver implementation should // fetch service config data. DisableServiceConfig bool // DialCreds is the transport credentials used by the ClientConn for // communicating with the target gRPC service (set via // WithTransportCredentials). In cases where a name resolution service // requires the same credentials, the resolver may use this field. In most // cases though, it is not appropriate, and this field may be ignored. DialCreds credentials.TransportCredentials // CredsBundle is the credentials bundle used by the ClientConn for // communicating with the target gRPC service (set via // WithCredentialsBundle). In cases where a name resolution service // requires the same credentials, the resolver may use this field. In most // cases though, it is not appropriate, and this field may be ignored. CredsBundle credentials.Bundle // Dialer is the custom dialer used by the ClientConn for dialling the // target gRPC service (set via WithDialer). In cases where a name // resolution service requires the same dialer, the resolver may use this // field. In most cases though, it is not appropriate, and this field may // be ignored. 
Dialer func(context.Context, string) (net.Conn, error) } // State contains the current Resolver state relevant to the ClientConn. type State struct { // Addresses is the latest set of resolved addresses for the target. Addresses []Address // ServiceConfig contains the result from parsing the latest service // config. If it is nil, it indicates no service config is present or the // resolver does not provide service configs. ServiceConfig *serviceconfig.ParseResult // Attributes contains arbitrary data about the resolver intended for // consumption by the load balancing policy. Attributes *attributes.Attributes } // ClientConn contains the callbacks for resolver to notify any updates // to the gRPC ClientConn. // // This interface is to be implemented by gRPC. Users should not need a // brand new implementation of this interface. For the situations like // testing, the new implementation should embed this interface. This allows // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. UpdateState(State) // ReportError notifies the ClientConn that the Resolver encountered an // error. The ClientConn will notify the load balancer and begin calling // ResolveNow on the Resolver with exponential backoff. ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. // The address list should be the complete list of resolved addresses. // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) // NewServiceConfig is called by resolver to notify ClientConn a new // service config. The service config should be provided as a json string. // // Deprecated: Use UpdateState instead. NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. 
ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult } // Target represents a target for gRPC, as specified in: // https://github.com/grpc/grpc/blob/master/doc/naming.md. // It is parsed from the target string that gets passed into Dial or DialContext by the user. And // grpc passes it to the resolver and the balancer. // // If the target follows the naming spec, and the parsed scheme is registered with grpc, we will // parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed // into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} // // If the target does not contain a scheme, we will apply the default scheme, and set the Target to // be the full target string. e.g. "foo.bar" will be parsed into // &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. // // If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the // endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target // string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into // &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. type Target struct { Scheme string Authority string Endpoint string } // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. // // gRPC dial calls Build synchronously, and fails if the returned error is // not nil. Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) // Scheme returns the scheme supported by this resolver. // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. Scheme() string } // ResolveNowOptions includes additional information for ResolveNow. type ResolveNowOptions struct{} // Resolver watches for the updates on the specified target. 
// Updates include address updates and service config updates. type Resolver interface { // ResolveNow will be called by gRPC to try to resolve the target name // again. It's just a hint, resolver can ignore this if it's not necessary. // // It could be called multiple times concurrently. ResolveNow(ResolveNowOptions) // Close closes the resolver. Close() } // UnregisterForTesting removes the resolver builder with the given scheme from the // resolver map. // This function is for testing only. func UnregisterForTesting(scheme string) { delete(m, scheme) } grpc-go-1.29.1/resolver_conn_wrapper.go000066400000000000000000000146351365033716300201270ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "fmt" "strings" "sync" "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) // ccResolverWrapper is a wrapper on top of cc for resolvers. // It implements resolver.ClientConn interface. type ccResolverWrapper struct { cc *ClientConn resolverMu sync.Mutex resolver resolver.Resolver done *grpcsync.Event curState resolver.State pollingMu sync.Mutex polling chan struct{} } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. 
func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { ccr := &ccResolverWrapper{ cc: cc, done: grpcsync.NewEvent(), } var credsClone credentials.TransportCredentials if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } rbo := resolver.BuildOptions{ DisableServiceConfig: cc.dopts.disableServiceConfig, DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, } var err error // We need to hold the lock here while we assign to the ccr.resolver field // to guard against a data race caused by the following code path, // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up // accessing ccr.resolver which is being assigned here. ccr.resolverMu.Lock() defer ccr.resolverMu.Unlock() ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) if err != nil { return nil, err } return ccr, nil } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { ccr.resolverMu.Lock() if !ccr.done.HasFired() { ccr.resolver.ResolveNow(o) } ccr.resolverMu.Unlock() } func (ccr *ccResolverWrapper) close() { ccr.resolverMu.Lock() ccr.resolver.Close() ccr.done.Fire() ccr.resolverMu.Unlock() } // poll begins or ends asynchronous polling of the resolver based on whether // err is ErrBadResolverState. func (ccr *ccResolverWrapper) poll(err error) { ccr.pollingMu.Lock() defer ccr.pollingMu.Unlock() if err != balancer.ErrBadResolverState { // stop polling if ccr.polling != nil { close(ccr.polling) ccr.polling = nil } return } if ccr.polling != nil { // already polling return } p := make(chan struct{}) ccr.polling = p go func() { for i := 0; ; i++ { ccr.resolveNow(resolver.ResolveNowOptions{}) t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i)) select { case <-p: t.Stop() return case <-ccr.done.Done(): // Resolver has been closed. t.Stop() return case <-t.C: select { case <-p: return default: } // Timer expired; re-resolve. 
} } }() } func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { if ccr.done.HasFired() { return } channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) if channelz.IsOn() { ccr.addChannelzTraceEvent(s) } ccr.curState = s ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) } func (ccr *ccResolverWrapper) ReportError(err error) { if ccr.done.HasFired() { return } channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) } // NewAddress is called by the resolver implementation to send addresses to gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) if channelz.IsOn() { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) } ccr.curState.Addresses = addrs ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. 
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) if ccr.cc.dopts.disableServiceConfig { channelz.Info(ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return } scpr := parseServiceConfig(sc) if scpr.Err != nil { channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) ccr.poll(balancer.ErrBadResolverState) return } if channelz.IsOn() { ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) } ccr.curState.ServiceConfig = scpr ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) } func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { return parseServiceConfig(scJSON) } func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool if ccr.curState.ServiceConfig != nil { oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) } if s.ServiceConfig != nil { newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) } if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { updates = append(updates, "service config updated") } if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { updates = append(updates, "resolver returned an empty address list") } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } channelz.AddTraceEvent(ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), Severity: channelz.CtINFO, }) } grpc-go-1.29.1/resolver_conn_wrapper_test.go000066400000000000000000000156421365033716300211650ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "context" "errors" "fmt" "net" "strings" "testing" "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" ) // The target string with unknown scheme should be kept unchanged and passed to // the dialer. func (s) TestDialParseTargetUnknownScheme(t *testing.T) { for _, test := range []struct { targetStr string want string }{ {"/unix/socket/address", "/unix/socket/address"}, // Special test for "unix:///". {"unix:///unix/socket/address", "unix:///unix/socket/address"}, // For known scheme. 
{"passthrough://a.server.com/google.com", "google.com"}, } { dialStrCh := make(chan string, 1) cc, err := Dial(test.targetStr, WithInsecure(), WithDialer(func(addr string, _ time.Duration) (net.Conn, error) { select { case dialStrCh <- addr: default: } return nil, fmt.Errorf("test dialer, always error") })) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } got := <-dialStrCh cc.Close() if got != test.want { t.Errorf("Dial(%q), dialer got %q, want %q", test.targetStr, got, test.want) } } } func testResolverErrorPolling(t *testing.T, badUpdate func(*manual.Resolver), goodUpdate func(*manual.Resolver), dopts ...DialOption) { boIter := make(chan int) resolverBackoff := func(v int) time.Duration { boIter <- v return 0 } r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() rn := make(chan struct{}) defer func() { close(rn) }() r.ResolveNowCallback = func(resolver.ResolveNowOptions) { rn <- struct{}{} } defaultDialOptions := []DialOption{ WithInsecure(), withResolveNowBackoff(resolverBackoff), } cc, err := Dial(r.Scheme()+":///test.server", append(defaultDialOptions, dopts...)...) if err != nil { t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) } defer cc.Close() badUpdate(r) panicAfter := time.AfterFunc(5*time.Second, func() { panic("timed out polling resolver") }) defer panicAfter.Stop() // Ensure ResolveNow is called, then Backoff with the right parameter, several times for i := 0; i < 7; i++ { <-rn if v := <-boIter; v != i { t.Errorf("Backoff call %v uses value %v", i, v) } } // UpdateState will block if ResolveNow is being called (which blocks on // rn), so call it in a goroutine. goodUpdate(r) // Wait awhile to ensure ResolveNow and Backoff stop being called when the // state is OK (i.e. polling was cancelled). 
for { t := time.NewTimer(50 * time.Millisecond) select { case <-rn: // ClientConn is still calling ResolveNow <-boIter time.Sleep(5 * time.Millisecond) continue case <-t.C: // ClientConn stopped calling ResolveNow; success } break } } const happyBalancerName = "happy balancer" func init() { // Register a balancer that never returns an error from // UpdateClientConnState, and doesn't do anything else either. fb := &funcBalancer{ updateClientConnState: func(s balancer.ClientConnState) error { return nil }, } balancer.Register(&funcBalancerBuilder{name: happyBalancerName, instance: fb}) } // TestResolverErrorPolling injects resolver errors and verifies ResolveNow is // called with the appropriate backoff strategy being consulted between // ResolveNow calls. func (s) TestResolverErrorPolling(t *testing.T) { testResolverErrorPolling(t, func(r *manual.Resolver) { r.CC.ReportError(errors.New("res err")) }, func(r *manual.Resolver) { // UpdateState will block if ResolveNow is being called (which blocks on // rn), so call it in a goroutine. go r.CC.UpdateState(resolver.State{}) }, WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, happyBalancerName))) } // TestServiceConfigErrorPolling injects a service config error and verifies // ResolveNow is called with the appropriate backoff strategy being consulted // between ResolveNow calls. func (s) TestServiceConfigErrorPolling(t *testing.T) { testResolverErrorPolling(t, func(r *manual.Resolver) { badsc := r.CC.ParseServiceConfig("bad config") r.UpdateState(resolver.State{ServiceConfig: badsc}) }, func(r *manual.Resolver) { // UpdateState will block if ResolveNow is being called (which blocks on // rn), so call it in a goroutine. go r.CC.UpdateState(resolver.State{}) }, WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, happyBalancerName))) } // TestResolverErrorInBuild makes the resolver.Builder call into the ClientConn // during the Build call. 
We use two separate mutexes in the code which make // sure there is no data race in this code path, and also that there is no // deadlock. func (s) TestResolverErrorInBuild(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() r.InitialState(resolver.State{ServiceConfig: &serviceconfig.ParseResult{Err: errors.New("resolver build err")}}) cc, err := Dial(r.Scheme()+":///test.server", WithInsecure()) if err != nil { t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) } defer cc.Close() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() var dummy int const wantMsg = "error parsing service config" const wantCode = codes.Unavailable if err := cc.Invoke(ctx, "/foo/bar", &dummy, &dummy); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { t.Fatalf("cc.Invoke(_, _, _, _) = %v; want status.Code()==%v, status.Message() contains %q", err, wantCode, wantMsg) } } func (s) TestServiceConfigErrorRPC(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() cc, err := Dial(r.Scheme()+":///test.server", WithInsecure()) if err != nil { t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) } defer cc.Close() badsc := r.CC.ParseServiceConfig("bad config") r.UpdateState(resolver.State{ServiceConfig: badsc}) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() var dummy int const wantMsg = "error parsing service config" const wantCode = codes.Unavailable if err := cc.Invoke(ctx, "/foo/bar", &dummy, &dummy); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { t.Fatalf("cc.Invoke(_, _, _, _) = %v; want status.Code()==%v, status.Message() contains %q", err, wantCode, wantMsg) } } grpc-go-1.29.1/rpc_util.go000066400000000000000000000651561365033716300153360ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "bytes" "compress/gzip" "context" "encoding/binary" "fmt" "io" "io/ioutil" "math" "net/url" "strings" "sync" "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // Compressor defines the interface gRPC uses to compress a message. // // Deprecated: use package encoding. type Compressor interface { // Do compresses p into w. Do(w io.Writer, p []byte) error // Type returns the compression algorithm the Compressor uses. Type() string } type gzipCompressor struct { pool sync.Pool } // NewGZIPCompressor creates a Compressor based on GZIP. // // Deprecated: use package encoding/gzip. func NewGZIPCompressor() Compressor { c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) return c } // NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead // of assuming DefaultCompression. // // The error returned will be nil if the level is valid. // // Deprecated: use package encoding/gzip. 
func NewGZIPCompressorWithLevel(level int) (Compressor, error) { if level < gzip.DefaultCompression || level > gzip.BestCompression { return nil, fmt.Errorf("grpc: invalid compression level: %d", level) } return &gzipCompressor{ pool: sync.Pool{ New: func() interface{} { w, err := gzip.NewWriterLevel(ioutil.Discard, level) if err != nil { panic(err) } return w }, }, }, nil } func (c *gzipCompressor) Do(w io.Writer, p []byte) error { z := c.pool.Get().(*gzip.Writer) defer c.pool.Put(z) z.Reset(w) if _, err := z.Write(p); err != nil { return err } return z.Close() } func (c *gzipCompressor) Type() string { return "gzip" } // Decompressor defines the interface gRPC uses to decompress a message. // // Deprecated: use package encoding. type Decompressor interface { // Do reads the data from r and uncompress them. Do(r io.Reader) ([]byte, error) // Type returns the compression algorithm the Decompressor uses. Type() string } type gzipDecompressor struct { pool sync.Pool } // NewGZIPDecompressor creates a Decompressor based on GZIP. // // Deprecated: use package encoding/gzip. func NewGZIPDecompressor() Decompressor { return &gzipDecompressor{} } func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { var z *gzip.Reader switch maybeZ := d.pool.Get().(type) { case nil: newZ, err := gzip.NewReader(r) if err != nil { return nil, err } z = newZ case *gzip.Reader: z = maybeZ if err := z.Reset(r); err != nil { d.pool.Put(z) return nil, err } } defer func() { z.Close() d.pool.Put(z) }() return ioutil.ReadAll(z) } func (d *gzipDecompressor) Type() string { return "gzip" } // callInfo contains all related configuration and information about an RPC. 
type callInfo struct { compressorType string failFast bool stream ClientStream maxReceiveMessageSize *int maxSendMessageSize *int creds credentials.PerRPCCredentials contentSubtype string codec baseCodec maxRetryRPCBufferSize int } func defaultCallInfo() *callInfo { return &callInfo{ failFast: true, maxRetryRPCBufferSize: 256 * 1024, // 256KB } } // CallOption configures a Call before it starts or extracts information from // a Call after it completes. type CallOption interface { // before is called before the call is sent to any server. If before // returns a non-nil error, the RPC fails with that error. before(*callInfo) error // after is called after the call has completed. after cannot return an // error, so any failures should be reported via output parameters. after(*callInfo) } // EmptyCallOption does not alter the Call configuration. // It can be embedded in another structure to carry satellite data for use // by interceptors. type EmptyCallOption struct{} func (EmptyCallOption) before(*callInfo) error { return nil } func (EmptyCallOption) after(*callInfo) {} // Header returns a CallOptions that retrieves the header metadata // for a unary RPC. func Header(md *metadata.MD) CallOption { return HeaderCallOption{HeaderAddr: md} } // HeaderCallOption is a CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. // This is an EXPERIMENTAL API. type HeaderCallOption struct { HeaderAddr *metadata.MD } func (o HeaderCallOption) before(c *callInfo) error { return nil } func (o HeaderCallOption) after(c *callInfo) { if c.stream != nil { *o.HeaderAddr, _ = c.stream.Header() } } // Trailer returns a CallOptions that retrieves the trailer metadata // for a unary RPC. func Trailer(md *metadata.MD) CallOption { return TrailerCallOption{TrailerAddr: md} } // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. 
// This is an EXPERIMENTAL API. type TrailerCallOption struct { TrailerAddr *metadata.MD } func (o TrailerCallOption) before(c *callInfo) error { return nil } func (o TrailerCallOption) after(c *callInfo) { if c.stream != nil { *o.TrailerAddr = c.stream.Trailer() } } // Peer returns a CallOption that retrieves peer information for a unary RPC. // The peer field will be populated *after* the RPC completes. func Peer(p *peer.Peer) CallOption { return PeerCallOption{PeerAddr: p} } // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. // This is an EXPERIMENTAL API. type PeerCallOption struct { PeerAddr *peer.Peer } func (o PeerCallOption) before(c *callInfo) error { return nil } func (o PeerCallOption) after(c *callInfo) { if c.stream != nil { if x, ok := peer.FromContext(c.stream.Context()); ok { *o.PeerAddr = *x } } } // WaitForReady configures the action to take when an RPC is attempted on broken // connections or unreachable servers. If waitForReady is false, the RPC will fail // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will // retry the call if it fails due to a transient error. gRPC will not retry if // data was written to the wire unless the server indicates it did not process // the data. Please refer to // https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. // // By default, RPCs don't "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } // FailFast is the opposite of WaitForReady. // // Deprecated: use WaitForReady. func FailFast(failFast bool) CallOption { return FailFastCallOption{FailFast: failFast} } // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. // This is an EXPERIMENTAL API. 
type FailFastCallOption struct { FailFast bool } func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } func (o FailFastCallOption) after(c *callInfo) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. func MaxCallRecvMsgSize(bytes int) CallOption { return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can receive. // This is an EXPERIMENTAL API. type MaxRecvMsgSizeCallOption struct { MaxRecvMsgSize int } func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. func MaxCallSendMsgSize(bytes int) CallOption { return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can send. // This is an EXPERIMENTAL API. type MaxSendMsgSizeCallOption struct { MaxSendMsgSize int } func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { return PerRPCCredsCallOption{Creds: creds} } // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. // This is an EXPERIMENTAL API. 
type PerRPCCredsCallOption struct { Creds credentials.PerRPCCredentials } func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } func (o PerRPCCredsCallOption) after(c *callInfo) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. // // This API is EXPERIMENTAL. func UseCompressor(name string) CallOption { return CompressorCallOption{CompressorType: name} } // CompressorCallOption is a CallOption that indicates the compressor to use. // This is an EXPERIMENTAL API. type CompressorCallOption struct { CompressorType string } func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } func (o CompressorCallOption) after(c *callInfo) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over // the wire will be "application/grpc+json". The content-subtype is converted // to lowercase before being included in Content-Type. See Content-Type on // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for // more details. // // If ForceCodec is not also used, the content-subtype will be used to look up // the Codec to use in the registry controlled by RegisterCodec. See the // documentation on RegisterCodec for details on registration. The lookup of // content-subtype is case-insensitive. If no such Codec is found, the call // will result in an error with code codes.Internal. // // If ForceCodec is also used, that Codec will be used for all request and // response messages, with the content-subtype set to the given contentSubtype // here for requests. 
func CallContentSubtype(contentSubtype string) CallOption { return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} } // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. // This is an EXPERIMENTAL API. type ContentSubtypeCallOption struct { ContentSubtype string } func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } func (o ContentSubtypeCallOption) after(c *callInfo) {} // ForceCodec returns a CallOption that will set the given Codec to be // used for all request and response messages for a call. The result of calling // String() will be used as the content-subtype in a case-insensitive manner. // // See Content-Type on // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for // more details. Also see the documentation on RegisterCodec and // CallContentSubtype for more details on the interaction between Codec and // content-subtype. // // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // // This is an EXPERIMENTAL API. func ForceCodec(codec encoding.Codec) CallOption { return ForceCodecCallOption{Codec: codec} } // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // // This is an EXPERIMENTAL API. type ForceCodecCallOption struct { Codec encoding.Codec } func (o ForceCodecCallOption) before(c *callInfo) error { c.codec = o.Codec return nil } func (o ForceCodecCallOption) after(c *callInfo) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. // // Deprecated: use ForceCodec instead. func CallCustomCodec(codec Codec) CallOption { return CustomCodecCallOption{Codec: codec} } // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // // This is an EXPERIMENTAL API. 
type CustomCodecCallOption struct { Codec Codec } func (o CustomCodecCallOption) before(c *callInfo) error { c.codec = o.Codec return nil } func (o CustomCodecCallOption) after(c *callInfo) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // // This API is EXPERIMENTAL. func MaxRetryRPCBufferSize(bytes int) CallOption { return MaxRetryRPCBufferSizeCallOption{bytes} } // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. // This is an EXPERIMENTAL API. type MaxRetryRPCBufferSizeCallOption struct { MaxRetryRPCBufferSize int } func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} // The format of the payload: compressed or not? type payloadFormat uint8 const ( compressionNone payloadFormat = 0 // no compression compressionMade payloadFormat = 1 // compressed ) // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. r io.Reader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte } // recvMsg reads a complete gRPC message from the stream. // // It returns the message and its payload (compression/encoding) // format. The caller owns the returned msg memory. // // If there is an error, possible values are: // * io.EOF, when no messages remain // * io.ErrUnexpectedEOF // * of type transport.ConnectionError // * an error from the status package // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. 
func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { if _, err := p.r.Read(p.header[:]); err != nil { return 0, nil, err } pf = payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { return pf, nil, nil } if int64(length) > int64(maxInt) { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) } if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead // of making it for each message: msg = make([]byte, int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } return pf, msg, nil } // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. func encode(c baseCodec, msg interface{}) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } b, err := c.Marshal(msg) if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } if uint(len(b)) > math.MaxUint32 { return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) } return b, nil } // compress returns the input bytes compressed by compressor or cp. If both // compressors are nil, returns nil. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. 
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { if compressor == nil && cp == nil { return nil, nil } wrapErr := func(err error) error { return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } cbuf := &bytes.Buffer{} if compressor != nil { z, err := compressor.Compress(cbuf) if err != nil { return nil, wrapErr(err) } if _, err := z.Write(in); err != nil { return nil, wrapErr(err) } if err := z.Close(); err != nil { return nil, wrapErr(err) } } else { if err := cp.Do(cbuf, in); err != nil { return nil, wrapErr(err) } } return cbuf.Bytes(), nil } const ( payloadLen = 1 sizeLen = 4 headerLen = payloadLen + sizeLen ) // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { hdr = make([]byte, headerLen) if compData != nil { hdr[0] = byte(compressionMade) data = compData } else { hdr[0] = byte(compressionNone) } // Write length of payload into buf binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) return hdr, data } func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, Data: data, Length: len(data), WireLength: len(payload) + headerLen, SentTime: t, } } func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { switch pf { case compressionNone: case compressionMade: if recvCompress == "" || recvCompress == encoding.Identity { return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") } if !haveCompressor { return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) } return nil } type payloadInfo struct { 
wireLength int // The compressed length got from wire. uncompressedBytes []byte } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { pf, d, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { payInfo.wireLength = len(d) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { return nil, st.Err() } var size int if pf == compressionMade { // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { d, err = dc.Do(bytes.NewReader(d)) size = len(d) } else { d, size, err = decompress(compressor, d, maxReceiveMessageSize) } if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } } else { size = len(d) } if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) } return d, nil } // Using compressor, decompress d, returning data and size. // Optionally, if data will be over maxReceiveMessageSize, just return the size. func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { dcReader, err := compressor.Decompress(bytes.NewReader(d)) if err != nil { return nil, 0, err } if sizer, ok := compressor.(interface { DecompressedSize(compressedBytes []byte) int }); ok { if size := sizer.DecompressedSize(d); size >= 0 { if size > maxReceiveMessageSize { return nil, size, nil } // size is used as an estimate to size the buffer, but we // will read more data if available. // +MinRead so ReadFrom will not reallocate if size is correct. 
buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return buf.Bytes(), int(bytesRead), err } } // Read from LimitReader with limit max+1. So if the underlying // reader is over limit, the result will be bigger than max. d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return d, len(d), err } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } if err := c.Unmarshal(d, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) } if payInfo != nil { payInfo.uncompressedBytes = d } return nil } // Information about RPC type rpcInfo struct { failfast bool preloaderInfo *compressorInfo } // Information about Preloader // Responsible for storing codec, and compressors // If stream (s) has context s.Context which stores rpcInfo that has non nil // pointers to codec, and compressors, then we can use preparedMsg for Async message prep // and reuse marshalled bytes type compressorInfo struct { codec baseCodec cp Compressor comp encoding.Compressor } type rpcInfoContextKey struct{} func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ failfast: failfast, preloaderInfo: &compressorInfo{ codec: codec, cp: cp, comp: comp, }, }) } func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { s, ok = 
ctx.Value(rpcInfoContextKey{}).(*rpcInfo) return } // Code returns the error code for err if it was produced by the rpc system. // Otherwise, it returns codes.Unknown. // // Deprecated: use status.Code instead. func Code(err error) codes.Code { return status.Code(err) } // ErrorDesc returns the error description of err if it was produced by the rpc system. // Otherwise, it returns err.Error() or empty string when err is nil. // // Deprecated: use status.Convert and Message method instead. func ErrorDesc(err error) string { return status.Convert(err).Message() } // Errorf returns an error containing an error code and a description; // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. func Errorf(c codes.Code, format string, a ...interface{}) error { return status.Errorf(c, format, a...) } // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { if err == nil || err == io.EOF { return err } if err == io.ErrUnexpectedEOF { return status.Error(codes.Internal, err.Error()) } if _, ok := status.FromError(err); ok { return err } switch e := err.(type) { case transport.ConnectionError: return status.Error(codes.Unavailable, e.Desc) default: switch err { case context.DeadlineExceeded: return status.Error(codes.DeadlineExceeded, err.Error()) case context.Canceled: return status.Error(codes.Canceled, err.Error()) } } return status.Error(codes.Unknown, err.Error()) } // setCallInfoCodec should only be called after CallOptions have been applied. func setCallInfoCodec(c *callInfo) error { if c.codec != nil { // codec was already set by a CallOption; use it. return nil } if c.contentSubtype == "" { // No codec specified in CallOptions; use proto by default. 
c.codec = encoding.GetCodec(proto.Name) return nil } // c.contentSubtype is already lowercased in CallContentSubtype c.codec = encoding.GetCodec(c.contentSubtype) if c.codec == nil { return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) } return nil } // parseDialTarget returns the network and address to pass to dialer func parseDialTarget(target string) (net string, addr string) { net = "tcp" m1 := strings.Index(target, ":") m2 := strings.Index(target, ":/") // handle unix:addr which will fail with url.Parse if m1 >= 0 && m2 < 0 { if n := target[0:m1]; n == "unix" { net = n addr = target[m1+1:] return net, addr } } if m2 >= 0 { t, err := url.Parse(target) if err != nil { return net, target } scheme := t.Scheme addr = t.Path if scheme == "unix" { net = scheme if addr == "" { addr = t.Host } return net, addr } } return net, target } // channelzData is used to store channelz related data for ClientConn, addrConn and Server. // These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic // operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. // Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. type channelzData struct { callsStarted int64 callsFailed int64 callsSucceeded int64 // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of // time.Time since it's more costly to atomically update time.Time variable than int64 variable. lastCallStartedTime int64 } // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest // support package version is 6. // // Older versions are kept for compatibility. They may be removed if // compatibility cannot be maintained. // // These constants should not be referenced from any other code. 
const ( SupportPackageIsVersion3 = true SupportPackageIsVersion4 = true SupportPackageIsVersion5 = true SupportPackageIsVersion6 = true ) const grpcUA = "grpc-go/" + Version grpc-go-1.29.1/rpc_util_test.go000066400000000000000000000203371365033716300163650ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "bytes" "compress/gzip" "io" "math" "reflect" "testing" "github.com/golang/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" protoenc "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" perfpb "google.golang.org/grpc/test/codec_perf" ) type fullReader struct { reader io.Reader } func (f fullReader) Read(p []byte) (int, error) { return io.ReadFull(f.reader, p) } var _ CallOption = EmptyCallOption{} // ensure EmptyCallOption implements the interface func (s) TestSimpleParsing(t *testing.T) { bigMsg := bytes.Repeat([]byte{'x'}, 1<<24) for _, test := range []struct { // input p []byte // outputs err error b []byte pt payloadFormat }{ {nil, io.EOF, nil, compressionNone}, {[]byte{0, 0, 0, 0, 0}, nil, nil, compressionNone}, {[]byte{0, 0, 0, 0, 1, 'a'}, nil, []byte{'a'}, compressionNone}, {[]byte{1, 0}, io.ErrUnexpectedEOF, nil, compressionNone}, {[]byte{0, 0, 0, 0, 10, 'a'}, io.ErrUnexpectedEOF, nil, compressionNone}, // Check that messages with length >= 2^24 
are parsed. {append([]byte{0, 1, 0, 0, 0}, bigMsg...), nil, bigMsg, compressionNone}, } { buf := fullReader{bytes.NewReader(test.p)} parser := &parser{r: buf} pt, b, err := parser.recvMsg(math.MaxInt32) if err != test.err || !bytes.Equal(b, test.b) || pt != test.pt { t.Fatalf("parser{%v}.recvMsg(_) = %v, %v, %v\nwant %v, %v, %v", test.p, pt, b, err, test.pt, test.b, test.err) } } } func (s) TestMultipleParsing(t *testing.T) { // Set a byte stream consists of 3 messages with their headers. p := []byte{0, 0, 0, 0, 1, 'a', 0, 0, 0, 0, 2, 'b', 'c', 0, 0, 0, 0, 1, 'd'} b := fullReader{bytes.NewReader(p)} parser := &parser{r: b} wantRecvs := []struct { pt payloadFormat data []byte }{ {compressionNone, []byte("a")}, {compressionNone, []byte("bc")}, {compressionNone, []byte("d")}, } for i, want := range wantRecvs { pt, data, err := parser.recvMsg(math.MaxInt32) if err != nil || pt != want.pt || !reflect.DeepEqual(data, want.data) { t.Fatalf("after %d calls, parser{%v}.recvMsg(_) = %v, %v, %v\nwant %v, %v, ", i, p, pt, data, err, want.pt, want.data) } } pt, data, err := parser.recvMsg(math.MaxInt32) if err != io.EOF { t.Fatalf("after %d recvMsgs calls, parser{%v}.recvMsg(_) = %v, %v, %v\nwant _, _, %v", len(wantRecvs), p, pt, data, err, io.EOF) } } func (s) TestEncode(t *testing.T) { for _, test := range []struct { // input msg proto.Message // outputs hdr []byte data []byte err error }{ {nil, []byte{0, 0, 0, 0, 0}, []byte{}, nil}, } { data, err := encode(encoding.GetCodec(protoenc.Name), test.msg) if err != test.err || !bytes.Equal(data, test.data) { t.Errorf("encode(_, %v) = %v, %v; want %v, %v", test.msg, data, err, test.data, test.err) continue } if hdr, _ := msgHeader(data, nil); !bytes.Equal(hdr, test.hdr) { t.Errorf("msgHeader(%v, false) = %v; want %v", data, hdr, test.hdr) } } } func (s) TestCompress(t *testing.T) { bestCompressor, err := NewGZIPCompressorWithLevel(gzip.BestCompression) if err != nil { t.Fatalf("Could not initialize gzip compressor with best 
compression.") } bestSpeedCompressor, err := NewGZIPCompressorWithLevel(gzip.BestSpeed) if err != nil { t.Fatalf("Could not initialize gzip compressor with best speed compression.") } defaultCompressor, err := NewGZIPCompressorWithLevel(gzip.BestSpeed) if err != nil { t.Fatalf("Could not initialize gzip compressor with default compression.") } level5, err := NewGZIPCompressorWithLevel(5) if err != nil { t.Fatalf("Could not initialize gzip compressor with level 5 compression.") } for _, test := range []struct { // input data []byte cp Compressor dc Decompressor // outputs err error }{ {make([]byte, 1024), NewGZIPCompressor(), NewGZIPDecompressor(), nil}, {make([]byte, 1024), bestCompressor, NewGZIPDecompressor(), nil}, {make([]byte, 1024), bestSpeedCompressor, NewGZIPDecompressor(), nil}, {make([]byte, 1024), defaultCompressor, NewGZIPDecompressor(), nil}, {make([]byte, 1024), level5, NewGZIPDecompressor(), nil}, } { b := new(bytes.Buffer) if err := test.cp.Do(b, test.data); err != test.err { t.Fatalf("Compressor.Do(_, %v) = %v, want %v", test.data, err, test.err) } if b.Len() >= len(test.data) { t.Fatalf("The compressor fails to compress data.") } if p, err := test.dc.Do(b); err != nil || !bytes.Equal(test.data, p) { t.Fatalf("Decompressor.Do(%v) = %v, %v, want %v, ", b, p, err, test.data) } } } func (s) TestToRPCErr(t *testing.T) { for _, test := range []struct { // input errIn error // outputs errOut error }{ {transport.ErrConnClosing, status.Error(codes.Unavailable, transport.ErrConnClosing.Desc)}, {io.ErrUnexpectedEOF, status.Error(codes.Internal, io.ErrUnexpectedEOF.Error())}, } { err := toRPCErr(test.errIn) if _, ok := status.FromError(err); !ok { t.Errorf("toRPCErr{%v} returned type %T, want %T", test.errIn, err, status.Error) } if !testutils.StatusErrEqual(err, test.errOut) { t.Errorf("toRPCErr{%v} = %v \nwant %v", test.errIn, err, test.errOut) } } } func (s) TestParseDialTarget(t *testing.T) { for _, test := range []struct { target, wantNet, wantAddr 
string }{ {"unix:etcd:0", "unix", "etcd:0"}, {"unix:///tmp/unix-3", "unix", "/tmp/unix-3"}, {"unix://domain", "unix", "domain"}, {"unix://etcd:0", "unix", "etcd:0"}, {"unix:///etcd:0", "unix", "/etcd:0"}, {"passthrough://unix://domain", "tcp", "passthrough://unix://domain"}, {"https://google.com:443", "tcp", "https://google.com:443"}, {"dns:///google.com", "tcp", "dns:///google.com"}, {"/unix/socket/address", "tcp", "/unix/socket/address"}, } { gotNet, gotAddr := parseDialTarget(test.target) if gotNet != test.wantNet || gotAddr != test.wantAddr { t.Errorf("parseDialTarget(%q) = %s, %s want %s, %s", test.target, gotNet, gotAddr, test.wantNet, test.wantAddr) } } } // bmEncode benchmarks encoding a Protocol Buffer message containing mSize // bytes. func bmEncode(b *testing.B, mSize int) { cdc := encoding.GetCodec(protoenc.Name) msg := &perfpb.Buffer{Body: make([]byte, mSize)} encodeData, _ := encode(cdc, msg) encodedSz := int64(len(encodeData)) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { encode(cdc, msg) } b.SetBytes(encodedSz) } func BenchmarkEncode1B(b *testing.B) { bmEncode(b, 1) } func BenchmarkEncode1KiB(b *testing.B) { bmEncode(b, 1024) } func BenchmarkEncode8KiB(b *testing.B) { bmEncode(b, 8*1024) } func BenchmarkEncode64KiB(b *testing.B) { bmEncode(b, 64*1024) } func BenchmarkEncode512KiB(b *testing.B) { bmEncode(b, 512*1024) } func BenchmarkEncode1MiB(b *testing.B) { bmEncode(b, 1024*1024) } // bmCompressor benchmarks a compressor of a Protocol Buffer message containing // mSize bytes. 
func bmCompressor(b *testing.B, mSize int, cp Compressor) { payload := make([]byte, mSize) cBuf := bytes.NewBuffer(make([]byte, mSize)) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { cp.Do(cBuf, payload) cBuf.Reset() } } func BenchmarkGZIPCompressor1B(b *testing.B) { bmCompressor(b, 1, NewGZIPCompressor()) } func BenchmarkGZIPCompressor1KiB(b *testing.B) { bmCompressor(b, 1024, NewGZIPCompressor()) } func BenchmarkGZIPCompressor8KiB(b *testing.B) { bmCompressor(b, 8*1024, NewGZIPCompressor()) } func BenchmarkGZIPCompressor64KiB(b *testing.B) { bmCompressor(b, 64*1024, NewGZIPCompressor()) } func BenchmarkGZIPCompressor512KiB(b *testing.B) { bmCompressor(b, 512*1024, NewGZIPCompressor()) } func BenchmarkGZIPCompressor1MiB(b *testing.B) { bmCompressor(b, 1024*1024, NewGZIPCompressor()) } grpc-go-1.29.1/security/000077500000000000000000000000001365033716300150205ustar00rootroot00000000000000grpc-go-1.29.1/security/advancedtls/000077500000000000000000000000001365033716300173105ustar00rootroot00000000000000grpc-go-1.29.1/security/advancedtls/advancedtls.go000066400000000000000000000357441365033716300221440ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package advancedtls is a utility library containing functions to construct // credentials.TransportCredentials that can perform credential reloading and custom // server authorization. 
package advancedtls import ( "context" "crypto/tls" "crypto/x509" "fmt" "net" "syscall" "time" "google.golang.org/grpc/credentials" ) // VerificationFuncParams contains the parameters available to users when implementing CustomVerificationFunc. type VerificationFuncParams struct { ServerName string RawCerts [][]byte VerifiedChains [][]*x509.Certificate } // VerificationResults contains the information about results of CustomVerificationFunc. // VerificationResults is an empty struct for now. It may be extended in the future to include more information. type VerificationResults struct{} // CustomVerificationFunc is the function defined by users to perform custom server authorization. // CustomVerificationFunc returns nil if the authorization fails; otherwise returns an empty struct. type CustomVerificationFunc func(params *VerificationFuncParams) (*VerificationResults, error) // GetRootCAsParams contains the parameters available to users when implementing GetRootCAs. type GetRootCAsParams struct { RawConn net.Conn RawCerts [][]byte } // GetRootCAsResults contains the results of GetRootCAs. // If users want to reload the root trust certificate, it is required to return the proper TrustCerts in GetRootCAs. type GetRootCAsResults struct { TrustCerts *x509.CertPool } // RootCertificateOptions contains a field and a function for obtaining root trust certificates. // It is used by both ClientOptions and ServerOptions. Note that RootCertificateOptions is required // to be correctly set on client side; on server side, it is only required when mutual TLS is // enabled(RequireClientCert in ServerOptions is true). type RootCertificateOptions struct { // If field RootCACerts is set, field GetRootCAs will be ignored. RootCACerts will be used // every time when verifying the peer certificates, without performing root certificate reloading. 
RootCACerts *x509.CertPool // If GetRootCAs is set and RootCACerts is nil, GetRootCAs will be invoked every time // asked to check certificates sent from the server when a new connection is established. // This is known as root CA certificate reloading. GetRootCAs func(params *GetRootCAsParams) (*GetRootCAsResults, error) } // ClientOptions contains all the fields and functions needed to be filled by the client. // General rules for certificate setting on client side: // Certificates or GetClientCertificate indicates the certificates sent from the client to the // server to prove client's identities. The rules for setting these two fields are: // If requiring mutual authentication on server side: // Either Certificates or GetClientCertificate must be set; the other will be ignored // Otherwise: // Nothing needed(the two fields will be ignored) type ClientOptions struct { // If field Certificates is set, field GetClientCertificate will be ignored. The client will use // Certificates every time when asked for a certificate, without performing certificate reloading. Certificates []tls.Certificate // If GetClientCertificate is set and Certificates is nil, the client will invoke this // function every time asked to present certificates to the server when a new connection is // established. This is known as peer certificate reloading. GetClientCertificate func(*tls.CertificateRequestInfo) (*tls.Certificate, error) // VerifyPeer is a custom server authorization checking after certificate signature check. // If this is set, we will replace the hostname check with this customized authorization check. // If this is nil, we fall back to typical hostname check. VerifyPeer CustomVerificationFunc // ServerNameOverride is for testing only. If set to a non-empty string, // it will override the virtual host name of authority (e.g. :authority header field) in requests. 
ServerNameOverride string RootCertificateOptions } // ServerOptions contains all the fields and functions needed to be filled by the client. // General rules for certificate setting on server side: // Certificates or GetClientCertificate indicates the certificates sent from the server to // the client to prove server's identities. The rules for setting these two fields are: // Either Certificates or GetCertificate must be set; the other will be ignored type ServerOptions struct { // If field Certificates is set, field GetClientCertificate will be ignored. The server will use // Certificates every time when asked for a certificate, without performing certificate reloading. Certificates []tls.Certificate // If GetClientCertificate is set and Certificates is nil, the server will invoke this // function every time asked to present certificates to the client when a new connection is // established. This is known as peer certificate reloading. GetCertificate func(*tls.ClientHelloInfo) (*tls.Certificate, error) RootCertificateOptions // If the server want the client to send certificates. RequireClientCert bool } func (o *ClientOptions) config() (*tls.Config, error) { if o.RootCACerts == nil && o.GetRootCAs == nil && o.VerifyPeer == nil { return nil, fmt.Errorf( "client needs to provide root CA certs, or a custom verification function") } // We have to set InsecureSkipVerify to true to skip the default checks and use the // verification function we built from buildVerifyFunc. 
config := &tls.Config{ ServerName: o.ServerNameOverride, Certificates: o.Certificates, GetClientCertificate: o.GetClientCertificate, RootCAs: o.RootCACerts, InsecureSkipVerify: true, } return config, nil } func (o *ServerOptions) config() (*tls.Config, error) { if o.Certificates == nil && o.GetCertificate == nil { return nil, fmt.Errorf("either Certificates or GetCertificate must be specified") } if o.RequireClientCert && o.GetRootCAs == nil && o.RootCACerts == nil { return nil, fmt.Errorf("server needs to provide root CA certs if requiring client cert") } clientAuth := tls.NoClientCert if o.RequireClientCert { // We fall back to normal config settings if users don't need to reload root certificates. // If using RequireAndVerifyClientCert, the underlying stack would use the default // checking and ignore the verification function we built from buildVerifyFunc. // If using RequireAnyClientCert, the code would skip all the checks and use the // function from buildVerifyFunc. if o.RootCACerts != nil { clientAuth = tls.RequireAndVerifyClientCert } else { clientAuth = tls.RequireAnyClientCert } } config := &tls.Config{ ClientAuth: clientAuth, Certificates: o.Certificates, GetCertificate: o.GetCertificate, } if o.RootCACerts != nil { config.ClientCAs = o.RootCACerts } return config, nil } // advancedTLSCreds is the credentials required for authenticating a connection using TLS. type advancedTLSCreds struct { config *tls.Config verifyFunc CustomVerificationFunc getRootCAs func(params *GetRootCAsParams) (*GetRootCAsResults, error) isClient bool } func (c advancedTLSCreds) Info() credentials.ProtocolInfo { return credentials.ProtocolInfo{ SecurityProtocol: "tls", SecurityVersion: "1.2", ServerName: c.config.ServerName, } } func (c *advancedTLSCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { // Use local cfg to avoid clobbering ServerName if using multiple endpoints. 
cfg := cloneTLSConfig(c.config) // We return the full authority name to users if ServerName is empty without // stripping the trailing port. if cfg.ServerName == "" { cfg.ServerName = authority } cfg.VerifyPeerCertificate = buildVerifyFunc(c, cfg.ServerName, rawConn) conn := tls.Client(rawConn, cfg) errChannel := make(chan error, 1) go func() { errChannel <- conn.Handshake() close(errChannel) }() select { case err := <-errChannel: if err != nil { conn.Close() return nil, nil, err } case <-ctx.Done(): conn.Close() return nil, nil, ctx.Err() } info := credentials.TLSInfo{ State: conn.ConnectionState(), CommonAuthInfo: credentials.CommonAuthInfo{ SecurityLevel: credentials.PrivacyAndIntegrity, }, } return WrapSyscallConn(rawConn, conn), info, nil } func (c *advancedTLSCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { cfg := cloneTLSConfig(c.config) // We build server side verification function only when root cert reloading is needed. if c.getRootCAs != nil { cfg.VerifyPeerCertificate = buildVerifyFunc(c, "", rawConn) } conn := tls.Server(rawConn, cfg) if err := conn.Handshake(); err != nil { conn.Close() return nil, nil, err } info := credentials.TLSInfo{ State: conn.ConnectionState(), CommonAuthInfo: credentials.CommonAuthInfo{ SecurityLevel: credentials.PrivacyAndIntegrity, }, } return WrapSyscallConn(rawConn, conn), info, nil } func (c *advancedTLSCreds) Clone() credentials.TransportCredentials { return &advancedTLSCreds{ config: cloneTLSConfig(c.config), verifyFunc: c.verifyFunc, getRootCAs: c.getRootCAs, isClient: c.isClient, } } func (c *advancedTLSCreds) OverrideServerName(serverNameOverride string) error { c.config.ServerName = serverNameOverride return nil } // The function buildVerifyFunc is used when users want root cert reloading, and possibly custom // server authorization check. // We have to build our own verification function here because current tls module: // 1. 
does not have a good support on root cert reloading // 2. will ignore basic certificate check when setting InsecureSkipVerify to true func buildVerifyFunc(c *advancedTLSCreds, serverName string, rawConn net.Conn) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { // If users didn't specify either rootCAs or getRootCAs on client side, // as we see some use cases such as https://github.com/grpc/grpc/pull/20530, // instead of failing, we just don't validate the server cert and let // application decide via VerifyPeer if c.isClient && c.config.RootCAs == nil && c.getRootCAs == nil { if c.verifyFunc != nil { _, err := c.verifyFunc(&VerificationFuncParams{ ServerName: serverName, RawCerts: rawCerts, VerifiedChains: verifiedChains, }) return err } } var rootCAs *x509.CertPool if c.isClient { rootCAs = c.config.RootCAs } else { rootCAs = c.config.ClientCAs } // reload root CA certs if rootCAs == nil && c.getRootCAs != nil { results, err := c.getRootCAs(&GetRootCAsParams{ RawConn: rawConn, RawCerts: rawCerts, }) if err != nil { return err } rootCAs = results.TrustCerts } // verify peers' certificates against RootCAs and get verifiedChains certs := make([]*x509.Certificate, len(rawCerts)) for i, asn1Data := range rawCerts { cert, err := x509.ParseCertificate(asn1Data) if err != nil { return err } certs[i] = cert } opts := x509.VerifyOptions{ Roots: rootCAs, CurrentTime: time.Now(), Intermediates: x509.NewCertPool(), } if !c.isClient { opts.KeyUsages = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} } else { opts.KeyUsages = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} } for _, cert := range certs[1:] { opts.Intermediates.AddCert(cert) } // We use default hostname check if users don't specify verifyFunc function if c.isClient && c.verifyFunc == nil && serverName != "" { opts.DNSName = serverName } verifiedChains, err := certs[0].Verify(opts) if err != nil { return err } if 
c.isClient && c.verifyFunc != nil { if c.verifyFunc != nil { _, err := c.verifyFunc(&VerificationFuncParams{ ServerName: serverName, RawCerts: rawCerts, VerifiedChains: verifiedChains, }) return err } } return nil } } // NewClientCreds uses ClientOptions to construct a TransportCredentials based on TLS. func NewClientCreds(o *ClientOptions) (credentials.TransportCredentials, error) { conf, err := o.config() if err != nil { return nil, err } tc := &advancedTLSCreds{ config: conf, isClient: true, getRootCAs: o.GetRootCAs, verifyFunc: o.VerifyPeer, } tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) return tc, nil } // NewServerCreds uses ServerOptions to construct a TransportCredentials based on TLS. func NewServerCreds(o *ServerOptions) (credentials.TransportCredentials, error) { conf, err := o.config() if err != nil { return nil, err } tc := &advancedTLSCreds{ config: conf, isClient: false, getRootCAs: o.GetRootCAs, } tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) return tc, nil } // TODO(ZhenLian): The code below are duplicates with gRPC-Go under // credentials/internal. Consider refactoring in the future. const alpnProtoStrH2 = "h2" func appendH2ToNextProtos(ps []string) []string { for _, p := range ps { if p == alpnProtoStrH2 { return ps } } ret := make([]string, 0, len(ps)+1) ret = append(ret, ps...) return append(ret, alpnProtoStrH2) } // We give syscall.Conn a new name here since syscall.Conn and net.Conn used // below have the same names. type sysConn = syscall.Conn // syscallConn keeps reference of rawConn to support syscall.Conn for channelz. // SyscallConn() (the method in interface syscall.Conn) is explicitly // implemented on this type, // // Interface syscall.Conn is implemented by most net.Conn implementations (e.g. // TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns // that embed net.Conn don't implement syscall.Conn. 
(Side note: tls.Conn // doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't // help here). type syscallConn struct { net.Conn // sysConn is a type alias of syscall.Conn. It's necessary because the name // `Conn` collides with `net.Conn`. sysConn } // WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that // implements syscall.Conn. rawConn will be used to support syscall, and newConn // will be used for read/write. // // This function returns newConn if rawConn doesn't implement syscall.Conn. func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { sysConn, ok := rawConn.(syscall.Conn) if !ok { return newConn } return &syscallConn{ Conn: newConn, sysConn: sysConn, } } func cloneTLSConfig(cfg *tls.Config) *tls.Config { if cfg == nil { return &tls.Config{} } return cfg.Clone() } grpc-go-1.29.1/security/advancedtls/advancedtls_integration_test.go000066400000000000000000000352771365033716300256070ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package advancedtls import ( "context" "crypto/tls" "crypto/x509" "fmt" "net" "sync" "testing" "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials" pb "google.golang.org/grpc/examples/helloworld/helloworld" "google.golang.org/grpc/security/advancedtls/testdata" ) var ( address = "localhost:50051" port = ":50051" ) // stageInfo contains a stage number indicating the current phase of each integration test, and a mutex. 
// Based on the stage number of current test, we will use different certificates and server authorization // functions to check if our tests behave as expected. type stageInfo struct { mutex sync.Mutex stage int } func (s *stageInfo) increase() { s.mutex.Lock() defer s.mutex.Unlock() s.stage = s.stage + 1 } func (s *stageInfo) read() int { s.mutex.Lock() defer s.mutex.Unlock() return s.stage } func (s *stageInfo) reset() { s.mutex.Lock() defer s.mutex.Unlock() s.stage = 0 } // certStore contains all the certificates used in the integration tests. type certStore struct { // clientPeer1 is the certificate sent by client to prove its identity. It is trusted by serverTrust1. clientPeer1 tls.Certificate // clientPeer2 is the certificate sent by client to prove its identity. It is trusted by serverTrust2. clientPeer2 tls.Certificate // serverPeer1 is the certificate sent by server to prove its identity. It is trusted by clientTrust1. serverPeer1 tls.Certificate // serverPeer2 is the certificate sent by server to prove its identity. It is trusted by clientTrust2. serverPeer2 tls.Certificate clientTrust1 *x509.CertPool clientTrust2 *x509.CertPool serverTrust1 *x509.CertPool serverTrust2 *x509.CertPool } // loadCerts function is used to load test certificates at the beginning of each integration test. 
func (cs *certStore) loadCerts() error { var err error cs.clientPeer1, err = tls.LoadX509KeyPair(testdata.Path("client_cert_1.pem"), testdata.Path("client_key_1.pem")) if err != nil { return err } cs.clientPeer2, err = tls.LoadX509KeyPair(testdata.Path("client_cert_2.pem"), testdata.Path("client_key_2.pem")) if err != nil { return err } cs.serverPeer1, err = tls.LoadX509KeyPair(testdata.Path("server_cert_1.pem"), testdata.Path("server_key_1.pem")) if err != nil { return err } cs.serverPeer2, err = tls.LoadX509KeyPair(testdata.Path("server_cert_2.pem"), testdata.Path("server_key_2.pem")) if err != nil { return err } cs.clientTrust1, err = readTrustCert(testdata.Path("client_trust_cert_1.pem")) if err != nil { return err } cs.clientTrust2, err = readTrustCert(testdata.Path("client_trust_cert_2.pem")) if err != nil { return err } cs.serverTrust1, err = readTrustCert(testdata.Path("server_trust_cert_1.pem")) if err != nil { return err } cs.serverTrust2, err = readTrustCert(testdata.Path("server_trust_cert_2.pem")) if err != nil { return err } return nil } // serverImpl is used to implement pb.GreeterServer. type serverImpl struct{} // SayHello is a simple implementation of pb.GreeterServer. 
func (s *serverImpl) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { return &pb.HelloReply{Message: "Hello " + in.Name}, nil } func callAndVerify(msg string, client pb.GreeterClient, shouldFail bool) error { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() _, err := client.SayHello(ctx, &pb.HelloRequest{Name: msg}) if want, got := shouldFail == true, err != nil; got != want { return fmt.Errorf("want and got mismatch, want shouldFail=%v, got fail=%v, rpc error: %v", want, got, err) } return nil } func callAndVerifyWithClientConn(connCtx context.Context, msg string, creds credentials.TransportCredentials, shouldFail bool) (*grpc.ClientConn, pb.GreeterClient, error) { var conn *grpc.ClientConn var err error // If we want the test to fail, we establish a non-blocking connection to avoid it hangs and killed by the context. if shouldFail { conn, err = grpc.DialContext(connCtx, address, grpc.WithTransportCredentials(creds)) if err != nil { return nil, nil, fmt.Errorf("client failed to connect to %s. Error: %v", address, err) } } else { conn, err = grpc.DialContext(connCtx, address, grpc.WithTransportCredentials(creds), grpc.WithBlock()) if err != nil { return nil, nil, fmt.Errorf("client failed to connect to %s. Error: %v", address, err) } } greetClient := pb.NewGreeterClient(conn) err = callAndVerify(msg, greetClient, shouldFail) if err != nil { return nil, nil, err } return conn, greetClient, nil } // The advanced TLS features are tested in different stages. // At stage 0, we establish a good connection between client and server. // At stage 1, we change one factor(it could be we change the server's certificate, or server authorization function, etc), // and test if the following connections would be dropped. // At stage 2, we re-establish the connection by changing the counterpart of the factor we modified in stage 1. 
// (could be change the client's trust certificate, or change server authorization function, etc) func TestEnd2End(t *testing.T) { cs := &certStore{} err := cs.loadCerts() if err != nil { t.Fatalf("failed to load certs: %v", err) } stage := &stageInfo{} for _, test := range []struct { desc string clientCert []tls.Certificate clientGetCert func(*tls.CertificateRequestInfo) (*tls.Certificate, error) clientRoot *x509.CertPool clientGetRoot func(params *GetRootCAsParams) (*GetRootCAsResults, error) clientVerifyFunc CustomVerificationFunc serverCert []tls.Certificate serverGetCert func(*tls.ClientHelloInfo) (*tls.Certificate, error) serverRoot *x509.CertPool serverGetRoot func(params *GetRootCAsParams) (*GetRootCAsResults, error) }{ // Test Scenarios: // At initialization(stage = 0), client will be initialized with cert clientPeer1 and clientTrust1, server with serverPeer1 and serverTrust1. // The mutual authentication works at the beginning, since clientPeer1 is trusted by serverTrust1, and serverPeer1 by clientTrust1. // At stage 1, client changes clientPeer1 to clientPeer2. Since clientPeer2 is not trusted by serverTrust1, following rpc calls are expected // to fail, while the previous rpc calls are still good because those are already authenticated. // At stage 2, the server changes serverTrust1 to serverTrust2, and we should see it again accepts the connection, since clientPeer2 is trusted // by serverTrust2. 
{ desc: "TestClientPeerCertReloadServerTrustCertReload", clientCert: nil, clientGetCert: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { switch stage.read() { case 0: return &cs.clientPeer1, nil default: return &cs.clientPeer2, nil } }, clientGetRoot: nil, clientRoot: cs.clientTrust1, clientVerifyFunc: func(params *VerificationFuncParams) (*VerificationResults, error) { return &VerificationResults{}, nil }, serverCert: []tls.Certificate{cs.serverPeer1}, serverGetCert: nil, serverRoot: nil, serverGetRoot: func(params *GetRootCAsParams) (*GetRootCAsResults, error) { switch stage.read() { case 0, 1: return &GetRootCAsResults{TrustCerts: cs.serverTrust1}, nil default: return &GetRootCAsResults{TrustCerts: cs.serverTrust2}, nil } }, }, // Test Scenarios: // At initialization(stage = 0), client will be initialized with cert clientPeer1 and clientTrust1, server with serverPeer1 and serverTrust1. // The mutual authentication works at the beginning, since clientPeer1 is trusted by serverTrust1, and serverPeer1 by clientTrust1. // At stage 1, server changes serverPeer1 to serverPeer2. Since serverPeer2 is not trusted by clientTrust1, following rpc calls are expected // to fail, while the previous rpc calls are still good because those are already authenticated. // At stage 2, the client changes clientTrust1 to clientTrust2, and we should see it again accepts the connection, since serverPeer2 is trusted // by clientTrust2. 
{ desc: "TestServerPeerCertReloadClientTrustCertReload", clientCert: []tls.Certificate{cs.clientPeer1}, clientGetCert: nil, clientGetRoot: func(params *GetRootCAsParams) (*GetRootCAsResults, error) { switch stage.read() { case 0, 1: return &GetRootCAsResults{TrustCerts: cs.clientTrust1}, nil default: return &GetRootCAsResults{TrustCerts: cs.clientTrust2}, nil } }, clientRoot: nil, clientVerifyFunc: func(params *VerificationFuncParams) (*VerificationResults, error) { return &VerificationResults{}, nil }, serverCert: nil, serverGetCert: func(*tls.ClientHelloInfo) (*tls.Certificate, error) { switch stage.read() { case 0: return &cs.serverPeer1, nil default: return &cs.serverPeer2, nil } }, serverRoot: cs.serverTrust1, serverGetRoot: nil, }, // Test Scenarios: // At initialization(stage = 0), client will be initialized with cert clientPeer1 and clientTrust1, server with serverPeer1 and serverTrust1. // The mutual authentication works at the beginning, since clientPeer1 trusted by serverTrust1, serverPeer1 by clientTrust1, and also the // custom server authorization check allows the CommonName on serverPeer1. // At stage 1, server changes serverPeer1 to serverPeer2, and client changes clientTrust1 to clientTrust2. Although serverPeer2 is trusted by // clientTrust2, our authorization check only accepts serverPeer1, and hence the following calls should fail. Previous connections should // not be affected. // At stage 2, the client changes authorization check to only accept serverPeer2. Now we should see the connection becomes normal again. 
{ desc: "TestClientCustomServerAuthz", clientCert: []tls.Certificate{cs.clientPeer1}, clientGetCert: nil, clientGetRoot: func(params *GetRootCAsParams) (*GetRootCAsResults, error) { switch stage.read() { case 0: return &GetRootCAsResults{TrustCerts: cs.clientTrust1}, nil default: return &GetRootCAsResults{TrustCerts: cs.clientTrust2}, nil } }, clientRoot: nil, clientVerifyFunc: func(params *VerificationFuncParams) (*VerificationResults, error) { if len(params.RawCerts) == 0 { return nil, fmt.Errorf("no peer certs") } cert, err := x509.ParseCertificate(params.RawCerts[0]) if err != nil || cert == nil { return nil, fmt.Errorf("failed to parse certificate: " + err.Error()) } authzCheck := false switch stage.read() { case 0, 1: // foo.bar.com is the common name on serverPeer1 if cert.Subject.CommonName == "foo.bar.com" { authzCheck = true } default: // foo.bar.server2.com is the common name on serverPeer2 if cert.Subject.CommonName == "foo.bar.server2.com" { authzCheck = true } } if authzCheck { return &VerificationResults{}, nil } return nil, fmt.Errorf("custom authz check fails") }, serverCert: nil, serverGetCert: func(*tls.ClientHelloInfo) (*tls.Certificate, error) { switch stage.read() { case 0: return &cs.serverPeer1, nil default: return &cs.serverPeer2, nil } }, serverRoot: cs.serverTrust1, serverGetRoot: nil, }, } { test := test t.Run(test.desc, func(t *testing.T) { // Start a server using ServerOptions in another goroutine. 
serverOptions := &ServerOptions{ Certificates: test.serverCert, GetCertificate: test.serverGetCert, RootCertificateOptions: RootCertificateOptions{ RootCACerts: test.serverRoot, GetRootCAs: test.serverGetRoot, }, RequireClientCert: true, } serverTLSCreds, err := NewServerCreds(serverOptions) if err != nil { t.Fatalf("failed to create server creds: %v", err) } s := grpc.NewServer(grpc.Creds(serverTLSCreds)) defer s.Stop() go func(s *grpc.Server) { lis, err := net.Listen("tcp", port) // defer lis.Close() if err != nil { t.Fatalf("failed to listen: %v", err) } pb.RegisterGreeterServer(s, &serverImpl{}) if err := s.Serve(lis); err != nil { t.Fatalf("failed to serve: %v", err) } }(s) clientOptions := &ClientOptions{ Certificates: test.clientCert, GetClientCertificate: test.clientGetCert, VerifyPeer: test.clientVerifyFunc, RootCertificateOptions: RootCertificateOptions{ RootCACerts: test.clientRoot, GetRootCAs: test.clientGetRoot, }, } clientTLSCreds, err := NewClientCreds(clientOptions) if err != nil { t.Fatalf("clientTLSCreds failed to create") } // ------------------------Scenario 1----------------------------------------- // stage = 0, initial connection should succeed ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Second) defer cancel1() conn, greetClient, err := callAndVerifyWithClientConn(ctx1, "rpc call 1", clientTLSCreds, false) defer conn.Close() if err != nil { t.Fatal(err) } // --------------------------------------------------------------------------- stage.increase() // ------------------------Scenario 2----------------------------------------- // stage = 1, previous connection should still succeed err = callAndVerify("rpc call 2", greetClient, false) if err != nil { t.Fatal(err) } // ------------------------Scenario 3----------------------------------------- // stage = 1, new connection should fail ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second) defer cancel2() conn2, greetClient, err := 
callAndVerifyWithClientConn(ctx2, "rpc call 3", clientTLSCreds, true) defer conn2.Close() if err != nil { t.Fatal(err) } //// --------------------------------------------------------------------------- stage.increase() // ------------------------Scenario 4----------------------------------------- // stage = 2, new connection should succeed ctx3, cancel3 := context.WithTimeout(context.Background(), 5*time.Second) defer cancel3() conn3, greetClient, err := callAndVerifyWithClientConn(ctx3, "rpc call 4", clientTLSCreds, false) defer conn3.Close() if err != nil { t.Fatal(err) } // --------------------------------------------------------------------------- stage.reset() }) } } grpc-go-1.29.1/security/advancedtls/advancedtls_test.go000066400000000000000000000437071365033716300232010ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package advancedtls import ( "context" "crypto/tls" "crypto/x509" "encoding/pem" "fmt" "io/ioutil" "net" "reflect" "syscall" "testing" "google.golang.org/grpc/credentials" "google.golang.org/grpc/security/advancedtls/testdata" ) func TestClientServerHandshake(t *testing.T) { // ------------------Load Client Trust Cert and Peer Cert------------------- clientTrustPool, err := readTrustCert(testdata.Path("client_trust_cert_1.pem")) if err != nil { t.Fatalf("Client is unable to load trust certs. 
Error: %v", err) } getRootCAsForClient := func(params *GetRootCAsParams) (*GetRootCAsResults, error) { return &GetRootCAsResults{TrustCerts: clientTrustPool}, nil } verifyFuncGood := func(params *VerificationFuncParams) (*VerificationResults, error) { return &VerificationResults{}, nil } verifyFuncBad := func(params *VerificationFuncParams) (*VerificationResults, error) { return nil, fmt.Errorf("custom verification function failed") } clientPeerCert, err := tls.LoadX509KeyPair(testdata.Path("client_cert_1.pem"), testdata.Path("client_key_1.pem")) if err != nil { t.Fatalf("Client is unable to parse peer certificates. Error: %v", err) } // ------------------Load Server Trust Cert and Peer Cert------------------- serverTrustPool, err := readTrustCert(testdata.Path("server_trust_cert_1.pem")) if err != nil { t.Fatalf("Server is unable to load trust certs. Error: %v", err) } getRootCAsForServer := func(params *GetRootCAsParams) (*GetRootCAsResults, error) { return &GetRootCAsResults{TrustCerts: serverTrustPool}, nil } serverPeerCert, err := tls.LoadX509KeyPair(testdata.Path("server_cert_1.pem"), testdata.Path("server_key_1.pem")) if err != nil { t.Fatalf("Server is unable to parse peer certificates. 
Error: %v", err) } getRootCAsForServerBad := func(params *GetRootCAsParams) (*GetRootCAsResults, error) { return nil, fmt.Errorf("bad root certificate reloading") } for _, test := range []struct { desc string clientCert []tls.Certificate clientGetClientCert func(*tls.CertificateRequestInfo) (*tls.Certificate, error) clientRoot *x509.CertPool clientGetRoot func(params *GetRootCAsParams) (*GetRootCAsResults, error) clientVerifyFunc CustomVerificationFunc clientExpectCreateError bool clientExpectHandshakeError bool serverMutualTLS bool serverCert []tls.Certificate serverGetCert func(*tls.ClientHelloInfo) (*tls.Certificate, error) serverRoot *x509.CertPool serverGetRoot func(params *GetRootCAsParams) (*GetRootCAsResults, error) serverExpectError bool }{ // Client: nil setting // Server: only set serverCert with mutual TLS off // Expected Behavior: server side failure // Reason: if either clientCert or clientGetClientCert is not set and // verifyFunc is not set, we will fail directly { "Client_no_trust_cert_Server_peer_cert", nil, nil, nil, nil, nil, true, false, false, []tls.Certificate{serverPeerCert}, nil, nil, nil, true, }, // Client: nil setting except verifyFuncGood // Server: only set serverCert with mutual TLS off // Expected Behavior: success // Reason: we will use verifyFuncGood to verify the server, // if either clientCert or clientGetClientCert is not set { "Client_no_trust_cert_verifyFuncGood_Server_peer_cert", nil, nil, nil, nil, verifyFuncGood, false, false, false, []tls.Certificate{serverPeerCert}, nil, nil, nil, false, }, // Client: only set clientRoot // Server: only set serverCert with mutual TLS off // Expected Behavior: server side failure and client handshake failure // Reason: not setting advanced TLS features will fall back to normal check, and will hence fail // on default host name check. All the default hostname checks will fail in this test suites. 
{ "Client_root_cert_Server_peer_cert", nil, nil, clientTrustPool, nil, nil, false, true, false, []tls.Certificate{serverPeerCert}, nil, nil, nil, true, }, // Client: only set clientGetRoot // Server: only set serverCert with mutual TLS off // Expected Behavior: server side failure and client handshake failure // Reason: setting root reloading function without custom verifyFunc will also fail, // since it will also fall back to default host name check { "Client_reload_root_Server_peer_cert", nil, nil, nil, getRootCAsForClient, nil, false, true, false, []tls.Certificate{serverPeerCert}, nil, nil, nil, true, }, // Client: set clientGetRoot and clientVerifyFunc // Server: only set serverCert with mutual TLS off // Expected Behavior: success { "Client_reload_root_verifyFuncGood_Server_peer_cert", nil, nil, nil, getRootCAsForClient, verifyFuncGood, false, false, false, []tls.Certificate{serverPeerCert}, nil, nil, nil, false, }, // Client: set clientGetRoot and bad clientVerifyFunc function // Server: only set serverCert with mutual TLS off // Expected Behavior: server side failure and client handshake failure // Reason: custom verification function is bad { "Client_reload_root_verifyFuncBad_Server_peer_cert", nil, nil, nil, getRootCAsForClient, verifyFuncBad, false, true, false, []tls.Certificate{serverPeerCert}, nil, nil, nil, true, }, // Client: set clientGetRoot and clientVerifyFunc // Server: nil setting // Expected Behavior: server side failure // Reason: server side must either set serverCert or serverGetCert { "Client_reload_root_verifyFuncGood_Server_nil", nil, nil, nil, getRootCAsForClient, verifyFuncGood, false, false, false, nil, nil, nil, nil, true, }, // Client: set clientGetRoot and clientVerifyFunc // Server: only set serverCert with mutual TLS on // Expected Behavior: server side failure // Reason: server side must either set serverRoot or serverGetRoot when using mutual TLS { "Client_reload_root_verifyFuncGood_Server_peer_cert_no_root_cert_mutualTLS", 
nil, nil, nil, getRootCAsForClient, verifyFuncGood, false, false, true, []tls.Certificate{serverPeerCert}, nil, nil, nil, true, }, // Client: set clientGetRoot, clientVerifyFunc and clientCert // Server: set serverRoot and serverCert with mutual TLS on // Expected Behavior: success { "Client_peer_cert_reload_root_verifyFuncGood_Server_peer_cert_root_cert_mutualTLS", []tls.Certificate{clientPeerCert}, nil, nil, getRootCAsForClient, verifyFuncGood, false, false, true, []tls.Certificate{serverPeerCert}, nil, serverTrustPool, nil, false, }, // Client: set clientGetRoot, clientVerifyFunc and clientCert // Server: set serverGetRoot and serverCert with mutual TLS on // Expected Behavior: success { "Client_peer_cert_reload_root_verifyFuncGood_Server_peer_cert_reload_root_mutualTLS", []tls.Certificate{clientPeerCert}, nil, nil, getRootCAsForClient, verifyFuncGood, false, false, true, []tls.Certificate{serverPeerCert}, nil, nil, getRootCAsForServer, false, }, // Client: set clientGetRoot, clientVerifyFunc and clientCert // Server: set serverGetRoot returning error and serverCert with mutual TLS on // Expected Behavior: server side failure // Reason: server side reloading returns failure { "Client_peer_cert_reload_root_verifyFuncGood_Server_peer_cert_bad_reload_root_mutualTLS", []tls.Certificate{clientPeerCert}, nil, nil, getRootCAsForClient, verifyFuncGood, false, false, true, []tls.Certificate{serverPeerCert}, nil, nil, getRootCAsForServerBad, true, }, // Client: set clientGetRoot, clientVerifyFunc and clientGetClientCert // Server: set serverGetRoot and serverGetCert with mutual TLS on // Expected Behavior: success { "Client_reload_both_certs_verifyFuncGood_Server_reload_both_certs_mutualTLS", nil, func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { return &clientPeerCert, nil }, nil, getRootCAsForClient, verifyFuncGood, false, false, true, nil, func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { return &serverPeerCert, nil }, nil, 
getRootCAsForServer, false, }, // Client: set everything but with the wrong peer cert not trusted by server // Server: set serverGetRoot and serverGetCert with mutual TLS on // Expected Behavior: server side returns failure because of // certificate mismatch { "Client_wrong_peer_cert_Server_reload_both_certs_mutualTLS", nil, func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { return &serverPeerCert, nil }, nil, getRootCAsForClient, verifyFuncGood, false, false, true, nil, func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { return &serverPeerCert, nil }, nil, getRootCAsForServer, true, }, // Client: set everything but with the wrong trust cert not trusting server // Server: set serverGetRoot and serverGetCert with mutual TLS on // Expected Behavior: server side and client side return failure due to // certificate mismatch and handshake failure { "Client_wrong_trust_cert_Server_reload_both_certs_mutualTLS", nil, func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { return &clientPeerCert, nil }, nil, getRootCAsForServer, verifyFuncGood, false, true, true, nil, func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { return &serverPeerCert, nil }, nil, getRootCAsForServer, true, }, // Client: set clientGetRoot, clientVerifyFunc and clientCert // Server: set everything but with the wrong peer cert not trusted by client // Expected Behavior: server side and client side return failure due to // certificate mismatch and handshake failure { "Client_reload_both_certs_verifyFuncGood_Server_wrong_peer_cert", nil, func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { return &clientPeerCert, nil }, nil, getRootCAsForClient, verifyFuncGood, false, false, true, nil, func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { return &clientPeerCert, nil }, nil, getRootCAsForServer, true, }, // Client: set clientGetRoot, clientVerifyFunc and clientCert // Server: set everything but with the wrong trust cert not trusting client // 
Expected Behavior: server side and client side return failure due to // certificate mismatch and handshake failure { "Client_reload_both_certs_verifyFuncGood_Server_wrong_trust_cert", nil, func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { return &clientPeerCert, nil }, nil, getRootCAsForClient, verifyFuncGood, false, true, true, nil, func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { return &serverPeerCert, nil }, nil, getRootCAsForClient, true, }, } { test := test t.Run(test.desc, func(t *testing.T) { done := make(chan credentials.AuthInfo, 1) lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen: %v", err) } // Start a server using ServerOptions in another goroutine. serverOptions := &ServerOptions{ Certificates: test.serverCert, GetCertificate: test.serverGetCert, RootCertificateOptions: RootCertificateOptions{ RootCACerts: test.serverRoot, GetRootCAs: test.serverGetRoot, }, RequireClientCert: test.serverMutualTLS, } go func(done chan credentials.AuthInfo, lis net.Listener, serverOptions *ServerOptions) { serverRawConn, err := lis.Accept() if err != nil { close(done) return } serverTLS, err := NewServerCreds(serverOptions) if err != nil { serverRawConn.Close() close(done) return } _, serverAuthInfo, err := serverTLS.ServerHandshake(serverRawConn) if err != nil { serverRawConn.Close() close(done) return } done <- serverAuthInfo }(done, lis, serverOptions) defer lis.Close() // Start a client using ClientOptions and connects to the server. lisAddr := lis.Addr().String() conn, err := net.Dial("tcp", lisAddr) if err != nil { t.Fatalf("Client failed to connect to %s. 
Error: %v", lisAddr, err) } defer conn.Close() clientOptions := &ClientOptions{ Certificates: test.clientCert, GetClientCertificate: test.clientGetClientCert, VerifyPeer: test.clientVerifyFunc, RootCertificateOptions: RootCertificateOptions{ RootCACerts: test.clientRoot, GetRootCAs: test.clientGetRoot, }, } clientTLS, newClientErr := NewClientCreds(clientOptions) if newClientErr != nil && test.clientExpectCreateError { return } if newClientErr != nil && !test.clientExpectCreateError || newClientErr == nil && test.clientExpectCreateError { t.Fatalf("Expect error: %v, but err is %v", test.clientExpectCreateError, newClientErr) } _, clientAuthInfo, handshakeErr := clientTLS.ClientHandshake(context.Background(), lisAddr, conn) // wait until server sends serverAuthInfo or fails. serverAuthInfo, ok := <-done if !ok && test.serverExpectError { return } if ok && test.serverExpectError || !ok && !test.serverExpectError { t.Fatalf("Server side error mismatch, got %v, want %v", !ok, test.serverExpectError) } if handshakeErr != nil && test.clientExpectHandshakeError { return } if handshakeErr != nil && !test.clientExpectHandshakeError || handshakeErr == nil && test.clientExpectHandshakeError { t.Fatalf("Expect error: %v, but err is %v", test.clientExpectHandshakeError, handshakeErr) } if !compare(clientAuthInfo, serverAuthInfo) { t.Fatalf("c.ClientHandshake(_, %v, _) = %v, want %v.", lisAddr, clientAuthInfo, serverAuthInfo) } }) } } func readTrustCert(fileName string) (*x509.CertPool, error) { trustData, err := ioutil.ReadFile(fileName) if err != nil { return nil, err } trustBlock, _ := pem.Decode(trustData) if trustBlock == nil { return nil, err } trustCert, err := x509.ParseCertificate(trustBlock.Bytes) if err != nil { return nil, err } trustPool := x509.NewCertPool() trustPool.AddCert(trustCert) return trustPool, nil } func compare(a1, a2 credentials.AuthInfo) bool { if a1.AuthType() != a2.AuthType() { return false } switch a1.AuthType() { case "tls": state1 := 
a1.(credentials.TLSInfo).State state2 := a2.(credentials.TLSInfo).State if state1.Version == state2.Version && state1.HandshakeComplete == state2.HandshakeComplete && state1.CipherSuite == state2.CipherSuite && state1.NegotiatedProtocol == state2.NegotiatedProtocol { return true } return false default: return false } } func TestAdvancedTLSOverrideServerName(t *testing.T) { expectedServerName := "server.name" clientTrustPool, err := readTrustCert(testdata.Path("client_trust_cert_1.pem")) if err != nil { t.Fatalf("Client is unable to load trust certs. Error: %v", err) } clientOptions := &ClientOptions{ RootCertificateOptions: RootCertificateOptions{ RootCACerts: clientTrustPool, }, ServerNameOverride: expectedServerName, } c, err := NewClientCreds(clientOptions) if err != nil { t.Fatalf("Client is unable to create credentials. Error: %v", err) } c.OverrideServerName(expectedServerName) if c.Info().ServerName != expectedServerName { t.Fatalf("c.Info().ServerName = %v, want %v", c.Info().ServerName, expectedServerName) } } func TestTLSClone(t *testing.T) { expectedServerName := "server.name" clientTrustPool, err := readTrustCert(testdata.Path("client_trust_cert_1.pem")) if err != nil { t.Fatalf("Client is unable to load trust certs. 
Error: %v", err) } clientOptions := &ClientOptions{ RootCertificateOptions: RootCertificateOptions{ RootCACerts: clientTrustPool, }, ServerNameOverride: expectedServerName, } c, err := NewClientCreds(clientOptions) if err != nil { t.Fatalf("Failed to create new client: %v", err) } cc := c.Clone() if cc.Info().ServerName != expectedServerName { t.Fatalf("cc.Info().ServerName = %v, want %v", cc.Info().ServerName, expectedServerName) } cc.OverrideServerName("") if c.Info().ServerName != expectedServerName { t.Fatalf("Change in clone should not affect the original, "+ "c.Info().ServerName = %v, want %v", c.Info().ServerName, expectedServerName) } } func TestAppendH2ToNextProtos(t *testing.T) { tests := []struct { name string ps []string want []string }{ { name: "empty", ps: nil, want: []string{"h2"}, }, { name: "only h2", ps: []string{"h2"}, want: []string{"h2"}, }, { name: "with h2", ps: []string{"alpn", "h2"}, want: []string{"alpn", "h2"}, }, { name: "no h2", ps: []string{"alpn"}, want: []string{"alpn", "h2"}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := appendH2ToNextProtos(tt.ps); !reflect.DeepEqual(got, tt.want) { t.Errorf("appendH2ToNextProtos() = %v, want %v", got, tt.want) } }) } } type nonSyscallConn struct { net.Conn } func TestWrapSyscallConn(t *testing.T) { sc := &syscallConn{} nsc := &nonSyscallConn{} wrapConn := WrapSyscallConn(sc, nsc) if _, ok := wrapConn.(syscall.Conn); !ok { t.Errorf("returned conn (type %T) doesn't implement syscall.Conn, want implement", wrapConn) } } grpc-go-1.29.1/security/advancedtls/go.mod000066400000000000000000000001441365033716300204150ustar00rootroot00000000000000module google.golang.org/grpc/security/advancedtls go 1.13 require google.golang.org/grpc v1.27.0 grpc-go-1.29.1/security/advancedtls/go.sum000066400000000000000000000122301365033716300204410ustar00rootroot00000000000000cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= grpc-go-1.29.1/security/advancedtls/testdata/000077500000000000000000000000001365033716300211215ustar00rootroot00000000000000grpc-go-1.29.1/security/advancedtls/testdata/README.md000066400000000000000000000025131365033716300224010ustar00rootroot00000000000000About This Directory ------------- This testdata directory contains the certificates used in the tests of package advancedtls. How to Generate Test Certificates Using OpenSSL ------------- Supposing we are going to create a `subject_cert.pem` that is trusted by `ca_cert.pem`, here are the commands we run: 1. Generate the private key, `ca_key.pem`, and the cert `ca_cert.pem`, for the CA: ``` $ openssl req -x509 -newkey rsa:4096 -keyout ca_key.pem -out ca_cert.pem -nodes -days $DURATION_DAYS ``` 2. 
Generate a CSR `csr.pem` using `subject_key.pem`: ``` $ openssl req -new -key subject_key.pem -out csr.pem ``` 3. Generate a private key `subject_key.pem` for the subject: ``` $ openssl genrsa -out subject_key.pem 4096 ``` 4. Use `ca_key.pem` and `ca_cert.pem` to sign `csr.pem`, and get a certificate, `subject_cert.pem`, for the subject: This step requires some additional files and please check out [this answer from StackOverflow](https://stackoverflow.com/a/21340898) for more. ``` $ openssl ca -config openssl-ca.cnf -policy signing_policy -extensions signing_req -out subject_cert.pem -in csr.pem -keyfile ca_key.pem -cert ca_cert.pem ``` 5. Verify the `subject_cert.pem` is trusted by `ca_cert.pem`: ``` $ openssl verify -verbose -CAfile ca_cert.pem subject_cert.pem ``` grpc-go-1.29.1/security/advancedtls/testdata/client_cert_1.pem000066400000000000000000000157501365033716300243470ustar00rootroot00000000000000Certificate: Data: Version: 3 (0x2) Serial Number: 5 (0x5) Signature Algorithm: sha256WithRSAEncryption Issuer: C=US, ST=VA, O=Internet Widgits Pty Ltd, CN=foo.bar.hoo.ca.com Validity Not Before: Nov 15 19:15:24 2019 GMT Not After : Aug 29 19:15:24 2293 GMT Subject: C=US, ST=CA, O=Internet Widgits Pty Ltd, CN=foo.bar.hoo.com Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (4096 bit) Modulus: 00:c3:3e:b5:d8:bc:73:5f:b4:e5:60:a8:73:0e:8c: 9c:ff:28:2c:a9:bc:68:12:8b:ae:3e:aa:7b:f5:b9: 2d:b3:73:f8:9e:64:e9:8e:26:ca:36:11:5f:f9:73: 39:f0:19:55:d1:ba:f4:4e:2b:ab:ee:01:3b:eb:f0: 5f:6e:b3:24:39:c1:f5:f0:bd:6a:d0:6c:56:cf:96: 33:5d:05:48:c4:b5:b3:3e:55:8e:89:8e:6b:79:c5: 3b:99:1c:e3:03:69:0f:74:a2:97:7b:bf:c4:11:1d: da:d7:cb:87:0d:90:25:64:29:3e:f6:62:bc:f9:a5: 56:de:56:e1:27:77:51:1a:30:f1:88:89:01:c2:c8: 35:40:d3:2e:2d:30:ef:d7:de:3b:28:15:4a:a4:a9: ba:f0:40:f0:79:3a:16:f9:ae:52:32:c3:52:ad:53: 9c:94:07:d5:9b:63:50:90:ff:f1:8c:fd:4e:59:b8: 5e:0a:73:9b:b4:b7:60:e1:7c:07:02:50:74:f3:48: 69:6a:74:7c:b2:96:70:86:19:2f:82:4c:95:57:aa: 
4c:2f:38:75:8b:9b:a1:3e:7d:dd:da:bf:d2:a4:a3: 3a:02:17:43:35:0a:52:03:f5:fb:1a:a1:60:28:c3: e7:41:eb:4a:0c:f4:43:6e:81:64:ba:41:8d:61:40: 97:9f:e2:67:51:7c:2d:2f:17:72:b9:a0:27:5c:fc: e3:b6:a6:de:f4:1e:34:95:2c:c5:7f:13:c4:bb:25: 76:3e:3b:39:b6:36:d0:60:17:1e:c7:01:9c:3d:65: 9a:96:4c:d8:4c:10:85:32:76:c7:6e:53:64:80:c9: 33:1a:44:39:a7:c7:69:d3:64:c3:4c:06:20:56:d2: eb:d9:65:56:02:65:c4:ba:72:db:89:c4:00:3f:89: f4:75:d5:6d:83:ce:ad:66:fb:73:f8:8e:bb:dc:01: c0:4f:86:c1:57:45:68:34:3f:55:1f:0e:ef:82:3f: 9a:26:1c:9c:8d:88:5e:27:ab:b6:b9:58:a7:c5:b0: 36:0f:99:ba:d8:cc:89:41:ed:ab:26:b8:8a:16:17: 21:67:b6:4d:83:d1:dd:53:de:67:ab:76:a3:af:f8: 60:99:29:6a:0a:4f:f2:ad:32:54:69:33:8c:f2:ca: 9b:d6:59:cd:8c:69:cd:3f:d3:8f:05:28:d1:29:04: bf:b2:de:98:0f:9d:62:13:6d:fe:de:be:2d:c6:be: d6:f8:10:cb:b5:b3:4f:ad:a4:60:36:b3:19:29:29: b9:b4:37:5d:13:e7:36:cb:f9:fa:7f:9e:63:7e:f3: 05:ee:9e:e6:4d:ff:e3:46:a4:7b:1f:12:72:89:b6: 10:5f:bd Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Subject Key Identifier: 7F:9D:9C:C6:86:DF:9E:07:93:94:EF:18:2D:0A:0A:50:AA:1F:A2:B7 X509v3 Authority Key Identifier: keyid:B4:19:08:1C:FC:10:23:C5:30:86:22:BC:CB:B1:5F:AD:EA:7A:5D:F1 X509v3 Basic Constraints: CA:FALSE X509v3 Key Usage: Digital Signature, Key Encipherment Signature Algorithm: sha256WithRSAEncryption 31:b0:6d:25:5e:8e:9b:73:01:ac:08:b9:a6:70:8e:de:18:fd: b8:2b:bb:2d:7c:c0:84:20:c8:d2:32:8a:d9:ca:24:b9:75:e2: c8:91:40:db:0a:4e:e5:05:bd:a6:bb:22:85:3c:8e:be:d3:65: 0a:7f:cd:c0:fc:eb:94:61:91:30:53:1d:4d:9f:4e:d7:38:0f: ab:d9:3d:a1:1c:48:c1:e6:3c:35:cc:db:47:31:a6:b2:44:b8: db:34:6c:28:20:49:ff:e1:2b:cd:48:e1:7e:78:7a:05:0a:31: 3d:dd:45:51:95:06:ad:5c:8c:0e:ff:0c:98:77:4f:5c:42:dc: da:d8:d3:30:58:e4:3c:ef:b3:64:3f:f2:e2:19:d9:36:04:1a: b4:87:c2:1b:89:5d:52:17:fb:27:a2:83:2d:55:6d:1f:80:d5: a7:ea:20:b0:0a:23:4d:0f:48:36:ae:42:f9:fc:c8:86:f4:69: 30:e8:cd:52:34:62:ee:b9:fd:12:4b:ba:4d:a2:75:47:d4:b6: b2:dd:ea:6f:6b:a2:86:f5:c0:3b:06:09:c1:5f:30:96:b6:79: 
32:45:b3:d1:8c:0a:d2:58:d3:39:2f:21:ba:7a:3e:a7:38:cc: 88:16:1e:75:62:30:fd:79:a3:1d:a9:bd:df:66:dc:b9:f5:79: bc:fb:bd:bd:e5:f0:46:60:d1:03:7b:58:06:00:f5:d8:36:a0: a9:b0:2d:4f:4e:1b:6f:17:f0:d9:51:0c:25:a2:48:ac:e3:f4: a6:52:59:84:83:e3:79:df:ca:9e:5c:24:d3:f9:55:39:8c:3e: 2a:91:3f:53:0b:d4:22:55:c7:a3:80:41:05:e3:41:7d:16:d1: af:a2:1e:f7:fa:ee:f3:a7:6e:19:66:af:dd:23:39:5a:33:f9: 61:3d:e7:90:e2:9a:0e:8e:8b:a0:3b:27:55:e2:ed:09:c5:ca: 71:14:95:10:be:03:8e:2a:6d:48:c5:85:a5:f4:39:0e:2d:f5: 64:50:f4:b6:35:f9:63:58:d0:5d:09:01:f9:bc:99:60:dc:25: 94:36:3b:ee:b9:9d:23:2f:52:80:9c:f1:e4:9b:5f:a4:37:c9: 63:32:cf:ca:d6:2a:b7:3b:c8:10:54:21:ca:03:d3:ae:0e:da: cd:08:fe:71:10:f8:db:d4:e6:cf:d2:59:9b:3d:96:4a:a8:80: 42:69:ff:7f:4b:4b:52:42:aa:e7:e9:6e:7f:84:98:f5:13:16: 14:b0:4e:22:a6:80:03:29:6b:2e:33:ac:05:b5:75:25:58:72: 34:ff:ad:95:f0:52:9e:46:81:91:7b:6c:12:b1:43:af:70:06: 03:d8:c8:cb:4a:85:f2:37 -----BEGIN CERTIFICATE----- MIIFiDCCA3CgAwIBAgIBBTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJVUzEL MAkGA1UECAwCVkExITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEb MBkGA1UEAwwSZm9vLmJhci5ob28uY2EuY29tMCAXDTE5MTExNTE5MTUyNFoYDzIy OTMwODI5MTkxNTI0WjBXMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExITAfBgNV BAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEYMBYGA1UEAwwPZm9vLmJhci5o b28uY29tMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAwz612LxzX7Tl YKhzDoyc/ygsqbxoEouuPqp79bkts3P4nmTpjibKNhFf+XM58BlV0br0Tiur7gE7 6/BfbrMkOcH18L1q0GxWz5YzXQVIxLWzPlWOiY5recU7mRzjA2kPdKKXe7/EER3a 18uHDZAlZCk+9mK8+aVW3lbhJ3dRGjDxiIkBwsg1QNMuLTDv1947KBVKpKm68EDw eToW+a5SMsNSrVOclAfVm2NQkP/xjP1OWbheCnObtLdg4XwHAlB080hpanR8spZw hhkvgkyVV6pMLzh1i5uhPn3d2r/SpKM6AhdDNQpSA/X7GqFgKMPnQetKDPRDboFk ukGNYUCXn+JnUXwtLxdyuaAnXPzjtqbe9B40lSzFfxPEuyV2Pjs5tjbQYBcexwGc PWWalkzYTBCFMnbHblNkgMkzGkQ5p8dp02TDTAYgVtLr2WVWAmXEunLbicQAP4n0 ddVtg86tZvtz+I673AHAT4bBV0VoND9VHw7vgj+aJhycjYheJ6u2uVinxbA2D5m6 2MyJQe2rJriKFhchZ7ZNg9HdU95nq3ajr/hgmSlqCk/yrTJUaTOM8sqb1lnNjGnN P9OPBSjRKQS/st6YD51iE23+3r4txr7W+BDLtbNPraRgNrMZKSm5tDddE+c2y/n6 
f55jfvMF7p7mTf/jRqR7HxJyibYQX70CAwEAAaNaMFgwHQYDVR0OBBYEFH+dnMaG 354Hk5TvGC0KClCqH6K3MB8GA1UdIwQYMBaAFLQZCBz8ECPFMIYivMuxX63qel3x MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEBCwUAA4ICAQAxsG0l Xo6bcwGsCLmmcI7eGP24K7stfMCEIMjSMorZyiS5deLIkUDbCk7lBb2muyKFPI6+ 02UKf83A/OuUYZEwUx1Nn07XOA+r2T2hHEjB5jw1zNtHMaayRLjbNGwoIEn/4SvN SOF+eHoFCjE93UVRlQatXIwO/wyYd09cQtza2NMwWOQ877NkP/LiGdk2BBq0h8Ib iV1SF/snooMtVW0fgNWn6iCwCiNND0g2rkL5/MiG9Gkw6M1SNGLuuf0SS7pNonVH 1Lay3epva6KG9cA7BgnBXzCWtnkyRbPRjArSWNM5LyG6ej6nOMyIFh51YjD9eaMd qb3fZty59Xm8+7295fBGYNEDe1gGAPXYNqCpsC1PThtvF/DZUQwlokis4/SmUlmE g+N538qeXCTT+VU5jD4qkT9TC9QiVcejgEEF40F9FtGvoh73+u7zp24ZZq/dIzla M/lhPeeQ4poOjougOydV4u0JxcpxFJUQvgOOKm1IxYWl9DkOLfVkUPS2NfljWNBd CQH5vJlg3CWUNjvuuZ0jL1KAnPHkm1+kN8ljMs/K1iq3O8gQVCHKA9OuDtrNCP5x EPjb1ObP0lmbPZZKqIBCaf9/S0tSQqrn6W5/hJj1ExYUsE4ipoADKWsuM6wFtXUl WHI0/62V8FKeRoGRe2wSsUOvcAYD2MjLSoXyNw== -----END CERTIFICATE----- grpc-go-1.29.1/security/advancedtls/testdata/client_cert_2.pem000066400000000000000000000157771365033716300243610ustar00rootroot00000000000000Certificate: Data: Version: 3 (0x2) Serial Number: 6 (0x6) Signature Algorithm: sha256WithRSAEncryption Issuer: C=US, ST=CA, O=Internet Widgits Pty Ltd, CN=foo.bar.server2.trust.com Validity Not Before: Jan 9 22:47:15 2020 GMT Not After : Oct 23 22:47:15 2293 GMT Subject: C=US, ST=CA, O=Internet Widgits Pty Ltd, CN=foo.bar.client2.com Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (4096 bit) Modulus: 00:b9:3e:c6:3b:cb:d6:77:4b:17:d4:8b:91:27:f4: 62:01:60:8d:01:2f:0a:a8:b1:d6:e3:59:d6:25:3a: a1:7f:2f:5d:ef:02:f9:6f:4f:72:db:75:ce:0b:22: a2:05:7c:e0:7c:a3:d3:c8:fa:87:c0:6c:a9:47:00: ed:52:2b:ba:95:36:36:1a:d3:59:1e:a7:30:a7:48: 38:7f:1a:7a:3f:84:cf:83:f0:fe:60:61:9e:c0:46: ce:44:b5:37:83:ef:14:6c:9a:ea:3b:fe:37:8a:ab: ea:28:59:43:f0:d7:1a:a0:57:a6:5e:a7:3f:46:95: 92:fb:44:77:68:ee:41:ca:57:1b:de:4c:80:ea:16: b7:25:c5:b2:e5:d4:47:a7:bb:8d:f5:53:9d:a3:0e: d0:eb:59:5e:7a:6d:8e:a1:8e:f3:b7:b1:4a:8b:f1: 
8a:01:f1:e1:14:85:dc:91:ce:25:7a:fd:db:17:b8: 15:60:34:4b:f5:35:df:bd:22:65:b9:85:4a:7a:39: 74:c0:88:c9:15:61:62:a8:4b:b6:ae:87:0b:2d:5f: 2b:c6:13:c5:9c:1b:63:c0:23:73:6f:24:5e:e1:f9: f5:ed:82:81:51:90:4a:08:7f:6e:4f:bd:27:00:b2: b4:be:a8:0b:65:95:22:a4:c7:24:5b:07:5f:3c:66: 55:2d:af:ec:d3:f7:ca:e6:07:44:09:6f:da:a2:f3: c9:4b:1f:9b:d7:e0:0c:6c:a0:be:4d:4c:6c:c5:3a: bb:0d:a1:c4:82:75:42:ba:c0:10:d2:93:a4:0e:4e: 41:9a:c2:3c:68:ae:17:92:ec:4b:4f:ca:ef:09:7c: b2:6d:16:31:15:31:67:78:02:0a:57:6b:60:4e:7f: cb:0a:27:a5:cd:dd:d9:29:a5:a2:e8:d8:f5:e9:8c: a3:16:72:9d:b9:94:3e:ef:b1:70:27:2e:16:0f:06: f9:50:81:99:a2:aa:b2:74:d8:b9:24:0d:08:f4:ff: 16:c1:2b:32:ad:d1:7d:c2:db:ed:e5:8c:52:26:ed: 8c:04:af:86:9e:a1:5f:48:81:20:79:bc:57:58:25: 89:85:02:ba:e1:5f:66:e4:4a:30:2e:6d:3b:89:2c: 4f:e9:02:6a:e9:9e:b3:6c:7e:9d:1b:a9:37:3e:bf: 06:ec:ce:d6:d7:6e:e3:e2:5c:2a:fd:98:dd:4d:59: e8:43:be:44:fe:ee:0a:64:fe:fc:e3:4d:88:23:27: 46:a7:f0:b5:80:c4:d8:2c:ad:02:a9:68:a7:d5:64: 74:b9:14:21:68:c9:f5:3c:62:73:ed:b2:be:10:89: 1f:d0:1d:1b:8a:ef:5e:6b:4b:08:15:25:4d:9c:b6: f4:2a:0b Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Subject Key Identifier: E0:27:7D:90:FC:81:7F:F3:EE:97:CE:65:A2:AD:D2:1E:CC:D5:2B:0F X509v3 Authority Key Identifier: keyid:63:88:EA:4D:D0:3E:EF:5E:F8:43:91:75:40:E4:16:AB:15:B3:32:B9 X509v3 Basic Constraints: CA:FALSE X509v3 Key Usage: Digital Signature, Key Encipherment Signature Algorithm: sha256WithRSAEncryption 8c:81:8f:65:38:2c:db:69:34:26:47:62:b7:5d:4e:67:41:c2: 67:b2:97:72:51:84:f5:73:8e:cf:9d:0f:a2:91:1e:ec:e4:72: 6f:08:da:26:06:c0:f0:11:fd:b8:ac:23:c7:cf:35:ac:d0:90: e3:da:f0:8b:7b:55:16:00:5f:82:92:40:07:12:d1:ae:06:13: c0:5d:7c:9b:64:d7:35:86:59:c3:8d:cd:b9:a8:17:03:2e:b5: d4:8b:18:11:cf:8d:90:74:8f:12:f6:53:99:66:d8:50:b6:c6: ef:c8:e3:bc:26:74:67:cb:6d:34:bd:c6:58:38:ef:4b:5e:56: 80:37:2d:25:64:31:96:6e:8d:13:ff:21:63:c9:ec:8f:b6:05: 5a:8b:b5:ae:88:50:af:00:c4:c7:9d:9b:88:a3:05:6c:63:85: 46:1a:b1:6b:32:11:cc:0c:a6:75:44:a2:39:c6:58:c8:2a:f8: 
08:8c:9a:12:c2:49:e0:03:da:fa:f7:67:a3:7b:91:71:46:24: 71:83:3f:a9:a0:a9:4f:e5:77:9d:a4:49:2b:0e:69:dd:47:93: b9:4d:82:3d:f7:12:b1:02:0e:ec:4c:98:76:c2:48:81:30:68: 7c:04:90:e7:a7:e5:0f:44:cf:48:e3:04:1b:9c:4a:0f:20:25: ce:74:13:83:96:d8:78:69:a0:1c:e4:9e:8d:1b:0c:9f:e8:43: 29:72:82:96:98:6e:8e:8b:0c:0e:18:4e:dd:62:e8:e9:5c:77: 64:40:5b:c3:44:3d:21:0f:3f:ef:04:c8:83:f0:af:cc:be:9c: b5:6b:32:c3:26:66:a0:06:bc:7b:b0:c8:54:8f:0a:d7:57:bb: c7:d9:7a:7f:3e:61:ab:64:03:cc:32:44:a1:71:6f:9a:cc:80: a6:e6:de:2d:8e:8a:2f:ca:bf:63:42:24:de:3f:c2:47:a4:e2: fb:3d:6f:70:3f:6f:cb:bd:61:40:af:c9:59:75:99:39:9d:65: e4:89:48:fc:14:1c:ad:03:fc:5f:a2:69:be:4d:a1:a3:ad:6b: e7:f8:8d:13:64:f8:76:7d:04:af:61:f9:9c:39:68:68:99:bc: ec:53:b9:d1:e7:f3:c2:c9:87:42:f0:26:8f:47:c3:6d:de:2a: f5:df:b4:58:f2:1e:f5:6c:29:0b:dd:de:ea:1a:88:21:a4:d1: bb:7f:54:c5:cd:75:71:4e:ef:d0:50:f8:ff:a2:0f:d5:02:fd: 51:52:86:b8:30:db:4f:e0:3b:f1:91:45:72:49:df:a4:17:97: 25:ca:12:9d:61:9d:29:2c:e4:5f:da:c7:3c:ee:4c:65:5d:2f: 38:a6:7d:8b:52:af:af:18 -----BEGIN CERTIFICATE----- MIIFkzCCA3ugAwIBAgIBBjANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQGEwJVUzEL MAkGA1UECAwCQ0ExITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEi MCAGA1UEAwwZZm9vLmJhci5zZXJ2ZXIyLnRydXN0LmNvbTAgFw0yMDAxMDkyMjQ3 MTVaGA8yMjkzMTAyMzIyNDcxNVowWzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNB MSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxHDAaBgNVBAMME2Zv by5iYXIuY2xpZW50Mi5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC AQC5PsY7y9Z3SxfUi5En9GIBYI0BLwqosdbjWdYlOqF/L13vAvlvT3Lbdc4LIqIF fOB8o9PI+ofAbKlHAO1SK7qVNjYa01kepzCnSDh/Gno/hM+D8P5gYZ7ARs5EtTeD 7xRsmuo7/jeKq+ooWUPw1xqgV6Zepz9GlZL7RHdo7kHKVxveTIDqFrclxbLl1Een u431U52jDtDrWV56bY6hjvO3sUqL8YoB8eEUhdyRziV6/dsXuBVgNEv1Nd+9ImW5 hUp6OXTAiMkVYWKoS7auhwstXyvGE8WcG2PAI3NvJF7h+fXtgoFRkEoIf25PvScA srS+qAtllSKkxyRbB188ZlUtr+zT98rmB0QJb9qi88lLH5vX4AxsoL5NTGzFOrsN ocSCdUK6wBDSk6QOTkGawjxorheS7EtPyu8JfLJtFjEVMWd4AgpXa2BOf8sKJ6XN 3dkppaLo2PXpjKMWcp25lD7vsXAnLhYPBvlQgZmiqrJ02LkkDQj0/xbBKzKt0X3C 
2+3ljFIm7YwEr4aeoV9IgSB5vFdYJYmFArrhX2bkSjAubTuJLE/pAmrpnrNsfp0b qTc+vwbsztbXbuPiXCr9mN1NWehDvkT+7gpk/vzjTYgjJ0an8LWAxNgsrQKpaKfV ZHS5FCFoyfU8YnPtsr4QiR/QHRuK715rSwgVJU2ctvQqCwIDAQABo1owWDAdBgNV HQ4EFgQU4Cd9kPyBf/Pul85loq3SHszVKw8wHwYDVR0jBBgwFoAUY4jqTdA+7174 Q5F1QOQWqxWzMrkwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEL BQADggIBAIyBj2U4LNtpNCZHYrddTmdBwmeyl3JRhPVzjs+dD6KRHuzkcm8I2iYG wPAR/bisI8fPNazQkOPa8It7VRYAX4KSQAcS0a4GE8BdfJtk1zWGWcONzbmoFwMu tdSLGBHPjZB0jxL2U5lm2FC2xu/I47wmdGfLbTS9xlg470teVoA3LSVkMZZujRP/ IWPJ7I+2BVqLta6IUK8AxMedm4ijBWxjhUYasWsyEcwMpnVEojnGWMgq+AiMmhLC SeAD2vr3Z6N7kXFGJHGDP6mgqU/ld52kSSsOad1Hk7lNgj33ErECDuxMmHbCSIEw aHwEkOen5Q9Ez0jjBBucSg8gJc50E4OW2HhpoBzkno0bDJ/oQylygpaYbo6LDA4Y Tt1i6Olcd2RAW8NEPSEPP+8EyIPwr8y+nLVrMsMmZqAGvHuwyFSPCtdXu8fZen8+ YatkA8wyRKFxb5rMgKbm3i2Oii/Kv2NCJN4/wkek4vs9b3A/b8u9YUCvyVl1mTmd ZeSJSPwUHK0D/F+iab5NoaOta+f4jRNk+HZ9BK9h+Zw5aGiZvOxTudHn88LJh0Lw Jo9Hw23eKvXftFjyHvVsKQvd3uoaiCGk0bt/VMXNdXFO79BQ+P+iD9UC/VFShrgw 20/gO/GRRXJJ36QXlyXKEp1hnSks5F/axzzuTGVdLzimfYtSr68Y -----END CERTIFICATE----- grpc-go-1.29.1/security/advancedtls/testdata/client_key_1.pem000066400000000000000000000062531365033716300242000ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEAwz612LxzX7TlYKhzDoyc/ygsqbxoEouuPqp79bkts3P4nmTp jibKNhFf+XM58BlV0br0Tiur7gE76/BfbrMkOcH18L1q0GxWz5YzXQVIxLWzPlWO iY5recU7mRzjA2kPdKKXe7/EER3a18uHDZAlZCk+9mK8+aVW3lbhJ3dRGjDxiIkB wsg1QNMuLTDv1947KBVKpKm68EDweToW+a5SMsNSrVOclAfVm2NQkP/xjP1OWbhe CnObtLdg4XwHAlB080hpanR8spZwhhkvgkyVV6pMLzh1i5uhPn3d2r/SpKM6AhdD NQpSA/X7GqFgKMPnQetKDPRDboFkukGNYUCXn+JnUXwtLxdyuaAnXPzjtqbe9B40 lSzFfxPEuyV2Pjs5tjbQYBcexwGcPWWalkzYTBCFMnbHblNkgMkzGkQ5p8dp02TD TAYgVtLr2WVWAmXEunLbicQAP4n0ddVtg86tZvtz+I673AHAT4bBV0VoND9VHw7v gj+aJhycjYheJ6u2uVinxbA2D5m62MyJQe2rJriKFhchZ7ZNg9HdU95nq3ajr/hg mSlqCk/yrTJUaTOM8sqb1lnNjGnNP9OPBSjRKQS/st6YD51iE23+3r4txr7W+BDL tbNPraRgNrMZKSm5tDddE+c2y/n6f55jfvMF7p7mTf/jRqR7HxJyibYQX70CAwEA 
AQKCAgEAjT9s5yNOhEqmNssmkbwASEeUKCd5UxFiOUu06gvRmCWqE00F+iTt3Tes qxZFMAHkKBqMa5EEjOavpvz6zWckKfS8LDGceLQoCX2sIvuTrVuWFN5og/NYpXue piJTyT/UQpjt5kTRX2Ct1bgUOCe0JUYBmtXLyP9oXOmVcavMLJqD4jbb40Jb5E3i 9iaVHSJUwabFnWJ9LxqL3ee8f10xcjAEPAhlGmKgkg3DV2MSKOGIMThEMGN6nb6c hAPqPi5erTIRsUYcgEZ9mUXXLPiigg1dmDvMLfelK0R7n6luhlTfvmt9331b4Cmw Q4/DtTokr3e81qpPrj5F1Mlfsp+8EFd8Ucwtu37DFiwpqYpDeMYxbbVC1toa5QQy 6VRa7NQLTHXfRp6mmaf37KnganLYOqX0vF8LMIn2O11jEnfdAfqSkHY8JzvNG9OJ 71LO5FXa7VEfOGfu1lNXScFN3yqukjr2aPo8bd9hIIw4ZEvJtto+0hBBTF4ttJ+r R5j+h764A6vqxBo8Oh60sahY7sYBD0BIZT/hmxqaEC1PUpPfveGzWcr6r/xb30ak DhrbWsH2/St8NjCL/9u86K/KyQB8nDwOQlTC/gB+SxLCp9KcEG3HuNVMFtV/pic/ lzqChT9p+2/F+iv/aIb69FcBuGMfljdrsnnrc9954nco8sXgmDECggEBAOPbmWLb vnCzZ8VdbqsxlXeF9Ype9tyINE8az9rG0A15tTXtHCwYQRpTi8PWevaTYtJvzj5Z 7DnMH0B56q8p+oMyEP8YkfOQK6OavW9ehSui1y7KSgjFsFXuXPUR6BnLPUZW/BuD UHrbjspFREWZBrm2y0tOk2sYZirqg9r+Hl1yAZXeXXkAK+UdNygRuoG3by8Nemql wA22pLu6J0dZ04AQX4ERdxJcTxLx3wf3tpFltSWdsJr6kuevNBcdvpq/Xc9M91bW n8POxMWIBTZTC5nDTJd9nCip1J8jACFII5evr/L+O3Bwda4k/B277D/DVUtKhhcD UBucDcLXQro6eHMCggEBANtb9ZBYw3R/JbZrNwShB379I5p2SN30mSbkNB+0PYYx WNX5YQADFlulG5/spPD15dyHdYWDWI+c40ZXAKfgN12it6id+eUC5hbx4+N9E6yP 4+9mkvPiV2HpIOLSb/fDReJsE/d6l0Fwqh2xGCN6adLSa3DCy4q9IP9pIJHOAkeY kdBQwtXH0xo9hM25/ZFnWGmhugRvllB2rPCEPFxhsuS1ExgEPLm1T8DnueQTGuEf lAXUj/s+RVcGgHgQ/ONv9O6uEhmZYST+ZFu3sb2Rq6YwNUjbIiaMuzMgEHAdQ4C4 xYQDC0Bnf3Lt1iszvypwAxjPBcHhVeqzTL4l1sn4Kw8CggEAEK59Fk28LYgU6tAi UAo7RRrblRvKuu6F1dzCpuOzS6lDaQVI8Ll92q2PJ/FF41N7AqkI0mvG7ZxSFWhX lCdgncZGlEZ6OPivGTU09ThYS4+KbXSF4wqGFGR1DcQX1/uXKtUnc+QzOitk0s4r Z2UCpwoI7CR+inKo2C9/I8NC+dhk4VH8SeWHUSjIZviVTPXe//Tep3wnCVn7yXqh cYnUACYyt8JNk1yKtXpbt7uc9BwcHPrkeRQrOScMizy0PaQQ/CJIYWUpIS68HTIO H6II0WMI8nZRvnBgjp4DXmxnnq1QFlwigeLZ2rv+cTbW3vwv/GkiVAD8FmlgYIld 60BonQKCAQBAP2fmFklxBoiKLE7Z+TwT0pqp8/kVoT12KaKmoojek/d7/GWPtlfH Ec3Mgmgw9ySS+c3PBBBdR8s9X+AeS0qMD0uRhGubysSPdduUVp77jM1q4fUqn2GO mNR7+ry2qaf/UD5s3qgMj64TsjnqskDqcZzsUvGAujI+/JCAhAEg7SvQAsd+C9/l 
sJ0EEHSXMNixX5/3CqPQ/2FZtLFlMWxPFkX4Y81RayxnyLcmeP4Hb9NP/dkJ8kwm 2A2qnPckujbX7X35p3XPev7z6hKR/mdy7m284AnZlqCBseN+ouORgQzAxI94Fpg6 ljSDRM255ULS8leyWIhsjIVur/CACUK7AoIBAHRqbtOLnfrDS8VlI+V0GtNJXLVS XDlgTtPNMaDWMKxVLFNwF8MeY5pf1QHNa399bOumZUlmRfZ+AcJYDTyfF366Eoh6 yatmoQKMJotsQWln9iGWv7wqTP7omrL+Y053R1ypdY4k/4Yf9ptykiCBIUwYqjxk +NvIcf8r0cZZjsx7SlkjhGGhFHkeFewhbPm7o8bolZ26Nf/luNGuJSOzGac88Sq5 9jSKbkWTI4Rukw3n73AAKkdbLmGkIw81BnMbXH3bBoB+fdmILgIFx61D1QeCipOQ WJIht2SLm8UXfYAQLGL2kQ2+C531uFvV+hzNA1H5KHj1Lo4BD2ogjjePdFY= -----END RSA PRIVATE KEY----- grpc-go-1.29.1/security/advancedtls/testdata/client_key_2.pem000066400000000000000000000062531365033716300242010ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEAuT7GO8vWd0sX1IuRJ/RiAWCNAS8KqLHW41nWJTqhfy9d7wL5 b09y23XOCyKiBXzgfKPTyPqHwGypRwDtUiu6lTY2GtNZHqcwp0g4fxp6P4TPg/D+ YGGewEbORLU3g+8UbJrqO/43iqvqKFlD8NcaoFemXqc/RpWS+0R3aO5Bylcb3kyA 6ha3JcWy5dRHp7uN9VOdow7Q61leem2OoY7zt7FKi/GKAfHhFIXckc4lev3bF7gV YDRL9TXfvSJluYVKejl0wIjJFWFiqEu2rocLLV8rxhPFnBtjwCNzbyRe4fn17YKB UZBKCH9uT70nALK0vqgLZZUipMckWwdfPGZVLa/s0/fK5gdECW/aovPJSx+b1+AM bKC+TUxsxTq7DaHEgnVCusAQ0pOkDk5BmsI8aK4XkuxLT8rvCXyybRYxFTFneAIK V2tgTn/LCielzd3ZKaWi6Nj16YyjFnKduZQ+77FwJy4WDwb5UIGZoqqydNi5JA0I 9P8WwSsyrdF9wtvt5YxSJu2MBK+GnqFfSIEgebxXWCWJhQK64V9m5EowLm07iSxP 6QJq6Z6zbH6dG6k3Pr8G7M7W127j4lwq/ZjdTVnoQ75E/u4KZP78402IIydGp/C1 gMTYLK0CqWin1WR0uRQhaMn1PGJz7bK+EIkf0B0biu9ea0sIFSVNnLb0KgsCAwEA AQKCAgBtWJWxJFBzWFs3ti630/Sp9XEmOrti+p7q0tOqZCKCLdaXyDyurMoSq0Y1 onrbHGxyhk30O5Y4SqvdYrmzoGZhv39OdGUNyAjbJbFbrahtqBrKOk4dXGJWAzWs rv+XHGAE/6i2QwhMDdCJgq+tEXwBG9vz0WtzYcVCFpcZ1FH3e1XS8XvDMidn33wL WDP32akhH/tUDeHamoU/ZT4lNXm9e6SSWMBrB3kiISYi1vme0QwrwxizEguoMeXh AdXkHb7pyNKW9+cifLq8tvydps89OAlhwbgKvswx1XtFJsXvRBob2cY1/CMHQxk9 bl0Ad3xjclRP4Sly9K4MIZzgzVMHRCstG1K60cDVK5GeiBkXHKfihgXIIk5iILjH jplpTx54KEtC+NTd0/i9DsK6/DKcATt+AAPgjoEy2giSgfTpZqyMgLgIAvYKgrYF SME7jm4rFe950VpR7vBVtBtXKnea39/75uwbTAjL6kpqvDARM7MWb4R25voOmlo+ 
6Jzw4VyktVb/p7HLq0ayONGGBIF3H3P+wnvhulHR1I/OHhNwnYsH5mFju7t5qO3H ot/DxLOTmV8PkrHgfGwvbmwF5E66dpv4m5oCYHn8SiCEsXF1PkVrnSE1yeuzq681 tAnaLPRO2UXlpe4I1CY45a/WTPoXCfxJdtjjLchY10bZXV+dEQKCAQEA6eSN8as7 aJa7ljqh4Qf9LkD2lDzvYlyxzuwIh5d4+4YoctaiZdHWxXMeI6xPTrS2gTQVyCW/ 9edq9822Xo6ti4RK2yab1ewAcBDEBpDdTrcQZ9k9f5HVrCXyEuajKBCN0j5uGsPQ cwv415xyfj/fudH/xj+FwstBnc6YDxHGC6SdhXghhLCfAJJROneu7W8eQuqt8tKo eOGheiTo/WPGkNOPu6BXW5/lxMXXCPsqPJS6MBAphFDkCp+deXw4xjL4sKyqRWFY HFH17tzPiyCPdOEnuytFJcrK7+0svACdwYbypbJpHSvjWmPwoB9+58mFODF4Lvub ZD5VviRyDerf6QKCAQEAysEbRyEFquN+6PPhiS9wYdjHHiJXBtfmXf2fKBCRrLJ3 y+/qPaViyEBgb7mKblaFBluitKevg7Oge6VY22moMRTR8L9zU6mKPjt2OiHmwsB+ L0+8Z1wTO7knBJq8dwCc8Y1gpU+fWGoz5vYAWDX03yJeLsW9OG/pKm0tAFEY4GxJ qVIz2NRjBc6ojisWN+QTonxQXkevaXw0sIL7Ol1pW0zQIXVkrzjvxV1KfdXwhXLI jdxs5NrVOGNLCtrW8+vLBTbCuOWSJIzJOEMUH6UYhQCXLM5T+snEL3S0U46yqHOG FcepRU2ncsHEz5eMN+JA8N6/ZVv2eIXfub/59dOV0wKCAQEAk3nsUmRgijr4vunr ZkOuTTri/2dInaHK76j+W9iTjSzzVi2lqkPcgxVp/J5KR1tE9ETOMywyVK/9T5Cj HA4kuSLKPFKk0gcD46V+pJE1KcveCUz+LPDcZLZsY6SPXdTKR7XboP608cWruu/H dXl67OTPvMYS5ldY4VMBqAbR9Edwl1a+87aWGzsnApGyd72nvBPTaJeRaN8D/UtG qXb/HhR3vZuFWZ2BuEfypZQQ9q/kkieuteJ3V4d7OL2t4rMDAgttNWACuaCoTFto ddYq/kx1y9ultwWeXhgTK9vLnNolJ3tOMfmZWkZH0/7n+uijGmJ+4Ej/mv5+++xp CgN9+QKCAQAstfDB+rI5QPmXfVBa5C8wJJGkP4ZZZ/rQ90DFoQG+x4xLWJibB4GF D0001gGE22dyQ3rZw7CcplvZaFjz6ZTBXgn9wPo5lMV7e7lSkG9GuxQYcsjlMhS7 stS72zN8OpJhYf/R9ID7ClBvugfRa/SX0Ahc4BYd/++2/2RREZEezEJiKFJumkdL 3Iqm7zFzGcSKrEc8wyoXZOBpnDiyYi79hy7OcgjF6xRUvYHTxf3IL8uyHM2Wmfsy +BJwTlngaDrY536BL37OuI0W7xPc9pc1nS+5Hba/MwckP+QUGP+kzfTfkKvvMHSg hcJU1OKC4E3Z0AT84Q60/TCc0YzZfNMpAoIBAA3Bb4lau9KWjVMza2fLdLmPqMM0 MSCU7jo+xGH49YgET+lGFy00lbdIENMP0nv8pr7IKFy3pbMsZRHG53VPylUXSvdE UJdW+7X/d5G8VVDypypgtSptD96kAU/ctq/Ty7uZw621vvTMuwokRTsL5ipE24ys aA7M5GrMer9wrp3q7RNz64MVrnqJEFc4waFn9W7ZWG2i/upTj1oFcFF57QJz793m KnFy7cOApEBahRFIkW3AuVdg0pJuYTRsrvfjYvFD5eKEON4qSXPxAgRl2zLR8i0x jbKCySBaSFSYrnWs9Tt4QEiEYLGNe1WoCfxaUHCvM+d50GiZeJQkXCT3m80= -----END RSA PRIVATE KEY----- 
grpc-go-1.29.1/security/advancedtls/testdata/client_trust_cert_1.pem000066400000000000000000000036501365033716300256040ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFeTCCA2GgAwIBAgIURfVPAG6lOcTq29Ht/6KNbaKhm7IwDQYJKoZIhvcNAQEL BQAwSzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxITAf BgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAgFw0xOTExMDQyMTQyNTFa GA8yMjI1MDMwOTIxNDI1MVowSzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQww CgYDVQQHDANTVkwxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCC AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL95INSZmsn3AgGX2Z9bu+6T 7n9VevXuD5bv/oc/Wpfb3d2rueBizpo1xRWMiNFq59wf+6o80LlRsJwqRCnMo7AU FrR9zaryGzKa7mqkGnrX0oPIe/KP6sw+nUNtpr7m3PMpi3CkqTtZ1Oo1PhphyEu/ pTF2mmS/Blaez3BKFuX/zMLbnxedUgKD2ok8VqOPUGr8uQ4IPp3eZL9FptQ8Tt7N 7H4U/Wng4YNvCBDSlLP60NGycJ8yrdL1aJSWAHJ1vDbYlSo3JeO4cjHdGUq6HTx0 7USgg6ZX2OSEiE/DXRQbu7QkGjetssaURmUjUB3vbFCqmWZ9HDtkE3YYhk2vks6H PQXlunNHUS7Ain+IgYsqK9cNRLWqcBbdo2IEKYYwAaK0xsQox/m4TuWacHMZw4Tg Zh2Y984n6Hyq1H5FgWMYpng45VihT/iKZpD0r0vUBsDJQSQzsQkHjIJe6333gtey 8nWXm/dcRUZotcL+eJ6essniJ0ZBFz2m2DB/BKJ/5rA3hf1uQCPAdaLCho0QVUeE gQShwTiP0og/0V6dHhqoDjnEnII9ZItGVn0NTl688a9VpzPGyCDcNtTuB089KtLs UcE0vLtEmhlM0NI3CpP+ahQfxmF6i37VEzBoEzZfyzeaO2MrvmH7djfP3HYmx85M yutAMo9NSpGWOiPYi/lFAgMBAAGjUzBRMB0GA1UdDgQWBBRapdqxmdTlDuYelOr/ /GLi7QnxBjAfBgNVHSMEGDAWgBRapdqxmdTlDuYelOr//GLi7QnxBjAPBgNVHRMB Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpeytWYsMSa3sTZD1pA39IvQ4v xAbzmJB6P86sjmt2hNkCZTJYnOdSnwr8mrsKnLgjf/6kpg2OO3RqN+aXUnDeCpjl 9mzDJ4kcoVxbOXYAhTkhgugT/b8HOlMEvC+3yULrKN9deUmQPCdyVOtMBnTHE3++ VyAlP7T7R2xbgeVpOurO8SM7NLt04xxbtOS1cPVkhU2AE0+KlQBQYK9bProHTynR zhVixFWtyS9asrMUwGh/xye85xTpy+unUxkzZoPqYvK6YiKv/WB3U3SURuLcOqAD T/BMpOUwbYAP0KVEL5K82uo7csADLMwBkPvFhDsMaFGTkUdb014NQqRjTXOOK8ad Xwnm4ur4GgT0Rr/iJ990JTOQkWYWlW3ZO6DSiUZCqeRWWhju290+aviOcrJeXS0V XKkeJiYjbdnvFp/LIcg+V+n/HCDwwQgC3vQqlwd8PNvl0gKRX4EGjV+1lobZoKvD WdIuSIIUkIDbv547n2ldp3GhJHIft6jlTOLAd3jonURO2/lZVyxj8yGFPbTWRUa0 aK7IWkcOzof0+v2BrEhQQoL+lwJahqYEPSKw6WNehQxYWaxr3TL/xeawFzEVW1ve 
v8Vh1LvZ/qyucpP3dgDuj7gpVg0xshKpKEbGwzPKMz9PGcHJvgF1GOXVRIdoB9nU IdXOcawI6rpqTXrTgA== -----END CERTIFICATE----- grpc-go-1.29.1/security/advancedtls/testdata/client_trust_cert_2.pem000066400000000000000000000037411365033716300256060ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFpTCCA42gAwIBAgIURc12C7/2O090oCXCOxpatu7h4m8wDQYJKoZIhvcNAQEL BQAwYTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMSEwHwYDVQQKDBhJbnRlcm5l dCBXaWRnaXRzIFB0eSBMdGQxIjAgBgNVBAMMGWZvby5iYXIuY2xpZW50Mi50cnVz dC5jb20wIBcNMjAwMTA5MjI0OTU1WhgPMjIyNTA1MTQyMjQ5NTVaMGExCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJDQTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ dHkgTHRkMSIwIAYDVQQDDBlmb28uYmFyLmNsaWVudDIudHJ1c3QuY29tMIICIjAN BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxWqK9pdKrJUupCKUCcaVGWLq1wh6 9I8YhLuWv73R15FbImFeh3085o009a4symIMSgfn8iPrN97WAMn4OSxAmJfSs0FA DBmfpf0JOloFM5GHYVgGpdkFCiEjnJ+eTvIxRwsvbeT9EsLkeOVb8syr9bp/w5ZW oh5Su9b7pwpAanQx67dxq2lCndVjxZLKgAXO23m70xoFOKwVaynxcUdnYVskFy30 SRhB9h0w7I0L1pb5F1BTrsMgBLtrg81JCQzdmgoTKnn8AHDnA+rwe7ushXE3eCrm Uxj4n2OYc2siSXBQFyUa/x18kgubS/FPJNHYFPqnyw+g+yk9hraq3OQ8XwHA03eg 1TZkttQwfUV3g2gywDaC6e2PGl2q8+h1g/7kaSu9yiihlMfgQoa7cmC+j1MKAgki FEkyuQtYGx08rAKL/Gllmgm0VxT9jO33YnuZBqDbfnF3PYGBo4ZW9ERyJDguPTI6 6Ms68uO/B10mNePwOunlKwJxnYZkDnGcqVZpm0RCt5IFWIk+b0ek1OhpzEeGmQp+ xLWzC+O62WVmW5B2aKmJ/jV4MUOA9HFELrbh0kS+Odp1ANgFr0UKQK1O04Hex+7O 3rnHHzeAjHk8SzZRENKFp0Srf5L9GpDb4/FDmNM1XWw2g12R7nD69dNvC6OCiRvi 8TQxRAMYqSU8XKcCAwEAAaNTMFEwHQYDVR0OBBYEFAF0qURhPXq7wjLN0O0g2jrE xgLoMB8GA1UdIwQYMBaAFAF0qURhPXq7wjLN0O0g2jrExgLoMA8GA1UdEwEB/wQF MAMBAf8wDQYJKoZIhvcNAQELBQADggIBABHATLUgMaHJwT5Rc5P/vPeIu09zZyK5 avol+tSGbMmcWAUK9gYlivyqcPzeJ6m5+GJ2WkfumdhkUY7XclddxEGyw6q/eRE6 nirt84TFlc2QleSFFg84lwTLT6wE6Ym9+qC3C2b0nOgUeGl5J9itoYqDTOp5gF7Q Ileh2+9aZSnbaR9W3QgRteTIq+9cVnBZExwgrLa6/Iam0x1ERtd/U94prO57D6mE Wspvj3wfn7oUfTsTGuBjq20xjmQEGxMF+zgMTJGgkOUxwIGrhXWlK80GX6ff9tJJ 3WQ1lBG2BE1eB3NWLuyQjtO0Jl9bfrpz5sUyXMWyGD9bOz/qFLLdi1AxPAu4qIWt j8avS4DavUtU3LJarW2IVIrVVSs+hg+mrzMpjso0/8QI7kG5hV4vvD6bOxMZzoBW 
g6M9+eXYsp03HjNI34Je/w5tcUY90Jfk3mVxz1hTRh1Hj5EhtSlmwxLdBgRe1fdM Y3gsHP/OFk7MpMFWZQmxZhsfrV1Nfh1XeznKuUCx0EaGPuZcjKeqUroYvlSWKLl9 F2VfCIo0hKE1VZ9G1QxVuB65N+sdgotyj45LCn51HV1unYqY7Lsnmvbyxgevz1Sv X9kF21BV+lBLQq8aQGyGwk2RfUVlVp2cKvWHqVT+qF9QgW66Dt1gU7+m9qC4jCTO 2OGZ/CvtfsXA -----END CERTIFICATE----- grpc-go-1.29.1/security/advancedtls/testdata/client_trust_key_1.pem000066400000000000000000000063101365033716300254330ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQC/eSDUmZrJ9wIB l9mfW7vuk+5/VXr17g+W7/6HP1qX293dq7ngYs6aNcUVjIjRaufcH/uqPNC5UbCc KkQpzKOwFBa0fc2q8hsymu5qpBp619KDyHvyj+rMPp1Dbaa+5tzzKYtwpKk7WdTq NT4aYchLv6UxdppkvwZWns9wShbl/8zC258XnVICg9qJPFajj1Bq/LkOCD6d3mS/ RabUPE7ezex+FP1p4OGDbwgQ0pSz+tDRsnCfMq3S9WiUlgBydbw22JUqNyXjuHIx 3RlKuh08dO1EoIOmV9jkhIhPw10UG7u0JBo3rbLGlEZlI1Ad72xQqplmfRw7ZBN2 GIZNr5LOhz0F5bpzR1EuwIp/iIGLKivXDUS1qnAW3aNiBCmGMAGitMbEKMf5uE7l mnBzGcOE4GYdmPfOJ+h8qtR+RYFjGKZ4OOVYoU/4imaQ9K9L1AbAyUEkM7EJB4yC Xut994LXsvJ1l5v3XEVGaLXC/nienrLJ4idGQRc9ptgwfwSif+awN4X9bkAjwHWi woaNEFVHhIEEocE4j9KIP9FenR4aqA45xJyCPWSLRlZ9DU5evPGvVaczxsgg3DbU 7gdPPSrS7FHBNLy7RJoZTNDSNwqT/moUH8Zheot+1RMwaBM2X8s3mjtjK75h+3Y3 z9x2JsfOTMrrQDKPTUqRljoj2Iv5RQIDAQABAoICAC+ehV66cPenudUBmfr7Cos0 OU1rye/d6/yi5U9nnzVDVjNqIQlAKZfKpaBNWj2S8+UYAzP8egCM43qDPH6UyWTi Kh9rZjoMil0UkRTuiTNh95YUx1a1GjT/oYcCf0TdD7hd7bLvELOVDNHOugo/pVvJ ZuEdWRqTM5VZW8fWdUlwS9FuY2uxEZNUjYYx/m4hF2P0RGXMAR6sD6xOO0ZvVUIu PpHA0KGDbzKL65qbdKYqS8LLOR0usnJT3FWP1L6ir1OIm9hq7L5sweHK1h5ymRDP F69IqFU3Zda3a1tDACQfHZiYnfiY92xRtgwzMxquz+Zj91C47suKgRiO0uABOWY7 rRCE4aVihSH8hjW2U9tRJBZdpyTlk5wyfoBlVGHOHXhHR0LIBJHcffB3Zwm8BkGd OR/+4b8yqBBDGC/Bt9dIxM0QdLgmdWO0oywXircEzv6O+l3LGeQg2d7dkWKmMGxi chnVJVq/txuZVw2+nifI2NueOlc28dIy+GkQqXFVVFgqLCNd7K7wfZ6OVHpb0qx1 fXYtk1Vsx/3YgVKcbHpOKxiJK2xFVtIepooTSHuohZEX+kVtSvh664bmUJ0eZpdN lkKUhgRfFLtXS6eBPlocZFzWJKUJQ+0b1l4W9G73m68XbByH9dUEe0K1i1ERXcp3 RsSKmouSK04RKbEmyKXhAoIBAQDjGdJvFYRIpHFgsX3cZXC6t6OwSybAM+9g7LOR 
jfDZasYs9Tonh2y2MhGKqjKdGQ513Ni1WRzuGrItuyrru+PCLCbattw/GNAlkIZ4 Kqfuex8Ys9TqeW3qfBnIbpc4Sgcrjke5nqTdksoYVMM94alZVLOpsaOvdUwa2keS MgSwHh6qNVw2/Vz20i8RTzJg3fkhdEQG+atLMl2+J+LhFRlMEACnEJM29UNcuQ3N TinMRwiSSSCwQpTGMXi61tuJ2m+6cdmHUChX8QoYh9jZLBtjxwEAkGAvzZQeGgxc bafofHey/ZoZg+DSXgwlKJffmc0huswELB5CiJx43Q5JBj65AoIBAQDX1q0MVxaK iGsb9g9QE5iO+HD31TBp15rZgyJtUXJWHSFIIs2yCfD2nlxDTKp1KIdLCHj0vnca /9VGy2h2MNnoU665JCg9wJI9Tgbs7raIM5tQMaJxysGhP/jkY1v0l2fc58+TxgFy xzqbUeti2t3aQUISWzGukDlwTrQWW7/2DK4JE8pBhDI3n0n6A+eZaeNwWr2ChUlB 1syO5mQpvltM3IWJ/B5CHi5dzRupslnFkIGTzXFhf4kCXzxJb/JLY4XLHSxhiWWg GvjObbb2FTPgYc+HanpDxM5eRW2oH0hyJUoKR4IrxvvSwGJQD2FejZzRhEI0/L94 D59Ri1nALyjtAoIBAQC8878ircRirG+pBAS0W7JvqFuJUv3q7Us+WbMOaAr82toI jgDU4tiQvxfZR8LU8wQVDKtCN+LaOVwGsLQFb08RP6sUTxDxbrPAjX9UfCk9QzOc WgPNEztg3eCV423uZ6mPk9IZnuWNdZSwqdXIpvlAWjkh96s5UV8A+JyUBwnffzAE bmFLX4L52edPf5VrA0VFkHcJVrIu3rkgfg9HN0bVAnuIhUH3eBmUDGRvbZlZXcDD 9hQ8kyk1vfO1gQ8oo5ZSimdzLj5i7Sp5Po4uI4Smf+1Visp8+49BfGrMfHA3/1eY lWih0hg88AMq55t1b4I9ji4xSoPi18dYyJQaLhgBAoIBAQDMWsNpJaN/8n2G8ce5 x3PwGaXL4Jt/+tTwEEquOikJA3eZdupOIT92IKW2SoYxevftwM3U2+ilNYhXCQuU q9gFMgYB4QwAu606QgAooDNObZ4lpXjqSFBgPdOHWdOclyWNcCWHAjgo1hzVJhC5 fgQDOzo1awZ1ArR/cuTrLl9ntMWqboRW17U8GKLQBpZnGGxw2lkHlO6xWZA/1D8N jt+evEPrSzvS2gSIZ0RDvUtl1NX6fM9WwouUJVtNJKLBYi8xCiQVDSOdHSxpNlO+ VoDRd4on6lZsh4/kjdOvFD9hY5Dgfqfuju2qst/icU19WpMZhCGzTYJzSEdNy6Rk Y8JZAoIBAGFKZq43NwicIrBTIUKgvLntuNtvkCgBp9awGMptRPqVOkOSkFJLxL6v pvSjQLLsvoHmgw9DHFYi9D5bWmdNIV/8rPch8XiNyBmjitAMq5siL6cZmswpjwIN V81q7zt5bRvVJWGXL4JrfUL79bWlzPRBB+jYn2ktsdoz+vQR9tj5ohrOkjwnLSwj bqhTawwMey4q5LeZPyegkEojx5U/pp/spisT16v9dkGbxgLc7wcmT/7vU2IWY+Es 7WX5FhV0jmj4zESGD5CNtBxkTyBmKJYSXxLZ4ZjS8v3Ua8DkQUdlD73STVK9Lxdp +xZ1BJ0Xfq/t2SnXDABwi9hvqTNOGqY= -----END PRIVATE KEY----- grpc-go-1.29.1/security/advancedtls/testdata/client_trust_key_2.pem000066400000000000000000000063101365033716300254340ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDFaor2l0qslS6k 
IpQJxpUZYurXCHr0jxiEu5a/vdHXkVsiYV6HfTzmjTT1rizKYgxKB+fyI+s33tYA yfg5LECYl9KzQUAMGZ+l/Qk6WgUzkYdhWAal2QUKISOcn55O8jFHCy9t5P0SwuR4 5VvyzKv1un/DllaiHlK71vunCkBqdDHrt3GraUKd1WPFksqABc7bebvTGgU4rBVr KfFxR2dhWyQXLfRJGEH2HTDsjQvWlvkXUFOuwyAEu2uDzUkJDN2aChMqefwAcOcD 6vB7u6yFcTd4KuZTGPifY5hzayJJcFAXJRr/HXySC5tL8U8k0dgU+qfLD6D7KT2G tqrc5DxfAcDTd6DVNmS21DB9RXeDaDLANoLp7Y8aXarz6HWD/uRpK73KKKGUx+BC hrtyYL6PUwoCCSIUSTK5C1gbHTysAov8aWWaCbRXFP2M7fdie5kGoNt+cXc9gYGj hlb0RHIkOC49Mjroyzry478HXSY14/A66eUrAnGdhmQOcZypVmmbREK3kgVYiT5v R6TU6GnMR4aZCn7EtbML47rZZWZbkHZoqYn+NXgxQ4D0cUQutuHSRL452nUA2AWv RQpArU7Tgd7H7s7euccfN4CMeTxLNlEQ0oWnRKt/kv0akNvj8UOY0zVdbDaDXZHu cPr1028Lo4KJG+LxNDFEAxipJTxcpwIDAQABAoICAG9UwV+FPKCNVQtNUM0eh3EU nrl719NZa4tXOxGQ2+lE2O9Pl/6yuwiN86Llge70Ulfhk4WzifAtI+S4AdtEQH2N iU576sGoJad3Rp/4qlxFouJbwQwAkl3/CFVIkv+UiAO3pBzGeY3+CNjBCBSqJgPj FDBZ9StiDGhQOgUeu+sM8iYrgtgW+XGHKMgAG2ENZXXSdgD7+JvYOBACTF4E1aFK w9Sqnswl+PTxy2hrtpRi+cCTFU5GTiU9CMoAmEKZVdOMAPkAaARbp3xHHy24TffH PG/xSYjtWTCR+ySD84cU5qXW0B21JE48a2ztfiOWj9Rs8vmKK8/YlxEErOD7eatg v1e47Ygv8JBLhEvk38HQYv3EdsV/2jAXg7K3d1s4znyiJdHg2ujCuTiR98auDh5S Er3yFG38KKagw9I7yli/S5B7RKbhjHIunBfCA2W6cJVyA7smBllQ3YraVZWWWKIX Z9UeZrA+KoBssg16c2Pwyg0X8HuDN39n9YwTFqj2VCrap71NYNn9G0q40aI7duaA Ehl/NOBPyBMnXbnocj+0QkuKwW/i4wMRKREkzTGRHI1fXy0/LRO4Adc3ZUXaOxZx aIM/BnNhuifk7rBk8VHAngWxRj3vfVP4lgqmizczHQ5hHO15Tb6Rhng3LfqeDJjZ NOgdYMNm7epr5OsMjgApAoIBAQDrVAqnLm8jkBJHyrM577RVqCrPOUsndVZ86lg+ cN4oyg6CWyNWJyKBYHpEyAx7d6qSOyRwMZfXlupXJga/sUQzvRdS96jbcBRMLXfN ObHFRbgFF4xIuvqhUagzrMhtRPchh4dOQND8mpzRQoAvKryJrlm1o62AK1v/94a8 K4Tbtpogfc/si2RimHeNc5dilBiNRhrewA4xXYvZ2xhNBfHD1AP8O3wSsmd3aI9J PAqLaDCFuA+h8qa0qQmQC1Rehf031PEHGWmluEGuxfA6eeCQha5fzMPj7qWIN5RL X7oGji+dj+pyKfKGbOnNTNJzHi7ppnh2R2saf19+j7joadGtAoIBAQDWwfNZPQkS 5tEHzDeEyG+oWBn9OxMaBoJ1VEuZNrcjSbqgDxwcyczUTO6dpINz2ve7Dv3s0V8T 75YI16jorpT6iJr3oD+6F28PD3jghgCTtEJoFbojdffBXXTvGU1UwtJ5eTTBpKRe mOuxNL8dhMqCnmDVZ+4DQSWQ8h29xshVuymnlADfSqZC/zYLjLZrPFj2Lv/QVsvt 
7V+D4UFlNI9aEYgnlsMa5A7MfTr7M1cEDhfUz7QpUufZRzGvVx4gk98lF6AzuvRI tdcpOJUAowU8XchtI8x5NubtF04e0lpmlQMKhq8eZ7+URmYwZIROim4KV2eYL0M/ PB4Jl6otwbojAoIBAQDDABb7xaxuiZm8R6kQHyMNv5YJtO4jukV6qS2KQDi3EAfJ 2P+FClS7ZFis2iANx3FeTwe4uD+cc/+nS2lYOunK/atwIqyXeV44aYzWUDKQx17f SU4DjnzUZDe+6jQC55zo+ccS/v6t8uhzNmnFq+IjLIhFzWWdyVAo4NGS53TmI3+/ 4MEEv9TlJnYajmgpVZKqribh4b9hBKU4Vybh3EUkAnFy90+upoq6Fbh19Py/3Awp IgZCKjIdjdzQsbKtyNW1CAzZ1yMGIZK74mVX71o4J64A0Eqae0xLfdKySpZ5jCTE qVaaV0wSO/nZFwlkPuSc1EcJq9CCWn2lAC8210jZAoIBAEI/uIsp2fe7vnXyWJoc nt1GuFW2+JCJu4roQx3zlBFNuEWSA7EZy5ceWGnHC0odHVjWKhz5BaSHvzfhF1kY KhsTMwL6q04D1p3Fvxs8G0d1Txr+wNoZlSFQbDcqDgH8y6Lvcgfee1o3QFX9GIvJ oBMlOmf61KCqYyVQmz4k6T4RK6tna9F2HM4EHq73bHquNh9TplSlwekW1eVAAsVu rl4xlFfqGSvdeHc6loxRbSFyG4XpwQESczVC0h/t9vxDwY2WuTPcE2mutr4fl0+H +qCBqceJSJWICzrOeqnlaD/G7hY8MB9oD+B0yydYirwT1hhYmDuJMOx75iQ9ZiER ZxMCggEAUenerHVg6/+T0IwSeWPjR3GtJ+SWij44n99ojhq67rXaJ2jHuMaC0t4N +VsspSISO71PuOgjQNjdN8xn8QaYBcLt8HMAcZFLJnDnbhfJ4iNbToIWhKqwjtKW 8eMeNziz9kE9jazOt8l9ErRiXmxZ7P7P4fnARtX0+X2TU2r1pYFt21Mj6yrqVkj5 d4EMIl8NrHxoHhdGXN78yI6eoAxBwanLdILVw51PHShXODiCnA22lzzialsVxp09 wV0LJJv2AsnVHxFdYCVZjDKG6WDL/U8PgDgsznhkCOuvzLPUtM2rAxgq//QtJygY QBqTUW3bGnskKC0gOUqWO3Kd9zCnbA== -----END PRIVATE KEY----- grpc-go-1.29.1/security/advancedtls/testdata/server_cert_1.pem000066400000000000000000000157411365033716300243770ustar00rootroot00000000000000Certificate: Data: Version: 3 (0x2) Serial Number: 3 (0x3) Signature Algorithm: sha256WithRSAEncryption Issuer: C=US, ST=CA, L=SVL, O=Internet Widgits Pty Ltd Validity Not Before: Nov 4 21:43:00 2019 GMT Not After : Aug 18 21:43:00 2293 GMT Subject: C=US, ST=CA, L=DUMMYCITY, O=Internet Widgits Pty Ltd, CN=foo.bar.com Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (4096 bit) Modulus: 00:ec:3f:24:2d:91:3a:bd:c3:fc:15:72:42:b3:fb: 28:e6:04:a3:be:26:20:e6:ea:30:a8:aa:48:78:36: 0e:0b:99:29:3b:4b:f9:f1:d5:bf:bd:0c:13:7c:ea: 52:06:f4:bc:34:9e:2b:c0:b4:82:2c:87:fa:2f:e2: 
cd:7c:d7:b9:e1:8f:04:71:6d:85:77:ae:18:40:e4: b1:3a:4a:6b:e5:33:bf:3e:65:db:cf:94:64:87:1a: 20:46:c0:37:3a:9f:93:3f:d4:4f:ac:c4:e4:e0:28: b6:0f:28:53:2a:cf:b9:fe:50:f2:ef:47:dc:7e:b6: 60:c2:47:85:b8:cb:ca:48:5b:fa:9f:8a:97:30:01: f4:b3:51:0f:68:e1:60:ab:2f:a0:ad:fc:f0:10:4f: 60:e1:92:db:be:83:04:5c:40:87:ce:51:3e:9a:9e: d6:1c:1b:19:cb:8c:c2:6c:57:74:6f:7b:af:94:3d: 53:ad:17:a5:99:69:7c:41:f5:3e:7a:5b:48:c7:78: ff:d7:3b:a8:1f:f7:30:e7:83:26:78:e2:cb:a2:8f: 58:92:61:cd:ca:e9:b8:d1:80:c0:40:58:e9:d8:d3: 42:64:82:8f:e4:0c:b9:b1:36:db:9f:65:3f:3f:5b: 24:59:31:b3:60:0c:fa:41:5a:1b:b8:9d:ec:99:37: 90:fa:b5:e7:3f:cb:7c:e0:f9:ed:ea:27:ce:15:24: c7:77:3b:45:45:2d:19:8e:2e:7f:65:0e:85:df:66: 50:69:24:2c:a4:6a:07:e5:3f:eb:28:84:53:94:4d: 5f:9c:a8:65:a6:50:4c:c0:35:06:40:6a:a5:62:b1: 93:60:e5:1c:85:28:34:9b:29:81:6f:e2:4f:cd:15: 30:b9:19:d7:4b:bb:30:0c:4b:2d:64:fe:3b:dd:0e: a4:25:2c:4a:5c:de:d7:74:1f:5e:93:7b:1c:e8:c8: fa:72:1f:4a:eb:8d:3f:98:e4:55:98:b8:e0:8a:29: 92:33:af:75:6b:05:84:05:d3:0c:2c:07:78:bc:0e: b2:6d:a7:00:35:c4:53:1f:7b:e6:ba:07:72:a8:24: c1:0a:a7:c4:46:e6:f2:6f:3a:79:23:00:0b:b8:e5: 1f:e0:e2:ee:c6:13:a3:57:d9:86:1a:95:f7:a3:04: f1:46:d5:5f:21:d2:aa:d2:30:fb:f6:cb:e0:da:24: c6:c3:30:2f:d2:1f:21:fe:bc:0f:99:ac:ac:9b:65: 9b:e4:83:9a:00:b8:2f:40:fc:3b:42:d3:7a:e8:b7: 52:d7:f4:67:2a:a5:f7:eb:78:f1:0a:56:8b:56:12: d5:48:d8:48:70:ab:b8:69:5a:21:d3:71:b0:59:9d: 17:b4:4b Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Subject Key Identifier: C0:82:DA:FA:69:46:30:AE:FF:6F:CD:BB:93:49:94:A6:D0:E2:17:EB X509v3 Authority Key Identifier: keyid:5A:A5:DA:B1:99:D4:E5:0E:E6:1E:94:EA:FF:FC:62:E2:ED:09:F1:06 X509v3 Basic Constraints: CA:FALSE X509v3 Key Usage: Digital Signature, Key Encipherment Signature Algorithm: sha256WithRSAEncryption 36:fd:cf:ec:f5:20:4b:52:dc:2e:38:f3:92:b1:e4:b6:a1:06: 86:aa:2d:c0:e6:f5:0a:58:97:a9:e3:be:13:09:61:79:ed:d4: 41:83:26:ad:ee:0b:43:83:d1:dd:19:1a:e8:7b:b2:1f:fe:d4: c1:57:7d:6d:6b:d4:42:ea:7d:cd:34:8c:a4:1f:5b:3b:fa:de: 
bb:2f:ae:56:b6:18:e5:53:a9:a3:99:58:ad:36:be:19:54:61: 0d:52:b6:a7:53:fc:60:e5:ff:f5:7f:82:3f:c1:49:06:cd:b2: af:25:ee:de:bd:e0:e5:5e:ad:0b:dc:2e:b1:ec:7a:52:6f:9d: e0:b9:84:18:db:49:53:ee:df:93:ee:8b:9d:9b:8e:3b:2a:82: 86:7f:45:c8:dd:d1:b0:40:17:ed:63:52:a1:5b:6e:d3:5c:a2: 72:05:fb:3a:39:71:0d:b4:2c:9d:15:23:1b:1f:8d:ac:89:dc: c9:56:f2:19:c7:f3:2f:bb:d5:de:40:17:f1:52:ea:e8:93:ff: 56:43:f5:1d:cb:c0:51:52:25:d7:b0:81:a9:0e:4d:92:24:e7: 10:81:c7:31:26:ac:cb:66:c1:3f:f6:5f:69:7b:74:87:0d:b0: 8c:27:d4:24:29:59:e9:5b:a2:cb:0c:c0:f5:9b:1d:42:38:6b: e3:c3:43:1e:ba:df:b1:51:0a:b7:33:55:26:39:01:2f:9f:c7: 88:ac:2f:4a:89:f3:69:de:72:43:48:49:08:59:36:86:84:09: db:6a:82:84:3e:71:6a:9d:f9:bd:d8:b5:1e:7c:2c:29:e1:27: 45:4c:47:5b:88:b8:e6:fa:9d:9b:ff:d4:e9:8d:2d:5e:64:7f: 27:87:b2:8c:d8:7e:f5:52:3c:c4:d8:30:03:24:d7:ac:f8:53: 91:80:98:42:24:5a:6b:cb:34:48:57:e0:82:ac:96:d9:55:6c: c2:c3:8c:19:7c:56:39:0a:a8:f1:b8:77:64:70:83:a8:04:c8: 3a:5d:0b:00:4c:e5:ba:f1:40:e5:57:cd:d9:67:48:21:e9:9c: d3:f2:b8:01:b8:d1:c0:d1:3a:44:c0:97:db:e6:bc:8f:2e:33: d5:e2:38:3d:d7:7b:50:13:01:36:28:61:cc:28:98:3c:f8:21: 5d:8c:fe:f5:d0:ab:e0:60:ec:36:22:8d:0b:71:30:1b:3d:56: ae:96:e9:d2:89:c2:43:8b:ef:25:b7:d6:0d:82:e6:5a:c6:91: 8a:ad:8c:28:2a:2b:5c:4e:a1:de:cb:7d:cb:29:11:a2:66:c8: a1:33:35:75:16:fe:28:0b:78:31:0a:1f:fa:d0:a8:f4:f1:69: c7:97:1e:5d:fb:53:08:b5 -----BEGIN CERTIFICATE----- MIIFiTCCA3GgAwIBAgIBAzANBgkqhkiG9w0BAQsFADBLMQswCQYDVQQGEwJVUzEL MAkGA1UECAwCQ0ExDDAKBgNVBAcMA1NWTDEhMB8GA1UECgwYSW50ZXJuZXQgV2lk Z2l0cyBQdHkgTHRkMCAXDTE5MTEwNDIxNDMwMFoYDzIyOTMwODE4MjE0MzAwWjBn MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEjAQBgNVBAcMCURVTU1ZQ0lUWTEh MB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMRQwEgYDVQQDDAtmb28u YmFyLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAOw/JC2ROr3D /BVyQrP7KOYEo74mIObqMKiqSHg2DguZKTtL+fHVv70ME3zqUgb0vDSeK8C0giyH +i/izXzXueGPBHFthXeuGEDksTpKa+Uzvz5l28+UZIcaIEbANzqfkz/UT6zE5OAo tg8oUyrPuf5Q8u9H3H62YMJHhbjLykhb+p+KlzAB9LNRD2jhYKsvoK388BBPYOGS 
276DBFxAh85RPpqe1hwbGcuMwmxXdG97r5Q9U60XpZlpfEH1PnpbSMd4/9c7qB/3 MOeDJnjiy6KPWJJhzcrpuNGAwEBY6djTQmSCj+QMubE2259lPz9bJFkxs2AM+kFa G7id7Jk3kPq15z/LfOD57eonzhUkx3c7RUUtGY4uf2UOhd9mUGkkLKRqB+U/6yiE U5RNX5yoZaZQTMA1BkBqpWKxk2DlHIUoNJspgW/iT80VMLkZ10u7MAxLLWT+O90O pCUsSlze13QfXpN7HOjI+nIfSuuNP5jkVZi44IopkjOvdWsFhAXTDCwHeLwOsm2n ADXEUx975roHcqgkwQqnxEbm8m86eSMAC7jlH+Di7sYTo1fZhhqV96ME8UbVXyHS qtIw+/bL4NokxsMwL9IfIf68D5msrJtlm+SDmgC4L0D8O0LTeui3Utf0Zyql9+t4 8QpWi1YS1UjYSHCruGlaIdNxsFmdF7RLAgMBAAGjWjBYMB0GA1UdDgQWBBTAgtr6 aUYwrv9vzbuTSZSm0OIX6zAfBgNVHSMEGDAWgBRapdqxmdTlDuYelOr//GLi7Qnx BjAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDANBgkqhkiG9w0BAQsFAAOCAgEANv3P 7PUgS1LcLjjzkrHktqEGhqotwOb1CliXqeO+Ewlhee3UQYMmre4LQ4PR3Rka6Huy H/7UwVd9bWvUQup9zTSMpB9bO/reuy+uVrYY5VOpo5lYrTa+GVRhDVK2p1P8YOX/ 9X+CP8FJBs2yryXu3r3g5V6tC9wusex6Um+d4LmEGNtJU+7fk+6LnZuOOyqChn9F yN3RsEAX7WNSoVtu01yicgX7OjlxDbQsnRUjGx+NrIncyVbyGcfzL7vV3kAX8VLq 6JP/VkP1HcvAUVIl17CBqQ5NkiTnEIHHMSasy2bBP/ZfaXt0hw2wjCfUJClZ6Vui ywzA9ZsdQjhr48NDHrrfsVEKtzNVJjkBL5/HiKwvSonzad5yQ0hJCFk2hoQJ22qC hD5xap35vdi1HnwsKeEnRUxHW4i45vqdm//U6Y0tXmR/J4eyjNh+9VI8xNgwAyTX rPhTkYCYQiRaa8s0SFfggqyW2VVswsOMGXxWOQqo8bh3ZHCDqATIOl0LAEzluvFA 5VfN2WdIIemc0/K4AbjRwNE6RMCX2+a8jy4z1eI4Pdd7UBMBNihhzCiYPPghXYz+ 9dCr4GDsNiKNC3EwGz1Wrpbp0onCQ4vvJbfWDYLmWsaRiq2MKCorXE6h3st9yykR ombIoTM1dRb+KAt4MQof+tCo9PFpx5ceXftTCLU= -----END CERTIFICATE----- grpc-go-1.29.1/security/advancedtls/testdata/server_cert_2.pem000066400000000000000000000157771365033716300244110ustar00rootroot00000000000000Certificate: Data: Version: 3 (0x2) Serial Number: 7 (0x7) Signature Algorithm: sha256WithRSAEncryption Issuer: C=US, ST=CA, O=Internet Widgits Pty Ltd, CN=foo.bar.client2.trust.com Validity Not Before: Jan 9 22:51:54 2020 GMT Not After : Oct 23 22:51:54 2293 GMT Subject: C=US, ST=CA, O=Internet Widgits Pty Ltd, CN=foo.bar.server2.com Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (4096 bit) Modulus: 00:b1:0b:d3:7e:5b:61:30:db:b0:5f:3f:6d:d2:e0: 
3b:c6:4c:88:95:f5:7e:fd:cd:aa:20:5d:08:b9:6e: 41:db:c4:ed:0d:f8:bc:cb:b4:ee:c5:87:11:05:a0: ac:12:3b:4e:0b:4c:e4:43:e4:17:89:c1:ae:b4:13: 58:1c:31:58:6a:f2:01:ed:df:66:e9:f9:2e:9c:c5: 85:e6:02:db:36:f4:f3:07:39:75:30:f1:b5:55:5b: 46:2f:87:b0:d4:a0:ab:57:df:30:45:ae:bd:b0:49: 9a:fc:ba:5e:bc:d0:5d:86:f4:24:45:4a:d5:4d:5b: b6:ba:e8:b7:a1:3b:c3:2f:46:2e:b3:ad:2c:63:03: df:cb:f4:56:62:91:bd:bc:23:00:af:a2:7a:3d:6f: f1:33:81:60:0e:bc:20:f5:8a:49:5f:ec:58:bc:64: d5:47:36:a0:2b:b8:1f:76:25:01:89:3e:ff:52:69: 95:03:8f:bb:14:2f:1a:38:a3:9f:c1:45:20:22:77: 70:97:5e:25:51:b8:3d:5d:89:7a:bb:15:12:cd:1d: 96:d2:9c:72:67:12:85:72:6e:27:7a:ef:25:da:af: 49:26:8d:eb:a0:34:a4:4d:64:c3:63:33:77:5d:ad: 53:c7:ee:51:32:7b:cc:43:bb:86:8d:f9:52:ba:35: 23:0e:30:5d:dc:3b:25:63:c1:e3:5f:4b:b2:02:fc: fe:5b:18:7f:84:aa:f3:71:e4:16:b5:98:bc:73:c5: 58:13:41:38:eb:f3:a2:fa:8c:98:bd:f1:10:ee:b6: fe:7e:a5:81:c7:5e:f2:72:54:8e:db:09:f0:35:42: ca:b7:86:c2:48:b2:c6:18:08:ac:d1:f0:5d:de:b0: b8:25:8b:3b:bd:61:48:0f:71:3f:ed:97:72:02:c9: 44:5d:0c:00:fc:30:ca:5d:1c:e5:13:1b:3a:d0:ce: d9:36:a0:db:f5:c2:ad:a6:95:26:4e:7b:29:2d:fc: c4:04:1d:47:6e:03:59:68:1e:7a:20:6d:e8:a8:e1: 3c:57:59:f8:3d:2f:16:61:7e:24:e5:13:ca:48:0a: e6:f0:60:a3:2d:93:0b:8f:93:eb:b5:d1:06:26:52: c0:63:1f:fc:9b:73:fe:91:c3:04:40:32:8d:09:d5: 9e:c4:f6:0b:61:3d:9f:a1:d7:94:a2:e1:3d:b6:bb: 60:26:74:89:33:25:18:0f:c3:88:db:10:5e:a0:5b: f4:ee:d0:18:ab:36:50:c5:44:9b:6d:ba:ea:e2:6e: 52:3a:55:49:a3:72:ae:04:af:1d:f6:f2:83:27:17: 8b:9a:98:0a:f5:44:b1:c8:f2:a9:c8:ed:b0:75:ca: 52:25:f3 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Subject Key Identifier: 74:BD:18:0B:32:AF:D0:51:8E:4C:4C:8D:B2:F6:4E:B8:6D:AB:BD:BA X509v3 Authority Key Identifier: keyid:01:74:A9:44:61:3D:7A:BB:C2:32:CD:D0:ED:20:DA:3A:C4:C6:02:E8 X509v3 Basic Constraints: CA:FALSE X509v3 Key Usage: Digital Signature, Key Encipherment Signature Algorithm: sha256WithRSAEncryption b5:63:0c:d8:ed:af:74:2d:4c:94:36:41:05:2a:f2:ef:45:e5: 
6a:0c:76:0c:f3:90:25:e0:54:56:f3:26:23:95:7e:24:74:6b: fd:02:0a:bc:33:ba:e8:e8:8f:a3:b3:85:2e:59:4c:cf:e3:85: 1a:d6:70:5c:7c:86:e2:7a:11:99:a8:fa:43:9a:bf:50:54:00: 9e:6a:7b:72:7f:c5:20:89:6e:18:6c:46:64:ce:44:44:47:4d: 87:b5:fc:cf:f3:b9:9f:45:a3:cb:b0:91:00:96:2d:29:68:8b: ff:c7:e0:f1:b7:8d:31:c2:01:be:5b:51:1d:af:42:b1:17:22: bc:91:e4:d9:b9:96:6d:64:40:79:6c:71:ed:f6:e5:49:16:0a: e3:bc:18:95:2e:89:ba:c4:a5:ce:ba:ab:3a:32:eb:bc:d8:91: cd:f2:ee:d1:fc:67:3a:51:00:92:bd:b8:68:0b:54:04:d5:07: 0b:97:11:2c:42:64:7c:47:c1:68:b4:eb:21:c4:e4:ad:17:a7: 16:b9:e0:e6:cd:04:c6:89:36:40:d4:4b:c3:f7:7e:26:6b:3a: d7:68:b3:b2:da:00:65:13:c8:fa:d0:1c:2e:10:ba:71:3e:0f: aa:8b:d0:ff:b7:3e:83:9c:bc:b3:d1:52:0c:9f:3f:21:4a:10: dc:8f:ab:38:45:d4:2c:2a:15:2d:71:45:fe:91:a2:d8:d9:dd: 0c:dc:a7:d9:cd:1b:f5:35:fe:14:ba:c5:1f:ed:ee:fb:87:cc: 87:a1:08:c2:2e:ff:5d:af:b3:3d:6e:11:94:79:0b:28:e6:83: 4e:fc:28:8f:7f:00:85:79:7f:3a:d1:07:ee:6e:fa:94:c4:0b: 4b:2c:05:b1:68:00:e8:37:bc:b8:b2:03:5c:5a:ca:13:f2:68: 57:df:ac:fc:da:be:27:24:7e:6d:c4:a9:53:2d:f2:43:0e:30: 9c:82:d5:fb:f1:a2:0a:83:e0:a5:d8:9f:09:3e:99:c8:39:d6: 69:6d:d6:c2:27:70:59:05:3c:3c:7d:d6:41:6a:b4:9c:1f:70: 7e:3e:ee:6f:67:de:95:1d:eb:31:8b:11:c8:0d:a1:25:4e:08: ef:3a:11:2d:a7:98:0d:a1:d9:30:2d:da:d2:a0:05:6b:34:38: a6:87:b2:bd:0f:9c:51:cc:e0:2e:a2:1b:a3:a0:a6:eb:1f:0a: 22:70:59:f0:0b:c9:bd:94:4e:1d:65:3b:99:5d:8e:6c:18:82: 1d:b5:cc:6f:14:21:c4:89:07:9b:81:1d:9a:79:ff:bf:fd:ce: e4:77:11:0f:47:21:dc:d9:79:f3:40:26:56:5c:b4:86:32:8e: 28:b9:14:e7:b3:fe:86:47 -----BEGIN CERTIFICATE----- MIIFkzCCA3ugAwIBAgIBBzANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQGEwJVUzEL MAkGA1UECAwCQ0ExITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEi MCAGA1UEAwwZZm9vLmJhci5jbGllbnQyLnRydXN0LmNvbTAgFw0yMDAxMDkyMjUx NTRaGA8yMjkzMTAyMzIyNTE1NFowWzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNB MSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxHDAaBgNVBAMME2Zv by5iYXIuc2VydmVyMi5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC AQCxC9N+W2Ew27BfP23S4DvGTIiV9X79zaogXQi5bkHbxO0N+LzLtO7FhxEFoKwS 
O04LTORD5BeJwa60E1gcMVhq8gHt32bp+S6cxYXmAts29PMHOXUw8bVVW0Yvh7DU oKtX3zBFrr2wSZr8ul680F2G9CRFStVNW7a66LehO8MvRi6zrSxjA9/L9FZikb28 IwCvono9b/EzgWAOvCD1iklf7Fi8ZNVHNqAruB92JQGJPv9SaZUDj7sULxo4o5/B RSAid3CXXiVRuD1diXq7FRLNHZbSnHJnEoVybid67yXar0kmjeugNKRNZMNjM3dd rVPH7lEye8xDu4aN+VK6NSMOMF3cOyVjweNfS7IC/P5bGH+EqvNx5Ba1mLxzxVgT QTjr86L6jJi98RDutv5+pYHHXvJyVI7bCfA1Qsq3hsJIssYYCKzR8F3esLglizu9 YUgPcT/tl3ICyURdDAD8MMpdHOUTGzrQztk2oNv1wq2mlSZOeykt/MQEHUduA1lo Hnogbeio4TxXWfg9LxZhfiTlE8pICubwYKMtkwuPk+u10QYmUsBjH/ybc/6RwwRA Mo0J1Z7E9gthPZ+h15Si4T22u2AmdIkzJRgPw4jbEF6gW/Tu0BirNlDFRJttuuri blI6VUmjcq4Erx328oMnF4uamAr1RLHI8qnI7bB1ylIl8wIDAQABo1owWDAdBgNV HQ4EFgQUdL0YCzKv0FGOTEyNsvZOuG2rvbowHwYDVR0jBBgwFoAUAXSpRGE9ervC Ms3Q7SDaOsTGAugwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEL BQADggIBALVjDNjtr3QtTJQ2QQUq8u9F5WoMdgzzkCXgVFbzJiOVfiR0a/0CCrwz uujoj6OzhS5ZTM/jhRrWcFx8huJ6EZmo+kOav1BUAJ5qe3J/xSCJbhhsRmTORERH TYe1/M/zuZ9Fo8uwkQCWLSloi//H4PG3jTHCAb5bUR2vQrEXIryR5Nm5lm1kQHls ce325UkWCuO8GJUuibrEpc66qzoy67zYkc3y7tH8ZzpRAJK9uGgLVATVBwuXESxC ZHxHwWi06yHE5K0Xpxa54ObNBMaJNkDUS8P3fiZrOtdos7LaAGUTyPrQHC4QunE+ D6qL0P+3PoOcvLPRUgyfPyFKENyPqzhF1CwqFS1xRf6RotjZ3Qzcp9nNG/U1/hS6 xR/t7vuHzIehCMIu/12vsz1uEZR5Cyjmg078KI9/AIV5fzrRB+5u+pTEC0ssBbFo AOg3vLiyA1xayhPyaFffrPzavickfm3EqVMt8kMOMJyC1fvxogqD4KXYnwk+mcg5 1mlt1sIncFkFPDx91kFqtJwfcH4+7m9n3pUd6zGLEcgNoSVOCO86ES2nmA2h2TAt 2tKgBWs0OKaHsr0PnFHM4C6iG6OgpusfCiJwWfALyb2UTh1lO5ldjmwYgh21zG8U IcSJB5uBHZp5/7/9zuR3EQ9HIdzZefNAJlZctIYyjii5FOez/oZH -----END CERTIFICATE----- grpc-go-1.29.1/security/advancedtls/testdata/server_key_1.pem000066400000000000000000000062531365033716300242300ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEA7D8kLZE6vcP8FXJCs/so5gSjviYg5uowqKpIeDYOC5kpO0v5 8dW/vQwTfOpSBvS8NJ4rwLSCLIf6L+LNfNe54Y8EcW2Fd64YQOSxOkpr5TO/PmXb z5RkhxogRsA3Op+TP9RPrMTk4Ci2DyhTKs+5/lDy70fcfrZgwkeFuMvKSFv6n4qX MAH0s1EPaOFgqy+grfzwEE9g4ZLbvoMEXECHzlE+mp7WHBsZy4zCbFd0b3uvlD1T 
rRelmWl8QfU+eltIx3j/1zuoH/cw54MmeOLLoo9YkmHNyum40YDAQFjp2NNCZIKP 5Ay5sTbbn2U/P1skWTGzYAz6QVobuJ3smTeQ+rXnP8t84Pnt6ifOFSTHdztFRS0Z ji5/ZQ6F32ZQaSQspGoH5T/rKIRTlE1fnKhlplBMwDUGQGqlYrGTYOUchSg0mymB b+JPzRUwuRnXS7swDEstZP473Q6kJSxKXN7XdB9ek3sc6Mj6ch9K640/mORVmLjg iimSM691awWEBdMMLAd4vA6ybacANcRTH3vmugdyqCTBCqfERubybzp5IwALuOUf 4OLuxhOjV9mGGpX3owTxRtVfIdKq0jD79svg2iTGwzAv0h8h/rwPmaysm2Wb5IOa ALgvQPw7QtN66LdS1/RnKqX363jxClaLVhLVSNhIcKu4aVoh03GwWZ0XtEsCAwEA AQKCAgAiGesq+K+1/LhCkD+4oySAL2NDa1WMf3mOnyXe1E6qte0RtiHaGrSWoUue 2GQGxQT1w28lXej8bJRcnSx0PN+EA5TsmpaNc//kPh6m/18bsqCEbUeRayYnqknG bLCMMcSbjhYCJlmzUa0V+wgmQd3jK+QlTgYx9Dl7Ub+nsSL91ukSZnr0XxPnXmgP B5lgnHthIgW1FQAzD3PQyDC08EuqKGgVAaB+ZhsPGr5lzSntfbkWeNO/RI6O2n8p NjFSkCKtSHYFp4LZOmFAydmf0Xz7dh2e46dFBv+6ng8iOrNmrPgEciQ7Eusq/XQu SfsbNhjFFzuBPd5R2KPvvjwM0cyHXP8H84zOb/2LZ49J/RADG7CGpxCGF+27R8ex JpQJysA0T7A4JhzvwS3t5BFP5DHr+1gJW5z6RAr5kMqW65EOOIokzRcejnu6gNee +cYAGrUjxRoi/+ba23SaUwmYUfvxWeWtwG7ybIUGtMdHiAt+KegO8nNFvMUE0/un TIGyrrvhmq/L0Y4EoKOTZTJ1Qf6FSdCfimtMhoaMEehZ+squSA+lWjJQ2uLe3qC9 24n4rFyHl3rvSW12uHiYWWkGbnLtzlqL+uL3Yi7yb49PSDSHawNUKctS9tySRh0v I7H7GSFKWi+P85vdzWc4F2bWxA4bWZQ+LtfVa7sdEZggoO8OgQKCAQEA+zShQBmD Ao2sTW+rWpl0KdpbdO/+5eXu59yCYxxuwaWdzE+Hqw5Zvz0oL7/KWckTQV9Cg9vx pt8FYPOuuJfXCD5kUXnZsdXS2qUGIuLMxX3aUrxuw0NeckF42iFom0+nO8NWtWzh xnO59OQOLj/VER2QhI2fVAMIw62wZIR6pSDYCM2Rn/J4X00r8vLIUQ7ifGqw3uV7 cezyenfpb5Gli+OmQCtcI4wvZUKdxDA5WjcqEqpTcxfb8emiENfP3J0FKfumYw6Y rTM2SI2cpDzC05TF8PaucO6A48f39920d9AP+5WdExJ5XFsFFaPX7WM6w71iixr6 Ntp1DO2VHnVcUQKCAQEA8MFsW2o9sAr18sJj06azaFk0otw9wz79aqrag/uSgJDL FYiGixdRXVfT3m4/DYHVRSh3NPHcbh6KdaO1eJ2HOL11GwkclipIQhC8xvnbfYKb xg49StUhyD1HxVXI+iIRW8jtJ6Fx+HltGlPp1muEdrehTbOTQz7Oc9TYm8aFNtWP yPDqiAeOsy5v30oxKTm3D3i4hD0COcNXqKbMSI8iBULhIF0b7wU70qaILLiX/xoZ zG5ipnPdsZQHC9y26j+2NAur+JCMHQFiapWctTOQRmX27LY+aQm7JtyUw/x+GGx/ Ixc0gqoW05ngfMr3McMJ3f+kSc1FeaTe+ERG5sZL2wKCAQBsO7/iS1usJPiBIMUW sxle0wsmtiUATvKBifvP0jdSThZQKlAM/pDimeoPsLXxu3YFa5LQF1rmCB9cJ4I3 
XIy0q5UzmamXOsavl/yt2URbLx97GF8s2ID//3+flFdq24X1dPOOFcytYb1Ua1JE 0RHvXuqeghqM6wXCsbpXhNEHBsCuAkxlOuZsQWbXNY3jhuNEsf9k+kEW0/2hkLrO bFWEkWBXM5duZX8iRPKOzixX137ULfjolPYaJAzE7wdLSYgpD5kgAvD7Zx5TYliE Vv2mhepHKTH9zHVSLx2C+U5BdS79uffEeOg7R6hIK6DkUiXGonmr78KxEazvFgpy 5iQRAoIBAQDXtna/8ZEUCr4TpNiM6vAUrtjakztDlUy6Jhtj5iR9zT4pLQpf1aSx XeAXi/AyygGs1XT5mztF71df0C7owzxFOnuSnbdfVMMpbpW2MmjXLA8mhdulERIT t9R2m0ZX1+51rrHOsHjNiP6YeFcsJ2modR+x3xQzTDLu1ea+rEDvwKn0AOgiuaLC KPlTt8YUigHbeu7YjVFRMBV6pviiipyQ2jucI9DDeI0BUPTyHPMTPu+em8kIGwin 81nc5wV9HVjDiTGspNblpjfoB+VA9dJvQSzdKu0AcBef2kPw1mqkt5GyfzgtWvjY 3yakqbaSf453unYZKjL1qyOcjpB4dXPBAoIBAGzkqzHE0Izjd0jH59gxIBZvz864 A6lrC+ltMtCYcVdDfvShgnTINnf7RYQ4U2HPB+IJ3IMRysT0LYdGDhp5Y8Zi73YO KLGSl+P4jzs2z+MsavXk/wPi2xwc3htKHu8P6EFm50lR4jxNEXveGscwIm+wgr0F W7gJsJSVeB3aK10dn1hfBo1J/8mimz3mZxpYIb/v+x5DYvwik657C+6p7RmylrZx 20jwy6L6d+qWL5V8H+KZoyRMb3xfsvHiOAUgFaNa+XivzRFeVqHYl9Cr1hpL0I8j 21Nm0f7u3QAGTrgjmPPNBI2lRoDbrOOO49R5rQne41iw9ahqSYfmOEYDTs8= -----END RSA PRIVATE KEY----- grpc-go-1.29.1/security/advancedtls/testdata/server_key_2.pem000066400000000000000000000062531365033716300242310ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKQIBAAKCAgEAsQvTflthMNuwXz9t0uA7xkyIlfV+/c2qIF0IuW5B28TtDfi8 y7TuxYcRBaCsEjtOC0zkQ+QXicGutBNYHDFYavIB7d9m6fkunMWF5gLbNvTzBzl1 MPG1VVtGL4ew1KCrV98wRa69sEma/LpevNBdhvQkRUrVTVu2uui3oTvDL0Yus60s YwPfy/RWYpG9vCMAr6J6PW/xM4FgDrwg9YpJX+xYvGTVRzagK7gfdiUBiT7/UmmV A4+7FC8aOKOfwUUgIndwl14lUbg9XYl6uxUSzR2W0pxyZxKFcm4neu8l2q9JJo3r oDSkTWTDYzN3Xa1Tx+5RMnvMQ7uGjflSujUjDjBd3DslY8HjX0uyAvz+Wxh/hKrz ceQWtZi8c8VYE0E46/Oi+oyYvfEQ7rb+fqWBx17yclSO2wnwNULKt4bCSLLGGAis 0fBd3rC4JYs7vWFID3E/7ZdyAslEXQwA/DDKXRzlExs60M7ZNqDb9cKtppUmTnsp LfzEBB1HbgNZaB56IG3oqOE8V1n4PS8WYX4k5RPKSArm8GCjLZMLj5PrtdEGJlLA Yx/8m3P+kcMEQDKNCdWexPYLYT2fodeUouE9trtgJnSJMyUYD8OI2xBeoFv07tAY qzZQxUSbbbrq4m5SOlVJo3KuBK8d9vKDJxeLmpgK9USxyPKpyO2wdcpSJfMCAwEA AQKCAgEAmB9YNs7fgLKTJhQDElk3Ixipl2gcGIm5bxthHqsdDW90XDfoSIQLUU/P 
kW1PzE6GrXEBBVCb5PK1YObqIzdHCIUuoSv+anV/1pZliY/UubDYjNGS314f99p4 QOivSNNQxizwdj9Bn5JvCE4+jq/eXNGzxJIbGt/97zV8ap5GBH2iLSJT7DPs/HrS KtmdFGVi9oZ90AI6Vo4IckC1dSTADRqv2BgvpYPLNiV7avE7E6k8ipxLvIaoMRyT xCzbXJ4/kT3dUUJEgKX0nEU/XjYqNHIDIK3qIqQoY31AkQGhHfjUurrgxYPV1OYK eFdFbgk63qPnwp/akCw13hFnQrXbiqt+ecpH82aGA5XW7wdngo59Ehpy7XWwG4Zn MuyNVusSRUcclWD8PydLaweAKizjRzfVW/6nVtKiYTfkscArQTMZwEdkFkT/ZwcG OSPTyf3hSUSmd17HPCvHm66jX2EVfB+MQfQhDulcbPyzvNDNHg83miMv0nrnWiHe viOxT7M6pdJwMdHH7KfkkJmx+HJYDa8GwdGyCh2+dTfq42hRFvHNUMTCNrupwTO7 yrxFnKMo/c5z6m6OvYyCh5k/wAkpbgZi5/k1EQG9uo7E7crO9AdMuzAgR1bvcU48 MjJvxxh51J5A/VqV3RZR9CNomfLQ3WD6xVZUuvAyspRf3meO2akCggEBAOasi0oL eEXNSLRlW+OxBenEL2Ke/GuAVy1+TkUAgNtHawUNK81FWSDIjv/+gB7WDZ2CaLUw 5UY6QigQ5Qjme0cE8QPnAdbCev0LSrXXbZ1aCF546szZu3VYVdU4s4PHcOojAzKk pHYIYfbD11VHK5f5Ve8qt/I+DDGGALldfzgdSwx8K7n01Uu6zmeOvpXXirfR10AS BOU9m/O2K5qk8g1MD33xqQjEk5BKdpgz9zfyWYlPj4rdo4IFK0em9bnwPJLPDu58 F7DbKoAH5a5GY3bsODzWMWMhThpNTTvmqgZ1bLPBepnREslQ5Mf8MJYG0WU6QPNx 7tErFtpgY9PDEzcCggEBAMR7/PmV6fIpkEeAo490csFl1uoeiFEUF62SosJD2lpx +iUirGAqs0c59NtzS8PzheDuU6S1EAvMcd5uJetST4NH/Yw4T1xjKFqkRC6Wlq/x iokaH8SDizFx20dRCsJiNaxqqyr/RrVVYv27R2ihtW2482NNIl/bG/GgESZKN0hb yHplWH0UoAwwSsJDRASi4CcrS30khjr/W3LKIo2iXVEd00P+Wbin9Vo7SgrpVujS P2jrd0pp33yxZetur8XESnAjOiyStZ2tcapp1rFvj8i2YS9Zxd9bRXoaHd2XPvb2 hm2l6VtqLZVpJyUlTNvWqWmM84EZAPSfB3BSMI/AGSUCggEAHioOBN6/GZGgokZm 3710Yn9PGvxjUcN0ovRTU96e+w25xu1T/wHEh+7yFDO5mU6wdRpqitccBDT2Fbsv 2BwbnsvcoIAC04yW/KQPXvwOz3bIhWIWgjcutkeY4csKXn8kGtn9PxAcmXq7JMOz Uul9n9/xBtd1Om42tfsp+RNq4XGjMLzEEwsbIU4KU6xs67dF4ofEOBKjJT8LN7Fo vk43gNmjZPrG+eiKy2GRZJHXEC/W2YfX43bcPNJkOHhyxZ/Oq/v7neAIUQ433oop 1MJLm2+EYyA3URk312SoZt7g+Ps9/budRqP6auzzHduylsvJcg1OFQefDScvU9sq 8rQdvQKCAQALgzhPZ3lNtyG9DsyGm0weCNmO3jsehQ7eHLlsqI0iv4rooh93gwj+ I2c1dIv770jo5Q4BmJpYFqKVZd7S6v+9sXopvSLpRuYWaYmVMT2jEYQMhHtYCF0f iIxQoW7/9MEwWQ+udUavWVFzjIWim9cFltCsANkCxNPeVIKsu6yBkN8uTMHiklLO ZAX9W/OgUerQYLkLnBhBXLT/BNkBc4IEPrsiQMUBDNZTcyXjfciZ27fbbfCPa6Ss 
qbhPEy05aUbzSx0df3skwgTm90ydGOxT1lvbamctryti/CTD1xjZX5iA1DfYI2CI YKDqjET0nJ9Qj/G0nsJvkuHcsvQleBwBAoIBAQCvilpLyh8XzVY3TAsvVEaHpoNp y2sIwDiI2elZOBcQkeUbsD4bhA1iF/4cpI9tgl7ApK8ZmmcRiKh1/PIHI4Ru4nB4 bNqn7FP32vKyJ5O0o7bQBGGJIpLe0rVUmJ+ROB5PLw5aG/oo1bVKoMuw/u4o1z9S 90qI3LW6jNs+7UOELH6Gex6rfA+9xi//7NDUlJhQST++mS2pTm1/cq4RIaWx6EyF N1hqjcWESyS1EtZYKp+/Mx4PDQKDAm2f9mjuTViW15EdcOBlMm40ZKRbxIJImlEe fjZBgqsDoQKK0yYcQiVimMNc5vtNaT38lVu1NxvKJg1OeTboMBISLUOzZqQj -----END RSA PRIVATE KEY----- grpc-go-1.29.1/security/advancedtls/testdata/server_trust_cert_1.pem000066400000000000000000000037201365033716300256320ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFlzCCA3+gAwIBAgIUdkt73feqv3fH1K1fBBp2ryU4TUMwDQYJKoZIhvcNAQEL BQAwWjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMSEwHwYDVQQKDBhJbnRlcm5l dCBXaWRnaXRzIFB0eSBMdGQxGzAZBgNVBAMMEmZvby5iYXIuaG9vLmNhLmNvbTAg Fw0xOTExMTUxOTE1MTFaGA8yMjI1MDMyMDE5MTUxMVowWjELMAkGA1UEBhMCVVMx CzAJBgNVBAgMAlZBMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQx GzAZBgNVBAMMEmZvby5iYXIuaG9vLmNhLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQAD ggIPADCCAgoCggIBAMLHM55ChPP7dk+1uWz2DSkLE/xkoBaagJkjXuBgpgtdcxfp hG9qTQ/vPAnqRFNSqQPU/A0dbKlnlK2ibrSb1LD4CPiXMqMAVojbEBRiNZK2+E1p 6FDaUO/CiurX8QPsVHUp2Zol38FoGdHL3tHSEf2xfJzs1Ka4g54FASOn+wJSAdAG Ai+TUT137NqmeIVMIhg8x2vtKJpIH016mBqPENpccb3wsk9kNLj9TonKP16Nkngm YKdLBnhB5Coz9gFqnTFEXp54ESOKttNtAAdFfhBqJhYMAdoFxSsuDdpr23Nyfuzf uT5QnIffD0JCxH6bGYpMgJMVLiWJSuZ6wohFl04lwQTj3UXC8GU9o8YGC1UnvJoZ rTgC8bM+yNJnEsrU90dPMLAi6qN5pl0y18/jtyaP5YXjv2TCGAjmB3dUyFa4nCg+ 7w9tAi4pC3cBusN7e4cOseOM/23qKbcudHWAQ46VkTMs36DQyzxZutgZUI9lesol o3eCR00v4N3Uf0yXff866EaDg3NmcZzhn1stJMHJMkhPOQZZmD8dd3Pi4DuQZMa/ 74vMcjLxXo2xKTQklBUDCAFVEIR0y0oHwYUCk+AuS0PAXbGred0KOs6Ey8c68JYZ OfgD/jjY/emYzyNeGGKUkMtNA9xUqWNEnqmIQgpMndzy1c5UlnGpoOt/cfztAgMB AAGjUzBRMB0GA1UdDgQWBBS0GQgc/BAjxTCGIrzLsV+t6npd8TAfBgNVHSMEGDAW gBS0GQgc/BAjxTCGIrzLsV+t6npd8TAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 DQEBCwUAA4ICAQARMgDs180PFUSjXrRQ6hfpmYaauoq2atSYfzECfo92iemE1KiK 
qUAVX/fjRy69/r6BKAo/j8F7BJVRqKhfiZm+EUIGWCtNRpVCx6WCtfJ1G8rEEH+U E4kNpPC1OyVAhFMYKFJVXkyzpxjggLeY0bGs7BrX4wSid7vj6HM/pzfOShvB6qv0 VfpAGwTKnqw64kpy+9QPwS0sDH17oJAteJ3WeRopsqCjK9eXljmGBKVZjv2m9/TT 7Jd6VCBm/x1yxPeuJfPTxkfGR3UEcKPgXG84N0nfbTLspQcBf2QqQtW4yL/PyRC/ 8sFAPanSkNc3u1ERQub0oUtd+jQalvxXqW1N0GAJHLvtXa5Etrz3WMfOVdqthEKK CjGXdt4JoO+gvCGZH9jKa6HTgy+0QZrbOxBsJpbxSjXrJOeeJ2OgGZg8qBe5LqUD Z3o45x6j3RiQrK24luZE/6A25VUvUke4Hr9oTBQFgMlIPuTeRw6XGkNzScaPrXEU MnijDX8n7OcME+lCVCpgSd1SZzkTn4JYqlx8U33j1hRD1m5quO9+GOLQpWvZC5A5 FsikGXULKuIxVCJMuCXeWdY1aDAJ/6cwz77eDzNkySUfDEhxjQGhCmNlNDHN3dCM NtSqXJSDIwqikj7izot3evkoYa9j6w3qkNyg9fyGbdNHXp135RP5HIhqjA== -----END CERTIFICATE----- grpc-go-1.29.1/security/advancedtls/testdata/server_trust_cert_2.pem000066400000000000000000000037411365033716300256360ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFpTCCA42gAwIBAgIUTdt7HKlUedh94k4eA+nlamVgGSkwDQYJKoZIhvcNAQEL BQAwYTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMSEwHwYDVQQKDBhJbnRlcm5l dCBXaWRnaXRzIFB0eSBMdGQxIjAgBgNVBAMMGWZvby5iYXIuc2VydmVyMi50cnVz dC5jb20wIBcNMjAwMTA5MjI0NDA5WhgPMjIyNTA1MTQyMjQ0MDlaMGExCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJDQTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ dHkgTHRkMSIwIAYDVQQDDBlmb28uYmFyLnNlcnZlcjIudHJ1c3QuY29tMIICIjAN BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA2KadB/xdPMRDW/LhFGJcbzVU+yoS iRudc8w0Wq/0XpQwcDjxrq6v5XuzFIZU46Wb2g+eALNMjW7zv4BLFwEU0+CvMYWt vgbTA2A07sU7P3WA8uZwjB25nkk0iMVBclL+g1XABnfXNobbKB/dyKArlyzFBV+w rpV17RdkfXfGjeFWpfxF7KF4Wzh86XKSDYSQQE4kcQqSxDeZfRwm02jaXuPDmvUw KFIxcfEW/3SadulFvOKgHWjUEirTGsT+8B8fWsfeJjGRmFcc1+utpOoOaC1+sRe6 xTe7JJB9F13mZxEPJuFxuBvjmGiSXkyLWhVWeqzhTipojZ69mYzAxMs8AVWrYeru EKuf3MlABub8dgDLocvOYD3A0IDm5173pU5RPW9tA2jBNLnyEF+wYFLjtFfYQesl UlldccG+nZowaeUsiUPhTBzwAYSCdB+imtJxIT0xdOQCo+h9ASvnPpgk6AYaU/2d gsFY39CvKmTFYlH2EGIJK3MWm6YT3T1fTTUgs/s++CkLzwAXpna4w8SLDl3IdeLX lMiXhnoNr3uYeusxkJp5rtUHBsYPbH4Ec4erNRgbUuBnHJe4nlC6LCCycLHywhBr niPPxyNBZzvrmRrVwx5xNEQn8r4ffftpASY/uePJK2wtrZop7mWFo/OnfMO6y/3C 
22FK5wIbVLLsDlECAwEAAaNTMFEwHQYDVR0OBBYEFGOI6k3QPu9e+EORdUDkFqsV szK5MB8GA1UdIwQYMBaAFGOI6k3QPu9e+EORdUDkFqsVszK5MA8GA1UdEwEB/wQF MAMBAf8wDQYJKoZIhvcNAQELBQADggIBAMJtk0AbpT3pu+2G+NK3D4T2brrP66An lRlQxDXQ0uKunGYMgam+sJWMz3agviekRVQk9Vog9FwiGoYsS3X6ojLrA1FXp/8h oVXNmW8R87IS2KyPbzTmO+0OvO/KhYmA0USIhAmj645fyy8dGCQQOZCSfXE5/zCM ODnrgeai3qw+KB4aGJ6fgDKMdPbyl7fyvu5EWDIycuij9S8FQJ7m2gWolxFAN4/c nnWr/s6n8AQrb+k4Dp50nOrDA7JUEnFfQcBuJpDN2v5MD1/x83R1ZVuqNa+fOgrW DdSm/XbaPpzZa/R6iJQxG8mNpNEjMnBq7WCa1tLLd7MrdxzrwaFdfRiMj91b/A4W GZbX7SMrByI/6M01YoTdsPW2i/EDxJjghSGkvwuA2MPe8UqXELn5wpTXTDgCsj8V j25GUupDB8Dm5aocLEFHiUwzAGcy19zVqepTaM4w//iA1qUuaG7DE8pVzL9XFxm+ L1CGfxSTqdbqWa9PcLUoTI/8n6KQdK+vczgY4y+aUOZdGgLcVoO1BF6McnNPiihk d+HdWb0xGjw63XsV5kC41y6mHBQJdJTm0CE+yZ1e6gt+YEZCELxpxg530J0CngHs tCftzNI8o2pQhDhhKzxxGiA1cuzrrLDpdqNZo6VNm5tyYPicVbicZoJSbNDxohEJ rzhu9hQ7iDV5 -----END CERTIFICATE----- grpc-go-1.29.1/security/advancedtls/testdata/server_trust_key_1.pem000066400000000000000000000063101365033716300254630ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDCxzOeQoTz+3ZP tbls9g0pCxP8ZKAWmoCZI17gYKYLXXMX6YRvak0P7zwJ6kRTUqkD1PwNHWypZ5St om60m9Sw+Aj4lzKjAFaI2xAUYjWStvhNaehQ2lDvworq1/ED7FR1KdmaJd/BaBnR y97R0hH9sXyc7NSmuIOeBQEjp/sCUgHQBgIvk1E9d+zapniFTCIYPMdr7SiaSB9N epgajxDaXHG98LJPZDS4/U6Jyj9ejZJ4JmCnSwZ4QeQqM/YBap0xRF6eeBEjirbT bQAHRX4QaiYWDAHaBcUrLg3aa9tzcn7s37k+UJyH3w9CQsR+mxmKTICTFS4liUrm esKIRZdOJcEE491FwvBlPaPGBgtVJ7yaGa04AvGzPsjSZxLK1PdHTzCwIuqjeaZd MtfP47cmj+WF479kwhgI5gd3VMhWuJwoPu8PbQIuKQt3AbrDe3uHDrHjjP9t6im3 LnR1gEOOlZEzLN+g0Ms8WbrYGVCPZXrKJaN3gkdNL+Dd1H9Ml33/OuhGg4NzZnGc 4Z9bLSTByTJITzkGWZg/HXdz4uA7kGTGv++LzHIy8V6NsSk0JJQVAwgBVRCEdMtK B8GFApPgLktDwF2xq3ndCjrOhMvHOvCWGTn4A/442P3pmM8jXhhilJDLTQPcVKlj RJ6piEIKTJ3c8tXOVJZxqaDrf3H87QIDAQABAoICAGXTJ7wDgGfgPNCc6uv4kZa0 UOVwYXSPnszv/ciFHijw2JtWm8J3KwQ6iAOS8dcxbmQvcvkUOdsx6DsBoKhQktdV Q7NZr8IhChwPkY9mbCVf+9zUkfu6tfcxl9f/veLUKK77iuOYCyqb1mukDb9Y98jN 
gZyz/tONwFjauua+CW4EGyh6C6h9dkoRKMSBpJ3i2Cwdkg9s8v382Ehz35J62k+d ZmTqsPzqINnYqrdEAO7YSgr/3SV4BlDV+YbKlT/WUYkQ+foUQLl46e0LnakvfiDs rS53Znxo6dOSBvH50sa+w3Xn23qlP7+UL/Du4LRjNu3i4pCB0RcUeBCXep0s7FSm ZjhxZvFpFBin5NjoCrtwCwl+ijJfprKnNBPD0X+cpYKNuw7QBPufPUvLmje2m9mi R9GTqMF9Ur2ZqERU9NQ7hPPYYBJ6Fu6xWi8tsu8919FOn0sxTaWcAMmN9cp4sQ9M fLnMNQdsySp7YtEQ2cXMQv0SyId3q+rfM5wSNH0YO548X0pWApjHFUSj8qZDgXIH 4TJzPfpGcvCVXPBujcKSKocme3PcDRXjXwBV39fuZ0A/1DUusJKU7gYZN1ZR4jrI TGEmf8AvFZUxeJ7w2QnlRYBhMWUnItGAA39YCIsBin7GRD5IgpINCM9ccxrykbuH 2RDahIVs7uXfBdTu3h2NAoIBAQD5bVlrJkQFbtEM7blCQe4ffEEzxsx15U9ZmIzu YtfvXGevs5mAzP5NcQOWcmD+wvd69alN08E0sWeje9fEXUyILD0ZIhRMNn6DQVI0 DtKLfOCtTtDabh7PBl3v7W54pzH217KUzE3Ob29rd7ALJebSgyVMtLGQ6gLe0HWy immFpnOm8qbCSabhfR6ZdiIok+ST9lFpAmTnkz+dgQLWJQ4MpM0AqUD5OiYahyFj 7LggVXWSDAQCqfZbVr1KLOfjoVtsUGChSwzpFxlEmB1wXM64Q4t5gYTsT2gew8pz bNSE7OqKTHv7foqS8tfISSi7JlJ3LL9VR9Dld1nBAN5+qXyfAoIBAQDH6S/P1/W/ WRUTZuhJfKNz8zG9fG4AwKqJazf4YA/XsOwMFyDRLSMv7DDfwbXutUhItwPuDQPy 3qG0jhL/ipIJNwney148eBU8SunEgKnZm6bNk+08VL+o/9/mZkRKG6uzPDdeUVwo CSZvLtJWo6f0IrIFtzd07fANqZ6CFnyQDA4o3bc1Eq8t0rbWE1fNMVKKBn/b1N6y tgDVyGKpj4ZuwLDGZ+gQLSYdH9v8xp8pzDxallE2HP2dtqt01FEEMXsIW6tZ04l4 /VRdAXi8ro1nWus0yiX2RonAbcnJ7zVM/YdDMFU7DawzjQMiO4UT4OPPVxe+2tNV R9ra6owoQA7zAoIBAQCzLYlpvqBoorXMKs3FuiT8Oz9/mVTxcFwzSbIb4aerTF8z yboA28HnEcN5BQuGl7o+e1E3FmIZn0OLHoDekANVYyo07tVT9mWllnwd53P6PigM d6zy7N526+T5YT/Vro3m/AZOfAF8xXJt6hntuDl7ijh2ROu15VVQiMG0E1hAaVV1 XaTLtysJmt8rcMCTE8LFQ9IxtEWWUaIGXFIUUaQpEw4tZmjFYK9UqTQkWz3eBGYk FzueSkguTz5FlcKzNAu/4HG6DHbmzvAY5YloWVMq7WK5U4CQXW63gwDhMBHut165 IL6D6OBVNdwrBdsbrijZcay075Ux8i3oxt4OcWSTAoIBAEYiOPPiAAUxa4NzButB Htb+6uRfUvhQn4O2adxpVyWEnEthkdHQ1Bdr9XmKrBki4Ekia+6IAmqiUHjXnzKn mrRA6uWO03DDcC/G2FxoBy6gvNRCoWgZE2Rm4FYkarDVJFetOH+Oa5ZgH2vCMWjT 4Yh045+9t2b+Usl4SHO7D9g5Yn5TyoKEG5En650PDC6gryRdQ14MQFTSJVjbBEIY aEFSuLHiojeKn2R4WOVFiXFQhZwCQFuLsC40d9J06jdeZJt6DZNl80TPG1nFumX3 lwQ7kWjjwo20EX/BBJojob9w8pNP0Zb2JQOw5PiNiRKAQ2vqUhpTCvFQVCeZQbKd 
RqECggEBAIZh7qdFBFcCGzoRYnn+eNaJTxGDIRCZIn5Ur8SBUYEIE6+aB5ecTaLK eBfSCl9lmVaol6P3T/fXVyUwCscPU6FaeWGe9v89+Y/JqM1zGWtXqWI9Lcvowmb0 f5AenJXAjtcFUakB3xYyOakBzAHLEnacwaTPGR8s186hNXl9PV5sTFDN89IGhh9G hCQyNtiyNbckQOYzO4yoDQiYfcsTZ57DWtfFvRP3T4A08fgmUzkr0jYoy1dPP1g/ GBsgOVNr+LLgj353GqwrsHnG0Y+JarOfb31HcgR9fi4w7PruQ3ioQQaKINJBpfzH HASpvDH+panUrtqSvjDZMuDvkA6qft8= -----END PRIVATE KEY----- grpc-go-1.29.1/security/advancedtls/testdata/server_trust_key_2.pem000066400000000000000000000063101365033716300254640ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDYpp0H/F08xENb 8uEUYlxvNVT7KhKJG51zzDRar/RelDBwOPGurq/le7MUhlTjpZvaD54As0yNbvO/ gEsXARTT4K8xha2+BtMDYDTuxTs/dYDy5nCMHbmeSTSIxUFyUv6DVcAGd9c2htso H93IoCuXLMUFX7CulXXtF2R9d8aN4Val/EXsoXhbOHzpcpINhJBATiRxCpLEN5l9 HCbTaNpe48Oa9TAoUjFx8Rb/dJp26UW84qAdaNQSKtMaxP7wHx9ax94mMZGYVxzX 662k6g5oLX6xF7rFN7skkH0XXeZnEQ8m4XG4G+OYaJJeTItaFVZ6rOFOKmiNnr2Z jMDEyzwBVath6u4Qq5/cyUAG5vx2AMuhy85gPcDQgObnXvelTlE9b20DaME0ufIQ X7BgUuO0V9hB6yVSWV1xwb6dmjBp5SyJQ+FMHPABhIJ0H6Ka0nEhPTF05AKj6H0B K+c+mCToBhpT/Z2CwVjf0K8qZMViUfYQYgkrcxabphPdPV9NNSCz+z74KQvPABem drjDxIsOXch14teUyJeGeg2ve5h66zGQmnmu1QcGxg9sfgRzh6s1GBtS4Gccl7ie ULosILJwsfLCEGueI8/HI0FnO+uZGtXDHnE0RCfyvh99+2kBJj+548krbC2tminu ZYWj86d8w7rL/cLbYUrnAhtUsuwOUQIDAQABAoICAQCioY/Hat3iu8GEyHHFh4Cz ymkckZyQZ7ZuMqAqY2MhjERAOb7SzjckIRNxGNWofazcqFSHWhDhKqS24Gt9vUYR NtzMY/jkaOMF6bZSdqPfIynFLM7Xn4izFWjmMozKcRq1JC2drWBUgi8Jk8I81F9k gCr1ubs7kt6PN7wrozndT4Zn21PyKdPbRjAeXe7dTuGqI/6fDLzXppUFoZhToqYq DPfM3rljyy9qxPvqj3FUShAbllNzQDnR2WvW8IIfZn12/An6ycLthJcWTshuv3RJ J72u2o1NdmR5Mi102PwX6mphWWKwPd8/jWAygWsqGFJujFAlCRirFrplBY+/KoDD bcJz7jek7elO09SGA20W2G9DHRvUr4fknUsXCUj5PCGehDQrfYFeKFt3t383i765 WIXZmak1owxPtSuOmVbXqEVvwBkQ990E0+qxKeo1Tn1aANBZZVVb1LgJ75Zkmqrp ARRb0h75G9cKZYex+3mgjECsBWurk2eriHS2D3RfJzlDpoZWqiMhMjC32kJQonws 0X7fgGs0vl2gPxq1xAs0QLjV6BgcYwJF7QdhEXiJUUKaB7aDBpVr+7jbVl8eIoql zPE9owqQHhN5POSEnu76RPByYHt2twHXBpF0SFWKx7Nu0DpNqNqexVpqMNlz2Ehk 
tjY6xm/hdWRLw0cNUI/4AQKCAQEA9sBDIEmXbnF0RtuKXMkkDOaeS3QM6vDQbRPf itfuC9+B+qeLUkA4yyMLYml5mNuHawx34NoClmruESw/ASqgcqCxX/R9qGBJzkXn saN6uF1ZQKKngzZ3UdrbVf7R1RBikHZHN0/Sn7mdhwM/CyQnD6/H0EOHLTk90v1d Ctz8zOn6yqCqpyLedQteZavO3WKLzzothzS9WmblgALuYfGG/bRhNIkoM4JtkLsB 4hdp0n/tbIEIbMNAtvemXDO4N0VvMOa4m5if26tYI0vGIvkqHc+Oe5Jx1w3u9G6J n+gF4hvdgpa3hpmIyP6o97hmyviP4I1KaonT5lHk9UvbKVEz8QKCAQEA4MWFJ5+7 dpjvHLH9p1iBEbtdpd3Nd6wdcNjGErFdMEdsSdEQgVdbQVSbnCPqr9d79lqrzIsM wfV6AND15SfAVdD4BS7DQI5RxwQCnMU+Z4knGTUZp72TtuWYLDQ1zkKbVfl9U97a jtCz+YYp/GHJxHF+TVW8ltvPmNja+Cccf1DfXXwJG539Rl7NGULaBurRn4rNKNA2 JmNB5DEnI+34ly+DBt5KKzbUc1nL6dO2ddnl7uokgDW3B6xDSZ+tdLFhYPSrX1em VhxxvteLTqv9hyLu9u5f6wxphyo6GSMXTA8Yc+ID0GNLLb8kJmi97jFYVxRbWxev QtOJGRjn631gYQKCAQEAoBFk+kMDG0A6H+U3Qq2w1zWbpnLoFliVvMzRjO46nDUn yoR5mqfSr+RR9Etb+E8g786szY5fc1h2i2lajdUrNHEN36NpCJs+BbPPc6sLZyIX Thi19iaVDOKeupCNalwwtGomFLmRdtAgYn82nHGdbU2on2/O9wVVF9QIUY296Og4 Ks5DJh02llMDr4zeqzrMW2fwNO9/jm+FnZ9JKPxXh6lGDaCUFaYckXDe7d4mZclb KbIi1vtqtca9gr6CWEiQsvZY94bw3L2wdWUoaXOdYK1OTtdXRhzh0GsMmFEZz+4n qhk/gO+Ejm61Cc3z0OOh4heGGMrETXr+vimxSIJG4QKCAQBTYfLbmC36+RD7HCx1 ACghY9iBx56JXpgtXL1eAd4IIvbRC3WMBdQckD6J1ekiAlZCNbC12H+LFH2F//64 W97F9xeLFKXqNOGxapNthN55mi+e8kvqJjG+D74758JuGdd2NW+AxZNel52sW1EI B17KOTAZkEy9yh1hHlFc7WVs9ZtnGrRmQl3K1TBQxrQLDOFmxh8FnPf5lajD9lgG xCkMLNv2mE/7aAO4Jv+2ZouxfHwH/WQ9C7AycH0lus6mE4eEaD+KxwE1wKeRnHRZ YwRSNWtgv11l3Nzo/4k9+f6SgKcZlibED5G8DsRiW0jaLAQRicO6LzcdG0wou0yN 150BAoIBAQD0dDgOjnlXzvw8OXFcNn41K9U/oXzO/cNyxbRZP4wY0f7PEUIoF2gJ OZ4bTAXA5PQxs3fwKfC1UKN129mTcJy9HnJGJQKBRwN+W/SRbnw7yR93idwe1kGy iGGBO1bORbgj9y40QamZgnGqDRxsYmwCVss6mamtyNJtwobkWK4Wb33Uex6ZXyFK wJ5htqviYe5oYo2Yor9ok5Xf66npmYTtv5STAhKjk+PTvlTGckwr4zEWvkgnXHJd XDNx0r6O6FhkxPMIlLfX5fsaCL0jBxX+tkh/vYuF70JnZAQmEphRsLljVCr2jIQs m4DEMelbu4jDoUwmms+yra/9chKHzaRB -----END PRIVATE KEY----- grpc-go-1.29.1/security/advancedtls/testdata/testdata.go000066400000000000000000000023201365033716300232560ustar00rootroot00000000000000/* * Copyright 2017 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package testdata contains functionality to find data files in tests. package testdata import ( "path/filepath" "runtime" ) // basepath is the root directory of this package. var basepath string func init() { _, currentFile, _, _ := runtime.Caller(0) basepath = filepath.Dir(currentFile) } // Path returns the absolute path the given relative file or directory path, // relative to the google.golang.org/grpc/testdata directory in the user's GOPATH. // If rel is already absolute, it is returned unmodified. func Path(rel string) string { if filepath.IsAbs(rel) { return rel } return filepath.Join(basepath, rel) } grpc-go-1.29.1/server.go000066400000000000000000001406461365033716300150210ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpc import ( "context" "errors" "fmt" "io" "math" "net" "net/http" "reflect" "runtime" "strings" "sync" "sync/atomic" "time" "golang.org/x/net/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) const ( defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 defaultServerMaxSendMessageSize = math.MaxInt32 ) var statusOK = status.New(codes.OK, "") type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { MethodName string Handler methodHandler } // ServiceDesc represents an RPC service's specification. type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. HandlerType interface{} Methods []MethodDesc Streams []StreamDesc Metadata interface{} } // service consists of the information of the server serving this service and // the methods in this service. type service struct { server interface{} // the server for service methods md map[string]*MethodDesc sd map[string]*StreamDesc mdata interface{} } // Server is a gRPC server to serve RPC requests. 
type Server struct { opts serverOptions mu sync.Mutex // guards following lis map[net.Listener]bool conns map[transport.ServerTransport]bool serve bool drain bool cv *sync.Cond // signaled when connections close for GracefulStop m map[string]*service // service name -> service info events trace.EventLog quit *grpcsync.Event done *grpcsync.Event channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop channelzID int64 // channelz unique identification number czData *channelzData } type serverOptions struct { creds credentials.TransportCredentials codec baseCodec cp Compressor dc Decompressor unaryInt UnaryServerInterceptor streamInt StreamServerInterceptor chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor inTapHandle tap.ServerInHandle statsHandler stats.Handler maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int unknownStreamDesc *StreamDesc keepaliveParams keepalive.ServerParameters keepalivePolicy keepalive.EnforcementPolicy initialWindowSize int32 initialConnWindowSize int32 writeBufferSize int readBufferSize int connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 } var defaultServerOptions = serverOptions{ maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { apply(*serverOptions) } // EmptyServerOption does not alter the server configuration. It can be embedded // in another structure to build custom server options. // // This API is EXPERIMENTAL. 
type EmptyServerOption struct{} func (EmptyServerOption) apply(*serverOptions) {} // funcServerOption wraps a function that modifies serverOptions into an // implementation of the ServerOption interface. type funcServerOption struct { f func(*serverOptions) } func (fdo *funcServerOption) apply(do *serverOptions) { fdo.f(do) } func newFuncServerOption(f func(*serverOptions)) *funcServerOption { return &funcServerOption{ f: f, } } // WriteBufferSize determines how much data can be batched before doing a write on the wire. // The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. // The default value for this buffer is 32KB. // Zero will disable the write buffer such that each write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.writeBufferSize = s }) } // ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most // for one read syscall. // The default value for this buffer is 32KB. // Zero will disable read buffer for a connection so data framer can access the underlying // conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s }) } // InitialWindowSize returns a ServerOption that sets window size for stream. // The lower bound for window size is 64K and any value smaller than that will be ignored. func InitialWindowSize(s int32) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.initialWindowSize = s }) } // InitialConnWindowSize returns a ServerOption that sets window size for a connection. // The lower bound for window size is 64K and any value smaller than that will be ignored. 
func InitialConnWindowSize(s int32) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.initialConnWindowSize = s
	})
}

// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
	// Clamp the ping interval eagerly (at option-construction time) rather than
	// when the option is applied, so the warning is logged exactly once.
	if kp.Time > 0 && kp.Time < time.Second {
		grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s")
		kp.Time = time.Second
	}

	return newFuncServerOption(func(o *serverOptions) {
		o.keepaliveParams = kp
	})
}

// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.keepalivePolicy = kep
	})
}

// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
//
// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
func CustomCodec(codec Codec) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.codec = codec
	})
}

// RPCCompressor returns a ServerOption that sets a compressor for outbound
// messages. For backward compatibility, all outbound messages will be sent
// using this compressor, regardless of incoming message compression. By
// default, server messages will be sent using the same compressor with which
// request messages were sent.
//
// Deprecated: use encoding.RegisterCompressor instead.
func RPCCompressor(cp Compressor) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.cp = cp
	})
}

// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
// messages. It has higher priority than decompressors registered via
// encoding.RegisterCompressor.
//
// Deprecated: use encoding.RegisterCompressor instead.
func RPCDecompressor(dc Decompressor) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.dc = dc
	})
}

// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default limit.
//
// Deprecated: use MaxRecvMsgSize instead.
func MaxMsgSize(m int) ServerOption {
	return MaxRecvMsgSize(m)
}

// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default 4MB.
func MaxRecvMsgSize(m int) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.maxReceiveMessageSize = m
	})
}

// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
// If this is not set, gRPC uses the default `math.MaxInt32`.
func MaxSendMsgSize(m int) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.maxSendMessageSize = m
	})
}

// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
// of concurrent streams to each ServerTransport.
func MaxConcurrentStreams(n uint32) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.maxConcurrentStreams = n
	})
}

// Creds returns a ServerOption that sets credentials for server connections.
func Creds(c credentials.TransportCredentials) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.creds = c
	})
}

// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
// server. Only one unary interceptor can be installed. The construction of multiple
// interceptors (e.g., chaining) can be implemented at the caller.
func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		if o.unaryInt != nil {
			panic("The unary server interceptor was already set and may not be reset.")
		}
		o.unaryInt = i
	})
}

// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor
// for unary RPCs. The first interceptor will be the outer most,
// while the last interceptor will be the inner most wrapper around the real call.
// All unary interceptors added by this method will be chained.
func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
	})
}

// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
// server. Only one stream interceptor can be installed.
func StreamInterceptor(i StreamServerInterceptor) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		if o.streamInt != nil {
			panic("The stream server interceptor was already set and may not be reset.")
		}
		o.streamInt = i
	})
}

// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
// for stream RPCs. The first interceptor will be the outer most,
// while the last interceptor will be the inner most wrapper around the real call.
// All stream interceptors added by this method will be chained.
func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
	})
}

// InTapHandle returns a ServerOption that sets the tap handle for all the server
// transport to be created. Only one can be installed.
func InTapHandle(h tap.ServerInHandle) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		if o.inTapHandle != nil {
			panic("The tap handle was already set and may not be reset.")
		}
		o.inTapHandle = h
	})
}

// StatsHandler returns a ServerOption that sets the stats handler for the server.
func StatsHandler(h stats.Handler) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.statsHandler = h
	})
}

// UnknownServiceHandler returns a ServerOption that allows for adding a custom
// unknown service handler. The provided method is a bidi-streaming RPC service
// handler that will be invoked instead of returning the "unimplemented" gRPC
// error whenever a request is received for an unregistered service or method.
// The handling function and stream interceptor (if set) have full access to
// the ServerStream, including its Context.
func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.unknownStreamDesc = &StreamDesc{
			StreamName: "unknown_service_handler",
			Handler:    streamHandler,
			// We need to assume that the users of the streamHandler will want to use both.
			ClientStreams: true,
			ServerStreams: true,
		}
	})
}

// ConnectionTimeout returns a ServerOption that sets the timeout for
// connection establishment (up to and including HTTP/2 handshaking) for all
// new connections. If this is not set, the default is 120 seconds. A zero or
// negative value will result in an immediate timeout.
//
// This API is EXPERIMENTAL.
func ConnectionTimeout(d time.Duration) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.connectionTimeout = d
	})
}

// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
// of header list that the server is prepared to accept.
func MaxHeaderListSize(s uint32) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.maxHeaderListSize = &s
	})
}

// HeaderTableSize returns a ServerOption that sets the size of dynamic
// header table for stream.
//
// This API is EXPERIMENTAL.
func HeaderTableSize(s uint32) ServerOption {
	return newFuncServerOption(func(o *serverOptions) {
		o.headerTableSize = &s
	})
}

// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
	opts := defaultServerOptions
	for _, o := range opt {
		o.apply(&opts)
	}
	s := &Server{
		lis:    make(map[net.Listener]bool),
		opts:   opts,
		conns:  make(map[transport.ServerTransport]bool),
		m:      make(map[string]*service),
		quit:   grpcsync.NewEvent(),
		done:   grpcsync.NewEvent(),
		czData: new(channelzData),
	}
	// Collapse the ChainUnary/ChainStream interceptor slices (plus any single
	// interceptor option) into the single unaryInt/streamInt slots.
	chainUnaryServerInterceptors(s)
	chainStreamServerInterceptors(s)
	s.cv = sync.NewCond(&s.mu)
	if EnableTracing {
		_, file, line, _ := runtime.Caller(1)
		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
	}

	if channelz.IsOn() {
		s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
	}
	return s
}

// printf records an event in s's event log, unless s has been stopped.
// REQUIRES s.mu is held.
func (s *Server) printf(format string, a ...interface{}) {
	if s.events != nil {
		s.events.Printf(format, a...)
	}
}

// errorf records an error in s's event log, unless s has been stopped.
// REQUIRES s.mu is held.
func (s *Server) errorf(format string, a ...interface{}) {
	if s.events != nil {
		s.events.Errorf(format, a...)
	}
}

// RegisterService registers a service and its implementation to the gRPC
// server. It is called from the IDL generated code. This must be called before
// invoking Serve.
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
	// Verify at registration time that the concrete implementation actually
	// satisfies the generated handler interface; fail fast otherwise.
	ht := reflect.TypeOf(sd.HandlerType).Elem()
	st := reflect.TypeOf(ss)
	if !st.Implements(ht) {
		grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
	}
	s.register(sd, ss)
}

// register indexes the service's unary and streaming methods by name under
// s.mu. Registration after Serve or duplicate registration is fatal.
func (s *Server) register(sd *ServiceDesc, ss interface{}) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.printf("RegisterService(%q)", sd.ServiceName)
	if s.serve {
		grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
	}
	if _, ok := s.m[sd.ServiceName]; ok {
		grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
	}
	srv := &service{
		server: ss,
		md:     make(map[string]*MethodDesc),
		sd:     make(map[string]*StreamDesc),
		mdata:  sd.Metadata,
	}
	for i := range sd.Methods {
		d := &sd.Methods[i]
		srv.md[d.MethodName] = d
	}
	for i := range sd.Streams {
		d := &sd.Streams[i]
		srv.sd[d.StreamName] = d
	}
	s.m[sd.ServiceName] = srv
}

// MethodInfo contains the information of an RPC including its method name and type.
type MethodInfo struct {
	// Name is the method name only, without the service name or package name.
	Name string
	// IsClientStream indicates whether the RPC is a client streaming RPC.
	IsClientStream bool
	// IsServerStream indicates whether the RPC is a server streaming RPC.
	IsServerStream bool
}

// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
type ServiceInfo struct {
	Methods []MethodInfo
	// Metadata is the metadata specified in ServiceDesc when registering service.
	Metadata interface{}
}

// GetServiceInfo returns a map from service names to ServiceInfo.
// Service names include the package names, in the form of <package>.<service>.
func (s *Server) GetServiceInfo() map[string]ServiceInfo {
	ret := make(map[string]ServiceInfo)
	for n, srv := range s.m {
		methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd))
		for m := range srv.md {
			methods = append(methods, MethodInfo{
				Name:           m,
				IsClientStream: false,
				IsServerStream: false,
			})
		}
		for m, d := range srv.sd {
			methods = append(methods, MethodInfo{
				Name:           m,
				IsClientStream: d.ClientStreams,
				IsServerStream: d.ServerStreams,
			})
		}
		ret[n] = ServiceInfo{
			Methods:  methods,
			Metadata: srv.mdata,
		}
	}
	return ret
}

// ErrServerStopped indicates that the operation is now illegal because of
// the server being stopped.
var ErrServerStopped = errors.New("grpc: the server has been stopped")

// useTransportAuthenticator runs the configured credentials handshake on a raw
// connection; with no creds configured the connection passes through untouched.
func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	if s.opts.creds == nil {
		return rawConn, nil, nil
	}
	return s.opts.creds.ServerHandshake(rawConn)
}

// listenSocket pairs a listener with its channelz registration so both are
// torn down together in Close.
type listenSocket struct {
	net.Listener
	channelzID int64
}

func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
	return &channelz.SocketInternalMetric{
		SocketOptions: channelz.GetSocketOption(l.Listener),
		LocalAddr:     l.Listener.Addr(),
	}
}

func (l *listenSocket) Close() error {
	err := l.Listener.Close()
	if channelz.IsOn() {
		channelz.RemoveEntry(l.channelzID)
	}
	return err
}

// Serve accepts incoming connections on the listener lis, creating a new
// ServerTransport and service goroutine for each. The service goroutines
// read gRPC requests and then call the registered handlers to reply to them.
// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
// this method returns.
// Serve will return a non-nil error unless Stop or GracefulStop is called.
func (s *Server) Serve(lis net.Listener) error {
	s.mu.Lock()
	s.printf("serving")
	s.serve = true
	if s.lis == nil {
		// Serve called after Stop or GracefulStop.
		s.mu.Unlock()
		lis.Close()
		return ErrServerStopped
	}

	s.serveWG.Add(1)
	defer func() {
		s.serveWG.Done()
		if s.quit.HasFired() {
			// Stop or GracefulStop called; block until done and return nil.
			<-s.done.Done()
		}
	}()

	ls := &listenSocket{Listener: lis}
	s.lis[ls] = true

	if channelz.IsOn() {
		ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
	}
	s.mu.Unlock()

	defer func() {
		s.mu.Lock()
		if s.lis != nil && s.lis[ls] {
			ls.Close()
			delete(s.lis, ls)
		}
		s.mu.Unlock()
	}()

	var tempDelay time.Duration // how long to sleep on accept failure

	for {
		rawConn, err := lis.Accept()
		if err != nil {
			if ne, ok := err.(interface {
				Temporary() bool
			}); ok && ne.Temporary() {
				// Exponential backoff for temporary accept failures,
				// starting at 5ms and capped at 1s.
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				s.mu.Lock()
				s.printf("Accept error: %v; retrying in %v", err, tempDelay)
				s.mu.Unlock()
				timer := time.NewTimer(tempDelay)
				select {
				case <-timer.C:
				case <-s.quit.Done():
					timer.Stop()
					return nil
				}
				continue
			}
			s.mu.Lock()
			s.printf("done serving; Accept = %v", err)
			s.mu.Unlock()

			if s.quit.HasFired() {
				return nil
			}
			return err
		}
		tempDelay = 0
		// Start a new goroutine to deal with rawConn so we don't stall this Accept
		// loop goroutine.
		//
		// Make sure we account for the goroutine so GracefulStop doesn't nil out
		// s.conns before this conn can be added.
		s.serveWG.Add(1)
		go func() {
			s.handleRawConn(rawConn)
			s.serveWG.Done()
		}()
	}
}

// handleRawConn forks a goroutine to handle a just-accepted connection that
// has not had any I/O performed on it yet.
func (s *Server) handleRawConn(rawConn net.Conn) {
	if s.quit.HasFired() {
		rawConn.Close()
		return
	}
	// Bound the whole handshake (credentials + HTTP/2) by connectionTimeout.
	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
	conn, authInfo, err := s.useTransportAuthenticator(rawConn)
	if err != nil {
		// ErrConnDispatched means that the connection was dispatched away from
		// gRPC; those connections should be left open.
		if err != credentials.ErrConnDispatched {
			s.mu.Lock()
			s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
			s.mu.Unlock()
			channelz.Warningf(s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
			rawConn.Close()
		}
		rawConn.SetDeadline(time.Time{})
		return
	}

	// Finish handshaking (HTTP2)
	st := s.newHTTP2Transport(conn, authInfo)
	if st == nil {
		return
	}

	// Handshake complete; clear the deadline set above.
	rawConn.SetDeadline(time.Time{})
	if !s.addConn(st) {
		return
	}
	go func() {
		s.serveStreams(st)
		s.removeConn(st)
	}()
}

// newHTTP2Transport sets up a http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go).
func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport {
	config := &transport.ServerConfig{
		MaxStreams:            s.opts.maxConcurrentStreams,
		AuthInfo:              authInfo,
		InTapHandle:           s.opts.inTapHandle,
		StatsHandler:          s.opts.statsHandler,
		KeepaliveParams:       s.opts.keepaliveParams,
		KeepalivePolicy:       s.opts.keepalivePolicy,
		InitialWindowSize:     s.opts.initialWindowSize,
		InitialConnWindowSize: s.opts.initialConnWindowSize,
		WriteBufferSize:       s.opts.writeBufferSize,
		ReadBufferSize:        s.opts.readBufferSize,
		ChannelzParentID:      s.channelzID,
		MaxHeaderListSize:     s.opts.maxHeaderListSize,
		HeaderTableSize:       s.opts.headerTableSize,
	}
	st, err := transport.NewServerTransport("http2", c, config)
	if err != nil {
		s.mu.Lock()
		s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
		s.mu.Unlock()
		c.Close()
		channelz.Warning(s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
		return nil
	}

	return st
}

// serveStreams dispatches each incoming stream on st to its own goroutine and
// blocks until the transport is done and every handler has returned.
func (s *Server) serveStreams(st transport.ServerTransport) {
	defer st.Close()
	var wg sync.WaitGroup
	st.HandleStreams(func(stream *transport.Stream) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			s.handleStream(st, stream, s.traceInfo(st, stream))
		}()
	}, func(ctx context.Context, method string) context.Context {
		if !EnableTracing {
			return ctx
		}
		tr := trace.New("grpc.Recv."+methodFamily(method), method)
		return trace.NewContext(ctx, tr)
	})
	wg.Wait()
}

var _ http.Handler = (*Server)(nil)

// ServeHTTP implements the Go standard library's http.Handler
// interface by responding to the gRPC request r, by looking up
// the requested gRPC method in the gRPC server s.
//
// The provided HTTP request must have arrived on an HTTP/2
// connection. When using the Go standard library's server,
// practically this means that the Request must also have arrived
// over TLS.
//
// To share one port (such as 443 for https) between gRPC and an
// existing http.Handler, use a root http.Handler such as:
//
//	if r.ProtoMajor == 2 && strings.HasPrefix(
//		r.Header.Get("Content-Type"), "application/grpc") {
//		grpcServer.ServeHTTP(w, r)
//	} else {
//		yourMux.ServeHTTP(w, r)
//	}
//
// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
// separate from grpc-go's HTTP/2 server. Performance and features may vary
// between the two paths. ServeHTTP does not support some gRPC features
// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
// and subject to change.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if !s.addConn(st) {
		return
	}
	defer s.removeConn(st)
	s.serveStreams(st)
}

// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
// If tracing is not enabled, it returns nil.
func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
	if !EnableTracing {
		return nil
	}
	tr, ok := trace.FromContext(stream.Context())
	if !ok {
		return nil
	}

	trInfo = &traceInfo{
		tr: tr,
		firstLine: firstLine{
			client:     false,
			remoteAddr: st.RemoteAddr(),
		},
	}
	if dl, ok := stream.Context().Deadline(); ok {
		trInfo.firstLine.deadline = time.Until(dl)
	}
	return trInfo
}

// addConn records st in s.conns; it returns false if the server has been
// stopped (conns nil), and drains the transport immediately if the server is
// already draining for GracefulStop.
func (s *Server) addConn(st transport.ServerTransport) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.conns == nil {
		st.Close()
		return false
	}
	if s.drain {
		// Transport added after we drained our existing conns: drain it
		// immediately.
		st.Drain()
	}
	s.conns[st] = true
	return true
}

// removeConn drops st from s.conns and wakes GracefulStop waiters.
func (s *Server) removeConn(st transport.ServerTransport) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.conns != nil {
		delete(s.conns, st)
		s.cv.Broadcast()
	}
}

func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
	return &channelz.ServerInternalMetric{
		CallsStarted:             atomic.LoadInt64(&s.czData.callsStarted),
		CallsSucceeded:           atomic.LoadInt64(&s.czData.callsSucceeded),
		CallsFailed:              atomic.LoadInt64(&s.czData.callsFailed),
		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
	}
}

func (s *Server) incrCallsStarted() {
	atomic.AddInt64(&s.czData.callsStarted, 1)
	atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
}

func (s *Server) incrCallsSucceeded() {
	atomic.AddInt64(&s.czData.callsSucceeded, 1)
}

func (s *Server) incrCallsFailed() {
	atomic.AddInt64(&s.czData.callsFailed, 1)
}

// sendResponse encodes, optionally compresses, and writes msg on stream,
// enforcing maxSendMessageSize and reporting the payload to the stats handler.
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
	if err != nil {
		channelz.Error(s.channelzID, "grpc: server failed to encode response: ", err)
		return err
	}
	compData, err := compress(data, cp, comp)
	if err != nil {
		channelz.Error(s.channelzID, "grpc: server failed to compress response: ", err)
		return err
	}
	hdr, payload := msgHeader(data, compData)
	// TODO(dfawley): should we be checking len(data) instead?
	if len(payload) > s.opts.maxSendMessageSize {
		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
	}
	err = t.Write(stream, hdr, payload, opts)
	if err == nil && s.opts.statsHandler != nil {
		s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
	}
	return err
}

// chainUnaryServerInterceptors chains all unary server interceptors into one.
func chainUnaryServerInterceptors(s *Server) {
	// Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will
	// be executed before any other chained interceptors.
	interceptors := s.opts.chainUnaryInts
	if s.opts.unaryInt != nil {
		interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...)
	}

	var chainedInt UnaryServerInterceptor
	if len(interceptors) == 0 {
		chainedInt = nil
	} else if len(interceptors) == 1 {
		chainedInt = interceptors[0]
	} else {
		chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
			return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
		}
	}

	s.opts.unaryInt = chainedInt
}

// getChainUnaryHandler recursively generate the chained UnaryHandler
func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
	if curr == len(interceptors)-1 {
		return finalHandler
	}
	return func(ctx context.Context, req interface{}) (interface{}, error) {
		return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
	}
}

// processUnaryRPC runs a single unary RPC: stats/trace/channelz bookkeeping,
// compression negotiation, request receive + decode, handler invocation
// (through the interceptor chain), response send, and status write.
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
	sh := s.opts.statsHandler
	if sh != nil || trInfo != nil || channelz.IsOn() {
		if channelz.IsOn() {
			s.incrCallsStarted()
		}
		var statsBegin *stats.Begin
		if sh != nil {
			beginTime := time.Now()
			statsBegin = &stats.Begin{
				BeginTime: beginTime,
			}
			sh.HandleRPC(stream.Context(), statsBegin)
		}
		if trInfo != nil {
			trInfo.tr.LazyLog(&trInfo.firstLine, false)
		}
		// The deferred error handling for tracing, stats handler and channelz are
		// combined into one function to reduce stack usage -- a defer takes ~56-64
		// bytes on the stack, so overflowing the stack will require a stack
		// re-allocation, which is expensive.
		//
		// To maintain behavior similar to separate deferred statements, statements
		// should be executed in the reverse order. That is, tracing first, stats
		// handler second, and channelz last. Note that panics *within* defers will
		// lead to different behavior, but that's an acceptable compromise; that
		// would be undefined behavior territory anyway.
		defer func() {
			if trInfo != nil {
				if err != nil && err != io.EOF {
					trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
					trInfo.tr.SetError()
				}
				trInfo.tr.Finish()
			}
			if sh != nil {
				end := &stats.End{
					BeginTime: statsBegin.BeginTime,
					EndTime:   time.Now(),
				}
				// io.EOF from the handler means a clean client-side finish,
				// not a failure.
				if err != nil && err != io.EOF {
					end.Error = toRPCErr(err)
				}
				sh.HandleRPC(stream.Context(), end)
			}
			if channelz.IsOn() {
				if err != nil && err != io.EOF {
					s.incrCallsFailed()
				} else {
					s.incrCallsSucceeded()
				}
			}
		}()
	}

	binlog := binarylog.GetMethodLogger(stream.Method())
	if binlog != nil {
		ctx := stream.Context()
		md, _ := metadata.FromIncomingContext(ctx)
		logEntry := &binarylog.ClientHeader{
			Header:     md,
			MethodName: stream.Method(),
			PeerAddr:   nil,
		}
		if deadline, ok := ctx.Deadline(); ok {
			logEntry.Timeout = time.Until(deadline)
			if logEntry.Timeout < 0 {
				logEntry.Timeout = 0
			}
		}
		if a := md[":authority"]; len(a) > 0 {
			logEntry.Authority = a[0]
		}
		if peer, ok := peer.FromContext(ctx); ok {
			logEntry.PeerAddr = peer.Addr
		}
		binlog.Log(logEntry)
	}

	// comp and cp are used for compression.  decomp and dc are used for
	// decompression.  If comp and decomp are both set, they are the same;
	// however they are kept separate to ensure that at most one of the
	// compressor/decompressor variable pairs are set for use later.
	var comp, decomp encoding.Compressor
	var cp Compressor
	var dc Decompressor

	// If dc is set and matches the stream's compression, use it.  Otherwise, try
	// to find a matching registered compressor for decomp.
	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
		dc = s.opts.dc
	} else if rc != "" && rc != encoding.Identity {
		decomp = encoding.GetCompressor(rc)
		if decomp == nil {
			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
			t.WriteStatus(stream, st)
			return st.Err()
		}
	}

	// If cp is set, use it.  Otherwise, attempt to compress the response using
	// the incoming message compression method.
	//
	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
	if s.opts.cp != nil {
		cp = s.opts.cp
		stream.SetSendCompress(cp.Type())
	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
		// Legacy compressor not specified; attempt to respond with same encoding.
		comp = encoding.GetCompressor(rc)
		if comp != nil {
			stream.SetSendCompress(rc)
		}
	}

	var payInfo *payloadInfo
	if sh != nil || binlog != nil {
		payInfo = &payloadInfo{}
	}
	d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
	if err != nil {
		if st, ok := status.FromError(err); ok {
			if e := t.WriteStatus(stream, st); e != nil {
				// NOTE(review): this message lacks the ":" before %v used by
				// the sibling WriteStatus warnings below — consider aligning.
				channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
			}
		}
		return err
	}
	if channelz.IsOn() {
		t.IncrMsgRecv()
	}
	// df decodes the already-received (and decompressed) bytes d into the
	// handler's request value, and reports the payload to stats/binlog/trace.
	df := func(v interface{}) error {
		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
		}
		if sh != nil {
			sh.HandleRPC(stream.Context(), &stats.InPayload{
				RecvTime:   time.Now(),
				Payload:    v,
				WireLength: payInfo.wireLength,
				Data:       d,
				Length:     len(d),
			})
		}
		if binlog != nil {
			binlog.Log(&binarylog.ClientMessage{
				Message: d,
			})
		}
		if trInfo != nil {
			trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
		}
		return nil
	}

	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
	reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt)
	if appErr != nil {
		appStatus, ok := status.FromError(appErr)
		if !ok {
			// Convert appErr if it is not a grpc status error.
			appErr = status.Error(codes.Unknown, appErr.Error())
			appStatus, _ = status.FromError(appErr)
		}
		if trInfo != nil {
			trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
			trInfo.tr.SetError()
		}
		if e := t.WriteStatus(stream, appStatus); e != nil {
			channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
		}
		if binlog != nil {
			if h, _ := stream.Header(); h.Len() > 0 {
				// Only log serverHeader if there was header. Otherwise it can
				// be trailer only.
				binlog.Log(&binarylog.ServerHeader{
					Header: h,
				})
			}
			binlog.Log(&binarylog.ServerTrailer{
				Trailer: stream.Trailer(),
				Err:     appErr,
			})
		}
		return appErr
	}
	if trInfo != nil {
		trInfo.tr.LazyLog(stringer("OK"), false)
	}
	opts := &transport.Options{Last: true}

	if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
		if err == io.EOF {
			// The entire stream is done (for unary RPC only).
			return err
		}
		if sts, ok := status.FromError(err); ok {
			if e := t.WriteStatus(stream, sts); e != nil {
				channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
			}
		} else {
			switch st := err.(type) {
			case transport.ConnectionError:
				// Nothing to do here.
			default:
				panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
			}
		}
		if binlog != nil {
			h, _ := stream.Header()
			binlog.Log(&binarylog.ServerHeader{
				Header: h,
			})
			binlog.Log(&binarylog.ServerTrailer{
				Trailer: stream.Trailer(),
				Err:     appErr,
			})
		}
		return err
	}
	if binlog != nil {
		h, _ := stream.Header()
		binlog.Log(&binarylog.ServerHeader{
			Header: h,
		})
		binlog.Log(&binarylog.ServerMessage{
			Message: reply,
		})
	}
	if channelz.IsOn() {
		t.IncrMsgSent()
	}
	if trInfo != nil {
		trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
	}
	// TODO: Should we be logging if writing status failed here, like above?
	// Should the logging be in WriteStatus?  Should we ignore the WriteStatus
	// error or allow the stats handler to see it?
	err = t.WriteStatus(stream, statusOK)
	if binlog != nil {
		binlog.Log(&binarylog.ServerTrailer{
			Trailer: stream.Trailer(),
			Err:     appErr,
		})
	}
	return err
}

// chainStreamServerInterceptors chains all stream server interceptors into one.
func chainStreamServerInterceptors(s *Server) {
	// Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will
	// be executed before any other chained interceptors.
	interceptors := s.opts.chainStreamInts
	if s.opts.streamInt != nil {
		interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...)
	}

	var chainedInt StreamServerInterceptor
	if len(interceptors) == 0 {
		chainedInt = nil
	} else if len(interceptors) == 1 {
		chainedInt = interceptors[0]
	} else {
		chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
			return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
		}
	}

	s.opts.streamInt = chainedInt
}

// getChainStreamHandler recursively generate the chained StreamHandler
func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
	if curr == len(interceptors)-1 {
		return finalHandler
	}
	return func(srv interface{}, ss ServerStream) error {
		return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
	}
}

// processStreamingRPC runs a single streaming RPC: it builds the serverStream,
// performs stats/trace/channelz bookkeeping and compression negotiation, then
// invokes the handler (through the stream interceptor) and writes the status.
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
	if channelz.IsOn() {
		s.incrCallsStarted()
	}
	sh := s.opts.statsHandler
	var statsBegin *stats.Begin
	if sh != nil {
		beginTime := time.Now()
		statsBegin = &stats.Begin{
			BeginTime: beginTime,
		}
		sh.HandleRPC(stream.Context(), statsBegin)
	}
	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
	ss := &serverStream{
		ctx:                   ctx,
		t:                     t,
		s:                     stream,
		p:                     &parser{r: stream},
		codec:                 s.getCodec(stream.ContentSubtype()),
		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
		maxSendMessageSize:    s.opts.maxSendMessageSize,
		trInfo:                trInfo,
		statsHandler:          sh,
	}

	if sh != nil || trInfo != nil || channelz.IsOn() {
		// See comment in processUnaryRPC on defers.
		defer func() {
			if trInfo != nil {
				ss.mu.Lock()
				if err != nil && err != io.EOF {
					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
					ss.trInfo.tr.SetError()
				}
				ss.trInfo.tr.Finish()
				ss.trInfo.tr = nil
				ss.mu.Unlock()
			}
			if sh != nil {
				end := &stats.End{
					BeginTime: statsBegin.BeginTime,
					EndTime:   time.Now(),
				}
				if err != nil && err != io.EOF {
					end.Error = toRPCErr(err)
				}
				sh.HandleRPC(stream.Context(), end)
			}
			if channelz.IsOn() {
				if err != nil && err != io.EOF {
					s.incrCallsFailed()
				} else {
					s.incrCallsSucceeded()
				}
			}
		}()
	}

	ss.binlog = binarylog.GetMethodLogger(stream.Method())
	if ss.binlog != nil {
		md, _ := metadata.FromIncomingContext(ctx)
		logEntry := &binarylog.ClientHeader{
			Header:     md,
			MethodName: stream.Method(),
			PeerAddr:   nil,
		}
		if deadline, ok := ctx.Deadline(); ok {
			logEntry.Timeout = time.Until(deadline)
			if logEntry.Timeout < 0 {
				logEntry.Timeout = 0
			}
		}
		if a := md[":authority"]; len(a) > 0 {
			logEntry.Authority = a[0]
		}
		if peer, ok := peer.FromContext(ss.Context()); ok {
			logEntry.PeerAddr = peer.Addr
		}
		ss.binlog.Log(logEntry)
	}

	// If dc is set and matches the stream's compression, use it.  Otherwise, try
	// to find a matching registered compressor for decomp.
	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
		ss.dc = s.opts.dc
	} else if rc != "" && rc != encoding.Identity {
		ss.decomp = encoding.GetCompressor(rc)
		if ss.decomp == nil {
			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
			t.WriteStatus(ss.s, st)
			return st.Err()
		}
	}

	// If cp is set, use it.  Otherwise, attempt to compress the response using
	// the incoming message compression method.
	//
	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
	if s.opts.cp != nil {
		ss.cp = s.opts.cp
		stream.SetSendCompress(s.opts.cp.Type())
	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
		// Legacy compressor not specified; attempt to respond with same encoding.
ss.comp = encoding.GetCompressor(rc) if ss.comp != nil { stream.SetSendCompress(rc) } } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error var server interface{} if srv != nil { server = srv.server } if s.opts.streamInt == nil { appErr = sd.Handler(server, ss) } else { info := &StreamServerInfo{ FullMethod: stream.Method(), IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } appErr = s.opts.streamInt(server, ss, info, sd.Handler) } if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { appStatus = status.New(codes.Unknown, appErr.Error()) appErr = appStatus.Err() } if trInfo != nil { ss.mu.Lock() ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) ss.trInfo.tr.SetError() ss.mu.Unlock() } t.WriteStatus(ss.s, appStatus) if ss.binlog != nil { ss.binlog.Log(&binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, }) } // TODO: Should we log an error from WriteStatus here and below? return appErr } if trInfo != nil { ss.mu.Lock() ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } err = t.WriteStatus(ss.s, statusOK) if ss.binlog != nil { ss.binlog.Log(&binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, }) } return err } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() } return } service 
:= sm[:pos] method := sm[pos+1:] srv, knownService := s.m[service] if knownService { if md, ok := srv.md[method]; ok { s.processUnaryRPC(t, stream, srv, md, trInfo) return } if sd, ok := srv.sd[method]; ok { s.processStreamingRPC(t, stream, srv, sd, trInfo) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) return } var errDesc string if !knownService { errDesc = fmt.Sprintf("unknown service %v", service) } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } if trInfo != nil { trInfo.tr.LazyPrintf("%s", errDesc) trInfo.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() } } // The key to save ServerTransportStream in the context. type streamKey struct{} // NewContextWithServerTransportStream creates a new context from ctx and // attaches stream to it. // // This API is EXPERIMENTAL. func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { return context.WithValue(ctx, streamKey{}, stream) } // ServerTransportStream is a minimal interface that a transport stream must // implement. This can be used to mock an actual transport stream for tests of // handler code that use, for example, grpc.SetHeader (which requires some // stream to be in context). // // See also NewContextWithServerTransportStream. // // This API is EXPERIMENTAL. 
type ServerTransportStream interface { Method() string SetHeader(md metadata.MD) error SendHeader(md metadata.MD) error SetTrailer(md metadata.MD) error } // ServerTransportStreamFromContext returns the ServerTransportStream saved in // ctx. Returns nil if the given context has no stream associated with it // (which implies it is not an RPC invocation context). // // This API is EXPERIMENTAL. func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { s, _ := ctx.Value(streamKey{}).(ServerTransportStream) return s } // Stop stops the gRPC server. It immediately closes all open // connections and listeners. // It cancels all active RPCs on the server side and the corresponding // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { s.quit.Fire() defer func() { s.serveWG.Wait() s.done.Fire() }() s.channelzRemoveOnce.Do(func() { if channelz.IsOn() { channelz.RemoveEntry(s.channelzID) } }) s.mu.Lock() listeners := s.lis s.lis = nil st := s.conns s.conns = nil // interrupt GracefulStop if Stop and GracefulStop are called concurrently. s.cv.Broadcast() s.mu.Unlock() for lis := range listeners { lis.Close() } for c := range st { c.Close() } s.mu.Lock() if s.events != nil { s.events.Finish() s.events = nil } s.mu.Unlock() } // GracefulStop stops the gRPC server gracefully. It stops the server from // accepting new connections and RPCs and blocks until all the pending RPCs are // finished. func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() s.channelzRemoveOnce.Do(func() { if channelz.IsOn() { channelz.RemoveEntry(s.channelzID) } }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() return } for lis := range s.lis { lis.Close() } s.lis = nil if !s.drain { for st := range s.conns { st.Drain() } s.drain = true } // Wait for serving threads to be ready to exit. Only then can we be sure no // new conns will be created. 
s.mu.Unlock() s.serveWG.Wait() s.mu.Lock() for len(s.conns) != 0 { s.cv.Wait() } s.conns = nil if s.events != nil { s.events.Finish() s.events = nil } s.mu.Unlock() } // contentSubtype must be lowercase // cannot return nil func (s *Server) getCodec(contentSubtype string) baseCodec { if s.opts.codec != nil { return s.opts.codec } if contentSubtype == "" { return encoding.GetCodec(proto.Name) } codec := encoding.GetCodec(contentSubtype) if codec == nil { return encoding.GetCodec(proto.Name) } return codec } // SetHeader sets the header metadata. // When called multiple times, all the provided metadata will be merged. // All the metadata will be sent out when one of the following happens: // - grpc.SendHeader() is called; // - The first response is sent out; // - An RPC status is sent out (error or success). func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil } stream := ServerTransportStreamFromContext(ctx) if stream == nil { return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) } return stream.SetHeader(md) } // SendHeader sends header metadata. It may be called at most once. // The provided md and headers set by SetHeader() will be sent. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) } if err := stream.SendHeader(md); err != nil { return toRPCErr(err) } return nil } // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. 
func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil } stream := ServerTransportStreamFromContext(ctx) if stream == nil { return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) } return stream.SetTrailer(md) } // Method returns the method string for the server context. The returned // string is in the format of "/service/method". func Method(ctx context.Context) (string, bool) { s := ServerTransportStreamFromContext(ctx) if s == nil { return "", false } return s.Method(), true } type channelzServer struct { s *Server } func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { return c.s.channelzMetric() } grpc-go-1.29.1/server_test.go000066400000000000000000000061361365033716300160530ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpc import ( "context" "net" "reflect" "strings" "testing" "time" "google.golang.org/grpc/internal/transport" ) type emptyServiceServer interface{} type testServer struct{} func (s) TestStopBeforeServe(t *testing.T) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("failed to create listener: %v", err) } server := NewServer() server.Stop() err = server.Serve(lis) if err != ErrServerStopped { t.Fatalf("server.Serve() error = %v, want %v", err, ErrServerStopped) } // server.Serve is responsible for closing the listener, even if the // server was already stopped. err = lis.Close() if got, want := errorDesc(err), "use of closed"; !strings.Contains(got, want) { t.Errorf("Close() error = %q, want %q", got, want) } } func (s) TestGracefulStop(t *testing.T) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("failed to create listener: %v", err) } server := NewServer() go func() { // make sure Serve() is called time.Sleep(time.Millisecond * 500) server.GracefulStop() }() err = server.Serve(lis) if err != nil { t.Fatalf("Serve() returned non-nil error on GracefulStop: %v", err) } } func (s) TestGetServiceInfo(t *testing.T) { testSd := ServiceDesc{ ServiceName: "grpc.testing.EmptyService", HandlerType: (*emptyServiceServer)(nil), Methods: []MethodDesc{ { MethodName: "EmptyCall", Handler: nil, }, }, Streams: []StreamDesc{ { StreamName: "EmptyStream", Handler: nil, ServerStreams: false, ClientStreams: true, }, }, Metadata: []int{0, 2, 1, 3}, } server := NewServer() server.RegisterService(&testSd, &testServer{}) info := server.GetServiceInfo() want := map[string]ServiceInfo{ "grpc.testing.EmptyService": { Methods: []MethodInfo{ { Name: "EmptyCall", IsClientStream: false, IsServerStream: false, }, { Name: "EmptyStream", IsClientStream: true, IsServerStream: false, }}, Metadata: []int{0, 2, 1, 3}, }, } if !reflect.DeepEqual(info, want) { t.Errorf("GetServiceInfo() = %+v, want %+v", info, want) } } func (s) 
TestStreamContext(t *testing.T) { expectedStream := &transport.Stream{} ctx := NewContextWithServerTransportStream(context.Background(), expectedStream) s := ServerTransportStreamFromContext(ctx) stream, ok := s.(*transport.Stream) if !ok || expectedStream != stream { t.Fatalf("GetStreamFromContext(%v) = %v, %t, want: %v, true", ctx, stream, ok, expectedStream) } } grpc-go-1.29.1/service_config.go000066400000000000000000000333361365033716300164750ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "encoding/json" "fmt" "strconv" "strings" "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/serviceconfig" ) const maxInt = int(^uint(0) >> 1) // MethodConfig defines the configuration recommended by the service providers for a // particular method. // // Deprecated: Users should not use this struct. Service config should be received // through name resolver, as specified here // https://github.com/grpc/grpc/blob/master/doc/service_config.md type MethodConfig struct { // WaitForReady indicates whether RPCs sent to this method should wait until // the connection is ready by default (!failfast). The value specified via the // gRPC client API will override the value set here. WaitForReady *bool // Timeout is the default timeout for RPCs sent to this method. 
The actual // deadline used will be the minimum of the value specified here and the value // set by the application via the gRPC client API. If either one is not set, // then the other will be used. If neither is set, then the RPC has no deadline. Timeout *time.Duration // MaxReqSize is the maximum allowed payload size for an individual request in a // stream (client->server) in bytes. The size which is measured is the serialized // payload after per-message compression (but before stream compression) in bytes. // The actual value used is the minimum of the value specified here and the value set // by the application via the gRPC client API. If either one is not set, then the other // will be used. If neither is set, then the built-in default is used. MaxReqSize *int // MaxRespSize is the maximum allowed payload size for an individual response in a // stream (server->client) in bytes. MaxRespSize *int // RetryPolicy configures retry options for the method. retryPolicy *retryPolicy } type lbConfig struct { name string cfg serviceconfig.LoadBalancingConfig } // ServiceConfig is provided by the service provider and contains parameters for how // clients that connect to the service should behave. // // Deprecated: Users should not use this struct. Service config should be received // through name resolver, as specified here // https://github.com/grpc/grpc/blob/master/doc/service_config.md type ServiceConfig struct { serviceconfig.Config // LB is the load balancer the service providers recommends. The balancer // specified via grpc.WithBalancer will override this. This is deprecated; // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig // will be used. LB *string // lbConfig is the service config's load balancing configuration. If // lbConfig and LB are both present, lbConfig will be used. lbConfig *lbConfig // Methods contains a map for the methods in this service. If there is an // exact match for a method (i.e. 
/service/method) in the map, use the // corresponding MethodConfig. If there's no exact match, look for the // default config for the service (/service/) and use the corresponding // MethodConfig if it exists. Otherwise, the method has no MethodConfig to // use. Methods map[string]MethodConfig // If a retryThrottlingPolicy is provided, gRPC will automatically throttle // retry attempts and hedged RPCs when the client’s ratio of failures to // successes exceeds a threshold. // // For each server name, the gRPC client will maintain a token_count which is // initially set to maxTokens, and can take values between 0 and maxTokens. // // Every outgoing RPC (regardless of service or method invoked) will change // token_count as follows: // // - Every failed RPC will decrement the token_count by 1. // - Every successful RPC will increment the token_count by tokenRatio. // // If token_count is less than or equal to maxTokens / 2, then RPCs will not // be retried and hedged RPCs will not be sent. retryThrottling *retryThrottlingPolicy // healthCheckConfig must be set as one of the requirement to enable LB channel // health check. healthCheckConfig *healthCheckConfig // rawJSONString stores service config json string that get parsed into // this service config struct. rawJSONString string } // healthCheckConfig defines the go-native version of the LB channel health check config. type healthCheckConfig struct { // serviceName is the service name to use in the health-checking request. ServiceName string } // retryPolicy defines the go-native version of the retry policy defined by the // service config here: // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config type retryPolicy struct { // MaxAttempts is the maximum number of attempts, including the original RPC. // // This field is required and must be two or greater. maxAttempts int // Exponential backoff parameters. 
The initial retry attempt will occur at // random(0, initialBackoff). In general, the nth attempt will occur at // random(0, // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). // // These fields are required and must be greater than zero. initialBackoff time.Duration maxBackoff time.Duration backoffMultiplier float64 // The set of status codes which may be retried. // // Status codes are specified as strings, e.g., "UNAVAILABLE". // // This field is required and must be non-empty. // Note: a set is used to store this for easy lookup. retryableStatusCodes map[codes.Code]bool } type jsonRetryPolicy struct { MaxAttempts int InitialBackoff string MaxBackoff string BackoffMultiplier float64 RetryableStatusCodes []codes.Code } // retryThrottlingPolicy defines the go-native version of the retry throttling // policy defined by the service config here: // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config type retryThrottlingPolicy struct { // The number of tokens starts at maxTokens. The token_count will always be // between 0 and maxTokens. // // This field is required and must be greater than zero. MaxTokens float64 // The amount of tokens to add on each successful RPC. Typically this will // be some number between 0 and 1, e.g., 0.1. // // This field is required and must be greater than zero. Up to 3 decimal // places are supported. TokenRatio float64 } func parseDuration(s *string) (*time.Duration, error) { if s == nil { return nil, nil } if !strings.HasSuffix(*s, "s") { return nil, fmt.Errorf("malformed duration %q", *s) } ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) if len(ss) > 2 { return nil, fmt.Errorf("malformed duration %q", *s) } // hasDigits is set if either the whole or fractional part of the number is // present, since both are optional but one is required. 
hasDigits := false var d time.Duration if len(ss[0]) > 0 { i, err := strconv.ParseInt(ss[0], 10, 32) if err != nil { return nil, fmt.Errorf("malformed duration %q: %v", *s, err) } d = time.Duration(i) * time.Second hasDigits = true } if len(ss) == 2 && len(ss[1]) > 0 { if len(ss[1]) > 9 { return nil, fmt.Errorf("malformed duration %q", *s) } f, err := strconv.ParseInt(ss[1], 10, 64) if err != nil { return nil, fmt.Errorf("malformed duration %q: %v", *s, err) } for i := 9; i > len(ss[1]); i-- { f *= 10 } d += time.Duration(f) hasDigits = true } if !hasDigits { return nil, fmt.Errorf("malformed duration %q", *s) } return &d, nil } type jsonName struct { Service *string Method *string } func (j jsonName) generatePath() (string, bool) { if j.Service == nil { return "", false } res := "/" + *j.Service + "/" if j.Method != nil { res += *j.Method } return res, true } // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. type jsonMC struct { Name *[]jsonName WaitForReady *bool Timeout *string MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy } type loadBalancingConfig map[string]json.RawMessage // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
type jsonSC struct { LoadBalancingPolicy *string LoadBalancingConfig *[]loadBalancingConfig MethodConfig *[]jsonMC RetryThrottling *retryThrottlingPolicy HealthCheckConfig *healthCheckConfig } func init() { internal.ParseServiceConfigForTesting = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} } var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ LB: rsc.LoadBalancingPolicy, Methods: make(map[string]MethodConfig), retryThrottling: rsc.RetryThrottling, healthCheckConfig: rsc.HealthCheckConfig, rawJSONString: js, } if rsc.LoadBalancingConfig != nil { for i, lbcfg := range *rsc.LoadBalancingConfig { if len(lbcfg) != 1 { err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) grpclog.Warningf(err.Error()) return &serviceconfig.ParseResult{Err: err} } var name string var jsonCfg json.RawMessage for name, jsonCfg = range lbcfg { } builder := balancer.Get(name) if builder == nil { continue } sc.lbConfig = &lbConfig{name: name} if parser, ok := builder.(balancer.ConfigParser); ok { var err error sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg) if err != nil { return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)} } } else if string(jsonCfg) != "{}" { grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) } break } if sc.lbConfig == nil { // We had a loadBalancingConfig field but did not encounter a // supported policy. The config is considered invalid in this // case. 
err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found") grpclog.Warningf(err.Error()) return &serviceconfig.ParseResult{Err: err} } } if rsc.MethodConfig == nil { return &serviceconfig.ParseResult{Config: &sc} } for _, m := range *rsc.MethodConfig { if m.Name == nil { continue } d, err := parseDuration(m.Timeout) if err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } mc := MethodConfig{ WaitForReady: m.WaitForReady, Timeout: d, } if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { if *m.MaxRequestMessageBytes > int64(maxInt) { mc.MaxReqSize = newInt(maxInt) } else { mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) } } if m.MaxResponseMessageBytes != nil { if *m.MaxResponseMessageBytes > int64(maxInt) { mc.MaxRespSize = newInt(maxInt) } else { mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) } } for _, n := range *m.Name { if path, valid := n.generatePath(); valid { sc.Methods[path] = mc } } } if sc.retryThrottling != nil { if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} } if tr := sc.retryThrottling.TokenRatio; tr <= 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} } } return &serviceconfig.ParseResult{Config: &sc} } func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { if jrp == nil { return nil, nil } ib, err := parseDuration(&jrp.InitialBackoff) if err != nil { return nil, err } mb, err := parseDuration(&jrp.MaxBackoff) if err != nil { return nil, err } if jrp.MaxAttempts <= 1 || *ib <= 0 || *mb <= 0 || 
jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) return nil, nil } rp := &retryPolicy{ maxAttempts: jrp.MaxAttempts, initialBackoff: *ib, maxBackoff: *mb, backoffMultiplier: jrp.BackoffMultiplier, retryableStatusCodes: make(map[codes.Code]bool), } if rp.maxAttempts > 5 { // TODO(retry): Make the max maxAttempts configurable. rp.maxAttempts = 5 } for _, code := range jrp.RetryableStatusCodes { rp.retryableStatusCodes[code] = true } return rp, nil } func min(a, b *int) *int { if *a < *b { return a } return b } func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { if mcMax == nil && doptMax == nil { return &defaultVal } if mcMax != nil && doptMax != nil { return min(mcMax, doptMax) } if mcMax != nil { return mcMax } return doptMax } func newInt(b int) *int { return &b } grpc-go-1.29.1/service_config_test.go000066400000000000000000000221001365033716300175170ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpc import ( "encoding/json" "fmt" "math" "reflect" "testing" "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/serviceconfig" ) type parseTestCase struct { scjs string wantSC *ServiceConfig wantErr bool } func runParseTests(t *testing.T, testCases []parseTestCase) { t.Helper() for _, c := range testCases { scpr := parseServiceConfig(c.scjs) var sc *ServiceConfig sc, _ = scpr.Config.(*ServiceConfig) if !c.wantErr { c.wantSC.rawJSONString = c.scjs } if c.wantErr != (scpr.Err != nil) || !reflect.DeepEqual(sc, c.wantSC) { t.Fatalf("parseServiceConfig(%s) = %+v, %v, want %+v, %v", c.scjs, sc, scpr.Err, c.wantSC, c.wantErr) } } } type pbbData struct { serviceconfig.LoadBalancingConfig Foo string Bar int } type parseBalancerBuilder struct{} func (parseBalancerBuilder) Name() string { return "pbb" } func (parseBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { d := pbbData{} if err := json.Unmarshal(c, &d); err != nil { return nil, err } return d, nil } func (parseBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { panic("unimplemented") } func init() { balancer.Register(parseBalancerBuilder{}) } func (s) TestParseLBConfig(t *testing.T) { testcases := []parseTestCase{ { `{ "loadBalancingConfig": [{"pbb": { "foo": "hi" } }] }`, &ServiceConfig{ Methods: make(map[string]MethodConfig), lbConfig: &lbConfig{name: "pbb", cfg: pbbData{Foo: "hi"}}, }, false, }, } runParseTests(t, testcases) } func (s) TestParseNoLBConfigSupported(t *testing.T) { // We have a loadBalancingConfig field but will not encounter a supported // policy. The config will be considered invalid in this case. 
testcases := []parseTestCase{ { scjs: `{ "loadBalancingConfig": [{"not_a_balancer1": {} }, {"not_a_balancer2": {}}] }`, wantErr: true, }, { scjs: `{"loadBalancingConfig": []}`, wantErr: true, }, } runParseTests(t, testcases) } func (s) TestParseLoadBalancer(t *testing.T) { testcases := []parseTestCase{ { `{ "loadBalancingPolicy": "round_robin", "methodConfig": [ { "name": [ { "service": "foo", "method": "Bar" } ], "waitForReady": true } ] }`, &ServiceConfig{ LB: newString("round_robin"), Methods: map[string]MethodConfig{ "/foo/Bar": { WaitForReady: newBool(true), }, }, }, false, }, { `{ "loadBalancingPolicy": 1, "methodConfig": [ { "name": [ { "service": "foo", "method": "Bar" } ], "waitForReady": false } ] }`, nil, true, }, } runParseTests(t, testcases) } func (s) TestParseWaitForReady(t *testing.T) { testcases := []parseTestCase{ { `{ "methodConfig": [ { "name": [ { "service": "foo", "method": "Bar" } ], "waitForReady": true } ] }`, &ServiceConfig{ Methods: map[string]MethodConfig{ "/foo/Bar": { WaitForReady: newBool(true), }, }, }, false, }, { `{ "methodConfig": [ { "name": [ { "service": "foo", "method": "Bar" } ], "waitForReady": false } ] }`, &ServiceConfig{ Methods: map[string]MethodConfig{ "/foo/Bar": { WaitForReady: newBool(false), }, }, }, false, }, { `{ "methodConfig": [ { "name": [ { "service": "foo", "method": "Bar" } ], "waitForReady": fall }, { "name": [ { "service": "foo", "method": "Bar" } ], "waitForReady": true } ] }`, nil, true, }, } runParseTests(t, testcases) } func (s) TestParseTimeOut(t *testing.T) { testcases := []parseTestCase{ { `{ "methodConfig": [ { "name": [ { "service": "foo", "method": "Bar" } ], "timeout": "1s" } ] }`, &ServiceConfig{ Methods: map[string]MethodConfig{ "/foo/Bar": { Timeout: newDuration(time.Second), }, }, }, false, }, { `{ "methodConfig": [ { "name": [ { "service": "foo", "method": "Bar" } ], "timeout": "3c" } ] }`, nil, true, }, { `{ "methodConfig": [ { "name": [ { "service": "foo", "method": "Bar" } ], "timeout": 
"3c" }, { "name": [ { "service": "foo", "method": "Bar" } ], "timeout": "1s" } ] }`, nil, true, }, } runParseTests(t, testcases) } func (s) TestParseMsgSize(t *testing.T) { testcases := []parseTestCase{ { `{ "methodConfig": [ { "name": [ { "service": "foo", "method": "Bar" } ], "maxRequestMessageBytes": 1024, "maxResponseMessageBytes": 2048 } ] }`, &ServiceConfig{ Methods: map[string]MethodConfig{ "/foo/Bar": { MaxReqSize: newInt(1024), MaxRespSize: newInt(2048), }, }, }, false, }, { `{ "methodConfig": [ { "name": [ { "service": "foo", "method": "Bar" } ], "maxRequestMessageBytes": "1024", "maxResponseMessageBytes": "2048" }, { "name": [ { "service": "foo", "method": "Bar" } ], "maxRequestMessageBytes": 1024, "maxResponseMessageBytes": 2048 } ] }`, nil, true, }, } runParseTests(t, testcases) } func (s) TestParseDuration(t *testing.T) { testCases := []struct { s *string want *time.Duration err bool }{ {s: nil, want: nil}, {s: newString("1s"), want: newDuration(time.Second)}, {s: newString("-1s"), want: newDuration(-time.Second)}, {s: newString("1.1s"), want: newDuration(1100 * time.Millisecond)}, {s: newString("1.s"), want: newDuration(time.Second)}, {s: newString("1.0s"), want: newDuration(time.Second)}, {s: newString(".002s"), want: newDuration(2 * time.Millisecond)}, {s: newString(".002000s"), want: newDuration(2 * time.Millisecond)}, {s: newString("0.003s"), want: newDuration(3 * time.Millisecond)}, {s: newString("0.000004s"), want: newDuration(4 * time.Microsecond)}, {s: newString("5000.000000009s"), want: newDuration(5000*time.Second + 9*time.Nanosecond)}, {s: newString("4999.999999999s"), want: newDuration(5000*time.Second - time.Nanosecond)}, {s: newString("1"), err: true}, {s: newString("s"), err: true}, {s: newString(".s"), err: true}, {s: newString("1 s"), err: true}, {s: newString(" 1s"), err: true}, {s: newString("1ms"), err: true}, {s: newString("1.1.1s"), err: true}, {s: newString("Xs"), err: true}, {s: newString("as"), err: true}, {s: 
newString(".0000000001s"), err: true}, {s: newString(fmt.Sprint(math.MaxInt32) + "s"), want: newDuration(math.MaxInt32 * time.Second)}, {s: newString(fmt.Sprint(int64(math.MaxInt32)+1) + "s"), err: true}, } for _, tc := range testCases { got, err := parseDuration(tc.s) if tc.err != (err != nil) || (got == nil) != (tc.want == nil) || (got != nil && *got != *tc.want) { wantErr := "" if tc.err { wantErr = "" } s := "" if tc.s != nil { s = `&"` + *tc.s + `"` } t.Errorf("parseDuration(%v) = %v, %v; want %v, %v", s, got, err, tc.want, wantErr) } } } func newBool(b bool) *bool { return &b } func newDuration(b time.Duration) *time.Duration { return &b } func newString(b string) *string { return &b } grpc-go-1.29.1/serviceconfig/000077500000000000000000000000001365033716300157775ustar00rootroot00000000000000grpc-go-1.29.1/serviceconfig/serviceconfig.go000066400000000000000000000022321365033716300211530ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // // This package is EXPERIMENTAL. package serviceconfig // Config represents an opaque data structure holding a service config. type Config interface { isServiceConfig() } // LoadBalancingConfig represents an opaque data structure holding a load // balancing config. type LoadBalancingConfig interface { isLoadBalancingConfig() } // ParseResult contains a service config or an error. 
Exactly one must be // non-nil. type ParseResult struct { Config Config Err error } grpc-go-1.29.1/stats/000077500000000000000000000000001365033716300143075ustar00rootroot00000000000000grpc-go-1.29.1/stats/grpc_testing/000077500000000000000000000000001365033716300167775ustar00rootroot00000000000000grpc-go-1.29.1/stats/grpc_testing/test.pb.go000066400000000000000000000341461365033716300207150ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc_testing/test.proto package grpc_testing import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type SimpleRequest struct { Id int32 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } func (*SimpleRequest) ProtoMessage() {} func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{0} } func (m *SimpleRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SimpleRequest.Unmarshal(m, b) } func (m *SimpleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SimpleRequest.Marshal(b, m, deterministic) } func (m *SimpleRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SimpleRequest.Merge(m, src) } func (m *SimpleRequest) XXX_Size() int { return xxx_messageInfo_SimpleRequest.Size(m) } func (m *SimpleRequest) XXX_DiscardUnknown() { xxx_messageInfo_SimpleRequest.DiscardUnknown(m) } var xxx_messageInfo_SimpleRequest proto.InternalMessageInfo func (m *SimpleRequest) GetId() int32 { if m != nil { return m.Id } return 0 } type SimpleResponse struct { Id int32 `protobuf:"varint,3,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } func (*SimpleResponse) ProtoMessage() {} func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{1} } func (m *SimpleResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SimpleResponse.Unmarshal(m, b) } func (m *SimpleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SimpleResponse.Marshal(b, m, 
deterministic) } func (m *SimpleResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_SimpleResponse.Merge(m, src) } func (m *SimpleResponse) XXX_Size() int { return xxx_messageInfo_SimpleResponse.Size(m) } func (m *SimpleResponse) XXX_DiscardUnknown() { xxx_messageInfo_SimpleResponse.DiscardUnknown(m) } var xxx_messageInfo_SimpleResponse proto.InternalMessageInfo func (m *SimpleResponse) GetId() int32 { if m != nil { return m.Id } return 0 } func init() { proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") } func init() { proto.RegisterFile("grpc_testing/test.proto", fileDescriptor_e1cda82041fed8bf) } var fileDescriptor_e1cda82041fed8bf = []byte{ // 202 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0x2f, 0x2a, 0x48, 0x8e, 0x2f, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0xd7, 0x07, 0xd1, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x3c, 0x20, 0x09, 0x3d, 0xa8, 0x84, 0x92, 0x3c, 0x17, 0x6f, 0x70, 0x66, 0x6e, 0x41, 0x4e, 0x6a, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x10, 0x1f, 0x17, 0x53, 0x66, 0x8a, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x53, 0x66, 0x8a, 0x92, 0x02, 0x17, 0x1f, 0x4c, 0x41, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0x2a, 0x54, 0x05, 0x33, 0x4c, 0x85, 0xd1, 0x09, 0x26, 0x2e, 0xee, 0x90, 0xd4, 0xe2, 0x92, 0xe0, 0xd4, 0xa2, 0xb2, 0xcc, 0xe4, 0x54, 0x21, 0x37, 0x2e, 0xce, 0xd0, 0xbc, 0xc4, 0xa2, 0x4a, 0xe7, 0xc4, 0x9c, 0x1c, 0x21, 0x69, 0x3d, 0x64, 0xeb, 0xf4, 0x50, 0xec, 0x92, 0x92, 0xc1, 0x2e, 0x09, 0xb5, 0xc7, 0x9f, 0x8b, 0xcf, 0xad, 0x34, 0x27, 0xc7, 0xa5, 0xb4, 0x20, 0x27, 0xb5, 0x82, 0x42, 0xc3, 0x34, 0x18, 0x0d, 0x18, 0x85, 0xfc, 0xb9, 0x04, 0x9c, 0x73, 0x32, 0x53, 0xf3, 0x4a, 0x82, 0x4b, 0x8a, 0x52, 0x13, 0x73, 0x29, 0x36, 0x12, 0x64, 0x20, 0xc8, 0xd3, 0xa9, 0x45, 0x54, 0x31, 0xd0, 0x80, 0x31, 0x89, 0x0d, 0x1c, 0x45, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4c, 0x43, 0x27, 0x67, 0xbd, 0x01, 
0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // TestServiceClient is the client API for TestService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type TestServiceClient interface { // One request followed by one response. // The server returns the client id as-is. UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) // Client stream ClientStreamCall(ctx context.Context, opts ...grpc.CallOption) (TestService_ClientStreamCallClient, error) // Server stream ServerStreamCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (TestService_ServerStreamCallClient, error) } type testServiceClient struct { cc grpc.ClientConnInterface } func NewTestServiceClient(cc grpc.ClientConnInterface) TestServiceClient { return &testServiceClient{cc} } func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) err := c.cc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { stream, err := c.cc.NewStream(ctx, &_TestService_serviceDesc.Streams[0], "/grpc.testing.TestService/FullDuplexCall", opts...) if err != nil { return nil, err } x := &testServiceFullDuplexCallClient{stream} return x, nil } type TestService_FullDuplexCallClient interface { Send(*SimpleRequest) error Recv() (*SimpleResponse, error) grpc.ClientStream } type testServiceFullDuplexCallClient struct { grpc.ClientStream } func (x *testServiceFullDuplexCallClient) Send(m *SimpleRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceFullDuplexCallClient) Recv() (*SimpleResponse, error) { m := new(SimpleResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) ClientStreamCall(ctx context.Context, opts ...grpc.CallOption) (TestService_ClientStreamCallClient, error) { stream, err := c.cc.NewStream(ctx, &_TestService_serviceDesc.Streams[1], "/grpc.testing.TestService/ClientStreamCall", opts...) 
if err != nil { return nil, err } x := &testServiceClientStreamCallClient{stream} return x, nil } type TestService_ClientStreamCallClient interface { Send(*SimpleRequest) error CloseAndRecv() (*SimpleResponse, error) grpc.ClientStream } type testServiceClientStreamCallClient struct { grpc.ClientStream } func (x *testServiceClientStreamCallClient) Send(m *SimpleRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceClientStreamCallClient) CloseAndRecv() (*SimpleResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(SimpleResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) ServerStreamCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (TestService_ServerStreamCallClient, error) { stream, err := c.cc.NewStream(ctx, &_TestService_serviceDesc.Streams[2], "/grpc.testing.TestService/ServerStreamCall", opts...) if err != nil { return nil, err } x := &testServiceServerStreamCallClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type TestService_ServerStreamCallClient interface { Recv() (*SimpleResponse, error) grpc.ClientStream } type testServiceServerStreamCallClient struct { grpc.ClientStream } func (x *testServiceServerStreamCallClient) Recv() (*SimpleResponse, error) { m := new(SimpleResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // TestServiceServer is the server API for TestService service. type TestServiceServer interface { // One request followed by one response. // The server returns the client id as-is. UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. 
FullDuplexCall(TestService_FullDuplexCallServer) error // Client stream ClientStreamCall(TestService_ClientStreamCallServer) error // Server stream ServerStreamCall(*SimpleRequest, TestService_ServerStreamCallServer) error } // UnimplementedTestServiceServer can be embedded to have forward compatible implementations. type UnimplementedTestServiceServer struct { } func (*UnimplementedTestServiceServer) UnaryCall(ctx context.Context, req *SimpleRequest) (*SimpleResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UnaryCall not implemented") } func (*UnimplementedTestServiceServer) FullDuplexCall(srv TestService_FullDuplexCallServer) error { return status.Errorf(codes.Unimplemented, "method FullDuplexCall not implemented") } func (*UnimplementedTestServiceServer) ClientStreamCall(srv TestService_ClientStreamCallServer) error { return status.Errorf(codes.Unimplemented, "method ClientStreamCall not implemented") } func (*UnimplementedTestServiceServer) ServerStreamCall(req *SimpleRequest, srv TestService_ServerStreamCallServer) error { return status.Errorf(codes.Unimplemented, "method ServerStreamCall not implemented") } func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { s.RegisterService(&_TestService_serviceDesc, srv) } func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SimpleRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(TestServiceServer).UnaryCall(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.testing.TestService/UnaryCall", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) } return interceptor(ctx, in, info, handler) } func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { return 
srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) } type TestService_FullDuplexCallServer interface { Send(*SimpleResponse) error Recv() (*SimpleRequest, error) grpc.ServerStream } type testServiceFullDuplexCallServer struct { grpc.ServerStream } func (x *testServiceFullDuplexCallServer) Send(m *SimpleResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceFullDuplexCallServer) Recv() (*SimpleRequest, error) { m := new(SimpleRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _TestService_ClientStreamCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).ClientStreamCall(&testServiceClientStreamCallServer{stream}) } type TestService_ClientStreamCallServer interface { SendAndClose(*SimpleResponse) error Recv() (*SimpleRequest, error) grpc.ServerStream } type testServiceClientStreamCallServer struct { grpc.ServerStream } func (x *testServiceClientStreamCallServer) SendAndClose(m *SimpleResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceClientStreamCallServer) Recv() (*SimpleRequest, error) { m := new(SimpleRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _TestService_ServerStreamCall_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(SimpleRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(TestServiceServer).ServerStreamCall(m, &testServiceServerStreamCallServer{stream}) } type TestService_ServerStreamCallServer interface { Send(*SimpleResponse) error grpc.ServerStream } type testServiceServerStreamCallServer struct { grpc.ServerStream } func (x *testServiceServerStreamCallServer) Send(m *SimpleResponse) error { return x.ServerStream.SendMsg(m) } var _TestService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.TestService", HandlerType: (*TestServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: 
"UnaryCall", Handler: _TestService_UnaryCall_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "FullDuplexCall", Handler: _TestService_FullDuplexCall_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "ClientStreamCall", Handler: _TestService_ClientStreamCall_Handler, ClientStreams: true, }, { StreamName: "ServerStreamCall", Handler: _TestService_ServerStreamCall_Handler, ServerStreams: true, }, }, Metadata: "grpc_testing/test.proto", } grpc-go-1.29.1/stats/grpc_testing/test.proto000066400000000000000000000025331365033716300210460ustar00rootroot00000000000000// Copyright 2017 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package grpc.testing; message SimpleRequest { int32 id = 2; } message SimpleResponse { int32 id = 3; } // A simple test service. service TestService { // One request followed by one response. // The server returns the client id as-is. rpc UnaryCall(SimpleRequest) returns (SimpleResponse); // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. 
rpc FullDuplexCall(stream SimpleRequest) returns (stream SimpleResponse); // Client stream rpc ClientStreamCall(stream SimpleRequest) returns (SimpleResponse); // Server stream rpc ServerStreamCall(SimpleRequest) returns (stream SimpleResponse); } grpc-go-1.29.1/stats/handlers.go000066400000000000000000000044241365033716300164420ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package stats import ( "context" "net" ) // ConnTagInfo defines the relevant information needed by connection context tagger. type ConnTagInfo struct { // RemoteAddr is the remote address of the corresponding connection. RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr } // RPCTagInfo defines the relevant information needed by RPC context tagger. type RPCTagInfo struct { // FullMethodName is the RPC method in the format of /package.service/method. FullMethodName string // FailFast indicates if this RPC is failfast. // This field is only valid on client side, it's always false on server side. FailFast bool } // Handler defines the interface for the related stats handling (e.g., RPCs, connections). type Handler interface { // TagRPC can attach some information to the given context. // The context used for the rest lifetime of the RPC will be derived from // the returned context. TagRPC(context.Context, *RPCTagInfo) context.Context // HandleRPC processes the RPC stats. 
HandleRPC(context.Context, RPCStats) // TagConn can attach some information to the given context. // The returned context will be used for stats handling. // For conn stats handling, the context used in HandleConn for this // connection will be derived from the context returned. // For RPC stats handling, // - On server side, the context used in HandleRPC for all RPCs on this // connection will be derived from the context returned. // - On client side, the context is not derived from the context returned. TagConn(context.Context, *ConnTagInfo) context.Context // HandleConn processes the Conn stats. HandleConn(context.Context, ConnStats) } grpc-go-1.29.1/stats/stats.go000066400000000000000000000250051365033716300157760ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto // Package stats is for collecting and reporting various network and RPC stats. // This package is for monitoring purpose only. All fields are read-only. // All APIs are experimental. package stats // import "google.golang.org/grpc/stats" import ( "context" "net" "time" "google.golang.org/grpc/metadata" ) // RPCStats contains stats information about RPCs. type RPCStats interface { isRPCStats() // IsClient returns true if this RPCStats is from client side. IsClient() bool } // Begin contains stats when an RPC begins. // FailFast is only valid if this Begin is from client side. 
type Begin struct { // Client is true if this Begin is from client side. Client bool // BeginTime is the time when the RPC begins. BeginTime time.Time // FailFast indicates if this RPC is failfast. FailFast bool } // IsClient indicates if the stats information is from client side. func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} // InPayload contains the information for an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool // Payload is the payload with original type. Payload interface{} // Data is the serialized message payload. Data []byte // Length is the length of uncompressed data. Length int // WireLength is the length of data on wire (compressed, signed, encrypted). WireLength int // RecvTime is the time when the payload is received. RecvTime time.Time } // IsClient indicates if the stats information is from client side. func (s *InPayload) IsClient() bool { return s.Client } func (s *InPayload) isRPCStats() {} // InHeader contains stats when a header is received. type InHeader struct { // Client is true if this InHeader is from client side. Client bool // WireLength is the wire length of header. WireLength int // Compression is the compression algorithm used for the RPC. Compression string // Header contains the header metadata received. Header metadata.MD // The following fields are valid only if Client is false. // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string // RemoteAddr is the remote address of the corresponding connection. RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr } // IsClient indicates if the stats information is from client side. func (s *InHeader) IsClient() bool { return s.Client } func (s *InHeader) isRPCStats() {} // InTrailer contains stats when a trailer is received. 
type InTrailer struct { // Client is true if this InTrailer is from client side. Client bool // WireLength is the wire length of trailer. WireLength int // Trailer contains the trailer metadata received from the server. This // field is only valid if this InTrailer is from the client side. Trailer metadata.MD } // IsClient indicates if the stats information is from client side. func (s *InTrailer) IsClient() bool { return s.Client } func (s *InTrailer) isRPCStats() {} // OutPayload contains the information for an outgoing payload. type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool // Payload is the payload with original type. Payload interface{} // Data is the serialized message payload. Data []byte // Length is the length of uncompressed data. Length int // WireLength is the length of data on wire (compressed, signed, encrypted). WireLength int // SentTime is the time when the payload is sent. SentTime time.Time } // IsClient indicates if this stats information is from client side. func (s *OutPayload) IsClient() bool { return s.Client } func (s *OutPayload) isRPCStats() {} // OutHeader contains stats when a header is sent. type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool // Compression is the compression algorithm used for the RPC. Compression string // Header contains the header metadata sent. Header metadata.MD // The following fields are valid only if Client is true. // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string // RemoteAddr is the remote address of the corresponding connection. RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr } // IsClient indicates if this stats information is from client side. func (s *OutHeader) IsClient() bool { return s.Client } func (s *OutHeader) isRPCStats() {} // OutTrailer contains stats when a trailer is sent. 
type OutTrailer struct { // Client is true if this OutTrailer is from client side. Client bool // WireLength is the wire length of trailer. // // Deprecated: This field is never set. The length is not known when this message is // emitted because the trailer fields are compressed with hpack after that. WireLength int // Trailer contains the trailer metadata sent to the client. This // field is only valid if this OutTrailer is from the server side. Trailer metadata.MD } // IsClient indicates if this stats information is from client side. func (s *OutTrailer) IsClient() bool { return s.Client } func (s *OutTrailer) isRPCStats() {} // End contains stats when an RPC ends. type End struct { // Client is true if this End is from client side. Client bool // BeginTime is the time when the RPC began. BeginTime time.Time // EndTime is the time when the RPC ends. EndTime time.Time // Trailer contains the trailer metadata received from the server. This // field is only valid if this End is from the client side. // Deprecated: use Trailer in InTrailer instead. Trailer metadata.MD // Error is the error the RPC ended with. It is an error generated from // status.Status and can be converted back to status.Status using // status.FromError if non-nil. Error error } // IsClient indicates if this is from client side. func (s *End) IsClient() bool { return s.Client } func (s *End) isRPCStats() {} // ConnStats contains stats information about connections. type ConnStats interface { isConnStats() // IsClient returns true if this ConnStats is from client side. IsClient() bool } // ConnBegin contains the stats of a connection when it is established. type ConnBegin struct { // Client is true if this ConnBegin is from client side. Client bool } // IsClient indicates if this is from client side. func (s *ConnBegin) IsClient() bool { return s.Client } func (s *ConnBegin) isConnStats() {} // ConnEnd contains the stats of a connection when it ends. 
type ConnEnd struct { // Client is true if this ConnEnd is from client side. Client bool } // IsClient indicates if this is from client side. func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} type incomingTagsKey struct{} type outgoingTagsKey struct{} // SetTags attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to // SetTags will overwrite the values from earlier calls. // // NOTE: this is provided only for backward compatibility with existing clients // and will likely be removed in an upcoming release. New uses should transmit // this type of data using metadata with a different, non-reserved (i.e. does // not begin with "grpc-") header name. func SetTags(ctx context.Context, b []byte) context.Context { return context.WithValue(ctx, outgoingTagsKey{}, b) } // Tags returns the tags from the context for the inbound RPC. // // NOTE: this is provided only for backward compatibility with existing clients // and will likely be removed in an upcoming release. New uses should transmit // this type of data using metadata with a different, non-reserved (i.e. does // not begin with "grpc-") header name. func Tags(ctx context.Context) []byte { b, _ := ctx.Value(incomingTagsKey{}).([]byte) return b } // SetIncomingTags attaches stats tagging data to the context, to be read by // the application (not sent in outgoing RPCs). // // This is intended for gRPC-internal use ONLY. func SetIncomingTags(ctx context.Context, b []byte) context.Context { return context.WithValue(ctx, incomingTagsKey{}, b) } // OutgoingTags returns the tags from the context for the outbound RPC. // // This is intended for gRPC-internal use ONLY. 
func OutgoingTags(ctx context.Context) []byte { b, _ := ctx.Value(outgoingTagsKey{}).([]byte) return b } type incomingTraceKey struct{} type outgoingTraceKey struct{} // SetTrace attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to // SetTrace will overwrite the values from earlier calls. // // NOTE: this is provided only for backward compatibility with existing clients // and will likely be removed in an upcoming release. New uses should transmit // this type of data using metadata with a different, non-reserved (i.e. does // not begin with "grpc-") header name. func SetTrace(ctx context.Context, b []byte) context.Context { return context.WithValue(ctx, outgoingTraceKey{}, b) } // Trace returns the trace from the context for the inbound RPC. // // NOTE: this is provided only for backward compatibility with existing clients // and will likely be removed in an upcoming release. New uses should transmit // this type of data using metadata with a different, non-reserved (i.e. does // not begin with "grpc-") header name. func Trace(ctx context.Context) []byte { b, _ := ctx.Value(incomingTraceKey{}).([]byte) return b } // SetIncomingTrace attaches stats tagging data to the context, to be read by // the application (not sent in outgoing RPCs). It is intended for // gRPC-internal use. func SetIncomingTrace(ctx context.Context, b []byte) context.Context { return context.WithValue(ctx, incomingTraceKey{}, b) } // OutgoingTrace returns the trace from the context for the outbound RPC. It is // intended for gRPC-internal use. func OutgoingTrace(ctx context.Context) []byte { b, _ := ctx.Value(outgoingTraceKey{}).([]byte) return b } grpc-go-1.29.1/stats/stats_test.go000066400000000000000000001075351365033716300170460ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package stats_test import ( "context" "fmt" "io" "net" "reflect" "sync" "testing" "time" "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" testpb "google.golang.org/grpc/stats/grpc_testing" "google.golang.org/grpc/status" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func init() { grpc.EnableTracing = false } type connCtxKey struct{} type rpcCtxKey struct{} var ( // For headers sent to server: testMetadata = metadata.MD{ "key1": []string{"value1"}, "key2": []string{"value2"}, "user-agent": []string{fmt.Sprintf("test/0.0.1 grpc-go/%s", grpc.Version)}, } // For headers sent from server: testHeaderMetadata = metadata.MD{ "hkey1": []string{"headerValue1"}, "hkey2": []string{"headerValue2"}, } // For trailers sent from server: testTrailerMetadata = metadata.MD{ "tkey1": []string{"trailerValue1"}, "tkey2": []string{"trailerValue2"}, } // The id for which the service handler should return error. 
errorID int32 = 32202 ) type testServer struct { testpb.UnimplementedTestServiceServer } func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { if err := grpc.SendHeader(ctx, testHeaderMetadata); err != nil { return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want ", testHeaderMetadata, err) } if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil { return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want ", testTrailerMetadata, err) } if in.Id == errorID { return nil, fmt.Errorf("got error id: %v", in.Id) } return &testpb.SimpleResponse{Id: in.Id}, nil } func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { if err := stream.SendHeader(testHeaderMetadata); err != nil { return status.Errorf(status.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, testHeaderMetadata, err, nil) } stream.SetTrailer(testTrailerMetadata) for { in, err := stream.Recv() if err == io.EOF { // read done. return nil } if err != nil { return err } if in.Id == errorID { return fmt.Errorf("got error id: %v", in.Id) } if err := stream.Send(&testpb.SimpleResponse{Id: in.Id}); err != nil { return err } } } func (s *testServer) ClientStreamCall(stream testpb.TestService_ClientStreamCallServer) error { if err := stream.SendHeader(testHeaderMetadata); err != nil { return status.Errorf(status.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, testHeaderMetadata, err, nil) } stream.SetTrailer(testTrailerMetadata) for { in, err := stream.Recv() if err == io.EOF { // read done. 
return stream.SendAndClose(&testpb.SimpleResponse{Id: int32(0)}) } if err != nil { return err } if in.Id == errorID { return fmt.Errorf("got error id: %v", in.Id) } } } func (s *testServer) ServerStreamCall(in *testpb.SimpleRequest, stream testpb.TestService_ServerStreamCallServer) error { if err := stream.SendHeader(testHeaderMetadata); err != nil { return status.Errorf(status.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, testHeaderMetadata, err, nil) } stream.SetTrailer(testTrailerMetadata) if in.Id == errorID { return fmt.Errorf("got error id: %v", in.Id) } for i := 0; i < 5; i++ { if err := stream.Send(&testpb.SimpleResponse{Id: in.Id}); err != nil { return err } } return nil } // test is an end-to-end test. It should be created with the newTest // func, modified as needed, and then started with its startServer method. // It should be cleaned up with the tearDown method. type test struct { t *testing.T compress string clientStatsHandler stats.Handler serverStatsHandler stats.Handler testServer testpb.TestServiceServer // nil means none // srv and srvAddr are set once startServer is called. srv *grpc.Server srvAddr string cc *grpc.ClientConn // nil until requested via clientConn } func (te *test) tearDown() { if te.cc != nil { te.cc.Close() te.cc = nil } te.srv.Stop() } type testConfig struct { compress string } // newTest returns a new test using the provided testing.T and // environment. It is returned with default values. Tests should // modify it before calling its startServer and clientConn methods. func newTest(t *testing.T, tc *testConfig, ch stats.Handler, sh stats.Handler) *test { te := &test{ t: t, compress: tc.compress, clientStatsHandler: ch, serverStatsHandler: sh, } return te } // startServer starts a gRPC server listening. Callers should defer a // call to te.tearDown to clean up. 
func (te *test) startServer(ts testpb.TestServiceServer) { te.testServer = ts lis, err := net.Listen("tcp", "localhost:0") if err != nil { te.t.Fatalf("Failed to listen: %v", err) } var opts []grpc.ServerOption if te.compress == "gzip" { opts = append(opts, grpc.RPCCompressor(grpc.NewGZIPCompressor()), grpc.RPCDecompressor(grpc.NewGZIPDecompressor()), ) } if te.serverStatsHandler != nil { opts = append(opts, grpc.StatsHandler(te.serverStatsHandler)) } s := grpc.NewServer(opts...) te.srv = s if te.testServer != nil { testpb.RegisterTestServiceServer(s, te.testServer) } go s.Serve(lis) te.srvAddr = lis.Addr().String() } func (te *test) clientConn() *grpc.ClientConn { if te.cc != nil { return te.cc } opts := []grpc.DialOption{ grpc.WithInsecure(), grpc.WithBlock(), grpc.WithUserAgent("test/0.0.1"), } if te.compress == "gzip" { opts = append(opts, grpc.WithCompressor(grpc.NewGZIPCompressor()), grpc.WithDecompressor(grpc.NewGZIPDecompressor()), ) } if te.clientStatsHandler != nil { opts = append(opts, grpc.WithStatsHandler(te.clientStatsHandler)) } var err error te.cc, err = grpc.Dial(te.srvAddr, opts...) if err != nil { te.t.Fatalf("Dial(%q) = %v", te.srvAddr, err) } return te.cc } type rpcType int const ( unaryRPC rpcType = iota clientStreamRPC serverStreamRPC fullDuplexStreamRPC ) type rpcConfig struct { count int // Number of requests and responses for streaming RPCs. success bool // Whether the RPC should succeed or return error. failfast bool callType rpcType // Type of RPC. 
} func (te *test) doUnaryCall(c *rpcConfig) (*testpb.SimpleRequest, *testpb.SimpleResponse, error) { var ( resp *testpb.SimpleResponse req *testpb.SimpleRequest err error ) tc := testpb.NewTestServiceClient(te.clientConn()) if c.success { req = &testpb.SimpleRequest{Id: errorID + 1} } else { req = &testpb.SimpleRequest{Id: errorID} } ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) resp, err = tc.UnaryCall(ctx, req, grpc.WaitForReady(!c.failfast)) return req, resp, err } func (te *test) doFullDuplexCallRoundtrip(c *rpcConfig) ([]*testpb.SimpleRequest, []*testpb.SimpleResponse, error) { var ( reqs []*testpb.SimpleRequest resps []*testpb.SimpleResponse err error ) tc := testpb.NewTestServiceClient(te.clientConn()) stream, err := tc.FullDuplexCall(metadata.NewOutgoingContext(context.Background(), testMetadata), grpc.WaitForReady(!c.failfast)) if err != nil { return reqs, resps, err } var startID int32 if !c.success { startID = errorID } for i := 0; i < c.count; i++ { req := &testpb.SimpleRequest{ Id: int32(i) + startID, } reqs = append(reqs, req) if err = stream.Send(req); err != nil { return reqs, resps, err } var resp *testpb.SimpleResponse if resp, err = stream.Recv(); err != nil { return reqs, resps, err } resps = append(resps, resp) } if err = stream.CloseSend(); err != nil && err != io.EOF { return reqs, resps, err } if _, err = stream.Recv(); err != io.EOF { return reqs, resps, err } return reqs, resps, nil } func (te *test) doClientStreamCall(c *rpcConfig) ([]*testpb.SimpleRequest, *testpb.SimpleResponse, error) { var ( reqs []*testpb.SimpleRequest resp *testpb.SimpleResponse err error ) tc := testpb.NewTestServiceClient(te.clientConn()) stream, err := tc.ClientStreamCall(metadata.NewOutgoingContext(context.Background(), testMetadata), grpc.WaitForReady(!c.failfast)) if err != nil { return reqs, resp, err } var startID int32 if !c.success { startID = errorID } for i := 0; i < c.count; i++ { req := &testpb.SimpleRequest{ Id: int32(i) + 
startID, } reqs = append(reqs, req) if err = stream.Send(req); err != nil { return reqs, resp, err } } resp, err = stream.CloseAndRecv() return reqs, resp, err } func (te *test) doServerStreamCall(c *rpcConfig) (*testpb.SimpleRequest, []*testpb.SimpleResponse, error) { var ( req *testpb.SimpleRequest resps []*testpb.SimpleResponse err error ) tc := testpb.NewTestServiceClient(te.clientConn()) var startID int32 if !c.success { startID = errorID } req = &testpb.SimpleRequest{Id: startID} stream, err := tc.ServerStreamCall(metadata.NewOutgoingContext(context.Background(), testMetadata), req, grpc.WaitForReady(!c.failfast)) if err != nil { return req, resps, err } for { var resp *testpb.SimpleResponse resp, err := stream.Recv() if err == io.EOF { return req, resps, nil } else if err != nil { return req, resps, err } resps = append(resps, resp) } } type expectedData struct { method string serverAddr string compression string reqIdx int requests []*testpb.SimpleRequest respIdx int responses []*testpb.SimpleResponse err error failfast bool } type gotData struct { ctx context.Context client bool s interface{} // This could be RPCStats or ConnStats. } const ( begin int = iota end inPayload inHeader inTrailer outPayload outHeader // TODO: test outTrailer ? 
connBegin connEnd ) func checkBegin(t *testing.T, d *gotData, e *expectedData) { var ( ok bool st *stats.Begin ) if st, ok = d.s.(*stats.Begin); !ok { t.Fatalf("got %T, want Begin", d.s) } if d.ctx == nil { t.Fatalf("d.ctx = nil, want ") } if st.BeginTime.IsZero() { t.Fatalf("st.BeginTime = %v, want ", st.BeginTime) } if d.client { if st.FailFast != e.failfast { t.Fatalf("st.FailFast = %v, want %v", st.FailFast, e.failfast) } } } func checkInHeader(t *testing.T, d *gotData, e *expectedData) { var ( ok bool st *stats.InHeader ) if st, ok = d.s.(*stats.InHeader); !ok { t.Fatalf("got %T, want InHeader", d.s) } if d.ctx == nil { t.Fatalf("d.ctx = nil, want ") } if st.Compression != e.compression { t.Fatalf("st.Compression = %v, want %v", st.Compression, e.compression) } if d.client { // additional headers might be injected so instead of testing equality, test that all the // expected headers keys have the expected header values. for key := range testHeaderMetadata { if !reflect.DeepEqual(st.Header.Get(key), testHeaderMetadata.Get(key)) { t.Fatalf("st.Header[%s] = %v, want %v", key, st.Header.Get(key), testHeaderMetadata.Get(key)) } } } else { if st.FullMethod != e.method { t.Fatalf("st.FullMethod = %s, want %v", st.FullMethod, e.method) } if st.LocalAddr.String() != e.serverAddr { t.Fatalf("st.LocalAddr = %v, want %v", st.LocalAddr, e.serverAddr) } // additional headers might be injected so instead of testing equality, test that all the // expected headers keys have the expected header values. 
for key := range testMetadata { if !reflect.DeepEqual(st.Header.Get(key), testMetadata.Get(key)) { t.Fatalf("st.Header[%s] = %v, want %v", key, st.Header.Get(key), testMetadata.Get(key)) } } if connInfo, ok := d.ctx.Value(connCtxKey{}).(*stats.ConnTagInfo); ok { if connInfo.RemoteAddr != st.RemoteAddr { t.Fatalf("connInfo.RemoteAddr = %v, want %v", connInfo.RemoteAddr, st.RemoteAddr) } if connInfo.LocalAddr != st.LocalAddr { t.Fatalf("connInfo.LocalAddr = %v, want %v", connInfo.LocalAddr, st.LocalAddr) } } else { t.Fatalf("got context %v, want one with connCtxKey", d.ctx) } if rpcInfo, ok := d.ctx.Value(rpcCtxKey{}).(*stats.RPCTagInfo); ok { if rpcInfo.FullMethodName != st.FullMethod { t.Fatalf("rpcInfo.FullMethod = %s, want %v", rpcInfo.FullMethodName, st.FullMethod) } } else { t.Fatalf("got context %v, want one with rpcCtxKey", d.ctx) } } } func checkInPayload(t *testing.T, d *gotData, e *expectedData) { var ( ok bool st *stats.InPayload ) if st, ok = d.s.(*stats.InPayload); !ok { t.Fatalf("got %T, want InPayload", d.s) } if d.ctx == nil { t.Fatalf("d.ctx = nil, want ") } if d.client { b, err := proto.Marshal(e.responses[e.respIdx]) if err != nil { t.Fatalf("failed to marshal message: %v", err) } if reflect.TypeOf(st.Payload) != reflect.TypeOf(e.responses[e.respIdx]) { t.Fatalf("st.Payload = %T, want %T", st.Payload, e.responses[e.respIdx]) } e.respIdx++ if string(st.Data) != string(b) { t.Fatalf("st.Data = %v, want %v", st.Data, b) } if st.Length != len(b) { t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b)) } } else { b, err := proto.Marshal(e.requests[e.reqIdx]) if err != nil { t.Fatalf("failed to marshal message: %v", err) } if reflect.TypeOf(st.Payload) != reflect.TypeOf(e.requests[e.reqIdx]) { t.Fatalf("st.Payload = %T, want %T", st.Payload, e.requests[e.reqIdx]) } e.reqIdx++ if string(st.Data) != string(b) { t.Fatalf("st.Data = %v, want %v", st.Data, b) } if st.Length != len(b) { t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b)) } } // Below are 
sanity checks that WireLength and RecvTime are populated. // TODO: check values of WireLength and RecvTime. if len(st.Data) > 0 && st.WireLength == 0 { t.Fatalf("st.WireLength = %v with non-empty data, want ", st.WireLength) } if st.RecvTime.IsZero() { t.Fatalf("st.ReceivedTime = %v, want ", st.RecvTime) } } func checkInTrailer(t *testing.T, d *gotData, e *expectedData) { var ( ok bool st *stats.InTrailer ) if st, ok = d.s.(*stats.InTrailer); !ok { t.Fatalf("got %T, want InTrailer", d.s) } if d.ctx == nil { t.Fatalf("d.ctx = nil, want ") } if !st.Client { t.Fatalf("st IsClient = false, want true") } if !reflect.DeepEqual(st.Trailer, testTrailerMetadata) { t.Fatalf("st.Trailer = %v, want %v", st.Trailer, testTrailerMetadata) } } func checkOutHeader(t *testing.T, d *gotData, e *expectedData) { var ( ok bool st *stats.OutHeader ) if st, ok = d.s.(*stats.OutHeader); !ok { t.Fatalf("got %T, want OutHeader", d.s) } if d.ctx == nil { t.Fatalf("d.ctx = nil, want ") } if st.Compression != e.compression { t.Fatalf("st.Compression = %v, want %v", st.Compression, e.compression) } if d.client { if st.FullMethod != e.method { t.Fatalf("st.FullMethod = %s, want %v", st.FullMethod, e.method) } if st.RemoteAddr.String() != e.serverAddr { t.Fatalf("st.RemoteAddr = %v, want %v", st.RemoteAddr, e.serverAddr) } // additional headers might be injected so instead of testing equality, test that all the // expected headers keys have the expected header values. 
for key := range testMetadata { if !reflect.DeepEqual(st.Header.Get(key), testMetadata.Get(key)) { t.Fatalf("st.Header[%s] = %v, want %v", key, st.Header.Get(key), testMetadata.Get(key)) } } if rpcInfo, ok := d.ctx.Value(rpcCtxKey{}).(*stats.RPCTagInfo); ok { if rpcInfo.FullMethodName != st.FullMethod { t.Fatalf("rpcInfo.FullMethod = %s, want %v", rpcInfo.FullMethodName, st.FullMethod) } } else { t.Fatalf("got context %v, want one with rpcCtxKey", d.ctx) } } else { // additional headers might be injected so instead of testing equality, test that all the // expected headers keys have the expected header values. for key := range testHeaderMetadata { if !reflect.DeepEqual(st.Header.Get(key), testHeaderMetadata.Get(key)) { t.Fatalf("st.Header[%s] = %v, want %v", key, st.Header.Get(key), testHeaderMetadata.Get(key)) } } } } func checkOutPayload(t *testing.T, d *gotData, e *expectedData) { var ( ok bool st *stats.OutPayload ) if st, ok = d.s.(*stats.OutPayload); !ok { t.Fatalf("got %T, want OutPayload", d.s) } if d.ctx == nil { t.Fatalf("d.ctx = nil, want ") } if d.client { b, err := proto.Marshal(e.requests[e.reqIdx]) if err != nil { t.Fatalf("failed to marshal message: %v", err) } if reflect.TypeOf(st.Payload) != reflect.TypeOf(e.requests[e.reqIdx]) { t.Fatalf("st.Payload = %T, want %T", st.Payload, e.requests[e.reqIdx]) } e.reqIdx++ if string(st.Data) != string(b) { t.Fatalf("st.Data = %v, want %v", st.Data, b) } if st.Length != len(b) { t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b)) } } else { b, err := proto.Marshal(e.responses[e.respIdx]) if err != nil { t.Fatalf("failed to marshal message: %v", err) } if reflect.TypeOf(st.Payload) != reflect.TypeOf(e.responses[e.respIdx]) { t.Fatalf("st.Payload = %T, want %T", st.Payload, e.responses[e.respIdx]) } e.respIdx++ if string(st.Data) != string(b) { t.Fatalf("st.Data = %v, want %v", st.Data, b) } if st.Length != len(b) { t.Fatalf("st.Lenght = %v, want %v", st.Length, len(b)) } } // Below are sanity checks that 
WireLength and SentTime are populated. // TODO: check values of WireLength and SentTime. if len(st.Data) > 0 && st.WireLength == 0 { t.Fatalf("st.WireLength = %v with non-empty data, want ", st.WireLength) } if st.SentTime.IsZero() { t.Fatalf("st.SentTime = %v, want ", st.SentTime) } } func checkOutTrailer(t *testing.T, d *gotData, e *expectedData) { var ( ok bool st *stats.OutTrailer ) if st, ok = d.s.(*stats.OutTrailer); !ok { t.Fatalf("got %T, want OutTrailer", d.s) } if d.ctx == nil { t.Fatalf("d.ctx = nil, want ") } if st.Client { t.Fatalf("st IsClient = true, want false") } if !reflect.DeepEqual(st.Trailer, testTrailerMetadata) { t.Fatalf("st.Trailer = %v, want %v", st.Trailer, testTrailerMetadata) } } func checkEnd(t *testing.T, d *gotData, e *expectedData) { var ( ok bool st *stats.End ) if st, ok = d.s.(*stats.End); !ok { t.Fatalf("got %T, want End", d.s) } if d.ctx == nil { t.Fatalf("d.ctx = nil, want ") } if st.BeginTime.IsZero() { t.Fatalf("st.BeginTime = %v, want ", st.BeginTime) } if st.EndTime.IsZero() { t.Fatalf("st.EndTime = %v, want ", st.EndTime) } actual, ok := status.FromError(st.Error) if !ok { t.Fatalf("expected st.Error to be a statusError, got %v (type %T)", st.Error, st.Error) } expectedStatus, _ := status.FromError(e.err) if actual.Code() != expectedStatus.Code() || actual.Message() != expectedStatus.Message() { t.Fatalf("st.Error = %v, want %v", st.Error, e.err) } if st.Client { if !reflect.DeepEqual(st.Trailer, testTrailerMetadata) { t.Fatalf("st.Trailer = %v, want %v", st.Trailer, testTrailerMetadata) } } else { if st.Trailer != nil { t.Fatalf("st.Trailer = %v, want nil", st.Trailer) } } } func checkConnBegin(t *testing.T, d *gotData, e *expectedData) { var ( ok bool st *stats.ConnBegin ) if st, ok = d.s.(*stats.ConnBegin); !ok { t.Fatalf("got %T, want ConnBegin", d.s) } if d.ctx == nil { t.Fatalf("d.ctx = nil, want ") } st.IsClient() // TODO remove this. 
} func checkConnEnd(t *testing.T, d *gotData, e *expectedData) { var ( ok bool st *stats.ConnEnd ) if st, ok = d.s.(*stats.ConnEnd); !ok { t.Fatalf("got %T, want ConnEnd", d.s) } if d.ctx == nil { t.Fatalf("d.ctx = nil, want ") } st.IsClient() // TODO remove this. } type statshandler struct { mu sync.Mutex gotRPC []*gotData gotConn []*gotData } func (h *statshandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { return context.WithValue(ctx, connCtxKey{}, info) } func (h *statshandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { return context.WithValue(ctx, rpcCtxKey{}, info) } func (h *statshandler) HandleConn(ctx context.Context, s stats.ConnStats) { h.mu.Lock() defer h.mu.Unlock() h.gotConn = append(h.gotConn, &gotData{ctx, s.IsClient(), s}) } func (h *statshandler) HandleRPC(ctx context.Context, s stats.RPCStats) { h.mu.Lock() defer h.mu.Unlock() h.gotRPC = append(h.gotRPC, &gotData{ctx, s.IsClient(), s}) } func checkConnStats(t *testing.T, got []*gotData) { if len(got) <= 0 || len(got)%2 != 0 { for i, g := range got { t.Errorf(" - %v, %T = %+v, ctx: %v", i, g.s, g.s, g.ctx) } t.Fatalf("got %v stats, want even positive number", len(got)) } // The first conn stats must be a ConnBegin. checkConnBegin(t, got[0], nil) // The last conn stats must be a ConnEnd. 
checkConnEnd(t, got[len(got)-1], nil) } func checkServerStats(t *testing.T, got []*gotData, expect *expectedData, checkFuncs []func(t *testing.T, d *gotData, e *expectedData)) { if len(got) != len(checkFuncs) { for i, g := range got { t.Errorf(" - %v, %T", i, g.s) } t.Fatalf("got %v stats, want %v stats", len(got), len(checkFuncs)) } var rpcctx context.Context for i := 0; i < len(got); i++ { if _, ok := got[i].s.(stats.RPCStats); ok { if rpcctx != nil && got[i].ctx != rpcctx { t.Fatalf("got different contexts with stats %T", got[i].s) } rpcctx = got[i].ctx } } for i, f := range checkFuncs { f(t, got[i], expect) } } func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []func(t *testing.T, d *gotData, e *expectedData)) { h := &statshandler{} te := newTest(t, tc, nil, h) te.startServer(&testServer{}) defer te.tearDown() var ( reqs []*testpb.SimpleRequest resps []*testpb.SimpleResponse err error method string req *testpb.SimpleRequest resp *testpb.SimpleResponse e error ) switch cc.callType { case unaryRPC: method = "/grpc.testing.TestService/UnaryCall" req, resp, e = te.doUnaryCall(cc) reqs = []*testpb.SimpleRequest{req} resps = []*testpb.SimpleResponse{resp} err = e case clientStreamRPC: method = "/grpc.testing.TestService/ClientStreamCall" reqs, resp, e = te.doClientStreamCall(cc) resps = []*testpb.SimpleResponse{resp} err = e case serverStreamRPC: method = "/grpc.testing.TestService/ServerStreamCall" req, resps, e = te.doServerStreamCall(cc) reqs = []*testpb.SimpleRequest{req} err = e case fullDuplexStreamRPC: method = "/grpc.testing.TestService/FullDuplexCall" reqs, resps, err = te.doFullDuplexCallRoundtrip(cc) } if cc.success != (err == nil) { t.Fatalf("cc.success: %v, got error: %v", cc.success, err) } te.cc.Close() te.srv.GracefulStop() // Wait for the server to stop. 
for { h.mu.Lock() if len(h.gotRPC) >= len(checkFuncs) { h.mu.Unlock() break } h.mu.Unlock() time.Sleep(10 * time.Millisecond) } for { h.mu.Lock() if _, ok := h.gotConn[len(h.gotConn)-1].s.(*stats.ConnEnd); ok { h.mu.Unlock() break } h.mu.Unlock() time.Sleep(10 * time.Millisecond) } expect := &expectedData{ serverAddr: te.srvAddr, compression: tc.compress, method: method, requests: reqs, responses: resps, err: err, } h.mu.Lock() checkConnStats(t, h.gotConn) h.mu.Unlock() checkServerStats(t, h.gotRPC, expect, checkFuncs) } func (s) TestServerStatsUnaryRPC(t *testing.T) { testServerStats(t, &testConfig{compress: ""}, &rpcConfig{success: true, callType: unaryRPC}, []func(t *testing.T, d *gotData, e *expectedData){ checkInHeader, checkBegin, checkInPayload, checkOutHeader, checkOutPayload, checkOutTrailer, checkEnd, }) } func (s) TestServerStatsUnaryRPCError(t *testing.T) { testServerStats(t, &testConfig{compress: ""}, &rpcConfig{success: false, callType: unaryRPC}, []func(t *testing.T, d *gotData, e *expectedData){ checkInHeader, checkBegin, checkInPayload, checkOutHeader, checkOutTrailer, checkEnd, }) } func (s) TestServerStatsClientStreamRPC(t *testing.T) { count := 5 checkFuncs := []func(t *testing.T, d *gotData, e *expectedData){ checkInHeader, checkBegin, checkOutHeader, } ioPayFuncs := []func(t *testing.T, d *gotData, e *expectedData){ checkInPayload, } for i := 0; i < count; i++ { checkFuncs = append(checkFuncs, ioPayFuncs...) 
} checkFuncs = append(checkFuncs, checkOutPayload, checkOutTrailer, checkEnd, ) testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, callType: clientStreamRPC}, checkFuncs) } func (s) TestServerStatsClientStreamRPCError(t *testing.T) { count := 1 testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, callType: clientStreamRPC}, []func(t *testing.T, d *gotData, e *expectedData){ checkInHeader, checkBegin, checkOutHeader, checkInPayload, checkOutTrailer, checkEnd, }) } func (s) TestServerStatsServerStreamRPC(t *testing.T) { count := 5 checkFuncs := []func(t *testing.T, d *gotData, e *expectedData){ checkInHeader, checkBegin, checkInPayload, checkOutHeader, } ioPayFuncs := []func(t *testing.T, d *gotData, e *expectedData){ checkOutPayload, } for i := 0; i < count; i++ { checkFuncs = append(checkFuncs, ioPayFuncs...) } checkFuncs = append(checkFuncs, checkOutTrailer, checkEnd, ) testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, callType: serverStreamRPC}, checkFuncs) } func (s) TestServerStatsServerStreamRPCError(t *testing.T) { count := 5 testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, callType: serverStreamRPC}, []func(t *testing.T, d *gotData, e *expectedData){ checkInHeader, checkBegin, checkInPayload, checkOutHeader, checkOutTrailer, checkEnd, }) } func (s) TestServerStatsFullDuplexRPC(t *testing.T) { count := 5 checkFuncs := []func(t *testing.T, d *gotData, e *expectedData){ checkInHeader, checkBegin, checkOutHeader, } ioPayFuncs := []func(t *testing.T, d *gotData, e *expectedData){ checkInPayload, checkOutPayload, } for i := 0; i < count; i++ { checkFuncs = append(checkFuncs, ioPayFuncs...) 
} checkFuncs = append(checkFuncs, checkOutTrailer, checkEnd, ) testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, callType: fullDuplexStreamRPC}, checkFuncs) } func (s) TestServerStatsFullDuplexRPCError(t *testing.T) { count := 5 testServerStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, callType: fullDuplexStreamRPC}, []func(t *testing.T, d *gotData, e *expectedData){ checkInHeader, checkBegin, checkOutHeader, checkInPayload, checkOutTrailer, checkEnd, }) } type checkFuncWithCount struct { f func(t *testing.T, d *gotData, e *expectedData) c int // expected count } func checkClientStats(t *testing.T, got []*gotData, expect *expectedData, checkFuncs map[int]*checkFuncWithCount) { var expectLen int for _, v := range checkFuncs { expectLen += v.c } if len(got) != expectLen { for i, g := range got { t.Errorf(" - %v, %T", i, g.s) } t.Fatalf("got %v stats, want %v stats", len(got), expectLen) } var tagInfoInCtx *stats.RPCTagInfo for i := 0; i < len(got); i++ { if _, ok := got[i].s.(stats.RPCStats); ok { tagInfoInCtxNew, _ := got[i].ctx.Value(rpcCtxKey{}).(*stats.RPCTagInfo) if tagInfoInCtx != nil && tagInfoInCtx != tagInfoInCtxNew { t.Fatalf("got context containing different tagInfo with stats %T", got[i].s) } tagInfoInCtx = tagInfoInCtxNew } } for _, s := range got { switch s.s.(type) { case *stats.Begin: if checkFuncs[begin].c <= 0 { t.Fatalf("unexpected stats: %T", s.s) } checkFuncs[begin].f(t, s, expect) checkFuncs[begin].c-- case *stats.OutHeader: if checkFuncs[outHeader].c <= 0 { t.Fatalf("unexpected stats: %T", s.s) } checkFuncs[outHeader].f(t, s, expect) checkFuncs[outHeader].c-- case *stats.OutPayload: if checkFuncs[outPayload].c <= 0 { t.Fatalf("unexpected stats: %T", s.s) } checkFuncs[outPayload].f(t, s, expect) checkFuncs[outPayload].c-- case *stats.InHeader: if checkFuncs[inHeader].c <= 0 { t.Fatalf("unexpected stats: %T", s.s) } checkFuncs[inHeader].f(t, s, expect) 
checkFuncs[inHeader].c-- case *stats.InPayload: if checkFuncs[inPayload].c <= 0 { t.Fatalf("unexpected stats: %T", s.s) } checkFuncs[inPayload].f(t, s, expect) checkFuncs[inPayload].c-- case *stats.InTrailer: if checkFuncs[inTrailer].c <= 0 { t.Fatalf("unexpected stats: %T", s.s) } checkFuncs[inTrailer].f(t, s, expect) checkFuncs[inTrailer].c-- case *stats.End: if checkFuncs[end].c <= 0 { t.Fatalf("unexpected stats: %T", s.s) } checkFuncs[end].f(t, s, expect) checkFuncs[end].c-- case *stats.ConnBegin: if checkFuncs[connBegin].c <= 0 { t.Fatalf("unexpected stats: %T", s.s) } checkFuncs[connBegin].f(t, s, expect) checkFuncs[connBegin].c-- case *stats.ConnEnd: if checkFuncs[connEnd].c <= 0 { t.Fatalf("unexpected stats: %T", s.s) } checkFuncs[connEnd].f(t, s, expect) checkFuncs[connEnd].c-- default: t.Fatalf("unexpected stats: %T", s.s) } } } func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map[int]*checkFuncWithCount) { h := &statshandler{} te := newTest(t, tc, h, nil) te.startServer(&testServer{}) defer te.tearDown() var ( reqs []*testpb.SimpleRequest resps []*testpb.SimpleResponse method string err error req *testpb.SimpleRequest resp *testpb.SimpleResponse e error ) switch cc.callType { case unaryRPC: method = "/grpc.testing.TestService/UnaryCall" req, resp, e = te.doUnaryCall(cc) reqs = []*testpb.SimpleRequest{req} resps = []*testpb.SimpleResponse{resp} err = e case clientStreamRPC: method = "/grpc.testing.TestService/ClientStreamCall" reqs, resp, e = te.doClientStreamCall(cc) resps = []*testpb.SimpleResponse{resp} err = e case serverStreamRPC: method = "/grpc.testing.TestService/ServerStreamCall" req, resps, e = te.doServerStreamCall(cc) reqs = []*testpb.SimpleRequest{req} err = e case fullDuplexStreamRPC: method = "/grpc.testing.TestService/FullDuplexCall" reqs, resps, err = te.doFullDuplexCallRoundtrip(cc) } if cc.success != (err == nil) { t.Fatalf("cc.success: %v, got error: %v", cc.success, err) } te.cc.Close() 
te.srv.GracefulStop() // Wait for the server to stop. lenRPCStats := 0 for _, v := range checkFuncs { lenRPCStats += v.c } for { h.mu.Lock() if len(h.gotRPC) >= lenRPCStats { h.mu.Unlock() break } h.mu.Unlock() time.Sleep(10 * time.Millisecond) } for { h.mu.Lock() if _, ok := h.gotConn[len(h.gotConn)-1].s.(*stats.ConnEnd); ok { h.mu.Unlock() break } h.mu.Unlock() time.Sleep(10 * time.Millisecond) } expect := &expectedData{ serverAddr: te.srvAddr, compression: tc.compress, method: method, requests: reqs, responses: resps, failfast: cc.failfast, err: err, } h.mu.Lock() checkConnStats(t, h.gotConn) h.mu.Unlock() checkClientStats(t, h.gotRPC, expect, checkFuncs) } func (s) TestClientStatsUnaryRPC(t *testing.T) { testClientStats(t, &testConfig{compress: ""}, &rpcConfig{success: true, failfast: false, callType: unaryRPC}, map[int]*checkFuncWithCount{ begin: {checkBegin, 1}, outHeader: {checkOutHeader, 1}, outPayload: {checkOutPayload, 1}, inHeader: {checkInHeader, 1}, inPayload: {checkInPayload, 1}, inTrailer: {checkInTrailer, 1}, end: {checkEnd, 1}, }) } func (s) TestClientStatsUnaryRPCError(t *testing.T) { testClientStats(t, &testConfig{compress: ""}, &rpcConfig{success: false, failfast: false, callType: unaryRPC}, map[int]*checkFuncWithCount{ begin: {checkBegin, 1}, outHeader: {checkOutHeader, 1}, outPayload: {checkOutPayload, 1}, inHeader: {checkInHeader, 1}, inTrailer: {checkInTrailer, 1}, end: {checkEnd, 1}, }) } func (s) TestClientStatsClientStreamRPC(t *testing.T) { count := 5 testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: clientStreamRPC}, map[int]*checkFuncWithCount{ begin: {checkBegin, 1}, outHeader: {checkOutHeader, 1}, inHeader: {checkInHeader, 1}, outPayload: {checkOutPayload, count}, inTrailer: {checkInTrailer, 1}, inPayload: {checkInPayload, 1}, end: {checkEnd, 1}, }) } func (s) TestClientStatsClientStreamRPCError(t *testing.T) { count := 1 testClientStats(t, &testConfig{compress: 
"gzip"}, &rpcConfig{count: count, success: false, failfast: false, callType: clientStreamRPC}, map[int]*checkFuncWithCount{ begin: {checkBegin, 1}, outHeader: {checkOutHeader, 1}, inHeader: {checkInHeader, 1}, outPayload: {checkOutPayload, 1}, inTrailer: {checkInTrailer, 1}, end: {checkEnd, 1}, }) } func (s) TestClientStatsServerStreamRPC(t *testing.T) { count := 5 testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: serverStreamRPC}, map[int]*checkFuncWithCount{ begin: {checkBegin, 1}, outHeader: {checkOutHeader, 1}, outPayload: {checkOutPayload, 1}, inHeader: {checkInHeader, 1}, inPayload: {checkInPayload, count}, inTrailer: {checkInTrailer, 1}, end: {checkEnd, 1}, }) } func (s) TestClientStatsServerStreamRPCError(t *testing.T) { count := 5 testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, failfast: false, callType: serverStreamRPC}, map[int]*checkFuncWithCount{ begin: {checkBegin, 1}, outHeader: {checkOutHeader, 1}, outPayload: {checkOutPayload, 1}, inHeader: {checkInHeader, 1}, inTrailer: {checkInTrailer, 1}, end: {checkEnd, 1}, }) } func (s) TestClientStatsFullDuplexRPC(t *testing.T) { count := 5 testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: true, failfast: false, callType: fullDuplexStreamRPC}, map[int]*checkFuncWithCount{ begin: {checkBegin, 1}, outHeader: {checkOutHeader, 1}, outPayload: {checkOutPayload, count}, inHeader: {checkInHeader, 1}, inPayload: {checkInPayload, count}, inTrailer: {checkInTrailer, 1}, end: {checkEnd, 1}, }) } func (s) TestClientStatsFullDuplexRPCError(t *testing.T) { count := 5 testClientStats(t, &testConfig{compress: "gzip"}, &rpcConfig{count: count, success: false, failfast: false, callType: fullDuplexStreamRPC}, map[int]*checkFuncWithCount{ begin: {checkBegin, 1}, outHeader: {checkOutHeader, 1}, outPayload: {checkOutPayload, 1}, inHeader: {checkInHeader, 1}, inTrailer: {checkInTrailer, 1}, 
end: {checkEnd, 1}, }) } func (s) TestTags(t *testing.T) { b := []byte{5, 2, 4, 3, 1} ctx := stats.SetTags(context.Background(), b) if tg := stats.OutgoingTags(ctx); !reflect.DeepEqual(tg, b) { t.Errorf("OutgoingTags(%v) = %v; want %v", ctx, tg, b) } if tg := stats.Tags(ctx); tg != nil { t.Errorf("Tags(%v) = %v; want nil", ctx, tg) } ctx = stats.SetIncomingTags(context.Background(), b) if tg := stats.Tags(ctx); !reflect.DeepEqual(tg, b) { t.Errorf("Tags(%v) = %v; want %v", ctx, tg, b) } if tg := stats.OutgoingTags(ctx); tg != nil { t.Errorf("OutgoingTags(%v) = %v; want nil", ctx, tg) } } func (s) TestTrace(t *testing.T) { b := []byte{5, 2, 4, 3, 1} ctx := stats.SetTrace(context.Background(), b) if tr := stats.OutgoingTrace(ctx); !reflect.DeepEqual(tr, b) { t.Errorf("OutgoingTrace(%v) = %v; want %v", ctx, tr, b) } if tr := stats.Trace(ctx); tr != nil { t.Errorf("Trace(%v) = %v; want nil", ctx, tr) } ctx = stats.SetIncomingTrace(context.Background(), b) if tr := stats.Trace(ctx); !reflect.DeepEqual(tr, b) { t.Errorf("Trace(%v) = %v; want %v", ctx, tr, b) } if tr := stats.OutgoingTrace(ctx); tr != nil { t.Errorf("OutgoingTrace(%v) = %v; want nil", ctx, tr) } } grpc-go-1.29.1/status/000077500000000000000000000000001365033716300144745ustar00rootroot00000000000000grpc-go-1.29.1/status/status.go000066400000000000000000000076001365033716300163510ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ // Package status implements errors returned by gRPC. These errors are // serialized and transmitted on the wire between server and client, and allow // for additional data to be transmitted via the Details field in the status // proto. gRPC service handlers should return an error created by this // package, and gRPC clients should expect a corresponding error to be // returned from the RPC call. // // This package upholds the invariants that a non-nil error may not // contain an OK code, and an OK code must result in a nil error. package status import ( "context" "fmt" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/status" ) // Status references google.golang.org/grpc/internal/status. It represents an // RPC status code, message, and details. It is immutable and should be // created with New, Newf, or FromProto. // https://godoc.org/google.golang.org/grpc/internal/status type Status = status.Status // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { return status.New(c, msg) } // Newf returns New(c, fmt.Sprintf(format, a...)). func Newf(c codes.Code, format string, a ...interface{}) *Status { return New(c, fmt.Sprintf(format, a...)) } // Error returns an error representing c and msg. If c is OK, returns nil. func Error(c codes.Code, msg string) error { return New(c, msg).Err() } // Errorf returns Error(c, fmt.Sprintf(format, a...)). func Errorf(c codes.Code, format string, a ...interface{}) error { return Error(c, fmt.Sprintf(format, a...)) } // ErrorProto returns an error representing s. If s.Code is OK, returns nil. func ErrorProto(s *spb.Status) error { return FromProto(s).Err() } // FromProto returns a Status representing s. func FromProto(s *spb.Status) *Status { return status.FromProto(s) } // FromError returns a Status representing err if it was produced from this // package or has a method `GRPCStatus() *Status`. 
Otherwise, ok is false and a // Status is returned with codes.Unknown and the original error message. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true } if se, ok := err.(interface { GRPCStatus() *Status }); ok { return se.GRPCStatus(), true } return New(codes.Unknown, err.Error()), false } // Convert is a convenience function which removes the need to handle the // boolean return value from FromError. func Convert(err error) *Status { s, _ := FromError(err) return s } // Code returns the Code of the error if it is a Status error, codes.OK if err // is nil, or codes.Unknown otherwise. func Code(err error) codes.Code { // Don't use FromError to avoid allocation of OK status. if err == nil { return codes.OK } if se, ok := err.(interface { GRPCStatus() *Status }); ok { return se.GRPCStatus().Code() } return codes.Unknown } // FromContextError converts a context error into a Status. It returns a // Status with codes.OK if err is nil, or a Status with codes.Unknown if err is // non-nil and not a context error. func FromContextError(err error) *Status { switch err { case nil: return nil case context.DeadlineExceeded: return New(codes.DeadlineExceeded, err.Error()) case context.Canceled: return New(codes.Canceled, err.Error()) default: return New(codes.Unknown, err.Error()) } } grpc-go-1.29.1/status/status_ext_test.go000066400000000000000000000051001365033716300202610ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package status_test import ( "errors" "testing" "github.com/golang/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/status" "google.golang.org/grpc/test/grpc_testing" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func errWithDetails(t *testing.T, s *status.Status, details ...proto.Message) error { t.Helper() res, err := s.WithDetails(details...) if err != nil { t.Fatalf("(%v).WithDetails(%v) = %v, %v; want _, ", s, details, res, err) } return res.Err() } func (s) TestErrorIs(t *testing.T) { // Test errors. testErr := status.Error(codes.Internal, "internal server error") testErrWithDetails := errWithDetails(t, status.New(codes.Internal, "internal server error"), &grpc_testing.Empty{}) // Test cases. testCases := []struct { err1, err2 error want bool }{ {err1: testErr, err2: nil, want: false}, {err1: testErr, err2: status.Error(codes.Internal, "internal server error"), want: true}, {err1: testErr, err2: status.Error(codes.Internal, "internal error"), want: false}, {err1: testErr, err2: status.Error(codes.Unknown, "internal server error"), want: false}, {err1: testErr, err2: errors.New("non-grpc error"), want: false}, {err1: testErrWithDetails, err2: status.Error(codes.Internal, "internal server error"), want: false}, {err1: testErrWithDetails, err2: errWithDetails(t, status.New(codes.Internal, "internal server error"), &grpc_testing.Empty{}), want: true}, {err1: testErrWithDetails, err2: errWithDetails(t, status.New(codes.Internal, "internal server error"), &grpc_testing.Empty{}, &grpc_testing.Empty{}), want: false}, } for _, tc := range testCases { isError, ok := tc.err1.(interface{ Is(target error) bool }) if !ok { t.Errorf("(%v) does not implement is", tc.err1) continue } is := isError.Is(tc.err2) if is != tc.want { t.Errorf("(%v).Is(%v) = %t; want 
%t", tc.err1, tc.err2, is, tc.want) } } } grpc-go-1.29.1/status/status_test.go000066400000000000000000000231441365033716300174110ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package status import ( "context" "errors" "fmt" "testing" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" apb "github.com/golang/protobuf/ptypes/any" dpb "github.com/golang/protobuf/ptypes/duration" "github.com/google/go-cmp/cmp" cpb "google.golang.org/genproto/googleapis/rpc/code" epb "google.golang.org/genproto/googleapis/rpc/errdetails" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/status" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } // errEqual is essentially a copy of testutils.StatusErrEqual(), to avoid a // cyclic dependency. 
func errEqual(err1, err2 error) bool { status1, ok := FromError(err1) if !ok { return false } status2, ok := FromError(err2) if !ok { return false } return proto.Equal(status1.Proto(), status2.Proto()) } func (s) TestErrorsWithSameParameters(t *testing.T) { const description = "some description" e1 := Errorf(codes.AlreadyExists, description) e2 := Errorf(codes.AlreadyExists, description) if e1 == e2 || !errEqual(e1, e2) { t.Fatalf("Errors should be equivalent but unique - e1: %v, %v e2: %p, %v", e1.(*status.Error), e1, e2.(*status.Error), e2) } } func (s) TestFromToProto(t *testing.T) { s := &spb.Status{ Code: int32(codes.Internal), Message: "test test test", Details: []*apb.Any{{TypeUrl: "foo", Value: []byte{3, 2, 1}}}, } err := FromProto(s) if got := err.Proto(); !proto.Equal(s, got) { t.Fatalf("Expected errors to be identical - s: %v got: %v", s, got) } } func (s) TestFromNilProto(t *testing.T) { tests := []*Status{nil, FromProto(nil)} for _, s := range tests { if c := s.Code(); c != codes.OK { t.Errorf("s: %v - Expected s.Code() = OK; got %v", s, c) } if m := s.Message(); m != "" { t.Errorf("s: %v - Expected s.Message() = \"\"; got %q", s, m) } if p := s.Proto(); p != nil { t.Errorf("s: %v - Expected s.Proto() = nil; got %q", s, p) } if e := s.Err(); e != nil { t.Errorf("s: %v - Expected s.Err() = nil; got %v", s, e) } } } func (s) TestError(t *testing.T) { err := Error(codes.Internal, "test description") if got, want := err.Error(), "rpc error: code = Internal desc = test description"; got != want { t.Fatalf("err.Error() = %q; want %q", got, want) } s, _ := FromError(err) if got, want := s.Code(), codes.Internal; got != want { t.Fatalf("err.Code() = %s; want %s", got, want) } if got, want := s.Message(), "test description"; got != want { t.Fatalf("err.Message() = %s; want %s", got, want) } } func (s) TestErrorOK(t *testing.T) { err := Error(codes.OK, "foo") if err != nil { t.Fatalf("Error(codes.OK, _) = %p; want nil", err.(*status.Error)) } } func (s) 
TestErrorProtoOK(t *testing.T) { s := &spb.Status{Code: int32(codes.OK)} if got := ErrorProto(s); got != nil { t.Fatalf("ErrorProto(%v) = %v; want nil", s, got) } } func (s) TestFromError(t *testing.T) { code, message := codes.Internal, "test description" err := Error(code, message) s, ok := FromError(err) if !ok || s.Code() != code || s.Message() != message || s.Err() == nil { t.Fatalf("FromError(%v) = %v, %v; want , true", err, s, ok, code, message) } } func (s) TestFromErrorOK(t *testing.T) { code, message := codes.OK, "" s, ok := FromError(nil) if !ok || s.Code() != code || s.Message() != message || s.Err() != nil { t.Fatalf("FromError(nil) = %v, %v; want , true", s, ok, code, message) } } type customError struct { Code codes.Code Message string Details []*apb.Any } func (c customError) Error() string { return fmt.Sprintf("rpc error: code = %s desc = %s", c.Code, c.Message) } func (c customError) GRPCStatus() *Status { return status.FromProto(&spb.Status{ Code: int32(c.Code), Message: c.Message, Details: c.Details, }) } func (s) TestFromErrorImplementsInterface(t *testing.T) { code, message := codes.Internal, "test description" details := []*apb.Any{{ TypeUrl: "testUrl", Value: []byte("testValue"), }} err := customError{ Code: code, Message: message, Details: details, } s, ok := FromError(err) if !ok || s.Code() != code || s.Message() != message || s.Err() == nil { t.Fatalf("FromError(%v) = %v, %v; want , true", err, s, ok, code, message) } pd := s.Proto().GetDetails() if len(pd) != 1 || !proto.Equal(pd[0], details[0]) { t.Fatalf("s.Proto.GetDetails() = %v; want ", pd, details) } } func (s) TestFromErrorUnknownError(t *testing.T) { code, message := codes.Unknown, "unknown error" err := errors.New("unknown error") s, ok := FromError(err) if ok || s.Code() != code || s.Message() != message { t.Fatalf("FromError(%v) = %v, %v; want , false", err, s, ok, code, message) } } func (s) TestConvertKnownError(t *testing.T) { code, message := codes.Internal, "test 
description" err := Error(code, message) s := Convert(err) if s.Code() != code || s.Message() != message { t.Fatalf("Convert(%v) = %v; want ", err, s, code, message) } } func (s) TestConvertUnknownError(t *testing.T) { code, message := codes.Unknown, "unknown error" err := errors.New("unknown error") s := Convert(err) if s.Code() != code || s.Message() != message { t.Fatalf("Convert(%v) = %v; want ", err, s, code, message) } } func (s) TestStatus_ErrorDetails(t *testing.T) { tests := []struct { code codes.Code details []proto.Message }{ { code: codes.NotFound, details: nil, }, { code: codes.NotFound, details: []proto.Message{ &epb.ResourceInfo{ ResourceType: "book", ResourceName: "projects/1234/books/5678", Owner: "User", }, }, }, { code: codes.Internal, details: []proto.Message{ &epb.DebugInfo{ StackEntries: []string{ "first stack", "second stack", }, }, }, }, { code: codes.Unavailable, details: []proto.Message{ &epb.RetryInfo{ RetryDelay: &dpb.Duration{Seconds: 60}, }, &epb.ResourceInfo{ ResourceType: "book", ResourceName: "projects/1234/books/5678", Owner: "User", }, }, }, } for _, tc := range tests { s, err := New(tc.code, "").WithDetails(tc.details...) 
if err != nil { t.Fatalf("(%v).WithDetails(%+v) failed: %v", str(s), tc.details, err) } details := s.Details() for i := range details { if !proto.Equal(details[i].(proto.Message), tc.details[i]) { t.Fatalf("(%v).Details()[%d] = %+v, want %+v", str(s), i, details[i], tc.details[i]) } } } } func (s) TestStatus_WithDetails_Fail(t *testing.T) { tests := []*Status{ nil, FromProto(nil), New(codes.OK, ""), } for _, s := range tests { if s, err := s.WithDetails(); err == nil || s != nil { t.Fatalf("(%v).WithDetails(%+v) = %v, %v; want nil, non-nil", str(s), []proto.Message{}, s, err) } } } func (s) TestStatus_ErrorDetails_Fail(t *testing.T) { tests := []struct { s *Status i []interface{} }{ { nil, nil, }, { FromProto(nil), nil, }, { New(codes.OK, ""), []interface{}{}, }, { FromProto(&spb.Status{ Code: int32(cpb.Code_CANCELLED), Details: []*apb.Any{ { TypeUrl: "", Value: []byte{}, }, mustMarshalAny(&epb.ResourceInfo{ ResourceType: "book", ResourceName: "projects/1234/books/5678", Owner: "User", }), }, }), []interface{}{ errors.New(`message type url "" is invalid`), &epb.ResourceInfo{ ResourceType: "book", ResourceName: "projects/1234/books/5678", Owner: "User", }, }, }, } for _, tc := range tests { got := tc.s.Details() if !cmp.Equal(got, tc.i, cmp.Comparer(proto.Equal), cmp.Comparer(equalError)) { t.Errorf("(%v).Details() = %+v, want %+v", str(tc.s), got, tc.i) } } } func equalError(x, y error) bool { return x == y || (x != nil && y != nil && x.Error() == y.Error()) } func str(s *Status) string { if s == nil { return "nil" } if s.Proto() == nil { return "" } return fmt.Sprintf("", s.Code(), s.Message(), s.Details()) } // mustMarshalAny converts a protobuf message to an any. 
func mustMarshalAny(msg proto.Message) *apb.Any { any, err := ptypes.MarshalAny(msg) if err != nil { panic(fmt.Sprintf("ptypes.MarshalAny(%+v) failed: %v", msg, err)) } return any } func (s) TestFromContextError(t *testing.T) { testCases := []struct { in error want *Status }{ {in: nil, want: New(codes.OK, "")}, {in: context.DeadlineExceeded, want: New(codes.DeadlineExceeded, context.DeadlineExceeded.Error())}, {in: context.Canceled, want: New(codes.Canceled, context.Canceled.Error())}, {in: errors.New("other"), want: New(codes.Unknown, "other")}, } for _, tc := range testCases { got := FromContextError(tc.in) if got.Code() != tc.want.Code() || got.Message() != tc.want.Message() { t.Errorf("FromContextError(%v) = %v; want %v", tc.in, got, tc.want) } } } grpc-go-1.29.1/stream.go000066400000000000000000001314541365033716300150030ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package grpc import ( "context" "errors" "io" "math" "strconv" "sync" "time" "golang.org/x/net/trace" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // StreamHandler defines the handler called by gRPC server to complete the // execution of a streaming RPC. If a StreamHandler returns an error, it // should be produced by the status package, or else gRPC will use // codes.Unknown as the status code and err.Error() as the status message // of the RPC. type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. type StreamDesc struct { StreamName string Handler StreamHandler // At least one of these is true. ServerStreams bool ClientStreams bool } // Stream defines the common interface a client or server stream has to satisfy. // // Deprecated: See ClientStream and ServerStream documentation instead. type Stream interface { // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. SendMsg(m interface{}) error // Deprecated: See ClientStream and ServerStream documentation instead. RecvMsg(m interface{}) error } // ClientStream defines the client-side behavior of a streaming RPC. // // All errors returned from ClientStream methods are compatible with the // status package. type ClientStream interface { // Header returns the header metadata received from the server if there // is any. It blocks if the metadata is not ready to read. 
Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or // stream.Recv has returned a non-nil error (including io.EOF). Trailer() metadata.MD // CloseSend closes the send direction of the stream. It closes the stream // when non-nil error is met. It is also not safe to call CloseSend // concurrently with SendMsg. CloseSend() error // Context returns the context for this stream. // // It should not be called until after Header or RecvMsg has returned. Once // called, subsequent client-side retries are disabled. Context() context.Context // SendMsg is generally called by generated code. On error, SendMsg aborts // the stream. If the error was generated by the client, the status is // returned directly; otherwise, io.EOF is returned and the status of // the stream may be discovered using RecvMsg. // // SendMsg blocks until: // - There is sufficient flow control to schedule m with the transport, or // - The stream is done, or // - The stream breaks. // // SendMsg does not wait until the message is received by the server. An // untimely stream closure may result in lost messages. To ensure delivery, // users should ensure the RPC completed successfully using RecvMsg. // // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC // status. 
// // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. RecvMsg(m interface{}) error } // NewStream creates a new Stream for the client side. This is typically // called by generated code. ctx is used for the lifetime of the stream. // // To ensure resources are not leaked due to the stream returned, one of the following // actions must be performed: // // 1. Call Close on the ClientConn. // 2. Cancel the context provided. // 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated // client-streaming RPC, for instance, might use the helper function // CloseAndRecv (note that CloseSend does not Recv, therefore is not // guaranteed to release all resources). // 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. // // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) if cc.dopts.streamInt != nil { return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) } return newClientStream(ctx, desc, cc, method, opts...) } // NewClientStream is a wrapper for ClientConn.NewStream. func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { return cc.NewStream(ctx, desc, method, opts...) 
} func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { if channelz.IsOn() { cc.incrCallsStarted() defer func() { if err != nil { cc.incrCallsFailed() } }() } c := defaultCallInfo() // Provide an opportunity for the first RPC to see the first service config // provided by the resolver. if err := cc.waitForResolvedAddrs(ctx); err != nil { return nil, err } mc := cc.GetMethodConfig(method) if mc.WaitForReady != nil { c.failFast = !*mc.WaitForReady } // Possible context leak: // The cancel function for the child context we create will only be called // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if // an error is generated by SendMsg. // https://github.com/grpc/grpc-go/issues/1818. var cancel context.CancelFunc if mc.Timeout != nil && *mc.Timeout >= 0 { ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) } else { ctx, cancel = context.WithCancel(ctx) } defer func() { if err != nil { cancel() } }() for _, o := range opts { if err := o.before(c); err != nil { return nil, toRPCErr(err) } } c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) if err := setCallInfoCodec(c); err != nil { return nil, err } callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, ContentSubtype: c.contentSubtype, } // Set our outgoing compression according to the UseCompressor CallOption, if // set. In that case, also find the compressor from the encoding package. // Otherwise, use the compressor configured by the WithCompressor DialOption, // if set. 
var cp Compressor var comp encoding.Compressor if ct := c.compressorType; ct != "" { callHdr.SendCompress = ct if ct != encoding.Identity { comp = encoding.GetCompressor(ct) if comp == nil { return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) } } } else if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() cp = cc.dopts.cp } if c.creds != nil { callHdr.Creds = c.creds } var trInfo *traceInfo if EnableTracing { trInfo = &traceInfo{ tr: trace.New("grpc.Sent."+methodFamily(method), method), firstLine: firstLine{ client: true, }, } if deadline, ok := ctx.Deadline(); ok { trInfo.firstLine.deadline = time.Until(deadline) } trInfo.tr.LazyLog(&trInfo.firstLine, false) ctx = trace.NewContext(ctx, trInfo.tr) } ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) sh := cc.dopts.copts.StatsHandler var beginTime time.Time if sh != nil { ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) beginTime = time.Now() begin := &stats.Begin{ Client: true, BeginTime: beginTime, FailFast: c.failFast, } sh.HandleRPC(ctx, begin) } cs := &clientStream{ callHdr: callHdr, ctx: ctx, methodConfig: &mc, opts: opts, callInfo: c, cc: cc, desc: desc, codec: c.codec, cp: cp, comp: comp, cancel: cancel, beginTime: beginTime, firstAttempt: true, } if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } cs.binlog = binarylog.GetMethodLogger(method) cs.callInfo.stream = cs // Only this initial attempt has stats/tracing. // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. 
if err := cs.newAttemptLocked(sh, trInfo); err != nil { cs.finish(err) return nil, err } op := func(a *csAttempt) error { return a.newStream() } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { cs.finish(err) return nil, err } if cs.binlog != nil { md, _ := metadata.FromOutgoingContext(ctx) logEntry := &binarylog.ClientHeader{ OnClientSide: true, Header: md, MethodName: method, Authority: cs.cc.authority, } if deadline, ok := ctx.Deadline(); ok { logEntry.Timeout = time.Until(deadline) if logEntry.Timeout < 0 { logEntry.Timeout = 0 } } cs.binlog.Log(logEntry) } if desc != unaryStreamDesc { // Listen on cc and stream contexts to cleanup when the user closes the // ClientConn or cancels the stream context. In all other cases, an error // should already be injected into the recv buffer by the transport, which // the client will eventually receive, and then we will cancel the stream's // context in clientStream.finish. go func() { select { case <-cc.ctx.Done(): cs.finish(ErrClientConnClosing) case <-ctx.Done(): cs.finish(toRPCErr(ctx.Err())) } }() } return cs, nil } // newAttemptLocked creates a new attempt with a transport. // If it succeeds, then it replaces clientStream's attempt with this new attempt. func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { newAttempt := &csAttempt{ cs: cs, dc: cs.cc.dopts.dc, statsHandler: sh, trInfo: trInfo, } defer func() { if retErr != nil { // This attempt is not set in the clientStream, so it's finish won't // be called. Call it here for stats and trace in case they are not // nil. 
newAttempt.finish(retErr) } }() if err := cs.ctx.Err(); err != nil { return toRPCErr(err) } t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { return err } if trInfo != nil { trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) } newAttempt.t = t newAttempt.done = done cs.attempt = newAttempt return nil } func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(cs.ctx, cs.callHdr) if err != nil { return toRPCErr(err) } cs.attempt.s = s cs.attempt.p = &parser{r: s} return nil } // clientStream implements a client side Stream. type clientStream struct { callHdr *transport.CallHdr opts []CallOption callInfo *callInfo cc *ClientConn desc *StreamDesc codec baseCodec cp Compressor comp encoding.Compressor cancel context.CancelFunc // cancels all attempts sentLast bool // sent an end stream beginTime time.Time methodConfig *MethodConfig ctx context.Context // the application's context, wrapped by stats/tracing retryThrottler *retryThrottler // The throttler active when the RPC began. binlog *binarylog.MethodLogger // Binary logger, can be nil. // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). // // It's only read and used by Recv() and Header(), so it doesn't need to be // synchronized. serverHeaderBinlogged bool mu sync.Mutex firstAttempt bool // if true, transparent retry is valid numRetries int // exclusive of transparent retry attempt(s) numRetriesSincePushback int // retries since pushback; to reset backoff finished bool // TODO: replace with atomic cmpxchg or sync.Once? // attempt is the active client stream attempt. // The only place where it is written is the newAttemptLocked method and this method never writes nil. // So, attempt can be nil only inside newClientStream function when clientStream is first created. 
// One of the first things done after clientStream's creation, is to call newAttemptLocked which either // assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked, // then newClientStream calls finish on the clientStream and returns. So, finish method is the only // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. committed bool // active attempt committed for retry? buffer []func(a *csAttempt) error // operations to replay on retry bufferSize int // current size of buffer } // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { cs *clientStream t transport.ClientTransport s *transport.Stream p *parser done func(balancer.DoneInfo) finished bool dc Decompressor decomp encoding.Compressor decompSet bool mu sync.Mutex // guards trInfo.tr // trInfo may be nil (if EnableTracing is false). // trInfo.tr is set when created (if EnableTracing is true), // and cleared when the finish method is called. trInfo *traceInfo statsHandler stats.Handler } func (cs *clientStream) commitAttemptLocked() { cs.committed = true cs.buffer = nil } func (cs *clientStream) commitAttempt() { cs.mu.Lock() cs.commitAttemptLocked() cs.mu.Unlock() } // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. func (cs *clientStream) shouldRetry(err error) error { if cs.attempt.s == nil && !cs.callInfo.failFast { // In the event of any error from NewStream (attempt.s == nil), we // never attempted to write anything to the wire, so we can retry // indefinitely for non-fail-fast RPCs. return nil } if cs.finished || cs.committed { // RPC is finished or committed; cannot retry. return err } // Wait for the trailers. 
if cs.attempt.s != nil { <-cs.attempt.s.Done() } if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { // First attempt, stream unprocessed: transparently retry. cs.firstAttempt = false return nil } cs.firstAttempt = false if cs.cc.dopts.disableRetry { return err } pushback := 0 hasPushback := false if cs.attempt.s != nil { if !cs.attempt.s.TrailersOnly() { return err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { channelz.Infof(cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } hasPushback = true } else if len(sps) > 1 { channelz.Warningf(cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } } var code codes.Code if cs.attempt.s != nil { code = cs.attempt.s.Status().Code() } else { code = status.Convert(err).Code() } rp := cs.methodConfig.retryPolicy if rp == nil || !rp.retryableStatusCodes[code] { return err } // Note: the ordering here is important; we count this as a failure // only if the code matched a retryable code. 
if cs.retryThrottler.throttle() { return err } if cs.numRetries+1 >= rp.maxAttempts { return err } var dur time.Duration if hasPushback { dur = time.Millisecond * time.Duration(pushback) cs.numRetriesSincePushback = 0 } else { fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) cur := float64(rp.initialBackoff) * fact if max := float64(rp.maxBackoff); cur > max { cur = max } dur = time.Duration(grpcrand.Int63n(int64(cur))) cs.numRetriesSincePushback++ } // TODO(dfawley): we could eagerly fail here if dur puts us past the // deadline, but unsure if it is worth doing. t := time.NewTimer(dur) select { case <-t.C: cs.numRetries++ return nil case <-cs.ctx.Done(): t.Stop() return status.FromContextError(cs.ctx.Err()).Err() } } // Returns nil if a retry was performed and succeeded; error otherwise. func (cs *clientStream) retryLocked(lastErr error) error { for { cs.attempt.finish(lastErr) if err := cs.shouldRetry(lastErr); err != nil { cs.commitAttemptLocked() return err } if err := cs.newAttemptLocked(nil, nil); err != nil { return err } if lastErr = cs.replayBufferLocked(); lastErr == nil { return nil } } } func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. return cs.attempt.s.Context() } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { cs.mu.Lock() for { if cs.committed { cs.mu.Unlock() return op(cs.attempt) } a := cs.attempt cs.mu.Unlock() err := op(a) cs.mu.Lock() if a != cs.attempt { // We started another attempt already. 
continue } if err == io.EOF { <-a.s.Done() } if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { onSuccess() cs.mu.Unlock() return err } if err := cs.retryLocked(err); err != nil { cs.mu.Unlock() return err } } } func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() return toRPCErr(err) }, cs.commitAttemptLocked) if err != nil { cs.finish(err) return nil, err } if cs.binlog != nil && !cs.serverHeaderBinlogged { // Only log if binary log is on and header has not been logged. logEntry := &binarylog.ServerHeader{ OnClientSide: true, Header: m, PeerAddr: nil, } if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true } return m, err } func (cs *clientStream) Trailer() metadata.MD { // On RPC failure, we never need to retry, because usage requires that // RecvMsg() returned a non-nil error before calling this function is valid. // We would have retried earlier if necessary. // // Commit the attempt anyway, just in case users are not following those // directions -- it will prevent races and should not meaningfully impact // performance. cs.commitAttempt() if cs.attempt.s == nil { return nil } return cs.attempt.s.Trailer() } func (cs *clientStream) replayBufferLocked() error { a := cs.attempt for _, f := range cs.buffer { if err := f(a); err != nil { return err } } return nil } func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { // Note: we still will buffer if retry is disabled (for transparent retries). 
if cs.committed { return } cs.bufferSize += sz if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { cs.commitAttemptLocked() return } cs.buffer = append(cs.buffer, op) } func (cs *clientStream) SendMsg(m interface{}) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg // call, as these indicate problems created by this client. (Transport // errors are converted to an io.EOF error in csAttempt.sendMsg; the real // error will be returned from RecvMsg eventually in that case, or be // retried.) cs.finish(err) } }() if cs.sentLast { return status.Errorf(codes.Internal, "SendMsg called after CloseSend") } if !cs.desc.ClientStreams { cs.sentLast = true } // load hdr, payload, data hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) if err != nil { return err } // TODO(dfawley): should we be checking len(data) instead? if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { err := a.sendMsg(m, hdr, payload, data) // nil out the message and uncomp when replaying; they are only needed for // stats which is disabled for subsequent attempts. m, data = nil, nil return err } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if cs.binlog != nil && err == nil { cs.binlog.Log(&binarylog.ClientMessage{ OnClientSide: true, Message: msgBytes, }) } return } func (cs *clientStream) RecvMsg(m interface{}) error { if cs.binlog != nil && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. 
cs.Header() } var recvInfo *payloadInfo if cs.binlog != nil { recvInfo = &payloadInfo{} } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) }, cs.commitAttemptLocked) if cs.binlog != nil && err == nil { cs.binlog.Log(&binarylog.ServerMessage{ OnClientSide: true, Message: recvInfo.uncompressedBytes, }) } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) if cs.binlog != nil { // finish will not log Trailer. Log Trailer here. logEntry := &binarylog.ServerTrailer{ OnClientSide: true, Trailer: cs.Trailer(), Err: err, } if logEntry.Err == io.EOF { logEntry.Err = nil } if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } cs.binlog.Log(logEntry) } } return err } func (cs *clientStream) CloseSend() error { if cs.sentLast { // TODO: return an error and finish the stream instead, due to API misuse? return nil } cs.sentLast = true op := func(a *csAttempt) error { a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call // RecvMsg. This also matches historical behavior. return nil } cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) if cs.binlog != nil { cs.binlog.Log(&binarylog.ClientHalfClose{ OnClientSide: true, }) } // We never returned an error here for reasons. return nil } func (cs *clientStream) finish(err error) { if err == io.EOF { // Ending a stream with EOF indicates a success. err = nil } cs.mu.Lock() if cs.finished { cs.mu.Unlock() return } cs.finished = true cs.commitAttemptLocked() cs.mu.Unlock() // For binary logging. only log cancel in finish (could be caused by RPC ctx // canceled or ClientConn closed). Trailer will be logged in RecvMsg. // // Only one of cancel or trailer needs to be logged. 
In the cases where // users don't call RecvMsg, users must have already canceled the RPC. if cs.binlog != nil && status.Code(err) == codes.Canceled { cs.binlog.Log(&binarylog.Cancel{ OnClientSide: true, }) } if err == nil { cs.retryThrottler.successfulRPC() } if channelz.IsOn() { if err != nil { cs.cc.incrCallsFailed() } else { cs.cc.incrCallsSucceeded() } } if cs.attempt != nil { cs.attempt.finish(err) // after functions all rely upon having a stream. if cs.attempt.s != nil { for _, o := range cs.opts { o.after(cs.callInfo) } } } cs.cancel() } func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() if a.trInfo.tr != nil { a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } a.mu.Unlock() } if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { if !cs.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() // will call it with the stream's status independently. return nil } return io.EOF } if a.statsHandler != nil { a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() } return nil } func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { cs := a.cs if a.statsHandler != nil && payInfo == nil { payInfo = &payloadInfo{} } if !a.decompSet { // Block until we receive headers containing received message encoding. if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { if a.dc == nil || a.dc.Type() != ct { // No configured decompressor, or it does not match the incoming // message encoding; attempt to find a registered compressor that does. a.dc = nil a.decomp = encoding.GetCompressor(ct) } } else { // No compression is used; disable our decompressor. a.dc = nil } // Only initialize this state once per stream. 
a.decompSet = true } err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) if err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { return statusErr } return io.EOF // indicates successful end of stream. } return toRPCErr(err) } if a.trInfo != nil { a.mu.Lock() if a.trInfo.tr != nil { a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } a.mu.Unlock() } if a.statsHandler != nil { a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, // TODO truncate large payload. Data: payInfo.uncompressedBytes, WireLength: payInfo.wireLength, Length: len(payInfo.uncompressedBytes), }) } if channelz.IsOn() { a.t.IncrMsgRecv() } if cs.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil } // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } if err == io.EOF { return a.s.Status().Err() // non-server streaming Recv returns nil on success } return toRPCErr(err) } func (a *csAttempt) finish(err error) { a.mu.Lock() if a.finished { a.mu.Unlock() return } a.finished = true if err == io.EOF { // Ending a stream with EOF indicates a success. 
err = nil } var tr metadata.MD if a.s != nil { a.t.CloseStream(a.s, err) tr = a.s.Trailer() } if a.done != nil { br := false if a.s != nil { br = a.s.BytesReceived() } a.done(balancer.DoneInfo{ Err: err, Trailer: tr, BytesSent: a.s != nil, BytesReceived: br, ServerLoad: balancerload.Parse(tr), }) } if a.statsHandler != nil { end := &stats.End{ Client: true, BeginTime: a.cs.beginTime, EndTime: time.Now(), Trailer: tr, Error: err, } a.statsHandler.HandleRPC(a.cs.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { a.trInfo.tr.LazyPrintf("RPC: [OK]") } else { a.trInfo.tr.LazyPrintf("RPC: [%v]", err) a.trInfo.tr.SetError() } a.trInfo.tr.Finish() a.trInfo.tr = nil } a.mu.Unlock() } // newClientStream creates a ClientStream with the specified transport, on the // given addrConn. // // It's expected that the given transport is either the same one in addrConn, or // is already closed. To avoid race, transport is specified separately, instead // of using ac.transpot. // // Main difference between this and ClientConn.NewStream: // - no retry // - no service config (or wait for service config) // - no tracing or stats func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) { if t == nil { // TODO: return RPC error here? return nil, errors.New("transport provided is nil") } // defaultCallInfo contains unnecessary info(i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct. c := &callInfo{} // Possible context leak: // The cancel function for the child context we create will only be called // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if // an error is generated by SendMsg. // https://github.com/grpc/grpc-go/issues/1818. 
ctx, cancel := context.WithCancel(ctx) defer func() { if err != nil { cancel() } }() for _, o := range opts { if err := o.before(c); err != nil { return nil, toRPCErr(err) } } c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize) if err := setCallInfoCodec(c); err != nil { return nil, err } callHdr := &transport.CallHdr{ Host: ac.cc.authority, Method: method, ContentSubtype: c.contentSubtype, } // Set our outgoing compression according to the UseCompressor CallOption, if // set. In that case, also find the compressor from the encoding package. // Otherwise, use the compressor configured by the WithCompressor DialOption, // if set. var cp Compressor var comp encoding.Compressor if ct := c.compressorType; ct != "" { callHdr.SendCompress = ct if ct != encoding.Identity { comp = encoding.GetCompressor(ct) if comp == nil { return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) } } } else if ac.cc.dopts.cp != nil { callHdr.SendCompress = ac.cc.dopts.cp.Type() cp = ac.cc.dopts.cp } if c.creds != nil { callHdr.Creds = c.creds } // Use a special addrConnStream to avoid retry. as := &addrConnStream{ callHdr: callHdr, ac: ac, ctx: ctx, cancel: cancel, opts: opts, callInfo: c, desc: desc, codec: c.codec, cp: cp, comp: comp, t: t, } as.callInfo.stream = as s, err := as.t.NewStream(as.ctx, as.callHdr) if err != nil { err = toRPCErr(err) return nil, err } as.s = s as.p = &parser{r: s} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on cc and stream contexts to cleanup when the user closes the // ClientConn or cancels the stream context. In all other cases, an error // should already be injected into the recv buffer by the transport, which // the client will eventually receive, and then we will cancel the stream's // context in clientStream.finish. 
go func() { select { case <-ac.ctx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) } }() } return as, nil } type addrConnStream struct { s *transport.Stream ac *addrConn callHdr *transport.CallHdr cancel context.CancelFunc opts []CallOption callInfo *callInfo t transport.ClientTransport ctx context.Context sentLast bool desc *StreamDesc codec baseCodec cp Compressor comp encoding.Compressor decompSet bool dc Decompressor decomp encoding.Compressor p *parser mu sync.Mutex finished bool } func (as *addrConnStream) Header() (metadata.MD, error) { m, err := as.s.Header() if err != nil { as.finish(toRPCErr(err)) } return m, err } func (as *addrConnStream) Trailer() metadata.MD { return as.s.Trailer() } func (as *addrConnStream) CloseSend() error { if as.sentLast { // TODO: return an error and finish the stream instead, due to API misuse? return nil } as.sentLast = true as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call // RecvMsg. This also matches historical behavior. return nil } func (as *addrConnStream) Context() context.Context { return as.s.Context() } func (as *addrConnStream) SendMsg(m interface{}) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg // call, as these indicate problems created by this client. (Transport // errors are converted to an io.EOF error in csAttempt.sendMsg; the real // error will be returned from RecvMsg eventually in that case, or be // retried.) 
as.finish(err) } }() if as.sentLast { return status.Errorf(codes.Internal, "SendMsg called after CloseSend") } if !as.desc.ClientStreams { as.sentLast = true } // load hdr, payload, data hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) if err != nil { return err } // TODO(dfawley): should we be checking len(data) instead? if len(payld) > *as.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) } if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() // will call it with the stream's status independently. return nil } return io.EOF } if channelz.IsOn() { as.t.IncrMsgSent() } return nil } func (as *addrConnStream) RecvMsg(m interface{}) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. as.finish(err) } }() if !as.decompSet { // Block until we receive headers containing received message encoding. if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { if as.dc == nil || as.dc.Type() != ct { // No configured decompressor, or it does not match the incoming // message encoding; attempt to find a registered compressor that does. as.dc = nil as.decomp = encoding.GetCompressor(ct) } } else { // No compression is used; disable our decompressor. as.dc = nil } // Only initialize this state once per stream. as.decompSet = true } err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) if err != nil { if err == io.EOF { if statusErr := as.s.Status().Err(); statusErr != nil { return statusErr } return io.EOF // indicates successful end of stream. 
} return toRPCErr(err) } if channelz.IsOn() { as.t.IncrMsgRecv() } if as.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil } // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } if err == io.EOF { return as.s.Status().Err() // non-server streaming Recv returns nil on success } return toRPCErr(err) } func (as *addrConnStream) finish(err error) { as.mu.Lock() if as.finished { as.mu.Unlock() return } as.finished = true if err == io.EOF { // Ending a stream with EOF indicates a success. err = nil } if as.s != nil { as.t.CloseStream(as.s, err) } if err != nil { as.ac.incrCallsFailed() } else { as.ac.incrCallsSucceeded() } as.cancel() as.mu.Unlock() } // ServerStream defines the server-side behavior of a streaming RPC. // // All errors returned from ServerStream methods are compatible with the // status package. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. // All the metadata will be sent out when one of the following happens: // - ServerStream.SendHeader() is called; // - The first response is sent out; // - An RPC status is sent out (error or success). SetHeader(metadata.MD) error // SendHeader sends the header metadata. // The provided md and headers set by SetHeader() will be sent. // It fails if called multiple times. SendHeader(metadata.MD) error // SetTrailer sets the trailer metadata which will be sent with the RPC status. // When called more than once, all the provided metadata will be merged. SetTrailer(metadata.MD) // Context returns the context for this stream. Context() context.Context // SendMsg sends a message. 
On error, SendMsg aborts the stream and the // error is returned directly. // // SendMsg blocks until: // - There is sufficient flow control to schedule m with the transport, or // - The stream is done, or // - The stream breaks. // // SendMsg does not wait until the message is received by the client. An // untimely stream closure may result in lost messages. // // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On // any non-EOF error, the stream is aborted and the error contains the // RPC status. // // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. RecvMsg(m interface{}) error } // serverStream implements a server side Stream. type serverStream struct { ctx context.Context t transport.ServerTransport s *transport.Stream p *parser codec baseCodec cp Compressor dc Decompressor comp encoding.Compressor decomp encoding.Compressor maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo statsHandler stats.Handler binlog *binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). // // It's only checked in send and sendHeader, doesn't need to be // synchronized. serverHeaderBinlogged bool mu sync.Mutex // protects trInfo.tr after the service handler runs. 
} func (ss *serverStream) Context() context.Context { return ss.ctx } func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { err := ss.t.WriteHeader(ss.s, md) if ss.binlog != nil && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() ss.binlog.Log(&binarylog.ServerHeader{ Header: h, }) ss.serverHeaderBinlogged = true } return err } func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } ss.s.SetTrailer(md) } func (ss *serverStream) SendMsg(m interface{}) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() if ss.trInfo.tr != nil { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ss.trInfo.tr.SetError() } } ss.mu.Unlock() } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) ss.t.WriteStatus(ss.s, st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // // This is not handled specifically now. User will return a final // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } if channelz.IsOn() && err == nil { ss.t.IncrMsgSent() } }() // load hdr, payload, data hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) if err != nil { return err } // TODO(dfawley): should we be checking len(data) instead? if len(payload) > ss.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), ss.maxSendMessageSize) } if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } if ss.binlog != nil { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() ss.binlog.Log(&binarylog.ServerHeader{ Header: h, }) ss.serverHeaderBinlogged = true } ss.binlog.Log(&binarylog.ServerMessage{ Message: data, }) } if ss.statsHandler != nil { ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) } return nil } func (ss *serverStream) RecvMsg(m interface{}) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() if ss.trInfo.tr != nil { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) ss.trInfo.tr.SetError() } } ss.mu.Unlock() } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) ss.t.WriteStatus(ss.s, st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // // This is not handled specifically now. User will return a final // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } if channelz.IsOn() && err == nil { ss.t.IncrMsgRecv() } }() var payInfo *payloadInfo if ss.statsHandler != nil || ss.binlog != nil { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { if err == io.EOF { if ss.binlog != nil { ss.binlog.Log(&binarylog.ClientHalfClose{}) } return err } if err == io.ErrUnexpectedEOF { err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) } return toRPCErr(err) } if ss.statsHandler != nil { ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ RecvTime: time.Now(), Payload: m, // TODO truncate large payload. 
Data: payInfo.uncompressedBytes, WireLength: payInfo.wireLength, Length: len(payInfo.uncompressedBytes), }) } if ss.binlog != nil { ss.binlog.Log(&binarylog.ClientMessage{ Message: payInfo.uncompressedBytes, }) } return nil } // MethodFromServerStream returns the method string for the input stream. // The returned string is in the format of "/service/method". func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } // prepareMsg returns the hdr, payload and data // using the compressors passed or using the // passed preparedmsg func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } // The input interface is not a prepared msg. // Marshal and Compress the data at this point data, err = encode(codec, m) if err != nil { return nil, nil, nil, err } compData, err := compress(data, cp, comp) if err != nil { return nil, nil, nil, err } hdr, payload = msgHeader(data, compData) return hdr, payload, data, nil } grpc-go-1.29.1/stress/000077500000000000000000000000001365033716300144745ustar00rootroot00000000000000grpc-go-1.29.1/stress/client/000077500000000000000000000000001365033716300157525ustar00rootroot00000000000000grpc-go-1.29.1/stress/client/main.go000066400000000000000000000247341365033716300172370ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate protoc -I ../grpc_testing --go_out=plugins=grpc:../grpc_testing ../grpc_testing/metrics.proto // client starts an interop client to do stress test and a metrics server to report qps. package main import ( "context" "flag" "fmt" "math/rand" "net" "strconv" "strings" "sync" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/status" metricspb "google.golang.org/grpc/stress/grpc_testing" "google.golang.org/grpc/testdata" ) var ( serverAddresses = flag.String("server_addresses", "localhost:8080", "a list of server addresses") testCases = flag.String("test_cases", "", "a list of test cases along with the relative weights") testDurationSecs = flag.Int("test_duration_secs", -1, "test duration in seconds") numChannelsPerServer = flag.Int("num_channels_per_server", 1, "Number of channels (i.e connections) to each server") numStubsPerChannel = flag.Int("num_stubs_per_channel", 1, "Number of client stubs per each connection to server") metricsPort = flag.Int("metrics_port", 8081, "The port at which the stress client exposes QPS metrics") useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") testCA = flag.Bool("use_test_ca", false, "Whether to replace platform root CAs with test CA as the CA root") tlsServerName = flag.String("server_host_override", "foo.test.google.fr", "The server name use to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") caFile = flag.String("ca_file", "", "The file containing the CA root cert file") ) // testCaseWithWeight contains the test case type and its weight. 
type testCaseWithWeight struct { name string weight int } // parseTestCases converts test case string to a list of struct testCaseWithWeight. func parseTestCases(testCaseString string) []testCaseWithWeight { testCaseStrings := strings.Split(testCaseString, ",") testCases := make([]testCaseWithWeight, len(testCaseStrings)) for i, str := range testCaseStrings { testCase := strings.Split(str, ":") if len(testCase) != 2 { panic(fmt.Sprintf("invalid test case with weight: %s", str)) } // Check if test case is supported. switch testCase[0] { case "empty_unary", "large_unary", "client_streaming", "server_streaming", "ping_pong", "empty_stream", "timeout_on_sleeping_server", "cancel_after_begin", "cancel_after_first_response", "status_code_and_message", "custom_metadata": default: panic(fmt.Sprintf("unknown test type: %s", testCase[0])) } testCases[i].name = testCase[0] w, err := strconv.Atoi(testCase[1]) if err != nil { panic(fmt.Sprintf("%v", err)) } testCases[i].weight = w } return testCases } // weightedRandomTestSelector defines a weighted random selector for test case types. type weightedRandomTestSelector struct { tests []testCaseWithWeight totalWeight int } // newWeightedRandomTestSelector constructs a weightedRandomTestSelector with the given list of testCaseWithWeight. func newWeightedRandomTestSelector(tests []testCaseWithWeight) *weightedRandomTestSelector { var totalWeight int for _, t := range tests { totalWeight += t.weight } rand.Seed(time.Now().UnixNano()) return &weightedRandomTestSelector{tests, totalWeight} } func (selector weightedRandomTestSelector) getNextTest() string { random := rand.Intn(selector.totalWeight) var weightSofar int for _, test := range selector.tests { weightSofar += test.weight if random < weightSofar { return test.name } } panic("no test case selected by weightedRandomTestSelector") } // gauge stores the qps of one interop client (one stub). 
type gauge struct { mutex sync.RWMutex val int64 } func (g *gauge) set(v int64) { g.mutex.Lock() defer g.mutex.Unlock() g.val = v } func (g *gauge) get() int64 { g.mutex.RLock() defer g.mutex.RUnlock() return g.val } // server implements metrics server functions. type server struct { mutex sync.RWMutex // gauges is a map from /stress_test/server_/channel_/stub_/qps to its qps gauge. gauges map[string]*gauge } // newMetricsServer returns a new metrics server. func newMetricsServer() *server { return &server{gauges: make(map[string]*gauge)} } // GetAllGauges returns all gauges. func (s *server) GetAllGauges(in *metricspb.EmptyMessage, stream metricspb.MetricsService_GetAllGaugesServer) error { s.mutex.RLock() defer s.mutex.RUnlock() for name, gauge := range s.gauges { if err := stream.Send(&metricspb.GaugeResponse{Name: name, Value: &metricspb.GaugeResponse_LongValue{LongValue: gauge.get()}}); err != nil { return err } } return nil } // GetGauge returns the gauge for the given name. func (s *server) GetGauge(ctx context.Context, in *metricspb.GaugeRequest) (*metricspb.GaugeResponse, error) { s.mutex.RLock() defer s.mutex.RUnlock() if g, ok := s.gauges[in.Name]; ok { return &metricspb.GaugeResponse{Name: in.Name, Value: &metricspb.GaugeResponse_LongValue{LongValue: g.get()}}, nil } return nil, status.Errorf(codes.InvalidArgument, "gauge with name %s not found", in.Name) } // createGauge creates a gauge using the given name in metrics server. func (s *server) createGauge(name string) *gauge { s.mutex.Lock() defer s.mutex.Unlock() if _, ok := s.gauges[name]; ok { // gauge already exists. 
panic(fmt.Sprintf("gauge %s already exists", name)) } var g gauge s.gauges[name] = &g return &g } func startServer(server *server, port int) { lis, err := net.Listen("tcp", ":"+strconv.Itoa(port)) if err != nil { grpclog.Fatalf("failed to listen: %v", err) } s := grpc.NewServer() metricspb.RegisterMetricsServiceServer(s, server) s.Serve(lis) } // performRPCs uses weightedRandomTestSelector to select test case and runs the tests. func performRPCs(gauge *gauge, conn *grpc.ClientConn, selector *weightedRandomTestSelector, stop <-chan bool) { client := testpb.NewTestServiceClient(conn) var numCalls int64 startTime := time.Now() for { test := selector.getNextTest() switch test { case "empty_unary": interop.DoEmptyUnaryCall(client, grpc.WaitForReady(true)) case "large_unary": interop.DoLargeUnaryCall(client, grpc.WaitForReady(true)) case "client_streaming": interop.DoClientStreaming(client, grpc.WaitForReady(true)) case "server_streaming": interop.DoServerStreaming(client, grpc.WaitForReady(true)) case "ping_pong": interop.DoPingPong(client, grpc.WaitForReady(true)) case "empty_stream": interop.DoEmptyStream(client, grpc.WaitForReady(true)) case "timeout_on_sleeping_server": interop.DoTimeoutOnSleepingServer(client, grpc.WaitForReady(true)) case "cancel_after_begin": interop.DoCancelAfterBegin(client, grpc.WaitForReady(true)) case "cancel_after_first_response": interop.DoCancelAfterFirstResponse(client, grpc.WaitForReady(true)) case "status_code_and_message": interop.DoStatusCodeAndMessage(client, grpc.WaitForReady(true)) case "custom_metadata": interop.DoCustomMetadata(client, grpc.WaitForReady(true)) } numCalls++ gauge.set(int64(float64(numCalls) / time.Since(startTime).Seconds())) select { case <-stop: return default: } } } func logParameterInfo(addresses []string, tests []testCaseWithWeight) { grpclog.Infof("server_addresses: %s", *serverAddresses) grpclog.Infof("test_cases: %s", *testCases) grpclog.Infof("test_duration_secs: %d", *testDurationSecs) 
grpclog.Infof("num_channels_per_server: %d", *numChannelsPerServer) grpclog.Infof("num_stubs_per_channel: %d", *numStubsPerChannel) grpclog.Infof("metrics_port: %d", *metricsPort) grpclog.Infof("use_tls: %t", *useTLS) grpclog.Infof("use_test_ca: %t", *testCA) grpclog.Infof("server_host_override: %s", *tlsServerName) grpclog.Infoln("addresses:") for i, addr := range addresses { grpclog.Infof("%d. %s\n", i+1, addr) } grpclog.Infoln("tests:") for i, test := range tests { grpclog.Infof("%d. %v\n", i+1, test) } } func newConn(address string, useTLS, testCA bool, tlsServerName string) (*grpc.ClientConn, error) { var opts []grpc.DialOption if useTLS { var sn string if tlsServerName != "" { sn = tlsServerName } var creds credentials.TransportCredentials if testCA { var err error if *caFile == "" { *caFile = testdata.Path("ca.pem") } creds, err = credentials.NewClientTLSFromFile(*caFile, sn) if err != nil { grpclog.Fatalf("Failed to create TLS credentials %v", err) } } else { creds = credentials.NewClientTLSFromCert(nil, sn) } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { opts = append(opts, grpc.WithInsecure()) } return grpc.Dial(address, opts...) 
} func main() { flag.Parse() addresses := strings.Split(*serverAddresses, ",") tests := parseTestCases(*testCases) logParameterInfo(addresses, tests) testSelector := newWeightedRandomTestSelector(tests) metricsServer := newMetricsServer() var wg sync.WaitGroup wg.Add(len(addresses) * *numChannelsPerServer * *numStubsPerChannel) stop := make(chan bool) for serverIndex, address := range addresses { for connIndex := 0; connIndex < *numChannelsPerServer; connIndex++ { conn, err := newConn(address, *useTLS, *testCA, *tlsServerName) if err != nil { grpclog.Fatalf("Fail to dial: %v", err) } defer conn.Close() for clientIndex := 0; clientIndex < *numStubsPerChannel; clientIndex++ { name := fmt.Sprintf("/stress_test/server_%d/channel_%d/stub_%d/qps", serverIndex+1, connIndex+1, clientIndex+1) go func() { defer wg.Done() g := metricsServer.createGauge(name) performRPCs(g, conn, testSelector, stop) }() } } } go startServer(metricsServer, *metricsPort) if *testDurationSecs > 0 { time.Sleep(time.Duration(*testDurationSecs) * time.Second) close(stop) } wg.Wait() grpclog.Infof(" ===== ALL DONE ===== ") } grpc-go-1.29.1/stress/grpc_testing/000077500000000000000000000000001365033716300171645ustar00rootroot00000000000000grpc-go-1.29.1/stress/grpc_testing/metrics.pb.go000066400000000000000000000314061365033716300215650ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: metrics.proto package grpc_testing import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. 
// A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // Response message containing the gauge name and value type GaugeResponse struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Types that are valid to be assigned to Value: // *GaugeResponse_LongValue // *GaugeResponse_DoubleValue // *GaugeResponse_StringValue Value isGaugeResponse_Value `protobuf_oneof:"value"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GaugeResponse) Reset() { *m = GaugeResponse{} } func (m *GaugeResponse) String() string { return proto.CompactTextString(m) } func (*GaugeResponse) ProtoMessage() {} func (*GaugeResponse) Descriptor() ([]byte, []int) { return fileDescriptor_6039342a2ba47b72, []int{0} } func (m *GaugeResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GaugeResponse.Unmarshal(m, b) } func (m *GaugeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GaugeResponse.Marshal(b, m, deterministic) } func (m *GaugeResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GaugeResponse.Merge(m, src) } func (m *GaugeResponse) XXX_Size() int { return xxx_messageInfo_GaugeResponse.Size(m) } func (m *GaugeResponse) XXX_DiscardUnknown() { xxx_messageInfo_GaugeResponse.DiscardUnknown(m) } var xxx_messageInfo_GaugeResponse proto.InternalMessageInfo func (m *GaugeResponse) GetName() string { if m != nil { return m.Name } return "" } type isGaugeResponse_Value interface { isGaugeResponse_Value() } type GaugeResponse_LongValue struct { LongValue int64 `protobuf:"varint,2,opt,name=long_value,json=longValue,proto3,oneof"` } type GaugeResponse_DoubleValue struct { DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` } type GaugeResponse_StringValue struct { StringValue string 
`protobuf:"bytes,4,opt,name=string_value,json=stringValue,proto3,oneof"` } func (*GaugeResponse_LongValue) isGaugeResponse_Value() {} func (*GaugeResponse_DoubleValue) isGaugeResponse_Value() {} func (*GaugeResponse_StringValue) isGaugeResponse_Value() {} func (m *GaugeResponse) GetValue() isGaugeResponse_Value { if m != nil { return m.Value } return nil } func (m *GaugeResponse) GetLongValue() int64 { if x, ok := m.GetValue().(*GaugeResponse_LongValue); ok { return x.LongValue } return 0 } func (m *GaugeResponse) GetDoubleValue() float64 { if x, ok := m.GetValue().(*GaugeResponse_DoubleValue); ok { return x.DoubleValue } return 0 } func (m *GaugeResponse) GetStringValue() string { if x, ok := m.GetValue().(*GaugeResponse_StringValue); ok { return x.StringValue } return "" } // XXX_OneofWrappers is for the internal use of the proto package. func (*GaugeResponse) XXX_OneofWrappers() []interface{} { return []interface{}{ (*GaugeResponse_LongValue)(nil), (*GaugeResponse_DoubleValue)(nil), (*GaugeResponse_StringValue)(nil), } } // Request message containing the gauge name type GaugeRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *GaugeRequest) Reset() { *m = GaugeRequest{} } func (m *GaugeRequest) String() string { return proto.CompactTextString(m) } func (*GaugeRequest) ProtoMessage() {} func (*GaugeRequest) Descriptor() ([]byte, []int) { return fileDescriptor_6039342a2ba47b72, []int{1} } func (m *GaugeRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GaugeRequest.Unmarshal(m, b) } func (m *GaugeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GaugeRequest.Marshal(b, m, deterministic) } func (m *GaugeRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GaugeRequest.Merge(m, src) } func (m *GaugeRequest) XXX_Size() int { return 
xxx_messageInfo_GaugeRequest.Size(m) } func (m *GaugeRequest) XXX_DiscardUnknown() { xxx_messageInfo_GaugeRequest.DiscardUnknown(m) } var xxx_messageInfo_GaugeRequest proto.InternalMessageInfo func (m *GaugeRequest) GetName() string { if m != nil { return m.Name } return "" } type EmptyMessage struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *EmptyMessage) Reset() { *m = EmptyMessage{} } func (m *EmptyMessage) String() string { return proto.CompactTextString(m) } func (*EmptyMessage) ProtoMessage() {} func (*EmptyMessage) Descriptor() ([]byte, []int) { return fileDescriptor_6039342a2ba47b72, []int{2} } func (m *EmptyMessage) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EmptyMessage.Unmarshal(m, b) } func (m *EmptyMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EmptyMessage.Marshal(b, m, deterministic) } func (m *EmptyMessage) XXX_Merge(src proto.Message) { xxx_messageInfo_EmptyMessage.Merge(m, src) } func (m *EmptyMessage) XXX_Size() int { return xxx_messageInfo_EmptyMessage.Size(m) } func (m *EmptyMessage) XXX_DiscardUnknown() { xxx_messageInfo_EmptyMessage.DiscardUnknown(m) } var xxx_messageInfo_EmptyMessage proto.InternalMessageInfo func init() { proto.RegisterType((*GaugeResponse)(nil), "grpc.testing.GaugeResponse") proto.RegisterType((*GaugeRequest)(nil), "grpc.testing.GaugeRequest") proto.RegisterType((*EmptyMessage)(nil), "grpc.testing.EmptyMessage") } func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } var fileDescriptor_6039342a2ba47b72 = []byte{ // 256 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0x3f, 0x4f, 0xc3, 0x30, 0x10, 0xc5, 0x6b, 0x5a, 0xfe, 0xf4, 0x70, 0x3b, 0x78, 0xaa, 0xca, 0x40, 0x14, 0x96, 0x4c, 0x11, 0x82, 0x4f, 0x00, 0x08, 0xa5, 0x0c, 0x5d, 0x82, 0xc4, 0x8a, 0xd2, 0x70, 0xb2, 0x22, 0x39, 0x71, 0xf0, 0x5d, 0x2a, 
0xf1, 0x49, 0x58, 0xf9, 0xa8, 0xc8, 0x4e, 0x55, 0xa5, 0x08, 0x75, 0xb3, 0x7e, 0xf7, 0xfc, 0xfc, 0x9e, 0x0f, 0x66, 0x35, 0xb2, 0xab, 0x4a, 0x4a, 0x5b, 0x67, 0xd9, 0x2a, 0xa9, 0x5d, 0x5b, 0xa6, 0x8c, 0xc4, 0x55, 0xa3, 0xe3, 0x6f, 0x01, 0xb3, 0xac, 0xe8, 0x34, 0xe6, 0x48, 0xad, 0x6d, 0x08, 0x95, 0x82, 0x49, 0x53, 0xd4, 0xb8, 0x10, 0x91, 0x48, 0xa6, 0x79, 0x38, 0xab, 0x6b, 0x00, 0x63, 0x1b, 0xfd, 0xbe, 0x2d, 0x4c, 0x87, 0x8b, 0x93, 0x48, 0x24, 0xe3, 0xd5, 0x28, 0x9f, 0x7a, 0xf6, 0xe6, 0x91, 0xba, 0x01, 0xf9, 0x61, 0xbb, 0x8d, 0xc1, 0x9d, 0x64, 0x1c, 0x89, 0x44, 0xac, 0x46, 0xf9, 0x65, 0x4f, 0xf7, 0x22, 0x62, 0x57, 0xed, 0x7d, 0x26, 0xfe, 0x05, 0x2f, 0xea, 0x69, 0x10, 0x3d, 0x9e, 0xc3, 0x69, 0x98, 0xc6, 0x31, 0xc8, 0x5d, 0xb0, 0xcf, 0x0e, 0x89, 0xff, 0xcb, 0x15, 0xcf, 0x41, 0x3e, 0xd7, 0x2d, 0x7f, 0xad, 0x91, 0xa8, 0xd0, 0x78, 0xf7, 0x23, 0x60, 0xbe, 0xee, 0xdb, 0xbe, 0xa2, 0xdb, 0x56, 0x25, 0xaa, 0x17, 0x90, 0x19, 0xf2, 0x83, 0x31, 0xc1, 0x8c, 0xd4, 0x32, 0x1d, 0xf6, 0x4f, 0x87, 0xd7, 0x97, 0x57, 0x87, 0xb3, 0x83, 0x7f, 0xb9, 0x15, 0xea, 0x09, 0x2e, 0x32, 0xe4, 0x40, 0xff, 0xda, 0x0c, 0x93, 0x1e, 0xb5, 0xd9, 0x9c, 0x85, 0x2d, 0xdc, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x7d, 0xb2, 0xc9, 0x96, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // MetricsServiceClient is the client API for MetricsService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type MetricsServiceClient interface { // Returns the values of all the gauges that are currently being maintained by // the service GetAllGauges(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (MetricsService_GetAllGaugesClient, error) // Returns the value of one gauge GetGauge(ctx context.Context, in *GaugeRequest, opts ...grpc.CallOption) (*GaugeResponse, error) } type metricsServiceClient struct { cc grpc.ClientConnInterface } func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient { return &metricsServiceClient{cc} } func (c *metricsServiceClient) GetAllGauges(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (MetricsService_GetAllGaugesClient, error) { stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/grpc.testing.MetricsService/GetAllGauges", opts...) if err != nil { return nil, err } x := &metricsServiceGetAllGaugesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type MetricsService_GetAllGaugesClient interface { Recv() (*GaugeResponse, error) grpc.ClientStream } type metricsServiceGetAllGaugesClient struct { grpc.ClientStream } func (x *metricsServiceGetAllGaugesClient) Recv() (*GaugeResponse, error) { m := new(GaugeResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *metricsServiceClient) GetGauge(ctx context.Context, in *GaugeRequest, opts ...grpc.CallOption) (*GaugeResponse, error) { out := new(GaugeResponse) err := c.cc.Invoke(ctx, "/grpc.testing.MetricsService/GetGauge", in, out, opts...) if err != nil { return nil, err } return out, nil } // MetricsServiceServer is the server API for MetricsService service. 
type MetricsServiceServer interface { // Returns the values of all the gauges that are currently being maintained by // the service GetAllGauges(*EmptyMessage, MetricsService_GetAllGaugesServer) error // Returns the value of one gauge GetGauge(context.Context, *GaugeRequest) (*GaugeResponse, error) } // UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations. type UnimplementedMetricsServiceServer struct { } func (*UnimplementedMetricsServiceServer) GetAllGauges(req *EmptyMessage, srv MetricsService_GetAllGaugesServer) error { return status.Errorf(codes.Unimplemented, "method GetAllGauges not implemented") } func (*UnimplementedMetricsServiceServer) GetGauge(ctx context.Context, req *GaugeRequest) (*GaugeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetGauge not implemented") } func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { s.RegisterService(&_MetricsService_serviceDesc, srv) } func _MetricsService_GetAllGauges_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(EmptyMessage) if err := stream.RecvMsg(m); err != nil { return err } return srv.(MetricsServiceServer).GetAllGauges(m, &metricsServiceGetAllGaugesServer{stream}) } type MetricsService_GetAllGaugesServer interface { Send(*GaugeResponse) error grpc.ServerStream } type metricsServiceGetAllGaugesServer struct { grpc.ServerStream } func (x *metricsServiceGetAllGaugesServer) Send(m *GaugeResponse) error { return x.ServerStream.SendMsg(m) } func _MetricsService_GetGauge_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GaugeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MetricsServiceServer).GetGauge(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.testing.MetricsService/GetGauge", } handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { return srv.(MetricsServiceServer).GetGauge(ctx, req.(*GaugeRequest)) } return interceptor(ctx, in, info, handler) } var _MetricsService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.MetricsService", HandlerType: (*MetricsServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "GetGauge", Handler: _MetricsService_GetGauge_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "GetAllGauges", Handler: _MetricsService_GetAllGauges_Handler, ServerStreams: true, }, }, Metadata: "metrics.proto", } grpc-go-1.29.1/stress/grpc_testing/metrics.proto000066400000000000000000000027441365033716300217260ustar00rootroot00000000000000// Copyright 2015-2016 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Contains the definitions for a metrics service and the type of metrics // exposed by the service. // // Currently, 'Gauge' (i.e a metric that represents the measured value of // something at an instant of time) is the only metric type supported by the // service. 
syntax = "proto3"; package grpc.testing; // Response message containing the gauge name and value message GaugeResponse { string name = 1; oneof value { int64 long_value = 2; double double_value = 3; string string_value = 4; } } // Request message containing the gauge name message GaugeRequest { string name = 1; } message EmptyMessage {} service MetricsService { // Returns the values of all the gauges that are currently being maintained by // the service rpc GetAllGauges(EmptyMessage) returns (stream GaugeResponse); // Returns the value of one gauge rpc GetGauge(GaugeRequest) returns (GaugeResponse); } grpc-go-1.29.1/stress/metrics_client/000077500000000000000000000000001365033716300175005ustar00rootroot00000000000000grpc-go-1.29.1/stress/metrics_client/main.go000066400000000000000000000042511365033716300207550ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package main import ( "context" "flag" "fmt" "io" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" metricspb "google.golang.org/grpc/stress/grpc_testing" ) var ( metricsServerAddress = flag.String("metrics_server_address", "", "The metrics server addresses in the format :") totalOnly = flag.Bool("total_only", false, "If true, this prints only the total value of all gauges") ) func printMetrics(client metricspb.MetricsServiceClient, totalOnly bool) { stream, err := client.GetAllGauges(context.Background(), &metricspb.EmptyMessage{}) if err != nil { grpclog.Fatalf("failed to call GetAllGauges: %v", err) } var ( overallQPS int64 rpcStatus error ) for { gaugeResponse, err := stream.Recv() if err != nil { rpcStatus = err break } if _, ok := gaugeResponse.GetValue().(*metricspb.GaugeResponse_LongValue); !ok { panic(fmt.Sprintf("gauge %s is not a long value", gaugeResponse.Name)) } v := gaugeResponse.GetLongValue() if !totalOnly { grpclog.Infof("%s: %d", gaugeResponse.Name, v) } overallQPS += v } if rpcStatus != io.EOF { grpclog.Fatalf("failed to finish server streaming: %v", rpcStatus) } grpclog.Infof("overall qps: %d", overallQPS) } func main() { flag.Parse() if *metricsServerAddress == "" { grpclog.Fatalf("Metrics server address is empty.") } conn, err := grpc.Dial(*metricsServerAddress, grpc.WithInsecure()) if err != nil { grpclog.Fatalf("cannot connect to metrics server: %v", err) } defer conn.Close() c := metricspb.NewMetricsServiceClient(conn) printMetrics(c, *totalOnly) } grpc-go-1.29.1/tap/000077500000000000000000000000001365033716300137355ustar00rootroot00000000000000grpc-go-1.29.1/tap/tap.go000066400000000000000000000040401365033716300150460ustar00rootroot00000000000000/* * * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package tap defines the function handles which are executed on the transport // layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. package tap import ( "context" ) // Info defines the relevant information needed by the handles. type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string // TODO: More to be added. } // ServerInHandle defines the function which runs before a new stream is created // on the server side. If it returns a non-nil error, the stream will not be // created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. // The client will receive an RPC error "code = Unavailable, desc = stream // terminated by RST_STREAM with error code: REFUSED_STREAM". // // It's intended to be used in situations where you don't want to waste the // resources to accept the new stream (e.g. rate-limiting). And the content of // the error will be ignored and won't be sent back to the client. For other // general usages, please use interceptors. // // Note that it is executed in the per-connection I/O goroutine(s) instead of // per-RPC goroutine. Therefore, users should NOT have any // blocking/time-consuming work in this handle. Otherwise all the RPCs would // slow down. Also, for the same reason, this handle won't be called // concurrently by gRPC. 
type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) grpc-go-1.29.1/test/000077500000000000000000000000001365033716300141305ustar00rootroot00000000000000grpc-go-1.29.1/test/balancer_test.go000066400000000000000000000240661365033716300172750ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package test import ( "context" "reflect" "testing" "time" "google.golang.org/grpc" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" ) const testBalancerName = "testbalancer" // testBalancer creates one subconn with the first address from resolved // addresses. // // It's used to test options for NewSubConn are applies correctly. 
type testBalancer struct { cc balancer.ClientConn sc balancer.SubConn newSubConnOptions balancer.NewSubConnOptions pickInfos []balancer.PickInfo doneInfo []balancer.DoneInfo } func (b *testBalancer) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { b.cc = cc return b } func (*testBalancer) Name() string { return testBalancerName } func (b *testBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { // Only create a subconn at the first time. if err == nil && b.sc == nil { b.sc, err = b.cc.NewSubConn(addrs, b.newSubConnOptions) if err != nil { grpclog.Errorf("testBalancer: failed to NewSubConn: %v", err) return } b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: &picker{sc: b.sc, bal: b}}) b.sc.Connect() } } func (b *testBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { grpclog.Infof("testBalancer: HandleSubConnStateChange: %p, %v", sc, s) if b.sc != sc { grpclog.Infof("testBalancer: ignored state change because sc is not recognized") return } if s == connectivity.Shutdown { b.sc = nil return } switch s { case connectivity.Ready, connectivity.Idle: b.cc.UpdateState(balancer.State{ConnectivityState: s, Picker: &picker{sc: sc, bal: b}}) case connectivity.Connecting: b.cc.UpdateState(balancer.State{ConnectivityState: s, Picker: &picker{err: balancer.ErrNoSubConnAvailable, bal: b}}) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ConnectivityState: s, Picker: &picker{err: balancer.ErrTransientFailure, bal: b}}) } } func (b *testBalancer) Close() { } type picker struct { err error sc balancer.SubConn bal *testBalancer } func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { if p.err != nil { return balancer.PickResult{}, p.err } info.Ctx = nil // Do not validate context. 
p.bal.pickInfos = append(p.bal.pickInfos, info) return balancer.PickResult{SubConn: p.sc, Done: func(d balancer.DoneInfo) { p.bal.doneInfo = append(p.bal.doneInfo, d) }}, nil } func (s) TestCredsBundleFromBalancer(t *testing.T) { balancer.Register(&testBalancer{ newSubConnOptions: balancer.NewSubConnOptions{ CredsBundle: &testCredsBundle{}, }, }) te := newTest(t, env{name: "creds-bundle", network: "tcp", balancer: ""}) te.tapHandle = authHandle te.customDialOptions = []grpc.DialOption{ grpc.WithBalancerName(testBalancerName), } creds, err := credentials.NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key")) if err != nil { t.Fatalf("Failed to generate credentials %v", err) } te.customServerOptions = []grpc.ServerOption{ grpc.Creds(creds), } te.startServer(&testServer{}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("Test failed. Reason: %v", err) } } func (s) TestDoneInfo(t *testing.T) { for _, e := range listTestEnv() { testDoneInfo(t, e) } } func testDoneInfo(t *testing.T, e env) { te := newTest(t, e) b := &testBalancer{} balancer.Register(b) te.customDialOptions = []grpc.DialOption{ grpc.WithBalancerName(testBalancerName), } te.userAgent = failAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() wantErr := detailedError if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !testutils.StatusErrEqual(err, wantErr) { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr) } if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, ", ctx, err) } if len(b.doneInfo) < 1 || !testutils.StatusErrEqual(b.doneInfo[0].Err, wantErr) { t.Fatalf("b.doneInfo = %v; want 
b.doneInfo[0].Err = %v", b.doneInfo, wantErr) } if len(b.doneInfo) < 2 || !reflect.DeepEqual(b.doneInfo[1].Trailer, testTrailerMetadata) { t.Fatalf("b.doneInfo = %v; want b.doneInfo[1].Trailer = %v", b.doneInfo, testTrailerMetadata) } if len(b.pickInfos) != len(b.doneInfo) { t.Fatalf("Got %d picks, but %d doneInfo, want equal amount", len(b.pickInfos), len(b.doneInfo)) } // To test done() is always called, even if it's returned with a non-Ready // SubConn. // // Stop server and at the same time send RPCs. There are chances that picker // is not updated in time, causing a non-Ready SubConn to be returned. finished := make(chan struct{}) go func() { for i := 0; i < 20; i++ { tc.UnaryCall(ctx, &testpb.SimpleRequest{}) } close(finished) }() te.srv.Stop() <-finished if len(b.pickInfos) != len(b.doneInfo) { t.Fatalf("Got %d picks, %d doneInfo, want equal amount", len(b.pickInfos), len(b.doneInfo)) } } const loadMDKey = "X-Endpoint-Load-Metrics-Bin" type testLoadParser struct{} func (*testLoadParser) Parse(md metadata.MD) interface{} { vs := md.Get(loadMDKey) if len(vs) == 0 { return nil } return vs[0] } func init() { balancerload.SetParser(&testLoadParser{}) } func (s) TestDoneLoads(t *testing.T) { for _, e := range listTestEnv() { testDoneLoads(t, e) } } func testDoneLoads(t *testing.T, e env) { b := &testBalancer{} balancer.Register(b) const testLoad = "test-load-,-should-be-orca" ss := &stubServer{ emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { grpc.SetTrailer(ctx, metadata.Pairs(loadMDKey, testLoad)) return &testpb.Empty{}, nil }, } if err := ss.Start(nil, grpc.WithBalancerName(testBalancerName)); err != nil { t.Fatalf("error starting testing server: %v", err) } defer ss.Stop() tc := testpb.NewTestServiceClient(ss.cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, nil) } 
piWant := []balancer.PickInfo{ {FullMethodName: "/grpc.testing.TestService/EmptyCall"}, } if !reflect.DeepEqual(b.pickInfos, piWant) { t.Fatalf("b.pickInfos = %v; want %v", b.pickInfos, piWant) } if len(b.doneInfo) < 1 { t.Fatalf("b.doneInfo = %v, want length 1", b.doneInfo) } gotLoad, _ := b.doneInfo[0].ServerLoad.(string) if gotLoad != testLoad { t.Fatalf("b.doneInfo[0].ServerLoad = %v; want = %v", b.doneInfo[0].ServerLoad, testLoad) } } const testBalancerKeepAddressesName = "testbalancer-keepingaddresses" // testBalancerKeepAddresses keeps the addresses in the builder instead of // creating SubConns. // // It's used to test the addresses balancer gets are correct. type testBalancerKeepAddresses struct { addrsChan chan []resolver.Address } func newTestBalancerKeepAddresses() *testBalancerKeepAddresses { return &testBalancerKeepAddresses{ addrsChan: make(chan []resolver.Address, 10), } } func (b *testBalancerKeepAddresses) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { return b } func (*testBalancerKeepAddresses) Name() string { return testBalancerKeepAddressesName } func (b *testBalancerKeepAddresses) HandleResolvedAddrs(addrs []resolver.Address, err error) { b.addrsChan <- addrs } func (testBalancerKeepAddresses) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { panic("not used") } func (testBalancerKeepAddresses) Close() { } // Make sure that non-grpclb balancers don't get grpclb addresses even if name // resolver sends them func (s) TestNonGRPCLBBalancerGetsNoGRPCLBAddress(t *testing.T) { r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() b := newTestBalancerKeepAddresses() balancer.Register(b) cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithBalancerName(b.Name())) if err != nil { t.Fatalf("failed to dial: %v", err) } defer cc.Close() grpclbAddresses := []resolver.Address{{ Addr: "grpc.lb.com", Type: resolver.GRPCLB, ServerName: "grpc.lb.com", }} 
nonGRPCLBAddresses := []resolver.Address{{ Addr: "localhost", Type: resolver.Backend, }} r.UpdateState(resolver.State{ Addresses: nonGRPCLBAddresses, }) if got := <-b.addrsChan; !reflect.DeepEqual(got, nonGRPCLBAddresses) { t.Fatalf("With only backend addresses, balancer got addresses %v, want %v", got, nonGRPCLBAddresses) } r.UpdateState(resolver.State{ Addresses: grpclbAddresses, }) if got := <-b.addrsChan; len(got) != 0 { t.Fatalf("With only grpclb addresses, balancer got addresses %v, want empty", got) } r.UpdateState(resolver.State{ Addresses: append(grpclbAddresses, nonGRPCLBAddresses...), }) if got := <-b.addrsChan; !reflect.DeepEqual(got, nonGRPCLBAddresses) { t.Fatalf("With both backend and grpclb addresses, balancer got addresses %v, want %v", got, nonGRPCLBAddresses) } } grpc-go-1.29.1/test/bufconn/000077500000000000000000000000001365033716300155625ustar00rootroot00000000000000grpc-go-1.29.1/test/bufconn/bufconn.go000066400000000000000000000142451365033716300175510ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package bufconn provides a net.Conn implemented by a buffer and related // dialing and listening functionality. package bufconn import ( "fmt" "io" "net" "sync" "time" ) // Listener implements a net.Listener that creates local, buffered net.Conns // via its Accept and Dial method. 
type Listener struct { mu sync.Mutex sz int ch chan net.Conn done chan struct{} } // Implementation of net.Error providing timeout type netErrorTimeout struct { error } func (e netErrorTimeout) Timeout() bool { return true } func (e netErrorTimeout) Temporary() bool { return false } var errClosed = fmt.Errorf("closed") var errTimeout net.Error = netErrorTimeout{error: fmt.Errorf("i/o timeout")} // Listen returns a Listener that can only be contacted by its own Dialers and // creates buffered connections between the two. func Listen(sz int) *Listener { return &Listener{sz: sz, ch: make(chan net.Conn), done: make(chan struct{})} } // Accept blocks until Dial is called, then returns a net.Conn for the server // half of the connection. func (l *Listener) Accept() (net.Conn, error) { select { case <-l.done: return nil, errClosed case c := <-l.ch: return c, nil } } // Close stops the listener. func (l *Listener) Close() error { l.mu.Lock() defer l.mu.Unlock() select { case <-l.done: // Already closed. break default: close(l.done) } return nil } // Addr reports the address of the listener. func (l *Listener) Addr() net.Addr { return addr{} } // Dial creates an in-memory full-duplex network connection, unblocks Accept by // providing it the server half of the connection, and returns the client half // of the connection. func (l *Listener) Dial() (net.Conn, error) { p1, p2 := newPipe(l.sz), newPipe(l.sz) select { case <-l.done: return nil, errClosed case l.ch <- &conn{p1, p2}: return &conn{p2, p1}, nil } } type pipe struct { mu sync.Mutex // buf contains the data in the pipe. It is a ring buffer of fixed capacity, // with r and w pointing to the offset to read and write, respsectively. // // Data is read between [r, w) and written to [w, r), wrapping around the end // of the slice if necessary. // // The buffer is empty if r == len(buf), otherwise if r == w, it is full. // // w and r are always in the range [0, cap(buf)) and [0, len(buf)]. 
buf []byte w, r int wwait sync.Cond rwait sync.Cond // Indicate that a write/read timeout has occurred wtimedout bool rtimedout bool wtimer *time.Timer rtimer *time.Timer closed bool writeClosed bool } func newPipe(sz int) *pipe { p := &pipe{buf: make([]byte, 0, sz)} p.wwait.L = &p.mu p.rwait.L = &p.mu p.wtimer = time.AfterFunc(0, func() {}) p.rtimer = time.AfterFunc(0, func() {}) return p } func (p *pipe) empty() bool { return p.r == len(p.buf) } func (p *pipe) full() bool { return p.r < len(p.buf) && p.r == p.w } func (p *pipe) Read(b []byte) (n int, err error) { p.mu.Lock() defer p.mu.Unlock() // Block until p has data. for { if p.closed { return 0, io.ErrClosedPipe } if !p.empty() { break } if p.writeClosed { return 0, io.EOF } if p.rtimedout { return 0, errTimeout } p.rwait.Wait() } wasFull := p.full() n = copy(b, p.buf[p.r:len(p.buf)]) p.r += n if p.r == cap(p.buf) { p.r = 0 p.buf = p.buf[:p.w] } // Signal a blocked writer, if any if wasFull { p.wwait.Signal() } return n, nil } func (p *pipe) Write(b []byte) (n int, err error) { p.mu.Lock() defer p.mu.Unlock() if p.closed { return 0, io.ErrClosedPipe } for len(b) > 0 { // Block until p is not full. for { if p.closed || p.writeClosed { return 0, io.ErrClosedPipe } if !p.full() { break } if p.wtimedout { return 0, errTimeout } p.wwait.Wait() } wasEmpty := p.empty() end := cap(p.buf) if p.w < p.r { end = p.r } x := copy(p.buf[p.w:end], b) b = b[x:] n += x p.w += x if p.w > len(p.buf) { p.buf = p.buf[:p.w] } if p.w == cap(p.buf) { p.w = 0 } // Signal a blocked reader, if any. if wasEmpty { p.rwait.Signal() } } return n, nil } func (p *pipe) Close() error { p.mu.Lock() defer p.mu.Unlock() p.closed = true // Signal all blocked readers and writers to return an error. p.rwait.Broadcast() p.wwait.Broadcast() return nil } func (p *pipe) closeWrite() error { p.mu.Lock() defer p.mu.Unlock() p.writeClosed = true // Signal all blocked readers and writers to return an error. 
p.rwait.Broadcast() p.wwait.Broadcast() return nil } type conn struct { io.Reader io.Writer } func (c *conn) Close() error { err1 := c.Reader.(*pipe).Close() err2 := c.Writer.(*pipe).closeWrite() if err1 != nil { return err1 } return err2 } func (c *conn) SetDeadline(t time.Time) error { c.SetReadDeadline(t) c.SetWriteDeadline(t) return nil } func (c *conn) SetReadDeadline(t time.Time) error { p := c.Reader.(*pipe) p.mu.Lock() defer p.mu.Unlock() p.rtimer.Stop() p.rtimedout = false if !t.IsZero() { p.rtimer = time.AfterFunc(time.Until(t), func() { p.mu.Lock() defer p.mu.Unlock() p.rtimedout = true p.rwait.Broadcast() }) } return nil } func (c *conn) SetWriteDeadline(t time.Time) error { p := c.Writer.(*pipe) p.mu.Lock() defer p.mu.Unlock() p.wtimer.Stop() p.wtimedout = false if !t.IsZero() { p.wtimer = time.AfterFunc(time.Until(t), func() { p.mu.Lock() defer p.mu.Unlock() p.wtimedout = true p.wwait.Broadcast() }) } return nil } func (*conn) LocalAddr() net.Addr { return addr{} } func (*conn) RemoteAddr() net.Addr { return addr{} } type addr struct{} func (addr) Network() string { return "bufconn" } func (addr) String() string { return "bufconn" } grpc-go-1.29.1/test/bufconn/bufconn_test.go000066400000000000000000000173311365033716300206070ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package bufconn import ( "fmt" "io" "net" "reflect" "testing" "time" "google.golang.org/grpc/internal/grpctest" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func testRW(r io.Reader, w io.Writer) error { for i := 0; i < 20; i++ { d := make([]byte, i) for j := 0; j < i; j++ { d[j] = byte(i - j) } var rn int var rerr error b := make([]byte, i) done := make(chan struct{}) go func() { for rn < len(b) && rerr == nil { var x int x, rerr = r.Read(b[rn:]) rn += x } close(done) }() wn, werr := w.Write(d) if wn != i || werr != nil { return fmt.Errorf("%v: w.Write(%v) = %v, %v; want %v, nil", i, d, wn, werr, i) } select { case <-done: case <-time.After(500 * time.Millisecond): return fmt.Errorf("%v: r.Read never returned", i) } if rn != i || rerr != nil { return fmt.Errorf("%v: r.Read = %v, %v; want %v, nil", i, rn, rerr, i) } if !reflect.DeepEqual(b, d) { return fmt.Errorf("%v: r.Read read %v; want %v", i, b, d) } } return nil } func (s) TestPipe(t *testing.T) { p := newPipe(10) if err := testRW(p, p); err != nil { t.Fatalf(err.Error()) } } func (s) TestPipeClose(t *testing.T) { p := newPipe(10) p.Close() if _, err := p.Write(nil); err != io.ErrClosedPipe { t.Fatalf("p.Write = _, %v; want _, %v", err, io.ErrClosedPipe) } if _, err := p.Read(nil); err != io.ErrClosedPipe { t.Fatalf("p.Read = _, %v; want _, %v", err, io.ErrClosedPipe) } } func (s) TestConn(t *testing.T) { p1, p2 := newPipe(10), newPipe(10) c1, c2 := &conn{p1, p2}, &conn{p2, p1} if err := testRW(c1, c2); err != nil { t.Fatalf(err.Error()) } if err := testRW(c2, c1); err != nil { t.Fatalf(err.Error()) } } func (s) TestConnCloseWithData(t *testing.T) { lis := Listen(7) errChan := make(chan error, 1) var lisConn net.Conn go func() { var err error if lisConn, err = lis.Accept(); err != nil { errChan <- err } close(errChan) }() dialConn, err := lis.Dial() if err != nil { t.Fatalf("Dial error: %v", err) } if err := <-errChan; err != nil { t.Fatalf("Listen error: 
%v", err) } // Write some data on both sides of the connection. n, err := dialConn.Write([]byte("hello")) if n != 5 || err != nil { t.Fatalf("dialConn.Write([]byte{\"hello\"}) = %v, %v; want 5, ", n, err) } n, err = lisConn.Write([]byte("hello")) if n != 5 || err != nil { t.Fatalf("lisConn.Write([]byte{\"hello\"}) = %v, %v; want 5, ", n, err) } // Close dial-side; writes from either side should fail. dialConn.Close() if _, err := lisConn.Write([]byte("hello")); err != io.ErrClosedPipe { t.Fatalf("lisConn.Write() = _, ; want _, ") } if _, err := dialConn.Write([]byte("hello")); err != io.ErrClosedPipe { t.Fatalf("dialConn.Write() = _, ; want _, ") } // Read from both sides; reads on lisConn should work, but dialConn should // fail. buf := make([]byte, 6) if _, err := dialConn.Read(buf); err != io.ErrClosedPipe { t.Fatalf("dialConn.Read(buf) = %v, %v; want _, io.ErrClosedPipe", n, err) } n, err = lisConn.Read(buf) if n != 5 || err != nil { t.Fatalf("lisConn.Read(buf) = %v, %v; want 5, ", n, err) } } func (s) TestListener(t *testing.T) { l := Listen(7) var s net.Conn var serr error done := make(chan struct{}) go func() { s, serr = l.Accept() close(done) }() c, cerr := l.Dial() <-done if cerr != nil || serr != nil { t.Fatalf("cerr = %v, serr = %v; want nil, nil", cerr, serr) } if err := testRW(c, s); err != nil { t.Fatalf(err.Error()) } if err := testRW(s, c); err != nil { t.Fatalf(err.Error()) } } func (s) TestCloseWhileDialing(t *testing.T) { l := Listen(7) var c net.Conn var err error done := make(chan struct{}) go func() { c, err = l.Dial() close(done) }() l.Close() <-done if c != nil || err != errClosed { t.Fatalf("c, err = %v, %v; want nil, %v", c, err, errClosed) } } func (s) TestCloseWhileAccepting(t *testing.T) { l := Listen(7) var c net.Conn var err error done := make(chan struct{}) go func() { c, err = l.Accept() close(done) }() l.Close() <-done if c != nil || err != errClosed { t.Fatalf("c, err = %v, %v; want nil, %v", c, err, errClosed) } } func (s) 
TestDeadline(t *testing.T) { sig := make(chan error, 2) blockingWrite := func(conn net.Conn) { _, err := conn.Write([]byte("0123456789")) sig <- err } blockingRead := func(conn net.Conn) { _, err := conn.Read(make([]byte, 10)) sig <- err } p1, p2 := newPipe(5), newPipe(5) c1, c2 := &conn{p1, p1}, &conn{p2, p2} defer c1.Close() defer c2.Close() // Test with deadline c1.SetWriteDeadline(time.Now()) go blockingWrite(c1) select { case <-time.After(100 * time.Millisecond): t.Fatalf("Write timeout timed out, c = %v", c1) case err := <-sig: if netErr, ok := err.(net.Error); ok { if !netErr.Timeout() { t.Fatalf("Write returned unexpected error, c = %v, err = %v", c1, netErr) } } else { t.Fatalf("Write returned unexpected error, c = %v, err = %v", c1, err) } } c2.SetReadDeadline(time.Now()) go blockingRead(c2) select { case <-time.After(100 * time.Millisecond): t.Fatalf("Read timeout timed out, c = %v", c2) case err := <-sig: if netErr, ok := err.(net.Error); ok { if !netErr.Timeout() { t.Fatalf("Read returned unexpected error, c = %v, err = %v", c2, netErr) } } else { t.Fatalf("Read returned unexpected error, c = %v, err = %v", c2, err) } } // Test timing out pending reads/writes c1.SetWriteDeadline(time.Time{}) c2.SetReadDeadline(time.Time{}) go blockingWrite(c1) select { case <-time.After(100 * time.Millisecond): case err := <-sig: t.Fatalf("Write returned before timeout, err = %v", err) } c1.SetWriteDeadline(time.Now()) select { case <-time.After(100 * time.Millisecond): t.Fatalf("Write timeout timed out, c = %v", c1) case err := <-sig: if netErr, ok := err.(net.Error); ok { if !netErr.Timeout() { t.Fatalf("Write returned unexpected error, c = %v, err = %v", c1, netErr) } } else { t.Fatalf("Write returned unexpected error, c = %v, err = %v", c1, err) } } go blockingRead(c2) select { case <-time.After(100 * time.Millisecond): case err := <-sig: t.Fatalf("Read returned before timeout, err = %v", err) } c2.SetReadDeadline(time.Now()) select { case <-time.After(100 * 
time.Millisecond): t.Fatalf("Read timeout timed out, c = %v", c2) case err := <-sig: if netErr, ok := err.(net.Error); ok { if !netErr.Timeout() { t.Fatalf("Read returned unexpected error, c = %v, err = %v", c2, netErr) } } else { t.Fatalf("Read returned unexpected error, c = %v, err = %v", c2, err) } } // Test non-blocking read/write c1, c2 = &conn{p1, p2}, &conn{p2, p1} c1.SetWriteDeadline(time.Now().Add(10 * time.Second)) c2.SetReadDeadline(time.Now().Add(10 * time.Second)) // Not blocking here go blockingWrite(c1) go blockingRead(c2) // Read response from both routines for i := 0; i < 2; i++ { select { case <-time.After(100 * time.Millisecond): t.Fatalf("Read/Write timed out, c1 = %v, c2 = %v", c1, c2) case err := <-sig: if err != nil { t.Fatalf("Read/Write failed to complete, c1 = %v, c2 = %v, err = %v", c1, c2, err) } } } } grpc-go-1.29.1/test/channelz_linux_go110_test.go000066400000000000000000000060121365033716300214450ustar00rootroot00000000000000// +build go1.10,linux,!appengine /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // The test in this file should be run in an environment that has go1.10 or later, // as the function SyscallConn() (required to get socket option) was // introduced to net.TCPListener in go1.10. 
package test import ( "testing" "time" "google.golang.org/grpc/internal/channelz" testpb "google.golang.org/grpc/test/grpc_testing" ) func (s) TestCZSocketMetricsSocketOption(t *testing.T) { envs := []env{tcpClearRREnv, tcpTLSRREnv} for _, e := range envs { testCZSocketMetricsSocketOption(t, e) } } func testCZSocketMetricsSocketOption(t *testing.T, e env) { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) doSuccessfulUnaryCall(tc, t) time.Sleep(10 * time.Millisecond) ss, _ := channelz.GetServers(0, 0) if len(ss) != 1 { t.Fatalf("There should be one server, not %d", len(ss)) } if len(ss[0].ListenSockets) != 1 { t.Fatalf("There should be one listen socket, not %d", len(ss[0].ListenSockets)) } for id := range ss[0].ListenSockets { sm := channelz.GetSocket(id) if sm == nil || sm.SocketData == nil || sm.SocketData.SocketOptions == nil { t.Fatalf("Unable to get server listen socket options") } } ns, _ := channelz.GetServerSockets(ss[0].ID, 0, 0) if len(ns) != 1 { t.Fatalf("There should be one server normal socket, not %d", len(ns)) } if ns[0] == nil || ns[0].SocketData == nil || ns[0].SocketData.SocketOptions == nil { t.Fatalf("Unable to get server normal socket options") } tchan, _ := channelz.GetTopChannels(0, 0) if len(tchan) != 1 { t.Fatalf("There should only be one top channel, not %d", len(tchan)) } if len(tchan[0].SubChans) != 1 { t.Fatalf("There should only be one subchannel under top channel %d, not %d", tchan[0].ID, len(tchan[0].SubChans)) } var id int64 for id = range tchan[0].SubChans { break } sc := channelz.GetSubChannel(id) if sc == nil { t.Fatalf("There should only be one socket under subchannel %d, not 0", id) } if len(sc.Sockets) != 1 { t.Fatalf("There should only be one socket under subchannel %d, not %d", sc.ID, len(sc.Sockets)) } for id = range sc.Sockets { break } skt := 
channelz.GetSocket(id) if skt == nil || skt.SocketData == nil || skt.SocketData.SocketOptions == nil { t.Fatalf("Unable to get client normal socket options") } } grpc-go-1.29.1/test/channelz_test.go000066400000000000000000002214051365033716300173240ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package test import ( "context" "crypto/tls" "fmt" "net" "reflect" "strings" "sync" "testing" "time" "golang.org/x/net/http2" "google.golang.org/grpc" _ "google.golang.org/grpc/balancer/grpclb" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" ) func czCleanupWrapper(cleanup func() error, t *testing.T) { if err := cleanup(); err != nil { t.Error(err) } } func verifyResultWithDelay(f func() (bool, error)) error { var ok bool var err error for i := 0; i < 1000; i++ { if ok, err = f(); ok { return nil } time.Sleep(10 * time.Millisecond) } return err } func (s) TestCZServerRegistrationAndDeletion(t *testing.T) { testcases := []struct { total int start int64 max int64 length int64 end bool }{ {total: int(channelz.EntryPerPage), 
start: 0, max: 0, length: channelz.EntryPerPage, end: true}, {total: int(channelz.EntryPerPage) - 1, start: 0, max: 0, length: channelz.EntryPerPage - 1, end: true}, {total: int(channelz.EntryPerPage) + 1, start: 0, max: 0, length: channelz.EntryPerPage, end: false}, {total: int(channelz.EntryPerPage) + 1, start: int64(2*(channelz.EntryPerPage+1) + 1), max: 0, length: 0, end: true}, {total: int(channelz.EntryPerPage), start: 0, max: 1, length: 1, end: false}, {total: int(channelz.EntryPerPage), start: 0, max: channelz.EntryPerPage - 1, length: channelz.EntryPerPage - 1, end: false}, } for _, c := range testcases { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) te.startServers(&testServer{security: e.security}, c.total) ss, end := channelz.GetServers(c.start, c.max) if int64(len(ss)) != c.length || end != c.end { t.Fatalf("GetServers(%d) = %+v (len of which: %d), end: %+v, want len(GetServers(%d)) = %d, end: %+v", c.start, ss, len(ss), end, c.start, c.length, c.end) } te.tearDown() ss, end = channelz.GetServers(c.start, c.max) if len(ss) != 0 || !end { t.Fatalf("GetServers(0) = %+v (len of which: %d), end: %+v, want len(GetServers(0)) = 0, end: true", ss, len(ss), end) } } } func (s) TestCZGetServer(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() ss, _ := channelz.GetServers(0, 0) if len(ss) != 1 { t.Fatalf("there should only be one server, not %d", len(ss)) } serverID := ss[0].ID srv := channelz.GetServer(serverID) if srv == nil { t.Fatalf("server %d does not exist", serverID) } if srv.ID != serverID { t.Fatalf("server want id %d, but got %d", serverID, srv.ID) } te.tearDown() if err := verifyResultWithDelay(func() (bool, error) { srv := channelz.GetServer(serverID) if srv != nil { return false, fmt.Errorf("server %d should not exist", 
serverID) } return true, nil }); err != nil { t.Fatal(err) } } func (s) TestCZTopChannelRegistrationAndDeletion(t *testing.T) { testcases := []struct { total int start int64 max int64 length int64 end bool }{ {total: int(channelz.EntryPerPage), start: 0, max: 0, length: channelz.EntryPerPage, end: true}, {total: int(channelz.EntryPerPage) - 1, start: 0, max: 0, length: channelz.EntryPerPage - 1, end: true}, {total: int(channelz.EntryPerPage) + 1, start: 0, max: 0, length: channelz.EntryPerPage, end: false}, {total: int(channelz.EntryPerPage) + 1, start: int64(2*(channelz.EntryPerPage+1) + 1), max: 0, length: 0, end: true}, {total: int(channelz.EntryPerPage), start: 0, max: 1, length: 1, end: false}, {total: int(channelz.EntryPerPage), start: 0, max: channelz.EntryPerPage - 1, length: channelz.EntryPerPage - 1, end: false}, } for _, c := range testcases { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) var ccs []*grpc.ClientConn for i := 0; i < c.total; i++ { cc := te.clientConn() te.cc = nil // avoid making next dial blocking te.srvAddr = "" ccs = append(ccs, cc) } if err := verifyResultWithDelay(func() (bool, error) { if tcs, end := channelz.GetTopChannels(c.start, c.max); int64(len(tcs)) != c.length || end != c.end { return false, fmt.Errorf("getTopChannels(%d) = %+v (len of which: %d), end: %+v, want len(GetTopChannels(%d)) = %d, end: %+v", c.start, tcs, len(tcs), end, c.start, c.length, c.end) } return true, nil }); err != nil { t.Fatal(err) } for _, cc := range ccs { cc.Close() } if err := verifyResultWithDelay(func() (bool, error) { if tcs, end := channelz.GetTopChannels(c.start, c.max); len(tcs) != 0 || !end { return false, fmt.Errorf("getTopChannels(0) = %+v (len of which: %d), end: %+v, want len(GetTopChannels(0)) = 0, end: true", tcs, len(tcs), end) } return true, nil }); err != nil { t.Fatal(err) } te.tearDown() } } func (s) TestCZTopChannelRegistrationAndDeletionWhenDialFail(t 
*testing.T) { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) // Make dial fails (due to no transport security specified) _, err := grpc.Dial("fake.addr") if err == nil { t.Fatal("expecting dial to fail") } if tcs, end := channelz.GetTopChannels(0, 0); tcs != nil || !end { t.Fatalf("GetTopChannels(0, 0) = %v, %v, want , true", tcs, end) } } func (s) TestCZNestedChannelRegistrationAndDeletion(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv // avoid calling API to set balancer type, which will void service config's change of balancer. e.balancer = "" te := newTest(t, e) r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() resolvedAddrs := []resolver.Address{{Addr: "127.0.0.1:0", Type: resolver.GRPCLB, ServerName: "grpclb.server"}} r.InitialState(resolver.State{Addresses: resolvedAddrs}) te.resolverScheme = r.Scheme() te.clientConn() defer te.tearDown() if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs)) } if len(tcs[0].NestedChans) != 1 { return false, fmt.Errorf("there should be one nested channel from grpclb, not %d", len(tcs[0].NestedChans)) } return true, nil }); err != nil { t.Fatal(err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) // wait for the shutdown of grpclb balancer if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs)) } if len(tcs[0].NestedChans) != 0 { return false, fmt.Errorf("there should be 0 nested channel from grpclb, not %d", len(tcs[0].NestedChans)) } return true, nil }); err != nil { t.Fatal(err) } } func (s) 
TestCZClientSubChannelSocketRegistrationAndDeletion(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv num := 3 // number of backends te := newTest(t, e) var svrAddrs []resolver.Address te.startServers(&testServer{security: e.security}, num) r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() for _, a := range te.srvAddrs { svrAddrs = append(svrAddrs, resolver.Address{Addr: a}) } r.InitialState(resolver.State{Addresses: svrAddrs}) te.resolverScheme = r.Scheme() te.clientConn() defer te.tearDown() // Here, we just wait for all sockets to be up. In the future, if we implement // IDLE, we may need to make several rpc calls to create the sockets. if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs)) } if len(tcs[0].SubChans) != num { return false, fmt.Errorf("there should be %d subchannel not %d", num, len(tcs[0].SubChans)) } count := 0 for k := range tcs[0].SubChans { sc := channelz.GetSubChannel(k) if sc == nil { return false, fmt.Errorf("got subchannel") } count += len(sc.Sockets) } if count != num { return false, fmt.Errorf("there should be %d sockets not %d", num, count) } return true, nil }); err != nil { t.Fatal(err) } r.UpdateState(resolver.State{Addresses: svrAddrs[:len(svrAddrs)-1]}) if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs)) } if len(tcs[0].SubChans) != num-1 { return false, fmt.Errorf("there should be %d subchannel not %d", num-1, len(tcs[0].SubChans)) } count := 0 for k := range tcs[0].SubChans { sc := channelz.GetSubChannel(k) if sc == nil { return false, fmt.Errorf("got subchannel") } count += len(sc.Sockets) } if count != num-1 { return false, fmt.Errorf("there should be %d 
sockets not %d", num-1, count) } return true, nil }); err != nil { t.Fatal(err) } } func (s) TestCZServerSocketRegistrationAndDeletion(t *testing.T) { testcases := []struct { total int start int64 max int64 length int64 end bool }{ {total: int(channelz.EntryPerPage), start: 0, max: 0, length: channelz.EntryPerPage, end: true}, {total: int(channelz.EntryPerPage) - 1, start: 0, max: 0, length: channelz.EntryPerPage - 1, end: true}, {total: int(channelz.EntryPerPage) + 1, start: 0, max: 0, length: channelz.EntryPerPage, end: false}, {total: int(channelz.EntryPerPage), start: 1, max: 0, length: channelz.EntryPerPage - 1, end: true}, {total: int(channelz.EntryPerPage) + 1, start: channelz.EntryPerPage + 1, max: 0, length: 0, end: true}, {total: int(channelz.EntryPerPage), start: 0, max: 1, length: 1, end: false}, {total: int(channelz.EntryPerPage), start: 0, max: channelz.EntryPerPage - 1, length: channelz.EntryPerPage - 1, end: false}, } for _, c := range testcases { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) te.startServer(&testServer{security: e.security}) var ccs []*grpc.ClientConn for i := 0; i < c.total; i++ { cc := te.clientConn() te.cc = nil ccs = append(ccs, cc) } var svrID int64 if err := verifyResultWithDelay(func() (bool, error) { ss, _ := channelz.GetServers(0, 0) if len(ss) != 1 { return false, fmt.Errorf("there should only be one server, not %d", len(ss)) } if len(ss[0].ListenSockets) != 1 { return false, fmt.Errorf("there should only be one server listen socket, not %d", len(ss[0].ListenSockets)) } startID := c.start if startID != 0 { ns, _ := channelz.GetServerSockets(ss[0].ID, 0, int64(c.total)) if int64(len(ns)) < c.start { return false, fmt.Errorf("there should more than %d sockets, not %d", len(ns), c.start) } startID = ns[c.start-1].ID + 1 } ns, end := channelz.GetServerSockets(ss[0].ID, startID, c.max) if int64(len(ns)) != c.length || end != c.end { return false, 
fmt.Errorf("GetServerSockets(%d) = %+v (len of which: %d), end: %+v, want len(GetServerSockets(%d)) = %d, end: %+v", c.start, ns, len(ns), end, c.start, c.length, c.end) } svrID = ss[0].ID return true, nil }); err != nil { t.Fatal(err) } for _, cc := range ccs { cc.Close() } if err := verifyResultWithDelay(func() (bool, error) { ns, _ := channelz.GetServerSockets(svrID, c.start, c.max) if len(ns) != 0 { return false, fmt.Errorf("there should be %d normal sockets not %d", 0, len(ns)) } return true, nil }); err != nil { t.Fatal(err) } te.tearDown() } } func (s) TestCZServerListenSocketDeletion(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) s := grpc.NewServer() lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("failed to listen: %v", err) } go s.Serve(lis) if err := verifyResultWithDelay(func() (bool, error) { ss, _ := channelz.GetServers(0, 0) if len(ss) != 1 { return false, fmt.Errorf("there should only be one server, not %d", len(ss)) } if len(ss[0].ListenSockets) != 1 { return false, fmt.Errorf("there should only be one server listen socket, not %d", len(ss[0].ListenSockets)) } return true, nil }); err != nil { t.Fatal(err) } lis.Close() if err := verifyResultWithDelay(func() (bool, error) { ss, _ := channelz.GetServers(0, 0) if len(ss) != 1 { return false, fmt.Errorf("there should be 1 server, not %d", len(ss)) } if len(ss[0].ListenSockets) != 0 { return false, fmt.Errorf("there should only be %d server listen socket, not %d", 0, len(ss[0].ListenSockets)) } return true, nil }); err != nil { t.Fatal(err) } s.Stop() } type dummyChannel struct{} func (d *dummyChannel) ChannelzMetric() *channelz.ChannelInternalMetric { return &channelz.ChannelInternalMetric{} } type dummySocket struct{} func (d *dummySocket) ChannelzMetric() *channelz.SocketInternalMetric { return &channelz.SocketInternalMetric{} } func (s) TestCZRecusivelyDeletionOfEntry(t *testing.T) { // +--+TopChan+---+ // | | // v v // 
// +-+SubChan1+--+ SubChan2
// |            |
// v            v
// Socket1   Socket2
	// Fragment of a test whose opening lines precede this chunk: it registers a
	// dummy top channel with two subchannels (one owning two sockets), then
	// verifies that RemoveEntry only fully deletes an entry once its children
	// are gone (recursive deletion semantics of the channelz registry).
	czCleanup := channelz.NewChannelzStorage()
	defer czCleanupWrapper(czCleanup, t)
	topChanID := channelz.RegisterChannel(&dummyChannel{}, 0, "")
	subChanID1 := channelz.RegisterSubChannel(&dummyChannel{}, topChanID, "")
	subChanID2 := channelz.RegisterSubChannel(&dummyChannel{}, topChanID, "")
	sktID1 := channelz.RegisterNormalSocket(&dummySocket{}, subChanID1, "")
	sktID2 := channelz.RegisterNormalSocket(&dummySocket{}, subChanID1, "")
	tcs, _ := channelz.GetTopChannels(0, 0)
	if tcs == nil || len(tcs) != 1 {
		t.Fatalf("There should be one TopChannel entry")
	}
	if len(tcs[0].SubChans) != 2 {
		t.Fatalf("There should be two SubChannel entries")
	}
	sc := channelz.GetSubChannel(subChanID1)
	if sc == nil || len(sc.Sockets) != 2 {
		t.Fatalf("There should be two Socket entries")
	}
	// Removing the top channel while subchannels still exist must not make it
	// disappear from GetTopChannels yet.
	channelz.RemoveEntry(topChanID)
	tcs, _ = channelz.GetTopChannels(0, 0)
	if tcs == nil || len(tcs) != 1 {
		t.Fatalf("There should be one TopChannel entry")
	}
	// Removing subChanID1 while its sockets still exist keeps it (and therefore
	// the parent) alive; subChanID2 has no children and is deleted outright.
	channelz.RemoveEntry(subChanID1)
	channelz.RemoveEntry(subChanID2)
	tcs, _ = channelz.GetTopChannels(0, 0)
	if tcs == nil || len(tcs) != 1 {
		t.Fatalf("There should be one TopChannel entry")
	}
	if len(tcs[0].SubChans) != 1 {
		t.Fatalf("There should be one SubChannel entry")
	}
	// Once the last sockets are removed, the pending deletions cascade and the
	// whole tree is gone.
	channelz.RemoveEntry(sktID1)
	channelz.RemoveEntry(sktID2)
	tcs, _ = channelz.GetTopChannels(0, 0)
	if tcs != nil {
		t.Fatalf("There should be no TopChannel entry")
	}
}

// TestCZChannelMetrics verifies channelz call-count aggregation on the top
// channel and its subchannels. It makes one successful EmptyCall, one
// UnaryCall that fails with ResourceExhausted (payload exceeds the client's
// 8-byte send limit), and opens one FullDuplexCall stream that stays in
// flight, then expects CallsStarted=3 / CallsSucceeded=1 / CallsFailed=1 both
// summed across subchannels and on the top channel itself.
//
// NOTE(review): several Fatalf strings here end with "want _, " — upstream
// they end in "<nil>"; the text looks stripped by extraction. Verify against
// the canonical source before relying on the messages.
func (s) TestCZChannelMetrics(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer czCleanupWrapper(czCleanup, t)
	e := tcpClearRREnv
	num := 3 // number of backends
	te := newTest(t, e)
	te.maxClientSendMsgSize = newInt(8)
	var svrAddrs []resolver.Address
	te.startServers(&testServer{security: e.security}, num)
	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()
	for _, a := range te.srvAddrs {
		svrAddrs = append(svrAddrs, resolver.Address{Addr: a})
	}
	r.InitialState(resolver.State{Addresses: svrAddrs})
	te.resolverScheme = r.Scheme()
	cc := te.clientConn()
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(cc)
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err)
	}
	const smallSize = 1
	const largeSize = 8
	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(smallSize),
		Payload:      largePayload,
	}
	// largePayload plus framing exceeds maxClientSendMsgSize(8), so this call
	// must fail client-side with ResourceExhausted.
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}
	// Leave one stream open so CallsStarted counts it without it succeeding or
	// failing.
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
	}
	defer stream.CloseSend()
	// Here, we just wait for all sockets to be up. In the future, if we implement
	// IDLE, we may need to make several rpc calls to create the sockets.
	if err := verifyResultWithDelay(func() (bool, error) {
		tcs, _ := channelz.GetTopChannels(0, 0)
		if len(tcs) != 1 {
			return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs))
		}
		if len(tcs[0].SubChans) != num {
			return false, fmt.Errorf("there should be %d subchannel not %d", num, len(tcs[0].SubChans))
		}
		// Sum call counters over all subchannels; they must match the totals
		// recorded on the top channel.
		var cst, csu, cf int64
		for k := range tcs[0].SubChans {
			sc := channelz.GetSubChannel(k)
			if sc == nil {
				return false, fmt.Errorf("got subchannel")
			}
			cst += sc.ChannelData.CallsStarted
			csu += sc.ChannelData.CallsSucceeded
			cf += sc.ChannelData.CallsFailed
		}
		if cst != 3 {
			return false, fmt.Errorf("there should be 3 CallsStarted not %d", cst)
		}
		if csu != 1 {
			return false, fmt.Errorf("there should be 1 CallsSucceeded not %d", csu)
		}
		if cf != 1 {
			return false, fmt.Errorf("there should be 1 CallsFailed not %d", cf)
		}
		if tcs[0].ChannelData.CallsStarted != 3 {
			return false, fmt.Errorf("there should be 3 CallsStarted not %d", tcs[0].ChannelData.CallsStarted)
		}
		if tcs[0].ChannelData.CallsSucceeded != 1 {
			return false, fmt.Errorf("there should be 1 CallsSucceeded not %d", tcs[0].ChannelData.CallsSucceeded)
		}
		if tcs[0].ChannelData.CallsFailed != 1 {
			return false, fmt.Errorf("there should be 1 CallsFailed not %d", tcs[0].ChannelData.CallsFailed)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}

// TestCZServerMetrics mirrors TestCZChannelMetrics on the server side: one
// successful call, one call rejected by the server's 8-byte receive limit,
// and one open stream, then asserts ServerData shows CallsStarted=3,
// CallsSucceeded=1, CallsFailed=1.
func (s) TestCZServerMetrics(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer czCleanupWrapper(czCleanup, t)
	e := tcpClearRREnv
	te := newTest(t, e)
	te.maxServerReceiveMsgSize = newInt(8)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err)
	}
	const smallSize = 1
	const largeSize = 8
	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(smallSize),
		Payload:      largePayload,
	}
	// Server rejects the oversized request, producing the CallsFailed count.
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
	}
	defer stream.CloseSend()
	if err := verifyResultWithDelay(func() (bool, error) {
		ss, _ := channelz.GetServers(0, 0)
		if len(ss) != 1 {
			return false, fmt.Errorf("there should only be one server, not %d", len(ss))
		}
		if ss[0].ServerData.CallsStarted != 3 {
			return false, fmt.Errorf("there should be 3 CallsStarted not %d", ss[0].ServerData.CallsStarted)
		}
		if ss[0].ServerData.CallsSucceeded != 1 {
			return false, fmt.Errorf("there should be 1 CallsSucceeded not %d", ss[0].ServerData.CallsSucceeded)
		}
		if ss[0].ServerData.CallsFailed != 1 {
			return false, fmt.Errorf("there should be 1 CallsFailed not %d", ss[0].ServerData.CallsFailed)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}

// testServiceClientWrapper wraps a TestServiceClient and counts every stream
// it creates (each RPC method increments streamsCreated under mu), so tests
// can compute the HTTP/2 stream ID of the most recent call.
type testServiceClientWrapper struct {
	testpb.TestServiceClient
	mu             sync.RWMutex   // guards streamsCreated
	streamsCreated int            // number of RPCs started through this wrapper
}

// getCurrentStreamID returns the HTTP/2 stream ID of the most recently
// created stream. Client-initiated stream IDs are the odd numbers 1,3,5,…,
// hence 2n-1 for the nth stream.
func (t *testServiceClientWrapper) getCurrentStreamID() uint32 {
	t.mu.RLock()
	defer t.mu.RUnlock()
	return uint32(2*t.streamsCreated - 1)
}

// EmptyCall counts the stream then delegates to the embedded client.
func (t *testServiceClientWrapper) EmptyCall(ctx context.Context, in *testpb.Empty, opts ...grpc.CallOption) (*testpb.Empty, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.streamsCreated++
	return t.TestServiceClient.EmptyCall(ctx, in, opts...)
}

// UnaryCall counts the stream then delegates to the embedded client.
func (t *testServiceClientWrapper) UnaryCall(ctx context.Context, in *testpb.SimpleRequest, opts ...grpc.CallOption) (*testpb.SimpleResponse, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.streamsCreated++
	return t.TestServiceClient.UnaryCall(ctx, in, opts...)
}

// StreamingOutputCall counts the stream then delegates to the embedded client.
func (t *testServiceClientWrapper) StreamingOutputCall(ctx context.Context, in *testpb.StreamingOutputCallRequest, opts ...grpc.CallOption) (testpb.TestService_StreamingOutputCallClient, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.streamsCreated++
	return t.TestServiceClient.StreamingOutputCall(ctx, in, opts...)
}

// StreamingInputCall counts the stream then delegates to the embedded client.
func (t *testServiceClientWrapper) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (testpb.TestService_StreamingInputCallClient, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.streamsCreated++
	return t.TestServiceClient.StreamingInputCall(ctx, opts...)
}

// FullDuplexCall counts the stream then delegates to the embedded client.
func (t *testServiceClientWrapper) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (testpb.TestService_FullDuplexCallClient, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.streamsCreated++
	return t.TestServiceClient.FullDuplexCall(ctx, opts...)
}

// HalfDuplexCall counts the stream then delegates to the embedded client.
func (t *testServiceClientWrapper) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (testpb.TestService_HalfDuplexCallClient, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.streamsCreated++
	return t.TestServiceClient.HalfDuplexCall(ctx, opts...)
}

// doSuccessfulUnaryCall performs one EmptyCall that is expected to succeed
// (adds 1 to StreamsStarted/StreamsSucceeded in channelz metrics).
func doSuccessfulUnaryCall(tc testpb.TestServiceClient, t *testing.T) {
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err)
	}
}

// doStreamingInputCallWithLargePayload opens a streaming-input call and sends
// a single 10000-byte payload; used by the flow-control tests to consume a
// known amount of window. The stream is deliberately left open (Send's error
// is ignored here; the test only cares about bytes on the wire).
func doStreamingInputCallWithLargePayload(tc testpb.TestServiceClient, t *testing.T) {
	s, err := tc.StreamingInputCall(context.Background())
	if err != nil {
		t.Fatalf("TestService/StreamingInputCall(_) = _, %v, want ", err)
	}
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 10000)
	if err != nil {
		t.Fatal(err)
	}
	s.Send(&testpb.StreamingInputCallRequest{Payload: payload})
}

// doServerSideFailedUnaryCall performs a UnaryCall whose 2000-byte payload is
// expected to be rejected server-side with ResourceExhausted (the tests using
// it configure a small server receive limit).
func doServerSideFailedUnaryCall(tc testpb.TestServiceClient, t *testing.T) {
	const smallSize = 1
	const largeSize = 2000
	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(smallSize),
		Payload:      largePayload,
	}
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}
}

// doClientSideInitiatedFailedStream starts a full-duplex stream, exchanges
// one message each way, then cancels the context so the client tears the
// stream down (counted as a failed stream on both sides).
func doClientSideInitiatedFailedStream(tc testpb.TestServiceClient, t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want ", err)
	}
	const smallSize = 1
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: []*testpb.ResponseParameters{
			{Size: smallSize},
		},
		Payload: smallPayload,
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err)
	}
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = %v, want ", stream, err)
	}
	// By canceling the call, the client will send rst_stream to end the call, and
	// the stream will failed as a result.
	cancel()
}

// This func is to be used to test client side counting of failed streams.
// It exchanges one message on a full-duplex stream, then has the test-owned
// listener wrapper inject an RST_STREAM (CANCEL) for the current stream ID,
// and expects the next Recv to fail.
func doServerSideInitiatedFailedStreamWithRSTStream(tc testpb.TestServiceClient, t *testing.T, l *listenerWrapper) {
	stream, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want ", err)
	}
	const smallSize = 1
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: []*testpb.ResponseParameters{
			{Size: smallSize},
		},
		Payload: smallPayload,
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err)
	}
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = %v, want ", stream, err)
	}
	rcw := l.getLastConn()
	if rcw != nil {
		rcw.writeRSTStream(tc.(*testServiceClientWrapper).getCurrentStreamID(), http2.ErrCodeCancel)
	}
	if _, err := stream.Recv(); err == nil {
		t.Fatalf("%v.Recv() = %v, want ", stream, err)
	}
}

// this func is to be used to test client side counting of failed streams.
// It opens two streams; the first only keeps the transport alive, and a
// GOAWAY whose last-stream-ID excludes the second stream (current ID minus 2)
// is injected so that second stream fails.
func doServerSideInitiatedFailedStreamWithGoAway(tc testpb.TestServiceClient, t *testing.T, l *listenerWrapper) {
	// This call is just to keep the transport from shutting down (socket will be deleted
	// in this case, and we will not be able to get metrics).
	s, err := tc.FullDuplexCall(context.Background())
	if err != nil {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want ", err)
	}
	if err := s.Send(&testpb.StreamingOutputCallRequest{ResponseParameters: []*testpb.ResponseParameters{
		{
			Size: 1,
		},
	}}); err != nil {
		t.Fatalf("s.Send() failed with error: %v", err)
	}
	if _, err := s.Recv(); err != nil {
		t.Fatalf("s.Recv() failed with error: %v", err)
	}
	s, err = tc.FullDuplexCall(context.Background())
	if err != nil {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want ", err)
	}
	if err := s.Send(&testpb.StreamingOutputCallRequest{ResponseParameters: []*testpb.ResponseParameters{
		{
			Size: 1,
		},
	}}); err != nil {
		t.Fatalf("s.Send() failed with error: %v", err)
	}
	if _, err := s.Recv(); err != nil {
		t.Fatalf("s.Recv() failed with error: %v", err)
	}
	rcw := l.getLastConn()
	if rcw != nil {
		// getCurrentStreamID()-2 is the previous (first) stream's ID, so the
		// GOAWAY rejects the second stream.
		rcw.writeGoAway(tc.(*testServiceClientWrapper).getCurrentStreamID()-2, http2.ErrCodeCancel, []byte{})
	}
	if _, err := s.Recv(); err == nil {
		t.Fatalf("%v.Recv() = %v, want ", s, err)
	}
}

// doIdleCallToInvokeKeepAlive opens a stream and leaves it idle for 4s so the
// configured 1s keepalive pings fire, then cancels.
func doIdleCallToInvokeKeepAlive(tc testpb.TestServiceClient, t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	_, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want ", err)
	}
	// Allow for at least 2 keepalives (1s per ping interval)
	time.Sleep(4 * time.Second)
	cancel()
}

// TestCZClientSocketMetricsStreamsAndMessagesCount drives the do* helpers in
// sequence and, after each, asserts the cumulative client-socket stream and
// message counters reported by channelz.
func (s) TestCZClientSocketMetricsStreamsAndMessagesCount(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer czCleanupWrapper(czCleanup, t)
	e := tcpClearRREnv
	te := newTest(t, e)
	te.maxServerReceiveMsgSize = newInt(20)
	te.maxClientReceiveMsgSize = newInt(20)
	rcw := te.startServerWithConnControl(&testServer{security: e.security})
	defer te.tearDown()
	cc := te.clientConn()
	tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)}
	doSuccessfulUnaryCall(tc, t)
	var scID, skID int64
	if err := verifyResultWithDelay(func() (bool, error) {
		tchan, _ := channelz.GetTopChannels(0, 0)
		if len(tchan) != 1 {
			return false, fmt.Errorf("there should only be one top channel, not %d", len(tchan))
		}
		if len(tchan[0].SubChans) != 1 {
			return false, fmt.Errorf("there should only be one subchannel under top channel %d, not %d", tchan[0].ID, len(tchan[0].SubChans))
		}
		// Grab the sole subchannel / socket IDs for the later checks.
		for scID = range tchan[0].SubChans {
			break
		}
		sc := channelz.GetSubChannel(scID)
		if sc == nil {
			return false, fmt.Errorf("there should only be one socket under subchannel %d, not 0", scID)
		}
		if len(sc.Sockets) != 1 {
			return false, fmt.Errorf("there should only be one socket under subchannel %d, not %d", sc.ID, len(sc.Sockets))
		}
		for skID = range sc.Sockets {
			break
		}
		skt := channelz.GetSocket(skID)
		sktData := skt.SocketData
		if sktData.StreamsStarted != 1 || sktData.StreamsSucceeded != 1 || sktData.MessagesSent != 1 || sktData.MessagesReceived != 1 {
			return false, fmt.Errorf("channelz.GetSocket(%d), want (StreamsStarted, StreamsSucceeded, MessagesSent, MessagesReceived) = (1, 1, 1, 1), got (%d, %d, %d, %d)", skt.ID, sktData.StreamsStarted, sktData.StreamsSucceeded, sktData.MessagesSent, sktData.MessagesReceived)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
	doServerSideFailedUnaryCall(tc, t)
	if err := verifyResultWithDelay(func() (bool, error) {
		skt := channelz.GetSocket(skID)
		sktData := skt.SocketData
		if sktData.StreamsStarted != 2 || sktData.StreamsSucceeded != 2 || sktData.MessagesSent != 2 || sktData.MessagesReceived != 1 {
			return false, fmt.Errorf("channelz.GetSocket(%d), want (StreamsStarted, StreamsSucceeded, MessagesSent, MessagesReceived) = (2, 2, 2, 1), got (%d, %d, %d, %d)", skt.ID, sktData.StreamsStarted, sktData.StreamsSucceeded, sktData.MessagesSent, sktData.MessagesReceived)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
	doClientSideInitiatedFailedStream(tc, t)
	if err := verifyResultWithDelay(func() (bool, error) {
		skt := channelz.GetSocket(skID)
		sktData := skt.SocketData
		if sktData.StreamsStarted != 3 || sktData.StreamsSucceeded != 2 || sktData.StreamsFailed != 1 || sktData.MessagesSent != 3 || sktData.MessagesReceived != 2 {
			return false, fmt.Errorf("channelz.GetSocket(%d), want (StreamsStarted, StreamsSucceeded, StreamsFailed, MessagesSent, MessagesReceived) = (3, 2, 1, 3, 2), got (%d, %d, %d, %d, %d)", skt.ID, sktData.StreamsStarted, sktData.StreamsSucceeded, sktData.StreamsFailed, sktData.MessagesSent, sktData.MessagesReceived)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
	doServerSideInitiatedFailedStreamWithRSTStream(tc, t, rcw)
	if err := verifyResultWithDelay(func() (bool, error) {
		skt := channelz.GetSocket(skID)
		sktData := skt.SocketData
		if sktData.StreamsStarted != 4 || sktData.StreamsSucceeded != 2 || sktData.StreamsFailed != 2 || sktData.MessagesSent != 4 || sktData.MessagesReceived != 3 {
			return false, fmt.Errorf("channelz.GetSocket(%d), want (StreamsStarted, StreamsSucceeded, StreamsFailed, MessagesSent, MessagesReceived) = (4, 2, 2, 4, 3), got (%d, %d, %d, %d, %d)", skt.ID, sktData.StreamsStarted, sktData.StreamsSucceeded, sktData.StreamsFailed, sktData.MessagesSent, sktData.MessagesReceived)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
	doServerSideInitiatedFailedStreamWithGoAway(tc, t, rcw)
	if err := verifyResultWithDelay(func() (bool, error) {
		skt := channelz.GetSocket(skID)
		sktData := skt.SocketData
		if sktData.StreamsStarted != 6 || sktData.StreamsSucceeded != 2 || sktData.StreamsFailed != 3 || sktData.MessagesSent != 6 || sktData.MessagesReceived != 5 {
			return false, fmt.Errorf("channelz.GetSocket(%d), want (StreamsStarted, StreamsSucceeded, StreamsFailed, MessagesSent, MessagesReceived) = (6, 2, 3, 6, 5), got (%d, %d, %d, %d, %d)", skt.ID, sktData.StreamsStarted, sktData.StreamsSucceeded, sktData.StreamsFailed, sktData.MessagesSent, sktData.MessagesReceived)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}

// This test is to complete TestCZClientSocketMetricsStreamsAndMessagesCount and
// TestCZServerSocketMetricsStreamsAndMessagesCount by adding the test case of
// server sending RST_STREAM to client due to client side flow control violation.
// It is separated from other cases due to setup incompatibly, i.e. max receive
// size violation will mask flow control violation.
func (s) TestCZClientAndServerSocketMetricsStreamsCountFlowControlRSTStream(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer czCleanupWrapper(czCleanup, t)
	e := tcpClearRREnv
	te := newTest(t, e)
	te.serverInitialWindowSize = 65536
	// Avoid overflowing connection level flow control window, which will lead to
	// transport being closed.
	te.serverInitialConnWindowSize = 65536 * 2
	ts := &funcServer{fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
		stream.Send(&testpb.StreamingOutputCallResponse{})
		<-stream.Context().Done()
		return status.Errorf(codes.DeadlineExceeded, "deadline exceeded or cancelled")
	}}
	te.startServer(ts)
	defer te.tearDown()
	cc, dw := te.clientConnWithConnControl()
	tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want ", err)
	}
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("stream.Recv() = %v, want nil", err)
	}
	// Write 6*16384 = 98304 raw bytes of DATA onto the current stream —
	// more than the 65536 stream window — so the server RST_STREAMs for a
	// flow-control violation.
	go func() {
		payload := make([]byte, 16384)
		for i := 0; i < 6; i++ {
			dw.getRawConnWrapper().writeRawFrame(http2.FrameData, 0, tc.getCurrentStreamID(), payload)
		}
	}()
	if _, err := stream.Recv(); status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("stream.Recv() = %v, want error code: %v", err, codes.ResourceExhausted)
	}
	cancel()
	if err := verifyResultWithDelay(func() (bool, error) {
		tchan, _ := channelz.GetTopChannels(0, 0)
		if len(tchan) != 1 {
			return false, fmt.Errorf("there should only be one top channel, not %d", len(tchan))
		}
		if len(tchan[0].SubChans) != 1 {
			return false, fmt.Errorf("there should only be one subchannel under top channel %d, not %d", tchan[0].ID, len(tchan[0].SubChans))
		}
		var id int64
		for id = range tchan[0].SubChans {
			break
		}
		sc := channelz.GetSubChannel(id)
		if sc == nil {
			return false, fmt.Errorf("there should only be one socket under subchannel %d, not 0", id)
		}
		if len(sc.Sockets) != 1 {
			return false, fmt.Errorf("there should only be one socket under subchannel %d, not %d", sc.ID, len(sc.Sockets))
		}
		for id = range sc.Sockets {
			break
		}
		skt := channelz.GetSocket(id)
		sktData := skt.SocketData
		if sktData.StreamsStarted != 1 || sktData.StreamsSucceeded != 0 || sktData.StreamsFailed != 1 {
			return false, fmt.Errorf("channelz.GetSocket(%d), want (StreamsStarted, StreamsSucceeded, StreamsFailed) = (1, 0, 1), got (%d, %d, %d)", skt.ID, sktData.StreamsStarted, sktData.StreamsSucceeded, sktData.StreamsFailed)
		}
		ss, _ := channelz.GetServers(0, 0)
		if len(ss) != 1 {
			return false, fmt.Errorf("there should only be one server, not %d", len(ss))
		}
		ns, _ := channelz.GetServerSockets(ss[0].ID, 0, 0)
		if len(ns) != 1 {
			return false, fmt.Errorf("there should be one server normal socket, not %d", len(ns))
		}
		sktData = ns[0].SocketData
		if sktData.StreamsStarted != 1 || sktData.StreamsSucceeded != 0 || sktData.StreamsFailed != 1 {
			return false, fmt.Errorf("server socket metric with ID %d, want (StreamsStarted, StreamsSucceeded, StreamsFailed) = (1, 0, 1), got (%d, %d, %d)", ns[0].ID, sktData.StreamsStarted, sktData.StreamsSucceeded, sktData.StreamsFailed)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}

// TestCZClientAndServerSocketMetricsFlowControl checks the
// Local/RemoteFlowControlWindow values channelz reports on both the client
// and server sockets as known amounts of data are sent (BDP estimation is
// disabled by pinning all windows to 64KiB).
func (s) TestCZClientAndServerSocketMetricsFlowControl(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer czCleanupWrapper(czCleanup, t)
	e := tcpClearRREnv
	te := newTest(t, e)
	// disable BDP
	te.serverInitialWindowSize = 65536
	te.serverInitialConnWindowSize = 65536
	te.clientInitialWindowSize = 65536
	te.clientInitialConnWindowSize = 65536
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	for i := 0; i < 10; i++ {
		doSuccessfulUnaryCall(tc, t)
	}
	var cliSktID, svrSktID int64
	if err := verifyResultWithDelay(func() (bool, error) {
		tchan, _ := channelz.GetTopChannels(0, 0)
		if len(tchan) != 1 {
			return false, fmt.Errorf("there should only be one top channel, not %d", len(tchan))
		}
		if len(tchan[0].SubChans) != 1 {
			return false, fmt.Errorf("there should only be one subchannel under top channel %d, not %d", tchan[0].ID, len(tchan[0].SubChans))
		}
		var id int64
		for id = range tchan[0].SubChans {
			break
		}
		sc := channelz.GetSubChannel(id)
		if sc == nil {
			return false, fmt.Errorf("there should only be one socket under subchannel %d, not 0", id)
		}
		if len(sc.Sockets) != 1 {
			return false, fmt.Errorf("there should only be one socket under subchannel %d, not %d", sc.ID, len(sc.Sockets))
		}
		for id = range sc.Sockets {
			break
		}
		skt := channelz.GetSocket(id)
		sktData := skt.SocketData
		// 65536 - 5 (Length-Prefixed-Message size) * 10 = 65486
		if sktData.LocalFlowControlWindow != 65486 || sktData.RemoteFlowControlWindow != 65486 {
			return false, fmt.Errorf("client: (LocalFlowControlWindow, RemoteFlowControlWindow) size should be (65536, 65486), not (%d, %d)", sktData.LocalFlowControlWindow, sktData.RemoteFlowControlWindow)
		}
		ss, _ := channelz.GetServers(0, 0)
		if len(ss) != 1 {
			return false, fmt.Errorf("there should only be one server, not %d", len(ss))
		}
		ns, _ := channelz.GetServerSockets(ss[0].ID, 0, 0)
		sktData = ns[0].SocketData
		if sktData.LocalFlowControlWindow != 65486 || sktData.RemoteFlowControlWindow != 65486 {
			return false, fmt.Errorf("server: (LocalFlowControlWindow, RemoteFlowControlWindow) size should be (65536, 65486), not (%d, %d)", sktData.LocalFlowControlWindow, sktData.RemoteFlowControlWindow)
		}
		// NOTE(review): svrSktID is assigned the *server's* ID (ss[0].ID), not a
		// socket ID; it is later passed to GetServerSockets, which takes a server
		// ID — the name is misleading but the usage is consistent.
		cliSktID, svrSktID = id, ss[0].ID
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
	doStreamingInputCallWithLargePayload(tc, t)
	if err := verifyResultWithDelay(func() (bool, error) {
		skt := channelz.GetSocket(cliSktID)
		sktData := skt.SocketData
		// Local: 65536 - 5 (Length-Prefixed-Message size) * 10 = 65486
		// Remote: 65536 - 5 (Length-Prefixed-Message size) * 10 - 10011 = 55475
		if sktData.LocalFlowControlWindow != 65486 || sktData.RemoteFlowControlWindow != 55475 {
			return false, fmt.Errorf("client: (LocalFlowControlWindow, RemoteFlowControlWindow) size should be (65486, 55475), not (%d, %d)", sktData.LocalFlowControlWindow, sktData.RemoteFlowControlWindow)
		}
		ss, _ := channelz.GetServers(0, 0)
		if len(ss) != 1 {
			return false, fmt.Errorf("there should only be one server, not %d", len(ss))
		}
		ns, _ := channelz.GetServerSockets(svrSktID, 0, 0)
		sktData = ns[0].SocketData
		if sktData.LocalFlowControlWindow != 55475 || sktData.RemoteFlowControlWindow != 65486 {
			return false, fmt.Errorf("server: (LocalFlowControlWindow, RemoteFlowControlWindow) size should be (55475, 65486), not (%d, %d)", sktData.LocalFlowControlWindow, sktData.RemoteFlowControlWindow)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
	// triggers transport flow control window update on server side, since unacked
	// bytes should be larger than limit now. i.e. 50 + 20022 > 65536/4.
	doStreamingInputCallWithLargePayload(tc, t)
	if err := verifyResultWithDelay(func() (bool, error) {
		skt := channelz.GetSocket(cliSktID)
		sktData := skt.SocketData
		// Local: 65536 - 5 (Length-Prefixed-Message size) * 10 = 65486
		// Remote: 65536
		if sktData.LocalFlowControlWindow != 65486 || sktData.RemoteFlowControlWindow != 65536 {
			return false, fmt.Errorf("client: (LocalFlowControlWindow, RemoteFlowControlWindow) size should be (65486, 65536), not (%d, %d)", sktData.LocalFlowControlWindow, sktData.RemoteFlowControlWindow)
		}
		ss, _ := channelz.GetServers(0, 0)
		if len(ss) != 1 {
			return false, fmt.Errorf("there should only be one server, not %d", len(ss))
		}
		ns, _ := channelz.GetServerSockets(svrSktID, 0, 0)
		sktData = ns[0].SocketData
		if sktData.LocalFlowControlWindow != 65536 || sktData.RemoteFlowControlWindow != 65486 {
			return false, fmt.Errorf("server: (LocalFlowControlWindow, RemoteFlowControlWindow) size should be (65536, 65486), not (%d, %d)", sktData.LocalFlowControlWindow, sktData.RemoteFlowControlWindow)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}

// TestCZClientSocketMetricsKeepAlive configures 1s client keepalive pings
// (lowering the internal minimum ping time to allow it), dials an idle
// connection, and expects channelz to report exactly 2 KeepAlivesSent.
func (s) TestCZClientSocketMetricsKeepAlive(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer czCleanupWrapper(czCleanup, t)
	defer func(t time.Duration) { internal.KeepaliveMinPingTime = t }(internal.KeepaliveMinPingTime)
	internal.KeepaliveMinPingTime = time.Second
	e := tcpClearRREnv
	te := newTest(t, e)
	te.customDialOptions = append(te.customDialOptions, grpc.WithKeepaliveParams(
		keepalive.ClientParameters{
			Time:                time.Second,
			Timeout:             500 * time.Millisecond,
			PermitWithoutStream: true,
		}))
	te.customServerOptions = append(te.customServerOptions, grpc.KeepaliveEnforcementPolicy(
		keepalive.EnforcementPolicy{
			MinTime:             500 * time.Millisecond,
			PermitWithoutStream: true,
		}))
	te.startServer(&testServer{security: e.security})
	te.clientConn() // Dial the server
	defer te.tearDown()
	if err := verifyResultWithDelay(func() (bool, error) {
		tchan, _ := channelz.GetTopChannels(0, 0)
		if len(tchan) != 1 {
			return false, fmt.Errorf("there should only be one top channel, not %d", len(tchan))
		}
		if len(tchan[0].SubChans) != 1 {
			return false, fmt.Errorf("there should only be one subchannel under top channel %d, not %d", tchan[0].ID, len(tchan[0].SubChans))
		}
		var id int64
		for id = range tchan[0].SubChans {
			break
		}
		sc := channelz.GetSubChannel(id)
		if sc == nil {
			return false, fmt.Errorf("there should only be one socket under subchannel %d, not 0", id)
		}
		if len(sc.Sockets) != 1 {
			return false, fmt.Errorf("there should only be one socket under subchannel %d, not %d", sc.ID, len(sc.Sockets))
		}
		for id = range sc.Sockets {
			break
		}
		skt := channelz.GetSocket(id)
		if skt.SocketData.KeepAlivesSent != 2 {
			return false, fmt.Errorf("there should be 2 KeepAlives sent, not %d", skt.SocketData.KeepAlivesSent)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}

// TestCZServerSocketMetricsStreamsAndMessagesCount is the server-side
// counterpart of TestCZClientSocketMetricsStreamsAndMessagesCount: it runs the
// same do* helpers and asserts the server socket's cumulative counters.
func (s) TestCZServerSocketMetricsStreamsAndMessagesCount(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer czCleanupWrapper(czCleanup, t)
	e := tcpClearRREnv
	te := newTest(t, e)
	te.maxServerReceiveMsgSize = newInt(20)
	te.maxClientReceiveMsgSize = newInt(20)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	cc, _ := te.clientConnWithConnControl()
	tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)}
	var svrID int64
	if err := verifyResultWithDelay(func() (bool, error) {
		ss, _ := channelz.GetServers(0, 0)
		if len(ss) != 1 {
			return false, fmt.Errorf("there should only be one server, not %d", len(ss))
		}
		svrID = ss[0].ID
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
	doSuccessfulUnaryCall(tc, t)
	if err := verifyResultWithDelay(func() (bool, error) {
		ns, _ := channelz.GetServerSockets(svrID, 0, 0)
		sktData := ns[0].SocketData
		if sktData.StreamsStarted != 1 || sktData.StreamsSucceeded != 1 || sktData.StreamsFailed != 0 || sktData.MessagesSent != 1 || sktData.MessagesReceived != 1 {
			return false, fmt.Errorf("server socket metric with ID %d, want (StreamsStarted, StreamsSucceeded, MessagesSent, MessagesReceived) = (1, 1, 1, 1), got (%d, %d, %d, %d, %d)", ns[0].ID, sktData.StreamsStarted, sktData.StreamsSucceeded, sktData.StreamsFailed, sktData.MessagesSent, sktData.MessagesReceived)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
	doServerSideFailedUnaryCall(tc, t)
	if err := verifyResultWithDelay(func() (bool, error) {
		ns, _ := channelz.GetServerSockets(svrID, 0, 0)
		sktData := ns[0].SocketData
		if sktData.StreamsStarted != 2 || sktData.StreamsSucceeded != 2 || sktData.StreamsFailed != 0 || sktData.MessagesSent != 1 || sktData.MessagesReceived != 1 {
			return false, fmt.Errorf("server socket metric with ID %d, want (StreamsStarted, StreamsSucceeded, StreamsFailed, MessagesSent, MessagesReceived) = (2, 2, 0, 1, 1), got (%d, %d, %d, %d, %d)", ns[0].ID, sktData.StreamsStarted, sktData.StreamsSucceeded, sktData.StreamsFailed, sktData.MessagesSent, sktData.MessagesReceived)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
	doClientSideInitiatedFailedStream(tc, t)
	if err := verifyResultWithDelay(func() (bool, error) {
		ns, _ := channelz.GetServerSockets(svrID, 0, 0)
		sktData := ns[0].SocketData
		if sktData.StreamsStarted != 3 || sktData.StreamsSucceeded != 2 || sktData.StreamsFailed != 1 || sktData.MessagesSent != 2 || sktData.MessagesReceived != 2 {
			return false, fmt.Errorf("server socket metric with ID %d, want (StreamsStarted, StreamsSucceeded, StreamsFailed, MessagesSent, MessagesReceived) = (3, 2, 1, 2, 2), got (%d, %d, %d, %d, %d)", ns[0].ID, sktData.StreamsStarted, sktData.StreamsSucceeded, sktData.StreamsFailed, sktData.MessagesSent, sktData.MessagesReceived)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}

func (s) TestCZServerSocketMetricsKeepAlive(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer czCleanupWrapper(czCleanup, t)
	e := tcpClearRREnv
	te := newTest(t, e)
	// We setup the server keepalive parameters to send one keepalive every
	// second, and verify that the actual number of keepalives is very close to
	// the number of seconds elapsed in the test. We had a bug wherein the
	// server was sending one keepalive every [Time+Timeout] instead of every
	// [Time] period, and since Timeout is configured to a low value here, we
	// should be able to verify that the fix works with the above mentioned
	// logic.
	kpOption := grpc.KeepaliveParams(keepalive.ServerParameters{
		Time:    time.Second,
		Timeout: 100 * time.Millisecond,
	})
	te.customServerOptions = append(te.customServerOptions, kpOption)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	start := time.Now()
	doIdleCallToInvokeKeepAlive(tc, t)
	if err := verifyResultWithDelay(func() (bool, error) {
		ss, _ := channelz.GetServers(0, 0)
		if len(ss) != 1 {
			return false, fmt.Errorf("there should be one server, not %d", len(ss))
		}
		ns, _ := channelz.GetServerSockets(ss[0].ID, 0, 0)
		if len(ns) != 1 {
			return false, fmt.Errorf("there should be one server normal socket, not %d", len(ns))
		}
		// One keepalive per elapsed second, minus one for scheduling slack.
		wantKeepalivesCount := int64(time.Since(start).Seconds()) - 1
		if gotKeepalivesCount := ns[0].SocketData.KeepAlivesSent; gotKeepalivesCount != wantKeepalivesCount {
			return false, fmt.Errorf("got keepalivesCount: %v, want keepalivesCount: %v", gotKeepalivesCount, wantKeepalivesCount)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}

// cipherSuites lists the TLS cipher suite names a negotiated connection's
// channelz StandardName is allowed to match in
// TestCZSocketGetSecurityValueTLS.
var cipherSuites = []string{
	"TLS_RSA_WITH_RC4_128_SHA",
	"TLS_RSA_WITH_3DES_EDE_CBC_SHA",
	"TLS_RSA_WITH_AES_128_CBC_SHA",
	"TLS_RSA_WITH_AES_256_CBC_SHA",
	"TLS_RSA_WITH_AES_128_GCM_SHA256",
	"TLS_RSA_WITH_AES_256_GCM_SHA384",
	"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
	"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
	"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
	"TLS_ECDHE_RSA_WITH_RC4_128_SHA",
	"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
	"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
	"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
	"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
	"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
	"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
	"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
	"TLS_FALLBACK_SCSV",
	"TLS_RSA_WITH_AES_128_CBC_SHA256",
	"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
	"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
	"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
	"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
	"TLS_AES_128_GCM_SHA256",
	"TLS_AES_256_GCM_SHA384",
	"TLS_CHACHA20_POLY1305_SHA256",
}

// TestCZSocketGetSecurityValueTLS dials over TLS and checks that the client
// socket's SocketData.Security is a *credentials.TLSChannelzSecurityValue
// whose RemoteCertificate matches the server's cert and whose StandardName is
// one of the known cipher suites above.
func (s) TestCZSocketGetSecurityValueTLS(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer czCleanupWrapper(czCleanup, t)
	e := tcpTLSRREnv
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	te.clientConn()
	if err := verifyResultWithDelay(func() (bool, error) {
		tchan, _ := channelz.GetTopChannels(0, 0)
		if len(tchan) != 1 {
			return false, fmt.Errorf("there should only be one top channel, not %d", len(tchan))
		}
		if len(tchan[0].SubChans) != 1 {
			return false, fmt.Errorf("there should only be one subchannel under top channel %d, not %d", tchan[0].ID, len(tchan[0].SubChans))
		}
		var id int64
		for id = range tchan[0].SubChans {
			break
		}
		sc := channelz.GetSubChannel(id)
		if sc == nil {
			return false, fmt.Errorf("there should only be one socket under subchannel %d, not 0", id)
		}
		if len(sc.Sockets) != 1 {
			return false, fmt.Errorf("there should only be one socket under subchannel %d, not %d", sc.ID, len(sc.Sockets))
		}
		for id = range sc.Sockets {
			break
		}
		skt := channelz.GetSocket(id)
		cert, _ := tls.LoadX509KeyPair(testdata.Path("server1.pem"), testdata.Path("server1.key"))
		securityVal, ok := skt.SocketData.Security.(*credentials.TLSChannelzSecurityValue)
		if !ok {
			return false, fmt.Errorf("the SocketData.Security is of type: %T, want: *credentials.TLSChannelzSecurityValue", skt.SocketData.Security)
		}
		if !reflect.DeepEqual(securityVal.RemoteCertificate, cert.Certificate[0]) {
			return false, fmt.Errorf("SocketData.Security.RemoteCertificate got: %v, want: %v", securityVal.RemoteCertificate, cert.Certificate[0])
		}
		for _, v := range cipherSuites {
			if v == securityVal.StandardName {
				return true, nil
			}
		}
		return false, fmt.Errorf("SocketData.Security.StandardName got: %v, want it to be one of %v", securityVal.StandardName, cipherSuites)
	}); err != nil {
		t.Fatal(err)
	}
}

// TestCZChannelTraceCreationDeletion points the channel at a grpclb balancer
// address (creating a nested channel), verifies the nested channel's creation
// trace events, then switches to round_robin via service config and verifies
// the nested channel's deletion trace while it is still retained by the
// parent's trace reference.
func (s) TestCZChannelTraceCreationDeletion(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer czCleanupWrapper(czCleanup, t)
	e := tcpClearRREnv
	// avoid calling API to set balancer type, which will void service config's change of balancer.
	e.balancer = ""
	te := newTest(t, e)
	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()
	resolvedAddrs := []resolver.Address{{Addr: "127.0.0.1:0", Type: resolver.GRPCLB, ServerName: "grpclb.server"}}
	r.InitialState(resolver.State{Addresses: resolvedAddrs})
	te.resolverScheme = r.Scheme()
	te.clientConn()
	defer te.tearDown()
	var nestedConn int64
	if err := verifyResultWithDelay(func() (bool, error) {
		tcs, _ := channelz.GetTopChannels(0, 0)
		if len(tcs) != 1 {
			return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs))
		}
		if len(tcs[0].NestedChans) != 1 {
			return false, fmt.Errorf("there should be one nested channel from grpclb, not %d", len(tcs[0].NestedChans))
		}
		for k := range tcs[0].NestedChans {
			nestedConn = k
		}
		for _, e := range tcs[0].Trace.Events {
			if e.RefID == nestedConn && e.RefType != channelz.RefChannel {
				return false, fmt.Errorf("nested channel trace event shoud have RefChannel as RefType")
			}
		}
		ncm := channelz.GetChannel(nestedConn)
		if ncm.Trace == nil {
			return false, fmt.Errorf("trace for nested channel should not be empty")
		}
		if len(ncm.Trace.Events) == 0 {
			return false, fmt.Errorf("there should be at least one trace event for nested channel not 0")
		}
		if ncm.Trace.Events[0].Desc != "Channel Created" {
			return false, fmt.Errorf("the first trace event should be \"Channel Created\", not %q", ncm.Trace.Events[0].Desc)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)})
	// wait for the shutdown of grpclb balancer
	if err := verifyResultWithDelay(func() (bool, error) {
		tcs, _ := channelz.GetTopChannels(0, 0)
		if len(tcs) != 1 {
			return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs))
		}
		if len(tcs[0].NestedChans) != 0 {
			return false, fmt.Errorf("there should be 0 nested channel from grpclb, not %d", len(tcs[0].NestedChans))
		}
		ncm := channelz.GetChannel(nestedConn)
		if ncm == nil {
			return false, fmt.Errorf("nested channel should still exist due to parent's trace reference")
		}
		if ncm.Trace == nil {
			return false, fmt.Errorf("trace for nested channel should not be empty")
		}
		if len(ncm.Trace.Events) == 0 {
			return false, fmt.Errorf("there should be at least one trace event for nested channel not 0")
		}
		if ncm.Trace.Events[len(ncm.Trace.Events)-1].Desc != "Channel Deleted" {
			return false, fmt.Errorf("the first trace event should be \"Channel Deleted\", not %q", ncm.Trace.Events[0].Desc)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
}

// TestCZSubChannelTraceCreationDeletion — NOTE(review): this function runs
// past the end of the visible chunk (the text below is truncated mid string
// literal); the remainder continues in the next chunk of the file.
func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) {
	czCleanup := channelz.NewChannelzStorage()
	defer czCleanupWrapper(czCleanup, t)
	e := tcpClearRREnv
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	r, cleanup := manual.GenerateAndRegisterManualResolver()
	defer cleanup()
	r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: te.srvAddr}}})
	te.resolverScheme = r.Scheme()
	te.clientConn()
	defer te.tearDown()
	var subConn int64
	// Here, we just wait for all sockets to be up. In the future, if we implement
	// IDLE, we may need to make several rpc calls to create the sockets.
	if err := verifyResultWithDelay(func() (bool, error) {
		tcs, _ := channelz.GetTopChannels(0, 0)
		if len(tcs) != 1 {
			return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs))
		}
		if len(tcs[0].SubChans) != 1 {
			return false, fmt.Errorf("there should be 1 subchannel not %d", len(tcs[0].SubChans))
		}
		for k := range tcs[0].SubChans {
			subConn = k
		}
		for _, e := range tcs[0].Trace.Events {
			if e.RefID == subConn && e.RefType != channelz.RefSubChannel {
				return false, fmt.Errorf("subchannel trace event shoud have RefType to be RefSubChannel")
			}
		}
		scm := channelz.GetSubChannel(subConn)
		if scm == nil {
			return false, fmt.Errorf("subChannel does not exist")
		}
		if scm.Trace == nil {
			return false, fmt.Errorf("trace for subChannel should not be empty")
		}
		if len(scm.Trace.Events) == 0 {
			return false, fmt.Errorf("there should be at least one trace event for subChannel not 0")
		}
		if scm.Trace.Events[0].Desc != "Subchannel Created" {
			return false, fmt.Errorf("the first trace event should be \"Subchannel Created\", not %q", scm.Trace.Events[0].Desc)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}
	// Wait for ready
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	for src := te.cc.GetState(); src != connectivity.Ready; src = te.cc.GetState() {
		if !te.cc.WaitForStateChange(ctx, src) {
			t.Fatalf("timed out waiting for state change. got %v; want %v", src, connectivity.Ready)
		}
	}
	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "fake address"}}})
	// Wait for not-ready.
	for src := te.cc.GetState(); src == connectivity.Ready; src = te.cc.GetState() {
		if !te.cc.WaitForStateChange(ctx, src) {
			t.Fatalf("timed out waiting for state change.
got %v; want !%v", src, connectivity.Ready) } } if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs)) } if len(tcs[0].SubChans) != 1 { return false, fmt.Errorf("there should be 1 subchannel not %d", len(tcs[0].SubChans)) } scm := channelz.GetSubChannel(subConn) if scm == nil { return false, fmt.Errorf("subChannel should still exist due to parent's trace reference") } if scm.Trace == nil { return false, fmt.Errorf("trace for SubChannel should not be empty") } if len(scm.Trace.Events) == 0 { return false, fmt.Errorf("there should be at least one trace event for subChannel not 0") } if got, want := scm.Trace.Events[len(scm.Trace.Events)-1].Desc, "Subchannel Deleted"; got != want { return false, fmt.Errorf("the last trace event should be %q, not %q", want, got) } return true, nil }); err != nil { t.Fatal(err) } } func (s) TestCZChannelAddressResolutionChange(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv e.balancer = "" te := newTest(t, e) te.startServer(&testServer{security: e.security}) r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() addrs := []resolver.Address{{Addr: te.srvAddr}} r.InitialState(resolver.State{Addresses: addrs}) te.resolverScheme = r.Scheme() te.clientConn() defer te.tearDown() var cid int64 // Here, we just wait for all sockets to be up. In the future, if we implement // IDLE, we may need to make several rpc calls to create the sockets. 
if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs)) } cid = tcs[0].ID for i := len(tcs[0].Trace.Events) - 1; i >= 0; i-- { if strings.Contains(tcs[0].Trace.Events[i].Desc, "resolver returned new addresses") { break } if i == 0 { return false, fmt.Errorf("events do not contain expected address resolution from empty address state. Got: %+v", tcs[0].Trace.Events) } } return true, nil }); err != nil { t.Fatal(err) } r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) if err := verifyResultWithDelay(func() (bool, error) { cm := channelz.GetChannel(cid) for i := len(cm.Trace.Events) - 1; i >= 0; i-- { if cm.Trace.Events[i].Desc == fmt.Sprintf("Channel switches to new LB policy %q", roundrobin.Name) { break } if i == 0 { return false, fmt.Errorf("events do not contain expected address resolution change of LB policy") } } return true, nil }); err != nil { t.Fatal(err) } newSC := parseCfg(r, `{ "methodConfig": [ { "name": [ { "service": "grpc.testing.TestService", "method": "EmptyCall" } ], "waitForReady": false, "timeout": ".001s" } ] }`) r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: newSC}) if err := verifyResultWithDelay(func() (bool, error) { cm := channelz.GetChannel(cid) var es []string for i := len(cm.Trace.Events) - 1; i >= 0; i-- { if strings.Contains(cm.Trace.Events[i].Desc, "service config updated") { break } es = append(es, cm.Trace.Events[i].Desc) if i == 0 { return false, fmt.Errorf("events do not contain expected address resolution of new service config\n Events:\n%v", strings.Join(es, "\n")) } } return true, nil }); err != nil { t.Fatal(err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{}, ServiceConfig: newSC}) if err := verifyResultWithDelay(func() (bool, error) { cm := channelz.GetChannel(cid) for i := 
len(cm.Trace.Events) - 1; i >= 0; i-- { if strings.Contains(cm.Trace.Events[i].Desc, "resolver returned an empty address list") { break } if i == 0 { return false, fmt.Errorf("events do not contain expected address resolution of empty address") } } return true, nil }); err != nil { t.Fatal(err) } } func (s) TestCZSubChannelPickedNewAddress(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv e.balancer = "" te := newTest(t, e) te.startServers(&testServer{security: e.security}, 3) r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() var svrAddrs []resolver.Address for _, a := range te.srvAddrs { svrAddrs = append(svrAddrs, resolver.Address{Addr: a}) } r.InitialState(resolver.State{Addresses: svrAddrs}) te.resolverScheme = r.Scheme() cc := te.clientConn() defer te.tearDown() tc := testpb.NewTestServiceClient(cc) // make sure the connection is up ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } te.srvs[0].Stop() te.srvs[1].Stop() // Here, we just wait for all sockets to be up. In the future, if we implement // IDLE, we may need to make several rpc calls to create the sockets. 
if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs)) } if len(tcs[0].SubChans) != 1 { return false, fmt.Errorf("there should be 1 subchannel not %d", len(tcs[0].SubChans)) } var subConn int64 for k := range tcs[0].SubChans { subConn = k } scm := channelz.GetSubChannel(subConn) if scm.Trace == nil { return false, fmt.Errorf("trace for SubChannel should not be empty") } if len(scm.Trace.Events) == 0 { return false, fmt.Errorf("there should be at least one trace event for subChannel not 0") } for i := len(scm.Trace.Events) - 1; i >= 0; i-- { if scm.Trace.Events[i].Desc == fmt.Sprintf("Subchannel picks a new address %q to connect", te.srvAddrs[2]) { break } if i == 0 { return false, fmt.Errorf("events do not contain expected address resolution of subchannel picked new address") } } return true, nil }); err != nil { t.Fatal(err) } } func (s) TestCZSubChannelConnectivityState(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) te.startServer(&testServer{security: e.security}) r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: te.srvAddr}}}) te.resolverScheme = r.Scheme() cc := te.clientConn() defer te.tearDown() tc := testpb.NewTestServiceClient(cc) // make sure the connection is up ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } var subConn int64 te.srv.Stop() if err := verifyResultWithDelay(func() (bool, error) { // we need to obtain the SubChannel id before it gets deleted from Channel's children list (due // to effect of r.UpdateState(resolver.State{Addresses:[]resolver.Address{}})) if subConn == 
0 { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs)) } if len(tcs[0].SubChans) != 1 { return false, fmt.Errorf("there should be 1 subchannel not %d", len(tcs[0].SubChans)) } for k := range tcs[0].SubChans { // get the SubChannel id for further trace inquiry. subConn = k } } scm := channelz.GetSubChannel(subConn) if scm == nil { return false, fmt.Errorf("subChannel should still exist due to parent's trace reference") } if scm.Trace == nil { return false, fmt.Errorf("trace for SubChannel should not be empty") } if len(scm.Trace.Events) == 0 { return false, fmt.Errorf("there should be at least one trace event for subChannel not 0") } var ready, connecting, transient, shutdown int for _, e := range scm.Trace.Events { if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.TransientFailure) { transient++ } } // Make sure the SubChannel has already seen transient failure before shutting it down through // r.UpdateState(resolver.State{Addresses:[]resolver.Address{}}). 
if transient == 0 { return false, fmt.Errorf("transient failure has not happened on SubChannel yet") } transient = 0 r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "fake address"}}}) for _, e := range scm.Trace.Events { if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Ready) { ready++ } if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Connecting) { connecting++ } if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.TransientFailure) { transient++ } if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Shutdown) { shutdown++ } } // example: // Subchannel Created // Subchannel's connectivity state changed to CONNECTING // Subchannel picked a new address: "localhost:36011" // Subchannel's connectivity state changed to READY // Subchannel's connectivity state changed to TRANSIENT_FAILURE // Subchannel's connectivity state changed to CONNECTING // Subchannel picked a new address: "localhost:36011" // Subchannel's connectivity state changed to SHUTDOWN // Subchannel Deleted if ready != 1 || connecting < 1 || transient < 1 || shutdown != 1 { return false, fmt.Errorf("got: ready = %d, connecting = %d, transient = %d, shutdown = %d, want: 1, >=1, >=1, 1", ready, connecting, transient, shutdown) } return true, nil }); err != nil { t.Fatal(err) } } func (s) TestCZChannelConnectivityState(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) te.startServer(&testServer{security: e.security}) r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: te.srvAddr}}}) te.resolverScheme = r.Scheme() cc := te.clientConn() defer te.tearDown() tc := testpb.NewTestServiceClient(cc) // make sure the connection is up ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if _, 
err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } te.srv.Stop() if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs)) } var ready, connecting, transient int for _, e := range tcs[0].Trace.Events { if e.Desc == fmt.Sprintf("Channel Connectivity change to %v", connectivity.Ready) { ready++ } if e.Desc == fmt.Sprintf("Channel Connectivity change to %v", connectivity.Connecting) { connecting++ } if e.Desc == fmt.Sprintf("Channel Connectivity change to %v", connectivity.TransientFailure) { transient++ } } // example: // Channel Created // Adressses resolved (from empty address state): "localhost:40467" // SubChannel (id: 4[]) Created // Channel's connectivity state changed to CONNECTING // Channel's connectivity state changed to READY // Channel's connectivity state changed to TRANSIENT_FAILURE // Channel's connectivity state changed to CONNECTING // Channel's connectivity state changed to TRANSIENT_FAILURE if ready != 1 || connecting < 1 || transient < 1 { return false, fmt.Errorf("got: ready = %d, connecting = %d, transient = %d, want: 1, >=1, >=1", ready, connecting, transient) } return true, nil }); err != nil { t.Fatal(err) } } func (s) TestCZTraceOverwriteChannelDeletion(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv // avoid newTest using WithBalancer, which would override service config's change of balancer below. 
e.balancer = "" te := newTest(t, e) channelz.SetMaxTraceEntry(1) defer channelz.ResetMaxTraceEntryToDefault() r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() resolvedAddrs := []resolver.Address{{Addr: "127.0.0.1:0", Type: resolver.GRPCLB, ServerName: "grpclb.server"}} r.InitialState(resolver.State{Addresses: resolvedAddrs}) te.resolverScheme = r.Scheme() te.clientConn() defer te.tearDown() var nestedConn int64 if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs)) } if len(tcs[0].NestedChans) != 1 { return false, fmt.Errorf("there should be one nested channel from grpclb, not %d", len(tcs[0].NestedChans)) } for k := range tcs[0].NestedChans { nestedConn = k } return true, nil }); err != nil { t.Fatal(err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) // wait for the shutdown of grpclb balancer if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs)) } if len(tcs[0].NestedChans) != 0 { return false, fmt.Errorf("there should be 0 nested channel from grpclb, not %d", len(tcs[0].NestedChans)) } return true, nil }); err != nil { t.Fatal(err) } // If nested channel deletion is last trace event before the next validation, it will fail, as the top channel will hold a reference to it. // This line forces a trace event on the top channel in that case. r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) // verify that the nested channel no longer exist due to trace referencing it got overwritten. 
if err := verifyResultWithDelay(func() (bool, error) { cm := channelz.GetChannel(nestedConn) if cm != nil { return false, fmt.Errorf("nested channel should have been deleted since its parent's trace should not contain any reference to it anymore") } return true, nil }); err != nil { t.Fatal(err) } } func (s) TestCZTraceOverwriteSubChannelDeletion(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) channelz.SetMaxTraceEntry(1) defer channelz.ResetMaxTraceEntryToDefault() te.startServer(&testServer{security: e.security}) r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: te.srvAddr}}}) te.resolverScheme = r.Scheme() te.clientConn() defer te.tearDown() var subConn int64 // Here, we just wait for all sockets to be up. In the future, if we implement // IDLE, we may need to make several rpc calls to create the sockets. if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs)) } if len(tcs[0].SubChans) != 1 { return false, fmt.Errorf("there should be 1 subchannel not %d", len(tcs[0].SubChans)) } for k := range tcs[0].SubChans { subConn = k } return true, nil }); err != nil { t.Fatal(err) } // Wait for ready ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() for src := te.cc.GetState(); src != connectivity.Ready; src = te.cc.GetState() { if !te.cc.WaitForStateChange(ctx, src) { t.Fatalf("timed out waiting for state change. got %v; want %v", src, connectivity.Ready) } } r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "fake address"}}}) // Wait for not-ready. 
for src := te.cc.GetState(); src == connectivity.Ready; src = te.cc.GetState() { if !te.cc.WaitForStateChange(ctx, src) { t.Fatalf("timed out waiting for state change. got %v; want !%v", src, connectivity.Ready) } } // verify that the subchannel no longer exist due to trace referencing it got overwritten. if err := verifyResultWithDelay(func() (bool, error) { cm := channelz.GetChannel(subConn) if cm != nil { return false, fmt.Errorf("subchannel should have been deleted since its parent's trace should not contain any reference to it anymore") } return true, nil }); err != nil { t.Fatal(err) } } func (s) TestCZTraceTopChannelDeletionTraceClear(t *testing.T) { czCleanup := channelz.NewChannelzStorage() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) te.startServer(&testServer{security: e.security}) r, cleanup := manual.GenerateAndRegisterManualResolver() defer cleanup() r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: te.srvAddr}}}) te.resolverScheme = r.Scheme() te.clientConn() var subConn int64 // Here, we just wait for all sockets to be up. In the future, if we implement // IDLE, we may need to make several rpc calls to create the sockets. if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { return false, fmt.Errorf("there should only be one top channel, not %d", len(tcs)) } if len(tcs[0].SubChans) != 1 { return false, fmt.Errorf("there should be 1 subchannel not %d", len(tcs[0].SubChans)) } for k := range tcs[0].SubChans { subConn = k } return true, nil }); err != nil { t.Fatal(err) } te.tearDown() // verify that the subchannel no longer exist due to parent channel got deleted and its trace cleared. 
if err := verifyResultWithDelay(func() (bool, error) { cm := channelz.GetChannel(subConn) if cm != nil { return false, fmt.Errorf("subchannel should have been deleted since its parent's trace should not contain any reference to it anymore") } return true, nil }); err != nil { t.Fatal(err) } } grpc-go-1.29.1/test/codec_perf/000077500000000000000000000000001365033716300162215ustar00rootroot00000000000000grpc-go-1.29.1/test/codec_perf/perf.pb.go000066400000000000000000000051171365033716300201100ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: codec_perf/perf.proto package codec_perf import ( fmt "fmt" proto "github.com/golang/protobuf/proto" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // Buffer is a message that contains a body of bytes that is used to exercise // encoding and decoding overheads. 
type Buffer struct { Body []byte `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Buffer) Reset() { *m = Buffer{} } func (m *Buffer) String() string { return proto.CompactTextString(m) } func (*Buffer) ProtoMessage() {} func (*Buffer) Descriptor() ([]byte, []int) { return fileDescriptor_afad72ea7772fe3a, []int{0} } func (m *Buffer) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Buffer.Unmarshal(m, b) } func (m *Buffer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Buffer.Marshal(b, m, deterministic) } func (m *Buffer) XXX_Merge(src proto.Message) { xxx_messageInfo_Buffer.Merge(m, src) } func (m *Buffer) XXX_Size() int { return xxx_messageInfo_Buffer.Size(m) } func (m *Buffer) XXX_DiscardUnknown() { xxx_messageInfo_Buffer.DiscardUnknown(m) } var xxx_messageInfo_Buffer proto.InternalMessageInfo func (m *Buffer) GetBody() []byte { if m != nil { return m.Body } return nil } func init() { proto.RegisterType((*Buffer)(nil), "codec.perf.Buffer") } func init() { proto.RegisterFile("codec_perf/perf.proto", fileDescriptor_afad72ea7772fe3a) } var fileDescriptor_afad72ea7772fe3a = []byte{ // 83 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0xce, 0x4f, 0x49, 0x4d, 0x8e, 0x2f, 0x48, 0x2d, 0x4a, 0xd3, 0x07, 0x11, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x5c, 0x60, 0x61, 0x3d, 0x90, 0x88, 0x92, 0x0c, 0x17, 0x9b, 0x53, 0x69, 0x5a, 0x5a, 0x6a, 0x91, 0x90, 0x10, 0x17, 0x4b, 0x52, 0x7e, 0x4a, 0xa5, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, 0x9d, 0xc4, 0x06, 0xd6, 0x60, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x5f, 0x4f, 0x3c, 0x49, 0x00, 0x00, 0x00, } grpc-go-1.29.1/test/codec_perf/perf.proto000066400000000000000000000015741365033716300202510ustar00rootroot00000000000000// Copyright 2017 gRPC authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Messages used for performance tests that may not reference grpc directly for // reasons of import cycles. syntax = "proto3"; package codec.perf; // Buffer is a message that contains a body of bytes that is used to exercise // encoding and decoding overheads. message Buffer { bytes body = 1; } grpc-go-1.29.1/test/context_canceled_test.go000066400000000000000000000107631365033716300210270ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package test import ( "context" "testing" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" testpb "google.golang.org/grpc/test/grpc_testing" ) func (s) TestContextCanceled(t *testing.T) { ss := &stubServer{ fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { stream.SetTrailer(metadata.New(map[string]string{"a": "b"})) return status.Error(codes.PermissionDenied, "perm denied") }, } if err := ss.Start(nil); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() // Runs 10 rounds of tests with the given delay and returns counts of status codes. // Fails in case of trailer/status code inconsistency. const cntRetry uint = 10 runTest := func(delay time.Duration) (cntCanceled, cntPermDenied uint) { for i := uint(0); i < cntRetry; i++ { ctx, cancel := context.WithTimeout(context.Background(), delay) defer cancel() str, err := ss.client.FullDuplexCall(ctx) if err != nil { continue } _, err = str.Recv() if err == nil { t.Fatalf("non-nil error expected from Recv()") } _, trlOk := str.Trailer()["a"] switch status.Code(err) { case codes.PermissionDenied: if !trlOk { t.Fatalf(`status err: %v; wanted key "a" in trailer but didn't get it`, err) } cntPermDenied++ case codes.DeadlineExceeded: if trlOk { t.Fatalf(`status err: %v; didn't want key "a" in trailer but got it`, err) } cntCanceled++ default: t.Fatalf(`unexpected status err: %v`, err) } } return cntCanceled, cntPermDenied } // Tries to find the delay that causes canceled/perm denied race. canceledOk, permDeniedOk := false, false for lower, upper := time.Duration(0), 2*time.Millisecond; lower <= upper; { delay := lower + (upper-lower)/2 cntCanceled, cntPermDenied := runTest(delay) if cntPermDenied > 0 && cntCanceled > 0 { // Delay that causes the race is found. return } // Set OK flags. 
if cntCanceled > 0 { canceledOk = true } if cntPermDenied > 0 { permDeniedOk = true } if cntPermDenied == 0 { // No perm denied, increase the delay. lower += (upper-lower)/10 + 1 } else { // All perm denied, decrease the delay. upper -= (upper-lower)/10 + 1 } } if !canceledOk || !permDeniedOk { t.Fatalf(`couldn't find the delay that causes canceled/perm denied race.`) } } // To make sure that canceling a stream with compression enabled won't result in // internal error, compressed flag set with identity or empty encoding. // // The root cause is a select race on stream headerChan and ctx. Stream gets // whether compression is enabled and the compression type from two separate // functions, both include select with context. If the `case non-ctx:` wins the // first one, but `case ctx.Done()` wins the second one, the compression info // will be inconsistent, and it causes internal error. func (s) TestCancelWhileRecvingWithCompression(t *testing.T) { ss := &stubServer{ fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { for { if err := stream.Send(&testpb.StreamingOutputCallResponse{ Payload: nil, }); err != nil { return err } } }, } if err := ss.Start(nil); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() for i := 0; i < 10; i++ { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) s, err := ss.client.FullDuplexCall(ctx, grpc.UseCompressor(gzip.Name)) if err != nil { t.Fatalf("failed to start bidi streaming RPC: %v", err) } // Cancel the stream while receiving to trigger the internal error. time.AfterFunc(time.Millisecond, cancel) for { _, err := s.Recv() if err != nil { if status.Code(err) != codes.Canceled { t.Fatalf("recv failed with %v, want Canceled", err) } break } } } } grpc-go-1.29.1/test/creds_test.go000066400000000000000000000072351365033716300166250ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package test // TODO(https://github.com/grpc/grpc-go/issues/2330): move all creds related // tests to this file. import ( "context" "testing" "google.golang.org/grpc" "google.golang.org/grpc/credentials" testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" ) const ( bundlePerRPCOnly = "perRPCOnly" bundleTLSOnly = "tlsOnly" ) type testCredsBundle struct { t *testing.T mode string } func (c *testCredsBundle) TransportCredentials() credentials.TransportCredentials { if c.mode == bundlePerRPCOnly { return nil } creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com") if err != nil { c.t.Logf("Failed to load credentials: %v", err) return nil } return creds } func (c *testCredsBundle) PerRPCCredentials() credentials.PerRPCCredentials { if c.mode == bundleTLSOnly { return nil } return testPerRPCCredentials{} } func (c *testCredsBundle) NewWithMode(mode string) (credentials.Bundle, error) { return &testCredsBundle{mode: mode}, nil } func (s) TestCredsBundleBoth(t *testing.T) { te := newTest(t, env{name: "creds-bundle", network: "tcp", balancer: "v1", security: "empty"}) te.tapHandle = authHandle te.customDialOptions = []grpc.DialOption{ grpc.WithCredentialsBundle(&testCredsBundle{t: t}), } creds, err := credentials.NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key")) if err != nil { t.Fatalf("Failed to generate credentials %v", err) } 
te.customServerOptions = []grpc.ServerOption{ grpc.Creds(creds), } te.startServer(&testServer{}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("Test failed. Reason: %v", err) } } func (s) TestCredsBundleTransportCredentials(t *testing.T) { te := newTest(t, env{name: "creds-bundle", network: "tcp", balancer: "v1", security: "empty"}) te.customDialOptions = []grpc.DialOption{ grpc.WithCredentialsBundle(&testCredsBundle{t: t, mode: bundleTLSOnly}), } creds, err := credentials.NewServerTLSFromFile(testdata.Path("server1.pem"), testdata.Path("server1.key")) if err != nil { t.Fatalf("Failed to generate credentials %v", err) } te.customServerOptions = []grpc.ServerOption{ grpc.Creds(creds), } te.startServer(&testServer{}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("Test failed. Reason: %v", err) } } func (s) TestCredsBundlePerRPCCredentials(t *testing.T) { te := newTest(t, env{name: "creds-bundle", network: "tcp", balancer: "v1", security: "empty"}) te.tapHandle = authHandle te.customDialOptions = []grpc.DialOption{ grpc.WithCredentialsBundle(&testCredsBundle{t: t, mode: bundlePerRPCOnly}), } te.startServer(&testServer{}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("Test failed. Reason: %v", err) } } grpc-go-1.29.1/test/end2end_test.go000066400000000000000000007161461365033716300170540ustar00rootroot00000000000000/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ //go:generate protoc --go_out=plugins=grpc:. codec_perf/perf.proto //go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto package test import ( "bufio" "bytes" "compress/gzip" "context" "crypto/tls" "errors" "flag" "fmt" "io" "math" "net" "net/http" "os" "reflect" "runtime" "strings" "sync" "sync/atomic" "syscall" "testing" "time" "github.com/golang/protobuf/proto" anypb "github.com/golang/protobuf/ptypes/any" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" _ "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/health" healthgrpc "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" ) const defaultHealthService = "grpc.health.v1.Health" func init() { 
channelz.TurnOn() } type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } var ( // For headers: testMetadata = metadata.MD{ "key1": []string{"value1"}, "key2": []string{"value2"}, "key3-bin": []string{"binvalue1", string([]byte{1, 2, 3})}, } testMetadata2 = metadata.MD{ "key1": []string{"value12"}, "key2": []string{"value22"}, } // For trailers: testTrailerMetadata = metadata.MD{ "tkey1": []string{"trailerValue1"}, "tkey2": []string{"trailerValue2"}, "tkey3-bin": []string{"trailerbinvalue1", string([]byte{3, 2, 1})}, } testTrailerMetadata2 = metadata.MD{ "tkey1": []string{"trailerValue12"}, "tkey2": []string{"trailerValue22"}, } // capital "Key" is illegal in HTTP/2. malformedHTTP2Metadata = metadata.MD{ "Key": []string{"foo"}, } testAppUA = "myApp1/1.0 myApp2/0.9" failAppUA = "fail-this-RPC" detailedError = status.ErrorProto(&spb.Status{ Code: int32(codes.DataLoss), Message: "error for testing: " + failAppUA, Details: []*anypb.Any{{ TypeUrl: "url", Value: []byte{6, 0, 0, 6, 1, 3}, }}, }) ) var raceMode bool // set by race.go in race mode type testServer struct { testpb.UnimplementedTestServiceServer security string // indicate the authentication protocol used by this server. earlyFail bool // whether to error out the execution of a service handler prematurely. setAndSendHeader bool // whether to call setHeader and sendHeader. setHeaderOnly bool // whether to only call setHeader, not sendHeader. multipleSetTrailer bool // whether to call setTrailer multiple times. unaryCallSleepTime time.Duration } func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { if md, ok := metadata.FromIncomingContext(ctx); ok { // For testing purpose, returns an error if user-agent is failAppUA. // To test that client gets the correct error. 
if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) { return nil, detailedError } var str []string for _, entry := range md["user-agent"] { str = append(str, "ua", entry) } grpc.SendHeader(ctx, metadata.Pairs(str...)) } return new(testpb.Empty), nil } func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) { if size < 0 { return nil, fmt.Errorf("requested a response with invalid length %d", size) } body := make([]byte, size) switch t { case testpb.PayloadType_COMPRESSABLE: case testpb.PayloadType_UNCOMPRESSABLE: return nil, fmt.Errorf("PayloadType UNCOMPRESSABLE is not supported") default: return nil, fmt.Errorf("unsupported payload type: %d", t) } return &testpb.Payload{ Type: t, Body: body, }, nil } func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { md, ok := metadata.FromIncomingContext(ctx) if ok { if _, exists := md[":authority"]; !exists { return nil, status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md) } if s.setAndSendHeader { if err := grpc.SetHeader(ctx, md); err != nil { return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want ", md, err) } if err := grpc.SendHeader(ctx, testMetadata2); err != nil { return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want ", testMetadata2, err) } } else if s.setHeaderOnly { if err := grpc.SetHeader(ctx, md); err != nil { return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want ", md, err) } if err := grpc.SetHeader(ctx, testMetadata2); err != nil { return nil, status.Errorf(status.Code(err), "grpc.SetHeader(_, %v) = %v, want ", testMetadata2, err) } } else { if err := grpc.SendHeader(ctx, md); err != nil { return nil, status.Errorf(status.Code(err), "grpc.SendHeader(_, %v) = %v, want ", md, err) } } if err := grpc.SetTrailer(ctx, testTrailerMetadata); err != nil { return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, 
want ", testTrailerMetadata, err) } if s.multipleSetTrailer { if err := grpc.SetTrailer(ctx, testTrailerMetadata2); err != nil { return nil, status.Errorf(status.Code(err), "grpc.SetTrailer(_, %v) = %v, want ", testTrailerMetadata2, err) } } } pr, ok := peer.FromContext(ctx) if !ok { return nil, status.Error(codes.DataLoss, "failed to get peer from ctx") } if pr.Addr == net.Addr(nil) { return nil, status.Error(codes.DataLoss, "failed to get peer address") } if s.security != "" { // Check Auth info var authType, serverName string switch info := pr.AuthInfo.(type) { case credentials.TLSInfo: authType = info.AuthType() serverName = info.State.ServerName default: return nil, status.Error(codes.Unauthenticated, "Unknown AuthInfo type") } if authType != s.security { return nil, status.Errorf(codes.Unauthenticated, "Wrong auth type: got %q, want %q", authType, s.security) } if serverName != "x.test.youtube.com" { return nil, status.Errorf(codes.Unauthenticated, "Unknown server name %q", serverName) } } // Simulate some service delay. time.Sleep(s.unaryCallSleepTime) payload, err := newPayload(in.GetResponseType(), in.GetResponseSize()) if err != nil { return nil, err } return &testpb.SimpleResponse{ Payload: payload, }, nil } func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { if md, ok := metadata.FromIncomingContext(stream.Context()); ok { if _, exists := md[":authority"]; !exists { return status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md) } // For testing purpose, returns an error if user-agent is failAppUA. // To test that client gets the correct error. 
if ua, ok := md["user-agent"]; !ok || strings.HasPrefix(ua[0], failAppUA) { return status.Error(codes.DataLoss, "error for testing: "+failAppUA) } } cs := args.GetResponseParameters() for _, c := range cs { if us := c.GetIntervalUs(); us > 0 { time.Sleep(time.Duration(us) * time.Microsecond) } payload, err := newPayload(args.GetResponseType(), c.GetSize()) if err != nil { return err } if err := stream.Send(&testpb.StreamingOutputCallResponse{ Payload: payload, }); err != nil { return err } } return nil } func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { var sum int for { in, err := stream.Recv() if err == io.EOF { return stream.SendAndClose(&testpb.StreamingInputCallResponse{ AggregatedPayloadSize: int32(sum), }) } if err != nil { return err } p := in.GetPayload().GetBody() sum += len(p) if s.earlyFail { return status.Error(codes.NotFound, "not found") } } } func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { md, ok := metadata.FromIncomingContext(stream.Context()) if ok { if s.setAndSendHeader { if err := stream.SetHeader(md); err != nil { return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want ", stream, md, err) } if err := stream.SendHeader(testMetadata2); err != nil { return status.Errorf(status.Code(err), "%v.SendHeader(_, %v) = %v, want ", stream, testMetadata2, err) } } else if s.setHeaderOnly { if err := stream.SetHeader(md); err != nil { return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want ", stream, md, err) } if err := stream.SetHeader(testMetadata2); err != nil { return status.Errorf(status.Code(err), "%v.SetHeader(_, %v) = %v, want ", stream, testMetadata2, err) } } else { if err := stream.SendHeader(md); err != nil { return status.Errorf(status.Code(err), "%v.SendHeader(%v) = %v, want %v", stream, md, err, nil) } } stream.SetTrailer(testTrailerMetadata) if s.multipleSetTrailer { stream.SetTrailer(testTrailerMetadata2) } } for { 
in, err := stream.Recv() if err == io.EOF { // read done. return nil } if err != nil { // to facilitate testSvrWriteStatusEarlyWrite if status.Code(err) == codes.ResourceExhausted { return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error()) } return err } cs := in.GetResponseParameters() for _, c := range cs { if us := c.GetIntervalUs(); us > 0 { time.Sleep(time.Duration(us) * time.Microsecond) } payload, err := newPayload(in.GetResponseType(), c.GetSize()) if err != nil { return err } if err := stream.Send(&testpb.StreamingOutputCallResponse{ Payload: payload, }); err != nil { // to facilitate testSvrWriteStatusEarlyWrite if status.Code(err) == codes.ResourceExhausted { return status.Errorf(codes.Internal, "fake error for test testSvrWriteStatusEarlyWrite. true error: %s", err.Error()) } return err } } } } func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { var msgBuf []*testpb.StreamingOutputCallRequest for { in, err := stream.Recv() if err == io.EOF { // read done. break } if err != nil { return err } msgBuf = append(msgBuf, in) } for _, m := range msgBuf { cs := m.GetResponseParameters() for _, c := range cs { if us := c.GetIntervalUs(); us > 0 { time.Sleep(time.Duration(us) * time.Microsecond) } payload, err := newPayload(m.GetResponseType(), c.GetSize()) if err != nil { return err } if err := stream.Send(&testpb.StreamingOutputCallResponse{ Payload: payload, }); err != nil { return err } } } return nil } type env struct { name string network string // The type of network such as tcp, unix, etc. security string // The security protocol such as TLS, SSH, etc. httpHandler bool // whether to use the http.Handler ServerTransport; requires TLS balancer string // One of "round_robin", "pick_first", "v1", or "". 
customDialer func(string, string, time.Duration) (net.Conn, error) } func (e env) runnable() bool { if runtime.GOOS == "windows" && e.network == "unix" { return false } return true } func (e env) dialer(addr string, timeout time.Duration) (net.Conn, error) { if e.customDialer != nil { return e.customDialer(e.network, addr, timeout) } return net.DialTimeout(e.network, addr, timeout) } var ( tcpClearEnv = env{name: "tcp-clear-v1-balancer", network: "tcp", balancer: "v1"} tcpTLSEnv = env{name: "tcp-tls-v1-balancer", network: "tcp", security: "tls", balancer: "v1"} tcpClearRREnv = env{name: "tcp-clear", network: "tcp", balancer: "round_robin"} tcpTLSRREnv = env{name: "tcp-tls", network: "tcp", security: "tls", balancer: "round_robin"} handlerEnv = env{name: "handler-tls", network: "tcp", security: "tls", httpHandler: true, balancer: "round_robin"} noBalancerEnv = env{name: "no-balancer", network: "tcp", security: "tls"} allEnv = []env{tcpClearEnv, tcpTLSEnv, tcpClearRREnv, tcpTLSRREnv, handlerEnv, noBalancerEnv} ) var onlyEnv = flag.String("only_env", "", "If non-empty, one of 'tcp-clear', 'tcp-tls', 'unix-clear', 'unix-tls', or 'handler-tls' to only run the tests for that environment. Empty means all.") func listTestEnv() (envs []env) { if *onlyEnv != "" { for _, e := range allEnv { if e.name == *onlyEnv { if !e.runnable() { panic(fmt.Sprintf("--only_env environment %q does not run on %s", *onlyEnv, runtime.GOOS)) } return []env{e} } } panic(fmt.Sprintf("invalid --only_env value %q", *onlyEnv)) } for _, e := range allEnv { if e.runnable() { envs = append(envs, e) } } return envs } // test is an end-to-end test. It should be created with the newTest // func, modified as needed, and then started with its startServer method. // It should be cleaned up with the tearDown method. type test struct { // The following are setup in newTest(). 
t *testing.T e env ctx context.Context // valid for life of test, before tearDown cancel context.CancelFunc // The following knobs are for the server-side, and should be set after // calling newTest() and before calling startServer(). // whether or not to expose the server's health via the default health // service implementation. enableHealthServer bool // In almost all cases, one should set the 'enableHealthServer' flag above to // expose the server's health using the default health service // implementation. This should only be used when a non-default health service // implementation is required. healthServer healthpb.HealthServer maxStream uint32 tapHandle tap.ServerInHandle maxServerMsgSize *int maxServerReceiveMsgSize *int maxServerSendMsgSize *int maxServerHeaderListSize *uint32 // Used to test the deprecated API WithCompressor and WithDecompressor. serverCompression bool unknownHandler grpc.StreamHandler unaryServerInt grpc.UnaryServerInterceptor streamServerInt grpc.StreamServerInterceptor serverInitialWindowSize int32 serverInitialConnWindowSize int32 customServerOptions []grpc.ServerOption // The following knobs are for the client-side, and should be set after // calling newTest() and before calling clientConn(). maxClientMsgSize *int maxClientReceiveMsgSize *int maxClientSendMsgSize *int maxClientHeaderListSize *uint32 userAgent string // Used to test the deprecated API WithCompressor and WithDecompressor. clientCompression bool // Used to test the new compressor registration API UseCompressor. clientUseCompression bool // clientNopCompression is set to create a compressor whose type is not supported. 
clientNopCompression bool unaryClientInt grpc.UnaryClientInterceptor streamClientInt grpc.StreamClientInterceptor sc <-chan grpc.ServiceConfig customCodec encoding.Codec clientInitialWindowSize int32 clientInitialConnWindowSize int32 perRPCCreds credentials.PerRPCCredentials customDialOptions []grpc.DialOption resolverScheme string // All test dialing is blocking by default. Set this to true if dial // should be non-blocking. nonBlockingDial bool // These are are set once startServer is called. The common case is to have // only one testServer. srv stopper hSrv healthpb.HealthServer srvAddr string // These are are set once startServers is called. srvs []stopper hSrvs []healthpb.HealthServer srvAddrs []string cc *grpc.ClientConn // nil until requested via clientConn restoreLogs func() // nil unless declareLogNoise is used } type stopper interface { Stop() GracefulStop() } func (te *test) tearDown() { if te.cancel != nil { te.cancel() te.cancel = nil } if te.cc != nil { te.cc.Close() te.cc = nil } if te.restoreLogs != nil { te.restoreLogs() te.restoreLogs = nil } if te.srv != nil { te.srv.Stop() } for _, s := range te.srvs { s.Stop() } } // newTest returns a new test using the provided testing.T and // environment. It is returned with default values. Tests should // modify it before calling its startServer and clientConn methods. 
func newTest(t *testing.T, e env) *test { te := &test{ t: t, e: e, maxStream: math.MaxUint32, } te.ctx, te.cancel = context.WithCancel(context.Background()) return te } func (te *test) listenAndServe(ts testpb.TestServiceServer, listen func(network, address string) (net.Listener, error)) net.Listener { te.t.Logf("Running test in %s environment...", te.e.name) sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(te.maxStream)} if te.maxServerMsgSize != nil { sopts = append(sopts, grpc.MaxMsgSize(*te.maxServerMsgSize)) } if te.maxServerReceiveMsgSize != nil { sopts = append(sopts, grpc.MaxRecvMsgSize(*te.maxServerReceiveMsgSize)) } if te.maxServerSendMsgSize != nil { sopts = append(sopts, grpc.MaxSendMsgSize(*te.maxServerSendMsgSize)) } if te.maxServerHeaderListSize != nil { sopts = append(sopts, grpc.MaxHeaderListSize(*te.maxServerHeaderListSize)) } if te.tapHandle != nil { sopts = append(sopts, grpc.InTapHandle(te.tapHandle)) } if te.serverCompression { sopts = append(sopts, grpc.RPCCompressor(grpc.NewGZIPCompressor()), grpc.RPCDecompressor(grpc.NewGZIPDecompressor()), ) } if te.unaryServerInt != nil { sopts = append(sopts, grpc.UnaryInterceptor(te.unaryServerInt)) } if te.streamServerInt != nil { sopts = append(sopts, grpc.StreamInterceptor(te.streamServerInt)) } if te.unknownHandler != nil { sopts = append(sopts, grpc.UnknownServiceHandler(te.unknownHandler)) } if te.serverInitialWindowSize > 0 { sopts = append(sopts, grpc.InitialWindowSize(te.serverInitialWindowSize)) } if te.serverInitialConnWindowSize > 0 { sopts = append(sopts, grpc.InitialConnWindowSize(te.serverInitialConnWindowSize)) } la := "localhost:0" switch te.e.network { case "unix": la = "/tmp/testsock" + fmt.Sprintf("%d", time.Now().UnixNano()) syscall.Unlink(la) } lis, err := listen(te.e.network, la) if err != nil { te.t.Fatalf("Failed to listen: %v", err) } switch te.e.security { case "tls": creds, err := credentials.NewServerTLSFromFile(testdata.Path("server1.pem"), 
testdata.Path("server1.key")) if err != nil { te.t.Fatalf("Failed to generate credentials %v", err) } sopts = append(sopts, grpc.Creds(creds)) case "clientTimeoutCreds": sopts = append(sopts, grpc.Creds(&clientTimeoutCreds{})) } sopts = append(sopts, te.customServerOptions...) s := grpc.NewServer(sopts...) if ts != nil { testpb.RegisterTestServiceServer(s, ts) } // Create a new default health server if enableHealthServer is set, or use // the provided one. hs := te.healthServer if te.enableHealthServer { hs = health.NewServer() } if hs != nil { healthgrpc.RegisterHealthServer(s, hs) } addr := la switch te.e.network { case "unix": default: _, port, err := net.SplitHostPort(lis.Addr().String()) if err != nil { te.t.Fatalf("Failed to parse listener address: %v", err) } addr = "localhost:" + port } te.srv = s te.hSrv = hs te.srvAddr = addr if te.e.httpHandler { if te.e.security != "tls" { te.t.Fatalf("unsupported environment settings") } cert, err := tls.LoadX509KeyPair(testdata.Path("server1.pem"), testdata.Path("server1.key")) if err != nil { te.t.Fatal("tls.LoadX509KeyPair(server1.pem, server1.key) failed: ", err) } hs := &http.Server{ Handler: s, TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}}, } if err := http2.ConfigureServer(hs, &http2.Server{MaxConcurrentStreams: te.maxStream}); err != nil { te.t.Fatal("http2.ConfigureServer(_, _) failed: ", err) } te.srv = wrapHS{hs} tlsListener := tls.NewListener(lis, hs.TLSConfig) go hs.Serve(tlsListener) return lis } go s.Serve(lis) return lis } type wrapHS struct { s *http.Server } func (w wrapHS) GracefulStop() { w.s.Shutdown(context.Background()) } func (w wrapHS) Stop() { w.s.Close() } func (te *test) startServerWithConnControl(ts testpb.TestServiceServer) *listenerWrapper { l := te.listenAndServe(ts, listenWithConnControl) return l.(*listenerWrapper) } // startServer starts a gRPC server exposing the provided TestService // implementation. 
Callers should defer a call to te.tearDown to clean up func (te *test) startServer(ts testpb.TestServiceServer) { te.listenAndServe(ts, net.Listen) } // startServers starts 'num' gRPC servers exposing the provided TestService. func (te *test) startServers(ts testpb.TestServiceServer, num int) { for i := 0; i < num; i++ { te.startServer(ts) te.srvs = append(te.srvs, te.srv.(*grpc.Server)) te.hSrvs = append(te.hSrvs, te.hSrv) te.srvAddrs = append(te.srvAddrs, te.srvAddr) te.srv = nil te.hSrv = nil te.srvAddr = "" } } // setHealthServingStatus is a helper function to set the health status. func (te *test) setHealthServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) { hs, ok := te.hSrv.(*health.Server) if !ok { panic(fmt.Sprintf("SetServingStatus(%v, %v) called for health server of type %T", service, status, hs)) } hs.SetServingStatus(service, status) } type nopCompressor struct { grpc.Compressor } // NewNopCompressor creates a compressor to test the case that type is not supported. func NewNopCompressor() grpc.Compressor { return &nopCompressor{grpc.NewGZIPCompressor()} } func (c *nopCompressor) Type() string { return "nop" } type nopDecompressor struct { grpc.Decompressor } // NewNopDecompressor creates a decompressor to test the case that type is not supported. 
func NewNopDecompressor() grpc.Decompressor { return &nopDecompressor{grpc.NewGZIPDecompressor()} } func (d *nopDecompressor) Type() string { return "nop" } func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) { opts = append(opts, grpc.WithDialer(te.e.dialer), grpc.WithUserAgent(te.userAgent)) if te.sc != nil { opts = append(opts, grpc.WithServiceConfig(te.sc)) } if te.clientCompression { opts = append(opts, grpc.WithCompressor(grpc.NewGZIPCompressor()), grpc.WithDecompressor(grpc.NewGZIPDecompressor()), ) } if te.clientUseCompression { opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor("gzip"))) } if te.clientNopCompression { opts = append(opts, grpc.WithCompressor(NewNopCompressor()), grpc.WithDecompressor(NewNopDecompressor()), ) } if te.unaryClientInt != nil { opts = append(opts, grpc.WithUnaryInterceptor(te.unaryClientInt)) } if te.streamClientInt != nil { opts = append(opts, grpc.WithStreamInterceptor(te.streamClientInt)) } if te.maxClientMsgSize != nil { opts = append(opts, grpc.WithMaxMsgSize(*te.maxClientMsgSize)) } if te.maxClientReceiveMsgSize != nil { opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(*te.maxClientReceiveMsgSize))) } if te.maxClientSendMsgSize != nil { opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(*te.maxClientSendMsgSize))) } if te.maxClientHeaderListSize != nil { opts = append(opts, grpc.WithMaxHeaderListSize(*te.maxClientHeaderListSize)) } switch te.e.security { case "tls": creds, err := credentials.NewClientTLSFromFile(testdata.Path("ca.pem"), "x.test.youtube.com") if err != nil { te.t.Fatalf("Failed to load credentials: %v", err) } opts = append(opts, grpc.WithTransportCredentials(creds)) case "clientTimeoutCreds": opts = append(opts, grpc.WithTransportCredentials(&clientTimeoutCreds{})) case "empty": // Don't add any transport creds option. default: opts = append(opts, grpc.WithInsecure()) } // TODO(bar) switch balancer case "pick_first". 
var scheme string if te.resolverScheme == "" { scheme = "passthrough:///" } else { scheme = te.resolverScheme + ":///" } switch te.e.balancer { case "v1": opts = append(opts, grpc.WithBalancer(grpc.RoundRobin(nil))) case "round_robin": opts = append(opts, grpc.WithBalancerName(roundrobin.Name)) } if te.clientInitialWindowSize > 0 { opts = append(opts, grpc.WithInitialWindowSize(te.clientInitialWindowSize)) } if te.clientInitialConnWindowSize > 0 { opts = append(opts, grpc.WithInitialConnWindowSize(te.clientInitialConnWindowSize)) } if te.perRPCCreds != nil { opts = append(opts, grpc.WithPerRPCCredentials(te.perRPCCreds)) } if te.customCodec != nil { opts = append(opts, grpc.WithDefaultCallOptions(grpc.ForceCodec(te.customCodec))) } if !te.nonBlockingDial && te.srvAddr != "" { // Only do a blocking dial if server is up. opts = append(opts, grpc.WithBlock()) } if te.srvAddr == "" { te.srvAddr = "client.side.only.test" } opts = append(opts, te.customDialOptions...) return opts, scheme } func (te *test) clientConnWithConnControl() (*grpc.ClientConn, *dialerWrapper) { if te.cc != nil { return te.cc, nil } opts, scheme := te.configDial() dw := &dialerWrapper{} // overwrite the dialer before opts = append(opts, grpc.WithDialer(dw.dialer)) var err error te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...) if err != nil { te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err) } return te.cc, dw } func (te *test) clientConn(opts ...grpc.DialOption) *grpc.ClientConn { if te.cc != nil { return te.cc } var scheme string opts, scheme = te.configDial(opts...) var err error te.cc, err = grpc.Dial(scheme+te.srvAddr, opts...) if err != nil { te.t.Fatalf("Dial(%q) = %v", scheme+te.srvAddr, err) } return te.cc } func (te *test) declareLogNoise(phrases ...string) { te.restoreLogs = declareLogNoise(te.t, phrases...) 
} func (te *test) withServerTester(fn func(st *serverTester)) { c, err := te.e.dialer(te.srvAddr, 10*time.Second) if err != nil { te.t.Fatal(err) } defer c.Close() if te.e.security == "tls" { c = tls.Client(c, &tls.Config{ InsecureSkipVerify: true, NextProtos: []string{http2.NextProtoTLS}, }) } st := newServerTesterFromConn(te.t, c) st.greet() fn(st) } type lazyConn struct { net.Conn beLazy int32 } func (l *lazyConn) Write(b []byte) (int, error) { if atomic.LoadInt32(&(l.beLazy)) == 1 { time.Sleep(time.Second) } return l.Conn.Write(b) } func (s) TestContextDeadlineNotIgnored(t *testing.T) { e := noBalancerEnv var lc *lazyConn e.customDialer = func(network, addr string, timeout time.Duration) (net.Conn, error) { conn, err := net.DialTimeout(network, addr, timeout) if err != nil { return nil, err } lc = &lazyConn{Conn: conn} return lc, nil } te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } atomic.StoreInt32(&(lc.beLazy), 1) ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) defer cancel() t1 := time.Now() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, context.DeadlineExceeded", err) } if time.Since(t1) > 2*time.Second { t.Fatalf("TestService/EmptyCall(_, _) ran over the deadline") } } func (s) TestTimeoutOnDeadServer(t *testing.T) { for _, e := range listTestEnv() { testTimeoutOnDeadServer(t, e) } } func testTimeoutOnDeadServer(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( "transport: http2Client.notifyError got notified that the client transport was broken EOF", "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", "grpc: 
addrConn.resetTransport failed to create client transport: connection error", ) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } te.srv.Stop() // Wait for the client to notice the connection is gone. ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) state := cc.GetState() for ; state == connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() { } cancel() if state == connectivity.Ready { t.Fatalf("Timed out waiting for non-ready state") } ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond) _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)) cancel() if e.balancer != "" && status.Code(err) != codes.DeadlineExceeded { // If e.balancer == nil, the ac will stop reconnecting because the dialer returns non-temp error, // the error will be an internal error. 
t.Fatalf("TestService/EmptyCall(%v, _) = _, %v, want _, error code: %s", ctx, err, codes.DeadlineExceeded) } awaitNewConnLogOutput() } func (s) TestServerGracefulStopIdempotent(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testServerGracefulStopIdempotent(t, e) } } func testServerGracefulStopIdempotent(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() for i := 0; i < 3; i++ { te.srv.GracefulStop() } } func (s) TestServerGoAway(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testServerGoAway(t, e) } } func testServerGoAway(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) // Finish an RPC to make sure the connection is good. ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } ch := make(chan struct{}) go func() { te.srv.GracefulStop() close(ch) }() // Loop until the server side GoAway signal is propagated to the client. for { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) != codes.DeadlineExceeded { cancel() break } cancel() } // A new RPC should fail. 
ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable && status.Code(err) != codes.Internal { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s or %s", err, codes.Unavailable, codes.Internal) } <-ch awaitNewConnLogOutput() } func (s) TestServerGoAwayPendingRPC(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testServerGoAwayPendingRPC(t, e) } } func testServerGoAwayPendingRPC(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( "transport: http2Client.notifyError got notified that the client transport was broken EOF", "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", "grpc: addrConn.resetTransport failed to create client transport: connection error", ) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } // Finish an RPC to make sure the connection is good. if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) } ch := make(chan struct{}) go func() { te.srv.GracefulStop() close(ch) }() // Loop until the server side GoAway signal is propagated to the client. 
start := time.Now() errored := false for time.Since(start) < time.Second { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)) cancel() if err != nil { errored = true break } } if !errored { t.Fatalf("GoAway never received by client") } respParam := []*testpb.ResponseParameters{{Size: 1}} payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) if err != nil { t.Fatal(err) } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: payload, } // The existing RPC should be still good to proceed. if err := stream.Send(req); err != nil { t.Fatalf("%v.Send(_) = %v, want ", stream, err) } if _, err := stream.Recv(); err != nil { t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) } // The RPC will run until canceled. cancel() <-ch awaitNewConnLogOutput() } func (s) TestServerMultipleGoAwayPendingRPC(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testServerMultipleGoAwayPendingRPC(t, e) } } func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( "transport: http2Client.notifyError got notified that the client transport was broken EOF", "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", "grpc: addrConn.resetTransport failed to create client transport: connection error", ) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithCancel(context.Background()) stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } // Finish an RPC to make sure the connection is good. 
if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) } ch1 := make(chan struct{}) go func() { te.srv.GracefulStop() close(ch1) }() ch2 := make(chan struct{}) go func() { te.srv.GracefulStop() close(ch2) }() // Loop until the server side GoAway signal is propagated to the client. for { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { cancel() break } cancel() } select { case <-ch1: t.Fatal("GracefulStop() terminated early") case <-ch2: t.Fatal("GracefulStop() terminated early") default: } respParam := []*testpb.ResponseParameters{ { Size: 1, }, } payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) if err != nil { t.Fatal(err) } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: payload, } // The existing RPC should be still good to proceed. 
if err := stream.Send(req); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) } if _, err := stream.Recv(); err != nil { t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) } if err := stream.CloseSend(); err != nil { t.Fatalf("%v.CloseSend() = %v, want ", stream, err) } <-ch1 <-ch2 cancel() awaitNewConnLogOutput() } func (s) TestConcurrentClientConnCloseAndServerGoAway(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testConcurrentClientConnCloseAndServerGoAway(t, e) } } func testConcurrentClientConnCloseAndServerGoAway(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( "transport: http2Client.notifyError got notified that the client transport was broken EOF", "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", "grpc: addrConn.resetTransport failed to create client transport: connection error", ) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) } ch := make(chan struct{}) // Close ClientConn and Server concurrently. 
go func() { te.srv.GracefulStop() close(ch) }() go func() { cc.Close() }() <-ch } func (s) TestConcurrentServerStopAndGoAway(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testConcurrentServerStopAndGoAway(t, e) } } func testConcurrentServerStopAndGoAway(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( "transport: http2Client.notifyError got notified that the client transport was broken EOF", "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", "grpc: addrConn.resetTransport failed to create client transport: connection error", ) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) stream, err := tc.FullDuplexCall(context.Background(), grpc.WaitForReady(true)) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } // Finish an RPC to make sure the connection is good. if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) } ch := make(chan struct{}) go func() { te.srv.GracefulStop() close(ch) }() // Loop until the server side GoAway signal is propagated to the client. for { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { cancel() break } cancel() } // Stop the server and close all the connections. 
te.srv.Stop() respParam := []*testpb.ResponseParameters{ { Size: 1, }, } payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) if err != nil { t.Fatal(err) } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: payload, } sendStart := time.Now() for { if err := stream.Send(req); err == io.EOF { // stream.Send should eventually send io.EOF break } else if err != nil { // Send should never return a transport-level error. t.Fatalf("stream.Send(%v) = %v; want ", req, err) } if time.Since(sendStart) > 2*time.Second { t.Fatalf("stream.Send(_) did not return io.EOF after 2s") } time.Sleep(time.Millisecond) } if _, err := stream.Recv(); err == nil || err == io.EOF { t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) } <-ch awaitNewConnLogOutput() } func (s) TestClientConnCloseAfterGoAwayWithActiveStream(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testClientConnCloseAfterGoAwayWithActiveStream(t, e) } } func testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithCancel(context.Background()) defer cancel() if _, err := tc.FullDuplexCall(ctx); err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, ", tc, err) } done := make(chan struct{}) go func() { te.srv.GracefulStop() close(done) }() time.Sleep(50 * time.Millisecond) cc.Close() timeout := time.NewTimer(time.Second) select { case <-done: case <-timeout.C: t.Fatalf("Test timed-out.") } } func (s) TestFailFast(t *testing.T) { for _, e := range listTestEnv() { testFailFast(t, e) } } func testFailFast(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( "transport: http2Client.notifyError got notified that the client transport was broken EOF", "grpc: 
addrConn.transportMonitor exits due to: grpc: the connection is closing", "grpc: addrConn.resetTransport failed to create client transport: connection error", ) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } // Stop the server and tear down all the existing connections. te.srv.Stop() // Loop until the server teardown is propagated to the client. for { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) _, err := tc.EmptyCall(ctx, &testpb.Empty{}) cancel() if status.Code(err) == codes.Unavailable { break } t.Logf("%v.EmptyCall(_, _) = _, %v", tc, err) time.Sleep(10 * time.Millisecond) } // The client keeps reconnecting and ongoing fail-fast RPCs should fail with code.Unavailable. 
if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.Unavailable { t.Fatalf("TestService/EmptyCall(_, _, _) = _, %v, want _, error code: %s", err, codes.Unavailable) } if _, err := tc.StreamingInputCall(context.Background()); status.Code(err) != codes.Unavailable { t.Fatalf("TestService/StreamingInputCall(_) = _, %v, want _, error code: %s", err, codes.Unavailable) } awaitNewConnLogOutput() } func testServiceConfigSetup(t *testing.T, e env) *test { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( "transport: http2Client.notifyError got notified that the client transport was broken EOF", "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", "grpc: addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; please retry.", ) return te } func newBool(b bool) (a *bool) { return &b } func newInt(b int) (a *int) { return &b } func newDuration(b time.Duration) (a *time.Duration) { a = new(time.Duration) *a = b return } func (s) TestGetMethodConfig(t *testing.T) { te := testServiceConfigSetup(t, tcpClearRREnv) defer te.tearDown() r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() te.resolverScheme = r.Scheme() cc := te.clientConn() addrs := []resolver.Address{{Addr: te.srvAddr}} r.UpdateState(resolver.State{ Addresses: addrs, ServiceConfig: parseCfg(r, `{ "methodConfig": [ { "name": [ { "service": "grpc.testing.TestService", "method": "EmptyCall" } ], "waitForReady": true, "timeout": ".001s" }, { "name": [ { "service": "grpc.testing.TestService" } ], "waitForReady": false } ] }`)}) tc := testpb.NewTestServiceClient(cc) // Make sure service config has been processed by grpc. for { if cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil { break } time.Sleep(time.Millisecond) } // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. 
var err error if _, err = tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseCfg(r, `{ "methodConfig": [ { "name": [ { "service": "grpc.testing.TestService", "method": "UnaryCall" } ], "waitForReady": true, "timeout": ".001s" }, { "name": [ { "service": "grpc.testing.TestService" } ], "waitForReady": false } ] }`)}) // Make sure service config has been processed by grpc. for { if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && !*mc.WaitForReady { break } time.Sleep(time.Millisecond) } // The following RPCs are expected to become fail-fast. if _, err = tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.Unavailable { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable) } } func (s) TestServiceConfigWaitForReady(t *testing.T) { te := testServiceConfigSetup(t, tcpClearRREnv) defer te.tearDown() r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() // Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds. te.resolverScheme = r.Scheme() cc := te.clientConn() addrs := []resolver.Address{{Addr: te.srvAddr}} r.UpdateState(resolver.State{ Addresses: addrs, ServiceConfig: parseCfg(r, `{ "methodConfig": [ { "name": [ { "service": "grpc.testing.TestService", "method": "EmptyCall" }, { "service": "grpc.testing.TestService", "method": "FullDuplexCall" } ], "waitForReady": false, "timeout": ".001s" } ] }`)}) tc := testpb.NewTestServiceClient(cc) // Make sure service config has been processed by grpc. 
for { if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").WaitForReady != nil { break } time.Sleep(time.Millisecond) } // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. var err error if _, err = tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } if _, err := tc.FullDuplexCall(context.Background(), grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) } // Generate a service config update. // Case2:Client API set failfast to be false, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds. r.UpdateState(resolver.State{ Addresses: addrs, ServiceConfig: parseCfg(r, `{ "methodConfig": [ { "name": [ { "service": "grpc.testing.TestService", "method": "EmptyCall" }, { "service": "grpc.testing.TestService", "method": "FullDuplexCall" } ], "waitForReady": true, "timeout": ".001s" } ] }`)}) // Wait for the new service config to take effect. for { if mc := cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall"); mc.WaitForReady != nil && *mc.WaitForReady { break } time.Sleep(time.Millisecond) } // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. 
if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } if _, err := tc.FullDuplexCall(context.Background()); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) } } func (s) TestServiceConfigTimeout(t *testing.T) { te := testServiceConfigSetup(t, tcpClearRREnv) defer te.tearDown() r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() // Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. te.resolverScheme = r.Scheme() cc := te.clientConn() addrs := []resolver.Address{{Addr: te.srvAddr}} r.UpdateState(resolver.State{ Addresses: addrs, ServiceConfig: parseCfg(r, `{ "methodConfig": [ { "name": [ { "service": "grpc.testing.TestService", "method": "EmptyCall" }, { "service": "grpc.testing.TestService", "method": "FullDuplexCall" } ], "waitForReady": true, "timeout": "3600s" } ] }`)}) tc := testpb.NewTestServiceClient(cc) // Make sure service config has been processed by grpc. for { if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil { break } time.Sleep(time.Millisecond) } // The following RPCs are expected to become non-fail-fast ones with 1ns deadline. 
var err error ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } cancel() ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond) if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) } cancel() // Generate a service config update. // Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. r.UpdateState(resolver.State{ Addresses: addrs, ServiceConfig: parseCfg(r, `{ "methodConfig": [ { "name": [ { "service": "grpc.testing.TestService", "method": "EmptyCall" }, { "service": "grpc.testing.TestService", "method": "FullDuplexCall" } ], "waitForReady": true, "timeout": ".000000001s" } ] }`)}) // Wait for the new service config to take effect. 
for { if mc := cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall"); mc.Timeout != nil && *mc.Timeout == time.Nanosecond { break } time.Sleep(time.Millisecond) } ctx, cancel = context.WithTimeout(context.Background(), time.Hour) if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } cancel() ctx, cancel = context.WithTimeout(context.Background(), time.Hour) if _, err = tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) } cancel() } func (s) TestServiceConfigMaxMsgSize(t *testing.T) { e := tcpClearRREnv r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() // Setting up values and objects shared across all test cases. const smallSize = 1 const largeSize = 1024 const extraLargeSize = 2048 smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) if err != nil { t.Fatal(err) } largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) if err != nil { t.Fatal(err) } extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize) if err != nil { t.Fatal(err) } // Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv). 
te1 := testServiceConfigSetup(t, e) defer te1.tearDown() te1.resolverScheme = r.Scheme() te1.nonBlockingDial = true te1.startServer(&testServer{security: e.security}) cc1 := te1.clientConn() addrs := []resolver.Address{{Addr: te1.srvAddr}} sc := parseCfg(r, `{ "methodConfig": [ { "name": [ { "service": "grpc.testing.TestService", "method": "UnaryCall" }, { "service": "grpc.testing.TestService", "method": "FullDuplexCall" } ], "maxRequestMessageBytes": 2048, "maxResponseMessageBytes": 2048 } ] }`) r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: sc}) tc := testpb.NewTestServiceClient(cc1) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(extraLargeSize), Payload: smallPayload, } for { if cc1.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil { break } time.Sleep(time.Millisecond) } // Test for unary RPC recv. if _, err = tc.UnaryCall(context.Background(), req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Test for unary RPC send. req.Payload = extraLargePayload req.ResponseSize = int32(smallSize) if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Test for streaming RPC recv. 
respParam := []*testpb.ResponseParameters{ { Size: int32(extraLargeSize), }, } sreq := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: smallPayload, } stream, err := tc.FullDuplexCall(te1.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err = stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) } // Test for streaming RPC send. respParam[0].Size = int32(smallSize) sreq.Payload = extraLargePayload stream, err = tc.FullDuplexCall(te1.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) } // Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). te2 := testServiceConfigSetup(t, e) te2.resolverScheme = r.Scheme() te2.nonBlockingDial = true te2.maxClientReceiveMsgSize = newInt(1024) te2.maxClientSendMsgSize = newInt(1024) te2.startServer(&testServer{security: e.security}) defer te2.tearDown() cc2 := te2.clientConn() r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te2.srvAddr}}, ServiceConfig: sc}) tc = testpb.NewTestServiceClient(cc2) for { if cc2.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil { break } time.Sleep(time.Millisecond) } // Test for unary RPC recv. 
req.Payload = smallPayload req.ResponseSize = int32(largeSize) if _, err = tc.UnaryCall(context.Background(), req, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Test for unary RPC send. req.Payload = largePayload req.ResponseSize = int32(smallSize) if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Test for streaming RPC recv. stream, err = tc.FullDuplexCall(te2.ctx) respParam[0].Size = int32(largeSize) sreq.Payload = smallPayload if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err = stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) } // Test for streaming RPC send. respParam[0].Size = int32(smallSize) sreq.Payload = largePayload stream, err = tc.FullDuplexCall(te2.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err = stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) } // Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). 
te3 := testServiceConfigSetup(t, e) te3.resolverScheme = r.Scheme() te3.nonBlockingDial = true te3.maxClientReceiveMsgSize = newInt(4096) te3.maxClientSendMsgSize = newInt(4096) te3.startServer(&testServer{security: e.security}) defer te3.tearDown() cc3 := te3.clientConn() r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te3.srvAddr}}, ServiceConfig: sc}) tc = testpb.NewTestServiceClient(cc3) for { if cc3.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil { break } time.Sleep(time.Millisecond) } // Test for unary RPC recv. req.Payload = smallPayload req.ResponseSize = int32(largeSize) if _, err = tc.UnaryCall(context.Background(), req, grpc.WaitForReady(true)); err != nil { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) } req.ResponseSize = int32(extraLargeSize) if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Test for unary RPC send. req.Payload = largePayload req.ResponseSize = int32(smallSize) if _, err := tc.UnaryCall(context.Background(), req); err != nil { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) } req.Payload = extraLargePayload if _, err = tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Test for streaming RPC recv. 
stream, err = tc.FullDuplexCall(te3.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } respParam[0].Size = int32(largeSize) sreq.Payload = smallPayload if err = stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } if _, err = stream.Recv(); err != nil { t.Fatalf("%v.Recv() = _, %v, want ", stream, err) } respParam[0].Size = int32(extraLargeSize) if err = stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) } // Test for streaming RPC send. respParam[0].Size = int32(smallSize) sreq.Payload = largePayload stream, err = tc.FullDuplexCall(te3.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err := stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } sreq.Payload = extraLargePayload if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) } } // Reading from a streaming RPC may fail with context canceled if timeout was // set by service config (https://github.com/grpc/grpc-go/issues/1818). This // test makes sure read from streaming RPC doesn't fail in this case. 
func (s) TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) { te := testServiceConfigSetup(t, tcpClearRREnv) te.startServer(&testServer{security: tcpClearRREnv.security}) defer te.tearDown() r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() te.resolverScheme = r.Scheme() te.nonBlockingDial = true cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: te.srvAddr}}, ServiceConfig: parseCfg(r, `{ "methodConfig": [ { "name": [ { "service": "grpc.testing.TestService", "method": "FullDuplexCall" } ], "waitForReady": true, "timeout": "10s" } ] }`)}) // Make sure service config has been processed by grpc. for { if cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").Timeout != nil { break } time.Sleep(time.Millisecond) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) if err != nil { t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want ", err) } payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 0) if err != nil { t.Fatalf("failed to newPayload: %v", err) } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: []*testpb.ResponseParameters{{Size: 0}}, Payload: payload, } if err := stream.Send(req); err != nil { t.Fatalf("stream.Send(%v) = %v, want ", req, err) } stream.CloseSend() time.Sleep(time.Second) // Sleep 1 second before recv to make sure the final status is received // before the recv. if _, err := stream.Recv(); err != nil { t.Fatalf("stream.Recv = _, %v, want _, ", err) } // Keep reading to drain the stream. 
for { if _, err := stream.Recv(); err != nil { break } } } func (s) TestPreloaderClientSend(t *testing.T) { for _, e := range listTestEnv() { testPreloaderClientSend(t, e) } } func testPreloaderClientSend(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( "transport: http2Client.notifyError got notified that the client transport was broken EOF", "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", "grpc: addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; please retry.", ) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) // Test for streaming RPC recv. // Set context for send with proper RPC Information stream, err := tc.FullDuplexCall(te.ctx, grpc.UseCompressor("gzip")) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } var index int for index < len(reqSizes) { respParam := []*testpb.ResponseParameters{ { Size: int32(respSizes[index]), }, } payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])) if err != nil { t.Fatal(err) } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: payload, } preparedMsg := &grpc.PreparedMsg{} err = preparedMsg.Encode(stream, req) if err != nil { t.Fatalf("PrepareMsg failed for size %d : %v", reqSizes[index], err) } if err := stream.SendMsg(preparedMsg); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) } reply, err := stream.Recv() if err != nil { t.Fatalf("%v.Recv() = %v, want ", stream, err) } pt := reply.GetPayload().GetType() if pt != testpb.PayloadType_COMPRESSABLE { t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE) } size := len(reply.GetPayload().GetBody()) if size != int(respSizes[index]) { t.Fatalf("Got reply body of length %d, want %d", size, 
respSizes[index]) } index++ } if err := stream.CloseSend(); err != nil { t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) } if _, err := stream.Recv(); err != io.EOF { t.Fatalf("%v failed to complele the ping pong test: %v", stream, err) } } func (s) TestMaxMsgSizeClientDefault(t *testing.T) { for _, e := range listTestEnv() { testMaxMsgSizeClientDefault(t, e) } } func testMaxMsgSizeClientDefault(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( "transport: http2Client.notifyError got notified that the client transport was broken EOF", "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", "grpc: addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; please retry.", ) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) const smallSize = 1 const largeSize = 4 * 1024 * 1024 smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(largeSize), Payload: smallPayload, } // Test for unary RPC recv. if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } respParam := []*testpb.ResponseParameters{ { Size: int32(largeSize), }, } sreq := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: smallPayload, } // Test for streaming RPC recv. 
// Tail of the preceding streaming-size test: the response exceeds the
// configured limit, so the first Recv must fail with ResourceExhausted.
stream, err := tc.FullDuplexCall(te.ctx)
if err != nil {
	t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
}
if err := stream.Send(sreq); err != nil {
	t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err)
}
if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
	t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
}
}

func (s) TestMaxMsgSizeClientAPI(t *testing.T) {
	for _, e := range listTestEnv() {
		testMaxMsgSizeClientAPI(t, e)
	}
}

// testMaxMsgSizeClientAPI checks that the client-side send and receive
// message-size limits are enforced for both unary and streaming RPCs.
func testMaxMsgSizeClientAPI(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	// Keep the server's send limit high so only the client-side limits fire.
	te.maxServerSendMsgSize = newInt(5 * 1024 * 1024)
	te.maxClientReceiveMsgSize = newInt(1024)
	te.maxClientSendMsgSize = newInt(1024)
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
		"Failed to dial : context canceled; please retry.",
	)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	const (
		smallSize = 1
		largeSize = 1024
	)
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}
	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(largeSize),
		Payload:      smallPayload,
	}
	// Unary RPC: response larger than the client receive limit.
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Unary RPC: request larger than the client send limit.
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	respParam := []*testpb.ResponseParameters{{Size: int32(largeSize)}}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            smallPayload,
	}
	// Streaming RPC: response message exceeds the client receive limit.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Streaming RPC: request message exceeds the client send limit.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
	}
	if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted)
	}
}

func (s) TestMaxMsgSizeServerAPI(t *testing.T) {
	for _, e := range listTestEnv() {
		testMaxMsgSizeServerAPI(t, e)
	}
}

// testMaxMsgSizeServerAPI checks that the server-side send and receive
// message-size limits are enforced for both unary and streaming RPCs.
func testMaxMsgSizeServerAPI(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	te.maxServerReceiveMsgSize = newInt(1024)
	te.maxServerSendMsgSize = newInt(1024)
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
		"Failed to dial : context canceled; please retry.",
	)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	const (
		smallSize = 1
		largeSize = 1024
	)
	smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize)
	if err != nil {
		t.Fatal(err)
	}
	largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize)
	if err != nil {
		t.Fatal(err)
	}

	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: int32(largeSize),
		Payload:      smallPayload,
	}
	// Unary RPC: response larger than the server send limit.
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	// Unary RPC: request larger than the server receive limit.
	req.Payload = largePayload
	req.ResponseSize = int32(smallSize)
	if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted)
	}

	respParam := []*testpb.ResponseParameters{{Size: int32(largeSize)}}
	sreq := &testpb.StreamingOutputCallRequest{
		ResponseType:       testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: respParam,
		Payload:            smallPayload,
	}
	// Streaming RPC: response message exceeds the server send limit.
	stream, err := tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}

	// Streaming RPC: request message exceeds the server receive limit.
	respParam[0].Size = int32(smallSize)
	sreq.Payload = largePayload
	stream, err = tc.FullDuplexCall(te.ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
	}
	if err := stream.Send(sreq); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err)
	}
	if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted {
		t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted)
	}
}

func (s) TestTap(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			continue
		}
		testTap(t, e)
	}
}

// myTap counts EmptyCall invocations and rejects UnaryCall at the transport
// level when installed as the server's InTapHandle.
type myTap struct {
	cnt int
}

func (t *myTap) handle(ctx context.Context, info *tap.Info) (context.Context, error) {
	if info != nil {
		switch info.FullMethodName {
		case "/grpc.testing.TestService/EmptyCall":
			t.cnt++
		case "/grpc.testing.TestService/UnaryCall":
			return nil, fmt.Errorf("tap error")
		}
	}
	return ctx, nil
}

// testTap verifies that the tap handler observes EmptyCall and that its error
// on UnaryCall surfaces to the client as Unavailable.
func testTap(t *testing.T, e env) {
	te := newTest(t, e)
	te.userAgent = testAppUA
	ttap := &myTap{}
	te.tapHandle = ttap.handle
	te.declareLogNoise(
		"transport: http2Client.notifyError got notified that the client transport was broken EOF",
		"grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing",
		"grpc: addrConn.resetTransport failed to create client transport: connection error",
	)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()

	cc := te.clientConn()
	tc := testpb.NewTestServiceClient(cc)
	if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
		t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err)
	}
	if ttap.cnt != 1 {
		t.Fatalf("Get the count in ttap %d, want 1", ttap.cnt)
	}

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 31)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: 45,
		Payload:      payload,
	}
	if _, err := tc.UnaryCall(context.Background(), req); status.Code(err) != codes.Unavailable {
		t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.Unavailable)
	}
}

// healthCheck issues a unary Check RPC with the given timeout and returns the
// response.
func healthCheck(d time.Duration, cc *grpc.ClientConn, service string) (*healthpb.HealthCheckResponse, error) {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()
	hc := healthgrpc.NewHealthClient(cc)
	return hc.Check(ctx, &healthpb.HealthCheckRequest{Service: service})
}

// verifyHealthCheckStatus asserts that the current health status of the given
// service equals wantStatus.
func verifyHealthCheckStatus(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantStatus healthpb.HealthCheckResponse_ServingStatus) {
	t.Helper()
	resp, err := healthCheck(d, cc, service)
	if err != nil {
		t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err)
	}
	if resp.Status != wantStatus {
		t.Fatalf("Got the serving status %v, want %v", resp.Status, wantStatus)
	}
}

// verifyHealthCheckErrCode asserts that a unary health check RPC fails (or
// succeeds) with the given status code.
func verifyHealthCheckErrCode(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantCode codes.Code) {
	t.Helper()
	if _, err := healthCheck(d, cc, service); status.Code(err) != wantCode {
		t.Fatalf("Health/Check() got errCode %v, want %v", status.Code(err), wantCode)
	}
}

// newHealthCheckStream is a helper function that starts a health check
// streaming RPC and returns the stream.
func newHealthCheckStream(t *testing.T, cc *grpc.ClientConn, service string) (healthgrpc.Health_WatchClient, context.CancelFunc) { t.Helper() ctx, cancel := context.WithCancel(context.Background()) hc := healthgrpc.NewHealthClient(cc) stream, err := hc.Watch(ctx, &healthpb.HealthCheckRequest{Service: service}) if err != nil { t.Fatalf("hc.Watch(_, %v) failed: %v", service, err) } return stream, cancel } // healthWatchChecker is a helper function to verify that the next health // status returned on the given stream matches the one passed in 'wantStatus'. func healthWatchChecker(t *testing.T, stream healthgrpc.Health_WatchClient, wantStatus healthpb.HealthCheckResponse_ServingStatus) { t.Helper() response, err := stream.Recv() if err != nil { t.Fatalf("stream.Recv() failed: %v", err) } if response.Status != wantStatus { t.Fatalf("got servingStatus %v, want %v", response.Status, wantStatus) } } // TestHealthCheckSuccess invokes the unary Check() RPC on the health server in // a successful case. func (s) TestHealthCheckSuccess(t *testing.T) { for _, e := range listTestEnv() { testHealthCheckSuccess(t, e) } } func testHealthCheckSuccess(t *testing.T, e env) { te := newTest(t, e) te.enableHealthServer = true te.startServer(&testServer{security: e.security}) te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) defer te.tearDown() verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.OK) } // TestHealthCheckFailure invokes the unary Check() RPC on the health server // with an expired context and expects the RPC to fail. 
func (s) TestHealthCheckFailure(t *testing.T) { for _, e := range listTestEnv() { testHealthCheckFailure(t, e) } } func testHealthCheckFailure(t *testing.T, e env) { te := newTest(t, e) te.declareLogNoise( "Failed to dial ", "grpc: the client connection is closing; please retry", ) te.enableHealthServer = true te.startServer(&testServer{security: e.security}) te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) defer te.tearDown() verifyHealthCheckErrCode(t, 0*time.Second, te.clientConn(), defaultHealthService, codes.DeadlineExceeded) awaitNewConnLogOutput() } // TestHealthCheckOff makes a unary Check() RPC on the health server where the // health status of the defaultHealthService is not set, and therefore expects // an error code 'codes.NotFound'. func (s) TestHealthCheckOff(t *testing.T) { for _, e := range listTestEnv() { // TODO(bradfitz): Temporarily skip this env due to #619. if e.name == "handler-tls" { continue } testHealthCheckOff(t, e) } } func testHealthCheckOff(t *testing.T, e env) { te := newTest(t, e) te.enableHealthServer = true te.startServer(&testServer{security: e.security}) defer te.tearDown() verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.NotFound) } // TestHealthWatchMultipleClients makes a streaming Watch() RPC on the health // server with multiple clients and expects the same status on both streams. 
func (s) TestHealthWatchMultipleClients(t *testing.T) { for _, e := range listTestEnv() { testHealthWatchMultipleClients(t, e) } } func testHealthWatchMultipleClients(t *testing.T, e env) { te := newTest(t, e) te.enableHealthServer = true te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() stream1, cf1 := newHealthCheckStream(t, cc, defaultHealthService) defer cf1() healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) stream2, cf2 := newHealthCheckStream(t, cc, defaultHealthService) defer cf2() healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_NOT_SERVING) healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_NOT_SERVING) } // TestHealthWatchSameStatusmakes a streaming Watch() RPC on the health server // and makes sure that the health status of the server is as expected after // multiple calls to SetServingStatus with the same status. 
func (s) TestHealthWatchSameStatus(t *testing.T) { for _, e := range listTestEnv() { testHealthWatchSameStatus(t, e) } } func testHealthWatchSameStatus(t *testing.T, e env) { te := newTest(t, e) te.enableHealthServer = true te.startServer(&testServer{security: e.security}) defer te.tearDown() stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) defer cf() healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING) } // TestHealthWatchServiceStatusSetBeforeStartingServer starts a health server // on which the health status for the defaultService is set before the gRPC // server is started, and expects the correct health status to be returned. func (s) TestHealthWatchServiceStatusSetBeforeStartingServer(t *testing.T) { for _, e := range listTestEnv() { testHealthWatchSetServiceStatusBeforeStartingServer(t, e) } } func testHealthWatchSetServiceStatusBeforeStartingServer(t *testing.T, e env) { hs := health.NewServer() te := newTest(t, e) te.healthServer = hs hs.SetServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) te.startServer(&testServer{security: e.security}) defer te.tearDown() stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) defer cf() healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) } // TestHealthWatchDefaultStatusChange verifies the simple case where the // service starts off with a SERVICE_UNKNOWN status (because SetServingStatus // hasn't been called yet) and then moves to SERVING after SetServingStatus is // called. 
func (s) TestHealthWatchDefaultStatusChange(t *testing.T) { for _, e := range listTestEnv() { testHealthWatchDefaultStatusChange(t, e) } } func testHealthWatchDefaultStatusChange(t *testing.T, e env) { te := newTest(t, e) te.enableHealthServer = true te.startServer(&testServer{security: e.security}) defer te.tearDown() stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) defer cf() healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) } // TestHealthWatchSetServiceStatusBeforeClientCallsWatch verifies the case // where the health status is set to SERVING before the client calls Watch(). func (s) TestHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T) { for _, e := range listTestEnv() { testHealthWatchSetServiceStatusBeforeClientCallsWatch(t, e) } } func testHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T, e env) { te := newTest(t, e) te.enableHealthServer = true te.startServer(&testServer{security: e.security}) te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) defer te.tearDown() stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) defer cf() healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) } // TestHealthWatchOverallServerHealthChange verifies setting the overall status // of the server by using the empty service name. 
func (s) TestHealthWatchOverallServerHealthChange(t *testing.T) { for _, e := range listTestEnv() { testHealthWatchOverallServerHealthChange(t, e) } } func testHealthWatchOverallServerHealthChange(t *testing.T, e env) { te := newTest(t, e) te.enableHealthServer = true te.startServer(&testServer{security: e.security}) defer te.tearDown() stream, cf := newHealthCheckStream(t, te.clientConn(), "") defer cf() healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) te.setHealthServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING) healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING) } // TestUnknownHandler verifies that an expected error is returned (by setting // the unknownHandler on the server) for a service which is not exposed to the // client. func (s) TestUnknownHandler(t *testing.T) { // An example unknownHandler that returns a different code and a different // method, making sure that we do not expose what methods are implemented to // a client that is not authenticated. unknownHandler := func(srv interface{}, stream grpc.ServerStream) error { return status.Error(codes.Unauthenticated, "user unauthenticated") } for _, e := range listTestEnv() { // TODO(bradfitz): Temporarily skip this env due to #619. if e.name == "handler-tls" { continue } testUnknownHandler(t, e, unknownHandler) } } func testUnknownHandler(t *testing.T, e env, unknownHandler grpc.StreamHandler) { te := newTest(t, e) te.unknownHandler = unknownHandler te.startServer(&testServer{security: e.security}) defer te.tearDown() verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), "", codes.Unauthenticated) } // TestHealthCheckServingStatus makes a streaming Watch() RPC on the health // server and verifies a bunch of health status transitions. 
func (s) TestHealthCheckServingStatus(t *testing.T) { for _, e := range listTestEnv() { testHealthCheckServingStatus(t, e) } } func testHealthCheckServingStatus(t *testing.T, e env) { te := newTest(t, e) te.enableHealthServer = true te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() verifyHealthCheckStatus(t, 1*time.Second, cc, "", healthpb.HealthCheckResponse_SERVING) verifyHealthCheckErrCode(t, 1*time.Second, cc, defaultHealthService, codes.NotFound) te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_SERVING) te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) } func (s) TestEmptyUnaryWithUserAgent(t *testing.T) { for _, e := range listTestEnv() { testEmptyUnaryWithUserAgent(t, e) } } func testEmptyUnaryWithUserAgent(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) var header metadata.MD reply, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Header(&header)) if err != nil || !proto.Equal(&testpb.Empty{}, reply) { t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, ", reply, err, &testpb.Empty{}) } if v, ok := header["ua"]; !ok || !strings.HasPrefix(v[0], testAppUA) { t.Fatalf("header[\"ua\"] = %q, %t, want string with prefix %q, true", v, ok, testAppUA) } te.srv.Stop() } func (s) TestFailedEmptyUnary(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { // This test covers status details, but // Grpc-Status-Details-Bin is not support in handler_server. 
continue } testFailedEmptyUnary(t, e) } } func testFailedEmptyUnary(t *testing.T, e env) { te := newTest(t, e) te.userAgent = failAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) wantErr := detailedError if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !testutils.StatusErrEqual(err, wantErr) { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr) } } func (s) TestLargeUnary(t *testing.T) { for _, e := range listTestEnv() { testLargeUnary(t, e) } } func testLargeUnary(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) const argSize = 271828 const respSize = 314159 payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: respSize, Payload: payload, } reply, err := tc.UnaryCall(context.Background(), req) if err != nil { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) } pt := reply.GetPayload().GetType() ps := len(reply.GetPayload().GetBody()) if pt != testpb.PayloadType_COMPRESSABLE || ps != respSize { t.Fatalf("Got the reply with type %d len %d; want %d, %d", pt, ps, testpb.PayloadType_COMPRESSABLE, respSize) } } // Test backward-compatibility API for setting msg size limit. 
func (s) TestExceedMsgLimit(t *testing.T) { for _, e := range listTestEnv() { testExceedMsgLimit(t, e) } } func testExceedMsgLimit(t *testing.T, e env) { te := newTest(t, e) maxMsgSize := 1024 te.maxServerMsgSize, te.maxClientMsgSize = newInt(maxMsgSize), newInt(maxMsgSize) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) largeSize := int32(maxMsgSize + 1) const smallSize = 1 largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) if err != nil { t.Fatal(err) } smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) if err != nil { t.Fatal(err) } // Make sure the server cannot receive a unary RPC of largeSize. req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: smallSize, Payload: largePayload, } if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Make sure the client cannot receive a unary RPC of largeSize. req.ResponseSize = largeSize req.Payload = smallPayload if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Make sure the server cannot receive a streaming RPC of largeSize. 
stream, err := tc.FullDuplexCall(te.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } respParam := []*testpb.ResponseParameters{ { Size: 1, }, } sreq := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: largePayload, } if err := stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) } // Test on client side for streaming RPC. stream, err = tc.FullDuplexCall(te.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } respParam[0].Size = largeSize sreq.Payload = smallPayload if err := stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) } } func (s) TestPeerClientSide(t *testing.T) { for _, e := range listTestEnv() { testPeerClientSide(t, e) } } func testPeerClientSide(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) peer := new(peer.Peer) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } pa := peer.Addr.String() if e.network == "unix" { if pa != te.srvAddr { t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr) } return } _, pp, err := net.SplitHostPort(pa) if err != nil { t.Fatalf("Failed to parse address from peer.") } _, sp, err := net.SplitHostPort(te.srvAddr) if err != nil { t.Fatalf("Failed to parse address of test server.") } if pp != sp { 
t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp) } } // TestPeerNegative tests that if call fails setting peer // doesn't cause a segmentation fault. // issue#1141 https://github.com/grpc/grpc-go/issues/1141 func (s) TestPeerNegative(t *testing.T) { for _, e := range listTestEnv() { testPeerNegative(t, e) } } func testPeerNegative(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) peer := new(peer.Peer) ctx, cancel := context.WithCancel(context.Background()) cancel() tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)) } func (s) TestPeerFailedRPC(t *testing.T) { for _, e := range listTestEnv() { testPeerFailedRPC(t, e) } } func testPeerFailedRPC(t *testing.T, e env) { te := newTest(t, e) te.maxServerReceiveMsgSize = newInt(1 * 1024) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) // first make a successful request to the server if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } // make a second request that will be rejected by the server const largeSize = 5 * 1024 largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, Payload: largePayload, } peer := new(peer.Peer) if _, err := tc.UnaryCall(context.Background(), req, grpc.Peer(peer)); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } else { pa := peer.Addr.String() if e.network == "unix" { if pa != te.srvAddr { t.Fatalf("peer.Addr = %v, want %v", pa, te.srvAddr) } return } _, pp, err := net.SplitHostPort(pa) if err != nil { t.Fatalf("Failed to parse address from peer.") } _, sp, 
err := net.SplitHostPort(te.srvAddr) if err != nil { t.Fatalf("Failed to parse address of test server.") } if pp != sp { t.Fatalf("peer.Addr = localhost:%v, want localhost:%v", pp, sp) } } } func (s) TestMetadataUnaryRPC(t *testing.T) { for _, e := range listTestEnv() { testMetadataUnaryRPC(t, e) } } func testMetadataUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) const argSize = 2718 const respSize = 314 payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: respSize, Payload: payload, } var header, trailer metadata.MD ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer)); err != nil { t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, ", ctx, err) } // Ignore optional response headers that Servers may set: if header != nil { delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers delete(header, "date") // the Date header is also optional delete(header, "user-agent") delete(header, "content-type") } if !reflect.DeepEqual(header, testMetadata) { t.Fatalf("Received header metadata %v, want %v", header, testMetadata) } if !reflect.DeepEqual(trailer, testTrailerMetadata) { t.Fatalf("Received trailer metadata %v, want %v", trailer, testTrailerMetadata) } } func (s) TestMetadataOrderUnaryRPC(t *testing.T) { for _, e := range listTestEnv() { testMetadataOrderUnaryRPC(t, e) } } func testMetadataOrderUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) ctx = 
metadata.AppendToOutgoingContext(ctx, "key1", "value2") ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value3") // using Join to built expected metadata instead of FromOutgoingContext newMetadata := metadata.Join(testMetadata, metadata.Pairs("key1", "value2", "key1", "value3")) var header metadata.MD if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}, grpc.Header(&header)); err != nil { t.Fatal(err) } // Ignore optional response headers that Servers may set: if header != nil { delete(header, "trailer") // RFC 2616 says server SHOULD (but optional) declare trailers delete(header, "date") // the Date header is also optional delete(header, "user-agent") delete(header, "content-type") } if !reflect.DeepEqual(header, newMetadata) { t.Fatalf("Received header metadata %v, want %v", header, newMetadata) } } func (s) TestMultipleSetTrailerUnaryRPC(t *testing.T) { for _, e := range listTestEnv() { testMultipleSetTrailerUnaryRPC(t, e) } } func testMultipleSetTrailerUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, multipleSetTrailer: true}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) const ( argSize = 1 respSize = 1 ) payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: respSize, Payload: payload, } var trailer metadata.MD ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) if _, err := tc.UnaryCall(ctx, req, grpc.Trailer(&trailer), grpc.WaitForReady(true)); err != nil { t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, ", ctx, err) } expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2) if !reflect.DeepEqual(trailer, expectedTrailer) { t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer) } } func (s) TestMultipleSetTrailerStreamingRPC(t *testing.T) { for _, e := range 
listTestEnv() { testMultipleSetTrailerStreamingRPC(t, e) } } func testMultipleSetTrailerStreamingRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, multipleSetTrailer: true}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err := stream.CloseSend(); err != nil { t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) } if _, err := stream.Recv(); err != io.EOF { t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err) } trailer := stream.Trailer() expectedTrailer := metadata.Join(testTrailerMetadata, testTrailerMetadata2) if !reflect.DeepEqual(trailer, expectedTrailer) { t.Fatalf("Received trailer metadata %v, want %v", trailer, expectedTrailer) } } func (s) TestSetAndSendHeaderUnaryRPC(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testSetAndSendHeaderUnaryRPC(t, e) } } // To test header metadata is sent on SendHeader(). 
func testSetAndSendHeaderUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setAndSendHeader: true}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) const ( argSize = 1 respSize = 1 ) payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: respSize, Payload: payload, } var header metadata.MD ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil { t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, ", ctx, err) } delete(header, "user-agent") delete(header, "content-type") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) } } func (s) TestMultipleSetHeaderUnaryRPC(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testMultipleSetHeaderUnaryRPC(t, e) } } // To test header metadata is sent when sending response. 
func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setHeaderOnly: true}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) const ( argSize = 1 respSize = 1 ) payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: respSize, Payload: payload, } var header metadata.MD ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err != nil { t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, ", ctx, err) } delete(header, "user-agent") delete(header, "content-type") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) } } func (s) TestMultipleSetHeaderUnaryRPCError(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testMultipleSetHeaderUnaryRPCError(t, e) } } // To test header metadata is sent when sending status. func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setHeaderOnly: true}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) const ( argSize = 1 respSize = -1 // Invalid respSize to make RPC fail. 
) payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: respSize, Payload: payload, } var header metadata.MD ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) if _, err := tc.UnaryCall(ctx, req, grpc.Header(&header), grpc.WaitForReady(true)); err == nil { t.Fatalf("TestService.UnaryCall(%v, _, _, _) = _, %v; want _, ", ctx, err) } delete(header, "user-agent") delete(header, "content-type") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) } } func (s) TestSetAndSendHeaderStreamingRPC(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testSetAndSendHeaderStreamingRPC(t, e) } } // To test header metadata is sent on SendHeader(). func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setAndSendHeader: true}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) stream, err := tc.FullDuplexCall(ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err := stream.CloseSend(); err != nil { t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) } if _, err := stream.Recv(); err != io.EOF { t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err) } header, err := stream.Header() if err != nil { t.Fatalf("%v.Header() = _, %v, want _, ", stream, err) } delete(header, "user-agent") delete(header, "content-type") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) } } func (s) TestMultipleSetHeaderStreamingRPC(t 
*testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testMultipleSetHeaderStreamingRPC(t, e) } } // To test header metadata is sent when sending response. func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setHeaderOnly: true}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) const ( argSize = 1 respSize = 1 ) ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) stream, err := tc.FullDuplexCall(ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) if err != nil { t.Fatal(err) } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: []*testpb.ResponseParameters{ {Size: respSize}, }, Payload: payload, } if err := stream.Send(req); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) } if _, err := stream.Recv(); err != nil { t.Fatalf("%v.Recv() = %v, want ", stream, err) } if err := stream.CloseSend(); err != nil { t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) } if _, err := stream.Recv(); err != io.EOF { t.Fatalf("%v failed to complele the FullDuplexCall: %v", stream, err) } header, err := stream.Header() if err != nil { t.Fatalf("%v.Header() = _, %v, want _, ", stream, err) } delete(header, "user-agent") delete(header, "content-type") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) } } func (s) TestMultipleSetHeaderStreamingRPCError(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testMultipleSetHeaderStreamingRPCError(t, e) } } // To test header metadata is sent when sending status. 
func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) {
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security, setHeaderOnly: true})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())
	const (
		argSize = 1
		// A negative response size provokes an error on the server, so the
		// stream fails after headers were set via SetHeader.
		respSize = -1
	)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	ctx = metadata.NewOutgoingContext(ctx, testMetadata)
	stream, err := tc.FullDuplexCall(ctx)
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
	}
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseParameters: []*testpb.ResponseParameters{
			{Size: respSize},
		},
		Payload: payload,
	}
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err)
	}
	// The server must fail this Recv (negative response size), yet the
	// headers set before the failure must still reach the client.
	if _, err := stream.Recv(); err == nil {
		t.Fatalf("%v.Recv() = %v, want ", stream, err)
	}
	header, err := stream.Header()
	if err != nil {
		t.Fatalf("%v.Header() = _, %v, want _, ", stream, err)
	}
	// Drop headers the transport adds implicitly before comparing.
	delete(header, "user-agent")
	delete(header, "content-type")
	expectedHeader := metadata.Join(testMetadata, testMetadata2)
	if !reflect.DeepEqual(header, expectedHeader) {
		t.Fatalf("Received header metadata %v, want %v", header, expectedHeader)
	}
	if err := stream.CloseSend(); err != nil {
		t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil)
	}
}

// TestMalformedHTTP2Metadata verifies the returned error when the client
// sends an illegal metadata.
func (s) TestMalformedHTTP2Metadata(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			// Failed with "server stops accepting new RPCs".
			// Server stops accepting new RPCs when the client sends an illegal http2 header.
			continue
		}
		testMalformedHTTP2Metadata(t, e)
	}
}

// Sends a unary RPC carrying malformedHTTP2Metadata and expects the server to
// reject it with codes.Internal.
func testMalformedHTTP2Metadata(t *testing.T, e env) {
	te := newTest(t, e)
	te.startServer(&testServer{security: e.security})
	defer te.tearDown()
	tc := testpb.NewTestServiceClient(te.clientConn())

	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 2718)
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.SimpleRequest{
		ResponseType: testpb.PayloadType_COMPRESSABLE,
		ResponseSize: 314,
		Payload:      payload,
	}
	ctx := metadata.NewOutgoingContext(context.Background(), malformedHTTP2Metadata)
	if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Internal {
		t.Fatalf("TestService.UnaryCall(%v, _) = _, %v; want _, %s", ctx, err, codes.Internal)
	}
}

func (s) TestTransparentRetry(t *testing.T) {
	for _, e := range listTestEnv() {
		if e.name == "handler-tls" {
			// Fails with RST_STREAM / FLOW_CONTROL_ERROR
			continue
		}
		testTransparentRetry(t, e)
	}
}

// This test makes sure RPCs are transparently retried when they receive a
// RST_STREAM with the REFUSED_STREAM error code, which the InTapHandle
// provokes.
func testTransparentRetry(t *testing.T, e env) { te := newTest(t, e) attempts := 0 successAttempt := 2 te.tapHandle = func(ctx context.Context, _ *tap.Info) (context.Context, error) { attempts++ if attempts < successAttempt { return nil, errors.New("not now") } return ctx, nil } te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tsc := testpb.NewTestServiceClient(cc) testCases := []struct { successAttempt int failFast bool errCode codes.Code }{{ successAttempt: 1, }, { successAttempt: 2, }, { successAttempt: 3, errCode: codes.Unavailable, }, { successAttempt: 1, failFast: true, }, { successAttempt: 2, failFast: true, }, { successAttempt: 3, failFast: true, errCode: codes.Unavailable, }} for _, tc := range testCases { attempts = 0 successAttempt = tc.successAttempt ctx, cancel := context.WithTimeout(context.Background(), time.Second) _, err := tsc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(!tc.failFast)) cancel() if status.Code(err) != tc.errCode { t.Errorf("%+v: tsc.EmptyCall(_, _) = _, %v, want _, Code=%v", tc, err, tc.errCode) } } } func (s) TestCancel(t *testing.T) { for _, e := range listTestEnv() { testCancel(t, e) } } func testCancel(t *testing.T, e env) { te := newTest(t, e) te.declareLogNoise("grpc: the client connection is closing; please retry") te.startServer(&testServer{security: e.security, unaryCallSleepTime: time.Second}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) const argSize = 2718 const respSize = 314 payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: respSize, Payload: payload, } ctx, cancel := context.WithCancel(context.Background()) time.AfterFunc(1*time.Millisecond, cancel) if r, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Canceled { t.Fatalf("TestService/UnaryCall(_, _) = %v, %v; want _, error code: %s", r, 
err, codes.Canceled) } awaitNewConnLogOutput() } func (s) TestCancelNoIO(t *testing.T) { for _, e := range listTestEnv() { testCancelNoIO(t, e) } } func testCancelNoIO(t *testing.T, e env) { te := newTest(t, e) te.declareLogNoise("http2Client.notifyError got notified that the client transport was broken") te.maxStream = 1 // Only allows 1 live stream per server transport. te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) // Start one blocked RPC for which we'll never send streaming // input. This will consume the 1 maximum concurrent streams, // causing future RPCs to hang. ctx, cancelFirst := context.WithCancel(context.Background()) _, err := tc.StreamingInputCall(ctx) if err != nil { t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) } // Loop until the ClientConn receives the initial settings // frame from the server, notifying it about the maximum // concurrent streams. We know when it's received it because // an RPC will fail with codes.DeadlineExceeded instead of // succeeding. // TODO(bradfitz): add internal test hook for this (Issue 534) for { ctx, cancelSecond := context.WithTimeout(context.Background(), 50*time.Millisecond) _, err := tc.StreamingInputCall(ctx) cancelSecond() if err == nil { continue } if status.Code(err) == codes.DeadlineExceeded { break } t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded) } // If there are any RPCs in flight before the client receives // the max streams setting, let them be expired. // TODO(bradfitz): add internal test hook for this (Issue 534) time.Sleep(50 * time.Millisecond) go func() { time.Sleep(50 * time.Millisecond) cancelFirst() }() // This should be blocked until the 1st is canceled, then succeed. 
ctx, cancelThird := context.WithTimeout(context.Background(), 500*time.Millisecond) if _, err := tc.StreamingInputCall(ctx); err != nil { t.Errorf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) } cancelThird() } // The following tests the gRPC streaming RPC implementations. // TODO(zhaoq): Have better coverage on error cases. var ( reqSizes = []int{27182, 8, 1828, 45904} respSizes = []int{31415, 9, 2653, 58979} ) func (s) TestNoService(t *testing.T) { for _, e := range listTestEnv() { testNoService(t, e) } } func testNoService(t *testing.T, e env) { te := newTest(t, e) te.startServer(nil) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) stream, err := tc.FullDuplexCall(te.ctx, grpc.WaitForReady(true)) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if _, err := stream.Recv(); status.Code(err) != codes.Unimplemented { t.Fatalf("stream.Recv() = _, %v, want _, error code %s", err, codes.Unimplemented) } } func (s) TestPingPong(t *testing.T) { for _, e := range listTestEnv() { testPingPong(t, e) } } func testPingPong(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) stream, err := tc.FullDuplexCall(te.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } var index int for index < len(reqSizes) { respParam := []*testpb.ResponseParameters{ { Size: int32(respSizes[index]), }, } payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])) if err != nil { t.Fatal(err) } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: payload, } if err := stream.Send(req); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) } reply, err := stream.Recv() if err != nil { t.Fatalf("%v.Recv() = %v, want ", stream, err) } pt := reply.GetPayload().GetType() if pt != 
testpb.PayloadType_COMPRESSABLE { t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE) } size := len(reply.GetPayload().GetBody()) if size != int(respSizes[index]) { t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) } index++ } if err := stream.CloseSend(); err != nil { t.Fatalf("%v.CloseSend() got %v, want %v", stream, err, nil) } if _, err := stream.Recv(); err != io.EOF { t.Fatalf("%v failed to complele the ping pong test: %v", stream, err) } } func (s) TestMetadataStreamingRPC(t *testing.T) { for _, e := range listTestEnv() { testMetadataStreamingRPC(t, e) } } func testMetadataStreamingRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(te.ctx, testMetadata) stream, err := tc.FullDuplexCall(ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } go func() { headerMD, err := stream.Header() if e.security == "tls" { delete(headerMD, "transport_security_type") } delete(headerMD, "trailer") // ignore if present delete(headerMD, "user-agent") delete(headerMD, "content-type") if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { t.Errorf("#1 %v.Header() = %v, %v, want %v, ", stream, headerMD, err, testMetadata) } // test the cached value. 
headerMD, err = stream.Header() delete(headerMD, "trailer") // ignore if present delete(headerMD, "user-agent") delete(headerMD, "content-type") if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { t.Errorf("#2 %v.Header() = %v, %v, want %v, ", stream, headerMD, err, testMetadata) } err = func() error { for index := 0; index < len(reqSizes); index++ { respParam := []*testpb.ResponseParameters{ { Size: int32(respSizes[index]), }, } payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(reqSizes[index])) if err != nil { return err } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: payload, } if err := stream.Send(req); err != nil { return fmt.Errorf("%v.Send(%v) = %v, want ", stream, req, err) } } return nil }() // Tell the server we're done sending args. stream.CloseSend() if err != nil { t.Error(err) } }() for { if _, err := stream.Recv(); err != nil { break } } trailerMD := stream.Trailer() if !reflect.DeepEqual(testTrailerMetadata, trailerMD) { t.Fatalf("%v.Trailer() = %v, want %v", stream, trailerMD, testTrailerMetadata) } } func (s) TestServerStreaming(t *testing.T) { for _, e := range listTestEnv() { testServerStreaming(t, e) } } func testServerStreaming(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) respParam := make([]*testpb.ResponseParameters, len(respSizes)) for i, s := range respSizes { respParam[i] = &testpb.ResponseParameters{ Size: int32(s), } } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, } stream, err := tc.StreamingOutputCall(context.Background(), req) if err != nil { t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want ", tc, err) } var rpcStatus error var respCnt int var index int for { reply, err := stream.Recv() if err != nil { rpcStatus = err break } pt := 
reply.GetPayload().GetType() if pt != testpb.PayloadType_COMPRESSABLE { t.Fatalf("Got the reply of type %d, want %d", pt, testpb.PayloadType_COMPRESSABLE) } size := len(reply.GetPayload().GetBody()) if size != int(respSizes[index]) { t.Fatalf("Got reply body of length %d, want %d", size, respSizes[index]) } index++ respCnt++ } if rpcStatus != io.EOF { t.Fatalf("Failed to finish the server streaming rpc: %v, want ", rpcStatus) } if respCnt != len(respSizes) { t.Fatalf("Got %d reply, want %d", len(respSizes), respCnt) } } func (s) TestFailedServerStreaming(t *testing.T) { for _, e := range listTestEnv() { testFailedServerStreaming(t, e) } } func testFailedServerStreaming(t *testing.T, e env) { te := newTest(t, e) te.userAgent = failAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) respParam := make([]*testpb.ResponseParameters, len(respSizes)) for i, s := range respSizes { respParam[i] = &testpb.ResponseParameters{ Size: int32(s), } } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, } ctx := metadata.NewOutgoingContext(te.ctx, testMetadata) stream, err := tc.StreamingOutputCall(ctx, req) if err != nil { t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want ", tc, err) } wantErr := status.Error(codes.DataLoss, "error for testing: "+failAppUA) if _, err := stream.Recv(); !equalError(err, wantErr) { t.Fatalf("%v.Recv() = _, %v, want _, %v", stream, err, wantErr) } } func equalError(x, y error) bool { return x == y || (x != nil && y != nil && x.Error() == y.Error()) } // concurrentSendServer is a TestServiceServer whose // StreamingOutputCall makes ten serial Send calls, sending payloads // "0".."9", inclusive. TestServerStreamingConcurrent verifies they // were received in the correct order, and that there were no races. // // All other TestServiceServer methods crash if called. 
type concurrentSendServer struct { testpb.TestServiceServer } func (s concurrentSendServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { for i := 0; i < 10; i++ { stream.Send(&testpb.StreamingOutputCallResponse{ Payload: &testpb.Payload{ Body: []byte{'0' + uint8(i)}, }, }) } return nil } // Tests doing a bunch of concurrent streaming output calls. func (s) TestServerStreamingConcurrent(t *testing.T) { for _, e := range listTestEnv() { testServerStreamingConcurrent(t, e) } } func testServerStreamingConcurrent(t *testing.T, e env) { te := newTest(t, e) te.startServer(concurrentSendServer{}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) doStreamingCall := func() { req := &testpb.StreamingOutputCallRequest{} stream, err := tc.StreamingOutputCall(context.Background(), req) if err != nil { t.Errorf("%v.StreamingOutputCall(_) = _, %v, want ", tc, err) return } var ngot int var buf bytes.Buffer for { reply, err := stream.Recv() if err == io.EOF { break } if err != nil { t.Fatal(err) } ngot++ if buf.Len() > 0 { buf.WriteByte(',') } buf.Write(reply.GetPayload().GetBody()) } if want := 10; ngot != want { t.Errorf("Got %d replies, want %d", ngot, want) } if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want { t.Errorf("Got replies %q; want %q", got, want) } } var wg sync.WaitGroup for i := 0; i < 20; i++ { wg.Add(1) go func() { defer wg.Done() doStreamingCall() }() } wg.Wait() } func generatePayloadSizes() [][]int { reqSizes := [][]int{ {27182, 8, 1828, 45904}, } num8KPayloads := 1024 eightKPayloads := []int{} for i := 0; i < num8KPayloads; i++ { eightKPayloads = append(eightKPayloads, (1 << 13)) } reqSizes = append(reqSizes, eightKPayloads) num2MPayloads := 8 twoMPayloads := []int{} for i := 0; i < num2MPayloads; i++ { twoMPayloads = append(twoMPayloads, (1 << 21)) } reqSizes = append(reqSizes, twoMPayloads) return reqSizes } func (s) TestClientStreaming(t 
*testing.T) { for _, s := range generatePayloadSizes() { for _, e := range listTestEnv() { testClientStreaming(t, e, s) } } } func testClientStreaming(t *testing.T, e env, sizes []int) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(te.ctx, time.Second*30) defer cancel() stream, err := tc.StreamingInputCall(ctx) if err != nil { t.Fatalf("%v.StreamingInputCall(_) = _, %v, want ", tc, err) } var sum int for _, s := range sizes { payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(s)) if err != nil { t.Fatal(err) } req := &testpb.StreamingInputCallRequest{ Payload: payload, } if err := stream.Send(req); err != nil { t.Fatalf("%v.Send(_) = %v, want ", stream, err) } sum += s } reply, err := stream.CloseAndRecv() if err != nil { t.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) } if reply.GetAggregatedPayloadSize() != int32(sum) { t.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum) } } func (s) TestClientStreamingError(t *testing.T) { for _, e := range listTestEnv() { if e.name == "handler-tls" { continue } testClientStreamingError(t, e) } } func testClientStreamingError(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, earlyFail: true}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) stream, err := tc.StreamingInputCall(te.ctx) if err != nil { t.Fatalf("%v.StreamingInputCall(_) = _, %v, want ", tc, err) } payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1) if err != nil { t.Fatal(err) } req := &testpb.StreamingInputCallRequest{ Payload: payload, } // The 1st request should go through. 
if err := stream.Send(req); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) } for { if err := stream.Send(req); err != io.EOF { continue } if _, err := stream.CloseAndRecv(); status.Code(err) != codes.NotFound { t.Fatalf("%v.CloseAndRecv() = %v, want error %s", stream, err, codes.NotFound) } break } } func (s) TestExceedMaxStreamsLimit(t *testing.T) { for _, e := range listTestEnv() { testExceedMaxStreamsLimit(t, e) } } func testExceedMaxStreamsLimit(t *testing.T, e env) { te := newTest(t, e) te.declareLogNoise( "http2Client.notifyError got notified that the client transport was broken", "Conn.resetTransport failed to create client transport", "grpc: the connection is closing", ) te.maxStream = 1 // Only allows 1 live stream per server transport. te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) _, err := tc.StreamingInputCall(te.ctx) if err != nil { t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, ", tc, err) } // Loop until receiving the new max stream setting from the server. for { ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) defer cancel() _, err := tc.StreamingInputCall(ctx) if err == nil { time.Sleep(50 * time.Millisecond) continue } if status.Code(err) == codes.DeadlineExceeded { break } t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, %s", tc, err, codes.DeadlineExceeded) } } func (s) TestStreamsQuotaRecovery(t *testing.T) { for _, e := range listTestEnv() { testStreamsQuotaRecovery(t, e) } } func testStreamsQuotaRecovery(t *testing.T, e env) { te := newTest(t, e) te.declareLogNoise( "http2Client.notifyError got notified that the client transport was broken", "Conn.resetTransport failed to create client transport", "grpc: the connection is closing", ) te.maxStream = 1 // Allows 1 live stream. 
te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithCancel(context.Background()) defer cancel() if _, err := tc.StreamingInputCall(ctx); err != nil { t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, ", err) } // Loop until the new max stream setting is effective. for { ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) _, err := tc.StreamingInputCall(ctx) cancel() if err == nil { time.Sleep(5 * time.Millisecond) continue } if status.Code(err) == codes.DeadlineExceeded { break } t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %s", err, codes.DeadlineExceeded) } var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) go func() { defer wg.Done() payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 314) if err != nil { t.Error(err) return } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: 1592, Payload: payload, } // No rpc should go through due to the max streams limit. ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() if _, err := tc.UnaryCall(ctx, req, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { t.Errorf("tc.UnaryCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } }() } wg.Wait() cancel() // A new stream should be allowed after canceling the first one. 
ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if _, err := tc.StreamingInputCall(ctx); err != nil { t.Fatalf("tc.StreamingInputCall(_) = _, %v, want _, %v", err, nil) } } func (s) TestCompressServerHasNoSupport(t *testing.T) { for _, e := range listTestEnv() { testCompressServerHasNoSupport(t, e) } } func testCompressServerHasNoSupport(t *testing.T, e env) { te := newTest(t, e) te.serverCompression = false te.clientCompression = false te.clientNopCompression = true te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) const argSize = 271828 const respSize = 314159 payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: respSize, Payload: payload, } if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.Unimplemented { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code %s", err, codes.Unimplemented) } // Streaming RPC stream, err := tc.FullDuplexCall(context.Background()) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Unimplemented { t.Fatalf("%v.Recv() = %v, want error code %s", stream, err, codes.Unimplemented) } } func (s) TestCompressOK(t *testing.T) { for _, e := range listTestEnv() { testCompressOK(t, e) } } func testCompressOK(t *testing.T, e env) { te := newTest(t, e) te.serverCompression = true te.clientCompression = true te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) // Unary call const argSize = 271828 const respSize = 314159 payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: 
testpb.PayloadType_COMPRESSABLE, ResponseSize: respSize, Payload: payload, } ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) if _, err := tc.UnaryCall(ctx, req); err != nil { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) } // Streaming RPC ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := tc.FullDuplexCall(ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } respParam := []*testpb.ResponseParameters{ { Size: 31415, }, } payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) if err != nil { t.Fatal(err) } sreq := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: payload, } if err := stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } stream.CloseSend() if _, err := stream.Recv(); err != nil { t.Fatalf("%v.Recv() = %v, want ", stream, err) } if _, err := stream.Recv(); err != io.EOF { t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err) } } func (s) TestIdentityEncoding(t *testing.T) { for _, e := range listTestEnv() { testIdentityEncoding(t, e) } } func testIdentityEncoding(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) // Unary call payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 5) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: 10, Payload: payload, } ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) if _, err := tc.UnaryCall(ctx, req); err != nil { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) } // Streaming RPC ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := tc.FullDuplexCall(ctx, 
grpc.UseCompressor("identity")) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) if err != nil { t.Fatal(err) } sreq := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: []*testpb.ResponseParameters{{Size: 10}}, Payload: payload, } if err := stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } stream.CloseSend() if _, err := stream.Recv(); err != nil { t.Fatalf("%v.Recv() = %v, want ", stream, err) } if _, err := stream.Recv(); err != io.EOF { t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err) } } func (s) TestUnaryClientInterceptor(t *testing.T) { for _, e := range listTestEnv() { testUnaryClientInterceptor(t, e) } } func failOkayRPC(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { err := invoker(ctx, method, req, reply, cc, opts...) if err == nil { return status.Error(codes.NotFound, "") } return err } func testUnaryClientInterceptor(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.unaryClientInt = failOkayRPC te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.NotFound { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.NotFound) } } func (s) TestStreamClientInterceptor(t *testing.T) { for _, e := range listTestEnv() { testStreamClientInterceptor(t, e) } } func failOkayStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { s, err := streamer(ctx, desc, cc, method, opts...) 
if err == nil { return nil, status.Error(codes.NotFound, "") } return s, nil } func testStreamClientInterceptor(t *testing.T, e env) { te := newTest(t, e) te.streamClientInt = failOkayStream te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) respParam := []*testpb.ResponseParameters{ { Size: int32(1), }, } payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1)) if err != nil { t.Fatal(err) } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: payload, } if _, err := tc.StreamingOutputCall(context.Background(), req); status.Code(err) != codes.NotFound { t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, error code %s", tc, err, codes.NotFound) } } func (s) TestUnaryServerInterceptor(t *testing.T) { for _, e := range listTestEnv() { testUnaryServerInterceptor(t, e) } } func errInjector(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { return nil, status.Error(codes.PermissionDenied, "") } func testUnaryServerInterceptor(t *testing.T, e env) { te := newTest(t, e) te.unaryServerInt = errInjector te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.PermissionDenied { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied) } } func (s) TestStreamServerInterceptor(t *testing.T) { for _, e := range listTestEnv() { // TODO(bradfitz): Temporarily skip this env due to #619. 
if e.name == "handler-tls" { continue } testStreamServerInterceptor(t, e) } } func fullDuplexOnly(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { if info.FullMethod == "/grpc.testing.TestService/FullDuplexCall" { return handler(srv, ss) } // Reject the other methods. return status.Error(codes.PermissionDenied, "") } func testStreamServerInterceptor(t *testing.T, e env) { te := newTest(t, e) te.streamServerInt = fullDuplexOnly te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) respParam := []*testpb.ResponseParameters{ { Size: int32(1), }, } payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1)) if err != nil { t.Fatal(err) } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: payload, } s1, err := tc.StreamingOutputCall(context.Background(), req) if err != nil { t.Fatalf("%v.StreamingOutputCall(_) = _, %v, want _, ", tc, err) } if _, err := s1.Recv(); status.Code(err) != codes.PermissionDenied { t.Fatalf("%v.StreamingInputCall(_) = _, %v, want _, error code %s", tc, err, codes.PermissionDenied) } s2, err := tc.FullDuplexCall(context.Background()) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err := s2.Send(req); err != nil { t.Fatalf("%v.Send(_) = %v, want ", s2, err) } if _, err := s2.Recv(); err != nil { t.Fatalf("%v.Recv() = _, %v, want _, ", s2, err) } } // funcServer implements methods of TestServiceServer using funcs, // similar to an http.HandlerFunc. // Any unimplemented method will crash. Tests implement the method(s) // they need. 
type funcServer struct { testpb.TestServiceServer unaryCall func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) streamingInputCall func(stream testpb.TestService_StreamingInputCallServer) error fullDuplexCall func(stream testpb.TestService_FullDuplexCallServer) error } func (s *funcServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return s.unaryCall(ctx, in) } func (s *funcServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { return s.streamingInputCall(stream) } func (s *funcServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { return s.fullDuplexCall(stream) } func (s) TestClientRequestBodyErrorUnexpectedEOF(t *testing.T) { for _, e := range listTestEnv() { testClientRequestBodyErrorUnexpectedEOF(t, e) } } /* testClientRequestBodyErrorUnexpectedEOF: sends a gRPC length-prefix promising 5 payload bytes but sets END_STREAM; the server must not invoke the unary handler (the handler t.Errors if called) and must not crash. */ func testClientRequestBodyErrorUnexpectedEOF(t *testing.T, e env) { te := newTest(t, e) ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { errUnexpectedCall := errors.New("unexpected call func server method") t.Error(errUnexpectedCall) return nil, errUnexpectedCall }} te.startServer(ts) defer te.tearDown() te.withServerTester(func(st *serverTester) { st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall") // Say we have 5 bytes coming, but set END_STREAM flag: st.writeData(1, true, []byte{0, 0, 0, 0, 5}) st.wantAnyFrame() // wait for server to crash (it used to crash) }) } func (s) TestClientRequestBodyErrorCloseAfterLength(t *testing.T) { for _, e := range listTestEnv() { testClientRequestBodyErrorCloseAfterLength(t, e) } } /* testClientRequestBodyErrorCloseAfterLength: announces a 5-byte body then closes the raw connection; the handler must never run. */ func testClientRequestBodyErrorCloseAfterLength(t *testing.T, e env) { te := newTest(t, e) te.declareLogNoise("Server.processUnaryRPC failed to write status") ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { errUnexpectedCall := errors.New("unexpected call func server 
method") t.Error(errUnexpectedCall) return nil, errUnexpectedCall }} te.startServer(ts) defer te.tearDown() te.withServerTester(func(st *serverTester) { st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall") // say we're sending 5 bytes, but then close the connection instead. st.writeData(1, false, []byte{0, 0, 0, 0, 5}) st.cc.Close() }) } func (s) TestClientRequestBodyErrorCancel(t *testing.T) { for _, e := range listTestEnv() { testClientRequestBodyErrorCancel(t, e) } } /* testClientRequestBodyErrorCancel: RST_STREAMs stream 1 before its data arrives (handler must not run), then issues a fresh valid-enough request on stream 3 to prove the server is still serving. */ func testClientRequestBodyErrorCancel(t *testing.T, e env) { te := newTest(t, e) gotCall := make(chan bool, 1) ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { gotCall <- true return new(testpb.SimpleResponse), nil }} te.startServer(ts) defer te.tearDown() te.withServerTester(func(st *serverTester) { st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall") // Say we have 5 bytes coming, but cancel it instead. st.writeRSTStream(1, http2.ErrCodeCancel) st.writeData(1, false, []byte{0, 0, 0, 0, 5}) // Verify we didn't get a call yet. select { case <-gotCall: t.Fatal("unexpected call") default: } // And now send an uncanceled (but still invalid), just to get a response. 
st.writeHeadersGRPC(3, "/grpc.testing.TestService/UnaryCall") st.writeData(3, true, []byte{0, 0, 0, 0, 0}) <-gotCall st.wantAnyFrame() }) } func (s) TestClientRequestBodyErrorCancelStreamingInput(t *testing.T) { for _, e := range listTestEnv() { testClientRequestBodyErrorCancelStreamingInput(t, e) } } func testClientRequestBodyErrorCancelStreamingInput(t *testing.T, e env) { te := newTest(t, e) recvErr := make(chan error, 1) ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error { _, err := stream.Recv() recvErr <- err return nil }} te.startServer(ts) defer te.tearDown() te.withServerTester(func(st *serverTester) { st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall") // Say we have 5 bytes coming, but cancel it instead. st.writeData(1, false, []byte{0, 0, 0, 0, 5}) st.writeRSTStream(1, http2.ErrCodeCancel) var got error select { case got = <-recvErr: case <-time.After(3 * time.Second): t.Fatal("timeout waiting for error") } if grpc.Code(got) != codes.Canceled { t.Errorf("error = %#v; want error code %s", got, codes.Canceled) } }) } func (s) TestClientResourceExhaustedCancelFullDuplex(t *testing.T) { for _, e := range listTestEnv() { if e.httpHandler { // httpHandler write won't be blocked on flow control window. continue } testClientResourceExhaustedCancelFullDuplex(t, e) } } func testClientResourceExhaustedCancelFullDuplex(t *testing.T, e env) { te := newTest(t, e) recvErr := make(chan error, 1) ts := &funcServer{fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { defer close(recvErr) _, err := stream.Recv() if err != nil { return status.Errorf(codes.Internal, "stream.Recv() got error: %v, want ", err) } // create a payload that's larger than the default flow control window. 
payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 10) if err != nil { return err } resp := &testpb.StreamingOutputCallResponse{ Payload: payload, } ce := make(chan error, 1) go func() { var err error for { if err = stream.Send(resp); err != nil { break } } ce <- err }() select { case err = <-ce: case <-time.After(10 * time.Second): err = errors.New("10s timeout reached") } recvErr <- err return err }} te.startServer(ts) defer te.tearDown() // set a low limit on receive message size to error with Resource Exhausted on // client side when server send a large message. te.maxClientReceiveMsgSize = newInt(10) cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) stream, err := tc.FullDuplexCall(context.Background()) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } req := &testpb.StreamingOutputCallRequest{} if err := stream.Send(req); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) } if _, err := stream.Recv(); status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) } err = <-recvErr if status.Code(err) != codes.Canceled { t.Fatalf("server got error %v, want error code: %s", err, codes.Canceled) } } type clientTimeoutCreds struct { timeoutReturned bool } func (c *clientTimeoutCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { if !c.timeoutReturned { c.timeoutReturned = true return nil, nil, context.DeadlineExceeded } return rawConn, nil, nil } func (c *clientTimeoutCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { return rawConn, nil, nil } func (c *clientTimeoutCreds) Info() credentials.ProtocolInfo { return credentials.ProtocolInfo{} } func (c *clientTimeoutCreds) Clone() credentials.TransportCredentials { return nil } func (c *clientTimeoutCreds) OverrideServerName(s string) error { return nil } func (s) 
TestNonFailFastRPCSucceedOnTimeoutCreds(t *testing.T) { te := newTest(t, env{name: "timeout-cred", network: "tcp", security: "clientTimeoutCreds", balancer: "v1"}) te.userAgent = testAppUA te.startServer(&testServer{security: te.e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) // This unary call should succeed, because ClientHandshake will succeed for the second time. if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { te.t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want ", err) } } type serverDispatchCred struct { rawConnCh chan net.Conn } func newServerDispatchCred() *serverDispatchCred { return &serverDispatchCred{ rawConnCh: make(chan net.Conn, 1), } } func (c *serverDispatchCred) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { return rawConn, nil, nil } func (c *serverDispatchCred) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { select { case c.rawConnCh <- rawConn: default: } return nil, nil, credentials.ErrConnDispatched } func (c *serverDispatchCred) Info() credentials.ProtocolInfo { return credentials.ProtocolInfo{} } func (c *serverDispatchCred) Clone() credentials.TransportCredentials { return nil } func (c *serverDispatchCred) OverrideServerName(s string) error { return nil } func (c *serverDispatchCred) getRawConn() net.Conn { return <-c.rawConnCh } func (s) TestServerCredsDispatch(t *testing.T) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen: %v", err) } cred := newServerDispatchCred() s := grpc.NewServer(grpc.Creds(cred)) go s.Serve(lis) defer s.Stop() cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(cred)) if err != nil { t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err) } defer cc.Close() rawConn := cred.getRawConn() // Give grpc a chance to see the error and potentially close the connection. 
// And check that connection is not closed after that. time.Sleep(100 * time.Millisecond) // Check rawConn is not closed. if n, err := rawConn.Write([]byte{0}); n <= 0 || err != nil { t.Errorf("Read() = %v, %v; want n>0, ", n, err) } } type authorityCheckCreds struct { got string } func (c *authorityCheckCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { return rawConn, nil, nil } func (c *authorityCheckCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { c.got = authority return rawConn, nil, nil } func (c *authorityCheckCreds) Info() credentials.ProtocolInfo { return credentials.ProtocolInfo{} } func (c *authorityCheckCreds) Clone() credentials.TransportCredentials { return c } func (c *authorityCheckCreds) OverrideServerName(s string) error { return nil } // This test makes sure that the authority client handshake gets is the endpoint // in dial target, not the resolved ip address. func (s) TestCredsHandshakeAuthority(t *testing.T) { const testAuthority = "test.auth.ori.ty" lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen: %v", err) } cred := &authorityCheckCreds{} s := grpc.NewServer() go s.Serve(lis) defer s.Stop() r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() cc, err := grpc.Dial(r.Scheme()+":///"+testAuthority, grpc.WithTransportCredentials(cred)) if err != nil { t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err) } defer cc.Close() r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() for { s := cc.GetState() if s == connectivity.Ready { break } if !cc.WaitForStateChange(ctx, s) { // ctx got timeout or canceled. 
t.Fatalf("ClientConn is not ready after 100 ms") } } if cred.got != testAuthority { t.Fatalf("client creds got authority: %q, want: %q", cred.got, testAuthority) } } // This test makes sure that the authority client handshake gets is the endpoint // of the ServerName of the address when it is set. func (s) TestCredsHandshakeServerNameAuthority(t *testing.T) { const testAuthority = "test.auth.ori.ty" const testServerName = "test.server.name" lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen: %v", err) } cred := &authorityCheckCreds{} s := grpc.NewServer() go s.Serve(lis) defer s.Stop() r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() cc, err := grpc.Dial(r.Scheme()+":///"+testAuthority, grpc.WithTransportCredentials(cred)) if err != nil { t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err) } defer cc.Close() r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String(), ServerName: testServerName}}}) ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() for { s := cc.GetState() if s == connectivity.Ready { break } if !cc.WaitForStateChange(ctx, s) { // ctx got timeout or canceled. 
t.Fatalf("ClientConn is not ready after 100 ms") } } if cred.got != testServerName { t.Fatalf("client creds got authority: %q, want: %q", cred.got, testAuthority) } } type clientFailCreds struct{} func (c *clientFailCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { return rawConn, nil, nil } func (c *clientFailCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { return nil, nil, fmt.Errorf("client handshake fails with fatal error") } func (c *clientFailCreds) Info() credentials.ProtocolInfo { return credentials.ProtocolInfo{} } func (c *clientFailCreds) Clone() credentials.TransportCredentials { return c } func (c *clientFailCreds) OverrideServerName(s string) error { return nil } // This test makes sure that failfast RPCs fail if client handshake fails with // fatal errors. func (s) TestFailfastRPCFailOnFatalHandshakeError(t *testing.T) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen: %v", err) } defer lis.Close() cc, err := grpc.Dial("passthrough:///"+lis.Addr().String(), grpc.WithTransportCredentials(&clientFailCreds{})) if err != nil { t.Fatalf("grpc.Dial(_) = %v", err) } defer cc.Close() tc := testpb.NewTestServiceClient(cc) // This unary call should fail, but not timeout. ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(false)); status.Code(err) != codes.Unavailable { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want ", err) } } func (s) TestFlowControlLogicalRace(t *testing.T) { // Test for a regression of https://github.com/grpc/grpc-go/issues/632, // and other flow control bugs. 
const ( itemCount = 100 itemSize = 1 << 10 recvCount = 2 maxFailures = 3 requestTimeout = time.Second * 5 ) requestCount := 10000 if raceMode { requestCount = 1000 } lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen: %v", err) } defer lis.Close() s := grpc.NewServer() testpb.RegisterTestServiceServer(s, &flowControlLogicalRaceServer{ itemCount: itemCount, itemSize: itemSize, }) defer s.Stop() go s.Serve(lis) ctx := context.Background() cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock()) if err != nil { t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err) } defer cc.Close() cl := testpb.NewTestServiceClient(cc) failures := 0 for i := 0; i < requestCount; i++ { ctx, cancel := context.WithTimeout(ctx, requestTimeout) output, err := cl.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{}) if err != nil { t.Fatalf("StreamingOutputCall; err = %q", err) } j := 0 loop: for ; j < recvCount; j++ { _, err := output.Recv() if err != nil { if err == io.EOF { break loop } switch status.Code(err) { case codes.DeadlineExceeded: break loop default: t.Fatalf("Recv; err = %q", err) } } } cancel() <-ctx.Done() if j < recvCount { t.Errorf("got %d responses to request %d", j, i) failures++ if failures >= maxFailures { // Continue past the first failure to see if the connection is // entirely broken, or if only a single RPC was affected break } } } } /* flowControlLogicalRaceServer streams itemCount responses of itemSize bytes each, deliberately stressing flow control. */ type flowControlLogicalRaceServer struct { testpb.TestServiceServer itemSize int itemCount int } func (s *flowControlLogicalRaceServer) StreamingOutputCall(req *testpb.StreamingOutputCallRequest, srv testpb.TestService_StreamingOutputCallServer) error { for i := 0; i < s.itemCount; i++ { err := srv.Send(&testpb.StreamingOutputCallResponse{ Payload: &testpb.Payload{ // Sending a large stream of data which the client reject // helps to trigger some types of flow control bugs. 
// // Reallocating memory here is inefficient, but the stress it // puts on the GC leads to more frequent flow control // failures. The GC likely causes more variety in the // goroutine scheduling orders. Body: bytes.Repeat([]byte("a"), s.itemSize), }, }) if err != nil { return err } } return nil } /* lockingWriter is an io.Writer whose target can be swapped atomically under a mutex; used to redirect grpclog output during tests. */ type lockingWriter struct { mu sync.Mutex w io.Writer } func (lw *lockingWriter) Write(p []byte) (n int, err error) { lw.mu.Lock() defer lw.mu.Unlock() return lw.w.Write(p) } func (lw *lockingWriter) setWriter(w io.Writer) { lw.mu.Lock() defer lw.mu.Unlock() lw.w = w } var testLogOutput = &lockingWriter{w: os.Stderr} // awaitNewConnLogOutput waits for any of grpc.NewConn's goroutines to // terminate, if they're still running. It spams logs with this // message. We wait for it so our log filter is still // active. Otherwise the "defer restore()" at the top of various test // functions restores our log filter and then the goroutine spams. func awaitNewConnLogOutput() { awaitLogOutput(50*time.Millisecond, "grpc: the client connection is closing; please retry") } /* awaitLogOutput polls (with wakeup notifications from filterWriter) until the captured log output contains phrase or maxWait elapses. */ func awaitLogOutput(maxWait time.Duration, phrase string) { pb := []byte(phrase) timer := time.NewTimer(maxWait) defer timer.Stop() wakeup := make(chan bool, 1) for { if logOutputHasContents(pb, wakeup) { return } select { case <-timer.C: // Too slow. Oh well. return case <-wakeup: } } } func logOutputHasContents(v []byte, wakeup chan<- bool) bool { testLogOutput.mu.Lock() defer testLogOutput.mu.Unlock() fw, ok := testLogOutput.w.(*filterWriter) if !ok { return false } fw.mu.Lock() defer fw.mu.Unlock() if bytes.Contains(fw.buf.Bytes(), v) { return true } fw.wakeup = wakeup return false } var verboseLogs = flag.Bool("verbose_logs", false, "show all grpclog output, without filtering") func noop() {} // declareLogNoise declares that t is expected to emit the following noisy phrases, // even on success. Those phrases will be filtered from grpclog output // and only be shown if *verbose_logs or t ends up failing. 
// The returned restore function should be called with defer to be run // before the test ends. func declareLogNoise(t *testing.T, phrases ...string) (restore func()) { if *verboseLogs { return noop } fw := &filterWriter{dst: os.Stderr, filter: phrases} testLogOutput.setWriter(fw) return func() { if t.Failed() { fw.mu.Lock() defer fw.mu.Unlock() if fw.buf.Len() > 0 { t.Logf("Complete log output:\n%s", fw.buf.Bytes()) } } testLogOutput.setWriter(os.Stderr) } } type filterWriter struct { dst io.Writer filter []string mu sync.Mutex buf bytes.Buffer wakeup chan<- bool // if non-nil, gets true on write } func (fw *filterWriter) Write(p []byte) (n int, err error) { fw.mu.Lock() fw.buf.Write(p) if fw.wakeup != nil { select { case fw.wakeup <- true: default: } } fw.mu.Unlock() ps := string(p) for _, f := range fw.filter { if strings.Contains(ps, f) { return len(p), nil } } return fw.dst.Write(p) } // stubServer is a server that is easy to customize within individual test // cases. type stubServer struct { // Guarantees we satisfy this interface; panics if unimplemented methods are called. testpb.TestServiceServer // Customizable implementations of server handlers. emptyCall func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) unaryCall func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) fullDuplexCall func(stream testpb.TestService_FullDuplexCallServer) error // A client connected to this service the test may use. Created in Start(). client testpb.TestServiceClient cc *grpc.ClientConn s *grpc.Server addr string // address of listener cleanups []func() // Lambdas executed in Stop(); populated by Start(). 
r *manual.Resolver } func (ss *stubServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return ss.emptyCall(ctx, in) } func (ss *stubServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return ss.unaryCall(ctx, in) } func (ss *stubServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { return ss.fullDuplexCall(stream) } // Start starts the server and creates a client connected to it. func (ss *stubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) error { r, cleanup := manual.GenerateAndRegisterManualResolver() ss.r = r ss.cleanups = append(ss.cleanups, cleanup) lis, err := net.Listen("tcp", "localhost:0") if err != nil { return fmt.Errorf(`net.Listen("tcp", "localhost:0") = %v`, err) } ss.addr = lis.Addr().String() ss.cleanups = append(ss.cleanups, func() { lis.Close() }) s := grpc.NewServer(sopts...) testpb.RegisterTestServiceServer(s, ss) go s.Serve(lis) ss.cleanups = append(ss.cleanups, s.Stop) ss.s = s target := ss.r.Scheme() + ":///" + ss.addr opts := append([]grpc.DialOption{grpc.WithInsecure()}, dopts...) cc, err := grpc.Dial(target, opts...) if err != nil { return fmt.Errorf("grpc.Dial(%q) = %v", target, err) } ss.cc = cc ss.r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: ss.addr}}}) if err := ss.waitForReady(cc); err != nil { return err } ss.cleanups = append(ss.cleanups, func() { cc.Close() }) ss.client = testpb.NewTestServiceClient(cc) return nil } func (ss *stubServer) newServiceConfig(sc string) { ss.r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: ss.addr}}, ServiceConfig: parseCfg(ss.r, sc)}) } func (ss *stubServer) waitForReady(cc *grpc.ClientConn) error { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() for { s := cc.GetState() if s == connectivity.Ready { return nil } if !cc.WaitForStateChange(ctx, s) { // ctx got timeout or canceled. 
return ctx.Err() } } } func (ss *stubServer) Stop() { for i := len(ss.cleanups) - 1; i >= 0; i-- { ss.cleanups[i]() } } func (s) TestGRPCMethod(t *testing.T) { var method string var ok bool ss := &stubServer{ emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { method, ok = grpc.Method(ctx) return &testpb.Empty{}, nil }, } if err := ss.Start(nil); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() if _, err := ss.client.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("ss.client.EmptyCall(_, _) = _, %v; want _, nil", err) } if want := "/grpc.testing.TestService/EmptyCall"; !ok || method != want { t.Fatalf("grpc.Method(_) = %q, %v; want %q, true", method, ok, want) } } func (s) TestUnaryProxyDoesNotForwardMetadata(t *testing.T) { const mdkey = "somedata" // endpoint ensures mdkey is NOT in metadata and returns an error if it is. endpoint := &stubServer{ emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil { return nil, status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey) } return &testpb.Empty{}, nil }, } if err := endpoint.Start(nil); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer endpoint.Stop() // proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint // without explicitly copying the metadata. 
proxy := &stubServer{ emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil { return nil, status.Errorf(codes.Internal, "proxy: md=%v; want contains(%q)", md, mdkey) } return endpoint.client.EmptyCall(ctx, in) }, } if err := proxy.Start(nil); err != nil { t.Fatalf("Error starting proxy server: %v", err) } defer proxy.Stop() ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() md := metadata.Pairs(mdkey, "val") ctx = metadata.NewOutgoingContext(ctx, md) // Sanity check that endpoint properly errors when it sees mdkey. _, err := endpoint.client.EmptyCall(ctx, &testpb.Empty{}) if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal { t.Fatalf("endpoint.client.EmptyCall(_, _) = _, %v; want _, ", err) } if _, err := proxy.client.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatal(err.Error()) } } func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) { const mdkey = "somedata" // doFDC performs a FullDuplexCall with client and returns the error from the // first stream.Recv call, or nil if that error is io.EOF. Calls t.Fatal if // the stream cannot be established. doFDC := func(ctx context.Context, client testpb.TestServiceClient) error { stream, err := client.FullDuplexCall(ctx) if err != nil { t.Fatalf("Unwanted error: %v", err) } if _, err := stream.Recv(); err != io.EOF { return err } return nil } // endpoint ensures mdkey is NOT in metadata and returns an error if it is. 
endpoint := &stubServer{ fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { ctx := stream.Context() if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil { return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey) } return nil }, } if err := endpoint.Start(nil); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer endpoint.Stop() // proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint // without explicitly copying the metadata. proxy := &stubServer{ fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { ctx := stream.Context() if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil { return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey) } return doFDC(ctx, endpoint.client) }, } if err := proxy.Start(nil); err != nil { t.Fatalf("Error starting proxy server: %v", err) } defer proxy.Stop() ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() md := metadata.Pairs(mdkey, "val") ctx = metadata.NewOutgoingContext(ctx, md) // Sanity check that endpoint properly errors when it sees mdkey in ctx. err := doFDC(ctx, endpoint.client) if s, ok := status.FromError(err); !ok || s.Code() != codes.Internal { t.Fatalf("stream.Recv() = _, %v; want _, ", err) } if err := doFDC(ctx, proxy.client); err != nil { t.Fatalf("doFDC(_, proxy.client) = %v; want nil", err) } } func (s) TestStatsTagsAndTrace(t *testing.T) { // Data added to context by client (typically in a stats handler). tags := []byte{1, 5, 2, 4, 3} trace := []byte{5, 2, 1, 3, 4} // endpoint ensures Tags() and Trace() in context match those that were added // by the client and returns an error if not. 
endpoint := &stubServer{ emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { md, _ := metadata.FromIncomingContext(ctx) if tg := stats.Tags(ctx); !reflect.DeepEqual(tg, tags) { return nil, status.Errorf(codes.Internal, "stats.Tags(%v)=%v; want %v", ctx, tg, tags) } if !reflect.DeepEqual(md["grpc-tags-bin"], []string{string(tags)}) { return nil, status.Errorf(codes.Internal, "md['grpc-tags-bin']=%v; want %v", md["grpc-tags-bin"], tags) } if tr := stats.Trace(ctx); !reflect.DeepEqual(tr, trace) { return nil, status.Errorf(codes.Internal, "stats.Trace(%v)=%v; want %v", ctx, tr, trace) } if !reflect.DeepEqual(md["grpc-trace-bin"], []string{string(trace)}) { return nil, status.Errorf(codes.Internal, "md['grpc-trace-bin']=%v; want %v", md["grpc-trace-bin"], trace) } return &testpb.Empty{}, nil }, } if err := endpoint.Start(nil); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer endpoint.Stop() ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() testCases := []struct { ctx context.Context want codes.Code }{ {ctx: ctx, want: codes.Internal}, {ctx: stats.SetTags(ctx, tags), want: codes.Internal}, {ctx: stats.SetTrace(ctx, trace), want: codes.Internal}, {ctx: stats.SetTags(stats.SetTrace(ctx, tags), tags), want: codes.Internal}, {ctx: stats.SetTags(stats.SetTrace(ctx, trace), tags), want: codes.OK}, } for _, tc := range testCases { _, err := endpoint.client.EmptyCall(tc.ctx, &testpb.Empty{}) if tc.want == codes.OK && err != nil { t.Fatalf("endpoint.client.EmptyCall(%v, _) = _, %v; want _, nil", tc.ctx, err) } if s, ok := status.FromError(err); !ok || s.Code() != tc.want { t.Fatalf("endpoint.client.EmptyCall(%v, _) = _, %v; want _, ", tc.ctx, err, tc.want) } } } func (s) TestTapTimeout(t *testing.T) { sopts := []grpc.ServerOption{ grpc.InTapHandle(func(ctx context.Context, _ *tap.Info) (context.Context, error) { c, cancel := context.WithCancel(ctx) // Call cancel instead of setting a 
deadline so we can detect which error // occurred -- this cancellation (desired) or the client's deadline // expired (indicating this cancellation did not affect the RPC). time.AfterFunc(10*time.Millisecond, cancel) return c, nil }), } ss := &stubServer{ emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { <-ctx.Done() return nil, status.Errorf(codes.Canceled, ctx.Err().Error()) }, } if err := ss.Start(sopts); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() // This was known to be flaky; test several times. for i := 0; i < 10; i++ { // Set our own deadline in case the server hangs. ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) res, err := ss.client.EmptyCall(ctx, &testpb.Empty{}) cancel() if s, ok := status.FromError(err); !ok || s.Code() != codes.Canceled { t.Fatalf("ss.client.EmptyCall(context.Background(), _) = %v, %v; want nil, ", res, err) } } } /* TestClientWriteFailsAfterServerClosesStream: after the server ends the stream with an Internal status, client Sends must eventually fail with io.EOF (never any other error). */ func (s) TestClientWriteFailsAfterServerClosesStream(t *testing.T) { ss := &stubServer{ fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { return status.Errorf(codes.Internal, "") }, } sopts := []grpc.ServerOption{} if err := ss.Start(sopts); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() stream, err := ss.client.FullDuplexCall(ctx) if err != nil { t.Fatalf("Error while creating stream: %v", err) } for { if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err == nil { time.Sleep(5 * time.Millisecond) } else if err == io.EOF { break // Success. 
} else { t.Fatalf("stream.Send(_) = %v, want io.EOF", err) } } } /* windowSizeConfig bundles the four HTTP/2 initial window sizes a test wants to force on both peers. */ type windowSizeConfig struct { serverStream int32 serverConn int32 clientStream int32 clientConn int32 } func max(a, b int32) int32 { if a > b { return a } return b } func (s) TestConfigurableWindowSizeWithLargeWindow(t *testing.T) { wc := windowSizeConfig{ serverStream: 8 * 1024 * 1024, serverConn: 12 * 1024 * 1024, clientStream: 6 * 1024 * 1024, clientConn: 8 * 1024 * 1024, } for _, e := range listTestEnv() { testConfigurableWindowSize(t, e, wc) } } func (s) TestConfigurableWindowSizeWithSmallWindow(t *testing.T) { wc := windowSizeConfig{ serverStream: 1, serverConn: 1, clientStream: 1, clientConn: 1, } for _, e := range listTestEnv() { testConfigurableWindowSize(t, e, wc) } } /* testConfigurableWindowSize pushes numOfIter round trips whose payload size is scaled to exceed the largest configured window, proving transfers complete for both tiny and huge window settings. */ func testConfigurableWindowSize(t *testing.T, e env, wc windowSizeConfig) { te := newTest(t, e) te.serverInitialWindowSize = wc.serverStream te.serverInitialConnWindowSize = wc.serverConn te.clientInitialWindowSize = wc.clientStream te.clientInitialConnWindowSize = wc.clientConn te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) stream, err := tc.FullDuplexCall(context.Background()) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } numOfIter := 11 // Set message size to exhaust largest of window sizes. 
messageSize := max(max(wc.serverStream, wc.serverConn), max(wc.clientStream, wc.clientConn)) / int32(numOfIter-1) messageSize = max(messageSize, 64*1024) payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, messageSize) if err != nil { t.Fatal(err) } respParams := []*testpb.ResponseParameters{ { Size: messageSize, }, } req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParams, Payload: payload, } for i := 0; i < numOfIter; i++ { if err := stream.Send(req); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) } if _, err := stream.Recv(); err != nil { t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) } } if err := stream.CloseSend(); err != nil { t.Fatalf("%v.CloseSend() = %v, want ", stream, err) } } var ( // test authdata authdata = map[string]string{ "test-key": "test-value", "test-key2-bin": string([]byte{1, 2, 3}), } ) /* testPerRPCCredentials attaches authdata as per-RPC metadata; it does not require transport security so it works in insecure envs too. */ type testPerRPCCredentials struct{} func (cr testPerRPCCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { return authdata, nil } func (cr testPerRPCCredentials) RequireTransportSecurity() bool { return false } /* authHandle is a tap handler asserting every authdata key arrived in the incoming metadata with its expected value. */ func authHandle(ctx context.Context, info *tap.Info) (context.Context, error) { md, ok := metadata.FromIncomingContext(ctx) if !ok { return ctx, fmt.Errorf("didn't find metadata in context") } for k, vwant := range authdata { vgot, ok := md[k] if !ok { return ctx, fmt.Errorf("didn't find authdata key %v in context", k) } if vgot[0] != vwant { return ctx, fmt.Errorf("for key %v, got value %v, want %v", k, vgot, vwant) } } return ctx, nil } func (s) TestPerRPCCredentialsViaDialOptions(t *testing.T) { for _, e := range listTestEnv() { testPerRPCCredentialsViaDialOptions(t, e) } } func testPerRPCCredentialsViaDialOptions(t *testing.T, e env) { te := newTest(t, e) te.tapHandle = authHandle te.perRPCCreds = testPerRPCCredentials{} te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := 
te.clientConn() tc := testpb.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("Test failed. Reason: %v", err) } } func (s) TestPerRPCCredentialsViaCallOptions(t *testing.T) { for _, e := range listTestEnv() { testPerRPCCredentialsViaCallOptions(t, e) } } func testPerRPCCredentialsViaCallOptions(t *testing.T, e env) { te := newTest(t, e) te.tapHandle = authHandle te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{})); err != nil { t.Fatalf("Test failed. Reason: %v", err) } } func (s) TestPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T) { for _, e := range listTestEnv() { testPerRPCCredentialsViaDialOptionsAndCallOptions(t, e) } } func testPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T, e env) { te := newTest(t, e) te.perRPCCreds = testPerRPCCredentials{} // When credentials are provided via both dial options and call options, // we apply both sets. /* Each key must therefore appear twice in the incoming metadata, once per credentials source. */ te.tapHandle = func(ctx context.Context, _ *tap.Info) (context.Context, error) { md, ok := metadata.FromIncomingContext(ctx) if !ok { return ctx, fmt.Errorf("couldn't find metadata in context") } for k, vwant := range authdata { vgot, ok := md[k] if !ok { return ctx, fmt.Errorf("couldn't find metadata for key %v", k) } if len(vgot) != 2 { return ctx, fmt.Errorf("len of value for key %v was %v, want 2", k, len(vgot)) } if vgot[0] != vwant || vgot[1] != vwant { return ctx, fmt.Errorf("value for %v was %v, want [%v, %v]", k, vgot, vwant, vwant) } } return ctx, nil } te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{})); err != nil { t.Fatalf("Test failed. 
Reason: %v", err) } } func (s) TestWaitForReadyConnection(t *testing.T) { for _, e := range listTestEnv() { testWaitForReadyConnection(t, e) } } func testWaitForReadyConnection(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() // Non-blocking dial. tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() state := cc.GetState() // Wait for connection to be Ready. for ; state != connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() { } if state != connectivity.Ready { t.Fatalf("Want connection state to be Ready, got %v", state) } ctx, cancel = context.WithTimeout(context.Background(), time.Second) defer cancel() // Make a fail-fast RPC. if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_,_) = _, %v, want _, nil", err) } } type errCodec struct { noError bool } func (c *errCodec) Marshal(v interface{}) ([]byte, error) { if c.noError { return []byte{}, nil } return nil, fmt.Errorf("3987^12 + 4365^12 = 4472^12") } func (c *errCodec) Unmarshal(data []byte, v interface{}) error { return nil } func (c *errCodec) Name() string { return "Fermat's near-miss." } func (s) TestEncodeDoesntPanic(t *testing.T) { for _, e := range listTestEnv() { testEncodeDoesntPanic(t, e) } } func testEncodeDoesntPanic(t *testing.T, e env) { te := newTest(t, e) erc := &errCodec{} te.customCodec = erc te.startServer(&testServer{security: e.security}) defer te.tearDown() te.customCodec = nil tc := testpb.NewTestServiceClient(te.clientConn()) // Failure case, should not panic. tc.EmptyCall(context.Background(), &testpb.Empty{}) erc.noError = true // Passing case. 
if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("EmptyCall(_, _) = _, %v, want _, ", err) } } func (s) TestSvrWriteStatusEarlyWrite(t *testing.T) { for _, e := range listTestEnv() { testSvrWriteStatusEarlyWrite(t, e) } } func testSvrWriteStatusEarlyWrite(t *testing.T, e env) { te := newTest(t, e) const smallSize = 1024 const largeSize = 2048 const extraLargeSize = 4096 te.maxServerReceiveMsgSize = newInt(largeSize) te.maxServerSendMsgSize = newInt(largeSize) smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) if err != nil { t.Fatal(err) } extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize) if err != nil { t.Fatal(err) } te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) respParam := []*testpb.ResponseParameters{ { Size: int32(smallSize), }, } sreq := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: extraLargePayload, } // Test recv case: server receives a message larger than maxServerReceiveMsgSize. stream, err := tc.FullDuplexCall(te.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err = stream.Send(sreq); err != nil { t.Fatalf("%v.Send() = _, %v, want ", stream, err) } if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) } // Test send case: server sends a message larger than maxServerSendMsgSize. 
sreq.Payload = smallPayload respParam[0].Size = int32(extraLargeSize) stream, err = tc.FullDuplexCall(te.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err = stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } if _, err = stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) } } // The following functions with function name ending with TD indicates that they // should be deleted after old service config API is deprecated and deleted. func testServiceConfigSetupTD(t *testing.T, e env) (*test, chan grpc.ServiceConfig) { te := newTest(t, e) // We write before read. ch := make(chan grpc.ServiceConfig, 1) te.sc = ch te.userAgent = testAppUA te.declareLogNoise( "transport: http2Client.notifyError got notified that the client transport was broken EOF", "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", "grpc: addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; please retry.", ) return te, ch } func (s) TestServiceConfigGetMethodConfigTD(t *testing.T) { for _, e := range listTestEnv() { testGetMethodConfigTD(t, e) } } func testGetMethodConfigTD(t *testing.T, e env) { te, ch := testServiceConfigSetupTD(t, e) defer te.tearDown() mc1 := grpc.MethodConfig{ WaitForReady: newBool(true), Timeout: newDuration(time.Millisecond), } mc2 := grpc.MethodConfig{WaitForReady: newBool(false)} m := make(map[string]grpc.MethodConfig) m["/grpc.testing.TestService/EmptyCall"] = mc1 m["/grpc.testing.TestService/"] = mc2 sc := grpc.ServiceConfig{ Methods: m, } ch <- sc cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. 
if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } m = make(map[string]grpc.MethodConfig) m["/grpc.testing.TestService/UnaryCall"] = mc1 m["/grpc.testing.TestService/"] = mc2 sc = grpc.ServiceConfig{ Methods: m, } ch <- sc // Wait for the new service config to propagate. for { if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) == codes.DeadlineExceeded { continue } break } // The following RPCs are expected to become fail-fast. if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.Unavailable { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable) } } func (s) TestServiceConfigWaitForReadyTD(t *testing.T) { for _, e := range listTestEnv() { testServiceConfigWaitForReadyTD(t, e) } } func testServiceConfigWaitForReadyTD(t *testing.T, e env) { te, ch := testServiceConfigSetupTD(t, e) defer te.tearDown() // Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds. mc := grpc.MethodConfig{ WaitForReady: newBool(false), Timeout: newDuration(time.Millisecond), } m := make(map[string]grpc.MethodConfig) m["/grpc.testing.TestService/EmptyCall"] = mc m["/grpc.testing.TestService/FullDuplexCall"] = mc sc := grpc.ServiceConfig{ Methods: m, } ch <- sc cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. 
if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } if _, err := tc.FullDuplexCall(context.Background(), grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) } // Generate a service config update. // Case2: Client API does not set failfast, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds. mc.WaitForReady = newBool(true) m = make(map[string]grpc.MethodConfig) m["/grpc.testing.TestService/EmptyCall"] = mc m["/grpc.testing.TestService/FullDuplexCall"] = mc sc = grpc.ServiceConfig{ Methods: m, } ch <- sc // Wait for the new service config to take effect. mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") for { if !*mc.WaitForReady { time.Sleep(100 * time.Millisecond) mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") continue } break } // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } if _, err := tc.FullDuplexCall(context.Background()); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) } } func (s) TestServiceConfigTimeoutTD(t *testing.T) { for _, e := range listTestEnv() { testServiceConfigTimeoutTD(t, e) } } func testServiceConfigTimeoutTD(t *testing.T, e env) { te, ch := testServiceConfigSetupTD(t, e) defer te.tearDown() // Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. 
mc := grpc.MethodConfig{ Timeout: newDuration(time.Hour), } m := make(map[string]grpc.MethodConfig) m["/grpc.testing.TestService/EmptyCall"] = mc m["/grpc.testing.TestService/FullDuplexCall"] = mc sc := grpc.ServiceConfig{ Methods: m, } ch <- sc cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) // The following RPCs are expected to become non-fail-fast ones with 1ns deadline. ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } cancel() ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond) if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) } cancel() // Generate a service config update. // Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. mc.Timeout = newDuration(time.Nanosecond) m = make(map[string]grpc.MethodConfig) m["/grpc.testing.TestService/EmptyCall"] = mc m["/grpc.testing.TestService/FullDuplexCall"] = mc sc = grpc.ServiceConfig{ Methods: m, } ch <- sc // Wait for the new service config to take effect. 
mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") for { if *mc.Timeout != time.Nanosecond { time.Sleep(100 * time.Millisecond) mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") continue } break } ctx, cancel = context.WithTimeout(context.Background(), time.Hour) if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } cancel() ctx, cancel = context.WithTimeout(context.Background(), time.Hour) if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) } cancel() } func (s) TestServiceConfigMaxMsgSizeTD(t *testing.T) { for _, e := range listTestEnv() { testServiceConfigMaxMsgSizeTD(t, e) } } func testServiceConfigMaxMsgSizeTD(t *testing.T, e env) { // Setting up values and objects shared across all test cases. const smallSize = 1 const largeSize = 1024 const extraLargeSize = 2048 smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) if err != nil { t.Fatal(err) } largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) if err != nil { t.Fatal(err) } extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize) if err != nil { t.Fatal(err) } mc := grpc.MethodConfig{ MaxReqSize: newInt(extraLargeSize), MaxRespSize: newInt(extraLargeSize), } m := make(map[string]grpc.MethodConfig) m["/grpc.testing.TestService/UnaryCall"] = mc m["/grpc.testing.TestService/FullDuplexCall"] = mc sc := grpc.ServiceConfig{ Methods: m, } // Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv). 
te1, ch1 := testServiceConfigSetupTD(t, e) te1.startServer(&testServer{security: e.security}) defer te1.tearDown() ch1 <- sc tc := testpb.NewTestServiceClient(te1.clientConn()) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: int32(extraLargeSize), Payload: smallPayload, } // Test for unary RPC recv. if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Test for unary RPC send. req.Payload = extraLargePayload req.ResponseSize = int32(smallSize) if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Test for streaming RPC recv. respParam := []*testpb.ResponseParameters{ { Size: int32(extraLargeSize), }, } sreq := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: smallPayload, } stream, err := tc.FullDuplexCall(te1.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err := stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) } // Test for streaming RPC send. 
respParam[0].Size = int32(smallSize) sreq.Payload = extraLargePayload stream, err = tc.FullDuplexCall(te1.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) } // Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). te2, ch2 := testServiceConfigSetupTD(t, e) te2.maxClientReceiveMsgSize = newInt(1024) te2.maxClientSendMsgSize = newInt(1024) te2.startServer(&testServer{security: e.security}) defer te2.tearDown() ch2 <- sc tc = testpb.NewTestServiceClient(te2.clientConn()) // Test for unary RPC recv. req.Payload = smallPayload req.ResponseSize = int32(largeSize) if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Test for unary RPC send. req.Payload = largePayload req.ResponseSize = int32(smallSize) if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Test for streaming RPC recv. stream, err = tc.FullDuplexCall(te2.ctx) respParam[0].Size = int32(largeSize) sreq.Payload = smallPayload if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err := stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) } // Test for streaming RPC send. 
respParam[0].Size = int32(smallSize) sreq.Payload = largePayload stream, err = tc.FullDuplexCall(te2.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) } // Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). te3, ch3 := testServiceConfigSetupTD(t, e) te3.maxClientReceiveMsgSize = newInt(4096) te3.maxClientSendMsgSize = newInt(4096) te3.startServer(&testServer{security: e.security}) defer te3.tearDown() ch3 <- sc tc = testpb.NewTestServiceClient(te3.clientConn()) // Test for unary RPC recv. req.Payload = smallPayload req.ResponseSize = int32(largeSize) if _, err := tc.UnaryCall(context.Background(), req); err != nil { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) } req.ResponseSize = int32(extraLargeSize) if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Test for unary RPC send. req.Payload = largePayload req.ResponseSize = int32(smallSize) if _, err := tc.UnaryCall(context.Background(), req); err != nil { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) } req.Payload = extraLargePayload if _, err := tc.UnaryCall(context.Background(), req); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) } // Test for streaming RPC recv. 
stream, err = tc.FullDuplexCall(te3.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } respParam[0].Size = int32(largeSize) sreq.Payload = smallPayload if err := stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } if _, err := stream.Recv(); err != nil { t.Fatalf("%v.Recv() = _, %v, want ", stream, err) } respParam[0].Size = int32(extraLargeSize) if err := stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) } // Test for streaming RPC send. respParam[0].Size = int32(smallSize) sreq.Payload = largePayload stream, err = tc.FullDuplexCall(te3.ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } if err := stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } sreq.Payload = extraLargePayload if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) } } func (s) TestMethodFromServerStream(t *testing.T) { const testMethod = "/package.service/method" e := tcpClearRREnv te := newTest(t, e) var method string var ok bool te.unknownHandler = func(srv interface{}, stream grpc.ServerStream) error { method, ok = grpc.MethodFromServerStream(stream) return nil } te.startServer(nil) defer te.tearDown() _ = te.clientConn().Invoke(context.Background(), testMethod, nil, nil) if !ok || method != testMethod { t.Fatalf("Invoke with method %q, got %q, %v, want %q, true", testMethod, method, ok, testMethod) } } func (s) TestInterceptorCanAccessCallOptions(t *testing.T) { e := tcpClearRREnv te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() type observedOptions struct { headers 
[]*metadata.MD trailers []*metadata.MD peer []*peer.Peer creds []credentials.PerRPCCredentials failFast []bool maxRecvSize []int maxSendSize []int compressor []string subtype []string } var observedOpts observedOptions populateOpts := func(opts []grpc.CallOption) { for _, o := range opts { switch o := o.(type) { case grpc.HeaderCallOption: observedOpts.headers = append(observedOpts.headers, o.HeaderAddr) case grpc.TrailerCallOption: observedOpts.trailers = append(observedOpts.trailers, o.TrailerAddr) case grpc.PeerCallOption: observedOpts.peer = append(observedOpts.peer, o.PeerAddr) case grpc.PerRPCCredsCallOption: observedOpts.creds = append(observedOpts.creds, o.Creds) case grpc.FailFastCallOption: observedOpts.failFast = append(observedOpts.failFast, o.FailFast) case grpc.MaxRecvMsgSizeCallOption: observedOpts.maxRecvSize = append(observedOpts.maxRecvSize, o.MaxRecvMsgSize) case grpc.MaxSendMsgSizeCallOption: observedOpts.maxSendSize = append(observedOpts.maxSendSize, o.MaxSendMsgSize) case grpc.CompressorCallOption: observedOpts.compressor = append(observedOpts.compressor, o.CompressorType) case grpc.ContentSubtypeCallOption: observedOpts.subtype = append(observedOpts.subtype, o.ContentSubtype) } } } te.unaryClientInt = func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { populateOpts(opts) return nil } te.streamClientInt = func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { populateOpts(opts) return nil, nil } defaults := []grpc.CallOption{ grpc.WaitForReady(true), grpc.MaxCallRecvMsgSize(1010), } tc := testpb.NewTestServiceClient(te.clientConn(grpc.WithDefaultCallOptions(defaults...))) var headers metadata.MD var trailers metadata.MD var pr peer.Peer tc.UnaryCall(context.Background(), &testpb.SimpleRequest{}, grpc.MaxCallRecvMsgSize(100), 
grpc.MaxCallSendMsgSize(200), grpc.PerRPCCredentials(testPerRPCCredentials{}), grpc.Header(&headers), grpc.Trailer(&trailers), grpc.Peer(&pr)) expected := observedOptions{ failFast: []bool{false}, maxRecvSize: []int{1010, 100}, maxSendSize: []int{200}, creds: []credentials.PerRPCCredentials{testPerRPCCredentials{}}, headers: []*metadata.MD{&headers}, trailers: []*metadata.MD{&trailers}, peer: []*peer.Peer{&pr}, } if !reflect.DeepEqual(expected, observedOpts) { t.Errorf("unary call did not observe expected options: expected %#v, got %#v", expected, observedOpts) } observedOpts = observedOptions{} // reset tc.StreamingInputCall(context.Background(), grpc.WaitForReady(false), grpc.MaxCallSendMsgSize(2020), grpc.UseCompressor("comp-type"), grpc.CallContentSubtype("json")) expected = observedOptions{ failFast: []bool{false, true}, maxRecvSize: []int{1010}, maxSendSize: []int{2020}, compressor: []string{"comp-type"}, subtype: []string{"json"}, } if !reflect.DeepEqual(expected, observedOpts) { t.Errorf("streaming call did not observe expected options: expected %#v, got %#v", expected, observedOpts) } } func (s) TestCompressorRegister(t *testing.T) { for _, e := range listTestEnv() { testCompressorRegister(t, e) } } func testCompressorRegister(t *testing.T, e env) { te := newTest(t, e) te.clientCompression = false te.serverCompression = false te.clientUseCompression = true te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) // Unary call const argSize = 271828 const respSize = 314159 payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: respSize, Payload: payload, } ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) if _, err := tc.UnaryCall(ctx, req); err != nil { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", 
err) } // Streaming RPC ctx, cancel := context.WithCancel(context.Background()) defer cancel() stream, err := tc.FullDuplexCall(ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) } respParam := []*testpb.ResponseParameters{ { Size: 31415, }, } payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) if err != nil { t.Fatal(err) } sreq := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: payload, } if err := stream.Send(sreq); err != nil { t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) } if _, err := stream.Recv(); err != nil { t.Fatalf("%v.Recv() = %v, want ", stream, err) } } func (s) TestServeExitsWhenListenerClosed(t *testing.T) { ss := &stubServer{ emptyCall: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, } s := grpc.NewServer() defer s.Stop() testpb.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to create listener: %v", err) } done := make(chan struct{}) go func() { s.Serve(lis) close(done) }() cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock()) if err != nil { t.Fatalf("Failed to dial server: %v", err) } defer cc.Close() c := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("Failed to send test RPC to server: %v", err) } if err := lis.Close(); err != nil { t.Fatalf("Failed to close listener: %v", err) } const timeout = 5 * time.Second timer := time.NewTimer(timeout) select { case <-done: return case <-timer.C: t.Fatalf("Serve did not return after %v", timeout) } } // Service handler returns status with invalid utf8 message. 
func (s) TestStatusInvalidUTF8Message(t *testing.T) { var ( origMsg = string([]byte{0xff, 0xfe, 0xfd}) wantMsg = "���" ) ss := &stubServer{ emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return nil, status.Errorf(codes.Internal, origMsg) }, } if err := ss.Start(nil); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() if _, err := ss.client.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Message() != wantMsg { t.Fatalf("ss.client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, status.Convert(err).Message(), wantMsg) } } // Service handler returns status with details and invalid utf8 message. Proto // will fail to marshal the status because of the invalid utf8 message. Details // will be dropped when sending. func (s) TestStatusInvalidUTF8Details(t *testing.T) { grpctest.TLogger.ExpectError("transport: failed to marshal rpc status") var ( origMsg = string([]byte{0xff, 0xfe, 0xfd}) wantMsg = "���" ) ss := &stubServer{ emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { st := status.New(codes.Internal, origMsg) st, err := st.WithDetails(&testpb.Empty{}) if err != nil { return nil, err } return nil, st.Err() }, } if err := ss.Start(nil); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() _, err := ss.client.EmptyCall(ctx, &testpb.Empty{}) st := status.Convert(err) if st.Message() != wantMsg { t.Fatalf("ss.client.EmptyCall(_, _) = _, %v (msg %q); want _, err with msg %q", err, st.Message(), wantMsg) } if len(st.Details()) != 0 { // Details should be dropped on the server side. 
t.Fatalf("RPC status contain details: %v, want no details", st.Details()) } } func (s) TestClientDoesntDeadlockWhileWritingErrornousLargeMessages(t *testing.T) { for _, e := range listTestEnv() { if e.httpHandler { continue } testClientDoesntDeadlockWhileWritingErrornousLargeMessages(t, e) } } func testClientDoesntDeadlockWhileWritingErrornousLargeMessages(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA smallSize := 1024 te.maxServerReceiveMsgSize = &smallSize te.startServer(&testServer{security: e.security}) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1048576) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, Payload: payload, } var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) go func() { defer wg.Done() for j := 0; j < 100; j++ { ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10)) defer cancel() if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.ResourceExhausted { t.Errorf("TestService/UnaryCall(_,_) = _. 
%v, want code: %s", err, codes.ResourceExhausted) return } } }() } wg.Wait() } const clientAlwaysFailCredErrorMsg = "clientAlwaysFailCred always fails" var errClientAlwaysFailCred = errors.New(clientAlwaysFailCredErrorMsg) type clientAlwaysFailCred struct{} func (c clientAlwaysFailCred) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { return nil, nil, errClientAlwaysFailCred } func (c clientAlwaysFailCred) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { return rawConn, nil, nil } func (c clientAlwaysFailCred) Info() credentials.ProtocolInfo { return credentials.ProtocolInfo{} } func (c clientAlwaysFailCred) Clone() credentials.TransportCredentials { return nil } func (c clientAlwaysFailCred) OverrideServerName(s string) error { return nil } func (s) TestFailFastRPCErrorOnBadCertificates(t *testing.T) { te := newTest(t, env{name: "bad-cred", network: "tcp", security: "clientAlwaysFailCred", balancer: "round_robin"}) te.startServer(&testServer{security: te.e.security}) defer te.tearDown() opts := []grpc.DialOption{grpc.WithTransportCredentials(clientAlwaysFailCred{})} ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() cc, err := grpc.DialContext(ctx, te.srvAddr, opts...) if err != nil { t.Fatalf("Dial(_) = %v, want %v", err, nil) } defer cc.Close() tc := testpb.NewTestServiceClient(cc) for i := 0; i < 1000; i++ { // This loop runs for at most 1 second. The first several RPCs will fail // with Unavailable because the connection hasn't started. When the // first connection failed with creds error, the next RPC should also // fail with the expected error. 
if _, err = tc.EmptyCall(context.Background(), &testpb.Empty{}); strings.Contains(err.Error(), clientAlwaysFailCredErrorMsg) { return } time.Sleep(time.Millisecond) } te.t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want err.Error() contains %q", err, clientAlwaysFailCredErrorMsg) } func (s) TestWaitForReadyRPCErrorOnBadCertificates(t *testing.T) { te := newTest(t, env{name: "bad-cred", network: "tcp", security: "clientAlwaysFailCred", balancer: "round_robin"}) te.startServer(&testServer{security: te.e.security}) defer te.tearDown() opts := []grpc.DialOption{grpc.WithTransportCredentials(clientAlwaysFailCred{})} dctx, dcancel := context.WithTimeout(context.Background(), 10*time.Second) defer dcancel() cc, err := grpc.DialContext(dctx, te.srvAddr, opts...) if err != nil { t.Fatalf("Dial(_) = %v, want %v", err, nil) } defer cc.Close() tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); strings.Contains(err.Error(), clientAlwaysFailCredErrorMsg) { return } te.t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want err.Error() contains %q", err, clientAlwaysFailCredErrorMsg) } func (s) TestRPCTimeout(t *testing.T) { for _, e := range listTestEnv() { testRPCTimeout(t, e) } } func testRPCTimeout(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, unaryCallSleepTime: 500 * time.Millisecond}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) const argSize = 2718 const respSize = 314 payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) if err != nil { t.Fatal(err) } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseSize: respSize, Payload: payload, } for i := -1; i <= 10; i++ { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(i)*time.Millisecond) if _, err := tc.UnaryCall(ctx, req); 
status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/UnaryCallv(_, _) = _, %v; want , error code: %s", err, codes.DeadlineExceeded) } cancel() } } func (s) TestDisabledIOBuffers(t *testing.T) { payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(60000)) if err != nil { t.Fatalf("Failed to create payload: %v", err) } req := &testpb.StreamingOutputCallRequest{ Payload: payload, } resp := &testpb.StreamingOutputCallResponse{ Payload: payload, } ss := &stubServer{ fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { for { in, err := stream.Recv() if err == io.EOF { return nil } if err != nil { t.Errorf("stream.Recv() = _, %v, want _, ", err) return err } if !reflect.DeepEqual(in.Payload.Body, payload.Body) { t.Errorf("Received message(len: %v) on server not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body)) return err } if err := stream.Send(resp); err != nil { t.Errorf("stream.Send(_)= %v, want ", err) return err } } }, } s := grpc.NewServer(grpc.WriteBufferSize(0), grpc.ReadBufferSize(0)) testpb.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to create listener: %v", err) } done := make(chan struct{}) go func() { s.Serve(lis) close(done) }() defer s.Stop() dctx, dcancel := context.WithTimeout(context.Background(), 5*time.Second) defer dcancel() cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock(), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0)) if err != nil { t.Fatalf("Failed to dial server") } defer cc.Close() c := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() stream, err := c.FullDuplexCall(ctx, grpc.WaitForReady(true)) if err != nil { t.Fatalf("Failed to send test RPC to server") } for i := 0; i < 10; i++ { if err := stream.Send(req); err != nil { t.Fatalf("stream.Send(_) = %v, want ", err) } in, err := 
stream.Recv() if err != nil { t.Fatalf("stream.Recv() = _, %v, want _, ", err) } if !reflect.DeepEqual(in.Payload.Body, payload.Body) { t.Fatalf("Received message(len: %v) on client not what was expected(len: %v).", len(in.Payload.Body), len(payload.Body)) } } stream.CloseSend() if _, err := stream.Recv(); err != io.EOF { t.Fatalf("stream.Recv() = _, %v, want _, io.EOF", err) } } func (s) TestServerMaxHeaderListSizeClientUserViolation(t *testing.T) { for _, e := range listTestEnv() { if e.httpHandler { continue } testServerMaxHeaderListSizeClientUserViolation(t, e) } } func testServerMaxHeaderListSizeClientUserViolation(t *testing.T, e env) { te := newTest(t, e) te.maxServerHeaderListSize = new(uint32) *te.maxServerHeaderListSize = 216 te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() metadata.AppendToOutgoingContext(ctx, "oversize", string(make([]byte, 216))) var err error if err = verifyResultWithDelay(func() (bool, error) { if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal { return true, nil } return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal) }); err != nil { t.Fatal(err) } } func (s) TestClientMaxHeaderListSizeServerUserViolation(t *testing.T) { for _, e := range listTestEnv() { if e.httpHandler { continue } testClientMaxHeaderListSizeServerUserViolation(t, e) } } func testClientMaxHeaderListSizeServerUserViolation(t *testing.T, e env) { te := newTest(t, e) te.maxClientHeaderListSize = new(uint32) *te.maxClientHeaderListSize = 1 // any header server sends will violate te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() var err error if err 
= verifyResultWithDelay(func() (bool, error) { if _, err = tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) == codes.Internal { return true, nil } return false, fmt.Errorf("tc.EmptyCall() = _, err: %v, want _, error code: %v", err, codes.Internal) }); err != nil { t.Fatal(err) } } func (s) TestServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T) { for _, e := range listTestEnv() { if e.httpHandler || e.security == "tls" { continue } testServerMaxHeaderListSizeClientIntentionalViolation(t, e) } } func testServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T, e env) { te := newTest(t, e) te.maxServerHeaderListSize = new(uint32) *te.maxServerHeaderListSize = 512 te.startServer(&testServer{security: e.security}) defer te.tearDown() cc, dw := te.clientConnWithConnControl() tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)} ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() stream, err := tc.FullDuplexCall(ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, ", tc, err) } rcw := dw.getRawConnWrapper() val := make([]string, 512) for i := range val { val[i] = "a" } // allow for client to send the initial header time.Sleep(100 * time.Millisecond) rcw.writeHeaders(http2.HeadersFrameParam{ StreamID: tc.getCurrentStreamID(), BlockFragment: rcw.encodeHeader("oversize", strings.Join(val, "")), EndStream: false, EndHeaders: true, }) if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal { t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal) } } func (s) TestClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T) { for _, e := range listTestEnv() { if e.httpHandler || e.security == "tls" { continue } testClientMaxHeaderListSizeServerIntentionalViolation(t, e) } } func testClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T, e env) { te := newTest(t, e) te.maxClientHeaderListSize = new(uint32) 
*te.maxClientHeaderListSize = 200 lw := te.startServerWithConnControl(&testServer{security: e.security, setHeaderOnly: true}) defer te.tearDown() cc, _ := te.clientConnWithConnControl() tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)} ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() stream, err := tc.FullDuplexCall(ctx) if err != nil { t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, ", tc, err) } var i int var rcw *rawConnWrapper for i = 0; i < 100; i++ { rcw = lw.getLastConn() if rcw != nil { break } time.Sleep(10 * time.Millisecond) continue } if i == 100 { t.Fatalf("failed to create server transport after 1s") } val := make([]string, 200) for i := range val { val[i] = "a" } // allow for client to send the initial header. time.Sleep(100 * time.Millisecond) rcw.writeHeaders(http2.HeadersFrameParam{ StreamID: tc.getCurrentStreamID(), BlockFragment: rcw.encodeRawHeader("oversize", strings.Join(val, "")), EndStream: false, EndHeaders: true, }) if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Internal { t.Fatalf("stream.Recv() = _, %v, want _, error code: %v", err, codes.Internal) } } func (s) TestNetPipeConn(t *testing.T) { // This test will block indefinitely if grpc writes both client and server // prefaces without either reading from the Conn. 
pl := testutils.NewPipeListener() s := grpc.NewServer() defer s.Stop() ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{}, nil }} testpb.RegisterTestServiceServer(s, ts) go s.Serve(pl) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() cc, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithDialer(pl.Dialer())) if err != nil { t.Fatalf("Error creating client: %v", err) } defer cc.Close() client := testpb.NewTestServiceClient(cc) if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) } } func (s) TestLargeTimeout(t *testing.T) { for _, e := range listTestEnv() { testLargeTimeout(t, e) } } func testLargeTimeout(t *testing.T, e env) { te := newTest(t, e) te.declareLogNoise("Server.processUnaryRPC failed to write status") ts := &funcServer{} te.startServer(ts) defer te.tearDown() tc := testpb.NewTestServiceClient(te.clientConn()) timeouts := []time.Duration{ time.Duration(math.MaxInt64), // will be (correctly) converted to // 2562048 hours, which overflows upon converting back to an int64 2562047 * time.Hour, // the largest timeout that does not overflow } for i, maxTimeout := range timeouts { ts.unaryCall = func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { deadline, ok := ctx.Deadline() timeout := time.Until(deadline) minTimeout := maxTimeout - 5*time.Second if !ok || timeout < minTimeout || timeout > maxTimeout { t.Errorf("ctx.Deadline() = (now+%v), %v; want [%v, %v], true", timeout, ok, minTimeout, maxTimeout) return nil, status.Error(codes.OutOfRange, "deadline error") } return &testpb.SimpleResponse{}, nil } ctx, cancel := context.WithTimeout(context.Background(), maxTimeout) defer cancel() if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Errorf("case %v: UnaryCall(_) = _, %v; want _, nil", i, 
err) } } } // Proxies typically send GO_AWAY followed by connection closure a minute or so later. This // test ensures that the connection is re-created after GO_AWAY and not affected by the // subsequent (old) connection closure. func (s) TestGoAwayThenClose(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) defer cancel() lis1, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening. Err: %v", err) } s1 := grpc.NewServer() defer s1.Stop() ts := &funcServer{ unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{}, nil }, fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { // Wait forever. _, err := stream.Recv() if err == nil { t.Error("expected to never receive any message") } return err }, } testpb.RegisterTestServiceServer(s1, ts) go s1.Serve(lis1) conn2Established := grpcsync.NewEvent() lis2, err := listenWithNotifyingListener("tcp", "localhost:0", conn2Established) if err != nil { t.Fatalf("Error while listening. Err: %v", err) } s2 := grpc.NewServer() defer s2.Stop() testpb.RegisterTestServiceServer(s2, ts) go s2.Serve(lis2) r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() r.InitialState(resolver.State{Addresses: []resolver.Address{ {Addr: lis1.Addr().String()}, }}) cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure()) if err != nil { t.Fatalf("Error creating client: %v", err) } defer cc.Close() client := testpb.NewTestServiceClient(cc) // Should go on connection 1. We use a long-lived RPC because it will cause GracefulStop to send GO_AWAY, but the // connection doesn't get closed until the server stops and the client receives. 
stream, err := client.FullDuplexCall(ctx) if err != nil { t.Fatalf("FullDuplexCall(_) = _, %v; want _, nil", err) } r.UpdateState(resolver.State{Addresses: []resolver.Address{ {Addr: lis1.Addr().String()}, {Addr: lis2.Addr().String()}, }}) // Send GO_AWAY to connection 1. go s1.GracefulStop() // Wait for connection 2 to be established. <-conn2Established.Done() // Close connection 1. s1.Stop() // Wait for client to close. _, err = stream.Recv() if err == nil { t.Fatal("expected the stream to die, but got a successful Recv") } // Do a bunch of RPCs, make sure it stays stable. These should go to connection 2. for i := 0; i < 10; i++ { if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) } } } func listenWithNotifyingListener(network, address string, event *grpcsync.Event) (net.Listener, error) { lis, err := net.Listen(network, address) if err != nil { return nil, err } return notifyingListener{connEstablished: event, Listener: lis}, nil } type notifyingListener struct { connEstablished *grpcsync.Event net.Listener } func (lis notifyingListener) Accept() (net.Conn, error) { defer lis.connEstablished.Fire() return lis.Listener.Accept() } func (s) TestRPCWaitsForResolver(t *testing.T) { te := testServiceConfigSetup(t, tcpClearRREnv) te.startServer(&testServer{security: tcpClearRREnv.security}) defer te.tearDown() r, rcleanup := manual.GenerateAndRegisterManualResolver() defer rcleanup() te.resolverScheme = r.Scheme() te.nonBlockingDial = true cc := te.clientConn() tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) defer cancel() // With no resolved addresses yet, this will timeout. 
if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) defer cancel() go func() { time.Sleep(time.Second) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: te.srvAddr}}, ServiceConfig: parseCfg(r, `{ "methodConfig": [ { "name": [ { "service": "grpc.testing.TestService", "method": "UnaryCall" } ], "maxRequestMessageBytes": 0 } ] }`)}) }() // We wait a second before providing a service config and resolving // addresses. So this will wait for that and then honor the // maxRequestMessageBytes it contains. if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{ResponseType: testpb.PayloadType_UNCOMPRESSABLE}); status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err) } if got := ctx.Err(); got != nil { t.Fatalf("ctx.Err() = %v; want nil (deadline should be set short by service config)", got) } if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err) } } func (s) TestHTTPHeaderFrameErrorHandlingHTTPMode(t *testing.T) { // Non-gRPC content-type fallback path. for httpCode := range transport.HTTPStatusConvTab { doHTTPHeaderTest(t, transport.HTTPStatusConvTab[int(httpCode)], []string{ ":status", fmt.Sprintf("%d", httpCode), "content-type", "text/html", // non-gRPC content type to switch to HTTP mode. "grpc-status", "1", // Make up a gRPC status error "grpc-status-details-bin", "???", // Make up a gRPC field parsing error }) } // Missing content-type fallback path. for httpCode := range transport.HTTPStatusConvTab { doHTTPHeaderTest(t, transport.HTTPStatusConvTab[int(httpCode)], []string{ ":status", fmt.Sprintf("%d", httpCode), // Omitting content type to switch to HTTP mode. 
"grpc-status", "1", // Make up a gRPC status error "grpc-status-details-bin", "???", // Make up a gRPC field parsing error }) } // Malformed HTTP status when fallback. doHTTPHeaderTest(t, codes.Internal, []string{ ":status", "abc", // Omitting content type to switch to HTTP mode. "grpc-status", "1", // Make up a gRPC status error "grpc-status-details-bin", "???", // Make up a gRPC field parsing error }) } // Testing erroneous ResponseHeader or Trailers-only (delivered in the first HEADERS frame). func (s) TestHTTPHeaderFrameErrorHandlingInitialHeader(t *testing.T) { for _, test := range []struct { header []string errCode codes.Code }{ { // missing gRPC status. header: []string{ ":status", "403", "content-type", "application/grpc", }, errCode: codes.Unknown, }, { // malformed grpc-status. header: []string{ ":status", "502", "content-type", "application/grpc", "grpc-status", "abc", }, errCode: codes.Internal, }, { // Malformed grpc-tags-bin field. header: []string{ ":status", "502", "content-type", "application/grpc", "grpc-status", "0", "grpc-tags-bin", "???", }, errCode: codes.Internal, }, { // gRPC status error. 
header: []string{ ":status", "502", "content-type", "application/grpc", "grpc-status", "3", }, errCode: codes.InvalidArgument, }, } { doHTTPHeaderTest(t, test.errCode, test.header) } } // Testing non-Trailers-only Trailers (delievered in second HEADERS frame) func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) { for _, test := range []struct { responseHeader []string trailer []string errCode codes.Code }{ { responseHeader: []string{ ":status", "200", "content-type", "application/grpc", }, trailer: []string{ // trailer missing grpc-status ":status", "502", }, errCode: codes.Unknown, }, { responseHeader: []string{ ":status", "404", "content-type", "application/grpc", }, trailer: []string{ // malformed grpc-status-details-bin field "grpc-status", "0", "grpc-status-details-bin", "????", }, errCode: codes.Internal, }, } { doHTTPHeaderTest(t, test.errCode, test.responseHeader, test.trailer) } } func (s) TestHTTPHeaderFrameErrorHandlingMoreThanTwoHeaders(t *testing.T) { header := []string{ ":status", "200", "content-type", "application/grpc", } doHTTPHeaderTest(t, codes.Internal, header, header, header) } type httpServer struct { headerFields [][]string } func (s *httpServer) writeHeader(framer *http2.Framer, sid uint32, headerFields []string, endStream bool) error { if len(headerFields)%2 == 1 { panic("odd number of kv args") } var buf bytes.Buffer henc := hpack.NewEncoder(&buf) for len(headerFields) > 0 { k, v := headerFields[0], headerFields[1] headerFields = headerFields[2:] henc.WriteField(hpack.HeaderField{Name: k, Value: v}) } return framer.WriteHeaders(http2.HeadersFrameParam{ StreamID: sid, BlockFragment: buf.Bytes(), EndStream: endStream, EndHeaders: true, }) } func (s *httpServer) start(t *testing.T, lis net.Listener) { // Launch an HTTP server to send back header. go func() { conn, err := lis.Accept() if err != nil { t.Errorf("Error accepting connection: %v", err) return } defer conn.Close() // Read preface sent by client. 
if _, err = io.ReadFull(conn, make([]byte, len(http2.ClientPreface))); err != nil { t.Errorf("Error at server-side while reading preface from client. Err: %v", err) return } reader := bufio.NewReader(conn) writer := bufio.NewWriter(conn) framer := http2.NewFramer(writer, reader) if err = framer.WriteSettingsAck(); err != nil { t.Errorf("Error at server-side while sending Settings ack. Err: %v", err) return } writer.Flush() // necessary since client is expecting preface before declaring connection fully setup. var sid uint32 // Read frames until a header is received. for { frame, err := framer.ReadFrame() if err != nil { t.Errorf("Error at server-side while reading frame. Err: %v", err) return } if hframe, ok := frame.(*http2.HeadersFrame); ok { sid = hframe.Header().StreamID break } } for i, headers := range s.headerFields { if err = s.writeHeader(framer, sid, headers, i == len(s.headerFields)-1); err != nil { t.Errorf("Error at server-side while writing headers. Err: %v", err) return } writer.Flush() } }() } func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string) { t.Helper() lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen. 
Err: %v", err) } defer lis.Close() server := &httpServer{ headerFields: headerFields, } server.start(t, lis) cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { t.Fatalf("failed to dial due to err: %v", err) } defer cc.Close() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() client := testpb.NewTestServiceClient(cc) stream, err := client.FullDuplexCall(ctx) if err != nil { t.Fatalf("error creating stream due to err: %v", err) } if _, err := stream.Recv(); err == nil || status.Code(err) != errCode { t.Fatalf("stream.Recv() = _, %v, want error code: %v", err, errCode) } } func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult { g := r.CC.ParseServiceConfig(s) if g.Err != nil { panic(fmt.Sprintf("Error parsing config %q: %v", s, g.Err)) } return g } type methodTestCreds struct{} func (m methodTestCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { ri, _ := credentials.RequestInfoFromContext(ctx) return nil, status.Errorf(codes.Unknown, ri.Method) } func (m methodTestCreds) RequireTransportSecurity() bool { return false } func (s) TestGRPCMethodAccessibleToCredsViaContextRequestInfo(t *testing.T) { const wantMethod = "/grpc.testing.TestService/EmptyCall" ss := &stubServer{} if err := ss.Start(nil, grpc.WithPerRPCCredentials(methodTestCreds{})); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() if _, err := ss.client.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Message() != wantMethod { t.Fatalf("ss.client.EmptyCall(_, _) = _, %v; want _, _.Message()=%q", err, wantMethod) } } func (s) TestClientCancellationPropagatesUnary(t *testing.T) { wg := &sync.WaitGroup{} called, done := make(chan struct{}), make(chan struct{}) ss := &stubServer{ emptyCall: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) { 
close(called) <-ctx.Done() err := ctx.Err() if err != context.Canceled { t.Errorf("ctx.Err() = %v; want context.Canceled", err) } close(done) return nil, err }, } if err := ss.Start(nil); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) wg.Add(1) go func() { if _, err := ss.client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Canceled { t.Errorf("ss.client.EmptyCall() = _, %v; want _, Code()=codes.Canceled", err) } wg.Done() }() select { case <-called: case <-time.After(5 * time.Second): t.Fatalf("failed to perform EmptyCall after 10s") } cancel() select { case <-done: case <-time.After(5 * time.Second): t.Fatalf("server failed to close done chan due to cancellation propagation") } wg.Wait() } type badGzipCompressor struct{} func (badGzipCompressor) Do(w io.Writer, p []byte) error { buf := &bytes.Buffer{} gzw := gzip.NewWriter(buf) if _, err := gzw.Write(p); err != nil { return err } err := gzw.Close() bs := buf.Bytes() if len(bs) >= 6 { bs[len(bs)-6] ^= 1 // modify checksum at end by 1 byte } w.Write(bs) return err } func (badGzipCompressor) Type() string { return "gzip" } func (s) TestGzipBadChecksum(t *testing.T) { ss := &stubServer{ unaryCall: func(ctx context.Context, _ *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{}, nil }, } if err := ss.Start(nil, grpc.WithCompressor(badGzipCompressor{})); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() p, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1024)) if err != nil { t.Fatalf("Unexpected error from newPayload: %v", err) } if _, err := ss.client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: p}); err == nil || status.Code(err) != codes.Internal || !strings.Contains(status.Convert(err).Message(), gzip.ErrChecksum.Error()) { 
t.Errorf("ss.client.UnaryCall(_) = _, %v\n\twant: _, status(codes.Internal, contains %q)", err, gzip.ErrChecksum) } } grpc-go-1.29.1/test/go_vet/000077500000000000000000000000001365033716300154135ustar00rootroot00000000000000grpc-go-1.29.1/test/go_vet/vet.go000066400000000000000000000026371365033716300165500ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // vet checks whether files that are supposed to be built on appengine running // Go 1.10 or earlier import an unsupported package (e.g. "unsafe", "syscall"). package main import ( "fmt" "go/build" "os" ) func main() { fail := false b := build.Default b.BuildTags = []string{"appengine", "appenginevm"} argsWithoutProg := os.Args[1:] for _, dir := range argsWithoutProg { p, err := b.Import(".", dir, 0) if _, ok := err.(*build.NoGoError); ok { continue } else if err != nil { fmt.Printf("build.Import failed due to %v\n", err) fail = true continue } for _, pkg := range p.Imports { if pkg == "syscall" || pkg == "unsafe" { fmt.Printf("Package %s/%s importing %s package without appengine build tag is NOT ALLOWED!\n", p.Dir, p.Name, pkg) fail = true } } } if fail { os.Exit(1) } } grpc-go-1.29.1/test/goaway_test.go000066400000000000000000000044501365033716300170100ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package test import ( "context" "net" "testing" "time" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" testpb "google.golang.org/grpc/test/grpc_testing" ) // TestGracefulClientOnGoAway attempts to ensure that when the server sends a // GOAWAY (in this test, by configuring max connection age on the server), a // client will never see an error. This requires that the client is appraised // of the GOAWAY and updates its state accordingly before the transport stops // accepting new streams. If a subconn is chosen by a picker and receives the // goaway before creating the stream, an error will occur, but upon transparent // retry, the clientconn will ensure a ready subconn is chosen. 
func (s) TestGracefulClientOnGoAway(t *testing.T) { const maxConnAge = 100 * time.Millisecond const testTime = maxConnAge * 10 ss := &stubServer{ emptyCall: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, } s := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{MaxConnectionAge: maxConnAge})) defer s.Stop() testpb.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to create listener: %v", err) } go s.Serve(lis) cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) if err != nil { t.Fatalf("Failed to dial server: %v", err) } defer cc.Close() c := testpb.NewTestServiceClient(cc) endTime := time.Now().Add(testTime) for time.Now().Before(endTime) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("EmptyCall(_, _) = _, %v; want _, ", err) } cancel() } } grpc-go-1.29.1/test/gracefulstop_test.go000066400000000000000000000113061365033716300202150ustar00rootroot00000000000000/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 *
 */

package test

import (
	"context"
	"fmt"
	"net"
	"sync"
	"testing"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	testpb "google.golang.org/grpc/test/grpc_testing"
)

// delayListener wraps a net.Listener so the test can observe the first
// Accept call and defer the real close of the listener until the test
// explicitly allows it.
type delayListener struct {
	net.Listener
	closeCalled  chan struct{} // closed once Close() has been invoked
	acceptCalled chan struct{} // closed on the first Accept() call
	allowCloseCh chan struct{} // closed once the underlying listener may really close
	dialed       bool          // set after the single permitted Dial
}

// Accept hands out exactly one connection; any later call blocks until the
// listener has been closed (and close has been allowed) and then errors.
func (d *delayListener) Accept() (net.Conn, error) {
	select {
	case <-d.acceptCalled:
		// On the second call, block until closed, then return an error.
		<-d.closeCalled
		<-d.allowCloseCh
		return nil, fmt.Errorf("listener is closed")
	default:
		close(d.acceptCalled)
		conn, err := d.Listener.Accept()
		if err != nil {
			return nil, err
		}
		// Allow closing of listener only after accept.
		// Note: Dial can return successfully, yet Accept
		// might not have finished.
		d.allowClose()
		return conn, nil
	}
}

// allowClose signals that the wrapped listener may now really be closed.
func (d *delayListener) allowClose() {
	close(d.allowCloseCh)
}

// Close records the close request immediately but performs the actual close
// of the wrapped listener asynchronously, once allowClose has fired.
func (d *delayListener) Close() error {
	close(d.closeCalled)
	go func() {
		<-d.allowCloseCh
		d.Listener.Close()
	}()
	return nil
}

// Dial returns at most one client connection to the wrapped listener;
// subsequent calls fail.
func (d *delayListener) Dial(ctx context.Context) (net.Conn, error) {
	if d.dialed {
		// Only hand out one connection (net.Dial can return more even after the
		// listener is closed). This is not thread-safe, but Dial should never be
		// called concurrently in this environment.
		return nil, fmt.Errorf("no more conns")
	}
	d.dialed = true
	return (&net.Dialer{}).DialContext(ctx, "tcp", d.Listener.Addr().String())
}

func (s) TestGracefulStop(t *testing.T) {
	// This test ensures GracefulStop causes new connections to fail.
	//
	// Steps of this test:
	// 1. Start Server
	// 2. GracefulStop() Server after listener's Accept is called, but don't
	//    allow Accept() to exit when Close() is called on it.
	// 3. Create a new connection to the server after listener.Close() is called.
	//    Server should close this connection immediately, before handshaking.
	// 4. Send an RPC on the new connection.  Should see Unavailable error
	//    because the ClientConn is in transient failure.
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Error listenening: %v", err)
	}
	dlis := &delayListener{
		Listener:     lis,
		acceptCalled: make(chan struct{}),
		closeCalled:  make(chan struct{}),
		allowCloseCh: make(chan struct{}),
	}
	d := func(ctx context.Context, _ string) (net.Conn, error) { return dlis.Dial(ctx) }
	ss := &stubServer{
		fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error {
			_, err := stream.Recv()
			if err != nil {
				return err
			}
			return stream.Send(&testpb.StreamingOutputCallResponse{})
		},
	}
	s := grpc.NewServer()
	testpb.RegisterTestServiceServer(s, ss)

	// 1. Start Server
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		s.Serve(dlis)
		wg.Done()
	}()

	// 2. GracefulStop() Server after listener's Accept is called, but don't
	//    allow Accept() to exit when Close() is called on it.
	<-dlis.acceptCalled
	wg.Add(1)
	go func() {
		s.GracefulStop()
		wg.Done()
	}()

	// 3. Create a new connection to the server after listener.Close() is called.
	//    Server should close this connection immediately, before handshaking.

	<-dlis.closeCalled // Block until GracefulStop calls dlis.Close()

	// Now dial. The listener's Accept method will return a valid connection,
	// even though GracefulStop has closed the listener.
	ctx, dialCancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer dialCancel()
	cc, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithContextDialer(d))
	if err != nil {
		t.Fatalf("grpc.DialContext(_, %q, _) = %v", lis.Addr().String(), err)
	}
	client := testpb.NewTestServiceClient(cc)
	defer cc.Close()

	// 4. Send an RPC on the new connection.
	// The server would send a GOAWAY first, but we are delaying the server's
	// writes for now until the client writes more than the preface.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) if _, err = client.FullDuplexCall(ctx); err == nil || status.Code(err) != codes.Unavailable { t.Fatalf("FullDuplexCall= _, %v; want _, ", err) } cancel() wg.Wait() } grpc-go-1.29.1/test/grpc_testing/000077500000000000000000000000001365033716300166205ustar00rootroot00000000000000grpc-go-1.29.1/test/grpc_testing/test.pb.go000066400000000000000000001101661365033716300205330ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc_testing/test.proto package grpc_testing import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // The type of payload that should be returned. type PayloadType int32 const ( // Compressable text format. PayloadType_COMPRESSABLE PayloadType = 0 // Uncompressable binary format. PayloadType_UNCOMPRESSABLE PayloadType = 1 // Randomly chosen from all other formats defined in this enum. 
PayloadType_RANDOM PayloadType = 2 ) var PayloadType_name = map[int32]string{ 0: "COMPRESSABLE", 1: "UNCOMPRESSABLE", 2: "RANDOM", } var PayloadType_value = map[string]int32{ "COMPRESSABLE": 0, "UNCOMPRESSABLE": 1, "RANDOM": 2, } func (x PayloadType) String() string { return proto.EnumName(PayloadType_name, int32(x)) } func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{0} } type Empty struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{0} } func (m *Empty) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Empty.Unmarshal(m, b) } func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Empty.Marshal(b, m, deterministic) } func (m *Empty) XXX_Merge(src proto.Message) { xxx_messageInfo_Empty.Merge(m, src) } func (m *Empty) XXX_Size() int { return xxx_messageInfo_Empty.Size(m) } func (m *Empty) XXX_DiscardUnknown() { xxx_messageInfo_Empty.DiscardUnknown(m) } var xxx_messageInfo_Empty proto.InternalMessageInfo // A block of data, to simply increase gRPC message size. type Payload struct { // The type of data in body. Type PayloadType `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.testing.PayloadType" json:"type,omitempty"` // Primary contents of payload. 
Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Payload) Reset() { *m = Payload{} } func (m *Payload) String() string { return proto.CompactTextString(m) } func (*Payload) ProtoMessage() {} func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{1} } func (m *Payload) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Payload.Unmarshal(m, b) } func (m *Payload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Payload.Marshal(b, m, deterministic) } func (m *Payload) XXX_Merge(src proto.Message) { xxx_messageInfo_Payload.Merge(m, src) } func (m *Payload) XXX_Size() int { return xxx_messageInfo_Payload.Size(m) } func (m *Payload) XXX_DiscardUnknown() { xxx_messageInfo_Payload.DiscardUnknown(m) } var xxx_messageInfo_Payload proto.InternalMessageInfo func (m *Payload) GetType() PayloadType { if m != nil { return m.Type } return PayloadType_COMPRESSABLE } func (m *Payload) GetBody() []byte { if m != nil { return m.Body } return nil } // Unary request. type SimpleRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. ResponseSize int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize,proto3" json:"response_size,omitempty"` // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` // Whether SimpleResponse should include username. 
FillUsername bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername,proto3" json:"fill_username,omitempty"` // Whether SimpleResponse should include OAuth scope. FillOauthScope bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope,proto3" json:"fill_oauth_scope,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } func (*SimpleRequest) ProtoMessage() {} func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{2} } func (m *SimpleRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SimpleRequest.Unmarshal(m, b) } func (m *SimpleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SimpleRequest.Marshal(b, m, deterministic) } func (m *SimpleRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_SimpleRequest.Merge(m, src) } func (m *SimpleRequest) XXX_Size() int { return xxx_messageInfo_SimpleRequest.Size(m) } func (m *SimpleRequest) XXX_DiscardUnknown() { xxx_messageInfo_SimpleRequest.DiscardUnknown(m) } var xxx_messageInfo_SimpleRequest proto.InternalMessageInfo func (m *SimpleRequest) GetResponseType() PayloadType { if m != nil { return m.ResponseType } return PayloadType_COMPRESSABLE } func (m *SimpleRequest) GetResponseSize() int32 { if m != nil { return m.ResponseSize } return 0 } func (m *SimpleRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func (m *SimpleRequest) GetFillUsername() bool { if m != nil { return m.FillUsername } return false } func (m *SimpleRequest) GetFillOauthScope() bool { if m != nil { return m.FillOauthScope } return false } // Unary response, as configured by the request. type SimpleResponse struct { // Payload to increase message size. 
Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` // The user the request came from, for verifying authentication was // successful when the client expected it. Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` // OAuth scope. OauthScope string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope,proto3" json:"oauth_scope,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } func (*SimpleResponse) ProtoMessage() {} func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{3} } func (m *SimpleResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SimpleResponse.Unmarshal(m, b) } func (m *SimpleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SimpleResponse.Marshal(b, m, deterministic) } func (m *SimpleResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_SimpleResponse.Merge(m, src) } func (m *SimpleResponse) XXX_Size() int { return xxx_messageInfo_SimpleResponse.Size(m) } func (m *SimpleResponse) XXX_DiscardUnknown() { xxx_messageInfo_SimpleResponse.DiscardUnknown(m) } var xxx_messageInfo_SimpleResponse proto.InternalMessageInfo func (m *SimpleResponse) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func (m *SimpleResponse) GetUsername() string { if m != nil { return m.Username } return "" } func (m *SimpleResponse) GetOauthScope() string { if m != nil { return m.OauthScope } return "" } // Client-streaming request. type StreamingInputCallRequest struct { // Optional input payload sent along with the request. 
Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } func (*StreamingInputCallRequest) ProtoMessage() {} func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{4} } func (m *StreamingInputCallRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamingInputCallRequest.Unmarshal(m, b) } func (m *StreamingInputCallRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamingInputCallRequest.Marshal(b, m, deterministic) } func (m *StreamingInputCallRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamingInputCallRequest.Merge(m, src) } func (m *StreamingInputCallRequest) XXX_Size() int { return xxx_messageInfo_StreamingInputCallRequest.Size(m) } func (m *StreamingInputCallRequest) XXX_DiscardUnknown() { xxx_messageInfo_StreamingInputCallRequest.DiscardUnknown(m) } var xxx_messageInfo_StreamingInputCallRequest proto.InternalMessageInfo func (m *StreamingInputCallRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } // Client-streaming response. type StreamingInputCallResponse struct { // Aggregated size of payloads received from the client. 
AggregatedPayloadSize int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize,proto3" json:"aggregated_payload_size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } func (*StreamingInputCallResponse) ProtoMessage() {} func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{5} } func (m *StreamingInputCallResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamingInputCallResponse.Unmarshal(m, b) } func (m *StreamingInputCallResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamingInputCallResponse.Marshal(b, m, deterministic) } func (m *StreamingInputCallResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamingInputCallResponse.Merge(m, src) } func (m *StreamingInputCallResponse) XXX_Size() int { return xxx_messageInfo_StreamingInputCallResponse.Size(m) } func (m *StreamingInputCallResponse) XXX_DiscardUnknown() { xxx_messageInfo_StreamingInputCallResponse.DiscardUnknown(m) } var xxx_messageInfo_StreamingInputCallResponse proto.InternalMessageInfo func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { if m != nil { return m.AggregatedPayloadSize } return 0 } // Configuration for a particular response. type ResponseParameters struct { // Desired payload sizes in responses from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. Size int32 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` // Desired interval between consecutive responses in the response stream in // microseconds. 
IntervalUs int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs,proto3" json:"interval_us,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } func (*ResponseParameters) ProtoMessage() {} func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{6} } func (m *ResponseParameters) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ResponseParameters.Unmarshal(m, b) } func (m *ResponseParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ResponseParameters.Marshal(b, m, deterministic) } func (m *ResponseParameters) XXX_Merge(src proto.Message) { xxx_messageInfo_ResponseParameters.Merge(m, src) } func (m *ResponseParameters) XXX_Size() int { return xxx_messageInfo_ResponseParameters.Size(m) } func (m *ResponseParameters) XXX_DiscardUnknown() { xxx_messageInfo_ResponseParameters.DiscardUnknown(m) } var xxx_messageInfo_ResponseParameters proto.InternalMessageInfo func (m *ResponseParameters) GetSize() int32 { if m != nil { return m.Size } return 0 } func (m *ResponseParameters) GetIntervalUs() int32 { if m != nil { return m.IntervalUs } return 0 } // Server-streaming request. type StreamingOutputCallRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, the payload from each response in the stream // might be of different types. This is to simulate a mixed type of payload // stream. ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Configuration for each expected response message. 
ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters,proto3" json:"response_parameters,omitempty"` // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } func (*StreamingOutputCallRequest) ProtoMessage() {} func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{7} } func (m *StreamingOutputCallRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamingOutputCallRequest.Unmarshal(m, b) } func (m *StreamingOutputCallRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamingOutputCallRequest.Marshal(b, m, deterministic) } func (m *StreamingOutputCallRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamingOutputCallRequest.Merge(m, src) } func (m *StreamingOutputCallRequest) XXX_Size() int { return xxx_messageInfo_StreamingOutputCallRequest.Size(m) } func (m *StreamingOutputCallRequest) XXX_DiscardUnknown() { xxx_messageInfo_StreamingOutputCallRequest.DiscardUnknown(m) } var xxx_messageInfo_StreamingOutputCallRequest proto.InternalMessageInfo func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { if m != nil { return m.ResponseType } return PayloadType_COMPRESSABLE } func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { if m != nil { return m.ResponseParameters } return nil } func (m *StreamingOutputCallRequest) GetPayload() *Payload { if m != nil { return m.Payload } return nil } // Server-streaming response, as configured by the request and parameters. 
type StreamingOutputCallResponse struct { // Payload to increase response size. Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } func (*StreamingOutputCallResponse) ProtoMessage() {} func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor_e1cda82041fed8bf, []int{8} } func (m *StreamingOutputCallResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StreamingOutputCallResponse.Unmarshal(m, b) } func (m *StreamingOutputCallResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StreamingOutputCallResponse.Marshal(b, m, deterministic) } func (m *StreamingOutputCallResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StreamingOutputCallResponse.Merge(m, src) } func (m *StreamingOutputCallResponse) XXX_Size() int { return xxx_messageInfo_StreamingOutputCallResponse.Size(m) } func (m *StreamingOutputCallResponse) XXX_DiscardUnknown() { xxx_messageInfo_StreamingOutputCallResponse.DiscardUnknown(m) } var xxx_messageInfo_StreamingOutputCallResponse proto.InternalMessageInfo func (m *StreamingOutputCallResponse) GetPayload() *Payload { if m != nil { return m.Payload } return nil } func init() { proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) proto.RegisterType((*Empty)(nil), "grpc.testing.Empty") proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest") 
proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse") proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters") proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest") proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse") } func init() { proto.RegisterFile("grpc_testing/test.proto", fileDescriptor_e1cda82041fed8bf) } var fileDescriptor_e1cda82041fed8bf = []byte{ // 587 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xdb, 0x6e, 0xd3, 0x40, 0x10, 0x65, 0xdb, 0xf4, 0x36, 0x49, 0xad, 0x68, 0xab, 0xaa, 0xae, 0x8b, 0x84, 0x65, 0x1e, 0x30, 0x48, 0xa4, 0x28, 0x08, 0x1e, 0x41, 0xa5, 0x17, 0x51, 0x29, 0x4d, 0x82, 0x9d, 0x3c, 0x47, 0xdb, 0x64, 0x6b, 0x2c, 0x39, 0xf6, 0xb2, 0x5e, 0x57, 0xa4, 0x0f, 0xfc, 0x18, 0x3f, 0xc3, 0x47, 0xf0, 0x01, 0x68, 0xd7, 0x76, 0xe2, 0x24, 0xae, 0x48, 0x41, 0xf0, 0x14, 0x7b, 0xe6, 0xcc, 0x99, 0x73, 0x3c, 0xb3, 0x1b, 0x38, 0xf0, 0x38, 0x1b, 0x0e, 0x04, 0x8d, 0x85, 0x1f, 0x7a, 0xc7, 0xf2, 0xb7, 0xc1, 0x78, 0x24, 0x22, 0x5c, 0x93, 0x89, 0x46, 0x96, 0xb0, 0xb6, 0x60, 0xe3, 0x7c, 0xcc, 0xc4, 0xc4, 0x6a, 0xc1, 0x56, 0x97, 0x4c, 0x82, 0x88, 0x8c, 0xf0, 0x4b, 0xa8, 0x88, 0x09, 0xa3, 0x3a, 0x32, 0x91, 0xad, 0x35, 0x0f, 0x1b, 0xc5, 0x82, 0x46, 0x06, 0xea, 0x4d, 0x18, 0x75, 0x14, 0x0c, 0x63, 0xa8, 0x5c, 0x47, 0xa3, 0x89, 0xbe, 0x66, 0x22, 0xbb, 0xe6, 0xa8, 0x67, 0xeb, 0x27, 0x82, 0x5d, 0xd7, 0x1f, 0xb3, 0x80, 0x3a, 0xf4, 0x4b, 0x42, 0x63, 0x81, 0xdf, 0xc1, 0x2e, 0xa7, 0x31, 0x8b, 0xc2, 0x98, 0x0e, 0x56, 0x63, 0xaf, 0xe5, 0x78, 0xf9, 0x86, 0x9f, 0x16, 0xea, 0x63, 0xff, 0x8e, 0xaa, 0x76, 0x1b, 0x33, 0x90, 0xeb, 0xdf, 0x51, 0x7c, 0x0c, 0x5b, 0x2c, 0x65, 0xd0, 0xd7, 0x4d, 0x64, 0x57, 0x9b, 0xfb, 0xa5, 0xf4, 0x4e, 0x8e, 0x92, 0xac, 0x37, 0x7e, 0x10, 0x0c, 0x92, 0x98, 0xf2, 0x90, 0x8c, 0xa9, 0x5e, 0x31, 0x91, 0xbd, 0xed, 0xd4, 0x64, 0xb0, 0x9f, 
0xc5, 0xb0, 0x0d, 0x75, 0x05, 0x8a, 0x48, 0x22, 0x3e, 0x0f, 0xe2, 0x61, 0xc4, 0xa8, 0xbe, 0xa1, 0x70, 0x9a, 0x8c, 0x77, 0x64, 0xd8, 0x95, 0x51, 0xeb, 0x1b, 0x68, 0xb9, 0xeb, 0x54, 0x55, 0x51, 0x11, 0x5a, 0x49, 0x91, 0x01, 0xdb, 0x53, 0x31, 0xd2, 0xe2, 0x8e, 0x33, 0x7d, 0xc7, 0x4f, 0xa0, 0x5a, 0xd4, 0xb0, 0xae, 0xd2, 0x10, 0xcd, 0xfa, 0xb7, 0xe0, 0xd0, 0x15, 0x9c, 0x92, 0xb1, 0x1f, 0x7a, 0x97, 0x21, 0x4b, 0xc4, 0x29, 0x09, 0x82, 0x7c, 0x02, 0x0f, 0x95, 0x62, 0xf5, 0xc0, 0x28, 0x63, 0xcb, 0x9c, 0xbd, 0x85, 0x03, 0xe2, 0x79, 0x9c, 0x7a, 0x44, 0xd0, 0xd1, 0x20, 0xab, 0x49, 0x47, 0x83, 0xd4, 0x68, 0xf6, 0x67, 0xe9, 0x8c, 0x5a, 0xce, 0xc8, 0xba, 0x04, 0x9c, 0x73, 0x74, 0x09, 0x27, 0x63, 0x2a, 0x28, 0x8f, 0xe5, 0x12, 0x15, 0x4a, 0xd5, 0xb3, 0xb4, 0xeb, 0x87, 0x82, 0xf2, 0x5b, 0x22, 0x07, 0x94, 0x0d, 0x1c, 0xf2, 0x50, 0x3f, 0xb6, 0x7e, 0xa0, 0x82, 0xc2, 0x4e, 0x22, 0x16, 0x0c, 0xff, 0xed, 0xca, 0x7d, 0x82, 0xbd, 0x69, 0x3d, 0x9b, 0x4a, 0xd5, 0xd7, 0xcc, 0x75, 0xbb, 0xda, 0x34, 0xe7, 0x59, 0x96, 0x2d, 0x39, 0x98, 0x2f, 0xdb, 0x7c, 0xe8, 0x82, 0x5a, 0x6d, 0x38, 0x2a, 0x75, 0xf8, 0x87, 0xeb, 0xf5, 0xe2, 0x3d, 0x54, 0x0b, 0x86, 0x71, 0x1d, 0x6a, 0xa7, 0x9d, 0xab, 0xae, 0x73, 0xee, 0xba, 0x27, 0x1f, 0x5a, 0xe7, 0xf5, 0x47, 0x18, 0x83, 0xd6, 0x6f, 0xcf, 0xc5, 0x10, 0x06, 0xd8, 0x74, 0x4e, 0xda, 0x67, 0x9d, 0xab, 0xfa, 0x5a, 0xf3, 0x7b, 0x05, 0xaa, 0x3d, 0x1a, 0x0b, 0x97, 0xf2, 0x5b, 0x7f, 0x48, 0xf1, 0x1b, 0xd8, 0x51, 0x17, 0x88, 0x94, 0x85, 0xf7, 0xe6, 0xbb, 0xab, 0x84, 0x51, 0x16, 0xc4, 0x17, 0xb0, 0xd3, 0x0f, 0x09, 0x4f, 0xcb, 0x8e, 0xe6, 0x11, 0x73, 0x17, 0x87, 0xf1, 0xb8, 0x3c, 0x99, 0x7d, 0x80, 0x00, 0xf6, 0x4a, 0xbe, 0x0f, 0xb6, 0x17, 0x8a, 0xee, 0x5d, 0x12, 0xe3, 0xf9, 0x0a, 0xc8, 0xb4, 0xd7, 0x2b, 0x84, 0x7d, 0xc0, 0xcb, 0x27, 0x02, 0x3f, 0xbb, 0x87, 0x62, 0xf1, 0x04, 0x1a, 0xf6, 0xef, 0x81, 0x69, 0x2b, 0x5b, 0xb6, 0xd2, 0x2e, 0x92, 0x20, 0x38, 0x4b, 0x58, 0x40, 0xbf, 0xfe, 0x33, 0x4f, 0x36, 0x52, 0xae, 0xb4, 0x8f, 0x24, 0xb8, 0xf9, 0x0f, 0xad, 0xae, 0x37, 0xd5, 0x7f, 
0xd0, 0xeb, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x07, 0xc7, 0x76, 0x69, 0x9e, 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // TestServiceClient is the client API for TestService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type TestServiceClient interface { // One empty request followed by one empty response. EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) // One request followed by one response. // The server returns the client payload as-is. UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) // One request followed by a sequence of responses (streamed download). // The server returns the payload with client desired type and sizes. StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. 
A // stream of responses are returned to the client when the server starts with // first request. HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) } type testServiceClient struct { cc grpc.ClientConnInterface } func NewTestServiceClient(cc grpc.ClientConnInterface) TestServiceClient { return &testServiceClient{cc} } func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) err := c.cc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) err := c.cc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { stream, err := c.cc.NewStream(ctx, &_TestService_serviceDesc.Streams[0], "/grpc.testing.TestService/StreamingOutputCall", opts...) 
if err != nil { return nil, err } x := &testServiceStreamingOutputCallClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type TestService_StreamingOutputCallClient interface { Recv() (*StreamingOutputCallResponse, error) grpc.ClientStream } type testServiceStreamingOutputCallClient struct { grpc.ClientStream } func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { m := new(StreamingOutputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { stream, err := c.cc.NewStream(ctx, &_TestService_serviceDesc.Streams[1], "/grpc.testing.TestService/StreamingInputCall", opts...) if err != nil { return nil, err } x := &testServiceStreamingInputCallClient{stream} return x, nil } type TestService_StreamingInputCallClient interface { Send(*StreamingInputCallRequest) error CloseAndRecv() (*StreamingInputCallResponse, error) grpc.ClientStream } type testServiceStreamingInputCallClient struct { grpc.ClientStream } func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { if err := x.ClientStream.CloseSend(); err != nil { return nil, err } m := new(StreamingInputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { stream, err := c.cc.NewStream(ctx, &_TestService_serviceDesc.Streams[2], "/grpc.testing.TestService/FullDuplexCall", opts...) 
if err != nil { return nil, err } x := &testServiceFullDuplexCallClient{stream} return x, nil } type TestService_FullDuplexCallClient interface { Send(*StreamingOutputCallRequest) error Recv() (*StreamingOutputCallResponse, error) grpc.ClientStream } type testServiceFullDuplexCallClient struct { grpc.ClientStream } func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { m := new(StreamingOutputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { stream, err := c.cc.NewStream(ctx, &_TestService_serviceDesc.Streams[3], "/grpc.testing.TestService/HalfDuplexCall", opts...) if err != nil { return nil, err } x := &testServiceHalfDuplexCallClient{stream} return x, nil } type TestService_HalfDuplexCallClient interface { Send(*StreamingOutputCallRequest) error Recv() (*StreamingOutputCallResponse, error) grpc.ClientStream } type testServiceHalfDuplexCallClient struct { grpc.ClientStream } func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { return x.ClientStream.SendMsg(m) } func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { m := new(StreamingOutputCallResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // TestServiceServer is the server API for TestService service. type TestServiceServer interface { // One empty request followed by one empty response. EmptyCall(context.Context, *Empty) (*Empty, error) // One request followed by one response. // The server returns the client payload as-is. UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) // One request followed by a sequence of responses (streamed download). 
// The server returns the payload with client desired type and sizes. StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. StreamingInputCall(TestService_StreamingInputCallServer) error // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. FullDuplexCall(TestService_FullDuplexCallServer) error // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. A // stream of responses are returned to the client when the server starts with // first request. HalfDuplexCall(TestService_HalfDuplexCallServer) error } // UnimplementedTestServiceServer can be embedded to have forward compatible implementations. 
type UnimplementedTestServiceServer struct { } func (*UnimplementedTestServiceServer) EmptyCall(ctx context.Context, req *Empty) (*Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method EmptyCall not implemented") } func (*UnimplementedTestServiceServer) UnaryCall(ctx context.Context, req *SimpleRequest) (*SimpleResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UnaryCall not implemented") } func (*UnimplementedTestServiceServer) StreamingOutputCall(req *StreamingOutputCallRequest, srv TestService_StreamingOutputCallServer) error { return status.Errorf(codes.Unimplemented, "method StreamingOutputCall not implemented") } func (*UnimplementedTestServiceServer) StreamingInputCall(srv TestService_StreamingInputCallServer) error { return status.Errorf(codes.Unimplemented, "method StreamingInputCall not implemented") } func (*UnimplementedTestServiceServer) FullDuplexCall(srv TestService_FullDuplexCallServer) error { return status.Errorf(codes.Unimplemented, "method FullDuplexCall not implemented") } func (*UnimplementedTestServiceServer) HalfDuplexCall(srv TestService_HalfDuplexCallServer) error { return status.Errorf(codes.Unimplemented, "method HalfDuplexCall not implemented") } func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { s.RegisterService(&_TestService_serviceDesc, srv) } func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(TestServiceServer).EmptyCall(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.testing.TestService/EmptyCall", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) } return interceptor(ctx, in, info, handler) } func _TestService_UnaryCall_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SimpleRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(TestServiceServer).UnaryCall(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/grpc.testing.TestService/UnaryCall", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) } return interceptor(ctx, in, info, handler) } func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(StreamingOutputCallRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) } type TestService_StreamingOutputCallServer interface { Send(*StreamingOutputCallResponse) error grpc.ServerStream } type testServiceStreamingOutputCallServer struct { grpc.ServerStream } func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { return x.ServerStream.SendMsg(m) } func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) } type TestService_StreamingInputCallServer interface { SendAndClose(*StreamingInputCallResponse) error Recv() (*StreamingInputCallRequest, error) grpc.ServerStream } type testServiceStreamingInputCallServer struct { grpc.ServerStream } func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { m := new(StreamingInputCallRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _TestService_FullDuplexCall_Handler(srv 
interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) } type TestService_FullDuplexCallServer interface { Send(*StreamingOutputCallResponse) error Recv() (*StreamingOutputCallRequest, error) grpc.ServerStream } type testServiceFullDuplexCallServer struct { grpc.ServerStream } func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { m := new(StreamingOutputCallRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) } type TestService_HalfDuplexCallServer interface { Send(*StreamingOutputCallResponse) error Recv() (*StreamingOutputCallRequest, error) grpc.ServerStream } type testServiceHalfDuplexCallServer struct { grpc.ServerStream } func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { return x.ServerStream.SendMsg(m) } func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { m := new(StreamingOutputCallRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _TestService_serviceDesc = grpc.ServiceDesc{ ServiceName: "grpc.testing.TestService", HandlerType: (*TestServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "EmptyCall", Handler: _TestService_EmptyCall_Handler, }, { MethodName: "UnaryCall", Handler: _TestService_UnaryCall_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "StreamingOutputCall", Handler: _TestService_StreamingOutputCall_Handler, ServerStreams: true, }, { StreamName: "StreamingInputCall", Handler: _TestService_StreamingInputCall_Handler, ClientStreams: true, }, { StreamName: 
"FullDuplexCall", Handler: _TestService_FullDuplexCall_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "HalfDuplexCall", Handler: _TestService_HalfDuplexCall_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "grpc_testing/test.proto", } grpc-go-1.29.1/test/grpc_testing/test.proto000066400000000000000000000120431365033716300206640ustar00rootroot00000000000000// Copyright 2017 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // An integration test service that covers all the method signature permutations // of unary/streaming requests/responses. syntax = "proto3"; package grpc.testing; message Empty {} // The type of payload that should be returned. enum PayloadType { // Compressable text format. COMPRESSABLE = 0; // Uncompressable binary format. UNCOMPRESSABLE = 1; // Randomly chosen from all other formats defined in this enum. RANDOM = 2; } // A block of data, to simply increase gRPC message size. message Payload { // The type of data in body. PayloadType type = 1; // Primary contents of payload. bytes body = 2; } // Unary request. message SimpleRequest { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. PayloadType response_type = 1; // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. 
int32 response_size = 2; // Optional input payload sent along with the request. Payload payload = 3; // Whether SimpleResponse should include username. bool fill_username = 4; // Whether SimpleResponse should include OAuth scope. bool fill_oauth_scope = 5; } // Unary response, as configured by the request. message SimpleResponse { // Payload to increase message size. Payload payload = 1; // The user the request came from, for verifying authentication was // successful when the client expected it. string username = 2; // OAuth scope. string oauth_scope = 3; } // Client-streaming request. message StreamingInputCallRequest { // Optional input payload sent along with the request. Payload payload = 1; // Not expecting any payload from the response. } // Client-streaming response. message StreamingInputCallResponse { // Aggregated size of payloads received from the client. int32 aggregated_payload_size = 1; } // Configuration for a particular response. message ResponseParameters { // Desired payload sizes in responses from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. int32 size = 1; // Desired interval between consecutive responses in the response stream in // microseconds. int32 interval_us = 2; } // Server-streaming request. message StreamingOutputCallRequest { // Desired payload type in the response from the server. // If response_type is RANDOM, the payload from each response in the stream // might be of different types. This is to simulate a mixed type of payload // stream. PayloadType response_type = 1; // Configuration for each expected response message. repeated ResponseParameters response_parameters = 2; // Optional input payload sent along with the request. Payload payload = 3; } // Server-streaming response, as configured by the request and parameters. message StreamingOutputCallResponse { // Payload to increase response size. 
Payload payload = 1; } // A simple service to test the various types of RPCs and experiment with // performance with various types of payload. service TestService { // One empty request followed by one empty response. rpc EmptyCall(Empty) returns (Empty); // One request followed by one response. // The server returns the client payload as-is. rpc UnaryCall(SimpleRequest) returns (SimpleResponse); // One request followed by a sequence of responses (streamed download). // The server returns the payload with client desired type and sizes. rpc StreamingOutputCall(StreamingOutputCallRequest) returns (stream StreamingOutputCallResponse); // A sequence of requests followed by one response (streamed upload). // The server returns the aggregated size of client payload as the result. rpc StreamingInputCall(stream StreamingInputCallRequest) returns (StreamingInputCallResponse); // A sequence of requests with each request served by the server immediately. // As one request could lead to multiple responses, this interface // demonstrates the idea of full duplexing. rpc FullDuplexCall(stream StreamingOutputCallRequest) returns (stream StreamingOutputCallResponse); // A sequence of requests followed by a sequence of responses. // The server buffers all the client requests and then serves them in order. A // stream of responses are returned to the client when the server starts with // first request. rpc HalfDuplexCall(stream StreamingOutputCallRequest) returns (stream StreamingOutputCallResponse); } grpc-go-1.29.1/test/healthcheck_test.go000066400000000000000000000747031365033716300177740ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package test import ( "context" "errors" "fmt" "net" "sync" "testing" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" _ "google.golang.org/grpc/health" healthgrpc "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" testpb "google.golang.org/grpc/test/grpc_testing" ) var testHealthCheckFunc = internal.HealthCheckFunc func newTestHealthServer() *testHealthServer { return newTestHealthServerWithWatchFunc(defaultWatchFunc) } func newTestHealthServerWithWatchFunc(f func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error) *testHealthServer { return &testHealthServer{ watchFunc: f, update: make(chan struct{}, 1), status: make(map[string]healthpb.HealthCheckResponse_ServingStatus), } } // defaultWatchFunc will send a HealthCheckResponse to the client whenever SetServingStatus is called. 
func defaultWatchFunc(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error {
	if in.Service != "foo" {
		return status.Error(codes.FailedPrecondition,
			"the defaultWatchFunc only handles request with service name to be \"foo\"")
	}
	var done bool
	for {
		select {
		case <-stream.Context().Done():
			// Client cancelled the stream (or the test ended); stop streaming.
			done = true
		case <-s.update:
			// SetServingStatus signalled a change; fall through and send it.
		}
		if done {
			break
		}
		// Snapshot the current status under the lock, but send outside of it
		// so a slow stream does not hold up SetServingStatus callers.
		s.mu.Lock()
		resp := &healthpb.HealthCheckResponse{
			Status: s.status[in.Service],
		}
		s.mu.Unlock()
		stream.SendMsg(resp)
	}
	return nil
}

// testHealthServer is a health service implementation whose Watch behavior is
// injectable, letting individual tests script the server side of the
// subchannel health-checking stream.
type testHealthServer struct {
	healthpb.UnimplementedHealthServer
	// watchFunc implements the Watch RPC; tests may substitute a special one.
	watchFunc func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error
	mu        sync.Mutex
	// status maps service name -> serving status; guarded by mu.
	status map[string]healthpb.HealthCheckResponse_ServingStatus
	// update signals Watch streams that status changed (1-buffered).
	update chan struct{}
}

// Check always reports SERVING, regardless of the requested service.
func (s *testHealthServer) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
	return &healthpb.HealthCheckResponse{
		Status: healthpb.HealthCheckResponse_SERVING,
	}, nil
}

// Watch delegates to the injected watchFunc.
func (s *testHealthServer) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error {
	return s.watchFunc(s, in, stream)
}

// SetServingStatus is called when need to reset the serving status of a service
// or insert a new service entry into the statusMap.
func (s *testHealthServer) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
	s.mu.Lock()
	s.status[service] = status
	// Drain any pending, unconsumed signal first so the buffered send below
	// can never block while the lock is held.
	select {
	case <-s.update:
	default:
	}
	s.update <- struct{}{}
	s.mu.Unlock()
}

// setupHealthCheckWrapper wraps the real health-check function so tests can
// observe the health-checking goroutine's lifetime: hcEnterChan is closed when
// the function is invoked and hcExitChan is closed when it returns.
// NOTE(review): the channels are closed (not signalled), so the wrapper must
// be invoked at most once per test — confirm that is the intended usage.
func setupHealthCheckWrapper() (hcEnterChan chan struct{}, hcExitChan chan struct{}, wrapper internal.HealthChecker) {
	hcEnterChan = make(chan struct{})
	hcExitChan = make(chan struct{})
	wrapper = func(ctx context.Context, newStream func(string) (interface{}, error), update func(connectivity.State, error), service string) error {
		close(hcEnterChan)
		defer close(hcExitChan)
		return testHealthCheckFunc(ctx, newStream, update, service)
	}
	return
}

// svrConfig carries optional server-side knobs for setupServer.
type svrConfig struct {
	// specialWatchFunc, when non-nil, replaces defaultWatchFunc on the test
	// health server.
	specialWatchFunc func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error
}

// setupServer starts a gRPC server on an ephemeral localhost port with the
// health service and the test service registered. The returned deferFunc
// stops the server; it is non-nil even on error so callers may defer it
// unconditionally before checking err.
func setupServer(sc *svrConfig) (s *grpc.Server, lis net.Listener, ts *testHealthServer, deferFunc func(), err error) {
	s = grpc.NewServer()
	lis, err = net.Listen("tcp", "localhost:0")
	if err != nil {
		return nil, nil, nil, func() {}, fmt.Errorf("failed to listen due to err %v", err)
	}
	if sc.specialWatchFunc != nil {
		ts = newTestHealthServerWithWatchFunc(sc.specialWatchFunc)
	} else {
		ts = newTestHealthServer()
	}
	healthgrpc.RegisterHealthServer(s, ts)
	testpb.RegisterTestServiceServer(s, &testServer{})
	go s.Serve(lis)
	return s, lis, ts, s.Stop, nil
}

// clientConfig carries client-side knobs for setupClient.
type clientConfig struct {
	balancerName               string
	testHealthCheckFuncWrapper internal.HealthChecker
	extraDialOption            []grpc.DialOption
}

// setupClient dials a ClientConn through a freshly registered manual resolver,
// optionally installing a wrapped health-check function. The returned
// deferFunc closes the connection and unregisters the resolver.
func setupClient(c *clientConfig) (cc *grpc.ClientConn, r *manual.Resolver, deferFunc func(), err error) {
	r, rcleanup := manual.GenerateAndRegisterManualResolver()

	var opts []grpc.DialOption
	opts = append(opts, grpc.WithInsecure(), grpc.WithBalancerName(c.balancerName))
	if c.testHealthCheckFuncWrapper != nil {
		opts = append(opts, internal.WithHealthCheckFunc.(func(internal.HealthChecker) grpc.DialOption)(c.testHealthCheckFuncWrapper))
	}
	opts = append(opts, c.extraDialOption...)
	cc, err = grpc.Dial(r.Scheme()+":///test.server", opts...)
	if err != nil {
		// Unregister the resolver on failure; on success the returned
		// deferFunc owns both cleanups.
		rcleanup()
		return nil, nil, nil, fmt.Errorf("dial failed due to err: %v", err)
	}
	return cc, r, func() { cc.Close(); rcleanup() }, nil
}

// TestHealthCheckWatchStateChange drives the server's health status through a
// sequence of values and asserts the ClientConn's connectivity state follows.
func (s) TestHealthCheckWatchStateChange(t *testing.T) {
	_, lis, ts, deferFunc, err := setupServer(&svrConfig{})
	defer deferFunc()
	if err != nil {
		t.Fatal(err)
	}

	// The table below shows the expected series of addrConn connectivity transitions when server
	// updates its health status. As there's only one addrConn corresponds with the ClientConn in this
	// test, we use ClientConn's connectivity state as the addrConn connectivity state.
	//+------------------------------+-------------------------------------------+
	//| Health Check Returned Status | Expected addrConn Connectivity Transition |
	//+------------------------------+-------------------------------------------+
	//| NOT_SERVING                  | ->TRANSIENT FAILURE                       |
	//| SERVING                      | ->READY                                   |
	//| SERVICE_UNKNOWN              | ->TRANSIENT FAILURE                       |
	//| SERVING                      | ->READY                                   |
	//| UNKNOWN                      | ->TRANSIENT FAILURE                       |
	//+------------------------------+-------------------------------------------+
	ts.SetServingStatus("foo", healthpb.HealthCheckResponse_NOT_SERVING)

	cc, r, deferFunc, err := setupClient(&clientConfig{balancerName: "round_robin"})
	if err != nil {
		t.Fatal(err)
	}
	defer deferFunc()

	r.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: lis.Addr().String()}},
		ServiceConfig: parseCfg(r, `{
	"healthCheckConfig": {
		"serviceName": "foo"
	}
}`)})

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Walk the expected IDLE -> CONNECTING -> TRANSIENT FAILURE sequence
	// (the server starts out NOT_SERVING).
	if ok := cc.WaitForStateChange(ctx, connectivity.Idle); !ok {
		t.Fatal("ClientConn is still in IDLE state when the context times out.")
	}
	if ok := cc.WaitForStateChange(ctx, connectivity.Connecting); !ok {
		t.Fatal("ClientConn is still in CONNECTING state when the context times out.")
	}
	if s := cc.GetState(); s != connectivity.TransientFailure {
		t.Fatalf("ClientConn is in %v state, want TRANSIENT FAILURE", s)
	}

	ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING)
	if ok := cc.WaitForStateChange(ctx, connectivity.TransientFailure); !ok {
		t.Fatal("ClientConn is still in TRANSIENT FAILURE state when the context times out.")
	}
	if s := cc.GetState(); s != connectivity.Ready {
		t.Fatalf("ClientConn is in %v state, want READY", s)
	}

	ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVICE_UNKNOWN)
	if ok := cc.WaitForStateChange(ctx, connectivity.Ready); !ok {
		t.Fatal("ClientConn is still in READY state when the context times out.")
	}
	if s := cc.GetState(); s != connectivity.TransientFailure {
		t.Fatalf("ClientConn is in %v state, want TRANSIENT FAILURE", s)
	}

	ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING)
	if ok := cc.WaitForStateChange(ctx, connectivity.TransientFailure); !ok {
		t.Fatal("ClientConn is still in TRANSIENT FAILURE state when the context times out.")
	}
	if s := cc.GetState(); s != connectivity.Ready {
		t.Fatalf("ClientConn is in %v state, want READY", s)
	}

	ts.SetServingStatus("foo", healthpb.HealthCheckResponse_UNKNOWN)
	if ok := cc.WaitForStateChange(ctx, connectivity.Ready); !ok {
		t.Fatal("ClientConn is still in READY state when the context times out.")
	}
	if s := cc.GetState(); s != connectivity.TransientFailure {
		t.Fatalf("ClientConn is in %v state, want TRANSIENT FAILURE", s)
	}
}

// If Watch returns Unimplemented, then the ClientConn should go into READY state.
func (s) TestHealthCheckHealthServerNotRegistered(t *testing.T) {
	// The client is expected to log this error when the server does not
	// expose the health service; tell the test logger to tolerate it.
	grpctest.TLogger.ExpectError("Subchannel health check is unimplemented at server side, thus health check is disabled")
	// A plain server with no health service registered, so Watch returns
	// Unimplemented to the client's health checker.
	s := grpc.NewServer()
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("failed to listen due to err: %v", err)
	}
	go s.Serve(lis)
	defer s.Stop()

	cc, r, deferFunc, err := setupClient(&clientConfig{balancerName: "round_robin"})
	if err != nil {
		t.Fatal(err)
	}
	defer deferFunc()

	r.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: lis.Addr().String()}},
		ServiceConfig: parseCfg(r, `{
	"healthCheckConfig": {
		"serviceName": "foo"
	}
}`)})

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Even though health checking is configured, an unimplemented Watch must
	// not keep the connection out of READY.
	if ok := cc.WaitForStateChange(ctx, connectivity.Idle); !ok {
		t.Fatal("ClientConn is still in IDLE state when the context times out.")
	}
	if ok := cc.WaitForStateChange(ctx, connectivity.Connecting); !ok {
		t.Fatal("ClientConn is still in CONNECTING state when the context times out.")
	}
	if s := cc.GetState(); s != connectivity.Ready {
		t.Fatalf("ClientConn is in %v state, want READY", s)
	}
}

// In the case of a goaway received, the health check stream should be terminated and health check
// function should exit.
func (s) TestHealthCheckWithGoAway(t *testing.T) {
	hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper()

	s, lis, ts, deferFunc, err := setupServer(&svrConfig{})
	defer deferFunc()
	if err != nil {
		t.Fatal(err)
	}
	ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING)

	cc, r, deferFunc, err := setupClient(&clientConfig{
		balancerName:               "round_robin",
		testHealthCheckFuncWrapper: testHealthCheckFuncWrapper,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer deferFunc()

	tc := testpb.NewTestServiceClient(cc)
	r.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: lis.Addr().String()}},
		ServiceConfig: parseCfg(r, `{
	"healthCheckConfig": {
		"serviceName": "foo"
	}
}`)})

	// make some rpcs to make sure connection is working.
	// NOTE(review): the expectation text "want _, " looks truncated (likely a
	// stripped "<nil>") — confirm against the upstream file.
	if err := verifyResultWithDelay(func() (bool, error) {
		if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
			return false, fmt.Errorf("TestService/EmptyCall(_, _) = _, %v, want _, ", err)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}

	// the stream rpc will persist through goaway event.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
	}
	respParam := []*testpb.ResponseParameters{{Size: 1}}
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseParameters: respParam,
		Payload:            payload,
	}
	// One request/response round trip proves the stream is live before GoAway.
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(_) = %v, want ", stream, err)
	}
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err)
	}

	// The health check function must still be running before the GoAway.
	select {
	case <-hcExitChan:
		t.Fatal("Health check function has exited, which is not expected.")
	default:
	}

	// server sends GoAway
	go s.GracefulStop()

	// The GoAway must terminate the health check stream and let the health
	// check function return; distinguish "never entered" from "never exited".
	select {
	case <-hcExitChan:
	case <-time.After(5 * time.Second):
		select {
		case <-hcEnterChan:
		default:
			t.Fatal("Health check function has not entered after 5s.")
		}
		t.Fatal("Health check function has not exited after 5s.")
	}

	// The existing RPC should be still good to proceed.
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(_) = %v, want ", stream, err)
	}
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err)
	}
}

// TestHealthCheckWithConnClose verifies that a hard server stop (connection
// close) terminates the health check stream and the health check function.
func (s) TestHealthCheckWithConnClose(t *testing.T) {
	hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper()

	s, lis, ts, deferFunc, err := setupServer(&svrConfig{})
	defer deferFunc()
	if err != nil {
		t.Fatal(err)
	}
	ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING)

	cc, r, deferFunc, err := setupClient(&clientConfig{
		balancerName:               "round_robin",
		testHealthCheckFuncWrapper: testHealthCheckFuncWrapper,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer deferFunc()

	tc := testpb.NewTestServiceClient(cc)
	r.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: lis.Addr().String()}},
		ServiceConfig: parseCfg(r, `{
	"healthCheckConfig": {
		"serviceName": "foo"
	}
}`)})

	// make some rpcs to make sure connection is working.
	if err := verifyResultWithDelay(func() (bool, error) {
		if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
			return false, fmt.Errorf("TestService/EmptyCall(_, _) = _, %v, want _, ", err)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}

	// The health check function must still be running before the server stops.
	select {
	case <-hcExitChan:
		t.Fatal("Health check function has exited, which is not expected.")
	default:
	}

	// server closes the connection
	s.Stop()

	select {
	case <-hcExitChan:
	case <-time.After(5 * time.Second):
		select {
		case <-hcEnterChan:
		default:
			t.Fatal("Health check function has not entered after 5s.")
		}
		t.Fatal("Health check function has not exited after 5s.")
	}
}

// addrConn drain happens when addrConn gets torn down due to its address being no longer in the
// address list returned by the resolver.
func (s) TestHealthCheckWithAddrConnDrain(t *testing.T) {
	hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper()

	_, lis, ts, deferFunc, err := setupServer(&svrConfig{})
	defer deferFunc()
	if err != nil {
		t.Fatal(err)
	}
	ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING)

	cc, r, deferFunc, err := setupClient(&clientConfig{
		balancerName:               "round_robin",
		testHealthCheckFuncWrapper: testHealthCheckFuncWrapper,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer deferFunc()

	tc := testpb.NewTestServiceClient(cc)
	// Keep the parsed service config in a variable: the same config is reused
	// in the second resolver update that drops the server's address.
	sc := parseCfg(r, `{
	"healthCheckConfig": {
		"serviceName": "foo"
	}
}`)
	r.UpdateState(resolver.State{
		Addresses:     []resolver.Address{{Addr: lis.Addr().String()}},
		ServiceConfig: sc,
	})

	// make some rpcs to make sure connection is working.
	if err := verifyResultWithDelay(func() (bool, error) {
		if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil {
			return false, fmt.Errorf("TestService/EmptyCall(_, _) = _, %v, want _, ", err)
		}
		return true, nil
	}); err != nil {
		t.Fatal(err)
	}

	// the stream rpc will persist through goaway event.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true))
	if err != nil {
		t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err)
	}
	respParam := []*testpb.ResponseParameters{{Size: 1}}
	payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1))
	if err != nil {
		t.Fatal(err)
	}
	req := &testpb.StreamingOutputCallRequest{
		ResponseParameters: respParam,
		Payload:            payload,
	}
	// One request/response round trip proves the stream is live before drain.
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(_) = %v, want ", stream, err)
	}
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err)
	}

	// The health check function must still be running before the drain.
	select {
	case <-hcExitChan:
		t.Fatal("Health check function has exited, which is not expected.")
	default:
	}

	// trigger teardown of the ac
	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "fake address"}}, ServiceConfig: sc})

	// The drain must terminate the health check stream and let the health
	// check function return; distinguish "never entered" from "never exited".
	select {
	case <-hcExitChan:
	case <-time.After(5 * time.Second):
		select {
		case <-hcEnterChan:
		default:
			t.Fatal("Health check function has not entered after 5s.")
		}
		t.Fatal("Health check function has not exited after 5s.")
	}

	// The existing RPC should be still good to proceed.
	if err := stream.Send(req); err != nil {
		t.Fatalf("%v.Send(_) = %v, want ", stream, err)
	}
	if _, err := stream.Recv(); err != nil {
		t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err)
	}
}

// ClientConn close will lead to its addrConns being torn down.
func (s) TestHealthCheckWithClientConnClose(t *testing.T) { hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() _, lis, ts, deferFunc, err := setupServer(&svrConfig{}) defer deferFunc() if err != nil { t.Fatal(err) } ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) cc, r, deferFunc, err := setupClient(&clientConfig{ balancerName: "round_robin", testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, }) if err != nil { t.Fatal(err) } defer deferFunc() tc := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "foo" } }`)}) // make some rpcs to make sure connection is working. if err := verifyResultWithDelay(func() (bool, error) { if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { return false, fmt.Errorf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } return true, nil }); err != nil { t.Fatal(err) } select { case <-hcExitChan: t.Fatal("Health check function has exited, which is not expected.") default: } // trigger addrConn teardown cc.Close() select { case <-hcExitChan: case <-time.After(5 * time.Second): select { case <-hcEnterChan: default: t.Fatal("Health check function has not entered after 5s.") } t.Fatal("Health check function has not exited after 5s.") } } // This test is to test the logic in the createTransport after the health check function returns which // closes the skipReset channel(since it has not been closed inside health check func) to unblock // onGoAway/onClose goroutine. 
func (s) TestHealthCheckWithoutSetConnectivityStateCalledAddrConnShutDown(t *testing.T) { hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() _, lis, ts, deferFunc, err := setupServer(&svrConfig{ specialWatchFunc: func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { if in.Service != "delay" { return status.Error(codes.FailedPrecondition, "this special Watch function only handles request with service name to be \"delay\"") } // Do nothing to mock a delay of health check response from server side. // This case is to help with the test that covers the condition that setConnectivityState is not // called inside HealthCheckFunc before the func returns. select { case <-stream.Context().Done(): case <-time.After(5 * time.Second): } return nil }, }) defer deferFunc() if err != nil { t.Fatal(err) } ts.SetServingStatus("delay", healthpb.HealthCheckResponse_SERVING) _, r, deferFunc, err := setupClient(&clientConfig{ balancerName: "round_robin", testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, }) if err != nil { t.Fatal(err) } defer deferFunc() // The serviceName "delay" is specially handled at server side, where response will not be sent // back to client immediately upon receiving the request (client should receive no response until // test ends). 
sc := parseCfg(r, `{ "healthCheckConfig": { "serviceName": "delay" } }`) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: sc, }) select { case <-hcExitChan: t.Fatal("Health check function has exited, which is not expected.") default: } select { case <-hcEnterChan: case <-time.After(5 * time.Second): t.Fatal("Health check function has not been invoked after 5s.") } // trigger teardown of the ac, ac in SHUTDOWN state r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "fake address"}}, ServiceConfig: sc}) // The health check func should exit without calling the setConnectivityState func, as server hasn't sent // any response. select { case <-hcExitChan: case <-time.After(5 * time.Second): t.Fatal("Health check function has not exited after 5s.") } // The deferred leakcheck will check whether there's leaked goroutine, which is an indication // whether we closes the skipReset channel to unblock onGoAway/onClose goroutine. } // This test is to test the logic in the createTransport after the health check function returns which // closes the allowedToReset channel(since it has not been closed inside health check func) to unblock // onGoAway/onClose goroutine. func (s) TestHealthCheckWithoutSetConnectivityStateCalled(t *testing.T) { hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() s, lis, ts, deferFunc, err := setupServer(&svrConfig{ specialWatchFunc: func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { if in.Service != "delay" { return status.Error(codes.FailedPrecondition, "this special Watch function only handles request with service name to be \"delay\"") } // Do nothing to mock a delay of health check response from server side. // This case is to help with the test that covers the condition that setConnectivityState is not // called inside HealthCheckFunc before the func returns. 
select { case <-stream.Context().Done(): case <-time.After(5 * time.Second): } return nil }, }) defer deferFunc() if err != nil { t.Fatal(err) } ts.SetServingStatus("delay", healthpb.HealthCheckResponse_SERVING) _, r, deferFunc, err := setupClient(&clientConfig{ balancerName: "round_robin", testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, }) if err != nil { t.Fatal(err) } defer deferFunc() // The serviceName "delay" is specially handled at server side, where response will not be sent // back to client immediately upon receiving the request (client should receive no response until // test ends). r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "delay" } }`)}) select { case <-hcExitChan: t.Fatal("Health check function has exited, which is not expected.") default: } select { case <-hcEnterChan: case <-time.After(5 * time.Second): t.Fatal("Health check function has not been invoked after 5s.") } // trigger transport being closed s.Stop() // The health check func should exit without calling the setConnectivityState func, as server hasn't sent // any response. select { case <-hcExitChan: case <-time.After(5 * time.Second): t.Fatal("Health check function has not exited after 5s.") } // The deferred leakcheck will check whether there's leaked goroutine, which is an indication // whether we closes the allowedToReset channel to unblock onGoAway/onClose goroutine. 
} func testHealthCheckDisableWithDialOption(t *testing.T, addr string) { hcEnterChan, _, testHealthCheckFuncWrapper := setupHealthCheckWrapper() cc, r, deferFunc, err := setupClient(&clientConfig{ balancerName: "round_robin", testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, extraDialOption: []grpc.DialOption{grpc.WithDisableHealthCheck()}, }) if err != nil { t.Fatal(err) } defer deferFunc() tc := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: addr}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "foo" } }`)}) // send some rpcs to make sure transport has been created and is ready for use. if err := verifyResultWithDelay(func() (bool, error) { if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { return false, fmt.Errorf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } return true, nil }); err != nil { t.Fatal(err) } select { case <-hcEnterChan: t.Fatal("Health check function has exited, which is not expected.") default: } } func testHealthCheckDisableWithBalancer(t *testing.T, addr string) { hcEnterChan, _, testHealthCheckFuncWrapper := setupHealthCheckWrapper() cc, r, deferFunc, err := setupClient(&clientConfig{ balancerName: "pick_first", testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, }) if err != nil { t.Fatal(err) } defer deferFunc() tc := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: addr}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "foo" } }`)}) // send some rpcs to make sure transport has been created and is ready for use. 
if err := verifyResultWithDelay(func() (bool, error) { if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { return false, fmt.Errorf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } return true, nil }); err != nil { t.Fatal(err) } select { case <-hcEnterChan: t.Fatal("Health check function has started, which is not expected.") default: } } func testHealthCheckDisableWithServiceConfig(t *testing.T, addr string) { hcEnterChan, _, testHealthCheckFuncWrapper := setupHealthCheckWrapper() cc, r, deferFunc, err := setupClient(&clientConfig{ balancerName: "round_robin", testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, }) if err != nil { t.Fatal(err) } defer deferFunc() tc := testpb.NewTestServiceClient(cc) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: addr}}}) // send some rpcs to make sure transport has been created and is ready for use. if err := verifyResultWithDelay(func() (bool, error) { if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { return false, fmt.Errorf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } return true, nil }); err != nil { t.Fatal(err) } select { case <-hcEnterChan: t.Fatal("Health check function has started, which is not expected.") default: } } func (s) TestHealthCheckDisable(t *testing.T) { _, lis, ts, deferFunc, err := setupServer(&svrConfig{}) defer deferFunc() if err != nil { t.Fatal(err) } ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) // test client side disabling configuration. 
testHealthCheckDisableWithDialOption(t, lis.Addr().String()) testHealthCheckDisableWithBalancer(t, lis.Addr().String()) testHealthCheckDisableWithServiceConfig(t, lis.Addr().String()) } func (s) TestHealthCheckChannelzCountingCallSuccess(t *testing.T) { _, lis, _, deferFunc, err := setupServer(&svrConfig{ specialWatchFunc: func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { if in.Service != "channelzSuccess" { return status.Error(codes.FailedPrecondition, "this special Watch function only handles request with service name to be \"channelzSuccess\"") } return status.Error(codes.OK, "fake success") }, }) defer deferFunc() if err != nil { t.Fatal(err) } _, r, deferFunc, err := setupClient(&clientConfig{balancerName: "round_robin"}) if err != nil { t.Fatal(err) } defer deferFunc() r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "channelzSuccess" } }`)}) if err := verifyResultWithDelay(func() (bool, error) { cm, _ := channelz.GetTopChannels(0, 0) if len(cm) == 0 { return false, errors.New("channelz.GetTopChannels return 0 top channel") } if len(cm[0].SubChans) == 0 { return false, errors.New("there is 0 subchannel") } var id int64 for k := range cm[0].SubChans { id = k break } scm := channelz.GetSubChannel(id) if scm == nil || scm.ChannelData == nil { return false, errors.New("nil subchannel metric or nil subchannel metric ChannelData returned") } // exponential backoff retry may result in more than one health check call. 
if scm.ChannelData.CallsStarted > 0 && scm.ChannelData.CallsSucceeded > 0 && scm.ChannelData.CallsFailed == 0 { return true, nil } return false, fmt.Errorf("got %d CallsStarted, %d CallsSucceeded, want >0 >0", scm.ChannelData.CallsStarted, scm.ChannelData.CallsSucceeded) }); err != nil { t.Fatal(err) } } func (s) TestHealthCheckChannelzCountingCallFailure(t *testing.T) { _, lis, _, deferFunc, err := setupServer(&svrConfig{ specialWatchFunc: func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { if in.Service != "channelzFailure" { return status.Error(codes.FailedPrecondition, "this special Watch function only handles request with service name to be \"channelzFailure\"") } return status.Error(codes.Internal, "fake failure") }, }) if err != nil { t.Fatal(err) } defer deferFunc() _, r, deferFunc, err := setupClient(&clientConfig{balancerName: "round_robin"}) if err != nil { t.Fatal(err) } defer deferFunc() r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, ServiceConfig: parseCfg(r, `{ "healthCheckConfig": { "serviceName": "channelzFailure" } }`)}) if err := verifyResultWithDelay(func() (bool, error) { cm, _ := channelz.GetTopChannels(0, 0) if len(cm) == 0 { return false, errors.New("channelz.GetTopChannels return 0 top channel") } if len(cm[0].SubChans) == 0 { return false, errors.New("there is 0 subchannel") } var id int64 for k := range cm[0].SubChans { id = k break } scm := channelz.GetSubChannel(id) if scm == nil || scm.ChannelData == nil { return false, errors.New("nil subchannel metric or nil subchannel metric ChannelData returned") } // exponential backoff retry may result in more than one health check call. 
if scm.ChannelData.CallsStarted > 0 && scm.ChannelData.CallsFailed > 0 && scm.ChannelData.CallsSucceeded == 0 { return true, nil } return false, fmt.Errorf("got %d CallsStarted, %d CallsFailed, want >0, >0", scm.ChannelData.CallsStarted, scm.ChannelData.CallsFailed) }); err != nil { t.Fatal(err) } } grpc-go-1.29.1/test/kokoro/000077500000000000000000000000001365033716300154345ustar00rootroot00000000000000grpc-go-1.29.1/test/kokoro/README.md000066400000000000000000000001101365033716300167030ustar00rootroot00000000000000The scripts in this directory are intended to be run by Kokoro CI jobs. grpc-go-1.29.1/test/kokoro/xds.cfg000066400000000000000000000003701365033716300167130ustar00rootroot00000000000000# Config file for internal CI # Location of the continuous shell script in repository. build_file: "grpc-go/test/kokoro/xds.sh" timeout_mins: 90 action { define_artifacts { regex: "**/*sponge_log.*" regex: "github/grpc/reports/**" } } grpc-go-1.29.1/test/kokoro/xds.sh000077500000000000000000000015531365033716300165750ustar00rootroot00000000000000#!/bin/bash set -exu -o pipefail [[ -f /VERSION ]] && cat /VERSION cd github export GOPATH="${HOME}/gopath" pushd grpc-go/interop/xds/client branch=$(git branch --all --no-color --contains "${KOKORO_GITHUB_COMMIT}" \ | grep -v HEAD | head -1) shopt -s extglob branch="${branch//[[:space:]]}" branch="${branch##remotes/origin/}" shopt -u extglob go build popd git clone -b "${branch}" --single-branch --depth=1 https://github.com/grpc/grpc.git grpc/tools/run_tests/helper_scripts/prep_xds.sh GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info \ python3 grpc/tools/run_tests/run_xds_tests.py \ --test_case=all \ --project_id=grpc-testing \ --gcp_suffix=$(date '+%s') \ --verbose \ --client_cmd="grpc-go/interop/xds/client/client \ --server=xds-experimental:///{server_uri} \ --stats_port={stats_port} \ --qps={qps}" grpc-go-1.29.1/test/race.go000066400000000000000000000012301365033716300153650ustar00rootroot00000000000000// 
+build race /* * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package test func init() { raceMode = true } grpc-go-1.29.1/test/rawConnWrapper.go000066400000000000000000000141771365033716300174410ustar00rootroot00000000000000/* * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package test import ( "bytes" "fmt" "io" "net" "strings" "sync" "time" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" ) type listenerWrapper struct { net.Listener mu sync.Mutex rcw *rawConnWrapper } func listenWithConnControl(network, address string) (net.Listener, error) { l, err := net.Listen(network, address) if err != nil { return nil, err } return &listenerWrapper{Listener: l}, nil } // Accept blocks until Dial is called, then returns a net.Conn for the server // half of the connection. 
func (l *listenerWrapper) Accept() (net.Conn, error) { c, err := l.Listener.Accept() if err != nil { return nil, err } l.mu.Lock() l.rcw = newRawConnWrapperFromConn(c) l.mu.Unlock() return c, nil } func (l *listenerWrapper) getLastConn() *rawConnWrapper { l.mu.Lock() defer l.mu.Unlock() return l.rcw } type dialerWrapper struct { c net.Conn rcw *rawConnWrapper } func (d *dialerWrapper) dialer(target string, t time.Duration) (net.Conn, error) { c, err := net.DialTimeout("tcp", target, t) d.c = c d.rcw = newRawConnWrapperFromConn(c) return c, err } func (d *dialerWrapper) getRawConnWrapper() *rawConnWrapper { return d.rcw } type rawConnWrapper struct { cc io.ReadWriteCloser fr *http2.Framer // writing headers: headerBuf bytes.Buffer hpackEnc *hpack.Encoder // reading frames: frc chan http2.Frame frErrc chan error } func newRawConnWrapperFromConn(cc io.ReadWriteCloser) *rawConnWrapper { rcw := &rawConnWrapper{ cc: cc, frc: make(chan http2.Frame, 1), frErrc: make(chan error, 1), } rcw.hpackEnc = hpack.NewEncoder(&rcw.headerBuf) rcw.fr = http2.NewFramer(cc, cc) rcw.fr.ReadMetaHeaders = hpack.NewDecoder(4096 /*initialHeaderTableSize*/, nil) return rcw } func (rcw *rawConnWrapper) Close() error { return rcw.cc.Close() } func (rcw *rawConnWrapper) encodeHeaderField(k, v string) error { err := rcw.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) if err != nil { return fmt.Errorf("HPACK encoding error for %q/%q: %v", k, v, err) } return nil } // encodeRawHeader is for usage on both client and server side to construct header based on the input // key, value pairs. 
func (rcw *rawConnWrapper) encodeRawHeader(headers ...string) []byte { if len(headers)%2 == 1 { panic("odd number of kv args") } rcw.headerBuf.Reset() pseudoCount := map[string]int{} var keys []string vals := map[string][]string{} for len(headers) > 0 { k, v := headers[0], headers[1] headers = headers[2:] if _, ok := vals[k]; !ok { keys = append(keys, k) } if strings.HasPrefix(k, ":") { pseudoCount[k]++ if pseudoCount[k] == 1 { vals[k] = []string{v} } else { // Allows testing of invalid headers w/ dup pseudo fields. vals[k] = append(vals[k], v) } } else { vals[k] = append(vals[k], v) } } for _, k := range keys { for _, v := range vals[k] { rcw.encodeHeaderField(k, v) } } return rcw.headerBuf.Bytes() } // encodeHeader is for usage on client side to write request header. // // encodeHeader encodes headers and returns their HPACK bytes. headers // must contain an even number of key/value pairs. There may be // multiple pairs for keys (e.g. "cookie"). The :method, :path, and // :scheme headers default to GET, / and https. func (rcw *rawConnWrapper) encodeHeader(headers ...string) []byte { if len(headers)%2 == 1 { panic("odd number of kv args") } rcw.headerBuf.Reset() if len(headers) == 0 { // Fast path, mostly for benchmarks, so test code doesn't pollute // profiles when we're looking to improve server allocations. rcw.encodeHeaderField(":method", "GET") rcw.encodeHeaderField(":path", "/") rcw.encodeHeaderField(":scheme", "https") return rcw.headerBuf.Bytes() } if len(headers) == 2 && headers[0] == ":method" { // Another fast path for benchmarks. 
rcw.encodeHeaderField(":method", headers[1]) rcw.encodeHeaderField(":path", "/") rcw.encodeHeaderField(":scheme", "https") return rcw.headerBuf.Bytes() } pseudoCount := map[string]int{} keys := []string{":method", ":path", ":scheme"} vals := map[string][]string{ ":method": {"GET"}, ":path": {"/"}, ":scheme": {"https"}, } for len(headers) > 0 { k, v := headers[0], headers[1] headers = headers[2:] if _, ok := vals[k]; !ok { keys = append(keys, k) } if strings.HasPrefix(k, ":") { pseudoCount[k]++ if pseudoCount[k] == 1 { vals[k] = []string{v} } else { // Allows testing of invalid headers w/ dup pseudo fields. vals[k] = append(vals[k], v) } } else { vals[k] = append(vals[k], v) } } for _, k := range keys { for _, v := range vals[k] { rcw.encodeHeaderField(k, v) } } return rcw.headerBuf.Bytes() } func (rcw *rawConnWrapper) writeHeaders(p http2.HeadersFrameParam) error { if err := rcw.fr.WriteHeaders(p); err != nil { return fmt.Errorf("error writing HEADERS: %v", err) } return nil } func (rcw *rawConnWrapper) writeRSTStream(streamID uint32, code http2.ErrCode) error { if err := rcw.fr.WriteRSTStream(streamID, code); err != nil { return fmt.Errorf("error writing RST_STREAM: %v", err) } return nil } func (rcw *rawConnWrapper) writeGoAway(maxStreamID uint32, code http2.ErrCode, debugData []byte) error { if err := rcw.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { return fmt.Errorf("error writing GoAway: %v", err) } return nil } func (rcw *rawConnWrapper) writeRawFrame(t http2.FrameType, flags http2.Flags, streamID uint32, payload []byte) error { if err := rcw.fr.WriteRawFrame(t, flags, streamID, payload); err != nil { return fmt.Errorf("error writing Raw Frame: %v", err) } return nil } grpc-go-1.29.1/test/retry_test.go000066400000000000000000000405331365033716300166700ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package test import ( "context" "fmt" "io" "os" "strconv" "strings" "testing" "time" "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" testpb "google.golang.org/grpc/test/grpc_testing" ) func enableRetry() func() { old := envconfig.Retry envconfig.Retry = true return func() { envconfig.Retry = old } } func (s) TestRetryUnary(t *testing.T) { defer enableRetry()() i := -1 ss := &stubServer{ emptyCall: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { i++ switch i { case 0, 2, 5: return &testpb.Empty{}, nil case 6, 8, 11: return nil, status.New(codes.Internal, "non-retryable error").Err() } return nil, status.New(codes.AlreadyExists, "retryable error").Err() }, } if err := ss.Start([]grpc.ServerOption{}); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() ss.newServiceConfig(`{ "methodConfig": [{ "name": [{"service": "grpc.testing.TestService"}], "waitForReady": true, "retryPolicy": { "MaxAttempts": 4, "InitialBackoff": ".01s", "MaxBackoff": ".01s", "BackoffMultiplier": 1.0, "RetryableStatusCodes": [ "ALREADY_EXISTS" ] } }]}`) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) for { if ctx.Err() != nil { t.Fatalf("Timed out waiting for service config update") } if 
ss.cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil { break } time.Sleep(time.Millisecond) } cancel() testCases := []struct { code codes.Code count int }{ {codes.OK, 0}, {codes.OK, 2}, {codes.OK, 5}, {codes.Internal, 6}, {codes.Internal, 8}, {codes.Internal, 11}, {codes.AlreadyExists, 15}, } for _, tc := range testCases { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) _, err := ss.client.EmptyCall(ctx, &testpb.Empty{}) cancel() if status.Code(err) != tc.code { t.Fatalf("EmptyCall(_, _) = _, %v; want _, ", err, tc.code) } if i != tc.count { t.Fatalf("i = %v; want %v", i, tc.count) } } } func (s) TestRetryDisabledByDefault(t *testing.T) { if strings.EqualFold(os.Getenv("GRPC_GO_RETRY"), "on") { return } i := -1 ss := &stubServer{ emptyCall: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { i++ switch i { case 0: return nil, status.New(codes.AlreadyExists, "retryable error").Err() } return &testpb.Empty{}, nil }, } if err := ss.Start([]grpc.ServerOption{}); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() ss.newServiceConfig(`{ "methodConfig": [{ "name": [{"service": "grpc.testing.TestService"}], "waitForReady": true, "retryPolicy": { "MaxAttempts": 4, "InitialBackoff": ".01s", "MaxBackoff": ".01s", "BackoffMultiplier": 1.0, "RetryableStatusCodes": [ "ALREADY_EXISTS" ] } }]}`) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) for { if ctx.Err() != nil { t.Fatalf("Timed out waiting for service config update") } if ss.cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil { break } time.Sleep(time.Millisecond) } cancel() testCases := []struct { code codes.Code count int }{ {codes.AlreadyExists, 0}, } for _, tc := range testCases { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) _, err := ss.client.EmptyCall(ctx, &testpb.Empty{}) cancel() if status.Code(err) != tc.code { t.Fatalf("EmptyCall(_, _) = _, 
%v; want _, ", err, tc.code) } if i != tc.count { t.Fatalf("i = %v; want %v", i, tc.count) } } } func (s) TestRetryThrottling(t *testing.T) { defer enableRetry()() i := -1 ss := &stubServer{ emptyCall: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { i++ switch i { case 0, 3, 6, 10, 11, 12, 13, 14, 16, 18: return &testpb.Empty{}, nil } return nil, status.New(codes.Unavailable, "retryable error").Err() }, } if err := ss.Start([]grpc.ServerOption{}); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() ss.newServiceConfig(`{ "methodConfig": [{ "name": [{"service": "grpc.testing.TestService"}], "waitForReady": true, "retryPolicy": { "MaxAttempts": 4, "InitialBackoff": ".01s", "MaxBackoff": ".01s", "BackoffMultiplier": 1.0, "RetryableStatusCodes": [ "UNAVAILABLE" ] } }], "retryThrottling": { "maxTokens": 10, "tokenRatio": 0.5 } }`) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) for { if ctx.Err() != nil { t.Fatalf("Timed out waiting for service config update") } if ss.cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil { break } time.Sleep(time.Millisecond) } cancel() testCases := []struct { code codes.Code count int }{ {codes.OK, 0}, // tokens = 10 {codes.OK, 3}, // tokens = 8.5 (10 - 2 failures + 0.5 success) {codes.OK, 6}, // tokens = 6 {codes.Unavailable, 8}, // tokens = 5 -- first attempt is retried; second aborted. 
{codes.Unavailable, 9}, // tokens = 4 {codes.OK, 10}, // tokens = 4.5 {codes.OK, 11}, // tokens = 5 {codes.OK, 12}, // tokens = 5.5 {codes.OK, 13}, // tokens = 6 {codes.OK, 14}, // tokens = 6.5 {codes.OK, 16}, // tokens = 5.5 {codes.Unavailable, 17}, // tokens = 4.5 } for _, tc := range testCases { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) _, err := ss.client.EmptyCall(ctx, &testpb.Empty{}) cancel() if status.Code(err) != tc.code { t.Errorf("EmptyCall(_, _) = _, %v; want _, ", err, tc.code) } if i != tc.count { t.Errorf("i = %v; want %v", i, tc.count) } } } func (s) TestRetryStreaming(t *testing.T) { defer enableRetry()() req := func(b byte) *testpb.StreamingOutputCallRequest { return &testpb.StreamingOutputCallRequest{Payload: &testpb.Payload{Body: []byte{b}}} } res := func(b byte) *testpb.StreamingOutputCallResponse { return &testpb.StreamingOutputCallResponse{Payload: &testpb.Payload{Body: []byte{b}}} } largePayload, _ := newPayload(testpb.PayloadType_COMPRESSABLE, 500) type serverOp func(stream testpb.TestService_FullDuplexCallServer) error type clientOp func(stream testpb.TestService_FullDuplexCallClient) error // Server Operations sAttempts := func(n int) serverOp { return func(stream testpb.TestService_FullDuplexCallServer) error { const key = "grpc-previous-rpc-attempts" md, ok := metadata.FromIncomingContext(stream.Context()) if !ok { return status.Errorf(codes.Internal, "server: no header metadata received") } if got := md[key]; len(got) != 1 || got[0] != strconv.Itoa(n) { return status.Errorf(codes.Internal, "server: metadata = %v; want ", md, key, n) } return nil } } sReq := func(b byte) serverOp { return func(stream testpb.TestService_FullDuplexCallServer) error { want := req(b) if got, err := stream.Recv(); err != nil || !proto.Equal(got, want) { return status.Errorf(codes.Internal, "server: Recv() = %v, %v; want %v, ", got, err, want) } return nil } } sReqPayload := func(p *testpb.Payload) serverOp { return func(stream 
testpb.TestService_FullDuplexCallServer) error { want := &testpb.StreamingOutputCallRequest{Payload: p} if got, err := stream.Recv(); err != nil || !proto.Equal(got, want) { return status.Errorf(codes.Internal, "server: Recv() = %v, %v; want %v, ", got, err, want) } return nil } } sRes := func(b byte) serverOp { return func(stream testpb.TestService_FullDuplexCallServer) error { msg := res(b) if err := stream.Send(msg); err != nil { return status.Errorf(codes.Internal, "server: Send(%v) = %v; want ", msg, err) } return nil } } sErr := func(c codes.Code) serverOp { return func(stream testpb.TestService_FullDuplexCallServer) error { return status.New(c, "").Err() } } sCloseSend := func() serverOp { return func(stream testpb.TestService_FullDuplexCallServer) error { if msg, err := stream.Recv(); msg != nil || err != io.EOF { return status.Errorf(codes.Internal, "server: Recv() = %v, %v; want , io.EOF", msg, err) } return nil } } sPushback := func(s string) serverOp { return func(stream testpb.TestService_FullDuplexCallServer) error { stream.SetTrailer(metadata.MD{"grpc-retry-pushback-ms": []string{s}}) return nil } } // Client Operations cReq := func(b byte) clientOp { return func(stream testpb.TestService_FullDuplexCallClient) error { msg := req(b) if err := stream.Send(msg); err != nil { return fmt.Errorf("client: Send(%v) = %v; want ", msg, err) } return nil } } cReqPayload := func(p *testpb.Payload) clientOp { return func(stream testpb.TestService_FullDuplexCallClient) error { msg := &testpb.StreamingOutputCallRequest{Payload: p} if err := stream.Send(msg); err != nil { return fmt.Errorf("client: Send(%v) = %v; want ", msg, err) } return nil } } cRes := func(b byte) clientOp { return func(stream testpb.TestService_FullDuplexCallClient) error { want := res(b) if got, err := stream.Recv(); err != nil || !proto.Equal(got, want) { return fmt.Errorf("client: Recv() = %v, %v; want %v, ", got, err, want) } return nil } } cErr := func(c codes.Code) clientOp { return 
func(stream testpb.TestService_FullDuplexCallClient) error { want := status.New(c, "").Err() if c == codes.OK { want = io.EOF } res, err := stream.Recv() if res != nil || ((err == nil) != (want == nil)) || (want != nil && err.Error() != want.Error()) { return fmt.Errorf("client: Recv() = %v, %v; want , %v", res, err, want) } return nil } } cCloseSend := func() clientOp { return func(stream testpb.TestService_FullDuplexCallClient) error { if err := stream.CloseSend(); err != nil { return fmt.Errorf("client: CloseSend() = %v; want ", err) } return nil } } var curTime time.Time cGetTime := func() clientOp { return func(_ testpb.TestService_FullDuplexCallClient) error { curTime = time.Now() return nil } } cCheckElapsed := func(d time.Duration) clientOp { return func(_ testpb.TestService_FullDuplexCallClient) error { if elapsed := time.Since(curTime); elapsed < d { return fmt.Errorf("elapsed time: %v; want >= %v", elapsed, d) } return nil } } cHdr := func() clientOp { return func(stream testpb.TestService_FullDuplexCallClient) error { _, err := stream.Header() return err } } cCtx := func() clientOp { return func(stream testpb.TestService_FullDuplexCallClient) error { stream.Context() return nil } } testCases := []struct { desc string serverOps []serverOp clientOps []clientOp }{{ desc: "Non-retryable error code", serverOps: []serverOp{sReq(1), sErr(codes.Internal)}, clientOps: []clientOp{cReq(1), cErr(codes.Internal)}, }, { desc: "One retry necessary", serverOps: []serverOp{sReq(1), sErr(codes.Unavailable), sReq(1), sAttempts(1), sRes(1)}, clientOps: []clientOp{cReq(1), cRes(1), cErr(codes.OK)}, }, { desc: "Exceed max attempts (4); check attempts header on server", serverOps: []serverOp{ sReq(1), sErr(codes.Unavailable), sReq(1), sAttempts(1), sErr(codes.Unavailable), sAttempts(2), sReq(1), sErr(codes.Unavailable), sAttempts(3), sReq(1), sErr(codes.Unavailable), }, clientOps: []clientOp{cReq(1), cErr(codes.Unavailable)}, }, { desc: "Multiple requests", serverOps: 
[]serverOp{ sReq(1), sReq(2), sErr(codes.Unavailable), sReq(1), sReq(2), sRes(5), }, clientOps: []clientOp{cReq(1), cReq(2), cRes(5), cErr(codes.OK)}, }, { desc: "Multiple successive requests", serverOps: []serverOp{ sReq(1), sErr(codes.Unavailable), sReq(1), sReq(2), sErr(codes.Unavailable), sReq(1), sReq(2), sReq(3), sRes(5), }, clientOps: []clientOp{cReq(1), cReq(2), cReq(3), cRes(5), cErr(codes.OK)}, }, { desc: "No retry after receiving", serverOps: []serverOp{ sReq(1), sErr(codes.Unavailable), sReq(1), sRes(3), sErr(codes.Unavailable), }, clientOps: []clientOp{cReq(1), cRes(3), cErr(codes.Unavailable)}, }, { desc: "No retry after header", serverOps: []serverOp{sReq(1), sErr(codes.Unavailable)}, clientOps: []clientOp{cReq(1), cHdr(), cErr(codes.Unavailable)}, }, { desc: "No retry after context", serverOps: []serverOp{sReq(1), sErr(codes.Unavailable)}, clientOps: []clientOp{cReq(1), cCtx(), cErr(codes.Unavailable)}, }, { desc: "Replaying close send", serverOps: []serverOp{ sReq(1), sReq(2), sCloseSend(), sErr(codes.Unavailable), sReq(1), sReq(2), sCloseSend(), sRes(1), sRes(3), sRes(5), }, clientOps: []clientOp{cReq(1), cReq(2), cCloseSend(), cRes(1), cRes(3), cRes(5), cErr(codes.OK)}, }, { desc: "Negative server pushback - no retry", serverOps: []serverOp{sReq(1), sPushback("-1"), sErr(codes.Unavailable)}, clientOps: []clientOp{cReq(1), cErr(codes.Unavailable)}, }, { desc: "Non-numeric server pushback - no retry", serverOps: []serverOp{sReq(1), sPushback("xxx"), sErr(codes.Unavailable)}, clientOps: []clientOp{cReq(1), cErr(codes.Unavailable)}, }, { desc: "Multiple server pushback values - no retry", serverOps: []serverOp{sReq(1), sPushback("100"), sPushback("10"), sErr(codes.Unavailable)}, clientOps: []clientOp{cReq(1), cErr(codes.Unavailable)}, }, { desc: "1s server pushback - delayed retry", serverOps: []serverOp{sReq(1), sPushback("1000"), sErr(codes.Unavailable), sReq(1), sRes(2)}, clientOps: []clientOp{cGetTime(), cReq(1), cRes(2), 
cCheckElapsed(time.Second), cErr(codes.OK)}, }, { desc: "Overflowing buffer - no retry", serverOps: []serverOp{sReqPayload(largePayload), sErr(codes.Unavailable)}, clientOps: []clientOp{cReqPayload(largePayload), cErr(codes.Unavailable)}, }} var serverOpIter int var serverOps []serverOp ss := &stubServer{ fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { for serverOpIter < len(serverOps) { op := serverOps[serverOpIter] serverOpIter++ if err := op(stream); err != nil { return err } } return nil }, } if err := ss.Start([]grpc.ServerOption{}, grpc.WithDefaultCallOptions(grpc.MaxRetryRPCBufferSize(200))); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() ss.newServiceConfig(`{ "methodConfig": [{ "name": [{"service": "grpc.testing.TestService"}], "waitForReady": true, "retryPolicy": { "MaxAttempts": 4, "InitialBackoff": ".01s", "MaxBackoff": ".01s", "BackoffMultiplier": 1.0, "RetryableStatusCodes": [ "UNAVAILABLE" ] } }]}`) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) for { if ctx.Err() != nil { t.Fatalf("Timed out waiting for service config update") } if ss.cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").WaitForReady != nil { break } time.Sleep(time.Millisecond) } cancel() for _, tc := range testCases { func() { serverOpIter = 0 serverOps = tc.serverOps ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() stream, err := ss.client.FullDuplexCall(ctx) if err != nil { t.Fatalf("%v: Error while creating stream: %v", tc.desc, err) } for _, op := range tc.clientOps { if err := op(stream); err != nil { t.Errorf("%v: %v", tc.desc, err) break } } if serverOpIter != len(serverOps) { t.Errorf("%v: serverOpIter = %v; want %v", tc.desc, serverOpIter, len(serverOps)) } }() } } grpc-go-1.29.1/test/server_test.go000066400000000000000000000225471365033716300170360ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package test import ( "context" "io" "testing" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" testpb "google.golang.org/grpc/test/grpc_testing" ) type ctxKey string func (s) TestChainUnaryServerInterceptor(t *testing.T) { var ( firstIntKey = ctxKey("firstIntKey") secondIntKey = ctxKey("secondIntKey") ) firstInt := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { if ctx.Value(firstIntKey) != nil { return nil, status.Errorf(codes.Internal, "first interceptor should not have %v in context", firstIntKey) } if ctx.Value(secondIntKey) != nil { return nil, status.Errorf(codes.Internal, "first interceptor should not have %v in context", secondIntKey) } firstCtx := context.WithValue(ctx, firstIntKey, 0) resp, err := handler(firstCtx, req) if err != nil { return nil, status.Errorf(codes.Internal, "failed to handle request at firstInt") } simpleResp, ok := resp.(*testpb.SimpleResponse) if !ok { return nil, status.Errorf(codes.Internal, "failed to get *testpb.SimpleResponse at firstInt") } return &testpb.SimpleResponse{ Payload: &testpb.Payload{ Type: simpleResp.GetPayload().GetType(), Body: append(simpleResp.GetPayload().GetBody(), '1'), }, }, nil } secondInt := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { if ctx.Value(firstIntKey) == nil { return nil, 
status.Errorf(codes.Internal, "second interceptor should have %v in context", firstIntKey) } if ctx.Value(secondIntKey) != nil { return nil, status.Errorf(codes.Internal, "second interceptor should not have %v in context", secondIntKey) } secondCtx := context.WithValue(ctx, secondIntKey, 1) resp, err := handler(secondCtx, req) if err != nil { return nil, status.Errorf(codes.Internal, "failed to handle request at secondInt") } simpleResp, ok := resp.(*testpb.SimpleResponse) if !ok { return nil, status.Errorf(codes.Internal, "failed to get *testpb.SimpleResponse at secondInt") } return &testpb.SimpleResponse{ Payload: &testpb.Payload{ Type: simpleResp.GetPayload().GetType(), Body: append(simpleResp.GetPayload().GetBody(), '2'), }, }, nil } lastInt := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { if ctx.Value(firstIntKey) == nil { return nil, status.Errorf(codes.Internal, "last interceptor should have %v in context", firstIntKey) } if ctx.Value(secondIntKey) == nil { return nil, status.Errorf(codes.Internal, "last interceptor should not have %v in context", secondIntKey) } resp, err := handler(ctx, req) if err != nil { return nil, status.Errorf(codes.Internal, "failed to handle request at lastInt at lastInt") } simpleResp, ok := resp.(*testpb.SimpleResponse) if !ok { return nil, status.Errorf(codes.Internal, "failed to get *testpb.SimpleResponse at lastInt") } return &testpb.SimpleResponse{ Payload: &testpb.Payload{ Type: simpleResp.GetPayload().GetType(), Body: append(simpleResp.GetPayload().GetBody(), '3'), }, }, nil } sopts := []grpc.ServerOption{ grpc.ChainUnaryInterceptor(firstInt, secondInt, lastInt), } ss := &stubServer{ unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 0) if err != nil { return nil, status.Errorf(codes.Aborted, "failed to make payload: %v", err) } return 
&testpb.SimpleResponse{ Payload: payload, }, nil }, } if err := ss.Start(sopts); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() resp, err := ss.client.UnaryCall(context.Background(), &testpb.SimpleRequest{}) if s, ok := status.FromError(err); !ok || s.Code() != codes.OK { t.Fatalf("ss.client.UnaryCall(context.Background(), _) = %v, %v; want nil, ", resp, err) } respBytes := resp.Payload.GetBody() if string(respBytes) != "321" { t.Fatalf("invalid response: want=%s, but got=%s", "321", resp) } } func (s) TestChainOnBaseUnaryServerInterceptor(t *testing.T) { baseIntKey := ctxKey("baseIntKey") baseInt := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { if ctx.Value(baseIntKey) != nil { return nil, status.Errorf(codes.Internal, "base interceptor should not have %v in context", baseIntKey) } baseCtx := context.WithValue(ctx, baseIntKey, 1) return handler(baseCtx, req) } chainInt := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { if ctx.Value(baseIntKey) == nil { return nil, status.Errorf(codes.Internal, "chain interceptor should have %v in context", baseIntKey) } return handler(ctx, req) } sopts := []grpc.ServerOption{ grpc.UnaryInterceptor(baseInt), grpc.ChainUnaryInterceptor(chainInt), } ss := &stubServer{ emptyCall: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, } if err := ss.Start(sopts); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() resp, err := ss.client.EmptyCall(context.Background(), &testpb.Empty{}) if s, ok := status.FromError(err); !ok || s.Code() != codes.OK { t.Fatalf("ss.client.EmptyCall(context.Background(), _) = %v, %v; want nil, ", resp, err) } } func (s) TestChainStreamServerInterceptor(t *testing.T) { callCounts := make([]int, 4) firstInt := func(srv interface{}, stream 
grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { if callCounts[0] != 0 { return status.Errorf(codes.Internal, "callCounts[0] should be 0, but got=%d", callCounts[0]) } if callCounts[1] != 0 { return status.Errorf(codes.Internal, "callCounts[1] should be 0, but got=%d", callCounts[1]) } if callCounts[2] != 0 { return status.Errorf(codes.Internal, "callCounts[2] should be 0, but got=%d", callCounts[2]) } if callCounts[3] != 0 { return status.Errorf(codes.Internal, "callCounts[3] should be 0, but got=%d", callCounts[3]) } callCounts[0]++ return handler(srv, stream) } secondInt := func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { if callCounts[0] != 1 { return status.Errorf(codes.Internal, "callCounts[0] should be 1, but got=%d", callCounts[0]) } if callCounts[1] != 0 { return status.Errorf(codes.Internal, "callCounts[1] should be 0, but got=%d", callCounts[1]) } if callCounts[2] != 0 { return status.Errorf(codes.Internal, "callCounts[2] should be 0, but got=%d", callCounts[2]) } if callCounts[3] != 0 { return status.Errorf(codes.Internal, "callCounts[3] should be 0, but got=%d", callCounts[3]) } callCounts[1]++ return handler(srv, stream) } lastInt := func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { if callCounts[0] != 1 { return status.Errorf(codes.Internal, "callCounts[0] should be 1, but got=%d", callCounts[0]) } if callCounts[1] != 1 { return status.Errorf(codes.Internal, "callCounts[1] should be 1, but got=%d", callCounts[1]) } if callCounts[2] != 0 { return status.Errorf(codes.Internal, "callCounts[2] should be 0, but got=%d", callCounts[2]) } if callCounts[3] != 0 { return status.Errorf(codes.Internal, "callCounts[3] should be 0, but got=%d", callCounts[3]) } callCounts[2]++ return handler(srv, stream) } sopts := []grpc.ServerOption{ grpc.ChainStreamInterceptor(firstInt, secondInt, lastInt), } ss := 
&stubServer{ fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { if callCounts[0] != 1 { return status.Errorf(codes.Internal, "callCounts[0] should be 1, but got=%d", callCounts[0]) } if callCounts[1] != 1 { return status.Errorf(codes.Internal, "callCounts[1] should be 1, but got=%d", callCounts[1]) } if callCounts[2] != 1 { return status.Errorf(codes.Internal, "callCounts[2] should be 0, but got=%d", callCounts[2]) } if callCounts[3] != 0 { return status.Errorf(codes.Internal, "callCounts[3] should be 0, but got=%d", callCounts[3]) } callCounts[3]++ return nil }, } if err := ss.Start(sopts); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() stream, err := ss.client.FullDuplexCall(context.Background()) if err != nil { t.Fatalf("failed to FullDuplexCall: %v", err) } _, err = stream.Recv() if err != io.EOF { t.Fatalf("failed to recv from stream: %v", err) } if callCounts[3] != 1 { t.Fatalf("callCounts[3] should be 1, but got=%d", callCounts[3]) } } grpc-go-1.29.1/test/servertester.go000066400000000000000000000151141365033716300172160ustar00rootroot00000000000000/* * Copyright 2016 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package test import ( "bytes" "errors" "io" "strings" "testing" "time" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" ) // This is a subset of http2's serverTester type. 
// // serverTester wraps a io.ReadWriter (acting like the underlying // network connection) and provides utility methods to read and write // http2 frames. // // NOTE(bradfitz): this could eventually be exported somewhere. Others // have asked for it too. For now I'm still experimenting with the // API and don't feel like maintaining a stable testing API. type serverTester struct { cc io.ReadWriteCloser // client conn t testing.TB fr *http2.Framer // writing headers: headerBuf bytes.Buffer hpackEnc *hpack.Encoder // reading frames: frc chan http2.Frame frErrc chan error } func newServerTesterFromConn(t testing.TB, cc io.ReadWriteCloser) *serverTester { st := &serverTester{ t: t, cc: cc, frc: make(chan http2.Frame, 1), frErrc: make(chan error, 1), } st.hpackEnc = hpack.NewEncoder(&st.headerBuf) st.fr = http2.NewFramer(cc, cc) st.fr.ReadMetaHeaders = hpack.NewDecoder(4096 /*initialHeaderTableSize*/, nil) return st } func (st *serverTester) readFrame() (http2.Frame, error) { go func() { fr, err := st.fr.ReadFrame() if err != nil { st.frErrc <- err } else { st.frc <- fr } }() t := time.NewTimer(2 * time.Second) defer t.Stop() select { case f := <-st.frc: return f, nil case err := <-st.frErrc: return nil, err case <-t.C: return nil, errors.New("timeout waiting for frame") } } // greet initiates the client's HTTP/2 connection into a state where // frames may be sent. func (st *serverTester) greet() { st.writePreface() st.writeInitialSettings() st.wantSettings() st.writeSettingsAck() for { f, err := st.readFrame() if err != nil { st.t.Fatal(err) } switch f := f.(type) { case *http2.WindowUpdateFrame: // grpc's transport/http2_server sends this // before the settings ack. The Go http2 // server uses a setting instead. 
case *http2.SettingsFrame: if f.IsAck() { return } st.t.Fatalf("during greet, got non-ACK settings frame") default: st.t.Fatalf("during greet, unexpected frame type %T", f) } } } func (st *serverTester) writePreface() { n, err := st.cc.Write([]byte(http2.ClientPreface)) if err != nil { st.t.Fatalf("Error writing client preface: %v", err) } if n != len(http2.ClientPreface) { st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(http2.ClientPreface)) } } func (st *serverTester) writeInitialSettings() { if err := st.fr.WriteSettings(); err != nil { st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) } } func (st *serverTester) writeSettingsAck() { if err := st.fr.WriteSettingsAck(); err != nil { st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err) } } func (st *serverTester) wantSettings() *http2.SettingsFrame { f, err := st.readFrame() if err != nil { st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) } sf, ok := f.(*http2.SettingsFrame) if !ok { st.t.Fatalf("got a %T; want *SettingsFrame", f) } return sf } // wait for any activity from the server func (st *serverTester) wantAnyFrame() http2.Frame { f, err := st.fr.ReadFrame() if err != nil { st.t.Fatal(err) } return f } func (st *serverTester) encodeHeaderField(k, v string) { err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) if err != nil { st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) } } // encodeHeader encodes headers and returns their HPACK bytes. headers // must contain an even number of key/value pairs. There may be // multiple pairs for keys (e.g. "cookie"). The :method, :path, and // :scheme headers default to GET, / and https. 
func (st *serverTester) encodeHeader(headers ...string) []byte { if len(headers)%2 == 1 { panic("odd number of kv args") } st.headerBuf.Reset() if len(headers) == 0 { // Fast path, mostly for benchmarks, so test code doesn't pollute // profiles when we're looking to improve server allocations. st.encodeHeaderField(":method", "GET") st.encodeHeaderField(":path", "/") st.encodeHeaderField(":scheme", "https") return st.headerBuf.Bytes() } if len(headers) == 2 && headers[0] == ":method" { // Another fast path for benchmarks. st.encodeHeaderField(":method", headers[1]) st.encodeHeaderField(":path", "/") st.encodeHeaderField(":scheme", "https") return st.headerBuf.Bytes() } pseudoCount := map[string]int{} keys := []string{":method", ":path", ":scheme"} vals := map[string][]string{ ":method": {"GET"}, ":path": {"/"}, ":scheme": {"https"}, } for len(headers) > 0 { k, v := headers[0], headers[1] headers = headers[2:] if _, ok := vals[k]; !ok { keys = append(keys, k) } if strings.HasPrefix(k, ":") { pseudoCount[k]++ if pseudoCount[k] == 1 { vals[k] = []string{v} } else { // Allows testing of invalid headers w/ dup pseudo fields. 
vals[k] = append(vals[k], v) } } else { vals[k] = append(vals[k], v) } } for _, k := range keys { for _, v := range vals[k] { st.encodeHeaderField(k, v) } } return st.headerBuf.Bytes() } func (st *serverTester) writeHeadersGRPC(streamID uint32, path string) { st.writeHeaders(http2.HeadersFrameParam{ StreamID: streamID, BlockFragment: st.encodeHeader( ":method", "POST", ":path", path, "content-type", "application/grpc", "te", "trailers", ), EndStream: false, EndHeaders: true, }) } func (st *serverTester) writeHeaders(p http2.HeadersFrameParam) { if err := st.fr.WriteHeaders(p); err != nil { st.t.Fatalf("Error writing HEADERS: %v", err) } } func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { if err := st.fr.WriteData(streamID, endStream, data); err != nil { st.t.Fatalf("Error writing DATA: %v", err) } } func (st *serverTester) writeRSTStream(streamID uint32, code http2.ErrCode) { if err := st.fr.WriteRSTStream(streamID, code); err != nil { st.t.Fatalf("Error writing RST_STREAM: %v", err) } } grpc-go-1.29.1/test/stream_cleanup_test.go000066400000000000000000000111311365033716300205150ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package test import ( "context" "io" "testing" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" testpb "google.golang.org/grpc/test/grpc_testing" ) func (s) TestStreamCleanup(t *testing.T) { const initialWindowSize uint = 70 * 1024 // Must be higher than default 64K, ignored otherwise const bodySize = 2 * initialWindowSize // Something that is not going to fit in a single window const callRecvMsgSize uint = 1 // The maximum message size the client can receive ss := &stubServer{ unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{Payload: &testpb.Payload{ Body: make([]byte, bodySize), }}, nil }, emptyCall: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, } if err := ss.Start([]grpc.ServerOption{grpc.MaxConcurrentStreams(1)}, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(callRecvMsgSize))), grpc.WithInitialWindowSize(int32(initialWindowSize))); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() if _, err := ss.client.UnaryCall(context.Background(), &testpb.SimpleRequest{}); status.Code(err) != codes.ResourceExhausted { t.Fatalf("should fail with ResourceExhausted, message's body size: %v, maximum message size the client can receive: %v", bodySize, callRecvMsgSize) } if _, err := ss.client.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("should succeed, err: %v", err) } } func (s) TestStreamCleanupAfterSendStatus(t *testing.T) { const initialWindowSize uint = 70 * 1024 // Must be higher than default 64K, ignored otherwise const bodySize = 2 * initialWindowSize // Something that is not going to fit in a single window serverReturnedStatus := make(chan struct{}) ss := &stubServer{ fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { defer func() { close(serverReturnedStatus) }() return 
stream.Send(&testpb.StreamingOutputCallResponse{ Payload: &testpb.Payload{ Body: make([]byte, bodySize), }, }) }, } if err := ss.Start([]grpc.ServerOption{grpc.MaxConcurrentStreams(1)}, grpc.WithInitialWindowSize(int32(initialWindowSize))); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() // This test makes sure we don't delete stream from server transport's // activeStreams list too aggressively. // 1. Make a long living stream RPC. So server's activeStream list is not // empty. ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() stream, err := ss.client.FullDuplexCall(ctx) if err != nil { t.Fatalf("FullDuplexCall= _, %v; want _, ", err) } // 2. Wait for service handler to return status. // // This will trigger a stream cleanup code, which will eventually remove // this stream from activeStream. // // But the stream removal won't happen because it's supposed to be done // after the status is sent by loopyWriter, and the status send is blocked // by flow control. <-serverReturnedStatus // 3. GracefulStop (besides sending goaway) checks the number of // activeStreams. // // It will close the connection if there's no active streams. This won't // happen because of the pending stream. But if there's a bug in stream // cleanup that causes stream to be removed too aggressively, the connection // will be closd and the stream will be broken. gracefulStopDone := make(chan struct{}) go func() { defer close(gracefulStopDone) ss.s.GracefulStop() }() // 4. Make sure the stream is not broken. 
if _, err := stream.Recv(); err != nil { t.Fatalf("stream.Recv() = _, %v, want _, ", err) } if _, err := stream.Recv(); err != io.EOF { t.Fatalf("stream.Recv() = _, %v, want _, io.EOF", err) } timer := time.NewTimer(time.Second) select { case <-gracefulStopDone: timer.Stop() case <-timer.C: t.Fatalf("s.GracefulStop() didn't finish without 1 second after the last RPC") } } grpc-go-1.29.1/test/tools/000077500000000000000000000000001365033716300152705ustar00rootroot00000000000000grpc-go-1.29.1/test/tools/go.mod000066400000000000000000000005311365033716300163750ustar00rootroot00000000000000module google.golang.org/grpc/test/tools go 1.11 require ( github.com/BurntSushi/toml v0.3.1 // indirect github.com/client9/misspell v0.3.4 github.com/golang/protobuf v1.3.3 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc ) grpc-go-1.29.1/test/tools/go.sum000066400000000000000000000033471365033716300164320ustar00rootroot00000000000000github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= grpc-go-1.29.1/test/tools/tools.go000066400000000000000000000021231365033716300167550ustar00rootroot00000000000000// +build tools /* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // This package exists to cause `go mod` and `go get` to believe these tools // are dependencies, even though they are not runtime dependencies of any grpc // package. This means they will appear in our `go.mod` file, but will not be // a part of the build. 
package tools import ( _ "github.com/client9/misspell/cmd/misspell" _ "github.com/golang/protobuf/protoc-gen-go" _ "golang.org/x/lint/golint" _ "golang.org/x/tools/cmd/goimports" _ "honnef.co/go/tools/cmd/staticcheck" ) grpc-go-1.29.1/testdata/000077500000000000000000000000001365033716300147625ustar00rootroot00000000000000grpc-go-1.29.1/testdata/ca.pem000066400000000000000000000023101365033716300160440ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDWjCCAkKgAwIBAgIUWrP0VvHcy+LP6UuYNtiL9gBhD5owDQYJKoZIhvcNAQEL BQAwVjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAwwGdGVzdGNhMB4XDTIw MDMxNzE4NTk1MVoXDTMwMDMxNTE4NTk1MVowVjELMAkGA1UEBhMCQVUxEzARBgNV BAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0 ZDEPMA0GA1UEAwwGdGVzdGNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC AQEAsGL0oXflF0LzoM+Bh+qUU9yhqzw2w8OOX5mu/iNCyUOBrqaHi7mGHx73GD01 diNzCzvlcQqdNIH6NQSL7DTpBjca66jYT9u73vZe2MDrr1nVbuLvfu9850cdxiUO Inv5xf8+sTHG0C+a+VAvMhsLiRjsq+lXKRJyk5zkbbsETybqpxoJ+K7CoSy3yc/k QIY3TipwEtwkKP4hzyo6KiGd/DPexie4nBUInN3bS1BUeNZ5zeaIC2eg3bkeeW7c qT55b+Yen6CxY0TEkzBK6AKt/WUialKMgT0wbTxRZO7kUCH3Sq6e/wXeFdJ+HvdV LPlAg5TnMaNpRdQih/8nRFpsdwIDAQABoyAwHjAMBgNVHRMEBTADAQH/MA4GA1Ud DwEB/wQEAwICBDANBgkqhkiG9w0BAQsFAAOCAQEAkTrKZjBrJXHps/HrjNCFPb5a THuGPCSsepe1wkKdSp1h4HGRpLoCgcLysCJ5hZhRpHkRihhef+rFHEe60UePQO3S CVTtdJB4CYWpcNyXOdqefrbJW5QNljxgi6Fhvs7JJkBqdXIkWXtFk2eRgOIP2Eo9 /OHQHlYnwZFrk6sp4wPyR+A95S0toZBcyDVz7u+hOW0pGK3wviOe9lvRgj/H3Pwt bewb0l+MhRig0/DVHamyVxrDRbqInU1/GTNCwcZkXKYFWSf92U+kIcTth24Q1gcw eZiLl5FfrWokUNytFElXob0V0a5/kbhiLc3yWmvWqHTpqCALbVyF+rKJo2f5Kw== -----END CERTIFICATE----- grpc-go-1.29.1/testdata/server1.key000066400000000000000000000032541365033716300170670ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDnE443EknxvxBq 6+hvn/t09hl8hx366EBYvZmVM/NC+7igXRAjiJiA/mIaCvL3MS0Iz5hBLxSGICU+ WproA3GCIFITIwcf/ETyWj/5xpgZ4AKrLrjQmmX8mhwUajfF3UvwMJrCOVqPp67t 
PtP+2kBXaqrXdvnvXR41FsIB8V7zIAuIZB6bHQhiGVlc1sgZYsE2EGG9WMmHtS86 qkAOTjG2XyjmPTGAwhGDpYkYrpzp99IiDh4/Veai81hn0ssQkbry0XRD/Ig3jcHh 23WiriPNJ0JsbgXUSLKRPZObA9VgOLy2aXoN84IMaeK3yy+cwSYG/99w93fUZJte MXwz4oYZAgMBAAECggEBAIVn2Ncai+4xbH0OLWckabwgyJ4IM9rDc0LIU368O1kU koais8qP9dujAWgfoh3sGh/YGgKn96VnsZjKHlyMgF+r4TaDJn3k2rlAOWcurGlj 1qaVlsV4HiEzp7pxiDmHhWvp4672Bb6iBG+bsjCUOEk/n9o9KhZzIBluRhtxCmw5 nw4Do7z00PTvN81260uPWSc04IrytvZUiAIx/5qxD72bij2xJ8t/I9GI8g4FtoVB 8pB6S/hJX1PZhh9VlU6Yk+TOfOVnbebG4W5138LkB835eqk3Zz0qsbc2euoi8Hxi y1VGwQEmMQ63jXz4c6g+X55ifvUK9Jpn5E8pq+pMd7ECgYEA93lYq+Cr54K4ey5t sWMa+ye5RqxjzgXj2Kqr55jb54VWG7wp2iGbg8FMlkQwzTJwebzDyCSatguEZLuB gRGroRnsUOy9vBvhKPOch9bfKIl6qOgzMJB267fBVWx5ybnRbWN/I7RvMQf3k+9y biCIVnxDLEEYyx7z85/5qxsXg/MCgYEA7wmWKtCTn032Hy9P8OL49T0X6Z8FlkDC Rk42ygrc/MUbugq9RGUxcCxoImOG9JXUpEtUe31YDm2j+/nbvrjl6/bP2qWs0V7l dTJl6dABP51pCw8+l4cWgBBX08Lkeen812AAFNrjmDCjX6rHjWHLJcpS18fnRRkP V1d/AHWX7MMCgYEA6Gsw2guhp0Zf2GCcaNK5DlQab8OL4Hwrpttzo4kuTlwtqNKp Q9H4al9qfF4Cr1TFya98+EVYf8yFRM3NLNjZpe3gwYf2EerlJj7VLcahw0KKzoN1 QBENfwgPLRk5sDkx9VhSmcfl/diLroZdpAwtv3vo4nEoxeuGFbKTGx3Qkf0CgYEA xyR+dcb05Ygm3w4klHQTowQ10s1H80iaUcZBgQuR1ghEtDbUPZHsoR5t1xCB02ys DgAwLv1bChIvxvH/L6KM8ovZ2LekBX4AviWxoBxJnfz/EVau98B0b1auRN6eSC83 FRuGldlSOW1z/nSh8ViizSYE5H5HX1qkXEippvFRE88CgYB3Bfu3YQY60ITWIShv nNkdcbTT9eoP9suaRJjw92Ln+7ZpALYlQMKUZmJ/5uBmLs4RFwUTQruLOPL4yLTH awADWUzs3IRr1fwn9E+zM8JVyKCnUEM3w4N5UZskGO2klashAd30hWO+knRv/y0r uGIYs9Ek7YXlXIRVrzMwcsrt1w== -----END PRIVATE KEY----- grpc-go-1.29.1/testdata/server1.pem000066400000000000000000000025021365033716300170530ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIDtDCCApygAwIBAgIUbJfTREJ6k6/+oInWhV1O1j3ZT0IwDQYJKoZIhvcNAQEL BQAwVjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAwwGdGVzdGNhMB4XDTIw MDMxODAzMTA0MloXDTMwMDMxNjAzMTA0MlowZTELMAkGA1UEBhMCVVMxETAPBgNV BAgMCElsbGlub2lzMRAwDgYDVQQHDAdDaGljYWdvMRUwEwYDVQQKDAxFeGFtcGxl 
LCBDby4xGjAYBgNVBAMMESoudGVzdC5nb29nbGUuY29tMIIBIjANBgkqhkiG9w0B AQEFAAOCAQ8AMIIBCgKCAQEA5xOONxJJ8b8Qauvob5/7dPYZfIcd+uhAWL2ZlTPz Qvu4oF0QI4iYgP5iGgry9zEtCM+YQS8UhiAlPlqa6ANxgiBSEyMHH/xE8lo/+caY GeACqy640Jpl/JocFGo3xd1L8DCawjlaj6eu7T7T/tpAV2qq13b5710eNRbCAfFe 8yALiGQemx0IYhlZXNbIGWLBNhBhvVjJh7UvOqpADk4xtl8o5j0xgMIRg6WJGK6c 6ffSIg4eP1XmovNYZ9LLEJG68tF0Q/yIN43B4dt1oq4jzSdCbG4F1EiykT2TmwPV YDi8tml6DfOCDGnit8svnMEmBv/fcPd31GSbXjF8M+KGGQIDAQABo2swaTAJBgNV HRMEAjAAMAsGA1UdDwQEAwIF4DBPBgNVHREESDBGghAqLnRlc3QuZ29vZ2xlLmZy ghh3YXRlcnpvb2kudGVzdC5nb29nbGUuYmWCEioudGVzdC55b3V0dWJlLmNvbYcE wKgBAzANBgkqhkiG9w0BAQsFAAOCAQEAS8hDQA8PSgipgAml7Q3/djwQ644ghWQv C2Kb+r30RCY1EyKNhnQnIIh/OUbBZvh0M0iYsy6xqXgfDhCB93AA6j0i5cS8fkhH Jl4RK0tSkGQ3YNY4NzXwQP/vmUgfkw8VBAZ4Y4GKxppdATjffIW+srbAmdDruIRM wPeikgOoRrXf0LA1fi4TqxARzeRwenQpayNfGHTvVF9aJkl8HoaMunTAdG5pIVcr 9GKi/gEMpXUJbbVv3U5frX1Wo4CFo+rZWJ/LyCMeb0jciNLxSdMwj/E/ZuExlyeZ gc9ctPjSMvgSyXEKv6Vwobleeg88V2ZgzenziORoWj4KszG/lbQZvg== -----END CERTIFICATE----- grpc-go-1.29.1/testdata/testdata.go000066400000000000000000000022101365033716300171150ustar00rootroot00000000000000/* * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package testdata import ( "path/filepath" "runtime" ) // basepath is the root directory of this package. 
var basepath string func init() { _, currentFile, _, _ := runtime.Caller(0) basepath = filepath.Dir(currentFile) } // Path returns the absolute path the given relative file or directory path, // relative to the google.golang.org/grpc/testdata directory in the user's GOPATH. // If rel is already absolute, it is returned unmodified. func Path(rel string) string { if filepath.IsAbs(rel) { return rel } return filepath.Join(basepath, rel) } grpc-go-1.29.1/trace.go000066400000000000000000000056141365033716300146040ustar00rootroot00000000000000/* * * Copyright 2015 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "bytes" "fmt" "io" "net" "strings" "sync" "time" "golang.org/x/net/trace" ) // EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. // This should only be set before any RPCs are sent or received by this program. var EnableTracing bool // methodFamily returns the trace family for the given method. // It turns "/pkg.Service/GetFoo" into "pkg.Service". func methodFamily(m string) string { m = strings.TrimPrefix(m, "/") // remove leading slash if i := strings.Index(m, "/"); i >= 0 { m = m[:i] // remove everything from second slash } return m } // traceInfo contains tracing information for an RPC. type traceInfo struct { tr trace.Trace firstLine firstLine } // firstLine is the first line of an RPC trace. // It may be mutated after construction; remoteAddr specifically may change // during client-side use. 
type firstLine struct { mu sync.Mutex client bool // whether this is a client (outgoing) RPC remoteAddr net.Addr deadline time.Duration // may be zero } func (f *firstLine) SetRemoteAddr(addr net.Addr) { f.mu.Lock() f.remoteAddr = addr f.mu.Unlock() } func (f *firstLine) String() string { f.mu.Lock() defer f.mu.Unlock() var line bytes.Buffer io.WriteString(&line, "RPC: ") if f.client { io.WriteString(&line, "to") } else { io.WriteString(&line, "from") } fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) if f.deadline != 0 { fmt.Fprint(&line, f.deadline) } else { io.WriteString(&line, "none") } return line.String() } const truncateSize = 100 func truncate(x string, l int) string { if l > len(x) { return x } return x[:l] } // payload represents an RPC request or response payload. type payload struct { sent bool // whether this is an outgoing payload msg interface{} // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? } func (p payload) String() string { if p.sent { return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) } return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) } type fmtStringer struct { format string a []interface{} } func (f *fmtStringer) String() string { return fmt.Sprintf(f.format, f.a...) } type stringer string func (s stringer) String() string { return string(s) } grpc-go-1.29.1/trace_test.go000066400000000000000000000023761365033716300156450ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "testing" ) func (s) TestMethodFamily(t *testing.T) { cases := []struct { desc string method string wantMethodFamily string }{ { desc: "No leading slash", method: "pkg.service/method", wantMethodFamily: "pkg.service", }, { desc: "Leading slash", method: "/pkg.service/method", wantMethodFamily: "pkg.service", }, } for _, ut := range cases { t.Run(ut.desc, func(t *testing.T) { if got := methodFamily(ut.method); got != ut.wantMethodFamily { t.Fatalf("methodFamily(%s) = %s, want %s", ut.method, got, ut.wantMethodFamily) } }) } } grpc-go-1.29.1/version.go000066400000000000000000000012531365033716300151660ustar00rootroot00000000000000/* * * Copyright 2018 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc // Version is the current grpc version. const Version = "1.29.1" grpc-go-1.29.1/vet.sh000077500000000000000000000112151365033716300143060ustar00rootroot00000000000000#!/bin/bash set -ex # Exit on error; debugging enabled. set -o pipefail # Fail a pipe if any sub-command fails. # not makes sure the command passed to it does not exit with a return code of 0. not() { # This is required instead of the earlier (! $COMMAND) because subshells and # pipefail don't work the same on Darwin as in Linux. ! 
"$@" } die() { echo "$@" >&2 exit 1 } fail_on_output() { tee /dev/stderr | not read } # Check to make sure it's safe to modify the user's git repo. git status --porcelain | fail_on_output # Undo any edits made by this script. cleanup() { git reset --hard HEAD } trap cleanup EXIT PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" if [[ "$1" = "-install" ]]; then # Check for module support if go help mod >& /dev/null; then # Install the pinned versions as defined in module tools. pushd ./test/tools go install \ golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell \ github.com/golang/protobuf/protoc-gen-go popd else # Ye olde `go get` incantation. # Note: this gets the latest version of all tools (vs. the pinned versions # with Go modules). go get -u \ golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell \ github.com/golang/protobuf/protoc-gen-go fi if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ "${TRAVIS}" = "true" ]]; then PROTOBUF_VERSION=3.3.0 PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/travis wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} unzip ${PROTOC_FILENAME} bin/protoc --version popd elif not which protoc > /dev/null; then die "Please install protoc into your path" fi fi exit 0 elif [[ "$#" -ne 0 ]]; then die "Unknown argument(s): $*" fi # - Ensure all source files contain a copyright message. not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. not grep 'func Test[^(]' *_test.go not grep 'func Test[^(]' test/*.go # - Do not import x/net/context. not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. 
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" # - Check imports that are illegal in appengine (until Go 1.11). # TODO: Remove when we drop Go 1.10 support go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go # - gofmt, goimports, golint (with exceptions for generated code), go vet. gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "(_mock|\.pb)\.go" golint ./... 2>&1 | not grep -vE "(_mock|\.pb)\.go:" go vet -all ./... misspell -error . # - Check that generated proto files are up to date. if [[ -z "${VET_SKIP_PROTO}" ]]; then PATH="/home/travis/bin:${PATH}" make proto && \ git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) fi # - Check that our module is tidy. if go help mod >& /dev/null; then go mod tidy && \ git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) fi # - Collection of static analysis checks # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. SC_OUT="$(mktemp)" staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. 
not grep -Fv '.HandleResolvedAddrs .HandleSubConnStateChange .HeaderMap .NewAddress .NewServiceConfig .Metadata is deprecated: use Attributes .Type is deprecated: use Attributes .UpdateBalancerState balancer.Picker grpc.CallCustomCodec grpc.Code grpc.Compressor grpc.Decompressor grpc.MaxMsgSize grpc.MethodConfig grpc.NewGZIPCompressor grpc.NewGZIPDecompressor grpc.RPCCompressor grpc.RPCDecompressor grpc.RoundRobin grpc.ServiceConfig grpc.WithBalancer grpc.WithBalancerName grpc.WithCompressor grpc.WithDecompressor grpc.WithDialer grpc.WithMaxMsgSize grpc.WithServiceConfig grpc.WithTimeout http.CloseNotifier info.SecurityVersion naming.Resolver naming.Update naming.Watcher resolver.Backend resolver.GRPCLB' "${SC_OUT}" echo SUCCESS grpc-go-1.29.1/xds/000077500000000000000000000000001365033716300137475ustar00rootroot00000000000000grpc-go-1.29.1/xds/experimental/000077500000000000000000000000001365033716300164445ustar00rootroot00000000000000grpc-go-1.29.1/xds/experimental/xds_experimental.go000066400000000000000000000020671365033716300223530ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package experimental contains xds implementation, still in experimental // state. Users only need to import this package to get all xds functionality. // Things are expected to change fast until we get to a stable state, at // which point, all this will be moved to the xds package. 
package experimental import ( _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver ) grpc-go-1.29.1/xds/internal/000077500000000000000000000000001365033716300155635ustar00rootroot00000000000000grpc-go-1.29.1/xds/internal/balancer/000077500000000000000000000000001365033716300173325ustar00rootroot00000000000000grpc-go-1.29.1/xds/internal/balancer/balancer.go000066400000000000000000000015431365033716300214330ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package balancer installs all the xds balancers. package balancer import ( _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer _ "google.golang.org/grpc/xds/internal/balancer/edsbalancer" // Register the EDS balancer ) grpc-go-1.29.1/xds/internal/balancer/balancergroup/000077500000000000000000000000001365033716300221565ustar00rootroot00000000000000grpc-go-1.29.1/xds/internal/balancer/balancergroup/balancergroup.go000066400000000000000000000543221365033716300253370ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Package balancergroup implements a utility struct to bind multiple balancers // into one balancer. package balancergroup import ( "fmt" "sync" "time" orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/cache" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/wrr" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/lrs" ) // subBalancerWithConfig is used to keep the configurations that will be used to start // the underlying balancer. It can be called to start/stop the underlying // balancer. // // When the config changes, it will pass the update to the underlying balancer // if it exists. // // TODO: rename to subBalanceWrapper (and move to a separate file?) type subBalancerWithConfig struct { // subBalancerWithConfig is passed to the sub-balancer as a ClientConn // wrapper, only to keep the state and picker. When sub-balancer is // restarted while in cache, the picker needs to be resent. // // It also contains the sub-balancer ID, so the parent balancer group can // keep track of SubConn/pickers and the sub-balancers they belong to. Some // of the actions are forwarded to the parent ClientConn with no change. // Some are forward to balancer group with the sub-balancer ID. balancer.ClientConn id internal.Locality group *BalancerGroup mu sync.Mutex state balancer.State // The static part of sub-balancer. 
Keeps balancerBuilders and addresses. // To be used when restarting sub-balancer. builder balancer.Builder addrs []resolver.Address // The dynamic part of sub-balancer. Only used when balancer group is // started. Gets cleared when sub-balancer is closed. balancer balancer.Balancer } func (sbc *subBalancerWithConfig) UpdateBalancerState(state connectivity.State, picker balancer.Picker) { } // UpdateState overrides balancer.ClientConn, to keep state and picker. func (sbc *subBalancerWithConfig) UpdateState(state balancer.State) { sbc.mu.Lock() sbc.state = state sbc.group.updateBalancerState(sbc.id, state) sbc.mu.Unlock() } // NewSubConn overrides balancer.ClientConn, so balancer group can keep track of // the relation between subconns and sub-balancers. func (sbc *subBalancerWithConfig) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { return sbc.group.newSubConn(sbc, addrs, opts) } func (sbc *subBalancerWithConfig) updateBalancerStateWithCachedPicker() { sbc.mu.Lock() if sbc.state.Picker != nil { sbc.group.updateBalancerState(sbc.id, sbc.state) } sbc.mu.Unlock() } func (sbc *subBalancerWithConfig) startBalancer() { b := sbc.builder.Build(sbc, balancer.BuildOptions{}) sbc.group.logger.Infof("Created child policy %p of type %v", b, sbc.builder.Name()) sbc.balancer = b if ub, ok := b.(balancer.V2Balancer); ok { ub.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: sbc.addrs}}) } else { b.HandleResolvedAddrs(sbc.addrs, nil) } } func (sbc *subBalancerWithConfig) handleSubConnStateChange(sc balancer.SubConn, state connectivity.State) { b := sbc.balancer if b == nil { // This sub-balancer was closed. This can happen when EDS removes a // locality. The balancer for this locality was already closed, and the // SubConns are being deleted. But SubConn state change can still // happen. 
return } if ub, ok := b.(balancer.V2Balancer); ok { ub.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: state}) } else { b.HandleSubConnStateChange(sc, state) } } func (sbc *subBalancerWithConfig) updateAddrs(addrs []resolver.Address) { sbc.addrs = addrs b := sbc.balancer if b == nil { // This sub-balancer was closed. This should never happen because // sub-balancers are closed when the locality is removed from EDS, or // the balancer group is closed. There should be no further address // updates when either of this happened. // // This will be a common case with priority support, because a // sub-balancer (and the whole balancer group) could be closed because // it's the lower priority, but it can still get address updates. return } if ub, ok := b.(balancer.V2Balancer); ok { ub.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) } else { b.HandleResolvedAddrs(addrs, nil) } } func (sbc *subBalancerWithConfig) stopBalancer() { sbc.balancer.Close() sbc.balancer = nil } type pickerState struct { weight uint32 picker balancer.V2Picker state connectivity.State } func (s *pickerState) String() string { return fmt.Sprintf("weight:%v,picker:%p,state:%v", s.weight, s.picker, s.state) } // BalancerGroup takes a list of balancers, and make them into one balancer. // // Note that this struct doesn't implement balancer.Balancer, because it's not // intended to be used directly as a balancer. It's expected to be used as a // sub-balancer manager by a high level balancer. 
// // Updates from ClientConn are forwarded to sub-balancers // - service config update // - Not implemented // - address update // - subConn state change // - find the corresponding balancer and forward // // Actions from sub-balances are forwarded to parent ClientConn // - new/remove SubConn // - picker update and health states change // - sub-pickers are grouped into a group-picker // - aggregated connectivity state is the overall state of all pickers. // - resolveNow // // Sub-balancers are only built when the balancer group is started. If the // balancer group is closed, the sub-balancers are also closed. And it's // guaranteed that no updates will be sent to parent ClientConn from a closed // balancer group. type BalancerGroup struct { cc balancer.ClientConn logger *grpclog.PrefixLogger loadStore lrs.Store // outgoingMu guards all operations in the direction: // ClientConn-->Sub-balancer. Including start, stop, resolver updates and // SubConn state changes. // // The corresponding boolean outgoingStarted is used to stop further updates // to sub-balancers after they are closed. outgoingMu sync.Mutex outgoingStarted bool idToBalancerConfig map[internal.Locality]*subBalancerWithConfig // Cache for sub-balancers when they are removed. balancerCache *cache.TimeoutCache // incomingMu and pickerMu are to make sure this balancer group doesn't send // updates to cc after it's closed. // // We don't share the mutex to avoid deadlocks (e.g. a call to sub-balancer // may call back to balancer group inline. It causes deaclock if they // require the same mutex). // // We should never need to hold multiple locks at the same time in this // struct. The case where two locks are held can only happen when the // underlying balancer calls back into balancer group inline. So there's an // implicit lock acquisition order that outgoingMu is locked before either // incomingMu or pickerMu. // incomingMu guards all operations in the direction: // Sub-balancer-->ClientConn. 
Including NewSubConn, RemoveSubConn, and // updatePicker. It also guards the map from SubConn to balancer ID, so // handleSubConnStateChange needs to hold it shortly to find the // sub-balancer to forward the update. // // The corresponding boolean incomingStarted is used to stop further updates // from sub-balancers after they are closed. incomingMu sync.Mutex incomingStarted bool // This boolean only guards calls back to ClientConn. scToSubBalancer map[balancer.SubConn]*subBalancerWithConfig // All balancer IDs exist as keys in this map, even if balancer group is not // started. // // If an ID is not in map, it's either removed or never added. idToPickerState map[internal.Locality]*pickerState } // DefaultSubBalancerCloseTimeout is defined as a variable instead of const for // testing. // // TODO: make it a parameter for New(). var DefaultSubBalancerCloseTimeout = 15 * time.Minute // New creates a new BalancerGroup. Note that the BalancerGroup // needs to be started to work. func New(cc balancer.ClientConn, loadStore lrs.Store, logger *grpclog.PrefixLogger) *BalancerGroup { return &BalancerGroup{ cc: cc, logger: logger, loadStore: loadStore, idToBalancerConfig: make(map[internal.Locality]*subBalancerWithConfig), balancerCache: cache.NewTimeoutCache(DefaultSubBalancerCloseTimeout), scToSubBalancer: make(map[balancer.SubConn]*subBalancerWithConfig), idToPickerState: make(map[internal.Locality]*pickerState), } } // Start starts the balancer group, including building all the sub-balancers, // and send the existing addresses to them. // // A BalancerGroup can be closed and started later. When a BalancerGroup is // closed, it can still receive address updates, which will be applied when // restarted. 
func (bg *BalancerGroup) Start() { bg.incomingMu.Lock() bg.incomingStarted = true bg.incomingMu.Unlock() bg.outgoingMu.Lock() if bg.outgoingStarted { bg.outgoingMu.Unlock() return } for _, config := range bg.idToBalancerConfig { config.startBalancer() } bg.outgoingStarted = true bg.outgoingMu.Unlock() } // Add adds a balancer built by builder to the group, with given id and weight. // // weight should never be zero. func (bg *BalancerGroup) Add(id internal.Locality, weight uint32, builder balancer.Builder) { if weight == 0 { bg.logger.Errorf("BalancerGroup.add called with weight 0, locality: %v. Locality is not added to balancer group", id) return } // First, add things to the picker map. Do this even if incomingStarted is // false, because the data is static. bg.incomingMu.Lock() bg.idToPickerState[id] = &pickerState{ weight: weight, // Start everything in IDLE. It's doesn't affect the overall state // because we don't count IDLE when aggregating (as opposite to e.g. // READY, 1 READY results in overall READY). state: connectivity.Idle, } bg.incomingMu.Unlock() // Store data in static map, and then check to see if bg is started. bg.outgoingMu.Lock() var sbc *subBalancerWithConfig // If outgoingStarted is true, search in the cache. Otherwise, cache is // guaranteed to be empty, searching is unnecessary. if bg.outgoingStarted { if old, ok := bg.balancerCache.Remove(id); ok { sbc, _ = old.(*subBalancerWithConfig) if sbc != nil && sbc.builder != builder { // If the sub-balancer in cache was built with a different // balancer builder, don't use it, cleanup this old-balancer, // and behave as sub-balancer is not found in cache. // // NOTE that this will also drop the cached addresses for this // sub-balancer, which seems to be reasonable. sbc.stopBalancer() // cleanupSubConns must be done before the new balancer starts, // otherwise new SubConns created by the new balancer might be // removed by mistake. 
bg.cleanupSubConns(sbc) sbc = nil } } } if sbc == nil { sbc = &subBalancerWithConfig{ ClientConn: bg.cc, id: id, group: bg, builder: builder, } if bg.outgoingStarted { // Only start the balancer if bg is started. Otherwise, we only keep the // static data. sbc.startBalancer() } } else { // When brining back a sub-balancer from cache, re-send the cached // picker and state. sbc.updateBalancerStateWithCachedPicker() } bg.idToBalancerConfig[id] = sbc bg.outgoingMu.Unlock() } // Remove removes the balancer with id from the group. // // But doesn't close the balancer. The balancer is kept in a cache, and will be // closed after timeout. Cleanup work (closing sub-balancer and removing // subconns) will be done after timeout. // // It also removes the picker generated from this balancer from the picker // group. It always results in a picker update. func (bg *BalancerGroup) Remove(id internal.Locality) { bg.outgoingMu.Lock() if sbToRemove, ok := bg.idToBalancerConfig[id]; ok { if bg.outgoingStarted { bg.balancerCache.Add(id, sbToRemove, func() { // After timeout, when sub-balancer is removed from cache, need // to close the underlying sub-balancer, and remove all its // subconns. bg.outgoingMu.Lock() if bg.outgoingStarted { sbToRemove.stopBalancer() } bg.outgoingMu.Unlock() bg.cleanupSubConns(sbToRemove) }) } delete(bg.idToBalancerConfig, id) } else { bg.logger.Infof("balancer group: trying to remove a non-existing locality from balancer group: %v", id) } bg.outgoingMu.Unlock() bg.incomingMu.Lock() // Remove id and picker from picker map. This also results in future updates // for this ID to be ignored. delete(bg.idToPickerState, id) if bg.incomingStarted { // Normally picker update is triggered by SubConn state change. But we // want to update state and picker to reflect the changes, too. Because // we don't want `ClientConn` to pick this sub-balancer anymore. 
bg.cc.UpdateState(buildPickerAndState(bg.idToPickerState)) } bg.incomingMu.Unlock() } // bg.remove(id) doesn't do cleanup for the sub-balancer. This function does // cleanup after the timeout. func (bg *BalancerGroup) cleanupSubConns(config *subBalancerWithConfig) { bg.incomingMu.Lock() // Remove SubConns. This is only done after the balancer is // actually closed. // // NOTE: if NewSubConn is called by this (closed) balancer later, the // SubConn will be leaked. This shouldn't happen if the balancer // implementation is correct. To make sure this never happens, we need to // add another layer (balancer manager) between balancer group and the // sub-balancers. for sc, b := range bg.scToSubBalancer { if b == config { bg.cc.RemoveSubConn(sc) delete(bg.scToSubBalancer, sc) } } bg.incomingMu.Unlock() } // ChangeWeight changes the weight of the balancer. // // newWeight should never be zero. // // NOTE: It always results in a picker update now. This probably isn't // necessary. But it seems better to do the update because it's a change in the // picker (which is balancer's snapshot). func (bg *BalancerGroup) ChangeWeight(id internal.Locality, newWeight uint32) { if newWeight == 0 { bg.logger.Errorf("BalancerGroup.changeWeight called with newWeight 0. Weight is not changed") return } bg.incomingMu.Lock() defer bg.incomingMu.Unlock() pState, ok := bg.idToPickerState[id] if !ok { return } if pState.weight == newWeight { return } pState.weight = newWeight if bg.incomingStarted { // Normally picker update is triggered by SubConn state change. But we // want to update state and picker to reflect the changes, too. Because // `ClientConn` should do pick with the new weights now. bg.cc.UpdateState(buildPickerAndState(bg.idToPickerState)) } } // Following are actions from the parent grpc.ClientConn, forward to sub-balancers. // HandleSubConnStateChange handles the state for the subconn. It finds the // corresponding balancer and forwards the update. 
func (bg *BalancerGroup) HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) { bg.incomingMu.Lock() config, ok := bg.scToSubBalancer[sc] if !ok { bg.incomingMu.Unlock() return } if state == connectivity.Shutdown { // Only delete sc from the map when state changed to Shutdown. delete(bg.scToSubBalancer, sc) } bg.incomingMu.Unlock() bg.outgoingMu.Lock() config.handleSubConnStateChange(sc, state) bg.outgoingMu.Unlock() } // HandleResolvedAddrs handles addresses from resolver. It finds the balancer // and forwards the update. // // TODO: change this to UpdateClientConnState to handle addresses and balancer // config. func (bg *BalancerGroup) HandleResolvedAddrs(id internal.Locality, addrs []resolver.Address) { bg.outgoingMu.Lock() if config, ok := bg.idToBalancerConfig[id]; ok { config.updateAddrs(addrs) } bg.outgoingMu.Unlock() } // TODO: handleServiceConfig() // // For BNS address for slicer, comes from endpoint.Metadata. It will be sent // from parent to sub-balancers as service config. // Following are actions from sub-balancers, forward to ClientConn. // newSubConn: forward to ClientConn, and also create a map from sc to balancer, // so state update will find the right balancer. // // One note about removing SubConn: only forward to ClientConn, but not delete // from map. Delete sc from the map only when state changes to Shutdown. Since // it's just forwarding the action, there's no need for a removeSubConn() // wrapper function. func (bg *BalancerGroup) newSubConn(config *subBalancerWithConfig, addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { // NOTE: if balancer with id was already removed, this should also return // error. But since we call balancer.stopBalancer when removing the balancer, this // shouldn't happen. 
bg.incomingMu.Lock() if !bg.incomingStarted { bg.incomingMu.Unlock() return nil, fmt.Errorf("NewSubConn is called after balancer group is closed") } sc, err := bg.cc.NewSubConn(addrs, opts) if err != nil { bg.incomingMu.Unlock() return nil, err } bg.scToSubBalancer[sc] = config bg.incomingMu.Unlock() return sc, nil } // updateBalancerState: create an aggregated picker and an aggregated // connectivity state, then forward to ClientConn. func (bg *BalancerGroup) updateBalancerState(id internal.Locality, state balancer.State) { bg.logger.Infof("Balancer state update from locality %v, new state: %+v", id, state) bg.incomingMu.Lock() defer bg.incomingMu.Unlock() pickerSt, ok := bg.idToPickerState[id] if !ok { // All state starts in IDLE. If ID is not in map, it's either removed, // or never existed. bg.logger.Warningf("balancer group: pickerState for %v not found when update picker/state", id) return } pickerSt.picker = newLoadReportPicker(state.Picker, id, bg.loadStore) pickerSt.state = state.ConnectivityState if bg.incomingStarted { bg.logger.Infof("Child pickers with weight: %+v", bg.idToPickerState) bg.cc.UpdateState(buildPickerAndState(bg.idToPickerState)) } } // Close closes the balancer. It stops sub-balancers, and removes the subconns. // The BalancerGroup can be restarted later. func (bg *BalancerGroup) Close() { bg.incomingMu.Lock() if bg.incomingStarted { bg.incomingStarted = false for _, pState := range bg.idToPickerState { // Reset everything to IDLE but keep the entry in map (to keep the // weight). pState.picker = nil pState.state = connectivity.Idle } // Also remove all SubConns. for sc := range bg.scToSubBalancer { bg.cc.RemoveSubConn(sc) delete(bg.scToSubBalancer, sc) } } bg.incomingMu.Unlock() bg.outgoingMu.Lock() if bg.outgoingStarted { bg.outgoingStarted = false for _, config := range bg.idToBalancerConfig { config.stopBalancer() } } bg.outgoingMu.Unlock() // Clear(true) runs clear function to close sub-balancers in cache. 
It // must be called out of outgoing mutex. bg.balancerCache.Clear(true) } func buildPickerAndState(m map[internal.Locality]*pickerState) balancer.State { var readyN, connectingN int readyPickerWithWeights := make([]pickerState, 0, len(m)) for _, ps := range m { switch ps.state { case connectivity.Ready: readyN++ readyPickerWithWeights = append(readyPickerWithWeights, *ps) case connectivity.Connecting: connectingN++ } } var aggregatedState connectivity.State switch { case readyN > 0: aggregatedState = connectivity.Ready case connectingN > 0: aggregatedState = connectivity.Connecting default: aggregatedState = connectivity.TransientFailure } if aggregatedState == connectivity.TransientFailure { return balancer.State{ConnectivityState: aggregatedState, Picker: base.NewErrPickerV2(balancer.ErrTransientFailure)} } return balancer.State{ConnectivityState: aggregatedState, Picker: newPickerGroup(readyPickerWithWeights)} } // NewRandomWRR is the WRR constructor used to pick sub-pickers from // sub-balancers. It's to be modified in tests. var NewRandomWRR = wrr.NewRandom type pickerGroup struct { length int w wrr.WRR } // newPickerGroup takes pickers with weights, and group them into one picker. // // Note it only takes ready pickers. The map shouldn't contain non-ready // pickers. // // TODO: (bg) confirm this is the expected behavior: non-ready balancers should // be ignored when picking. Only ready balancers are picked. 
func newPickerGroup(readyPickerWithWeights []pickerState) *pickerGroup { w := NewRandomWRR() for _, ps := range readyPickerWithWeights { w.Add(ps.picker, int64(ps.weight)) } return &pickerGroup{ length: len(readyPickerWithWeights), w: w, } } func (pg *pickerGroup) Pick(info balancer.PickInfo) (balancer.PickResult, error) { if pg.length <= 0 { return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } p := pg.w.Next().(balancer.V2Picker) return p.Pick(info) } const ( serverLoadCPUName = "cpu_utilization" serverLoadMemoryName = "mem_utilization" ) type loadReportPicker struct { p balancer.V2Picker id internal.Locality loadStore lrs.Store } func newLoadReportPicker(p balancer.V2Picker, id internal.Locality, loadStore lrs.Store) *loadReportPicker { return &loadReportPicker{ p: p, id: id, loadStore: loadStore, } } func (lrp *loadReportPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { res, err := lrp.p.Pick(info) if lrp.loadStore != nil && err == nil { lrp.loadStore.CallStarted(lrp.id) td := res.Done res.Done = func(info balancer.DoneInfo) { lrp.loadStore.CallFinished(lrp.id, info.Err) if load, ok := info.ServerLoad.(*orcapb.OrcaLoadReport); ok { lrp.loadStore.CallServerLoad(lrp.id, serverLoadCPUName, load.CpuUtilization) lrp.loadStore.CallServerLoad(lrp.id, serverLoadMemoryName, load.MemUtilization) for n, d := range load.RequestCost { lrp.loadStore.CallServerLoad(lrp.id, n, d) } for n, d := range load.Utilization { lrp.loadStore.CallServerLoad(lrp.id, n, d) } } if td != nil { td(info) } } } return res, err } grpc-go-1.29.1/xds/internal/balancer/balancergroup/balancergroup_test.go000066400000000000000000000667721365033716300264120ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package balancergroup import ( "fmt" "testing" "time" orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils" ) var ( rrBuilder = balancer.Get(roundrobin.Name) testBalancerIDs = []internal.Locality{{Region: "b1"}, {Region: "b2"}, {Region: "b3"}} testBackendAddrs []resolver.Address ) const testBackendAddrsCount = 12 func init() { for i := 0; i < testBackendAddrsCount; i++ { testBackendAddrs = append(testBackendAddrs, resolver.Address{Addr: fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)}) } // Disable caching for all tests. It will be re-enabled in caching specific // tests. DefaultSubBalancerCloseTimeout = time.Millisecond } func subConnFromPicker(p balancer.V2Picker) func() balancer.SubConn { return func() balancer.SubConn { scst, _ := p.Pick(balancer.PickInfo{}) return scst.SubConn } } // 1 balancer, 1 backend -> 2 backends -> 1 backend. func (s) TestBalancerGroup_OneRR_AddRemoveBackend(t *testing.T) { cc := testutils.NewTestClientConn(t) bg := New(cc, nil, nil) bg.Start() // Add one balancer to group. bg.Add(testBalancerIDs[0], 1, rrBuilder) // Send one resolved address. bg.HandleResolvedAddrs(testBalancerIDs[0], testBackendAddrs[0:1]) // Send subconn state change. 
sc1 := <-cc.NewSubConnCh bg.HandleSubConnStateChange(sc1, connectivity.Connecting) bg.HandleSubConnStateChange(sc1, connectivity.Ready) // Test pick with one backend. p1 := <-cc.NewPickerCh for i := 0; i < 5; i++ { gotSCSt, _ := p1.Pick(balancer.PickInfo{}) if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) } } // Send two addresses. bg.HandleResolvedAddrs(testBalancerIDs[0], testBackendAddrs[0:2]) // Expect one new subconn, send state update. sc2 := <-cc.NewSubConnCh bg.HandleSubConnStateChange(sc2, connectivity.Connecting) bg.HandleSubConnStateChange(sc2, connectivity.Ready) // Test roundrobin pick. p2 := <-cc.NewPickerCh want := []balancer.SubConn{sc1, sc2} if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Remove the first address. bg.HandleResolvedAddrs(testBalancerIDs[0], testBackendAddrs[1:2]) scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) } bg.HandleSubConnStateChange(scToRemove, connectivity.Shutdown) // Test pick with only the second subconn. p3 := <-cc.NewPickerCh for i := 0; i < 5; i++ { gotSC, _ := p3.Pick(balancer.PickInfo{}) if !cmp.Equal(gotSC.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSC, sc2) } } } // 2 balancers, each with 1 backend. func (s) TestBalancerGroup_TwoRR_OneBackend(t *testing.T) { cc := testutils.NewTestClientConn(t) bg := New(cc, nil, nil) bg.Start() // Add two balancers to group and send one resolved address to both // balancers. 
bg.Add(testBalancerIDs[0], 1, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[0], testBackendAddrs[0:1]) sc1 := <-cc.NewSubConnCh bg.Add(testBalancerIDs[1], 1, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[1], testBackendAddrs[0:1]) sc2 := <-cc.NewSubConnCh // Send state changes for both subconns. bg.HandleSubConnStateChange(sc1, connectivity.Connecting) bg.HandleSubConnStateChange(sc1, connectivity.Ready) bg.HandleSubConnStateChange(sc2, connectivity.Connecting) bg.HandleSubConnStateChange(sc2, connectivity.Ready) // Test roundrobin on the last picker. p1 := <-cc.NewPickerCh want := []balancer.SubConn{sc1, sc2} if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } } // 2 balancers, each with more than 1 backends. func (s) TestBalancerGroup_TwoRR_MoreBackends(t *testing.T) { cc := testutils.NewTestClientConn(t) bg := New(cc, nil, nil) bg.Start() // Add two balancers to group and send one resolved address to both // balancers. bg.Add(testBalancerIDs[0], 1, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[0], testBackendAddrs[0:2]) sc1 := <-cc.NewSubConnCh sc2 := <-cc.NewSubConnCh bg.Add(testBalancerIDs[1], 1, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[1], testBackendAddrs[2:4]) sc3 := <-cc.NewSubConnCh sc4 := <-cc.NewSubConnCh // Send state changes for both subconns. bg.HandleSubConnStateChange(sc1, connectivity.Connecting) bg.HandleSubConnStateChange(sc1, connectivity.Ready) bg.HandleSubConnStateChange(sc2, connectivity.Connecting) bg.HandleSubConnStateChange(sc2, connectivity.Ready) bg.HandleSubConnStateChange(sc3, connectivity.Connecting) bg.HandleSubConnStateChange(sc3, connectivity.Ready) bg.HandleSubConnStateChange(sc4, connectivity.Connecting) bg.HandleSubConnStateChange(sc4, connectivity.Ready) // Test roundrobin on the last picker. 
p1 := <-cc.NewPickerCh want := []balancer.SubConn{sc1, sc2, sc3, sc4} if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Turn sc2's connection down, should be RR between balancers. bg.HandleSubConnStateChange(sc2, connectivity.TransientFailure) p2 := <-cc.NewPickerCh // Expect two sc1's in the result, because balancer1 will be picked twice, // but there's only one sc in it. want = []balancer.SubConn{sc1, sc1, sc3, sc4} if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Remove sc3's addresses. bg.HandleResolvedAddrs(testBalancerIDs[1], testBackendAddrs[3:4]) scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc3, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc3, scToRemove) } bg.HandleSubConnStateChange(scToRemove, connectivity.Shutdown) p3 := <-cc.NewPickerCh want = []balancer.SubConn{sc1, sc4} if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Turn sc1's connection down. bg.HandleSubConnStateChange(sc1, connectivity.TransientFailure) p4 := <-cc.NewPickerCh want = []balancer.SubConn{sc4} if err := testutils.IsRoundRobin(want, subConnFromPicker(p4)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Turn last connection to connecting. bg.HandleSubConnStateChange(sc4, connectivity.Connecting) p5 := <-cc.NewPickerCh for i := 0; i < 5; i++ { if _, err := p5.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) } } // Turn all connections down. 
bg.HandleSubConnStateChange(sc4, connectivity.TransientFailure) p6 := <-cc.NewPickerCh for i := 0; i < 5; i++ { if _, err := p6.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) } } } // 2 balancers with different weights. func (s) TestBalancerGroup_TwoRR_DifferentWeight_MoreBackends(t *testing.T) { cc := testutils.NewTestClientConn(t) bg := New(cc, nil, nil) bg.Start() // Add two balancers to group and send two resolved addresses to both // balancers. bg.Add(testBalancerIDs[0], 2, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[0], testBackendAddrs[0:2]) sc1 := <-cc.NewSubConnCh sc2 := <-cc.NewSubConnCh bg.Add(testBalancerIDs[1], 1, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[1], testBackendAddrs[2:4]) sc3 := <-cc.NewSubConnCh sc4 := <-cc.NewSubConnCh // Send state changes for both subconns. bg.HandleSubConnStateChange(sc1, connectivity.Connecting) bg.HandleSubConnStateChange(sc1, connectivity.Ready) bg.HandleSubConnStateChange(sc2, connectivity.Connecting) bg.HandleSubConnStateChange(sc2, connectivity.Ready) bg.HandleSubConnStateChange(sc3, connectivity.Connecting) bg.HandleSubConnStateChange(sc3, connectivity.Ready) bg.HandleSubConnStateChange(sc4, connectivity.Connecting) bg.HandleSubConnStateChange(sc4, connectivity.Ready) // Test roundrobin on the last picker. p1 := <-cc.NewPickerCh want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4} if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } } // totally 3 balancers, add/remove balancer. func (s) TestBalancerGroup_ThreeRR_RemoveBalancer(t *testing.T) { cc := testutils.NewTestClientConn(t) bg := New(cc, nil, nil) bg.Start() // Add three balancers to group and send one resolved address to both // balancers. 
bg.Add(testBalancerIDs[0], 1, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[0], testBackendAddrs[0:1]) sc1 := <-cc.NewSubConnCh bg.Add(testBalancerIDs[1], 1, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[1], testBackendAddrs[1:2]) sc2 := <-cc.NewSubConnCh bg.Add(testBalancerIDs[2], 1, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[2], testBackendAddrs[1:2]) sc3 := <-cc.NewSubConnCh // Send state changes for both subconns. bg.HandleSubConnStateChange(sc1, connectivity.Connecting) bg.HandleSubConnStateChange(sc1, connectivity.Ready) bg.HandleSubConnStateChange(sc2, connectivity.Connecting) bg.HandleSubConnStateChange(sc2, connectivity.Ready) bg.HandleSubConnStateChange(sc3, connectivity.Connecting) bg.HandleSubConnStateChange(sc3, connectivity.Ready) p1 := <-cc.NewPickerCh want := []balancer.SubConn{sc1, sc2, sc3} if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Remove the second balancer, while the others two are ready. bg.Remove(testBalancerIDs[1]) scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scToRemove) } p2 := <-cc.NewPickerCh want = []balancer.SubConn{sc1, sc3} if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) } // move balancer 3 into transient failure. bg.HandleSubConnStateChange(sc3, connectivity.TransientFailure) // Remove the first balancer, while the third is transient failure. 
bg.Remove(testBalancerIDs[0]) scToRemove = <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) } p3 := <-cc.NewPickerCh for i := 0; i < 5; i++ { if _, err := p3.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) } } } // 2 balancers, change balancer weight. func (s) TestBalancerGroup_TwoRR_ChangeWeight_MoreBackends(t *testing.T) { cc := testutils.NewTestClientConn(t) bg := New(cc, nil, nil) bg.Start() // Add two balancers to group and send two resolved addresses to both // balancers. bg.Add(testBalancerIDs[0], 2, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[0], testBackendAddrs[0:2]) sc1 := <-cc.NewSubConnCh sc2 := <-cc.NewSubConnCh bg.Add(testBalancerIDs[1], 1, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[1], testBackendAddrs[2:4]) sc3 := <-cc.NewSubConnCh sc4 := <-cc.NewSubConnCh // Send state changes for both subconns. bg.HandleSubConnStateChange(sc1, connectivity.Connecting) bg.HandleSubConnStateChange(sc1, connectivity.Ready) bg.HandleSubConnStateChange(sc2, connectivity.Connecting) bg.HandleSubConnStateChange(sc2, connectivity.Ready) bg.HandleSubConnStateChange(sc3, connectivity.Connecting) bg.HandleSubConnStateChange(sc3, connectivity.Ready) bg.HandleSubConnStateChange(sc4, connectivity.Connecting) bg.HandleSubConnStateChange(sc4, connectivity.Ready) // Test roundrobin on the last picker. p1 := <-cc.NewPickerCh want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4} if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } bg.ChangeWeight(testBalancerIDs[0], 3) // Test roundrobin with new weight. 
p2 := <-cc.NewPickerCh want = []balancer.SubConn{sc1, sc1, sc1, sc2, sc2, sc2, sc3, sc4} if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) } } func (s) TestBalancerGroup_LoadReport(t *testing.T) { testLoadStore := testutils.NewTestLoadStore() cc := testutils.NewTestClientConn(t) bg := New(cc, testLoadStore, nil) bg.Start() backendToBalancerID := make(map[balancer.SubConn]internal.Locality) // Add two balancers to group and send two resolved addresses to both // balancers. bg.Add(testBalancerIDs[0], 2, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[0], testBackendAddrs[0:2]) sc1 := <-cc.NewSubConnCh sc2 := <-cc.NewSubConnCh backendToBalancerID[sc1] = testBalancerIDs[0] backendToBalancerID[sc2] = testBalancerIDs[0] bg.Add(testBalancerIDs[1], 1, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[1], testBackendAddrs[2:4]) sc3 := <-cc.NewSubConnCh sc4 := <-cc.NewSubConnCh backendToBalancerID[sc3] = testBalancerIDs[1] backendToBalancerID[sc4] = testBalancerIDs[1] // Send state changes for both subconns. bg.HandleSubConnStateChange(sc1, connectivity.Connecting) bg.HandleSubConnStateChange(sc1, connectivity.Ready) bg.HandleSubConnStateChange(sc2, connectivity.Connecting) bg.HandleSubConnStateChange(sc2, connectivity.Ready) bg.HandleSubConnStateChange(sc3, connectivity.Connecting) bg.HandleSubConnStateChange(sc3, connectivity.Ready) bg.HandleSubConnStateChange(sc4, connectivity.Connecting) bg.HandleSubConnStateChange(sc4, connectivity.Ready) // Test roundrobin on the last picker. 
p1 := <-cc.NewPickerCh var ( wantStart []internal.Locality wantEnd []internal.Locality wantCost []testutils.TestServerLoad ) for i := 0; i < 10; i++ { scst, _ := p1.Pick(balancer.PickInfo{}) locality := backendToBalancerID[scst.SubConn] wantStart = append(wantStart, locality) if scst.Done != nil && scst.SubConn != sc1 { scst.Done(balancer.DoneInfo{ ServerLoad: &orcapb.OrcaLoadReport{ CpuUtilization: 10, MemUtilization: 5, RequestCost: map[string]float64{"pic": 3.14}, Utilization: map[string]float64{"piu": 3.14}, }, }) wantEnd = append(wantEnd, locality) wantCost = append(wantCost, testutils.TestServerLoad{Name: serverLoadCPUName, D: 10}, testutils.TestServerLoad{Name: serverLoadMemoryName, D: 5}, testutils.TestServerLoad{Name: "pic", D: 3.14}, testutils.TestServerLoad{Name: "piu", D: 3.14}) } } if !cmp.Equal(testLoadStore.CallsStarted, wantStart) { t.Fatalf("want started: %v, got: %v", testLoadStore.CallsStarted, wantStart) } if !cmp.Equal(testLoadStore.CallsEnded, wantEnd) { t.Fatalf("want ended: %v, got: %v", testLoadStore.CallsEnded, wantEnd) } if !cmp.Equal(testLoadStore.CallsCost, wantCost, cmp.AllowUnexported(testutils.TestServerLoad{})) { t.Fatalf("want cost: %v, got: %v", testLoadStore.CallsCost, wantCost) } } // Create a new balancer group, add balancer and backends, but not start. // - b1, weight 2, backends [0,1] // - b2, weight 1, backends [2,3] // Start the balancer group and check behavior. // // Close the balancer group, call add/remove/change weight/change address. // - b2, weight 3, backends [0,3] // - b3, weight 1, backends [1,2] // Start the balancer group again and check for behavior. func (s) TestBalancerGroup_start_close(t *testing.T) { cc := testutils.NewTestClientConn(t) bg := New(cc, nil, nil) // Add two balancers to group and send two resolved addresses to both // balancers. 
bg.Add(testBalancerIDs[0], 2, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[0], testBackendAddrs[0:2]) bg.Add(testBalancerIDs[1], 1, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[1], testBackendAddrs[2:4]) bg.Start() m1 := make(map[resolver.Address]balancer.SubConn) for i := 0; i < 4; i++ { addrs := <-cc.NewSubConnAddrsCh sc := <-cc.NewSubConnCh m1[addrs[0]] = sc bg.HandleSubConnStateChange(sc, connectivity.Connecting) bg.HandleSubConnStateChange(sc, connectivity.Ready) } // Test roundrobin on the last picker. p1 := <-cc.NewPickerCh want := []balancer.SubConn{ m1[testBackendAddrs[0]], m1[testBackendAddrs[0]], m1[testBackendAddrs[1]], m1[testBackendAddrs[1]], m1[testBackendAddrs[2]], m1[testBackendAddrs[3]], } if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } bg.Close() for i := 0; i < 4; i++ { bg.HandleSubConnStateChange(<-cc.RemoveSubConnCh, connectivity.Shutdown) } // Add b3, weight 1, backends [1,2]. bg.Add(testBalancerIDs[2], 1, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[2], testBackendAddrs[1:3]) // Remove b1. bg.Remove(testBalancerIDs[0]) // Update b2 to weight 3, backends [0,3]. bg.ChangeWeight(testBalancerIDs[1], 3) bg.HandleResolvedAddrs(testBalancerIDs[1], append([]resolver.Address(nil), testBackendAddrs[0], testBackendAddrs[3])) bg.Start() m2 := make(map[resolver.Address]balancer.SubConn) for i := 0; i < 4; i++ { addrs := <-cc.NewSubConnAddrsCh sc := <-cc.NewSubConnCh m2[addrs[0]] = sc bg.HandleSubConnStateChange(sc, connectivity.Connecting) bg.HandleSubConnStateChange(sc, connectivity.Ready) } // Test roundrobin on the last picker. 
p2 := <-cc.NewPickerCh want = []balancer.SubConn{ m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], m2[testBackendAddrs[1]], m2[testBackendAddrs[2]], } if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) } } // Test that balancer group start() doesn't deadlock if the balancer calls back // into balancer group inline when it gets an update. // // The potential deadlock can happen if we // - hold a lock and send updates to balancer (e.g. update resolved addresses) // - the balancer calls back (NewSubConn or update picker) in line // The callback will try to hold hte same lock again, which will cause a // deadlock. // // This test starts the balancer group with a test balancer, will updates picker // whenever it gets an address update. It's expected that start() doesn't block // because of deadlock. func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) { cc := testutils.NewTestClientConn(t) bg := New(cc, nil, nil) bg.Add(testBalancerIDs[0], 2, &testutils.TestConstBalancerBuilder{}) bg.HandleResolvedAddrs(testBalancerIDs[0], testBackendAddrs[0:2]) bg.Add(testBalancerIDs[1], 1, &testutils.TestConstBalancerBuilder{}) bg.HandleResolvedAddrs(testBalancerIDs[1], testBackendAddrs[2:4]) bg.Start() } func replaceDefaultSubBalancerCloseTimeout(n time.Duration) func() { old := DefaultSubBalancerCloseTimeout DefaultSubBalancerCloseTimeout = n return func() { DefaultSubBalancerCloseTimeout = old } } // initBalancerGroupForCachingTest creates a balancer group, and initialize it // to be ready for caching tests. // // Two rr balancers are added to bg, each with 2 ready subConns. A sub-balancer // is removed later, so the balancer group returned has one sub-balancer in its // own map, and one sub-balancer in cache. 
func initBalancerGroupForCachingTest(t *testing.T) (*BalancerGroup, *testutils.TestClientConn, map[resolver.Address]balancer.SubConn) { cc := testutils.NewTestClientConn(t) bg := New(cc, nil, nil) // Add two balancers to group and send two resolved addresses to both // balancers. bg.Add(testBalancerIDs[0], 2, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[0], testBackendAddrs[0:2]) bg.Add(testBalancerIDs[1], 1, rrBuilder) bg.HandleResolvedAddrs(testBalancerIDs[1], testBackendAddrs[2:4]) bg.Start() m1 := make(map[resolver.Address]balancer.SubConn) for i := 0; i < 4; i++ { addrs := <-cc.NewSubConnAddrsCh sc := <-cc.NewSubConnCh m1[addrs[0]] = sc bg.HandleSubConnStateChange(sc, connectivity.Connecting) bg.HandleSubConnStateChange(sc, connectivity.Ready) } // Test roundrobin on the last picker. p1 := <-cc.NewPickerCh want := []balancer.SubConn{ m1[testBackendAddrs[0]], m1[testBackendAddrs[0]], m1[testBackendAddrs[1]], m1[testBackendAddrs[1]], m1[testBackendAddrs[2]], m1[testBackendAddrs[3]], } if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } bg.Remove(testBalancerIDs[1]) // Don't wait for SubConns to be removed after close, because they are only // removed after close timeout. for i := 0; i < 10; i++ { select { case <-cc.RemoveSubConnCh: t.Fatalf("Got request to remove subconn, want no remove subconn (because subconns were still in cache)") default: } time.Sleep(time.Millisecond) } // Test roundrobin on the with only sub-balancer0. p2 := <-cc.NewPickerCh want = []balancer.SubConn{ m1[testBackendAddrs[0]], m1[testBackendAddrs[1]], } if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) } return bg, cc, m1 } // Test that if a sub-balancer is removed, and re-added within close timeout, // the subConns won't be re-created. 
func (s) TestBalancerGroup_locality_caching(t *testing.T) { defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)() bg, cc, addrToSC := initBalancerGroupForCachingTest(t) // Turn down subconn for addr2, shouldn't get picker update because // sub-balancer1 was removed. bg.HandleSubConnStateChange(addrToSC[testBackendAddrs[2]], connectivity.TransientFailure) for i := 0; i < 10; i++ { select { case <-cc.NewPickerCh: t.Fatalf("Got new picker, want no new picker (because the sub-balancer was removed)") default: } time.Sleep(time.Millisecond) } // Sleep, but sleep less then close timeout. time.Sleep(time.Millisecond * 100) // Re-add sub-balancer-1, because subconns were in cache, no new subconns // should be created. But a new picker will still be generated, with subconn // states update to date. bg.Add(testBalancerIDs[1], 1, rrBuilder) p3 := <-cc.NewPickerCh want := []balancer.SubConn{ addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]], // addr2 is down, b2 only has addr3 in READY state. addrToSC[testBackendAddrs[3]], addrToSC[testBackendAddrs[3]], } if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { t.Fatalf("want %v, got %v", want, err) } for i := 0; i < 10; i++ { select { case <-cc.NewSubConnAddrsCh: t.Fatalf("Got new subconn, want no new subconn (because subconns were still in cache)") default: } time.Sleep(time.Millisecond * 10) } } // Sub-balancers are put in cache when they are removed. If balancer group is // closed within close timeout, all subconns should still be rmeoved // immediately. func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) { defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)() bg, cc, addrToSC := initBalancerGroupForCachingTest(t) bg.Close() // The balancer group is closed. The subconns should be removed immediately. 
removeTimeout := time.After(time.Millisecond * 500) scToRemove := map[balancer.SubConn]int{ addrToSC[testBackendAddrs[0]]: 1, addrToSC[testBackendAddrs[1]]: 1, addrToSC[testBackendAddrs[2]]: 1, addrToSC[testBackendAddrs[3]]: 1, } for i := 0; i < len(scToRemove); i++ { select { case sc := <-cc.RemoveSubConnCh: c := scToRemove[sc] if c == 0 { t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c) } scToRemove[sc] = c - 1 case <-removeTimeout: t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed") } } } // Sub-balancers in cache will be closed if not re-added within timeout, and // subConns will be removed. func (s) TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing.T) { defer replaceDefaultSubBalancerCloseTimeout(time.Second)() _, cc, addrToSC := initBalancerGroupForCachingTest(t) // The sub-balancer is not re-added withtin timeout. The subconns should be // removed. removeTimeout := time.After(DefaultSubBalancerCloseTimeout) scToRemove := map[balancer.SubConn]int{ addrToSC[testBackendAddrs[2]]: 1, addrToSC[testBackendAddrs[3]]: 1, } for i := 0; i < len(scToRemove); i++ { select { case sc := <-cc.RemoveSubConnCh: c := scToRemove[sc] if c == 0 { t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c) } scToRemove[sc] = c - 1 case <-removeTimeout: t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed") } } } // Wrap the rr builder, so it behaves the same, but has a different pointer. type noopBalancerBuilderWrapper struct { balancer.Builder } // After removing a sub-balancer, re-add with same ID, but different balancer // builder. Old subconns should be removed, and new subconns should be created. 
func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *testing.T) { defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)() bg, cc, addrToSC := initBalancerGroupForCachingTest(t) // Re-add sub-balancer-1, but with a different balancer builder. The // sub-balancer was still in cache, but cann't be reused. This should cause // old sub-balancer's subconns to be removed immediately, and new subconns // to be created. bg.Add(testBalancerIDs[1], 1, &noopBalancerBuilderWrapper{rrBuilder}) // The cached sub-balancer should be closed, and the subconns should be // removed immediately. removeTimeout := time.After(time.Millisecond * 500) scToRemove := map[balancer.SubConn]int{ addrToSC[testBackendAddrs[2]]: 1, addrToSC[testBackendAddrs[3]]: 1, } for i := 0; i < len(scToRemove); i++ { select { case sc := <-cc.RemoveSubConnCh: c := scToRemove[sc] if c == 0 { t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c) } scToRemove[sc] = c - 1 case <-removeTimeout: t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed") } } bg.HandleResolvedAddrs(testBalancerIDs[1], testBackendAddrs[4:6]) newSCTimeout := time.After(time.Millisecond * 500) scToAdd := map[resolver.Address]int{ testBackendAddrs[4]: 1, testBackendAddrs[5]: 1, } for i := 0; i < len(scToAdd); i++ { select { case addr := <-cc.NewSubConnAddrsCh: c := scToAdd[addr[0]] if c == 0 { t.Fatalf("Got newSubConn for %v when there's %d new expected", addr, c) } scToAdd[addr[0]] = c - 1 sc := <-cc.NewSubConnCh addrToSC[addr[0]] = sc bg.HandleSubConnStateChange(sc, connectivity.Connecting) bg.HandleSubConnStateChange(sc, connectivity.Ready) case <-newSCTimeout: t.Fatalf("timeout waiting for subConns (from new sub-balancer) to be newed") } } // Test roundrobin on the new picker. 
p3 := <-cc.NewPickerCh want := []balancer.SubConn{ addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[4]], addrToSC[testBackendAddrs[5]], } if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { t.Fatalf("want %v, got %v", want, err) } } grpc-go-1.29.1/xds/internal/balancer/balancergroup/testutils_test.go000066400000000000000000000015741365033716300256130ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package balancergroup import ( "testing" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/xds/internal/testutils" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func init() { NewRandomWRR = testutils.NewTestWRR } grpc-go-1.29.1/xds/internal/balancer/cdsbalancer/000077500000000000000000000000001365033716300215735ustar00rootroot00000000000000grpc-go-1.29.1/xds/internal/balancer/cdsbalancer/cdsbalancer.go000066400000000000000000000304071365033716300243670ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Package cdsbalancer implements a balancer to handle CDS responses. package cdsbalancer import ( "encoding/json" "errors" "fmt" "sync" "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/edsbalancer" xdsinternal "google.golang.org/grpc/xds/internal" xdsclient "google.golang.org/grpc/xds/internal/client" ) const ( cdsName = "cds_experimental" edsName = "eds_experimental" ) var ( errBalancerClosed = errors.New("cdsBalancer is closed") // newEDSBalancer is a helper function to build a new edsBalancer and will be // overridden in unittests. newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.V2Balancer, error) { builder := balancer.Get(edsName) if builder == nil { return nil, fmt.Errorf("xds: no balancer builder with name %v", edsName) } // We directly pass the parent clientConn to the // underlying edsBalancer because the cdsBalancer does // not deal with subConns. return builder.Build(cc, opts).(balancer.V2Balancer), nil } ) func init() { balancer.Register(cdsBB{}) } // cdsBB (short for cdsBalancerBuilder) implements the balancer.Builder // interface to help build a cdsBalancer. // It also implements the balancer.ConfigParser interface to help parse the // JSON service config, to be passed to the cdsBalancer. 
type cdsBB struct{} // Build creates a new CDS balancer with the ClientConn. func (cdsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { b := &cdsBalancer{ cc: cc, bOpts: opts, updateCh: buffer.NewUnbounded(), } b.logger = grpclog.NewPrefixLogger(loggingPrefix(b)) b.logger.Infof("Created") go b.run() return b } // Name returns the name of balancers built by this builder. func (cdsBB) Name() string { return cdsName } // lbConfig represents the loadBalancingConfig section of the service config // for the cdsBalancer. type lbConfig struct { serviceconfig.LoadBalancingConfig ClusterName string `json:"Cluster"` } // ParseConfig parses the JSON load balancer config provided into an // internal form or returns an error if the config is invalid. func (cdsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { var cfg lbConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, fmt.Errorf("xds: unable to unmarshal lbconfig: %s, error: %v", string(c), err) } return &cfg, nil } // xdsClientInterface contains methods from xdsClient.Client which are used by // the cdsBalancer. This will be faked out in unittests. type xdsClientInterface interface { WatchCluster(string, func(xdsclient.CDSUpdate, error)) func() Close() } // ccUpdate wraps a clientConn update received from gRPC (pushed from the // xdsResolver). A valid clusterName causes the cdsBalancer to register a CDS // watcher with the xdsClient, while a non-nil error causes it to cancel the // existing watch and propagate the error to the underlying edsBalancer. type ccUpdate struct { client xdsClientInterface clusterName string err error } // scUpdate wraps a subConn update received from gRPC. This is directly passed // on to the edsBalancer. type scUpdate struct { subConn balancer.SubConn state balancer.SubConnState } // watchUpdate wraps the information received from a registered CDS watcher. A // non-nil error is propagated to the underlying edsBalancer. 
A valid update // results in creating a new edsBalancer (if one doesn't already exist) and // pushing the update to it. type watchUpdate struct { cds xdsclient.CDSUpdate err error } // closeUpdate is an empty struct used to notify the run() goroutine that a // Close has been called on the balancer. type closeUpdate struct{} // cdsBalancer implements a CDS based LB policy. It instantiates an EDS based // LB policy to further resolve the serviceName received from CDS, into // localities and endpoints. Implements the balancer.Balancer interface which // is exposed to gRPC and implements the balancer.ClientConn interface which is // exposed to the edsBalancer. type cdsBalancer struct { cc balancer.ClientConn bOpts balancer.BuildOptions updateCh *buffer.Unbounded client xdsClientInterface cancelWatch func() edsLB balancer.V2Balancer clusterToWatch string logger *grpclog.PrefixLogger // The only thing protected by this mutex is the closed boolean. This is // checked by all methods before acting on updates. mu sync.Mutex closed bool } // run is a long-running goroutine which handles all updates from gRPC. All // methods which are invoked directly by gRPC or xdsClient simply push an // update onto a channel which is read and acted upon right here. // // 1. Good clientConn updates lead to registration of a CDS watch. Updates with // error lead to cancellation of existing watch and propagation of the same // error to the edsBalancer. // 2. SubConn updates are passthrough and are simply handed over to the // underlying edsBalancer. // 3. Watch API updates lead to clientConn updates being invoked on the // underlying edsBalancer. // 4. Close results in cancellation of the CDS watch and closing of the // underlying edsBalancer and is the only way to exit this goroutine. 
func (b *cdsBalancer) run() { for { u := <-b.updateCh.Get() b.updateCh.Load() switch update := u.(type) { case *ccUpdate: // We first handle errors, if any, and then proceed with handling // the update, only if the status quo has changed. if err := update.err; err != nil { // TODO: Should we cancel the watch only on specific errors? if b.cancelWatch != nil { b.cancelWatch() } if b.edsLB != nil { b.edsLB.ResolverError(err) } } if b.client == update.client && b.clusterToWatch == update.clusterName { break } if update.client != nil { // Since the cdsBalancer doesn't own the xdsClient object, we // don't have to bother about closing the old client here, but // we still need to cancel the watch on the old client. if b.cancelWatch != nil { b.cancelWatch() } b.client = update.client } if update.clusterName != "" { cancelWatch := b.client.WatchCluster(update.clusterName, b.handleClusterUpdate) b.logger.Infof("Watch started on resource name %v with xds-client %p", update.clusterName, b.client) b.cancelWatch = func() { cancelWatch() b.logger.Infof("Watch cancelled on resource name %v with xds-client %p", update.clusterName, b.client) } b.clusterToWatch = update.clusterName } case *scUpdate: if b.edsLB == nil { b.logger.Errorf("xds: received scUpdate {%+v} with no edsBalancer", update) break } b.edsLB.UpdateSubConnState(update.subConn, update.state) case *watchUpdate: if err := update.err; err != nil { b.logger.Warningf("Watch error from xds-client %p: %v", b.client, err) if b.edsLB != nil { b.edsLB.ResolverError(err) } break } b.logger.Infof("Watch update from xds-client %p, content: %+v", b.client, update.cds) // The first good update from the watch API leads to the // instantiation of an edsBalancer. Further updates/errors are // propagated to the existing edsBalancer. 
if b.edsLB == nil { var err error b.edsLB, err = newEDSBalancer(b.cc, b.bOpts) if b.edsLB == nil { b.logger.Errorf("Failed to create child policy of type %s, %v", edsName, err) break } b.logger.Infof("Created child policy %p of type %s", b.edsLB, edsName) } lbCfg := &edsbalancer.EDSConfig{EDSServiceName: update.cds.ServiceName} if update.cds.EnableLRS { // An empty string here indicates that the edsBalancer // should use the same xDS server for load reporting as // it does for EDS requests/responses. lbCfg.LrsLoadReportingServerName = new(string) } ccState := balancer.ClientConnState{ ResolverState: resolver.State{Attributes: attributes.New(xdsinternal.XDSClientID, b.client)}, BalancerConfig: lbCfg, } if err := b.edsLB.UpdateClientConnState(ccState); err != nil { b.logger.Errorf("xds: edsBalancer.UpdateClientConnState(%+v) returned error: %v", ccState, err) } case *closeUpdate: if b.cancelWatch != nil { b.cancelWatch() b.cancelWatch = nil } if b.edsLB != nil { b.edsLB.Close() b.edsLB = nil } // This is the *ONLY* point of return from this function. b.logger.Infof("Shutdown") return } } } // handleClusterUpdate is the CDS watch API callback. It simply pushes the // received information on to the update channel for run() to pick it up. func (b *cdsBalancer) handleClusterUpdate(cu xdsclient.CDSUpdate, err error) { if b.isClosed() { b.logger.Warningf("xds: received cluster update {%+v} after cdsBalancer was closed", cu) return } b.updateCh.Put(&watchUpdate{cds: cu, err: err}) } // UpdateClientConnState receives the serviceConfig (which contains the // clusterName to watch for in CDS) and the xdsClient object from the // xdsResolver. 
func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) error { if b.isClosed() { b.logger.Warningf("xds: received ClientConnState {%+v} after cdsBalancer was closed", state) return errBalancerClosed } b.logger.Infof("Receive update from resolver, balancer config: %+v", state.BalancerConfig) // The errors checked here should ideally never happen because the // ServiceConfig in this case is prepared by the xdsResolver and is not // something that is received on the wire. lbCfg, ok := state.BalancerConfig.(*lbConfig) if !ok { b.logger.Warningf("xds: unexpected LoadBalancingConfig type: %T", state.BalancerConfig) return balancer.ErrBadResolverState } if lbCfg.ClusterName == "" { b.logger.Warningf("xds: no clusterName found in LoadBalancingConfig: %+v", lbCfg) return balancer.ErrBadResolverState } client := state.ResolverState.Attributes.Value(xdsinternal.XDSClientID) if client == nil { b.logger.Warningf("xds: no xdsClient found in resolver state attributes") return balancer.ErrBadResolverState } newClient, ok := client.(xdsClientInterface) if !ok { b.logger.Warningf("xds: unexpected xdsClient type: %T", client) return balancer.ErrBadResolverState } b.updateCh.Put(&ccUpdate{client: newClient, clusterName: lbCfg.ClusterName}) return nil } // ResolverError handles errors reported by the xdsResolver. // // TODO: Make it possible to differentiate between connection errors and // resource not found errors. func (b *cdsBalancer) ResolverError(err error) { if b.isClosed() { b.logger.Warningf("xds: received resolver error {%v} after cdsBalancer was closed", err) return } b.updateCh.Put(&ccUpdate{err: err}) } // UpdateSubConnState handles subConn updates from gRPC. 
func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { if b.isClosed() { b.logger.Warningf("xds: received subConn update {%v, %v} after cdsBalancer was closed", sc, state) return } b.updateCh.Put(&scUpdate{subConn: sc, state: state}) } // Close closes the cdsBalancer and the underlying edsBalancer. func (b *cdsBalancer) Close() { b.mu.Lock() b.closed = true b.mu.Unlock() b.updateCh.Put(&closeUpdate{}) } func (b *cdsBalancer) isClosed() bool { b.mu.Lock() closed := b.closed b.mu.Unlock() return closed } func (b *cdsBalancer) HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) { b.logger.Errorf("UpdateSubConnState should be called instead of HandleSubConnStateChange") } func (b *cdsBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { b.logger.Errorf("UpdateClientConnState should be called instead of HandleResolvedAddrs") } grpc-go-1.29.1/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go000066400000000000000000000370021365033716300254240ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package cdsbalancer import ( "encoding/json" "errors" "fmt" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/edsbalancer" xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" ) const ( clusterName = "cluster1" serviceName = "service1" defaultTestTimeout = 2 * time.Second ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } type testClientConn struct { balancer.ClientConn } // cdsWatchInfo wraps the update and the error sent in a CDS watch callback. type cdsWatchInfo struct { update xdsclient.CDSUpdate err error } // invokeWatchCb invokes the CDS watch callback registered by the cdsBalancer // and waits for appropriate state to be pushed to the provided edsBalancer. func invokeWatchCbAndWait(xdsC *fakeclient.Client, cdsW cdsWatchInfo, wantCCS balancer.ClientConnState, edsB *testEDSBalancer) error { xdsC.InvokeWatchClusterCallback(cdsW.update, cdsW.err) if cdsW.err != nil { return edsB.waitForResolverError(cdsW.err) } return edsB.waitForClientConnUpdate(wantCCS) } // testEDSBalancer is a fake edsBalancer used to verify different actions from // the cdsBalancer. It contains a bunch of channels to signal different events // to the test. type testEDSBalancer struct { // ccsCh is a channel used to signal the receipt of a ClientConn update. ccsCh chan balancer.ClientConnState // scStateCh is a channel used to signal the receipt of a SubConn update. 
scStateCh chan subConnWithState // resolverErrCh is a channel used to signal a resolver error. resolverErrCh chan error // closeCh is a channel used to signal the closing of this balancer. closeCh chan struct{} } type subConnWithState struct { sc balancer.SubConn state balancer.SubConnState } func newTestEDSBalancer() *testEDSBalancer { return &testEDSBalancer{ ccsCh: make(chan balancer.ClientConnState, 1), scStateCh: make(chan subConnWithState, 1), resolverErrCh: make(chan error, 1), closeCh: make(chan struct{}, 1), } } func (tb *testEDSBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { tb.ccsCh <- ccs return nil } func (tb *testEDSBalancer) ResolverError(err error) { tb.resolverErrCh <- err } func (tb *testEDSBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { tb.scStateCh <- subConnWithState{sc: sc, state: state} } func (tb *testEDSBalancer) Close() { tb.closeCh <- struct{}{} } // waitForClientConnUpdate verifies if the testEDSBalancer receives the // provided ClientConnState within a reasonable amount of time. func (tb *testEDSBalancer) waitForClientConnUpdate(wantCCS balancer.ClientConnState) error { timer := time.NewTimer(defaultTestTimeout) select { case <-timer.C: return errors.New("Timeout when expecting ClientConn update on EDS balancer") case gotCCS := <-tb.ccsCh: timer.Stop() if !cmp.Equal(gotCCS, wantCCS, cmpopts.IgnoreUnexported(attributes.Attributes{})) { return fmt.Errorf("received ClientConnState: %+v, want %+v", gotCCS, wantCCS) } return nil } } // waitForSubConnUpdate verifies if the testEDSBalancer receives the provided // SubConn update within a reasonable amount of time. 
func (tb *testEDSBalancer) waitForSubConnUpdate(wantSCS subConnWithState) error { timer := time.NewTimer(defaultTestTimeout) select { case <-timer.C: return errors.New("Timeout when expecting SubConn update on EDS balancer") case gotSCS := <-tb.scStateCh: timer.Stop() if !cmp.Equal(gotSCS, wantSCS, cmp.AllowUnexported(subConnWithState{})) { return fmt.Errorf("received SubConnState: %+v, want %+v", gotSCS, wantSCS) } return nil } } // waitForResolverError verifies if the testEDSBalancer receives the // provided resolver error within a reasonable amount of time. func (tb *testEDSBalancer) waitForResolverError(wantErr error) error { timer := time.NewTimer(defaultTestTimeout) select { case <-timer.C: return errors.New("Timeout when expecting a resolver error") case gotErr := <-tb.resolverErrCh: timer.Stop() if gotErr != wantErr { return fmt.Errorf("received resolver error: %v, want %v", gotErr, wantErr) } return nil } } // waitForClose verifies that the edsBalancer is closed with a reasonable // amount of time. func (tb *testEDSBalancer) waitForClose() error { timer := time.NewTimer(defaultTestTimeout) select { case <-timer.C: return errors.New("Timeout when expecting a close") case <-tb.closeCh: timer.Stop() return nil } } // cdsCCS is a helper function to construct a good update passed from the // xdsResolver to the cdsBalancer. func cdsCCS(cluster string, xdsClient interface{}) balancer.ClientConnState { const cdsLBConfig = `{ "loadBalancingConfig":[ { "cds_experimental":{ "Cluster": %s } } ] }` jsonSC := fmt.Sprintf(cdsLBConfig, cluster) return balancer.ClientConnState{ ResolverState: resolver.State{ ServiceConfig: internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(jsonSC), Attributes: attributes.New(xdsinternal.XDSClientID, xdsClient), }, BalancerConfig: &lbConfig{ClusterName: clusterName}, } } // edsCCS is a helper function to construct a good update passed from the // cdsBalancer to the edsBalancer. 
func edsCCS(service string, enableLRS bool, xdsClient interface{}) balancer.ClientConnState { lbCfg := &edsbalancer.EDSConfig{EDSServiceName: service} if enableLRS { lbCfg.LrsLoadReportingServerName = new(string) } return balancer.ClientConnState{ ResolverState: resolver.State{Attributes: attributes.New(xdsinternal.XDSClientID, xdsClient)}, BalancerConfig: lbCfg, } } // setup creates a cdsBalancer and an edsBalancer (and overrides the // newEDSBalancer function to return it), and also returns a cleanup function. func setup() (*cdsBalancer, *testEDSBalancer, func()) { builder := cdsBB{} tcc := &testClientConn{} cdsB := builder.Build(tcc, balancer.BuildOptions{}).(balancer.V2Balancer) edsB := newTestEDSBalancer() oldEDSBalancerBuilder := newEDSBalancer newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.V2Balancer, error) { return edsB, nil } return cdsB.(*cdsBalancer), edsB, func() { newEDSBalancer = oldEDSBalancerBuilder } } // setupWithWatch does everything that setup does, and also pushes a ClientConn // update to the cdsBalancer and waits for a CDS watch call to be registered. func setupWithWatch(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, func()) { t.Helper() xdsC := fakeclient.NewClient() cdsB, edsB, cancel := setup() if err := cdsB.UpdateClientConnState(cdsCCS(clusterName, xdsC)); err != nil { t.Fatalf("cdsBalancer.UpdateClientConnState failed with error: %v", err) } gotCluster, err := xdsC.WaitForWatchCluster() if err != nil { t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) } if gotCluster != clusterName { t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, clusterName) } return xdsC, cdsB, edsB, cancel } // TestUpdateClientConnState invokes the UpdateClientConnState method on the // cdsBalancer with different inputs and verifies that the CDS watch API on the // provided xdsClient is invoked appropriately. 
func (s) TestUpdateClientConnState(t *testing.T) { xdsC := fakeclient.NewClient() tests := []struct { name string ccs balancer.ClientConnState wantErr error wantCluster string }{ { name: "bad-lbCfg-type", ccs: balancer.ClientConnState{BalancerConfig: nil}, wantErr: balancer.ErrBadResolverState, }, { name: "empty-cluster-in-lbCfg", ccs: balancer.ClientConnState{BalancerConfig: &lbConfig{ClusterName: ""}}, wantErr: balancer.ErrBadResolverState, }, { name: "no-xdsClient-in-attributes", ccs: balancer.ClientConnState{ ResolverState: resolver.State{ Attributes: attributes.New("key", "value"), }, BalancerConfig: &lbConfig{ClusterName: clusterName}, }, wantErr: balancer.ErrBadResolverState, }, { name: "bad-xdsClient-in-attributes", ccs: balancer.ClientConnState{ ResolverState: resolver.State{ Attributes: attributes.New(xdsinternal.XDSClientID, "value"), }, BalancerConfig: &lbConfig{ClusterName: clusterName}, }, wantErr: balancer.ErrBadResolverState, }, { name: "happy-good-case", ccs: cdsCCS(clusterName, xdsC), wantCluster: clusterName, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { cdsB, _, cancel := setup() defer func() { cancel() cdsB.Close() }() if err := cdsB.UpdateClientConnState(test.ccs); err != test.wantErr { t.Fatalf("cdsBalancer.UpdateClientConnState failed with error: %v", err) } if test.wantErr != nil { // When we wanted an error and got it, we should return early. return } gotCluster, err := xdsC.WaitForWatchCluster() if err != nil { t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) } if gotCluster != test.wantCluster { t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, test.wantCluster) } }) } } // TestUpdateClientConnStateAfterClose invokes the UpdateClientConnState method // on the cdsBalancer after close and verifies that it returns an error. 
func (s) TestUpdateClientConnStateAfterClose(t *testing.T) { cdsB, _, cancel := setup() defer cancel() cdsB.Close() if err := cdsB.UpdateClientConnState(cdsCCS(clusterName, fakeclient.NewClient())); err != errBalancerClosed { t.Fatalf("UpdateClientConnState() after close returned %v, want %v", err, errBalancerClosed) } } // TestUpdateClientConnStateWithSameState verifies that a ClientConnState // update with the same cluster and xdsClient does not cause the cdsBalancer to // create a new watch. func (s) TestUpdateClientConnStateWithSameState(t *testing.T) { xdsC, cdsB, _, cancel := setupWithWatch(t) defer func() { cancel() cdsB.Close() }() if err := cdsB.UpdateClientConnState(cdsCCS(clusterName, xdsC)); err != nil { t.Fatalf("cdsBalancer.UpdateClientConnState failed with error: %v", err) } if _, err := xdsC.WaitForWatchCluster(); err != testutils.ErrRecvTimeout { t.Fatalf("waiting for WatchCluster() should have timed out, but returned error: %v", err) } } // TestHandleClusterUpdate invokes the registered CDS watch callback with // different updates and verifies that the expect ClientConnState is propagated // to the edsBalancer. 
func (s) TestHandleClusterUpdate(t *testing.T) { xdsC, cdsB, edsB, cancel := setupWithWatch(t) defer func() { cancel() cdsB.Close() }() tests := []struct { name string cdsUpdate xdsclient.CDSUpdate updateErr error wantCCS balancer.ClientConnState }{ { name: "happy-case-with-lrs", cdsUpdate: xdsclient.CDSUpdate{ServiceName: serviceName, EnableLRS: true}, wantCCS: edsCCS(serviceName, true, xdsC), }, { name: "happy-case-without-lrs", cdsUpdate: xdsclient.CDSUpdate{ServiceName: serviceName}, wantCCS: edsCCS(serviceName, false, xdsC), }, { name: "cdsWatch-returns-error", updateErr: errors.New("cdsUpdate error"), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { if err := invokeWatchCbAndWait(xdsC, cdsWatchInfo{test.cdsUpdate, test.updateErr}, test.wantCCS, edsB); err != nil { t.Fatal(err) } }) } } // TestResolverError verifies that an existing watch is cancelled when a // resolver error is received by the cdsBalancer, and also that the same error // is propagated to the edsBalancer. func (s) TestResolverError(t *testing.T) { xdsC, cdsB, edsB, cancel := setupWithWatch(t) defer func() { cancel() cdsB.Close() }() cdsUpdate := xdsclient.CDSUpdate{ServiceName: serviceName} wantCCS := edsCCS(serviceName, false, xdsC) if err := invokeWatchCbAndWait(xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } rErr := errors.New("cdsBalancer resolver error") cdsB.ResolverError(rErr) if err := xdsC.WaitForCancelClusterWatch(); err != nil { t.Fatal(err) } if err := edsB.waitForResolverError(rErr); err != nil { t.Fatal(err) } } // TestUpdateSubConnState pushes a SubConn update to the cdsBalancer and // verifies that the update is propagated to the edsBalancer. 
func (s) TestUpdateSubConnState(t *testing.T) { xdsC, cdsB, edsB, cancel := setupWithWatch(t) defer func() { cancel() cdsB.Close() }() cdsUpdate := xdsclient.CDSUpdate{ServiceName: serviceName} wantCCS := edsCCS(serviceName, false, xdsC) if err := invokeWatchCbAndWait(xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } var sc balancer.SubConn state := balancer.SubConnState{ConnectivityState: connectivity.Ready} cdsB.UpdateSubConnState(sc, state) if err := edsB.waitForSubConnUpdate(subConnWithState{sc: sc, state: state}); err != nil { t.Fatal(err) } } // TestClose calls Close() on the cdsBalancer, and verifies that the underlying // edsBalancer is also closed. func (s) TestClose(t *testing.T) { xdsC, cdsB, edsB, cancel := setupWithWatch(t) defer cancel() cdsUpdate := xdsclient.CDSUpdate{ServiceName: serviceName} wantCCS := edsCCS(serviceName, false, xdsC) if err := invokeWatchCbAndWait(xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } cdsB.Close() if err := xdsC.WaitForCancelClusterWatch(); err != nil { t.Fatal(err) } if err := edsB.waitForClose(); err != nil { t.Fatal(err) } } // TestParseConfig exercises the config parsing functionality in the cds // balancer builder. 
func (s) TestParseConfig(t *testing.T) { bb := cdsBB{} if gotName := bb.Name(); gotName != cdsName { t.Fatalf("cdsBB.Name() = %v, want %v", gotName, cdsName) } tests := []struct { name string input json.RawMessage wantCfg serviceconfig.LoadBalancingConfig wantErr bool }{ { name: "good-lb-config", input: json.RawMessage(`{"Cluster": "cluster1"}`), wantCfg: &lbConfig{ClusterName: clusterName}, }, { name: "unknown-fields-in-lb-config", input: json.RawMessage(`{"Unknown": "foobar"}`), wantCfg: &lbConfig{ClusterName: ""}, }, { name: "empty-lb-config", input: json.RawMessage(""), wantErr: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { gotCfg, gotErr := bb.ParseConfig(test.input) if (gotErr != nil) != test.wantErr { t.Fatalf("bb.ParseConfig(%v) = %v, wantErr %v", string(test.input), gotErr, test.wantErr) } if !test.wantErr { if !cmp.Equal(gotCfg, test.wantCfg) { t.Fatalf("bb.ParseConfig(%v) = %v, want %v", string(test.input), gotCfg, test.wantCfg) } } }) } } grpc-go-1.29.1/xds/internal/balancer/cdsbalancer/logging.go000066400000000000000000000013601365033716300235500ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package cdsbalancer import ( "fmt" ) const prefix = "[cds-lb %p] " func loggingPrefix(p *cdsBalancer) string { return fmt.Sprintf(prefix, p) } grpc-go-1.29.1/xds/internal/balancer/edsbalancer/000077500000000000000000000000001365033716300215755ustar00rootroot00000000000000grpc-go-1.29.1/xds/internal/balancer/edsbalancer/config.go000066400000000000000000000071101365033716300233700ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edsbalancer import ( "encoding/json" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/serviceconfig" ) // EDSConfig represents the loadBalancingConfig section of the service config // for EDS balancers. type EDSConfig struct { serviceconfig.LoadBalancingConfig // BalancerName represents the load balancer to use. BalancerName string // ChildPolicy represents the load balancing config for the child // policy. ChildPolicy *loadBalancingConfig // FallBackPolicy represents the load balancing config for the // fallback. FallBackPolicy *loadBalancingConfig // Name to use in EDS query. If not present, defaults to the server // name from the target URI. EDSServiceName string // LRS server to send load reports to. If not present, load reporting // will be disabled. If set to the empty string, load reporting will // be sent to the same server that we obtained CDS data from. LrsLoadReportingServerName *string } // edsConfigJSON is the intermediate unmarshal result of EDSConfig. 
ChildPolicy // and Fallbackspolicy are post-processed, and for each, the first installed // policy is kept. type edsConfigJSON struct { BalancerName string ChildPolicy []*loadBalancingConfig FallbackPolicy []*loadBalancingConfig EDSServiceName string LRSLoadReportingServerName *string } // UnmarshalJSON parses the JSON-encoded byte slice in data and stores it in l. // When unmarshalling, we iterate through the childPolicy/fallbackPolicy lists // and select the first LB policy which has been registered. func (l *EDSConfig) UnmarshalJSON(data []byte) error { var configJSON edsConfigJSON if err := json.Unmarshal(data, &configJSON); err != nil { return err } l.BalancerName = configJSON.BalancerName l.EDSServiceName = configJSON.EDSServiceName l.LrsLoadReportingServerName = configJSON.LRSLoadReportingServerName for _, lbcfg := range configJSON.ChildPolicy { if balancer.Get(lbcfg.Name) != nil { l.ChildPolicy = lbcfg break } } for _, lbcfg := range configJSON.FallbackPolicy { if balancer.Get(lbcfg.Name) != nil { l.FallBackPolicy = lbcfg break } } return nil } // MarshalJSON returns a JSON encoding of l. func (l *EDSConfig) MarshalJSON() ([]byte, error) { return nil, fmt.Errorf("EDSConfig.MarshalJSON() is unimplemented") } // loadBalancingConfig represents a single load balancing config, // stored in JSON format. type loadBalancingConfig struct { Name string Config json.RawMessage } // MarshalJSON returns a JSON encoding of l. func (l *loadBalancingConfig) MarshalJSON() ([]byte, error) { return nil, fmt.Errorf("loadBalancingConfig.MarshalJSON() is unimplemented") } // UnmarshalJSON parses the JSON-encoded byte slice in data and stores it in l. 
func (l *loadBalancingConfig) UnmarshalJSON(data []byte) error { var cfg map[string]json.RawMessage if err := json.Unmarshal(data, &cfg); err != nil { return err } for name, config := range cfg { l.Name = name l.Config = config } return nil } grpc-go-1.29.1/xds/internal/balancer/edsbalancer/eds.go000066400000000000000000000210171365033716300227000ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package edsbalancer contains EDS balancer implementation. package edsbalancer import ( "context" "encoding/json" "fmt" "time" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/lrs" xdsclient "google.golang.org/grpc/xds/internal/client" ) const ( defaultTimeout = 10 * time.Second edsName = "eds_experimental" ) var ( newEDSBalancer = func(cc balancer.ClientConn, enqueueState func(priorityType, balancer.State), loadStore lrs.Store, logger *grpclog.PrefixLogger) edsBalancerImplInterface { return newEDSBalancerImpl(cc, enqueueState, loadStore, logger) } ) func init() { balancer.Register(&edsBalancerBuilder{}) } type edsBalancerBuilder struct{} // Build helps implement the balancer.Builder interface. 
func (b *edsBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { ctx, cancel := context.WithCancel(context.Background()) x := &edsBalancer{ ctx: ctx, cancel: cancel, cc: cc, buildOpts: opts, grpcUpdate: make(chan interface{}), xdsClientUpdate: make(chan interface{}), childPolicyUpdate: buffer.NewUnbounded(), } loadStore := lrs.NewStore() x.logger = grpclog.NewPrefixLogger(loggingPrefix(x)) x.edsImpl = newEDSBalancer(x.cc, x.enqueueChildBalancerState, loadStore, x.logger) x.client = newXDSClientWrapper(x.handleEDSUpdate, x.loseContact, x.buildOpts, loadStore, x.logger) x.logger.Infof("Created") go x.run() return x } func (b *edsBalancerBuilder) Name() string { return edsName } func (b *edsBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { var cfg EDSConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, fmt.Errorf("unable to unmarshal balancer config %s into EDSConfig, error: %v", string(c), err) } return &cfg, nil } // edsBalancerImplInterface defines the interface that edsBalancerImpl must // implement to communicate with edsBalancer. // // It's implemented by the real eds balancer and a fake testing eds balancer. // // TODO: none of the methods in this interface needs to be exported. type edsBalancerImplInterface interface { // HandleEDSResponse passes the received EDS message from traffic director to eds balancer. HandleEDSResponse(edsResp *xdsclient.EDSUpdate) // HandleChildPolicy updates the eds balancer the intra-cluster load balancing policy to use. HandleChildPolicy(name string, config json.RawMessage) // HandleSubConnStateChange handles state change for SubConn. HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) // updateState handle a balancer state update from the priority. updateState(priority priorityType, s balancer.State) // Close closes the eds balancer. 
Close() } var _ balancer.V2Balancer = (*edsBalancer)(nil) // Assert that we implement V2Balancer // edsBalancer manages xdsClient and the actual EDS balancer implementation that // does load balancing. // // It currently has only an edsBalancer. Later, we may add fallback. type edsBalancer struct { cc balancer.ClientConn // *xdsClientConn buildOpts balancer.BuildOptions ctx context.Context cancel context.CancelFunc logger *grpclog.PrefixLogger // edsBalancer continuously monitor the channels below, and will handle events from them in sync. grpcUpdate chan interface{} xdsClientUpdate chan interface{} childPolicyUpdate *buffer.Unbounded client *xdsclientWrapper // may change when passed a different service config config *EDSConfig // may change when passed a different service config edsImpl edsBalancerImplInterface } // run gets executed in a goroutine once edsBalancer is created. It monitors updates from grpc, // xdsClient and load balancer. It synchronizes the operations that happen inside edsBalancer. It // exits when edsBalancer is closed. func (x *edsBalancer) run() { for { select { case update := <-x.grpcUpdate: x.handleGRPCUpdate(update) case update := <-x.xdsClientUpdate: x.handleXDSClientUpdate(update) case update := <-x.childPolicyUpdate.Get(): x.childPolicyUpdate.Load() u := update.(*balancerStateWithPriority) x.edsImpl.updateState(u.priority, u.s) case <-x.ctx.Done(): if x.client != nil { x.client.close() } if x.edsImpl != nil { x.edsImpl.Close() } return } } } func (x *edsBalancer) handleGRPCUpdate(update interface{}) { switch u := update.(type) { case *subConnStateUpdate: if x.edsImpl != nil { x.edsImpl.HandleSubConnStateChange(u.sc, u.state.ConnectivityState) } case *balancer.ClientConnState: x.logger.Infof("Receive update from resolver, balancer config: %+v", u.BalancerConfig) cfg, _ := u.BalancerConfig.(*EDSConfig) if cfg == nil { // service config parsing failed. should never happen. 
return } x.client.handleUpdate(cfg, u.ResolverState.Attributes) if x.config == nil { x.config = cfg return } // We will update the edsImpl with the new child policy, if we got a // different one. if x.edsImpl != nil && !cmp.Equal(cfg.ChildPolicy, x.config.ChildPolicy) { if cfg.ChildPolicy != nil { x.edsImpl.HandleChildPolicy(cfg.ChildPolicy.Name, cfg.ChildPolicy.Config) } else { x.edsImpl.HandleChildPolicy(roundrobin.Name, nil) } } x.config = cfg default: // unreachable path panic("wrong update type") } } func (x *edsBalancer) handleXDSClientUpdate(update interface{}) { switch u := update.(type) { // TODO: this func should accept (*xdsclient.EDSUpdate, error), and process // the error, instead of having a separate loseContact signal. case *xdsclient.EDSUpdate: x.edsImpl.HandleEDSResponse(u) case *loseContact: // loseContact can be useful for going into fallback. default: panic("unexpected xds client update type") } } type subConnStateUpdate struct { sc balancer.SubConn state balancer.SubConnState } func (x *edsBalancer) HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) { x.logger.Errorf("UpdateSubConnState should be called instead of HandleSubConnStateChange") } func (x *edsBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { x.logger.Errorf("UpdateClientConnState should be called instead of HandleResolvedAddrs") } func (x *edsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { update := &subConnStateUpdate{ sc: sc, state: state, } select { case x.grpcUpdate <- update: case <-x.ctx.Done(): } } func (x *edsBalancer) ResolverError(error) { // TODO: Need to distinguish between connection errors and resource removed // errors. For the former, we will need to handle it later on for fallback. // For the latter, handle it by stopping the watch, closing sub-balancers // and pickers. 
} func (x *edsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { select { case x.grpcUpdate <- &s: case <-x.ctx.Done(): } return nil } func (x *edsBalancer) handleEDSUpdate(resp *xdsclient.EDSUpdate) error { // TODO: this function should take (resp, error), and send them together on // the channel. There doesn't need to be a separate `loseContact` function. select { case x.xdsClientUpdate <- resp: case <-x.ctx.Done(): } return nil } type loseContact struct { } // TODO: delete loseContact when handleEDSUpdate takes (resp, error). func (x *edsBalancer) loseContact() { select { case x.xdsClientUpdate <- &loseContact{}: case <-x.ctx.Done(): } } type balancerStateWithPriority struct { priority priorityType s balancer.State } func (x *edsBalancer) enqueueChildBalancerState(p priorityType, s balancer.State) { x.childPolicyUpdate.Put(&balancerStateWithPriority{ priority: p, s: s, }) } func (x *edsBalancer) Close() { x.cancel() x.logger.Infof("Shutdown") } grpc-go-1.29.1/xds/internal/balancer/edsbalancer/eds_impl.go000066400000000000000000000362231365033716300237260ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package edsbalancer import ( "encoding/json" "sync" "time" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/balancer/weightedroundrobin" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/resolver" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/balancergroup" "google.golang.org/grpc/xds/internal/balancer/lrs" xdsclient "google.golang.org/grpc/xds/internal/client" ) // TODO: make this a environment variable? var defaultPriorityInitTimeout = 10 * time.Second type localityConfig struct { weight uint32 addrs []resolver.Address } // balancerGroupWithConfig contains the localities with the same priority. It // manages all localities using a balancerGroup. type balancerGroupWithConfig struct { bg *balancergroup.BalancerGroup configs map[internal.Locality]*localityConfig } // edsBalancerImpl does load balancing based on the EDS responses. Note that it // doesn't implement the balancer interface. It's intended to be used by a high // level balancer implementation. // // The localities are picked as weighted round robin. A configurable child // policy is used to manage endpoints in each locality. type edsBalancerImpl struct { cc balancer.ClientConn logger *grpclog.PrefixLogger enqueueChildBalancerStateUpdate func(priorityType, balancer.State) subBalancerBuilder balancer.Builder loadStore lrs.Store priorityToLocalities map[priorityType]*balancerGroupWithConfig // There's no need to hold any mutexes at the same time. The order to take // mutex should be: priorityMu > subConnMu, but this is implicit via // balancers (starting balancer with next priority while holding priorityMu, // and the balancer may create new SubConn). priorityMu sync.Mutex // priorities are pointers, and will be nil when EDS returns empty result. 
priorityInUse priorityType priorityLowest priorityType priorityToState map[priorityType]*balancer.State // The timer to give a priority 10 seconds to connect. And if the priority // doesn't go into Ready/Failure, start the next priority. // // One timer is enough because there can be at most one priority in init // state. priorityInitTimer *time.Timer subConnMu sync.Mutex subConnToPriority map[balancer.SubConn]priorityType pickerMu sync.Mutex dropConfig []xdsclient.OverloadDropConfig drops []*dropper innerState balancer.State // The state of the picker without drop support. } // newEDSBalancerImpl create a new edsBalancerImpl. func newEDSBalancerImpl(cc balancer.ClientConn, enqueueState func(priorityType, balancer.State), loadStore lrs.Store, logger *grpclog.PrefixLogger) *edsBalancerImpl { edsImpl := &edsBalancerImpl{ cc: cc, logger: logger, subBalancerBuilder: balancer.Get(roundrobin.Name), enqueueChildBalancerStateUpdate: enqueueState, priorityToLocalities: make(map[priorityType]*balancerGroupWithConfig), priorityToState: make(map[priorityType]*balancer.State), subConnToPriority: make(map[balancer.SubConn]priorityType), loadStore: loadStore, } // Don't start balancer group here. Start it when handling the first EDS // response. Otherwise the balancer group will be started with round-robin, // and if users specify a different sub-balancer, all balancers in balancer // group will be closed and recreated when sub-balancer update happens. return edsImpl } // HandleChildPolicy updates the child balancers handling endpoints. Child // policy is roundrobin by default. If the specified balancer is not installed, // the old child balancer will be used. // // HandleChildPolicy and HandleEDSResponse must be called by the same goroutine. 
func (edsImpl *edsBalancerImpl) HandleChildPolicy(name string, config json.RawMessage) { if edsImpl.subBalancerBuilder.Name() == name { return } newSubBalancerBuilder := balancer.Get(name) if newSubBalancerBuilder == nil { edsImpl.logger.Infof("edsBalancerImpl: failed to find balancer with name %q, keep using %q", name, edsImpl.subBalancerBuilder.Name()) return } edsImpl.subBalancerBuilder = newSubBalancerBuilder for _, bgwc := range edsImpl.priorityToLocalities { if bgwc == nil { continue } for id, config := range bgwc.configs { // TODO: (eds) add support to balancer group to support smoothly // switching sub-balancers (keep old balancer around until new // balancer becomes ready). bgwc.bg.Remove(id) bgwc.bg.Add(id, config.weight, edsImpl.subBalancerBuilder) bgwc.bg.HandleResolvedAddrs(id, config.addrs) } } } // updateDrops compares new drop policies with the old. If they are different, // it updates the drop policies and send ClientConn an updated picker. func (edsImpl *edsBalancerImpl) updateDrops(dropConfig []xdsclient.OverloadDropConfig) { if cmp.Equal(dropConfig, edsImpl.dropConfig) { return } edsImpl.pickerMu.Lock() edsImpl.dropConfig = dropConfig var newDrops []*dropper for _, c := range edsImpl.dropConfig { newDrops = append(newDrops, newDropper(c)) } edsImpl.drops = newDrops if edsImpl.innerState.Picker != nil { // Update picker with old inner picker, new drops. edsImpl.cc.UpdateState(balancer.State{ ConnectivityState: edsImpl.innerState.ConnectivityState, Picker: newDropPicker(edsImpl.innerState.Picker, newDrops, edsImpl.loadStore)}, ) } edsImpl.pickerMu.Unlock() } // HandleEDSResponse handles the EDS response and creates/deletes localities and // SubConns. It also handles drops. // // HandleChildPolicy and HandleEDSResponse must be called by the same goroutine. 
func (edsImpl *edsBalancerImpl) HandleEDSResponse(edsResp *xdsclient.EDSUpdate) { // TODO: Unhandled fields from EDS response: // - edsResp.GetPolicy().GetOverprovisioningFactor() // - locality.GetPriority() // - lbEndpoint.GetMetadata(): contains BNS name, send to sub-balancers // - as service config or as resolved address // - if socketAddress is not ip:port // - socketAddress.GetNamedPort(), socketAddress.GetResolverName() // - resolve endpoint's name with another resolver edsImpl.updateDrops(edsResp.Drops) // Filter out all localities with weight 0. // // Locality weighted load balancer can be enabled by setting an option in // CDS, and the weight of each locality. Currently, without the guarantee // that CDS is always sent, we assume locality weighted load balance is // always enabled, and ignore all weight 0 localities. // // In the future, we should look at the config in CDS response and decide // whether locality weight matters. newLocalitiesWithPriority := make(map[priorityType][]xdsclient.Locality) for _, locality := range edsResp.Localities { if locality.Weight == 0 { continue } priority := newPriorityType(locality.Priority) newLocalitiesWithPriority[priority] = append(newLocalitiesWithPriority[priority], locality) } var ( priorityLowest priorityType priorityChanged bool ) for priority, newLocalities := range newLocalitiesWithPriority { if !priorityLowest.isSet() || priorityLowest.higherThan(priority) { priorityLowest = priority } bgwc, ok := edsImpl.priorityToLocalities[priority] if !ok { // Create balancer group if it's never created (this is the first // time this priority is received). We don't start it here. It may // be started when necessary (e.g. when higher is down, or if it's a // new lowest priority). 
bgwc = &balancerGroupWithConfig{ bg: balancergroup.New(edsImpl.ccWrapperWithPriority(priority), edsImpl.loadStore, edsImpl.logger), configs: make(map[internal.Locality]*localityConfig), } edsImpl.priorityToLocalities[priority] = bgwc priorityChanged = true edsImpl.logger.Infof("New priority %v added", priority) } edsImpl.handleEDSResponsePerPriority(bgwc, newLocalities) } edsImpl.priorityLowest = priorityLowest // Delete priorities that are removed in the latest response, and also close // the balancer group. for p, bgwc := range edsImpl.priorityToLocalities { if _, ok := newLocalitiesWithPriority[p]; !ok { delete(edsImpl.priorityToLocalities, p) bgwc.bg.Close() delete(edsImpl.priorityToState, p) priorityChanged = true edsImpl.logger.Infof("Priority %v deleted", p) } } // If priority was added/removed, it may affect the balancer group to use. // E.g. priorityInUse was removed, or all priorities are down, and a new // lower priority was added. if priorityChanged { edsImpl.handlePriorityChange() } } func (edsImpl *edsBalancerImpl) handleEDSResponsePerPriority(bgwc *balancerGroupWithConfig, newLocalities []xdsclient.Locality) { // newLocalitiesSet contains all names of localities in the new EDS response // for the same priority. It's used to delete localities that are removed in // the new EDS response. newLocalitiesSet := make(map[internal.Locality]struct{}) for _, locality := range newLocalities { // One balancer for each locality. lid := locality.ID newLocalitiesSet[lid] = struct{}{} newWeight := locality.Weight var newAddrs []resolver.Address for _, lbEndpoint := range locality.Endpoints { // Filter out all "unhealthy" endpoints (unknown and // healthy are both considered to be healthy: // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). 
if lbEndpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && lbEndpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown { continue } address := resolver.Address{ Addr: lbEndpoint.Address, } if edsImpl.subBalancerBuilder.Name() == weightedroundrobin.Name && lbEndpoint.Weight != 0 { address.Metadata = &weightedroundrobin.AddrInfo{ Weight: lbEndpoint.Weight, } } newAddrs = append(newAddrs, address) } var weightChanged, addrsChanged bool config, ok := bgwc.configs[lid] if !ok { // A new balancer, add it to balancer group and balancer map. bgwc.bg.Add(lid, newWeight, edsImpl.subBalancerBuilder) config = &localityConfig{ weight: newWeight, } bgwc.configs[lid] = config // weightChanged is false for new locality, because there's no need // to update weight in bg. addrsChanged = true edsImpl.logger.Infof("New locality %v added", lid) } else { // Compare weight and addrs. if config.weight != newWeight { weightChanged = true } if !cmp.Equal(config.addrs, newAddrs) { addrsChanged = true } edsImpl.logger.Infof("Locality %v updated, weightedChanged: %v, addrsChanged: %v", lid, weightChanged, addrsChanged) } if weightChanged { config.weight = newWeight bgwc.bg.ChangeWeight(lid, newWeight) } if addrsChanged { config.addrs = newAddrs bgwc.bg.HandleResolvedAddrs(lid, newAddrs) } } // Delete localities that are removed in the latest response. for lid := range bgwc.configs { if _, ok := newLocalitiesSet[lid]; !ok { bgwc.bg.Remove(lid) delete(bgwc.configs, lid) edsImpl.logger.Infof("Locality %v deleted", lid) } } } // HandleSubConnStateChange handles the state change and update pickers accordingly. func (edsImpl *edsBalancerImpl) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { edsImpl.subConnMu.Lock() var bgwc *balancerGroupWithConfig if p, ok := edsImpl.subConnToPriority[sc]; ok { if s == connectivity.Shutdown { // Only delete sc from the map when state changed to Shutdown. 
delete(edsImpl.subConnToPriority, sc) } bgwc = edsImpl.priorityToLocalities[p] } edsImpl.subConnMu.Unlock() if bgwc == nil { edsImpl.logger.Infof("edsBalancerImpl: priority not found for sc state change") return } if bg := bgwc.bg; bg != nil { bg.HandleSubConnStateChange(sc, s) } } // updateState first handles priority, and then wraps picker in a drop picker // before forwarding the update. func (edsImpl *edsBalancerImpl) updateState(priority priorityType, s balancer.State) { _, ok := edsImpl.priorityToLocalities[priority] if !ok { edsImpl.logger.Infof("eds: received picker update from unknown priority") return } if edsImpl.handlePriorityWithNewState(priority, s) { edsImpl.pickerMu.Lock() defer edsImpl.pickerMu.Unlock() edsImpl.innerState = s // Don't reset drops when it's a state change. edsImpl.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: newDropPicker(s.Picker, edsImpl.drops, edsImpl.loadStore)}) } } func (edsImpl *edsBalancerImpl) ccWrapperWithPriority(priority priorityType) *edsBalancerWrapperCC { return &edsBalancerWrapperCC{ ClientConn: edsImpl.cc, priority: priority, parent: edsImpl, } } // edsBalancerWrapperCC implements the balancer.ClientConn API and get passed to // each balancer group. It contains the locality priority. 
type edsBalancerWrapperCC struct { balancer.ClientConn priority priorityType parent *edsBalancerImpl } func (ebwcc *edsBalancerWrapperCC) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { return ebwcc.parent.newSubConn(ebwcc.priority, addrs, opts) } func (ebwcc *edsBalancerWrapperCC) UpdateBalancerState(state connectivity.State, picker balancer.Picker) { } func (ebwcc *edsBalancerWrapperCC) UpdateState(state balancer.State) { ebwcc.parent.enqueueChildBalancerStateUpdate(ebwcc.priority, state) } func (edsImpl *edsBalancerImpl) newSubConn(priority priorityType, addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { sc, err := edsImpl.cc.NewSubConn(addrs, opts) if err != nil { return nil, err } edsImpl.subConnMu.Lock() edsImpl.subConnToPriority[sc] = priority edsImpl.subConnMu.Unlock() return sc, nil } // Close closes the balancer. func (edsImpl *edsBalancerImpl) Close() { for _, bgwc := range edsImpl.priorityToLocalities { if bg := bgwc.bg; bg != nil { bg.Close() } } } type dropPicker struct { drops []*dropper p balancer.V2Picker loadStore lrs.Store } func newDropPicker(p balancer.V2Picker, drops []*dropper, loadStore lrs.Store) *dropPicker { return &dropPicker{ drops: drops, p: p, loadStore: loadStore, } } func (d *dropPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { var ( drop bool category string ) for _, dp := range d.drops { if dp.drop() { drop = true category = dp.c.Category break } } if drop { if d.loadStore != nil { d.loadStore.CallDropped(category) } return balancer.PickResult{}, status.Errorf(codes.Unavailable, "RPC is dropped") } // TODO: (eds) don't drop unless the inner picker is READY. Similar to // https://github.com/grpc/grpc-go/issues/2622. return d.p.Pick(info) } grpc-go-1.29.1/xds/internal/balancer/edsbalancer/eds_impl_priority.go000066400000000000000000000271171365033716300256710ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edsbalancer import ( "fmt" "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" ) // handlePriorityChange handles priority after EDS adds/removes a // priority. // // - If all priorities were deleted, unset priorityInUse, and set parent // ClientConn to TransientFailure // - If priorityInUse wasn't set, this is either the first EDS resp, or the // previous EDS resp deleted everything. Set priorityInUse to 0, and start 0. // - If priorityInUse was deleted, send the picker from the new lowest priority // to parent ClientConn, and set priorityInUse to the new lowest. // - If priorityInUse has a non-Ready state, and also there's a priority lower // than priorityInUse (which means a lower priority was added), set the next // priority as new priorityInUse, and start the bg. func (edsImpl *edsBalancerImpl) handlePriorityChange() { edsImpl.priorityMu.Lock() defer edsImpl.priorityMu.Unlock() // Everything was removed by EDS. if !edsImpl.priorityLowest.isSet() { edsImpl.priorityInUse = newPriorityTypeUnset() edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPickerV2(balancer.ErrTransientFailure)}) return } // priorityInUse wasn't set, use 0. 
if !edsImpl.priorityInUse.isSet() { edsImpl.logger.Infof("Switching priority from unset to %v", 0) edsImpl.startPriority(newPriorityType(0)) return } // priorityInUse was deleted, use the new lowest. if _, ok := edsImpl.priorityToLocalities[edsImpl.priorityInUse]; !ok { oldP := edsImpl.priorityInUse edsImpl.priorityInUse = edsImpl.priorityLowest edsImpl.logger.Infof("Switching priority from %v to %v, because former was deleted", oldP, edsImpl.priorityInUse) if s, ok := edsImpl.priorityToState[edsImpl.priorityLowest]; ok { edsImpl.cc.UpdateState(*s) } else { // If state for priorityLowest is not found, this means priorityLowest was // started, but never sent any update. The init timer fired and // triggered the next priority. The old_priorityInUse (that was just // deleted EDS) was picked later. // // We don't have an old state to send to parent, but we also don't // want parent to keep using picker from old_priorityInUse. Send an // update to trigger block picks until a new picker is ready. edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)}) } return } // priorityInUse is not ready, look for next priority, and use if found. if s, ok := edsImpl.priorityToState[edsImpl.priorityInUse]; ok && s.ConnectivityState != connectivity.Ready { pNext := edsImpl.priorityInUse.nextLower() if _, ok := edsImpl.priorityToLocalities[pNext]; ok { edsImpl.logger.Infof("Switching priority from %v to %v, because latter was added, and former wasn't Ready") edsImpl.startPriority(pNext) } } } // startPriority sets priorityInUse to p, and starts the balancer group for p. // It also starts a timer to fall to next priority after timeout. // // Caller must hold priorityMu, priority must exist, and edsImpl.priorityInUse // must be non-nil. 
func (edsImpl *edsBalancerImpl) startPriority(priority priorityType) { edsImpl.priorityInUse = priority p := edsImpl.priorityToLocalities[priority] // NOTE: this will eventually send addresses to sub-balancers. If the // sub-balancer tries to update picker, it will result in a deadlock on // priorityMu in the update is handled synchronously. The deadlock is // currently avoided by handling balancer update in a goroutine (the run // goroutine in the parent eds balancer). When priority balancer is split // into its own, this asynchronous state handling needs to be copied. p.bg.Start() // startPriority can be called when // 1. first EDS resp, start p0 // 2. a high priority goes Failure, start next // 3. a high priority init timeout, start next // // In all the cases, the existing init timer is either closed, also already // expired. There's no need to close the old timer. edsImpl.priorityInitTimer = time.AfterFunc(defaultPriorityInitTimeout, func() { edsImpl.priorityMu.Lock() defer edsImpl.priorityMu.Unlock() if !edsImpl.priorityInUse.equal(priority) { return } edsImpl.priorityInitTimer = nil pNext := priority.nextLower() if _, ok := edsImpl.priorityToLocalities[pNext]; ok { edsImpl.startPriority(pNext) } }) } // handlePriorityWithNewState start/close priorities based on the connectivity // state. It returns whether the state should be forwarded to parent ClientConn. func (edsImpl *edsBalancerImpl) handlePriorityWithNewState(priority priorityType, s balancer.State) bool { edsImpl.priorityMu.Lock() defer edsImpl.priorityMu.Unlock() if !edsImpl.priorityInUse.isSet() { grpclog.Infof("eds: received picker update when no priority is in use (EDS returned an empty list)") return false } if edsImpl.priorityInUse.higherThan(priority) { // Lower priorities should all be closed, this is an unexpected update. 
grpclog.Infof("eds: received picker update from priority lower then priorityInUse") return false } bState, ok := edsImpl.priorityToState[priority] if !ok { bState = &balancer.State{} edsImpl.priorityToState[priority] = bState } oldState := bState.ConnectivityState *bState = s switch s.ConnectivityState { case connectivity.Ready: return edsImpl.handlePriorityWithNewStateReady(priority) case connectivity.TransientFailure: return edsImpl.handlePriorityWithNewStateTransientFailure(priority) case connectivity.Connecting: return edsImpl.handlePriorityWithNewStateConnecting(priority, oldState) default: // New state is Idle, should never happen. Don't forward. return false } } // handlePriorityWithNewStateReady handles state Ready and decides whether to // forward update or not. // // An update with state Ready: // - If it's from higher priority: // - Forward the update // - Set the priority as priorityInUse // - Close all priorities lower than this one // - If it's from priorityInUse: // - Forward and do nothing else // // Caller must make sure priorityInUse is not higher than priority. // // Caller must hold priorityMu. func (edsImpl *edsBalancerImpl) handlePriorityWithNewStateReady(priority priorityType) bool { // If one priority higher or equal to priorityInUse goes Ready, stop the // init timer. If update is from higher than priorityInUse, // priorityInUse will be closed, and the init timer will become useless. 
if timer := edsImpl.priorityInitTimer; timer != nil { timer.Stop() edsImpl.priorityInitTimer = nil } if edsImpl.priorityInUse.lowerThan(priority) { edsImpl.logger.Infof("Switching priority from %v to %v, because latter became Ready", edsImpl.priorityInUse, priority) edsImpl.priorityInUse = priority for i := priority.nextLower(); !i.lowerThan(edsImpl.priorityLowest); i = i.nextLower() { edsImpl.priorityToLocalities[i].bg.Close() } return true } return true } // handlePriorityWithNewStateTransientFailure handles state TransientFailure and // decides whether to forward update or not. // // An update with state Failure: // - If it's from a higher priority: // - Do not forward, and do nothing // - If it's from priorityInUse: // - If there's no lower: // - Forward and do nothing else // - If there's a lower priority: // - Forward // - Set lower as priorityInUse // - Start lower // // Caller must make sure priorityInUse is not higher than priority. // // Caller must hold priorityMu. func (edsImpl *edsBalancerImpl) handlePriorityWithNewStateTransientFailure(priority priorityType) bool { if edsImpl.priorityInUse.lowerThan(priority) { return false } // priorityInUse sends a failure. Stop its init timer. if timer := edsImpl.priorityInitTimer; timer != nil { timer.Stop() edsImpl.priorityInitTimer = nil } pNext := priority.nextLower() if _, okNext := edsImpl.priorityToLocalities[pNext]; !okNext { return true } edsImpl.logger.Infof("Switching priority from %v to %v, because former became TransientFailure", priority, pNext) edsImpl.startPriority(pNext) return true } // handlePriorityWithNewStateConnecting handles state Connecting and decides // whether to forward update or not. // // An update with state Connecting: // - If it's from a higher priority // - Do nothing // - If it's from priorityInUse, the behavior depends on previous state. // // When new state is Connecting, the behavior depends on previous state. 
If the // previous state was Ready, this is a transition out from Ready to Connecting. // Assuming there are multiple backends in the same priority, this mean we are // in a bad situation and we should failover to the next priority (Side note: // the current connectivity state aggregating algorhtim (e.g. round-robin) is // not handling this right, because if many backends all go from Ready to // Connecting, the overall situation is more like TransientFailure, not // Connecting). // // If the previous state was Idle, we don't do anything special with failure, // and simply forward the update. The init timer should be in process, will // handle failover if it timeouts. If the previous state was TransientFailure, // we do not forward, because the lower priority is in use. // // Caller must make sure priorityInUse is not higher than priority. // // Caller must hold priorityMu. func (edsImpl *edsBalancerImpl) handlePriorityWithNewStateConnecting(priority priorityType, oldState connectivity.State) bool { if edsImpl.priorityInUse.lowerThan(priority) { return false } switch oldState { case connectivity.Ready: pNext := priority.nextLower() if _, okNext := edsImpl.priorityToLocalities[pNext]; !okNext { return true } edsImpl.logger.Infof("Switching priority from %v to %v, because former became Connecting from Ready", priority, pNext) edsImpl.startPriority(pNext) return true case connectivity.Idle: return true case connectivity.TransientFailure: return false default: // Old state is Connecting or Shutdown. Don't forward. return false } } // priorityType represents the priority from EDS response. // // 0 is the highest priority. The bigger the number, the lower the priority. 
type priorityType struct {
	set bool   // distinguishes a real priority from the unset zero value
	p   uint32 // numeric priority; only meaningful when set is true
}

// newPriorityType returns a set priorityType wrapping p.
func newPriorityType(p uint32) priorityType {
	return priorityType{
		set: true,
		p:   p,
	}
}

// newPriorityTypeUnset returns the unset ("Nil") priority.
func newPriorityTypeUnset() priorityType {
	return priorityType{}
}

// isSet reports whether this priority carries a real value.
func (p priorityType) isSet() bool {
	return p.set
}

// equal reports whether p and p2 are the same priority.
// Panics if either side is unset.
func (p priorityType) equal(p2 priorityType) bool {
	if !p.isSet() || !p2.isSet() {
		panic("priority unset")
	}
	return p == p2
}

// higherThan reports whether p is a higher priority than p2; a smaller number
// means a higher priority. Panics if either side is unset.
func (p priorityType) higherThan(p2 priorityType) bool {
	if !p.isSet() || !p2.isSet() {
		panic("priority unset")
	}
	return p.p < p2.p
}

// lowerThan reports whether p is a lower priority than p2; a bigger number
// means a lower priority. Panics if either side is unset.
func (p priorityType) lowerThan(p2 priorityType) bool {
	if !p.isSet() || !p2.isSet() {
		panic("priority unset")
	}
	return p.p > p2.p
}

// nextLower returns the priority one step lower than p (numerically p+1).
// Panics if p is unset.
func (p priorityType) nextLower() priorityType {
	if !p.isSet() {
		panic("priority unset")
	}
	return priorityType{
		set: true,
		p:   p.p + 1,
	}
}

// String implements fmt.Stringer; the unset priority prints as "Nil".
func (p priorityType) String() string {
	if !p.set {
		return "Nil"
	}
	return fmt.Sprint(p.p)
}
grpc-go-1.29.1/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go000066400000000000000000000724241365033716300267300ustar00rootroot00000000000000/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package edsbalancer

import (
	"testing"
	"time"

	corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
	"github.com/google/go-cmp/cmp"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	xdsclient "google.golang.org/grpc/xds/internal/client"
	"google.golang.org/grpc/xds/internal/testutils"
)

// When a high priority is ready, adding/removing lower locality doesn't cause
// changes.
//
// Init 0 and 1; 0 is up, use 0; add 2, use 0; remove 2, use 0.
func (s) TestEDSPriority_HighPriorityReady(t *testing.T) {
	cc := testutils.NewTestClientConn(t)
	edsb := newEDSBalancerImpl(cc, nil, nil, nil)
	edsb.enqueueChildBalancerStateUpdate = edsb.updateState

	// Two localities, with priorities [0, 1], each with one backend.
	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
	clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
	edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))

	// Only p0's backend should get a SubConn; p1 stays cold while p0 works.
	addrs1 := <-cc.NewSubConnAddrsCh
	if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want {
		t.Fatalf("sc is created with addr %v, want %v", got, want)
	}
	sc1 := <-cc.NewSubConnCh

	// p0 is ready.
	edsb.HandleSubConnStateChange(sc1, connectivity.Connecting)
	edsb.HandleSubConnStateChange(sc1, connectivity.Ready)

	// Test roundrobin with only p0 subconns.
	p1 := <-cc.NewPickerCh
	want := []balancer.SubConn{sc1}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

	// Add p2, it shouldn't cause any updates.
	clab2 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
	clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
	clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
	clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil)
	edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab2.Build()))

	select {
	case <-cc.NewPickerCh:
		t.Fatalf("got unexpected new picker")
	case <-cc.NewSubConnCh:
		t.Fatalf("got unexpected new SubConn")
	case <-cc.RemoveSubConnCh:
		t.Fatalf("got unexpected remove SubConn")
	case <-time.After(time.Millisecond * 100):
	}

	// Remove p2, no updates.
	clab3 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
	clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
	clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
	edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab3.Build()))

	select {
	case <-cc.NewPickerCh:
		t.Fatalf("got unexpected new picker")
	case <-cc.NewSubConnCh:
		t.Fatalf("got unexpected new SubConn")
	case <-cc.RemoveSubConnCh:
		t.Fatalf("got unexpected remove SubConn")
	case <-time.After(time.Millisecond * 100):
	}
}

// Lower priority is used when higher priority is not ready.
//
// Init 0 and 1; 0 is up, use 0; 0 is down, 1 is up, use 1; add 2, use 1; 1 is
// down, use 2; remove 2, use 1.
func (s) TestEDSPriority_SwitchPriority(t *testing.T) {
	cc := testutils.NewTestClientConn(t)
	edsb := newEDSBalancerImpl(cc, nil, nil, nil)
	edsb.enqueueChildBalancerStateUpdate = edsb.updateState

	// Two localities, with priorities [0, 1], each with one backend.
clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build())) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc0 := <-cc.NewSubConnCh // p0 is ready. edsb.HandleSubConnStateChange(sc0, connectivity.Connecting) edsb.HandleSubConnStateChange(sc0, connectivity.Ready) // Test roundrobin with only p0 subconns. p0 := <-cc.NewPickerCh want := []balancer.SubConn{sc0} if err := testutils.IsRoundRobin(want, subConnFromPicker(p0)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Turn down 0, 1 is used. edsb.HandleSubConnStateChange(sc0, connectivity.TransientFailure) addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc1 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc1, connectivity.Connecting) edsb.HandleSubConnStateChange(sc1, connectivity.Ready) // Test pick with 1. p1 := <-cc.NewPickerCh for i := 0; i < 5; i++ { gotSCSt, _ := p1.Pick(balancer.PickInfo{}) if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) } } // Add p2, it shouldn't cause any udpates. 
clab2 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab2.Build())) select { case <-cc.NewPickerCh: t.Fatalf("got unexpected new picker") case <-cc.NewSubConnCh: t.Fatalf("got unexpected new SubConn") case <-cc.RemoveSubConnCh: t.Fatalf("got unexpected remove SubConn") case <-time.After(time.Millisecond * 100): } // Turn down 1, use 2 edsb.HandleSubConnStateChange(sc1, connectivity.TransientFailure) addrs2 := <-cc.NewSubConnAddrsCh if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc2 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc2, connectivity.Connecting) edsb.HandleSubConnStateChange(sc2, connectivity.Ready) // Test pick with 2. p2 := <-cc.NewPickerCh for i := 0; i < 5; i++ { gotSCSt, _ := p2.Pick(balancer.PickInfo{}) if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) } } // Remove 2, use 1. clab3 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab3.Build())) // p2 SubConns are removed. scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scToRemove) } // Should get an update with 1's old picker, to override 2's old picker. 
p3 := <-cc.NewPickerCh for i := 0; i < 5; i++ { if _, err := p3.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) } } } // Add a lower priority while the higher priority is down. // // Init 0 and 1; 0 and 1 both down; add 2, use 2. func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { cc := testutils.NewTestClientConn(t) edsb := newEDSBalancerImpl(cc, nil, nil, nil) edsb.enqueueChildBalancerStateUpdate = edsb.updateState // Two localities, with different priorities, each with one backend. clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build())) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc0 := <-cc.NewSubConnCh // Turn down 0, 1 is used. edsb.HandleSubConnStateChange(sc0, connectivity.TransientFailure) addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc1 := <-cc.NewSubConnCh // Turn down 1, pick should error. edsb.HandleSubConnStateChange(sc1, connectivity.TransientFailure) // Test pick failure. pFail := <-cc.NewPickerCh for i := 0; i < 5; i++ { if _, err := pFail.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) } } // Add p2, it should create a new SubConn. 
clab2 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab2.Build())) addrs2 := <-cc.NewSubConnAddrsCh if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc2 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc2, connectivity.Connecting) edsb.HandleSubConnStateChange(sc2, connectivity.Ready) // Test pick with 2. p2 := <-cc.NewPickerCh for i := 0; i < 5; i++ { gotSCSt, _ := p2.Pick(balancer.PickInfo{}) if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) } } } // When a higher priority becomes available, all lower priorities are closed. // // Init 0,1,2; 0 and 1 down, use 2; 0 up, close 1 and 2. func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) { defer time.Sleep(10 * time.Millisecond) cc := testutils.NewTestClientConn(t) edsb := newEDSBalancerImpl(cc, nil, nil, nil) edsb.enqueueChildBalancerStateUpdate = edsb.updateState // Two localities, with priorities [0,1,2], each with one backend. clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab1.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build())) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc0 := <-cc.NewSubConnCh // Turn down 0, 1 is used. 
edsb.HandleSubConnStateChange(sc0, connectivity.TransientFailure) addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc1 := <-cc.NewSubConnCh // Turn down 1, 2 is used. edsb.HandleSubConnStateChange(sc1, connectivity.TransientFailure) addrs2 := <-cc.NewSubConnAddrsCh if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc2 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc2, connectivity.Connecting) edsb.HandleSubConnStateChange(sc2, connectivity.Ready) // Test pick with 2. p2 := <-cc.NewPickerCh for i := 0; i < 5; i++ { gotSCSt, _ := p2.Pick(balancer.PickInfo{}) if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) } } // When 0 becomes ready, 0 should be used, 1 and 2 should all be closed. edsb.HandleSubConnStateChange(sc0, connectivity.Ready) // sc1 and sc2 should be removed. // // With localities caching, the lower priorities are closed after a timeout, // in goroutines. The order is no longer guaranteed. scToRemove := []balancer.SubConn{<-cc.RemoveSubConnCh, <-cc.RemoveSubConnCh} if !(cmp.Equal(scToRemove[0], sc1, cmp.AllowUnexported(testutils.TestSubConn{})) && cmp.Equal(scToRemove[1], sc2, cmp.AllowUnexported(testutils.TestSubConn{}))) && !(cmp.Equal(scToRemove[0], sc2, cmp.AllowUnexported(testutils.TestSubConn{})) && cmp.Equal(scToRemove[1], sc1, cmp.AllowUnexported(testutils.TestSubConn{}))) { t.Errorf("RemoveSubConn, want [%v, %v], got %v", sc1, sc2, scToRemove) } // Test pick with 0. 
// (Tail of the previous test: after p0 recovers, only sc0 is picked.)
p0 := <-cc.NewPickerCh
for i := 0; i < 5; i++ {
	gotSCSt, _ := p0.Pick(balancer.PickInfo{})
	if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) {
		t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0)
	}
}
}

// At init, start the next lower priority after timeout if the higher priority
// doesn't get ready.
//
// Init 0,1; 0 is not ready (in connecting), after timeout, use 1.
func (s) TestEDSPriority_InitTimeout(t *testing.T) {
	const testPriorityInitTimeout = time.Second
	// Shorten the package-level init timeout for this test, and restore it on
	// exit (immediately-invoked func returns the restore closure to defer).
	defer func() func() {
		old := defaultPriorityInitTimeout
		defaultPriorityInitTimeout = testPriorityInitTimeout
		return func() {
			defaultPriorityInitTimeout = old
		}
	}()()

	cc := testutils.NewTestClientConn(t)
	edsb := newEDSBalancerImpl(cc, nil, nil, nil)
	edsb.enqueueChildBalancerStateUpdate = edsb.updateState

	// Two localities, with different priorities, each with one backend.
	clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
	clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
	clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
	edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build()))

	addrs0 := <-cc.NewSubConnAddrsCh
	if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want {
		t.Fatalf("sc is created with addr %v, want %v", got, want)
	}
	sc0 := <-cc.NewSubConnCh

	// Keep 0 in connecting, 1 will be used after init timeout.
	edsb.HandleSubConnStateChange(sc0, connectivity.Connecting)

	// Make sure no new SubConn is created before the timeout fires.
	select {
	case <-time.After(testPriorityInitTimeout * 3 / 4):
	case <-cc.NewSubConnAddrsCh:
		t.Fatalf("Got a new SubConn too early (Within timeout). Expect a new SubConn only after timeout")
	}

	addrs1 := <-cc.NewSubConnAddrsCh
	if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want {
		t.Fatalf("sc is created with addr %v, want %v", got, want)
	}
	sc1 := <-cc.NewSubConnCh
	edsb.HandleSubConnStateChange(sc1, connectivity.Connecting)
	edsb.HandleSubConnStateChange(sc1, connectivity.Ready)

	// Test pick with 1.
	p1 := <-cc.NewPickerCh
	for i := 0; i < 5; i++ {
		gotSCSt, _ := p1.Pick(balancer.PickInfo{})
		if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) {
			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1)
		}
	}
}

// Add localities to existing priorities.
//
//  - start with 2 locality with p0 and p1
//  - add localities to existing p0 and p1
func (s) TestEDSPriority_MultipleLocalities(t *testing.T) {
	cc := testutils.NewTestClientConn(t)
	edsb := newEDSBalancerImpl(cc, nil, nil, nil)
	edsb.enqueueChildBalancerStateUpdate = edsb.updateState

	// Two localities, with different priorities, each with one backend.
	clab0 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil)
	clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil)
	clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil)
	edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab0.Build()))

	addrs0 := <-cc.NewSubConnAddrsCh
	if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want {
		t.Fatalf("sc is created with addr %v, want %v", got, want)
	}
	sc0 := <-cc.NewSubConnCh
	edsb.HandleSubConnStateChange(sc0, connectivity.Connecting)
	edsb.HandleSubConnStateChange(sc0, connectivity.Ready)

	// Test roundrobin with only p0 subconns.
	p0 := <-cc.NewPickerCh
	want := []balancer.SubConn{sc0}
	if err := testutils.IsRoundRobin(want, subConnFromPicker(p0)); err != nil {
		t.Fatalf("want %v, got %v", want, err)
	}

	// Turn down p0 subconns, p1 subconns will be created.
edsb.HandleSubConnStateChange(sc0, connectivity.TransientFailure) addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc1 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc1, connectivity.Connecting) edsb.HandleSubConnStateChange(sc1, connectivity.Ready) // Test roundrobin with only p1 subconns. p1 := <-cc.NewPickerCh want = []balancer.SubConn{sc1} if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Reconnect p0 subconns, p1 subconn will be closed. edsb.HandleSubConnStateChange(sc0, connectivity.Ready) scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) } // Test roundrobin with only p0 subconns. p2 := <-cc.NewPickerCh want = []balancer.SubConn{sc0} if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Add two localities, with two priorities, with one backend. clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) clab1.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) clab1.AddLocality(testSubZones[3], 1, 1, testEndpointAddrs[3:4], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build())) addrs2 := <-cc.NewSubConnAddrsCh if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc2 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc2, connectivity.Connecting) edsb.HandleSubConnStateChange(sc2, connectivity.Ready) // Test roundrobin with only two p0 subconns. 
p3 := <-cc.NewPickerCh want = []balancer.SubConn{sc0, sc2} if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Turn down p0 subconns, p1 subconns will be created. edsb.HandleSubConnStateChange(sc0, connectivity.TransientFailure) edsb.HandleSubConnStateChange(sc2, connectivity.TransientFailure) sc3 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc3, connectivity.Connecting) edsb.HandleSubConnStateChange(sc3, connectivity.Ready) sc4 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc4, connectivity.Connecting) edsb.HandleSubConnStateChange(sc4, connectivity.Ready) // Test roundrobin with only p1 subconns. p4 := <-cc.NewPickerCh want = []balancer.SubConn{sc3, sc4} if err := testutils.IsRoundRobin(want, subConnFromPicker(p4)); err != nil { t.Fatalf("want %v, got %v", want, err) } } // EDS removes all localities, and re-adds them. func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { const testPriorityInitTimeout = time.Second defer func() func() { old := defaultPriorityInitTimeout defaultPriorityInitTimeout = testPriorityInitTimeout return func() { defaultPriorityInitTimeout = old } }()() cc := testutils.NewTestClientConn(t) edsb := newEDSBalancerImpl(cc, nil, nil, nil) edsb.enqueueChildBalancerStateUpdate = edsb.updateState // Two localities, with different priorities, each with one backend. 
clab0 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab0.Build())) addrs0 := <-cc.NewSubConnAddrsCh if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc0 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc0, connectivity.Connecting) edsb.HandleSubConnStateChange(sc0, connectivity.Ready) // Test roundrobin with only p0 subconns. p0 := <-cc.NewPickerCh want := []balancer.SubConn{sc0} if err := testutils.IsRoundRobin(want, subConnFromPicker(p0)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Remove all priorities. clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build())) // p0 subconn should be removed. scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc0, scToRemove) } // Test pick return TransientFailure. pFail := <-cc.NewPickerCh for i := 0; i < 5; i++ { if _, err := pFail.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) } } // Re-add two localities, with previous priorities, but different backends. 
clab2 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[3:4], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab2.Build())) addrs01 := <-cc.NewSubConnAddrsCh if got, want := addrs01[0].Addr, testEndpointAddrs[2]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc01 := <-cc.NewSubConnCh // Don't send any update to p0, so to not override the old state of p0. // Later, connect to p1 and then remove p1. This will fallback to p0, and // will send p0's old picker if they are not correctly removed. // p1 will be used after priority init timeout. addrs11 := <-cc.NewSubConnAddrsCh if got, want := addrs11[0].Addr, testEndpointAddrs[3]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc11 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc11, connectivity.Connecting) edsb.HandleSubConnStateChange(sc11, connectivity.Ready) // Test roundrobin with only p1 subconns. p1 := <-cc.NewPickerCh want = []balancer.SubConn{sc11} if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Remove p1 from EDS, to fallback to p0. clab3 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab3.Build())) // p1 subconn should be removed. scToRemove1 := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove1, sc11, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc11, scToRemove1) } // Test pick return TransientFailure. 
pFail1 := <-cc.NewPickerCh for i := 0; i < 5; i++ { if scst, err := pFail1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { t.Fatalf("want pick error _, %v, got %v, _ ,%v", balancer.ErrTransientFailure, scst, err) } } // Send an ready update for the p0 sc that was received when re-adding // localities to EDS. edsb.HandleSubConnStateChange(sc01, connectivity.Connecting) edsb.HandleSubConnStateChange(sc01, connectivity.Ready) // Test roundrobin with only p0 subconns. p2 := <-cc.NewPickerCh want = []balancer.SubConn{sc01} if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) } select { case <-cc.NewPickerCh: t.Fatalf("got unexpected new picker") case <-cc.NewSubConnCh: t.Fatalf("got unexpected new SubConn") case <-cc.RemoveSubConnCh: t.Fatalf("got unexpected remove SubConn") case <-time.After(time.Millisecond * 100): } } func (s) TestPriorityType(t *testing.T) { p0 := newPriorityType(0) p1 := newPriorityType(1) p2 := newPriorityType(2) if !p0.higherThan(p1) || !p0.higherThan(p2) { t.Errorf("want p0 to be higher than p1 and p2, got p0>p1: %v, p0>p2: %v", !p0.higherThan(p1), !p0.higherThan(p2)) } if !p1.lowerThan(p0) || !p1.higherThan(p2) { t.Errorf("want p1 to be between p0 and p2, got p1p2: %v", !p1.lowerThan(p0), !p1.higherThan(p2)) } if !p2.lowerThan(p0) || !p2.lowerThan(p1) { t.Errorf("want p2 to be lower than p0 and p1, got p2") } else if i > 50 && err != nil { t.Errorf("The second 50%% picks should be non-drops, got error %v", err) } } // The same locality, remove drops. clab6 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab6.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab6.Build())) // Pick without drops. 
p6 := <-cc.NewPickerCh for i := 0; i < 5; i++ { gotSCSt, _ := p6.Pick(balancer.PickInfo{}) if !cmp.Equal(gotSCSt.SubConn, sc3, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc3) } } } // 2 locality // - start with 2 locality // - add locality // - remove locality // - address change for the locality // - update locality weight func (s) TestEDS_TwoLocalities(t *testing.T) { cc := testutils.NewTestClientConn(t) edsb := newEDSBalancerImpl(cc, nil, nil, nil) edsb.enqueueChildBalancerStateUpdate = edsb.updateState // Two localities, each with one backend. clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build())) sc1 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc1, connectivity.Connecting) edsb.HandleSubConnStateChange(sc1, connectivity.Ready) // Add the second locality later to make sure sc2 belongs to the second // locality. Otherwise the test is flaky because of a map is used in EDS to // keep localities. clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build())) sc2 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc2, connectivity.Connecting) edsb.HandleSubConnStateChange(sc2, connectivity.Ready) // Test roundrobin with two subconns. p1 := <-cc.NewPickerCh want := []balancer.SubConn{sc1, sc2} if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Add another locality, with one backend. 
clab2 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab2.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) clab2.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab2.Build())) sc3 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc3, connectivity.Connecting) edsb.HandleSubConnStateChange(sc3, connectivity.Ready) // Test roundrobin with three subconns. p2 := <-cc.NewPickerCh want = []balancer.SubConn{sc1, sc2, sc3} if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Remove first locality. clab3 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab3.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) clab3.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab3.Build())) scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) } edsb.HandleSubConnStateChange(scToRemove, connectivity.Shutdown) // Test pick with two subconns (without the first one). p3 := <-cc.NewPickerCh want = []balancer.SubConn{sc2, sc3} if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Add a backend to the last locality. 
clab4 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab4.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) clab4.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab4.Build())) sc4 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc4, connectivity.Connecting) edsb.HandleSubConnStateChange(sc4, connectivity.Ready) // Test pick with two subconns (without the first one). p4 := <-cc.NewPickerCh // Locality-1 will be picked twice, and locality-2 will be picked twice. // Locality-1 contains only sc2, locality-2 contains sc3 and sc4. So expect // two sc2's and sc3, sc4. want = []balancer.SubConn{sc2, sc2, sc3, sc4} if err := testutils.IsRoundRobin(want, subConnFromPicker(p4)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Change weight of the locality[1]. clab5 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab5.AddLocality(testSubZones[1], 2, 0, testEndpointAddrs[1:2], nil) clab5.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab5.Build())) // Test pick with two subconns different locality weight. p5 := <-cc.NewPickerCh // Locality-1 will be picked four times, and locality-2 will be picked twice // (weight 2 and 1). Locality-1 contains only sc2, locality-2 contains sc3 and // sc4. So expect four sc2's and sc3, sc4. want = []balancer.SubConn{sc2, sc2, sc2, sc2, sc3, sc4} if err := testutils.IsRoundRobin(want, subConnFromPicker(p5)); err != nil { t.Fatalf("want %v, got %v", want, err) } // Change weight of the locality[1] to 0, it should never be picked. 
clab6 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab6.AddLocality(testSubZones[1], 0, 0, testEndpointAddrs[1:2], nil) clab6.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab6.Build())) // Changing weight of locality[1] to 0 caused it to be removed. It's subconn // should also be removed. // // NOTE: this is because we handle locality with weight 0 same as the // locality doesn't exist. If this changes in the future, this removeSubConn // behavior will also change. scToRemove2 := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove2, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scToRemove2) } // Test pick with two subconns different locality weight. p6 := <-cc.NewPickerCh // Locality-1 will be not be picked, and locality-2 will be picked. // Locality-2 contains sc3 and sc4. So expect sc3, sc4. want = []balancer.SubConn{sc3, sc4} if err := testutils.IsRoundRobin(want, subConnFromPicker(p6)); err != nil { t.Fatalf("want %v, got %v", want, err) } } // The EDS balancer gets EDS resp with unhealthy endpoints. Test that only // healthy ones are used. func (s) TestEDS_EndpointsHealth(t *testing.T) { cc := testutils.NewTestClientConn(t) edsb := newEDSBalancerImpl(cc, nil, nil, nil) edsb.enqueueChildBalancerStateUpdate = edsb.updateState // Two localities, each 3 backend, one Healthy, one Unhealthy, one Unknown. 
clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:6], &xdsclient.AddLocalityOptions{ Health: []corepb.HealthStatus{ corepb.HealthStatus_HEALTHY, corepb.HealthStatus_UNHEALTHY, corepb.HealthStatus_UNKNOWN, corepb.HealthStatus_DRAINING, corepb.HealthStatus_TIMEOUT, corepb.HealthStatus_DEGRADED, }, }) clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[6:12], &xdsclient.AddLocalityOptions{ Health: []corepb.HealthStatus{ corepb.HealthStatus_HEALTHY, corepb.HealthStatus_UNHEALTHY, corepb.HealthStatus_UNKNOWN, corepb.HealthStatus_DRAINING, corepb.HealthStatus_TIMEOUT, corepb.HealthStatus_DEGRADED, }, }) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build())) var ( readySCs []balancer.SubConn newSubConnAddrStrs []string ) for i := 0; i < 4; i++ { addr := <-cc.NewSubConnAddrsCh newSubConnAddrStrs = append(newSubConnAddrStrs, addr[0].Addr) sc := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc, connectivity.Connecting) edsb.HandleSubConnStateChange(sc, connectivity.Ready) readySCs = append(readySCs, sc) } wantNewSubConnAddrStrs := []string{ testEndpointAddrs[0], testEndpointAddrs[2], testEndpointAddrs[6], testEndpointAddrs[8], } sortStrTrans := cmp.Transformer("Sort", func(in []string) []string { out := append([]string(nil), in...) // Copy input to avoid mutating it. sort.Strings(out) return out }) if !cmp.Equal(newSubConnAddrStrs, wantNewSubConnAddrStrs, sortStrTrans) { t.Fatalf("want newSubConn with address %v, got %v", wantNewSubConnAddrStrs, newSubConnAddrStrs) } // There should be exactly 4 new SubConns. Check to make sure there's no // more subconns being created. select { case <-cc.NewSubConnCh: t.Fatalf("Got unexpected new subconn") case <-time.After(time.Microsecond * 100): } // Test roundrobin with the subconns. 
p1 := <-cc.NewPickerCh want := readySCs if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } } func (s) TestClose(t *testing.T) { edsb := newEDSBalancerImpl(nil, nil, nil, nil) // This is what could happen when switching between fallback and eds. This // make sure it doesn't panic. edsb.Close() } // Create XDS balancer, and update sub-balancer before handling eds responses. // Then switch between round-robin and test-const-balancer after handling first // eds response. func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { cc := testutils.NewTestClientConn(t) edsb := newEDSBalancerImpl(cc, nil, nil, nil) edsb.enqueueChildBalancerStateUpdate = edsb.updateState t.Logf("update sub-balancer to test-const-balancer") edsb.HandleChildPolicy("test-const-balancer", nil) // Two localities, each with one backend. clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build())) for i := 0; i < 2; i++ { sc := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc, connectivity.Ready) } p0 := <-cc.NewPickerCh for i := 0; i < 5; i++ { _, err := p0.Pick(balancer.PickInfo{}) if err != testutils.ErrTestConstPicker { t.Fatalf("picker.Pick, got err %q, want err %q", err, testutils.ErrTestConstPicker) } } t.Logf("update sub-balancer to round-robin") edsb.HandleChildPolicy(roundrobin.Name, nil) for i := 0; i < 2; i++ { <-cc.RemoveSubConnCh } sc1 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc1, connectivity.Connecting) edsb.HandleSubConnStateChange(sc1, connectivity.Ready) sc2 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc2, connectivity.Connecting) edsb.HandleSubConnStateChange(sc2, connectivity.Ready) // Test roundrobin with two subconns. 
p1 := <-cc.NewPickerCh want := []balancer.SubConn{sc1, sc2} if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) } t.Logf("update sub-balancer to test-const-balancer") edsb.HandleChildPolicy("test-const-balancer", nil) for i := 0; i < 2; i++ { scToRemove := <-cc.RemoveSubConnCh if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) && !cmp.Equal(scToRemove, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { t.Fatalf("RemoveSubConn, want (%v or %v), got %v", sc1, sc2, scToRemove) } edsb.HandleSubConnStateChange(scToRemove, connectivity.Shutdown) } for i := 0; i < 2; i++ { sc := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc, connectivity.Ready) } p2 := <-cc.NewPickerCh for i := 0; i < 5; i++ { _, err := p2.Pick(balancer.PickInfo{}) if err != testutils.ErrTestConstPicker { t.Fatalf("picker.Pick, got err %q, want err %q", err, testutils.ErrTestConstPicker) } } t.Logf("update sub-balancer to round-robin") edsb.HandleChildPolicy(roundrobin.Name, nil) for i := 0; i < 2; i++ { <-cc.RemoveSubConnCh } sc3 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc3, connectivity.Connecting) edsb.HandleSubConnStateChange(sc3, connectivity.Ready) sc4 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc4, connectivity.Connecting) edsb.HandleSubConnStateChange(sc4, connectivity.Ready) p3 := <-cc.NewPickerCh want = []balancer.SubConn{sc3, sc4} if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { t.Fatalf("want %v, got %v", want, err) } } func init() { balancer.Register(&testInlineUpdateBalancerBuilder{}) } // A test balancer that updates balancer.State inline when handling ClientConn // state. 
type testInlineUpdateBalancerBuilder struct{} func (*testInlineUpdateBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { return &testInlineUpdateBalancer{cc: cc} } func (*testInlineUpdateBalancerBuilder) Name() string { return "test-inline-update-balancer" } type testInlineUpdateBalancer struct { cc balancer.ClientConn } func (tb *testInlineUpdateBalancer) HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) { } var errTestInlineStateUpdate = fmt.Errorf("don't like addresses, empty or not") func (tb *testInlineUpdateBalancer) HandleResolvedAddrs(a []resolver.Address, err error) { tb.cc.UpdateState(balancer.State{ ConnectivityState: connectivity.Ready, Picker: &testutils.TestConstPicker{Err: errTestInlineStateUpdate}, }) } func (*testInlineUpdateBalancer) Close() { } // When the child policy update picker inline in a handleClientUpdate call // (e.g., roundrobin handling empty addresses). There could be deadlock caused // by acquiring a locked mutex. func (s) TestEDS_ChildPolicyUpdatePickerInline(t *testing.T) { cc := testutils.NewTestClientConn(t) edsb := newEDSBalancerImpl(cc, nil, nil, nil) edsb.enqueueChildBalancerStateUpdate = func(p priorityType, state balancer.State) { // For this test, euqueue needs to happen asynchronously (like in the // real implementation). 
go edsb.updateState(p, state) } edsb.HandleChildPolicy("test-inline-update-balancer", nil) clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build())) p0 := <-cc.NewPickerCh for i := 0; i < 5; i++ { _, err := p0.Pick(balancer.PickInfo{}) if err != errTestInlineStateUpdate { t.Fatalf("picker.Pick, got err %q, want err %q", err, errTestInlineStateUpdate) } } } func (s) TestDropPicker(t *testing.T) { const pickCount = 12 var constPicker = &testutils.TestConstPicker{ SC: testutils.TestSubConns[0], } tests := []struct { name string drops []*dropper }{ { name: "no drop", drops: nil, }, { name: "one drop", drops: []*dropper{ newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 2}), }, }, { name: "two drops", drops: []*dropper{ newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 3}), newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 2}), }, }, { name: "three drops", drops: []*dropper{ newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 3}), newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 4}), newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 2}), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { p := newDropPicker(constPicker, tt.drops, nil) // scCount is the number of sc's returned by pick. The opposite of // drop-count. 
var ( scCount int wantCount = pickCount ) for _, dp := range tt.drops { wantCount = wantCount * int(dp.c.Denominator-dp.c.Numerator) / int(dp.c.Denominator) } for i := 0; i < pickCount; i++ { _, err := p.Pick(balancer.PickInfo{}) if err == nil { scCount++ } } if scCount != (wantCount) { t.Errorf("drops: %+v, scCount %v, wantCount %v", tt.drops, scCount, wantCount) } }) } } func (s) TestEDS_LoadReport(t *testing.T) { testLoadStore := testutils.NewTestLoadStore() cc := testutils.NewTestClientConn(t) edsb := newEDSBalancerImpl(cc, nil, testLoadStore, nil) edsb.enqueueChildBalancerStateUpdate = edsb.updateState backendToBalancerID := make(map[balancer.SubConn]internal.Locality) // Two localities, each with one backend. clab1 := xdsclient.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build())) sc1 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc1, connectivity.Connecting) edsb.HandleSubConnStateChange(sc1, connectivity.Ready) backendToBalancerID[sc1] = internal.Locality{ SubZone: testSubZones[0], } // Add the second locality later to make sure sc2 belongs to the second // locality. Otherwise the test is flaky because of a map is used in EDS to // keep localities. clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) edsb.HandleEDSResponse(xdsclient.ParseEDSRespProtoForTesting(clab1.Build())) sc2 := <-cc.NewSubConnCh edsb.HandleSubConnStateChange(sc2, connectivity.Connecting) edsb.HandleSubConnStateChange(sc2, connectivity.Ready) backendToBalancerID[sc2] = internal.Locality{ SubZone: testSubZones[1], } // Test roundrobin with two subconns. 
p1 := <-cc.NewPickerCh var ( wantStart []internal.Locality wantEnd []internal.Locality ) for i := 0; i < 10; i++ { scst, _ := p1.Pick(balancer.PickInfo{}) locality := backendToBalancerID[scst.SubConn] wantStart = append(wantStart, locality) if scst.Done != nil && scst.SubConn != sc1 { scst.Done(balancer.DoneInfo{}) wantEnd = append(wantEnd, backendToBalancerID[scst.SubConn]) } } if !cmp.Equal(testLoadStore.CallsStarted, wantStart) { t.Fatalf("want started: %v, got: %v", testLoadStore.CallsStarted, wantStart) } if !cmp.Equal(testLoadStore.CallsEnded, wantEnd) { t.Fatalf("want ended: %v, got: %v", testLoadStore.CallsEnded, wantEnd) } } grpc-go-1.29.1/xds/internal/balancer/edsbalancer/eds_test.go000066400000000000000000000420401365033716300237360ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package edsbalancer import ( "bytes" "encoding/json" "fmt" "testing" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/golang/protobuf/jsonpb" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpctest" scpb "google.golang.org/grpc/internal/proto/grpc_service_config" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/xds/internal/balancer/lrs" xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/client/bootstrap" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" ) func init() { balancer.Register(&edsBalancerBuilder{}) bootstrapConfigNew = func() (*bootstrap.Config, error) { return &bootstrap.Config{ BalancerName: testBalancerNameFooBar, Creds: grpc.WithInsecure(), NodeProto: &corepb.Node{}, }, nil } } func subConnFromPicker(p balancer.V2Picker) func() balancer.SubConn { return func() balancer.SubConn { scst, _ := p.Pick(balancer.PickInfo{}) return scst.SubConn } } type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } const testBalancerNameFooBar = "foo.bar" func newNoopTestClientConn() *noopTestClientConn { return &noopTestClientConn{} } // noopTestClientConn is used in EDS balancer config update tests that only // cover the config update handling, but not SubConn/load-balancing. 
type noopTestClientConn struct { balancer.ClientConn } func (t *noopTestClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { return nil, nil } func (noopTestClientConn) Target() string { return testServiceName } type scStateChange struct { sc balancer.SubConn state connectivity.State } type fakeEDSBalancer struct { cc balancer.ClientConn childPolicy *testutils.Channel subconnStateChange *testutils.Channel loadStore lrs.Store } func (f *fakeEDSBalancer) HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) { f.subconnStateChange.Send(&scStateChange{sc: sc, state: state}) } func (f *fakeEDSBalancer) HandleChildPolicy(name string, config json.RawMessage) { f.childPolicy.Send(&loadBalancingConfig{Name: name, Config: config}) } func (f *fakeEDSBalancer) Close() {} func (f *fakeEDSBalancer) HandleEDSResponse(edsResp *xdsclient.EDSUpdate) {} func (f *fakeEDSBalancer) updateState(priority priorityType, s balancer.State) {} func (f *fakeEDSBalancer) waitForChildPolicy(wantPolicy *loadBalancingConfig) error { val, err := f.childPolicy.Receive() if err != nil { return fmt.Errorf("error waiting for childPolicy: %v", err) } gotPolicy := val.(*loadBalancingConfig) if !cmp.Equal(gotPolicy, wantPolicy) { return fmt.Errorf("got childPolicy %v, want %v", gotPolicy, wantPolicy) } return nil } func (f *fakeEDSBalancer) waitForSubConnStateChange(wantState *scStateChange) error { val, err := f.subconnStateChange.Receive() if err != nil { return fmt.Errorf("error waiting for subconnStateChange: %v", err) } gotState := val.(*scStateChange) if !cmp.Equal(gotState, wantState, cmp.AllowUnexported(scStateChange{})) { return fmt.Errorf("got subconnStateChange %v, want %v", gotState, wantState) } return nil } func newFakeEDSBalancer(cc balancer.ClientConn, loadStore lrs.Store) edsBalancerImplInterface { return &fakeEDSBalancer{ cc: cc, childPolicy: testutils.NewChannelWithSize(10), subconnStateChange: 
testutils.NewChannelWithSize(10), loadStore: loadStore, } } type fakeSubConn struct{} func (*fakeSubConn) UpdateAddresses([]resolver.Address) { panic("implement me") } func (*fakeSubConn) Connect() { panic("implement me") } // waitForNewXDSClientWithEDSWatch makes sure that a new xdsClient is created // with the provided name. It also make sure that the newly created client // registers an eds watcher. func waitForNewXDSClientWithEDSWatch(t *testing.T, ch *testutils.Channel, wantName string) *fakeclient.Client { t.Helper() val, err := ch.Receive() if err != nil { t.Fatalf("error when waiting for a new xds client: %v", err) return nil } xdsC := val.(*fakeclient.Client) if xdsC.Name() != wantName { t.Fatalf("xdsClient created to balancer: %v, want %v", xdsC.Name(), wantName) return nil } _, err = xdsC.WaitForWatchEDS() if err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) return nil } return xdsC } // waitForNewEDSLB makes sure that a new edsLB is created by the top-level // edsBalancer. func waitForNewEDSLB(t *testing.T, ch *testutils.Channel) *fakeEDSBalancer { t.Helper() val, err := ch.Receive() if err != nil { t.Fatalf("error when waiting for a new edsLB: %v", err) return nil } return val.(*fakeEDSBalancer) } // setup overrides the functions which are used to create the xdsClient and the // edsLB, creates fake version of them and makes them available on the provided // channels. The returned cancel function should be called by the test for // cleanup. 
func setup(edsLBCh *testutils.Channel, xdsClientCh *testutils.Channel) func() { origNewEDSBalancer := newEDSBalancer newEDSBalancer = func(cc balancer.ClientConn, enqueue func(priorityType, balancer.State), loadStore lrs.Store, logger *grpclog.PrefixLogger) edsBalancerImplInterface { edsLB := newFakeEDSBalancer(cc, loadStore) defer func() { edsLBCh.Send(edsLB) }() return edsLB } origXdsClientNew := xdsclientNew xdsclientNew = func(opts xdsclient.Options) (xdsClientInterface, error) { xdsC := fakeclient.NewClientWithName(opts.Config.BalancerName) defer func() { xdsClientCh.Send(xdsC) }() return xdsC, nil } return func() { newEDSBalancer = origNewEDSBalancer xdsclientNew = origXdsClientNew } } // TestXDSConfigBalancerNameUpdate verifies different scenarios where the // balancer name in the lbConfig is updated. // // The test does the following: // * Builds a new xds balancer. // * Repeatedly pushes new ClientConnState which specifies different // balancerName in the lbConfig. We expect xdsClient objects to created // whenever the balancerName changes. func (s) TestXDSConfigBalancerNameUpdate(t *testing.T) { oldBootstrapConfigNew := bootstrapConfigNew bootstrapConfigNew = func() (*bootstrap.Config, error) { // Return an error from bootstrap, so the eds balancer will use // BalancerName from the config. // // TODO: remove this when deleting BalancerName from config. 
return nil, fmt.Errorf("no bootstrap available") } defer func() { bootstrapConfigNew = oldBootstrapConfigNew }() edsLBCh := testutils.NewChannel() xdsClientCh := testutils.NewChannel() cancel := setup(edsLBCh, xdsClientCh) defer cancel() builder := balancer.Get(edsName) cc := newNoopTestClientConn() edsB, ok := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSClusterName}}).(*edsBalancer) if !ok { t.Fatalf("builder.Build(%s) returned type {%T}, want {*edsBalancer}", edsName, edsB) } defer edsB.Close() addrs := []resolver.Address{{Addr: "1.1.1.1:10001"}, {Addr: "2.2.2.2:10002"}, {Addr: "3.3.3.3:10003"}} for i := 0; i < 2; i++ { balancerName := fmt.Sprintf("balancer-%d", i) edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Addresses: addrs}, BalancerConfig: &EDSConfig{ BalancerName: balancerName, EDSServiceName: testEDSClusterName, }, }) xdsC := waitForNewXDSClientWithEDSWatch(t, xdsClientCh, balancerName) xdsC.InvokeWatchEDSCallback(&xdsclient.EDSUpdate{}, nil) } } const ( fakeBalancerA = "fake_balancer_A" fakeBalancerB = "fake_balancer_B" ) // Install two fake balancers for service config update tests. // // ParseConfig only accepts the json if the balancer specified is registered. 
func init() { balancer.Register(&fakeBalancerBuilder{name: fakeBalancerA}) balancer.Register(&fakeBalancerBuilder{name: fakeBalancerB}) } type fakeBalancerBuilder struct { name string } func (b *fakeBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { return &fakeBalancer{cc: cc} } func (b *fakeBalancerBuilder) Name() string { return b.name } type fakeBalancer struct { cc balancer.ClientConn } func (b *fakeBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { panic("implement me") } func (b *fakeBalancer) HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) { panic("implement me") } func (b *fakeBalancer) Close() {} // TestXDSConnfigChildPolicyUpdate verifies scenarios where the childPolicy // section of the lbConfig is updated. // // The test does the following: // * Builds a new xds balancer. // * Pushes a new ClientConnState with a childPolicy set to fakeBalancerA. // Verifies that a new xdsClient is created. It then pushes a new edsUpdate // through the fakexds client. Verifies that a new edsLB is created and it // receives the expected childPolicy. // * Pushes a new ClientConnState with a childPolicy set to fakeBalancerB. // This time around, we expect no new xdsClient or edsLB to be created. // Instead, we expect the existing edsLB to receive the new child policy. 
func (s) TestXDSConnfigChildPolicyUpdate(t *testing.T) { edsLBCh := testutils.NewChannel() xdsClientCh := testutils.NewChannel() cancel := setup(edsLBCh, xdsClientCh) defer cancel() builder := balancer.Get(edsName) cc := newNoopTestClientConn() edsB, ok := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}).(*edsBalancer) if !ok { t.Fatalf("builder.Build(%s) returned type {%T}, want {*edsBalancer}", edsName, edsB) } defer edsB.Close() edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ BalancerName: testBalancerNameFooBar, ChildPolicy: &loadBalancingConfig{ Name: fakeBalancerA, Config: json.RawMessage("{}"), }, EDSServiceName: testEDSClusterName, }, }) xdsC := waitForNewXDSClientWithEDSWatch(t, xdsClientCh, testBalancerNameFooBar) xdsC.InvokeWatchEDSCallback(&xdsclient.EDSUpdate{}, nil) edsLB := waitForNewEDSLB(t, edsLBCh) edsLB.waitForChildPolicy(&loadBalancingConfig{ Name: string(fakeBalancerA), Config: json.RawMessage(`{}`), }) edsB.UpdateClientConnState(balancer.ClientConnState{ BalancerConfig: &EDSConfig{ BalancerName: testBalancerNameFooBar, ChildPolicy: &loadBalancingConfig{ Name: fakeBalancerB, Config: json.RawMessage("{}"), }, EDSServiceName: testEDSClusterName, }, }) edsLB.waitForChildPolicy(&loadBalancingConfig{ Name: string(fakeBalancerA), Config: json.RawMessage(`{}`), }) } // TestXDSSubConnStateChange verifies if the top-level edsBalancer passes on // the subConnStateChange to appropriate child balancers. 
func (s) TestXDSSubConnStateChange(t *testing.T) { edsLBCh := testutils.NewChannel() xdsClientCh := testutils.NewChannel() cancel := setup(edsLBCh, xdsClientCh) defer cancel() builder := balancer.Get(edsName) cc := newNoopTestClientConn() edsB, ok := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSClusterName}}).(*edsBalancer) if !ok { t.Fatalf("builder.Build(%s) returned type {%T}, want {*edsBalancer}", edsName, edsB) } defer edsB.Close() addrs := []resolver.Address{{Addr: "1.1.1.1:10001"}, {Addr: "2.2.2.2:10002"}, {Addr: "3.3.3.3:10003"}} edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Addresses: addrs}, BalancerConfig: &EDSConfig{ BalancerName: testBalancerNameFooBar, EDSServiceName: testEDSClusterName, }, }) xdsC := waitForNewXDSClientWithEDSWatch(t, xdsClientCh, testBalancerNameFooBar) xdsC.InvokeWatchEDSCallback(&xdsclient.EDSUpdate{}, nil) edsLB := waitForNewEDSLB(t, edsLBCh) fsc := &fakeSubConn{} state := connectivity.Ready edsB.UpdateSubConnState(fsc, balancer.SubConnState{ConnectivityState: state}) edsLB.waitForSubConnStateChange(&scStateChange{sc: fsc, state: state}) } func (s) TestXDSBalancerConfigParsing(t *testing.T) { const testEDSName = "eds.service" var testLRSName = "lrs.server" b := bytes.NewBuffer(nil) if err := (&jsonpb.Marshaler{}).Marshal(b, &scpb.XdsConfig{ ChildPolicy: []*scpb.LoadBalancingConfig{ {Policy: &scpb.LoadBalancingConfig_Xds{}}, {Policy: &scpb.LoadBalancingConfig_RoundRobin{ RoundRobin: &scpb.RoundRobinConfig{}, }}, }, FallbackPolicy: []*scpb.LoadBalancingConfig{ {Policy: &scpb.LoadBalancingConfig_Xds{}}, {Policy: &scpb.LoadBalancingConfig_PickFirst{ PickFirst: &scpb.PickFirstConfig{}, }}, }, EdsServiceName: testEDSName, LrsLoadReportingServerName: &wrapperspb.StringValue{Value: testLRSName}, }); err != nil { t.Fatalf("%v", err) } tests := []struct { name string js json.RawMessage want serviceconfig.LoadBalancingConfig wantErr bool }{ { name: "jsonpb-generated", 
js: b.Bytes(), want: &EDSConfig{ ChildPolicy: &loadBalancingConfig{ Name: "round_robin", Config: json.RawMessage("{}"), }, FallBackPolicy: &loadBalancingConfig{ Name: "pick_first", Config: json.RawMessage("{}"), }, EDSServiceName: testEDSName, LrsLoadReportingServerName: &testLRSName, }, wantErr: false, }, { // json with random balancers, and the first is not registered. name: "manually-generated", js: json.RawMessage(` { "balancerName": "fake.foo.bar", "childPolicy": [ {"fake_balancer_C": {}}, {"fake_balancer_A": {}}, {"fake_balancer_B": {}} ], "fallbackPolicy": [ {"fake_balancer_C": {}}, {"fake_balancer_B": {}}, {"fake_balancer_A": {}} ], "edsServiceName": "eds.service", "lrsLoadReportingServerName": "lrs.server" }`), want: &EDSConfig{ BalancerName: "fake.foo.bar", ChildPolicy: &loadBalancingConfig{ Name: "fake_balancer_A", Config: json.RawMessage("{}"), }, FallBackPolicy: &loadBalancingConfig{ Name: "fake_balancer_B", Config: json.RawMessage("{}"), }, EDSServiceName: testEDSName, LrsLoadReportingServerName: &testLRSName, }, wantErr: false, }, { // json with no lrs server name, LrsLoadReportingServerName should // be nil (not an empty string). 
name: "no-lrs-server-name", js: json.RawMessage(` { "balancerName": "fake.foo.bar", "edsServiceName": "eds.service" }`), want: &EDSConfig{ BalancerName: "fake.foo.bar", EDSServiceName: testEDSName, LrsLoadReportingServerName: nil, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { b := &edsBalancerBuilder{} got, err := b.ParseConfig(tt.js) if (err != nil) != tt.wantErr { t.Errorf("edsBalancerBuilder.ParseConfig() error = %v, wantErr %v", err, tt.wantErr) return } if !cmp.Equal(got, tt.want) { t.Errorf(cmp.Diff(got, tt.want)) } }) } } func (s) TestLoadbalancingConfigParsing(t *testing.T) { tests := []struct { name string s string want *EDSConfig }{ { name: "empty", s: "{}", want: &EDSConfig{}, }, { name: "success1", s: `{"childPolicy":[{"pick_first":{}}]}`, want: &EDSConfig{ ChildPolicy: &loadBalancingConfig{ Name: "pick_first", Config: json.RawMessage(`{}`), }, }, }, { name: "success2", s: `{"childPolicy":[{"round_robin":{}},{"pick_first":{}}]}`, want: &EDSConfig{ ChildPolicy: &loadBalancingConfig{ Name: "round_robin", Config: json.RawMessage(`{}`), }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var cfg EDSConfig if err := json.Unmarshal([]byte(tt.s), &cfg); err != nil || !cmp.Equal(&cfg, tt.want) { t.Errorf("test name: %s, parseFullServiceConfig() = %+v, err: %v, want %+v, ", tt.name, cfg, err, tt.want) } }) } } func (s) TestEqualStringPointers(t *testing.T) { var ( ta1 = "test-a" ta2 = "test-a" tb = "test-b" ) tests := []struct { name string a *string b *string want bool }{ {"both-nil", nil, nil, true}, {"a-non-nil", &ta1, nil, false}, {"b-non-nil", nil, &tb, false}, {"equal", &ta1, &ta2, true}, {"different", &ta1, &tb, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := equalStringPointers(tt.a, tt.b); got != tt.want { t.Errorf("equalStringPointers() = %v, want %v", got, tt.want) } }) } } 
grpc-go-1.29.1/xds/internal/balancer/edsbalancer/logging.go000066400000000000000000000013601365033716300235520ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package edsbalancer import ( "fmt" ) const prefix = "[eds-lb %p] " func loggingPrefix(p *edsBalancer) string { return fmt.Sprintf(prefix, p) } grpc-go-1.29.1/xds/internal/balancer/edsbalancer/util.go000066400000000000000000000021101365033716300230730ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package edsbalancer import ( "google.golang.org/grpc/internal/wrr" xdsclient "google.golang.org/grpc/xds/internal/client" ) var newRandomWRR = wrr.NewRandom type dropper struct { c xdsclient.OverloadDropConfig w wrr.WRR } func newDropper(c xdsclient.OverloadDropConfig) *dropper { w := newRandomWRR() w.Add(true, int64(c.Numerator)) w.Add(false, int64(c.Denominator-c.Numerator)) return &dropper{ c: c, w: w, } } func (d *dropper) drop() (ret bool) { return d.w.Next().(bool) } grpc-go-1.29.1/xds/internal/balancer/edsbalancer/util_test.go000066400000000000000000000035161365033716300241450ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package edsbalancer import ( "testing" xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/testutils" ) func init() { newRandomWRR = testutils.NewTestWRR } func (s) TestDropper(t *testing.T) { const repeat = 2 type args struct { numerator uint32 denominator uint32 } tests := []struct { name string args args }{ { name: "2_3", args: args{ numerator: 2, denominator: 3, }, }, { name: "4_8", args: args{ numerator: 4, denominator: 8, }, }, { name: "7_20", args: args{ numerator: 7, denominator: 20, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { d := newDropper(xdsclient.OverloadDropConfig{ Category: "", Numerator: tt.args.numerator, Denominator: tt.args.denominator, }) var ( dCount int wantCount = int(tt.args.numerator) * repeat loopCount = int(tt.args.denominator) * repeat ) for i := 0; i < loopCount; i++ { if d.drop() { dCount++ } } if dCount != (wantCount) { t.Errorf("with numerator %v, denominator %v repeat %v, got drop count: %v, want %v", tt.args.numerator, tt.args.denominator, repeat, dCount, wantCount) } }) } } grpc-go-1.29.1/xds/internal/balancer/edsbalancer/xds_client_wrapper.go000066400000000000000000000265451365033716300260340ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package edsbalancer import ( "google.golang.org/grpc" "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/internal/grpclog" xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/lrs" xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/client/bootstrap" ) // xdsClientInterface contains only the xds_client methods needed by EDS // balancer. It's defined so we can override xdsclientNew function in tests. type xdsClientInterface interface { WatchEndpoints(clusterName string, edsCb func(*xdsclient.EDSUpdate, error)) (cancel func()) ReportLoad(server string, clusterName string, loadStore lrs.Store) (cancel func()) Close() } var ( xdsclientNew = func(opts xdsclient.Options) (xdsClientInterface, error) { return xdsclient.New(opts) } bootstrapConfigNew = bootstrap.NewConfig ) // xdsclientWrapper is responsible for getting the xds client from attributes or // creating a new xds client, and start watching EDS. The given callbacks will // be called with EDS updates or errors. type xdsclientWrapper struct { logger *grpclog.PrefixLogger newEDSUpdate func(*xdsclient.EDSUpdate) error loseContact func() bbo balancer.BuildOptions loadStore lrs.Store balancerName string // xdsclient could come from attributes, or created with balancerName. xdsclient xdsClientInterface // edsServiceName is the edsServiceName currently being watched, not // necessary the edsServiceName from service config. // // If edsServiceName from service config is an empty, this will be user's // dial target (because that's what we use to watch EDS). // // TODO: remove the empty string related behavior, when we switch to always // do CDS. edsServiceName string cancelEndpointsWatch func() loadReportServer *string // LRS is disabled if loadReporterServer is nil. cancelLoadReport func() } // newXDSClientWrapper creates an empty xds_client wrapper that does nothing. 
It // can accept xds_client configs, to new/switch xds_client to use. // // The given callbacks won't be called until the underlying xds_client is // working and sends updates. func newXDSClientWrapper(newEDSUpdate func(*xdsclient.EDSUpdate) error, loseContact func(), bbo balancer.BuildOptions, loadStore lrs.Store, logger *grpclog.PrefixLogger) *xdsclientWrapper { return &xdsclientWrapper{ logger: logger, newEDSUpdate: newEDSUpdate, loseContact: loseContact, bbo: bbo, loadStore: loadStore, } } // replaceXDSClient replaces xdsclient fields to the newClient if they are // different. If xdsclient is replaced, the balancerName field will also be // updated to newBalancerName. // // If the old xdsclient is replaced, and was created locally (not from // attributes), it will be closed. // // It returns whether xdsclient is replaced. func (c *xdsclientWrapper) replaceXDSClient(newClient xdsClientInterface, newBalancerName string) bool { if c.xdsclient == newClient { return false } oldClient := c.xdsclient oldBalancerName := c.balancerName c.xdsclient = newClient c.balancerName = newBalancerName if oldBalancerName != "" { // OldBalancerName!="" means if the old client was not from attributes. oldClient.Close() } return true } // updateXDSClient sets xdsclient in wrapper to the correct one based on the // attributes and service config. // // If client is found in attributes, it will be used, but we also need to decide // whether to close the old client. // - if old client was created locally (balancerName is not ""), close it and // replace it // - if old client was from previous attributes, only replace it, but don't // close it // // If client is not found in attributes, will need to create a new one only if // the balancerName (from bootstrap file or from service config) changed. 
// - if balancer names are the same, do nothing, and return false // - if balancer names are different, create new one, and return true func (c *xdsclientWrapper) updateXDSClient(config *EDSConfig, attr *attributes.Attributes) bool { if attr != nil { if clientFromAttr, _ := attr.Value(xdsinternal.XDSClientID).(xdsClientInterface); clientFromAttr != nil { // This will also clear balancerName, to indicate that client is // from attributes. return c.replaceXDSClient(clientFromAttr, "") } } clientConfig, err := bootstrapConfigNew() if err != nil { // TODO: propagate this error to ClientConn, and fail RPCs if necessary. clientConfig = &bootstrap.Config{BalancerName: config.BalancerName} } if c.balancerName == clientConfig.BalancerName { return false } if clientConfig.Creds == nil { // TODO: Once we start supporting a mechanism to register credential // types, a failure to find the credential type mentioned in the // bootstrap file should result in a failure, and not in using // credentials from the parent channel (passed through the // resolver.BuildOptions). clientConfig.Creds = c.defaultDialCreds(clientConfig.BalancerName) } var dopts []grpc.DialOption if dialer := c.bbo.Dialer; dialer != nil { dopts = []grpc.DialOption{grpc.WithContextDialer(dialer)} } newClient, err := xdsclientNew(xdsclient.Options{Config: *clientConfig, DialOpts: dopts}) if err != nil { // This should never fail. xdsclientnew does a non-blocking dial, and // all the config passed in should be validated. // // This could leave c.xdsclient as nil if this is the first update. c.logger.Warningf("eds: failed to create xdsclient, error: %v", err) return false } return c.replaceXDSClient(newClient, clientConfig.BalancerName) } // startEndpointsWatch starts the EDS watch. Caller can call this when the // xds_client is updated, or the edsServiceName is updated. // // Note that if there's already a watch in progress, it's not explicitly // canceled. 
Because for each xds_client, there should be only one EDS watch in // progress. So a new EDS watch implicitly cancels the previous one. // // This usually means load report needs to be restarted, but this function does // NOT do that. Caller needs to call startLoadReport separately. func (c *xdsclientWrapper) startEndpointsWatch(nameToWatch string) { if c.xdsclient == nil { return } c.edsServiceName = nameToWatch cancelEDSWatch := c.xdsclient.WatchEndpoints(c.edsServiceName, func(update *xdsclient.EDSUpdate, err error) { if err != nil { // TODO: this should trigger a call to `c.loseContact`, when the // error indicates "lose contact". c.logger.Warningf("Watch error from xds-client %p: %v", c.xdsclient, err) return } c.logger.Infof("Watch update from xds-client %p, content: %+v", c.xdsclient, update) if err := c.newEDSUpdate(update); err != nil { c.logger.Warningf("xds: processing new EDS update failed due to %v.", err) } }) c.logger.Infof("Watch started on resource name %v with xds-client %p", c.edsServiceName, c.xdsclient) c.cancelEndpointsWatch = func() { cancelEDSWatch() c.logger.Infof("Watch cancelled on resource name %v with xds-client %p", c.edsServiceName, c.xdsclient) } } // startLoadReport starts load reporting. If there's already a load reporting in // progress, it cancels that. // // Caller can cal this when the loadReportServer name changes, but // edsServiceName doesn't (so we only need to restart load reporting, not EDS // watch). func (c *xdsclientWrapper) startLoadReport(edsServiceNameBeingWatched string, loadReportServer *string) { if c.xdsclient == nil { c.logger.Warningf("xds: xdsclient is nil when trying to start load reporting. 
This means xdsclient wasn't passed in from the resolver, and xdsclient.New failed") return } if c.loadStore != nil { if c.cancelLoadReport != nil { c.cancelLoadReport() } c.loadReportServer = loadReportServer if c.loadReportServer != nil { c.cancelLoadReport = c.xdsclient.ReportLoad(*c.loadReportServer, edsServiceNameBeingWatched, c.loadStore) } } } // handleUpdate applies the service config and attributes updates to the client, // including updating the xds_client to use, and updating the EDS name to watch. func (c *xdsclientWrapper) handleUpdate(config *EDSConfig, attr *attributes.Attributes) { clientChanged := c.updateXDSClient(config, attr) var ( restartEndpointsWatch bool restartLoadReport bool ) // The clusterName to watch should come from CDS response, via service // config. If it's an empty string, fallback user's dial target. nameToWatch := config.EDSServiceName if nameToWatch == "" { c.logger.Warningf("eds: cluster name to watch is an empty string. Fallback to user's dial target") nameToWatch = c.bbo.Target.Endpoint } // Need to restart EDS watch when one of the following happens: // - the xds_client is updated // - the xds_client didn't change, but the edsServiceName changed // // Only need to restart load reporting when: // - no need to restart EDS, but loadReportServer name changed if clientChanged || c.edsServiceName != nameToWatch { restartEndpointsWatch = true restartLoadReport = true } else if !equalStringPointers(c.loadReportServer, config.LrsLoadReportingServerName) { restartLoadReport = true } if restartEndpointsWatch { c.startEndpointsWatch(nameToWatch) } if restartLoadReport { c.startLoadReport(nameToWatch, config.LrsLoadReportingServerName) } } func (c *xdsclientWrapper) close() { if c.xdsclient != nil && c.balancerName != "" { // Only close xdsclient if it's not from attributes. 
c.xdsclient.Close() } if c.cancelLoadReport != nil { c.cancelLoadReport() } if c.cancelEndpointsWatch != nil { c.cancelEndpointsWatch() } } // defaultDialCreds builds a DialOption containing the credentials to be used // while talking to the xDS server (this is done only if the xds bootstrap // process does not return any credentials to use). If the parent channel // contains DialCreds, we use it as is. If it contains a CredsBundle, we use // just the transport credentials from the bundle. If we don't find any // credentials on the parent channel, we resort to using an insecure channel. func (c *xdsclientWrapper) defaultDialCreds(balancerName string) grpc.DialOption { switch { case c.bbo.DialCreds != nil: if err := c.bbo.DialCreds.OverrideServerName(balancerName); err != nil { c.logger.Warningf("xds: failed to override server name in credentials: %v, using Insecure", err) return grpc.WithInsecure() } return grpc.WithTransportCredentials(c.bbo.DialCreds) case c.bbo.CredsBundle != nil: return grpc.WithTransportCredentials(c.bbo.CredsBundle.TransportCredentials()) default: c.logger.Warningf("xds: no credentials available, using Insecure") return grpc.WithInsecure() } } // equalStringPointers returns true if // - a and b are both nil OR // - *a == *b (and a and b are both non-nil) func equalStringPointers(a, b *string) bool { if a == nil && b == nil { return true } if a == nil || b == nil { return false } return *a == *b } grpc-go-1.29.1/xds/internal/balancer/edsbalancer/xds_client_wrapper_test.go000066400000000000000000000172231365033716300270640ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package edsbalancer import ( "errors" "testing" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/resolver" xdsinternal "google.golang.org/grpc/xds/internal" xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/client/bootstrap" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" "google.golang.org/grpc/xds/internal/testutils/fakeserver" ) const ( edsType = "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment" ) var ( testServiceName = "test/foo" testEDSClusterName = "test/service/eds" ) // TestClientWrapperWatchEDS verifies that the clientWrapper registers an // EDS watch for expected resource upon receiving an update from the top-level // edsBalancer. // // The test does the following: // * Starts a fake xDS server. // * Creates a clientWrapper. // * Sends updates with different edsServiceNames and expects new watches to be // registered. 
func (s) TestClientWrapperWatchEDS(t *testing.T) { fakeServer, cleanup, err := fakeserver.StartServer() if err != nil { t.Fatalf("Failed to start fake xDS server: %v", err) } defer cleanup() cw := newXDSClientWrapper(nil, nil, balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}, nil, nil) defer cw.close() for _, test := range []struct { name string edsServiceName string wantResourceName string }{ { // Update with an empty edsServiceName should trigger an EDS watch // for the user's dial target. name: "empty-edsServiceName", edsServiceName: "", wantResourceName: testServiceName, }, { // Update with an non-empty edsServiceName should trigger an EDS // watch for the same. name: "first-non-empty-edsServiceName", edsServiceName: "foobar-1", wantResourceName: "foobar-1", }, { // Also test the case where the edsServerName changes from one // non-empty name to another, and make sure a new watch is // registered. name: "second-non-empty-edsServiceName", edsServiceName: "foobar-2", wantResourceName: "foobar-2", }, } { t.Run(test.name, func(t *testing.T) { oldBootstrapConfigNew := bootstrapConfigNew bootstrapConfigNew = func() (*bootstrap.Config, error) { return &bootstrap.Config{ BalancerName: fakeServer.Address, Creds: grpc.WithInsecure(), NodeProto: &corepb.Node{}, }, nil } defer func() { bootstrapConfigNew = oldBootstrapConfigNew }() cw.handleUpdate(&EDSConfig{ BalancerName: fakeServer.Address, EDSServiceName: test.edsServiceName, }, nil) req, err := fakeServer.XDSRequestChan.Receive() if err != nil { t.Fatalf("EDS RPC failed with err: %v", err) } edsReq := req.(*fakeserver.Request) if edsReq.Err != nil { t.Fatalf("EDS RPC failed with err: %v", edsReq.Err) } wantReq := &xdspb.DiscoveryRequest{ TypeUrl: edsType, ResourceNames: []string{test.wantResourceName}, Node: &corepb.Node{}, } if !proto.Equal(edsReq.Req, wantReq) { t.Fatalf("got EDS request %v, expected: %v, diff: %s", edsReq.Req, wantReq, cmp.Diff(edsReq.Req, wantReq, 
cmp.Comparer(proto.Equal))) } }) } } // TestClientWrapperHandleUpdateError verifies that the clientWrapper handles // errors from the edsWatch callback appropriately. // // The test does the following: // * Creates a clientWrapper. // * Creates a fakeclient.Client and passes it to the clientWrapper in attributes. // * Verifies the clientWrapper registers an EDS watch. // * Forces the fakeclient.Client to invoke the registered EDS watch callback with // an error. Verifies that the wrapper does not invoke the top-level // edsBalancer with the received error. func (s) TestClientWrapperHandleUpdateError(t *testing.T) { edsRespChan := testutils.NewChannel() newEDS := func(update *xdsclient.EDSUpdate) error { edsRespChan.Send(update) return nil } cw := newXDSClientWrapper(newEDS, nil, balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}, nil, nil) defer cw.close() xdsC := fakeclient.NewClient() cw.handleUpdate(&EDSConfig{EDSServiceName: testEDSClusterName}, attributes.New(xdsinternal.XDSClientID, xdsC)) gotCluster, err := xdsC.WaitForWatchEDS() if err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } if gotCluster != testEDSClusterName { t.Fatalf("xdsClient.WatchEndpoints() called with cluster: %v, want %v", gotCluster, testEDSClusterName) } xdsC.InvokeWatchEDSCallback(nil, errors.New("EDS watch callback error")) // The callback is called with an error, expect no update from edsRespChan. // // TODO: check for loseContact() when errors indicating "lose contact" are // handled correctly. if gotUpdate, gotErr := edsRespChan.Receive(); gotErr != testutils.ErrRecvTimeout { t.Fatalf("edsBalancer got edsUpdate {%+v, %v}, when none was expected", gotUpdate, gotErr) } } // TestClientWrapperGetsXDSClientInAttributes verfies the case where the // clientWrapper receives the xdsClient to use in the attributes section of the // update. 
func (s) TestClientWrapperGetsXDSClientInAttributes(t *testing.T) { oldxdsclientNew := xdsclientNew xdsclientNew = func(_ xdsclient.Options) (xdsClientInterface, error) { t.Fatalf("unexpected call to xdsclientNew when xds_client is set in attributes") return nil, nil } defer func() { xdsclientNew = oldxdsclientNew }() cw := newXDSClientWrapper(nil, nil, balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}, nil, nil) defer cw.close() // Verify that the eds watch is registered for the expected resource name. xdsC1 := fakeclient.NewClient() cw.handleUpdate(&EDSConfig{EDSServiceName: testEDSClusterName}, attributes.New(xdsinternal.XDSClientID, xdsC1)) gotCluster, err := xdsC1.WaitForWatchEDS() if err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } if gotCluster != testEDSClusterName { t.Fatalf("xdsClient.WatchEndpoints() called with cluster: %v, want %v", gotCluster, testEDSClusterName) } // Pass a new client in the attributes. Verify that the watch is // re-registered on the new client, and that the old client is not closed // (because clientWrapper only closes clients that it creates, it does not // close client that are passed through attributes). xdsC2 := fakeclient.NewClient() cw.handleUpdate(&EDSConfig{EDSServiceName: testEDSClusterName}, attributes.New(xdsinternal.XDSClientID, xdsC2)) gotCluster, err = xdsC2.WaitForWatchEDS() if err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } if gotCluster != testEDSClusterName { t.Fatalf("xdsClient.WatchEndpoints() called with cluster: %v, want %v", gotCluster, testEDSClusterName) } if err := xdsC1.WaitForClose(); err != testutils.ErrRecvTimeout { t.Fatalf("clientWrapper closed xdsClient received in attributes") } } grpc-go-1.29.1/xds/internal/balancer/edsbalancer/xds_lrs_test.go000066400000000000000000000042751365033716300246510ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package edsbalancer import ( "testing" "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/resolver" xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils/fakeclient" ) // TestXDSLoadReporting verifies that the edsBalancer starts the loadReport // stream when the lbConfig passed to it contains a valid value for the LRS // server (empty string). func (s) TestXDSLoadReporting(t *testing.T) { builder := balancer.Get(edsName) cc := newNoopTestClientConn() edsB, ok := builder.Build(cc, balancer.BuildOptions{Target: resolver.Target{Endpoint: testEDSClusterName}}).(*edsBalancer) if !ok { t.Fatalf("builder.Build(%s) returned type {%T}, want {*edsBalancer}", edsName, edsB) } defer edsB.Close() xdsC := fakeclient.NewClient() edsB.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Attributes: attributes.New(xdsinternal.XDSClientID, xdsC)}, BalancerConfig: &EDSConfig{LrsLoadReportingServerName: new(string)}, }) gotCluster, err := xdsC.WaitForWatchEDS() if err != nil { t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) } if gotCluster != testEDSClusterName { t.Fatalf("xdsClient.WatchEndpoints() called with cluster: %v, want %v", gotCluster, testEDSClusterName) } got, err := xdsC.WaitForReportLoad() if err != nil { t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) } if got.Server != "" || got.Cluster != 
testEDSClusterName { t.Fatalf("xdsClient.ReportLoad called with {%v, %v}: want {\"\", %v}", got.Server, got.Cluster, testEDSClusterName) } } grpc-go-1.29.1/xds/internal/balancer/edsbalancer/xds_old.go000066400000000000000000000025611365033716300235640ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edsbalancer import "google.golang.org/grpc/balancer" // The old xds balancer implements logic for both CDS and EDS. With the new // design, CDS is split and moved to a separate balancer, and the xds balancer // becomes the EDS balancer. // // To keep the existing tests working, this file regisger EDS balancer under the // old xds balancer name. // // TODO: delete this file when migration to new workflow (LDS, RDS, CDS, EDS) is // done. const xdsName = "xds_experimental" func init() { balancer.Register(&xdsBalancerBuilder{}) } // xdsBalancerBuilder register edsBalancerBuilder (now with name // "eds_experimental") under the old name "xds_experimental". type xdsBalancerBuilder struct { edsBalancerBuilder } func (b *xdsBalancerBuilder) Name() string { return xdsName } grpc-go-1.29.1/xds/internal/balancer/lrs/000077500000000000000000000000001365033716300201325ustar00rootroot00000000000000grpc-go-1.29.1/xds/internal/balancer/lrs/lrs.go000066400000000000000000000250311365033716300212620ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Package lrs implements load reporting service for xds balancer. package lrs import ( "context" "sync" "sync/atomic" "time" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" "github.com/golang/protobuf/ptypes" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/xds/internal" ) const negativeOneUInt64 = ^uint64(0) // Store defines the interface for a load store. It keeps loads and can report // them to a server when requested. type Store interface { CallDropped(category string) CallStarted(l internal.Locality) CallFinished(l internal.Locality, err error) CallServerLoad(l internal.Locality, name string, d float64) // Report the load of clusterName to cc. ReportTo(ctx context.Context, cc *grpc.ClientConn, clusterName string, node *corepb.Node) } type rpcCountData struct { // Only atomic accesses are allowed for the fields. succeeded *uint64 errored *uint64 inProgress *uint64 // Map from load name to load data (sum+count). Loading data from map is // atomic, but updating data takes a lock, which could cause contention when // multiple RPCs try to report loads for the same name. // // To fix the contention, shard this map. 
serverLoads sync.Map // map[string]*rpcLoadData } func newRPCCountData() *rpcCountData { return &rpcCountData{ succeeded: new(uint64), errored: new(uint64), inProgress: new(uint64), } } func (rcd *rpcCountData) incrSucceeded() { atomic.AddUint64(rcd.succeeded, 1) } func (rcd *rpcCountData) loadAndClearSucceeded() uint64 { return atomic.SwapUint64(rcd.succeeded, 0) } func (rcd *rpcCountData) incrErrored() { atomic.AddUint64(rcd.errored, 1) } func (rcd *rpcCountData) loadAndClearErrored() uint64 { return atomic.SwapUint64(rcd.errored, 0) } func (rcd *rpcCountData) incrInProgress() { atomic.AddUint64(rcd.inProgress, 1) } func (rcd *rpcCountData) decrInProgress() { atomic.AddUint64(rcd.inProgress, negativeOneUInt64) // atomic.Add(x, -1) } func (rcd *rpcCountData) loadInProgress() uint64 { return atomic.LoadUint64(rcd.inProgress) // InProgress count is not clear when reading. } func (rcd *rpcCountData) addServerLoad(name string, d float64) { loads, ok := rcd.serverLoads.Load(name) if !ok { tl := newRPCLoadData() loads, _ = rcd.serverLoads.LoadOrStore(name, tl) } loads.(*rpcLoadData).add(d) } // Data for server loads (from trailers or oob). Fields in this struct must be // updated consistently. // // The current solution is to hold a lock, which could cause contention. To fix, // shard serverLoads map in rpcCountData. type rpcLoadData struct { mu sync.Mutex sum float64 count uint64 } func newRPCLoadData() *rpcLoadData { return &rpcLoadData{} } func (rld *rpcLoadData) add(v float64) { rld.mu.Lock() rld.sum += v rld.count++ rld.mu.Unlock() } func (rld *rpcLoadData) loadAndClear() (s float64, c uint64) { rld.mu.Lock() s = rld.sum rld.sum = 0 c = rld.count rld.count = 0 rld.mu.Unlock() return } // lrsStore collects loads from xds balancer, and periodically sends load to the // server. 
type lrsStore struct { backoff backoff.Strategy lastReported time.Time drops sync.Map // map[string]*uint64 localityRPCCount sync.Map // map[internal.Locality]*rpcCountData } // NewStore creates a store for load reports. func NewStore() Store { return &lrsStore{ backoff: backoff.DefaultExponential, lastReported: time.Now(), } } // Update functions are called by picker for each RPC. To avoid contention, all // updates are done atomically. // CallDropped adds one drop record with the given category to store. func (ls *lrsStore) CallDropped(category string) { p, ok := ls.drops.Load(category) if !ok { tp := new(uint64) p, _ = ls.drops.LoadOrStore(category, tp) } atomic.AddUint64(p.(*uint64), 1) } func (ls *lrsStore) CallStarted(l internal.Locality) { p, ok := ls.localityRPCCount.Load(l) if !ok { tp := newRPCCountData() p, _ = ls.localityRPCCount.LoadOrStore(l, tp) } p.(*rpcCountData).incrInProgress() } func (ls *lrsStore) CallFinished(l internal.Locality, err error) { p, ok := ls.localityRPCCount.Load(l) if !ok { // The map is never cleared, only values in the map are reset. So the // case where entry for call-finish is not found should never happen. return } p.(*rpcCountData).decrInProgress() if err == nil { p.(*rpcCountData).incrSucceeded() } else { p.(*rpcCountData).incrErrored() } } func (ls *lrsStore) CallServerLoad(l internal.Locality, name string, d float64) { p, ok := ls.localityRPCCount.Load(l) if !ok { // The map is never cleared, only values in the map are reset. So the // case where entry for CallServerLoad is not found should never happen. 
return } p.(*rpcCountData).addServerLoad(name, d) } func (ls *lrsStore) buildStats(clusterName string) []*endpointpb.ClusterStats { var ( totalDropped uint64 droppedReqs []*endpointpb.ClusterStats_DroppedRequests localityStats []*endpointpb.UpstreamLocalityStats ) ls.drops.Range(func(category, countP interface{}) bool { tempCount := atomic.SwapUint64(countP.(*uint64), 0) if tempCount == 0 { return true } totalDropped += tempCount droppedReqs = append(droppedReqs, &endpointpb.ClusterStats_DroppedRequests{ Category: category.(string), DroppedCount: tempCount, }) return true }) ls.localityRPCCount.Range(func(locality, countP interface{}) bool { tempLocality := locality.(internal.Locality) tempCount := countP.(*rpcCountData) tempSucceeded := tempCount.loadAndClearSucceeded() tempInProgress := tempCount.loadInProgress() tempErrored := tempCount.loadAndClearErrored() if tempSucceeded == 0 && tempInProgress == 0 && tempErrored == 0 { return true } var loadMetricStats []*endpointpb.EndpointLoadMetricStats tempCount.serverLoads.Range(func(name, data interface{}) bool { tempName := name.(string) tempSum, tempCount := data.(*rpcLoadData).loadAndClear() if tempCount == 0 { return true } loadMetricStats = append(loadMetricStats, &endpointpb.EndpointLoadMetricStats{ MetricName: tempName, NumRequestsFinishedWithMetric: tempCount, TotalMetricValue: tempSum, }, ) return true }) localityStats = append(localityStats, &endpointpb.UpstreamLocalityStats{ Locality: &corepb.Locality{ Region: tempLocality.Region, Zone: tempLocality.Zone, SubZone: tempLocality.SubZone, }, TotalSuccessfulRequests: tempSucceeded, TotalRequestsInProgress: tempInProgress, TotalErrorRequests: tempErrored, LoadMetricStats: loadMetricStats, UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. 
}) return true }) dur := time.Since(ls.lastReported) ls.lastReported = time.Now() var ret []*endpointpb.ClusterStats ret = append(ret, &endpointpb.ClusterStats{ ClusterName: clusterName, UpstreamLocalityStats: localityStats, TotalDroppedRequests: totalDropped, DroppedRequests: droppedReqs, LoadReportInterval: ptypes.DurationProto(dur), }) return ret } // ReportTo makes a streaming lrs call to cc and blocks. // // It retries the call (with backoff) until ctx is canceled. func (ls *lrsStore) ReportTo(ctx context.Context, cc *grpc.ClientConn, clusterName string, node *corepb.Node) { c := lrsgrpc.NewLoadReportingServiceClient(cc) var ( retryCount int doBackoff bool ) for { select { case <-ctx.Done(): return default: } if doBackoff { backoffTimer := time.NewTimer(ls.backoff.Backoff(retryCount)) select { case <-backoffTimer.C: case <-ctx.Done(): backoffTimer.Stop() return } retryCount++ } doBackoff = true stream, err := c.StreamLoadStats(ctx) if err != nil { grpclog.Warningf("lrs: failed to create stream: %v", err) continue } grpclog.Infof("lrs: created LRS stream") req := &lrspb.LoadStatsRequest{Node: node} grpclog.Infof("lrs: sending init LoadStatsRequest: %v", req) if err := stream.Send(req); err != nil { grpclog.Warningf("lrs: failed to send first request: %v", err) continue } first, err := stream.Recv() if err != nil { grpclog.Warningf("lrs: failed to receive first response: %v", err) continue } grpclog.Infof("lrs: received first LoadStatsResponse: %+v", first) interval, err := ptypes.Duration(first.LoadReportingInterval) if err != nil { grpclog.Warningf("lrs: failed to convert report interval: %v", err) continue } // The LRS client should join the clusters it knows with the cluster // list from response, and send loads for them. // // But the LRS client now only supports one cluster. TODO: extend it to // support multiple clusters. 
var clusterFoundInResponse bool for _, c := range first.Clusters { if c == clusterName { clusterFoundInResponse = true } } if !clusterFoundInResponse { grpclog.Warningf("lrs: received clusters %v does not contain expected {%v}", first.Clusters, clusterName) continue } if first.ReportEndpointGranularity { // TODO: fixme to support per endpoint loads. grpclog.Warningf("lrs: endpoint loads requested, but not supported by current implementation") continue } // No backoff afterwards. doBackoff = false retryCount = 0 ls.sendLoads(ctx, stream, clusterName, interval) } } func (ls *lrsStore) sendLoads(ctx context.Context, stream lrsgrpc.LoadReportingService_StreamLoadStatsClient, clusterName string, interval time.Duration) { tick := time.NewTicker(interval) defer tick.Stop() for { select { case <-tick.C: case <-ctx.Done(): return } req := &lrspb.LoadStatsRequest{ClusterStats: ls.buildStats(clusterName)} grpclog.Infof("lrs: sending LRS loads: %+v", req) if err := stream.Send(req); err != nil { grpclog.Warningf("lrs: failed to send report: %v", err) return } } } grpc-go-1.29.1/xds/internal/balancer/lrs/lrs_test.go000066400000000000000000000342311365033716300223230ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package lrs import ( "context" "fmt" "io" "net" "sort" "sync" "testing" "time" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" "github.com/golang/protobuf/proto" durationpb "github.com/golang/protobuf/ptypes/duration" structpb "github.com/golang/protobuf/ptypes/struct" "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal" ) const ( testService = "grpc.service.test" testHostname = "grpc.server.name" nodeMetadataHostnameKey = "PROXYLESS_CLIENT_HOSTNAME" ) var ( dropCategories = []string{"drop_for_real", "drop_for_fun"} localities = []internal.Locality{{Region: "a"}, {Region: "b"}} errTest = fmt.Errorf("test error") ) type rpcCountDataForTest struct { succeeded uint64 errored uint64 inProgress uint64 serverLoads map[string]float64 } func newRPCCountDataForTest(succeeded, errored, inprogress uint64, serverLoads map[string]float64) *rpcCountDataForTest { return &rpcCountDataForTest{ succeeded: succeeded, errored: errored, inProgress: inprogress, serverLoads: serverLoads, } } // Equal() is needed to compare unexported fields. func (rcd *rpcCountDataForTest) Equal(b *rpcCountDataForTest) bool { return rcd.inProgress == b.inProgress && rcd.errored == b.errored && rcd.succeeded == b.succeeded && cmp.Equal(rcd.serverLoads, b.serverLoads) } // equalClusterStats sorts requests and clear report internal before comparing. 
func equalClusterStats(a, b []*endpointpb.ClusterStats) bool { for _, t := range [][]*endpointpb.ClusterStats{a, b} { for _, s := range t { sort.Slice(s.DroppedRequests, func(i, j int) bool { return s.DroppedRequests[i].Category < s.DroppedRequests[j].Category }) sort.Slice(s.UpstreamLocalityStats, func(i, j int) bool { return s.UpstreamLocalityStats[i].Locality.String() < s.UpstreamLocalityStats[j].Locality.String() }) for _, us := range s.UpstreamLocalityStats { sort.Slice(us.LoadMetricStats, func(i, j int) bool { return us.LoadMetricStats[i].MetricName < us.LoadMetricStats[j].MetricName }) } s.LoadReportInterval = nil } } return cmp.Equal(a, b, cmp.Comparer(proto.Equal)) } func Test_lrsStore_buildStats_drops(t *testing.T) { tests := []struct { name string drops []map[string]uint64 }{ { name: "one drop report", drops: []map[string]uint64{{ dropCategories[0]: 31, dropCategories[1]: 41, }}, }, { name: "two drop reports", drops: []map[string]uint64{{ dropCategories[0]: 31, dropCategories[1]: 41, }, { dropCategories[0]: 59, dropCategories[1]: 26, }}, }, { name: "no empty report", drops: []map[string]uint64{{ dropCategories[0]: 31, dropCategories[1]: 41, }, { dropCategories[0]: 0, // This shouldn't cause an empty report for category[0]. 
dropCategories[1]: 26, }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ls := NewStore().(*lrsStore) for _, ds := range tt.drops { var ( totalDropped uint64 droppedReqs []*endpointpb.ClusterStats_DroppedRequests ) for cat, count := range ds { if count == 0 { continue } totalDropped += count droppedReqs = append(droppedReqs, &endpointpb.ClusterStats_DroppedRequests{ Category: cat, DroppedCount: count, }) } want := []*endpointpb.ClusterStats{ { ClusterName: testService, TotalDroppedRequests: totalDropped, DroppedRequests: droppedReqs, }, } var wg sync.WaitGroup for c, count := range ds { for i := 0; i < int(count); i++ { wg.Add(1) go func(i int, c string) { ls.CallDropped(c) wg.Done() }(i, c) } } wg.Wait() if got := ls.buildStats(testService); !equalClusterStats(got, want) { t.Errorf("lrsStore.buildStats() = %v, want %v", got, want) t.Errorf("%s", cmp.Diff(got, want)) } } }) } } func Test_lrsStore_buildStats_rpcCounts(t *testing.T) { tests := []struct { name string rpcs []map[internal.Locality]struct { start, success, failure uint64 serverData map[string]float64 // Will be reported with successful RPCs. 
} }{ { name: "one rpcCount report", rpcs: []map[internal.Locality]struct { start, success, failure uint64 serverData map[string]float64 }{{ localities[0]: {8, 3, 1, nil}, }}, }, { name: "two localities one rpcCount report", rpcs: []map[internal.Locality]struct { start, success, failure uint64 serverData map[string]float64 }{{ localities[0]: {8, 3, 1, nil}, localities[1]: {15, 1, 5, nil}, }}, }, { name: "three rpcCount reports", rpcs: []map[internal.Locality]struct { start, success, failure uint64 serverData map[string]float64 }{{ localities[0]: {8, 3, 1, nil}, localities[1]: {15, 1, 5, nil}, }, { localities[0]: {8, 3, 1, nil}, }, { localities[1]: {15, 1, 5, nil}, }}, }, { name: "no empty report", rpcs: []map[internal.Locality]struct { start, success, failure uint64 serverData map[string]float64 }{{ localities[0]: {4, 3, 1, nil}, localities[1]: {7, 1, 5, nil}, }, { localities[0]: {0, 0, 0, nil}, // This shouldn't cause an empty report for locality[0]. localities[1]: {1, 1, 0, nil}, }}, }, { name: "two localities one report with server loads", rpcs: []map[internal.Locality]struct { start, success, failure uint64 serverData map[string]float64 }{{ localities[0]: {8, 3, 1, map[string]float64{"cpu": 15, "mem": 20}}, localities[1]: {15, 4, 5, map[string]float64{"net": 5, "disk": 0.8}}, }}, }, { name: "three reports with server loads", rpcs: []map[internal.Locality]struct { start, success, failure uint64 serverData map[string]float64 }{{ localities[0]: {8, 3, 1, map[string]float64{"cpu": 15, "mem": 20}}, localities[1]: {15, 4, 5, map[string]float64{"net": 5, "disk": 0.8}}, }, { localities[0]: {8, 3, 1, map[string]float64{"cpu": 1, "mem": 2}}, }, { localities[1]: {15, 4, 5, map[string]float64{"net": 13, "disk": 1.4}}, }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ls := NewStore().(*lrsStore) // InProgress count doesn't get cleared at each buildStats, keep // them to carry over. 
inProgressCounts := make(map[internal.Locality]uint64) for _, counts := range tt.rpcs { var upstreamLocalityStats []*endpointpb.UpstreamLocalityStats for l, count := range counts { tempInProgress := count.start - count.success - count.failure + inProgressCounts[l] inProgressCounts[l] = tempInProgress if count.success == 0 && tempInProgress == 0 && count.failure == 0 { continue } var loadMetricStats []*endpointpb.EndpointLoadMetricStats for n, d := range count.serverData { loadMetricStats = append(loadMetricStats, &endpointpb.EndpointLoadMetricStats{ MetricName: n, NumRequestsFinishedWithMetric: count.success, TotalMetricValue: d * float64(count.success), }, ) } upstreamLocalityStats = append(upstreamLocalityStats, &endpointpb.UpstreamLocalityStats{ Locality: l.ToProto(), TotalSuccessfulRequests: count.success, TotalRequestsInProgress: tempInProgress, TotalErrorRequests: count.failure, LoadMetricStats: loadMetricStats, }) } // InProgress count doesn't get cleared at each buildStats, and // needs to be carried over to the next result. 
for l, c := range inProgressCounts { if _, ok := counts[l]; !ok { upstreamLocalityStats = append(upstreamLocalityStats, &endpointpb.UpstreamLocalityStats{ Locality: l.ToProto(), TotalRequestsInProgress: c, }) } } want := []*endpointpb.ClusterStats{ { ClusterName: testService, UpstreamLocalityStats: upstreamLocalityStats, }, } var wg sync.WaitGroup for l, count := range counts { for i := 0; i < int(count.success); i++ { wg.Add(1) go func(l internal.Locality, serverData map[string]float64) { ls.CallStarted(l) ls.CallFinished(l, nil) for n, d := range serverData { ls.CallServerLoad(l, n, d) } wg.Done() }(l, count.serverData) } for i := 0; i < int(count.failure); i++ { wg.Add(1) go func(l internal.Locality) { ls.CallStarted(l) ls.CallFinished(l, errTest) wg.Done() }(l) } for i := 0; i < int(count.start-count.success-count.failure); i++ { wg.Add(1) go func(l internal.Locality) { ls.CallStarted(l) wg.Done() }(l) } } wg.Wait() if got := ls.buildStats(testService); !equalClusterStats(got, want) { t.Errorf("lrsStore.buildStats() = %v, want %v", got, want) t.Errorf("%s", cmp.Diff(got, want)) } } }) } } type lrsServer struct { reportingInterval *durationpb.Duration mu sync.Mutex dropTotal uint64 drops map[string]uint64 rpcs map[internal.Locality]*rpcCountDataForTest } func (lrss *lrsServer) StreamLoadStats(stream lrsgrpc.LoadReportingService_StreamLoadStatsServer) error { req, err := stream.Recv() if err != nil { return err } if req.GetNode().GetMetadata().GetFields()[nodeMetadataHostnameKey].GetStringValue() != testHostname { return status.Errorf(codes.FailedPrecondition, "unexpected req: %+v", req) } if err := stream.Send(&lrspb.LoadStatsResponse{ Clusters: []string{testService, "another-cluster"}, LoadReportingInterval: lrss.reportingInterval, }); err != nil { return err } for { req, err := stream.Recv() if err != nil { if err == io.EOF { return nil } return err } stats := req.ClusterStats[0] lrss.mu.Lock() lrss.dropTotal += stats.TotalDroppedRequests for _, d := range 
stats.DroppedRequests { lrss.drops[d.Category] += d.DroppedCount } for _, ss := range stats.UpstreamLocalityStats { l := internal.Locality{ Region: ss.Locality.Region, Zone: ss.Locality.Zone, SubZone: ss.Locality.SubZone, } counts, ok := lrss.rpcs[l] if !ok { counts = newRPCCountDataForTest(0, 0, 0, nil) lrss.rpcs[l] = counts } counts.succeeded += ss.TotalSuccessfulRequests counts.inProgress = ss.TotalRequestsInProgress counts.errored += ss.TotalErrorRequests for _, ts := range ss.LoadMetricStats { if counts.serverLoads == nil { counts.serverLoads = make(map[string]float64) } counts.serverLoads[ts.MetricName] = ts.TotalMetricValue / float64(ts.NumRequestsFinishedWithMetric) } } lrss.mu.Unlock() } } func setupServer(t *testing.T, reportingInterval *durationpb.Duration) (addr string, lrss *lrsServer, cleanup func()) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("listen failed due to: %v", err) } svr := grpc.NewServer() lrss = &lrsServer{ reportingInterval: reportingInterval, drops: make(map[string]uint64), rpcs: make(map[internal.Locality]*rpcCountDataForTest), } lrsgrpc.RegisterLoadReportingServiceServer(svr, lrss) go svr.Serve(lis) return lis.Addr().String(), lrss, func() { svr.Stop() lis.Close() } } func Test_lrsStore_ReportTo(t *testing.T) { const intervalNano = 1000 * 1000 * 50 addr, lrss, cleanup := setupServer(t, &durationpb.Duration{ Seconds: 0, Nanos: intervalNano, }) defer cleanup() ls := NewStore() cc, err := grpc.Dial(addr, grpc.WithInsecure()) if err != nil { t.Fatalf("failed to dial: %v", err) } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() done := make(chan struct{}) go func() { node := &corepb.Node{ Metadata: &structpb.Struct{ Fields: map[string]*structpb.Value{ nodeMetadataHostnameKey: { Kind: &structpb.Value_StringValue{StringValue: testHostname}, }, }, }, } ls.ReportTo(ctx, cc, testService, node) close(done) }() drops := map[string]uint64{ dropCategories[0]: 13, dropCategories[1]: 
14, } for c, d := range drops { for i := 0; i < int(d); i++ { ls.CallDropped(c) time.Sleep(time.Nanosecond * intervalNano / 10) } } rpcs := map[internal.Locality]*rpcCountDataForTest{ localities[0]: newRPCCountDataForTest(3, 1, 4, nil), localities[1]: newRPCCountDataForTest(1, 5, 9, map[string]float64{"pi": 3.14, "e": 2.71}), } for l, count := range rpcs { for i := 0; i < int(count.succeeded); i++ { go func(i int, l internal.Locality, count *rpcCountDataForTest) { ls.CallStarted(l) ls.CallFinished(l, nil) for n, d := range count.serverLoads { ls.CallServerLoad(l, n, d) } }(i, l, count) } for i := 0; i < int(count.inProgress); i++ { go func(i int, l internal.Locality) { ls.CallStarted(l) }(i, l) } for i := 0; i < int(count.errored); i++ { go func(i int, l internal.Locality) { ls.CallStarted(l) ls.CallFinished(l, errTest) }(i, l) } } time.Sleep(time.Nanosecond * intervalNano * 2) cancel() <-done lrss.mu.Lock() defer lrss.mu.Unlock() if !cmp.Equal(lrss.drops, drops) { t.Errorf("different: %v", cmp.Diff(lrss.drops, drops)) } if !cmp.Equal(lrss.rpcs, rpcs) { t.Errorf("different: %v", cmp.Diff(lrss.rpcs, rpcs)) } } grpc-go-1.29.1/xds/internal/balancer/orca/000077500000000000000000000000001365033716300202565ustar00rootroot00000000000000grpc-go-1.29.1/xds/internal/balancer/orca/orca.go000066400000000000000000000041531365033716300215340ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ // Package orca implements Open Request Cost Aggregation. package orca import ( orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" "github.com/golang/protobuf/proto" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/metadata" ) const mdKey = "X-Endpoint-Load-Metrics-Bin" // toBytes converts a orca load report into bytes. func toBytes(r *orcapb.OrcaLoadReport) []byte { if r == nil { return nil } b, err := proto.Marshal(r) if err != nil { grpclog.Warningf("orca: failed to marshal load report: %v", err) return nil } return b } // ToMetadata converts a orca load report into grpc metadata. func ToMetadata(r *orcapb.OrcaLoadReport) metadata.MD { b := toBytes(r) if b == nil { return nil } return metadata.Pairs(mdKey, string(b)) } // fromBytes reads load report bytes and converts it to orca. func fromBytes(b []byte) *orcapb.OrcaLoadReport { ret := new(orcapb.OrcaLoadReport) if err := proto.Unmarshal(b, ret); err != nil { grpclog.Warningf("orca: failed to unmarshal load report: %v", err) return nil } return ret } // FromMetadata reads load report from metadata and converts it to orca. // // It returns nil if report is not found in metadata. func FromMetadata(md metadata.MD) *orcapb.OrcaLoadReport { vs := md.Get(mdKey) if len(vs) == 0 { return nil } return fromBytes([]byte(vs[0])) } type loadParser struct{} func (*loadParser) Parse(md metadata.MD) interface{} { return FromMetadata(md) } func init() { balancerload.SetParser(&loadParser{}) } grpc-go-1.29.1/xds/internal/balancer/orca/orca_test.go000066400000000000000000000042361365033716300225750ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package orca import ( "strings" "testing" orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/metadata" ) var ( testMessage = &orcapb.OrcaLoadReport{ CpuUtilization: 0.1, MemUtilization: 0.2, RequestCost: map[string]float64{"ccc": 3.4}, Utilization: map[string]float64{"ttt": 0.4}, } testBytes, _ = proto.Marshal(testMessage) ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func (s) TestToMetadata(t *testing.T) { tests := []struct { name string r *orcapb.OrcaLoadReport want metadata.MD }{{ name: "nil", r: nil, want: nil, }, { name: "valid", r: testMessage, want: metadata.MD{ strings.ToLower(mdKey): []string{string(testBytes)}, }, }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := ToMetadata(tt.r); !cmp.Equal(got, tt.want) { t.Errorf("ToMetadata() = %v, want %v", got, tt.want) } }) } } func (s) TestFromMetadata(t *testing.T) { tests := []struct { name string md metadata.MD want *orcapb.OrcaLoadReport }{{ name: "nil", md: nil, want: nil, }, { name: "valid", md: metadata.MD{ strings.ToLower(mdKey): []string{string(testBytes)}, }, want: testMessage, }} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := FromMetadata(tt.md); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) { t.Errorf("FromMetadata() = %v, want %v", got, tt.want) } }) } } 
grpc-go-1.29.1/xds/internal/client/000077500000000000000000000000001365033716300170415ustar00rootroot00000000000000grpc-go-1.29.1/xds/internal/client/bootstrap/000077500000000000000000000000001365033716300210565ustar00rootroot00000000000000grpc-go-1.29.1/xds/internal/client/bootstrap/bootstrap.go000066400000000000000000000140261365033716300234250ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package bootstrap provides the functionality to initialize certain aspects // of an xDS client by reading a bootstrap file. package bootstrap import ( "bytes" "encoding/json" "fmt" "io/ioutil" "os" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/golang/protobuf/jsonpb" "google.golang.org/grpc" "google.golang.org/grpc/credentials/google" ) const ( // Environment variable which holds the name of the xDS bootstrap file. fileEnv = "GRPC_XDS_BOOTSTRAP" // Type name for Google default credentials. googleDefaultCreds = "google_default" gRPCUserAgentName = "gRPC Go" clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" ) var gRPCVersion = fmt.Sprintf("%s %s", gRPCUserAgentName, grpc.Version) // For overriding in unit tests. var fileReadFunc = ioutil.ReadFile // Config provides the xDS client with several key bits of information that it // requires in its interaction with an xDS server. The Config is initialized // from the bootstrap file. 
type Config struct { // BalancerName is the name of the xDS server to connect to. // // The bootstrap file contains a list of servers (with name+creds), but we // pick the first one. BalancerName string // Creds contains the credentials to be used while talking to the xDS // server, as a grpc.DialOption. Creds grpc.DialOption // NodeProto contains the node proto to be used in xDS requests. NodeProto *corepb.Node } type channelCreds struct { Type string `json:"type"` Config json.RawMessage `json:"config"` } type xdsServer struct { ServerURI string `json:"server_uri"` ChannelCreds []channelCreds `json:"channel_creds"` } // NewConfig returns a new instance of Config initialized by reading the // bootstrap file found at ${GRPC_XDS_BOOTSTRAP}. // // The format of the bootstrap file will be as follows: // { // "xds_server": { // "server_uri": , // "channel_creds": [ // { // "type": , // "config": // } // ] // }, // "node": // } // // Currently, we support exactly one type of credential, which is // "google_default", where we use the host's default certs for transport // credentials and a Google oauth token for call credentials. // // This function tries to process as much of the bootstrap file as possible (in // the presence of the errors) and may return a Config object with certain // fields left unspecified, in which case the caller should use some sane // defaults. 
func NewConfig() (*Config, error) { config := &Config{} fName, ok := os.LookupEnv(fileEnv) if !ok { return nil, fmt.Errorf("xds: Environment variable %v not defined", fileEnv) } logger.Infof("Got bootstrap file location from %v environment variable: %v", fileEnv, fName) data, err := fileReadFunc(fName) if err != nil { return nil, fmt.Errorf("xds: Failed to read bootstrap file %s with error %v", fName, err) } logger.Debugf("Bootstrap content: %s", data) var jsonData map[string]json.RawMessage if err := json.Unmarshal(data, &jsonData); err != nil { return nil, fmt.Errorf("xds: Failed to parse file %s (content %v) with error: %v", fName, string(data), err) } m := jsonpb.Unmarshaler{AllowUnknownFields: true} for k, v := range jsonData { switch k { case "node": n := &corepb.Node{} if err := m.Unmarshal(bytes.NewReader(v), n); err != nil { return nil, fmt.Errorf("xds: jsonpb.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) } config.NodeProto = n case "xds_servers": var servers []*xdsServer if err := json.Unmarshal(v, &servers); err != nil { return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) } if len(servers) < 1 { return nil, fmt.Errorf("xds: bootstrap file parsing failed during bootstrap: file doesn't contain any xds server to connect to") } xs := servers[0] config.BalancerName = xs.ServerURI for _, cc := range xs.ChannelCreds { if cc.Type == googleDefaultCreds { config.Creds = grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()) // We stop at the first credential type that we support. break } } } // Do not fail the xDS bootstrap when an unknown field is seen. This can // happen when an older version client reads a newer version bootstrap // file with new fields. 
} if config.BalancerName == "" { return nil, fmt.Errorf("xds: Required field %q not found in bootstrap", "xds_servers.server_uri") } // If we don't find a nodeProto in the bootstrap file, we just create an // empty one here. That way, callers of this function can always expect // that the NodeProto field is non-nil. if config.NodeProto == nil { config.NodeProto = &corepb.Node{} } // BuildVersion is deprecated, and is replaced by user_agent_name and // user_agent_version. But the management servers are still using the old // field, so we will keep both set. config.NodeProto.BuildVersion = gRPCVersion config.NodeProto.UserAgentName = gRPCUserAgentName config.NodeProto.UserAgentVersionType = &corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} config.NodeProto.ClientFeatures = append(config.NodeProto.ClientFeatures, clientFeatureNoOverprovisioning) logger.Infof("Bootstrap config for creating xds-client: %+v", config) return config, nil } grpc-go-1.29.1/xds/internal/client/bootstrap/bootstrap_test.go000066400000000000000000000167541365033716300244760ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package bootstrap import ( "os" "testing" "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/credentials/google" "google.golang.org/grpc/internal/grpctest" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" structpb "github.com/golang/protobuf/ptypes/struct" ) var ( nodeProto = &corepb.Node{ Id: "ENVOY_NODE_ID", Metadata: &structpb.Struct{ Fields: map[string]*structpb.Value{ "TRAFFICDIRECTOR_GRPC_HOSTNAME": { Kind: &structpb.Value_StringValue{StringValue: "trafficdirector"}, }, }, }, BuildVersion: gRPCVersion, UserAgentName: gRPCUserAgentName, UserAgentVersionType: &corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, ClientFeatures: []string{clientFeatureNoOverprovisioning}, } nilCredsConfig = &Config{ BalancerName: "trafficdirector.googleapis.com:443", Creds: nil, NodeProto: nodeProto, } nonNilCredsConfig = &Config{ BalancerName: "trafficdirector.googleapis.com:443", Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), NodeProto: nodeProto, } ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } // TestNewConfig exercises the functionality in NewConfig with different // bootstrap file contents. It overrides the fileReadFunc by returning // bootstrap file contents defined in this test, instead of reading from a // file. 
func (s) TestNewConfig(t *testing.T) { bootstrapFileMap := map[string]string{ "empty": "", "badJSON": `["test": 123]`, "noBalancerName": `{"node": {"id": "ENVOY_NODE_ID"}}`, "emptyNodeProto": ` { "xds_servers" : [{ "server_uri": "trafficdirector.googleapis.com:443" }] }`, "emptyXdsServer": ` { "node": { "id": "ENVOY_NODE_ID", "metadata": { "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" } } }`, "unknownTopLevelFieldInFile": ` { "node": { "id": "ENVOY_NODE_ID", "metadata": { "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" } }, "xds_servers" : [{ "server_uri": "trafficdirector.googleapis.com:443", "channel_creds": [ { "type": "not-google-default" } ] }], "unknownField": "foobar" }`, "unknownFieldInNodeProto": ` { "node": { "id": "ENVOY_NODE_ID", "unknownField": "foobar", "metadata": { "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" } }, "xds_servers" : [{ "server_uri": "trafficdirector.googleapis.com:443" }] }`, "unknownFieldInXdsServer": ` { "node": { "id": "ENVOY_NODE_ID", "metadata": { "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" } }, "xds_servers" : [{ "server_uri": "trafficdirector.googleapis.com:443", "channel_creds": [ { "type": "not-google-default" } ], "unknownField": "foobar" }] }`, "emptyChannelCreds": ` { "node": { "id": "ENVOY_NODE_ID", "metadata": { "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" } }, "xds_servers" : [{ "server_uri": "trafficdirector.googleapis.com:443" }] }`, "nonGoogleDefaultCreds": ` { "node": { "id": "ENVOY_NODE_ID", "metadata": { "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" } }, "xds_servers" : [{ "server_uri": "trafficdirector.googleapis.com:443", "channel_creds": [ { "type": "not-google-default" } ] }] }`, "multipleChannelCreds": ` { "node": { "id": "ENVOY_NODE_ID", "metadata": { "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" } }, "xds_servers" : [{ "server_uri": "trafficdirector.googleapis.com:443", "channel_creds": [ { "type": "not-google-default" }, { "type": "google_default" } ] }] }`, 
"goodBootstrap": ` { "node": { "id": "ENVOY_NODE_ID", "metadata": { "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" } }, "xds_servers" : [{ "server_uri": "trafficdirector.googleapis.com:443", "channel_creds": [ { "type": "google_default" } ] }] }`, "multipleXDSServers": ` { "node": { "id": "ENVOY_NODE_ID", "metadata": { "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" } }, "xds_servers" : [ { "server_uri": "trafficdirector.googleapis.com:443", "channel_creds": [{ "type": "google_default" }] }, { "server_uri": "backup.never.use.com:1234", "channel_creds": [{ "type": "not-google-default" }] } ] }`, } oldFileReadFunc := fileReadFunc fileReadFunc = func(name string) ([]byte, error) { if b, ok := bootstrapFileMap[name]; ok { return []byte(b), nil } return nil, os.ErrNotExist } defer func() { fileReadFunc = oldFileReadFunc os.Unsetenv(fileEnv) }() tests := []struct { name string wantConfig *Config wantError bool }{ {"nonExistentBootstrapFile", nil, true}, {"empty", nil, true}, {"badJSON", nil, true}, {"emptyNodeProto", &Config{ BalancerName: "trafficdirector.googleapis.com:443", NodeProto: &corepb.Node{ BuildVersion: gRPCVersion, UserAgentName: gRPCUserAgentName, UserAgentVersionType: &corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, ClientFeatures: []string{clientFeatureNoOverprovisioning}, }, }, false}, {"noBalancerName", nil, true}, {"emptyXdsServer", nil, true}, {"unknownTopLevelFieldInFile", nilCredsConfig, false}, {"unknownFieldInNodeProto", nilCredsConfig, false}, {"unknownFieldInXdsServer", nilCredsConfig, false}, {"emptyChannelCreds", nilCredsConfig, false}, {"nonGoogleDefaultCreds", nilCredsConfig, false}, {"multipleChannelCreds", nonNilCredsConfig, false}, {"goodBootstrap", nonNilCredsConfig, false}, {"multipleXDSServers", nonNilCredsConfig, false}, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { if err := os.Setenv(fileEnv, test.name); err != nil { t.Fatalf("os.Setenv(%s, %s) failed with error: %v", fileEnv, test.name, 
err) } config, err := NewConfig() if err != nil { if !test.wantError { t.Fatalf("unexpected error %v", err) } return } if test.wantError { t.Fatalf("wantError: %v, got error %v", test.wantError, err) } if config.BalancerName != test.wantConfig.BalancerName { t.Errorf("config.BalancerName is %s, want %s", config.BalancerName, test.wantConfig.BalancerName) } if !proto.Equal(config.NodeProto, test.wantConfig.NodeProto) { t.Errorf("config.NodeProto is %#v, want %#v", config.NodeProto, test.wantConfig.NodeProto) } if (config.Creds != nil) != (test.wantConfig.Creds != nil) { t.Errorf("config.Creds is %#v, want %#v", config.Creds, test.wantConfig.Creds) } }) } } func (s) TestNewConfigEnvNotSet(t *testing.T) { os.Unsetenv(fileEnv) config, err := NewConfig() if err == nil { t.Errorf("NewConfig() returned: %#v, , wanted non-nil error", config) } } grpc-go-1.29.1/xds/internal/client/bootstrap/logging.go000066400000000000000000000013611365033716300230340ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package bootstrap import "google.golang.org/grpc/internal/grpclog" const prefix = "[xds-bootstrap] " var logger = grpclog.NewPrefixLogger(prefix) grpc-go-1.29.1/xds/internal/client/cds.go000066400000000000000000000062421365033716300201450ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package client import ( "fmt" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/golang/protobuf/ptypes" ) // handleCDSResponse processes an CDS response received from the xDS server. On // receipt of a good response, it also invokes the registered watcher callback. func (v2c *v2Client) handleCDSResponse(resp *xdspb.DiscoveryResponse) error { v2c.mu.Lock() defer v2c.mu.Unlock() wi := v2c.watchMap[cdsURL] if wi == nil { return fmt.Errorf("xds: no CDS watcher found when handling CDS response: %+v", resp) } var returnUpdate CDSUpdate localCache := make(map[string]CDSUpdate) for _, r := range resp.GetResources() { var resource ptypes.DynamicAny if err := ptypes.UnmarshalAny(r, &resource); err != nil { return fmt.Errorf("xds: failed to unmarshal resource in CDS response: %v", err) } cluster, ok := resource.Message.(*xdspb.Cluster) if !ok { return fmt.Errorf("xds: unexpected resource type: %T in CDS response", resource.Message) } v2c.logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, cluster) update, err := validateCluster(cluster) if err != nil { return err } // If the Cluster message in the CDS response did not contain a // serviceName, we will just use the clusterName for EDS. 
if update.ServiceName == "" { update.ServiceName = cluster.GetName() } localCache[cluster.GetName()] = update v2c.logger.Debugf("Resource with name %v, type %T, value %+v added to cache", cluster.GetName(), update, update) if cluster.GetName() == wi.target[0] { returnUpdate = update } } v2c.cdsCache = localCache var err error if returnUpdate.ServiceName == "" { err = fmt.Errorf("xds: CDS target %s not found in received response %+v", wi.target, resp) } wi.stopTimer() wi.cdsCallback(returnUpdate, err) return nil } func validateCluster(cluster *xdspb.Cluster) (CDSUpdate, error) { emptyUpdate := CDSUpdate{ServiceName: "", EnableLRS: false} switch { case cluster.GetType() != xdspb.Cluster_EDS: return emptyUpdate, fmt.Errorf("xds: unexpected cluster type %v in response: %+v", cluster.GetType(), cluster) case cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil: return emptyUpdate, fmt.Errorf("xds: unexpected edsConfig in response: %+v", cluster) case cluster.GetLbPolicy() != xdspb.Cluster_ROUND_ROBIN: return emptyUpdate, fmt.Errorf("xds: unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) } return CDSUpdate{ ServiceName: cluster.GetEdsClusterConfig().GetServiceName(), EnableLRS: cluster.GetLrsServer().GetSelf() != nil, }, nil } grpc-go-1.29.1/xds/internal/client/cds_test.go000066400000000000000000000353211365033716300212040ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package client import ( "errors" "fmt" "testing" "time" discoverypb "github.com/envoyproxy/go-control-plane/envoy/api/v2" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/golang/protobuf/proto" anypb "github.com/golang/protobuf/ptypes/any" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeserver" ) const ( clusterName1 = "foo-cluster" clusterName2 = "bar-cluster" serviceName1 = "foo-service" serviceName2 = "bar-service" ) func (v2c *v2Client) cloneCDSCacheForTesting() map[string]CDSUpdate { v2c.mu.Lock() defer v2c.mu.Unlock() cloneCache := make(map[string]CDSUpdate) for k, v := range v2c.cdsCache { cloneCache[k] = v } return cloneCache } func (s) TestValidateCluster(t *testing.T) { emptyUpdate := CDSUpdate{ServiceName: "", EnableLRS: false} tests := []struct { name string cluster *xdspb.Cluster wantUpdate CDSUpdate wantErr bool }{ { name: "non-eds-cluster-type", cluster: &xdspb.Cluster{ ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_STATIC}, EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{ EdsConfig: &corepb.ConfigSource{ ConfigSourceSpecifier: &corepb.ConfigSource_Ads{ Ads: &corepb.AggregatedConfigSource{}, }, }, }, LbPolicy: xdspb.Cluster_LEAST_REQUEST, }, wantUpdate: emptyUpdate, wantErr: true, }, { name: "no-eds-config", cluster: &xdspb.Cluster{ ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS}, LbPolicy: xdspb.Cluster_ROUND_ROBIN, }, wantUpdate: emptyUpdate, wantErr: true, }, { name: "no-ads-config-source", cluster: &xdspb.Cluster{ ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS}, EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{}, LbPolicy: xdspb.Cluster_ROUND_ROBIN, }, wantUpdate: emptyUpdate, wantErr: true, }, { name: "non-round-robin-lb-policy", cluster: &xdspb.Cluster{ ClusterDiscoveryType: &xdspb.Cluster_Type{Type: 
xdspb.Cluster_EDS}, EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{ EdsConfig: &corepb.ConfigSource{ ConfigSourceSpecifier: &corepb.ConfigSource_Ads{ Ads: &corepb.AggregatedConfigSource{}, }, }, }, LbPolicy: xdspb.Cluster_LEAST_REQUEST, }, wantUpdate: emptyUpdate, wantErr: true, }, { name: "happy-case-no-service-name-no-lrs", cluster: &xdspb.Cluster{ ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS}, EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{ EdsConfig: &corepb.ConfigSource{ ConfigSourceSpecifier: &corepb.ConfigSource_Ads{ Ads: &corepb.AggregatedConfigSource{}, }, }, }, LbPolicy: xdspb.Cluster_ROUND_ROBIN, }, wantUpdate: emptyUpdate, }, { name: "happy-case-no-lrs", cluster: &xdspb.Cluster{ ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS}, EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{ EdsConfig: &corepb.ConfigSource{ ConfigSourceSpecifier: &corepb.ConfigSource_Ads{ Ads: &corepb.AggregatedConfigSource{}, }, }, ServiceName: serviceName1, }, LbPolicy: xdspb.Cluster_ROUND_ROBIN, }, wantUpdate: CDSUpdate{ServiceName: serviceName1, EnableLRS: false}, }, { name: "happiest-case", cluster: goodCluster1, wantUpdate: CDSUpdate{ServiceName: serviceName1, EnableLRS: true}, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { gotUpdate, gotErr := validateCluster(test.cluster) if (gotErr != nil) != test.wantErr { t.Errorf("validateCluster(%+v) returned error: %v, wantErr: %v", test.cluster, gotErr, test.wantErr) } if !cmp.Equal(gotUpdate, test.wantUpdate) { t.Errorf("validateCluster(%+v) = %v, want: %v", test.cluster, gotUpdate, test.wantUpdate) } }) } } // TestCDSHandleResponse starts a fake xDS server, makes a ClientConn to it, // and creates a v2Client using it. Then, it registers a CDS watcher and tests // different CDS responses. 
func (s) TestCDSHandleResponse(t *testing.T) { fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() tests := []struct { name string cdsResponse *xdspb.DiscoveryResponse wantErr bool wantUpdate *CDSUpdate wantUpdateErr bool }{ // Badly marshaled CDS response. { name: "badly-marshaled-response", cdsResponse: badlyMarshaledCDSResponse, wantErr: true, wantUpdate: nil, wantUpdateErr: false, }, // Response does not contain Cluster proto. { name: "no-cluster-proto-in-response", cdsResponse: badResourceTypeInLDSResponse, wantErr: true, wantUpdate: nil, wantUpdateErr: false, }, // Response contains no clusters. { name: "no-cluster", cdsResponse: &xdspb.DiscoveryResponse{}, wantErr: false, wantUpdate: &CDSUpdate{}, wantUpdateErr: true, }, // Response contains one good cluster we are not interested in. { name: "one-uninteresting-cluster", cdsResponse: goodCDSResponse2, wantErr: false, wantUpdate: &CDSUpdate{}, wantUpdateErr: true, }, // Response contains one cluster and it is good. { name: "one-good-cluster", cdsResponse: goodCDSResponse1, wantErr: false, wantUpdate: &CDSUpdate{ServiceName: serviceName1, EnableLRS: true}, wantUpdateErr: false, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { testWatchHandle(t, &watchHandleTestcase{ responseToHandle: test.cdsResponse, wantHandleErr: test.wantErr, wantUpdate: test.wantUpdate, wantUpdateErr: test.wantUpdateErr, cdsWatch: v2c.watchCDS, watchReqChan: fakeServer.XDSRequestChan, handleXDSResp: v2c.handleCDSResponse, }) }) } } // TestCDSHandleResponseWithoutWatch tests the case where the v2Client receives // a CDS response without a registered watcher. 
func (s) TestCDSHandleResponseWithoutWatch(t *testing.T) { _, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() if v2c.handleCDSResponse(goodCDSResponse1) == nil { t.Fatal("v2c.handleCDSResponse() succeeded, should have failed") } } // cdsTestOp contains all data related to one particular test operation. Not // all fields make sense for all tests. type cdsTestOp struct { // target is the resource name to watch for. target string // responseToSend is the xDS response sent to the client responseToSend *fakeserver.Response // wantOpErr specfies whether the main operation should return an error. wantOpErr bool // wantCDSCache is the expected rdsCache at the end of an operation. wantCDSCache map[string]CDSUpdate // wantWatchCallback specifies if the watch callback should be invoked. wantWatchCallback bool } // testCDSCaching is a helper function which starts a fake xDS server, makes a // ClientConn to it, creates a v2Client using it. It then reads a bunch of // test operations to be performed from cdsTestOps and returns error, if any, // on the provided error channel. This is executed in a separate goroutine. func testCDSCaching(t *testing.T, cdsTestOps []cdsTestOp, errCh *testutils.Channel) { t.Helper() fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() t.Log("Started xds v2Client...") callbackCh := make(chan struct{}, 1) for _, cdsTestOp := range cdsTestOps { // Register a watcher if required, and use a channel to signal the // successful invocation of the callback. 
if cdsTestOp.target != "" { v2c.watchCDS(cdsTestOp.target, func(u CDSUpdate, err error) { t.Logf("Received callback with CDSUpdate {%+v} and error {%v}", u, err) callbackCh <- struct{}{} }) t.Logf("Registered a watcher for CDS target: %v...", cdsTestOp.target) // Wait till the request makes it to the fakeServer. This ensures that // the watch request has been processed by the v2Client. if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { errCh.Send(fmt.Errorf("Timeout waiting for CDS request: %v", err)) return } t.Log("FakeServer received request...") } // Directly push the response through a call to handleCDSResponse, // thereby bypassing the fakeServer. if cdsTestOp.responseToSend != nil { resp := cdsTestOp.responseToSend.Resp.(*discoverypb.DiscoveryResponse) if err := v2c.handleCDSResponse(resp); (err != nil) != cdsTestOp.wantOpErr { errCh.Send(fmt.Errorf("v2c.handleRDSResponse(%+v) returned err: %v", resp, err)) return } } // If the test needs the callback to be invoked, just verify that // it was invoked. Since we verify the contents of the cache, it's // ok not to verify the contents of the callback. if cdsTestOp.wantWatchCallback { <-callbackCh } if !cmp.Equal(v2c.cloneCDSCacheForTesting(), cdsTestOp.wantCDSCache) { errCh.Send(fmt.Errorf("gotCDSCache: %v, wantCDSCache: %v", v2c.rdsCache, cdsTestOp.wantCDSCache)) return } } t.Log("Completed all test ops successfully...") errCh.Send(nil) } // TestCDSCaching tests some end-to-end CDS flows using a fake xDS server, and // verifies the CDS data cached at the v2Client. func (s) TestCDSCaching(t *testing.T) { ops := []cdsTestOp{ // Add an CDS watch for a cluster name (clusterName1), which returns one // matching resource in the response. 
{ target: clusterName1, responseToSend: &fakeserver.Response{Resp: goodCDSResponse1}, wantCDSCache: map[string]CDSUpdate{ clusterName1: {serviceName1, true}, }, wantWatchCallback: true, }, // Push an CDS response which contains a new resource (apart from the // one received in the previous response). This should be cached. { responseToSend: &fakeserver.Response{Resp: cdsResponseWithMultipleResources}, wantCDSCache: map[string]CDSUpdate{ clusterName1: {serviceName1, true}, clusterName2: {serviceName2, false}, }, wantWatchCallback: true, }, // Switch the watch target to clusterName2, which was already cached. No // response is received from the server (as expected), but we want the // callback to be invoked with the new serviceName. { target: clusterName2, wantCDSCache: map[string]CDSUpdate{ clusterName1: {serviceName1, true}, clusterName2: {serviceName2, false}, }, wantWatchCallback: true, }, // Push an empty CDS response. This should clear the cache. { responseToSend: &fakeserver.Response{Resp: &xdspb.DiscoveryResponse{TypeUrl: cdsURL}}, wantOpErr: false, wantCDSCache: map[string]CDSUpdate{}, wantWatchCallback: true, }, } errCh := testutils.NewChannel() go testCDSCaching(t, ops, errCh) waitForNilErr(t, errCh) } // TestCDSWatchExpiryTimer tests the case where the client does not receive an // CDS response for the request that it sends out. We want the watch callback // to be invoked with an error once the watchExpiryTimer fires. 
func (s) TestCDSWatchExpiryTimer(t *testing.T) { oldWatchExpiryTimeout := defaultWatchExpiryTimeout defaultWatchExpiryTimeout = 500 * time.Millisecond defer func() { defaultWatchExpiryTimeout = oldWatchExpiryTimeout }() fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() t.Log("Started xds v2Client...") callbackCh := testutils.NewChannel() v2c.watchCDS(clusterName1, func(u CDSUpdate, err error) { t.Logf("Received callback with CDSUpdate {%+v} and error {%v}", u, err) if u.ServiceName != "" { callbackCh.Send(fmt.Errorf("received serviceName %v in cdsCallback, wanted empty string", u.ServiceName)) } if err == nil { callbackCh.Send(errors.New("received nil error in cdsCallback")) } callbackCh.Send(nil) }) // Wait till the request makes it to the fakeServer. This ensures that // the watch request has been processed by the v2Client. if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an CDS request") } waitForNilErr(t, callbackCh) } var ( badlyMarshaledCDSResponse = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: cdsURL, Value: []byte{1, 2, 3, 4}, }, }, TypeUrl: cdsURL, } goodCluster1 = &xdspb.Cluster{ Name: clusterName1, ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS}, EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{ EdsConfig: &corepb.ConfigSource{ ConfigSourceSpecifier: &corepb.ConfigSource_Ads{ Ads: &corepb.AggregatedConfigSource{}, }, }, ServiceName: serviceName1, }, LbPolicy: xdspb.Cluster_ROUND_ROBIN, LrsServer: &corepb.ConfigSource{ ConfigSourceSpecifier: &corepb.ConfigSource_Self{ Self: &corepb.SelfConfigSource{}, }, }, } marshaledCluster1, _ = proto.Marshal(goodCluster1) goodCluster2 = &xdspb.Cluster{ Name: clusterName2, ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS}, EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{ EdsConfig: 
&corepb.ConfigSource{ ConfigSourceSpecifier: &corepb.ConfigSource_Ads{ Ads: &corepb.AggregatedConfigSource{}, }, }, ServiceName: serviceName2, }, LbPolicy: xdspb.Cluster_ROUND_ROBIN, } marshaledCluster2, _ = proto.Marshal(goodCluster2) goodCDSResponse1 = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: cdsURL, Value: marshaledCluster1, }, }, TypeUrl: cdsURL, } goodCDSResponse2 = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: cdsURL, Value: marshaledCluster2, }, }, TypeUrl: cdsURL, } cdsResponseWithMultipleResources = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: cdsURL, Value: marshaledCluster1, }, { TypeUrl: cdsURL, Value: marshaledCluster2, }, }, TypeUrl: cdsURL, } ) grpc-go-1.29.1/xds/internal/client/client.go000066400000000000000000000131741365033716300206540ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package client implementation a full fledged gRPC client for the xDS API // used by the xds resolver and balancer implementations. package client import ( "errors" "fmt" "sync" "time" "google.golang.org/grpc" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/xds/internal/client/bootstrap" ) // Options provides all parameters required for the creation of an xDS client. type Options struct { // Config contains a fully populated bootstrap config. 
It is the // responsibility of the caller to use some sane defaults here if the // bootstrap process returned with certain fields left unspecified. Config bootstrap.Config // DialOpts contains dial options to be used when dialing the xDS server. DialOpts []grpc.DialOption // TargetName is the target of the parent ClientConn. TargetName string } // Client is a full fledged gRPC client which queries a set of discovery APIs // (collectively termed as xDS) on a remote management server, to discover // various dynamic resources. // // A single client object will be shared by the xds resolver and balancer // implementations. But the same client can only be shared by the same parent // ClientConn. type Client struct { opts Options cc *grpc.ClientConn // Connection to the xDS server v2c *v2Client // Actual xDS client implementation using the v2 API logger *grpclog.PrefixLogger mu sync.Mutex serviceCallback func(ServiceUpdate, error) ldsCancel func() rdsCancel func() } // New returns a new xdsClient configured with opts. func New(opts Options) (*Client, error) { switch { case opts.Config.BalancerName == "": return nil, errors.New("xds: no xds_server name provided in options") case opts.Config.Creds == nil: return nil, errors.New("xds: no credentials provided in options") case opts.Config.NodeProto == nil: return nil, errors.New("xds: no node_proto provided in options") } dopts := []grpc.DialOption{ opts.Config.Creds, grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: 5 * time.Minute, Timeout: 20 * time.Second, }), } dopts = append(dopts, opts.DialOpts...) c := &Client{opts: opts} cc, err := grpc.Dial(opts.Config.BalancerName, dopts...) if err != nil { // An error from a non-blocking dial indicates something serious. 
return nil, fmt.Errorf("xds: failed to dial balancer {%s}: %v", opts.Config.BalancerName, err) } c.cc = cc c.logger = grpclog.NewPrefixLogger(loggingPrefix(c)) c.logger.Infof("Created ClientConn to xDS server: %s", opts.Config.BalancerName) c.v2c = newV2Client(cc, opts.Config.NodeProto, backoff.DefaultExponential.Backoff, c.logger) c.logger.Infof("Created") return c, nil } // Close closes the gRPC connection to the xDS server. func (c *Client) Close() { // TODO: Should we invoke the registered callbacks here with an error that // the client is closed? c.v2c.close() c.cc.Close() c.logger.Infof("Shutdown") } // ServiceUpdate contains update about the service. type ServiceUpdate struct { Cluster string } // handleLDSUpdate is the LDS watcher callback we registered with the v2Client. func (c *Client) handleLDSUpdate(u ldsUpdate, err error) { c.logger.Infof("xds: client received LDS update: %+v, err: %v", u, err) if err != nil { c.mu.Lock() if c.serviceCallback != nil { c.serviceCallback(ServiceUpdate{}, err) } c.mu.Unlock() return } c.mu.Lock() c.rdsCancel = c.v2c.watchRDS(u.routeName, c.handleRDSUpdate) c.mu.Unlock() } // handleRDSUpdate is the RDS watcher callback we registered with the v2Client. func (c *Client) handleRDSUpdate(u rdsUpdate, err error) { c.logger.Infof("xds: client received RDS update: %+v, err: %v", u, err) if err != nil { c.mu.Lock() if c.serviceCallback != nil { c.serviceCallback(ServiceUpdate{}, err) } c.mu.Unlock() return } c.mu.Lock() if c.serviceCallback != nil { c.serviceCallback(ServiceUpdate{Cluster: u.clusterName}, nil) } c.mu.Unlock() } // WatchService uses LDS and RDS protocols to discover information about the // provided serviceName. func (c *Client) WatchService(serviceName string, callback func(ServiceUpdate, error)) (cancel func()) { // TODO: Error out early if the client is closed. Ideally, this should // never be called after the client is closed though. 
c.mu.Lock() c.serviceCallback = callback c.ldsCancel = c.v2c.watchLDS(serviceName, c.handleLDSUpdate) c.mu.Unlock() return func() { c.mu.Lock() c.serviceCallback = nil if c.ldsCancel != nil { c.ldsCancel() } if c.rdsCancel != nil { c.rdsCancel() } c.mu.Unlock() } } // WatchCluster uses CDS to discover information about the provided // clusterName. func (c *Client) WatchCluster(clusterName string, cdsCb func(CDSUpdate, error)) (cancel func()) { return c.v2c.watchCDS(clusterName, cdsCb) } // WatchEndpoints uses EDS to discover information about the endpoints in the // provided clusterName. func (c *Client) WatchEndpoints(clusterName string, edsCb func(*EDSUpdate, error)) (cancel func()) { return c.v2c.watchEDS(clusterName, edsCb) } grpc-go-1.29.1/xds/internal/client/client_loadreport.go000066400000000000000000000053371365033716300231110ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package client import ( "context" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/golang/protobuf/proto" structpb "github.com/golang/protobuf/ptypes/struct" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/xds/internal/balancer/lrs" ) const nodeMetadataHostnameKey = "PROXYLESS_CLIENT_HOSTNAME" // ReportLoad sends the load of the given clusterName from loadStore to the // given server. 
If the server is not an empty string, and is different from the // xds server, a new ClientConn will be created. // // The same options used for creating the Client will be used (including // NodeProto, and dial options if necessary). // // It returns a function to cancel the load reporting stream. If server is // different from xds server, the ClientConn will also be closed. func (c *Client) ReportLoad(server string, clusterName string, loadStore lrs.Store) func() { var ( cc *grpc.ClientConn closeCC bool ) c.logger.Infof("Starting load report to server: %s", server) if server == "" || server == c.opts.Config.BalancerName { cc = c.cc } else { c.logger.Infof("LRS server is different from xDS server, starting a new ClientConn") dopts := append([]grpc.DialOption{c.opts.Config.Creds}, c.opts.DialOpts...) ccNew, err := grpc.Dial(server, dopts...) if err != nil { // An error from a non-blocking dial indicates something serious. grpclog.Infof("xds: failed to dial load report server {%s}: %v", server, err) return func() {} } cc = ccNew closeCC = true } ctx, cancel := context.WithCancel(context.Background()) nodeTemp := proto.Clone(c.opts.Config.NodeProto).(*corepb.Node) if nodeTemp == nil { nodeTemp = &corepb.Node{} } if nodeTemp.Metadata == nil { nodeTemp.Metadata = &structpb.Struct{} } if nodeTemp.Metadata.Fields == nil { nodeTemp.Metadata.Fields = make(map[string]*structpb.Value) } nodeTemp.Metadata.Fields[nodeMetadataHostnameKey] = &structpb.Value{ Kind: &structpb.Value_StringValue{StringValue: c.opts.TargetName}, } go loadStore.ReportTo(ctx, c.cc, clusterName, nodeTemp) return func() { cancel() if closeCC { cc.Close() } } } grpc-go-1.29.1/xds/internal/client/client_test.go000066400000000000000000000211511365033716300217050ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package client import ( "errors" "fmt" "testing" "time" "google.golang.org/grpc" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/xds/internal/client/bootstrap" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeserver" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" ) type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func clientOpts(balancerName string) Options { return Options{ Config: bootstrap.Config{ BalancerName: balancerName, Creds: grpc.WithInsecure(), NodeProto: &corepb.Node{}, }, // WithTimeout is deprecated. But we are OK to call it here from the // test, so we clearly know that the dial failed. 
DialOpts: []grpc.DialOption{grpc.WithTimeout(5 * time.Second), grpc.WithBlock()}, } } func (s) TestNew(t *testing.T) { fakeServer, cleanup, err := fakeserver.StartServer() if err != nil { t.Fatalf("Failed to start fake xDS server: %v", err) } defer cleanup() tests := []struct { name string opts Options wantErr bool }{ {name: "empty-opts", opts: Options{}, wantErr: true}, { name: "empty-balancer-name", opts: Options{ Config: bootstrap.Config{ Creds: grpc.WithInsecure(), NodeProto: &corepb.Node{}, }, }, wantErr: true, }, { name: "empty-dial-creds", opts: Options{ Config: bootstrap.Config{ BalancerName: "dummy", NodeProto: &corepb.Node{}, }, }, wantErr: true, }, { name: "empty-node-proto", opts: Options{ Config: bootstrap.Config{ BalancerName: "dummy", Creds: grpc.WithInsecure(), }, }, wantErr: true, }, { name: "happy-case", opts: clientOpts(fakeServer.Address), wantErr: false, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { c, err := New(test.opts) if err == nil { defer c.Close() } if (err != nil) != test.wantErr { t.Fatalf("New(%+v) = %v, wantErr: %v", test.opts, err, test.wantErr) } }) } } // TestWatchService tests the happy case of registering a watcher for // service updates and receiving a good update. 
func (s) TestWatchService(t *testing.T) { fakeServer, cleanup, err := fakeserver.StartServer() if err != nil { t.Fatalf("Failed to start fake xDS server: %v", err) } defer cleanup() xdsClient, err := New(clientOpts(fakeServer.Address)) if err != nil { t.Fatalf("New returned error: %v", err) } defer xdsClient.Close() t.Log("Created an xdsClient...") callbackCh := testutils.NewChannel() cancelWatch := xdsClient.WatchService(goodLDSTarget1, func(su ServiceUpdate, err error) { if err != nil { callbackCh.Send(fmt.Errorf("xdsClient.WatchService returned error: %v", err)) return } if su.Cluster != goodClusterName1 { callbackCh.Send(fmt.Errorf("got clusterName: %+v, want clusterName: %+v", su.Cluster, goodClusterName1)) return } callbackCh.Send(nil) }) defer cancelWatch() t.Log("Registered a watcher for service updates...") // Make the fakeServer send LDS response. if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an LDS request") } fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} // Make the fakeServer send RDS response. if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an RDS request") } fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodRDSResponse1} waitForNilErr(t, callbackCh) } // TestWatchServiceWithNoResponseFromServer tests the case where the // xDS server does not respond to the requests being sent out as part of // registering a service update watcher. The underlying v2Client will timeout // and will send us an error. 
func (s) TestWatchServiceWithNoResponseFromServer(t *testing.T) { fakeServer, cleanup, err := fakeserver.StartServer() if err != nil { t.Fatalf("Failed to start fake xDS server: %v", err) } defer cleanup() xdsClient, err := New(clientOpts(fakeServer.Address)) if err != nil { t.Fatalf("New returned error: %v", err) } defer xdsClient.Close() t.Log("Created an xdsClient...") oldWatchExpiryTimeout := defaultWatchExpiryTimeout defaultWatchExpiryTimeout = 500 * time.Millisecond defer func() { defaultWatchExpiryTimeout = oldWatchExpiryTimeout }() callbackCh := testutils.NewChannel() cancelWatch := xdsClient.WatchService(goodLDSTarget1, func(su ServiceUpdate, err error) { if su.Cluster != "" { callbackCh.Send(fmt.Errorf("got clusterName: %+v, want empty clusterName", su.Cluster)) return } if err == nil { callbackCh.Send(errors.New("xdsClient.WatchService returned error non-nil error")) return } callbackCh.Send(nil) }) defer cancelWatch() t.Log("Registered a watcher for service updates...") // Wait for one request from the client, but send no reponses. if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an LDS request") } waitForNilErr(t, callbackCh) } // TestWatchServiceEmptyRDS tests the case where the underlying // v2Client receives an empty RDS response. 
func (s) TestWatchServiceEmptyRDS(t *testing.T) { fakeServer, cleanup, err := fakeserver.StartServer() if err != nil { t.Fatalf("Failed to start fake xDS server: %v", err) } defer cleanup() xdsClient, err := New(clientOpts(fakeServer.Address)) if err != nil { t.Fatalf("New returned error: %v", err) } defer xdsClient.Close() t.Log("Created an xdsClient...") oldWatchExpiryTimeout := defaultWatchExpiryTimeout defaultWatchExpiryTimeout = 500 * time.Millisecond defer func() { defaultWatchExpiryTimeout = oldWatchExpiryTimeout }() callbackCh := testutils.NewChannel() cancelWatch := xdsClient.WatchService(goodLDSTarget1, func(su ServiceUpdate, err error) { if su.Cluster != "" { callbackCh.Send(fmt.Errorf("got clusterName: %+v, want empty clusterName", su.Cluster)) return } if err == nil { callbackCh.Send(errors.New("xdsClient.WatchService returned error non-nil error")) return } callbackCh.Send(nil) }) defer cancelWatch() t.Log("Registered a watcher for service updates...") // Make the fakeServer send LDS response. if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an LDS request") } fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} // Make the fakeServer send an empty RDS response. if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an RDS request") } fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: noVirtualHostsInRDSResponse} waitForNilErr(t, callbackCh) } // TestWatchServiceWithClientClose tests the case where xDS responses are // received after the client is closed, and we make sure that the registered // watcher callback is not invoked. 
func (s) TestWatchServiceWithClientClose(t *testing.T) { fakeServer, cleanup, err := fakeserver.StartServer() if err != nil { t.Fatalf("Failed to start fake xDS server: %v", err) } defer cleanup() xdsClient, err := New(clientOpts(fakeServer.Address)) if err != nil { t.Fatalf("New returned error: %v", err) } defer xdsClient.Close() t.Log("Created an xdsClient...") callbackCh := testutils.NewChannel() cancelWatch := xdsClient.WatchService(goodLDSTarget1, func(su ServiceUpdate, err error) { callbackCh.Send(errors.New("watcher callback invoked after client close")) }) defer cancelWatch() t.Log("Registered a watcher for service updates...") // Make the fakeServer send LDS response. if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an LDS request") } fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} xdsClient.Close() t.Log("Closing the xdsClient...") // Push an RDS response from the fakeserver fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodRDSResponse1} if cbErr, err := callbackCh.Receive(); err != testutils.ErrRecvTimeout { t.Fatal(cbErr) } } grpc-go-1.29.1/xds/internal/client/eds.go000066400000000000000000000146421365033716300201520ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package client import ( "fmt" "net" "strconv" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" typepb "github.com/envoyproxy/go-control-plane/envoy/type" "github.com/golang/protobuf/ptypes" "google.golang.org/grpc/xds/internal" ) // OverloadDropConfig contains the config to drop overloads. type OverloadDropConfig struct { Category string Numerator uint32 Denominator uint32 } // EndpointHealthStatus represents the health status of an endpoint. type EndpointHealthStatus int32 const ( // EndpointHealthStatusUnknown represents HealthStatus UNKNOWN. EndpointHealthStatusUnknown EndpointHealthStatus = iota // EndpointHealthStatusHealthy represents HealthStatus HEALTHY. EndpointHealthStatusHealthy // EndpointHealthStatusUnhealthy represents HealthStatus UNHEALTHY. EndpointHealthStatusUnhealthy // EndpointHealthStatusDraining represents HealthStatus DRAINING. EndpointHealthStatusDraining // EndpointHealthStatusTimeout represents HealthStatus TIMEOUT. EndpointHealthStatusTimeout // EndpointHealthStatusDegraded represents HealthStatus DEGRADED. EndpointHealthStatusDegraded ) // Endpoint contains information of an endpoint. type Endpoint struct { Address string HealthStatus EndpointHealthStatus Weight uint32 } // Locality contains information of a locality. type Locality struct { Endpoints []Endpoint ID internal.Locality Priority uint32 Weight uint32 } // EDSUpdate contains an EDS update. 
type EDSUpdate struct { Drops []OverloadDropConfig Localities []Locality } func parseAddress(socketAddress *corepb.SocketAddress) string { return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) } func parseDropPolicy(dropPolicy *xdspb.ClusterLoadAssignment_Policy_DropOverload) OverloadDropConfig { percentage := dropPolicy.GetDropPercentage() var ( numerator = percentage.GetNumerator() denominator uint32 ) switch percentage.GetDenominator() { case typepb.FractionalPercent_HUNDRED: denominator = 100 case typepb.FractionalPercent_TEN_THOUSAND: denominator = 10000 case typepb.FractionalPercent_MILLION: denominator = 1000000 } return OverloadDropConfig{ Category: dropPolicy.GetCategory(), Numerator: numerator, Denominator: denominator, } } func parseEndpoints(lbEndpoints []*endpointpb.LbEndpoint) []Endpoint { endpoints := make([]Endpoint, 0, len(lbEndpoints)) for _, lbEndpoint := range lbEndpoints { endpoints = append(endpoints, Endpoint{ HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), Address: parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()), Weight: lbEndpoint.GetLoadBalancingWeight().GetValue(), }) } return endpoints } // ParseEDSRespProto turns EDS response proto message to EDSUpdate. // // This is temporarily exported to be used in eds balancer, before it switches // to use xds client. TODO: unexport. 
func ParseEDSRespProto(m *xdspb.ClusterLoadAssignment) (*EDSUpdate, error) { ret := &EDSUpdate{} for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) } priorities := make(map[uint32]struct{}) for _, locality := range m.Endpoints { l := locality.GetLocality() if l == nil { return nil, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) } lid := internal.Locality{ Region: l.Region, Zone: l.Zone, SubZone: l.SubZone, } priority := locality.GetPriority() priorities[priority] = struct{}{} ret.Localities = append(ret.Localities, Locality{ ID: lid, Endpoints: parseEndpoints(locality.GetLbEndpoints()), Weight: locality.GetLoadBalancingWeight().GetValue(), Priority: priority, }) } for i := 0; i < len(priorities); i++ { if _, ok := priorities[uint32(i)]; !ok { return nil, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) } } return ret, nil } // ParseEDSRespProtoForTesting parses EDS response, and panic if parsing fails. // This is used by EDS balancer tests. // // TODO: delete this. The EDS balancer tests should build an EDSUpdate directly, // instead of building and parsing a proto message. 
func ParseEDSRespProtoForTesting(m *xdspb.ClusterLoadAssignment) *EDSUpdate { u, err := ParseEDSRespProto(m) if err != nil { panic(err.Error()) } return u } func (v2c *v2Client) handleEDSResponse(resp *xdspb.DiscoveryResponse) error { v2c.mu.Lock() defer v2c.mu.Unlock() wi := v2c.watchMap[edsURL] if wi == nil { return fmt.Errorf("xds: no EDS watcher found when handling EDS response: %+v", resp) } var returnUpdate *EDSUpdate for _, r := range resp.GetResources() { var resource ptypes.DynamicAny if err := ptypes.UnmarshalAny(r, &resource); err != nil { return fmt.Errorf("xds: failed to unmarshal resource in EDS response: %v", err) } cla, ok := resource.Message.(*xdspb.ClusterLoadAssignment) if !ok { return fmt.Errorf("xds: unexpected resource type: %T in EDS response", resource.Message) } v2c.logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, cla) if cla.GetClusterName() != wi.target[0] { // We won't validate the remaining resources. If one of the // uninteresting ones is invalid, we will still ACK the response. continue } u, err := ParseEDSRespProto(cla) if err != nil { return err } returnUpdate = u // Break from the loop because the request resource is found. But // this also means we won't validate the remaining resources. If one // of the uninteresting ones is invalid, we will still ACK the // response. break } if returnUpdate != nil { wi.stopTimer() wi.edsCallback(returnUpdate, nil) } return nil } grpc-go-1.29.1/xds/internal/client/eds_test.go000066400000000000000000000177021365033716300212110ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package client import ( "errors" "fmt" "testing" "time" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/golang/protobuf/ptypes" anypb "github.com/golang/protobuf/ptypes/any" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils" ) func (s) TestEDSParseRespProto(t *testing.T) { tests := []struct { name string m *xdspb.ClusterLoadAssignment want *EDSUpdate wantErr bool }{ { name: "missing-priority", m: func() *xdspb.ClusterLoadAssignment { clab0 := NewClusterLoadAssignmentBuilder("test", nil) clab0.AddLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) clab0.AddLocality("locality-2", 1, 2, []string{"addr2:159"}, nil) return clab0.Build() }(), want: nil, wantErr: true, }, { name: "missing-locality-ID", m: func() *xdspb.ClusterLoadAssignment { clab0 := NewClusterLoadAssignmentBuilder("test", nil) clab0.AddLocality("", 1, 0, []string{"addr1:314"}, nil) return clab0.Build() }(), want: nil, wantErr: true, }, { name: "good", m: func() *xdspb.ClusterLoadAssignment { clab0 := NewClusterLoadAssignmentBuilder("test", nil) clab0.AddLocality("locality-1", 1, 1, []string{"addr1:314"}, &AddLocalityOptions{ Health: []corepb.HealthStatus{corepb.HealthStatus_UNHEALTHY}, Weight: []uint32{271}, }) clab0.AddLocality("locality-2", 1, 0, []string{"addr2:159"}, &AddLocalityOptions{ Health: []corepb.HealthStatus{corepb.HealthStatus_DRAINING}, Weight: []uint32{828}, }) return clab0.Build() }(), want: &EDSUpdate{ Drops: nil, Localities: 
[]Locality{ { Endpoints: []Endpoint{{ Address: "addr1:314", HealthStatus: EndpointHealthStatusUnhealthy, Weight: 271, }}, ID: internal.Locality{SubZone: "locality-1"}, Priority: 1, Weight: 1, }, { Endpoints: []Endpoint{{ Address: "addr2:159", HealthStatus: EndpointHealthStatusDraining, Weight: 828, }}, ID: internal.Locality{SubZone: "locality-2"}, Priority: 0, Weight: 1, }, }, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := ParseEDSRespProto(tt.m) if (err != nil) != tt.wantErr { t.Errorf("ParseEDSRespProto() error = %v, wantErr %v", err, tt.wantErr) return } if d := cmp.Diff(got, tt.want); d != "" { t.Errorf("ParseEDSRespProto() got = %v, want %v, diff: %v", got, tt.want, d) } }) } } var ( badlyMarshaledEDSResponse = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: edsURL, Value: []byte{1, 2, 3, 4}, }, }, TypeUrl: edsURL, } badResourceTypeInEDSResponse = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: httpConnManagerURL, Value: marshaledConnMgr1, }, }, TypeUrl: edsURL, } goodEDSResponse1 = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ func() *anypb.Any { clab0 := NewClusterLoadAssignmentBuilder(goodEDSName, nil) clab0.AddLocality("locality-1", 1, 1, []string{"addr1:314"}, nil) clab0.AddLocality("locality-2", 1, 0, []string{"addr2:159"}, nil) a, _ := ptypes.MarshalAny(clab0.Build()) return a }(), }, TypeUrl: edsURL, } goodEDSResponse2 = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ func() *anypb.Any { clab0 := NewClusterLoadAssignmentBuilder("not-goodEDSName", nil) clab0.AddLocality("locality-1", 1, 1, []string{"addr1:314"}, nil) clab0.AddLocality("locality-2", 1, 0, []string{"addr2:159"}, nil) a, _ := ptypes.MarshalAny(clab0.Build()) return a }(), }, TypeUrl: edsURL, } ) func (s) TestEDSHandleResponse(t *testing.T) { fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() 
tests := []struct { name string edsResponse *xdspb.DiscoveryResponse wantErr bool wantUpdate *EDSUpdate wantUpdateErr bool }{ // Any in resource is badly marshaled. { name: "badly-marshaled_response", edsResponse: badlyMarshaledEDSResponse, wantErr: true, wantUpdate: nil, wantUpdateErr: false, }, // Response doesn't contain resource with the right type. { name: "no-config-in-response", edsResponse: badResourceTypeInEDSResponse, wantErr: true, wantUpdate: nil, wantUpdateErr: false, }, // Response contains one uninteresting ClusterLoadAssignment. { name: "one-uninterestring-assignment", edsResponse: goodEDSResponse2, wantErr: false, wantUpdate: nil, wantUpdateErr: false, }, // Response contains one good ClusterLoadAssignment. { name: "one-good-assignment", edsResponse: goodEDSResponse1, wantErr: false, wantUpdate: &EDSUpdate{ Localities: []Locality{ { Endpoints: []Endpoint{{Address: "addr1:314"}}, ID: internal.Locality{SubZone: "locality-1"}, Priority: 1, Weight: 1, }, { Endpoints: []Endpoint{{Address: "addr2:159"}}, ID: internal.Locality{SubZone: "locality-2"}, Priority: 0, Weight: 1, }, }, }, wantUpdateErr: false, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { testWatchHandle(t, &watchHandleTestcase{ responseToHandle: test.edsResponse, wantHandleErr: test.wantErr, wantUpdate: test.wantUpdate, wantUpdateErr: test.wantUpdateErr, edsWatch: v2c.watchEDS, watchReqChan: fakeServer.XDSRequestChan, handleXDSResp: v2c.handleEDSResponse, }) }) } } // TestEDSHandleResponseWithoutWatch tests the case where the v2Client // receives an EDS response without a registered EDS watcher. 
func (s) TestEDSHandleResponseWithoutWatch(t *testing.T) { _, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() if v2c.handleEDSResponse(goodEDSResponse1) == nil { t.Fatal("v2c.handleEDSResponse() succeeded, should have failed") } } func (s) TestEDSWatchExpiryTimer(t *testing.T) { oldWatchExpiryTimeout := defaultWatchExpiryTimeout defaultWatchExpiryTimeout = 500 * time.Millisecond defer func() { defaultWatchExpiryTimeout = oldWatchExpiryTimeout }() fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() t.Log("Started xds v2Client...") callbackCh := testutils.NewChannel() v2c.watchEDS(goodRouteName1, func(u *EDSUpdate, err error) { t.Logf("Received callback with edsUpdate {%+v} and error {%v}", u, err) if u != nil { callbackCh.Send(fmt.Errorf("received EDSUpdate %v in edsCallback, wanted nil", u)) } if err == nil { callbackCh.Send(errors.New("received nil error in edsCallback")) } callbackCh.Send(nil) }) // Wait till the request makes it to the fakeServer. This ensures that // the watch request has been processed by the v2Client. if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an CDS request") } waitForNilErr(t, callbackCh) } grpc-go-1.29.1/xds/internal/client/eds_testutil.go000066400000000000000000000076401365033716300221070ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // All structs/functions in this file should be unexported. They are used in EDS // balancer tests now, to generate test inputs. Eventually, EDS balancer tests // should generate EDSUpdate directly, instead of generating and parsing the // proto message. // TODO: unexported everything in this file. package client import ( "fmt" "net" "strconv" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" typepb "github.com/envoyproxy/go-control-plane/envoy/type" wrapperspb "github.com/golang/protobuf/ptypes/wrappers" ) // ClusterLoadAssignmentBuilder builds a ClusterLoadAssignment, aka EDS // response. type ClusterLoadAssignmentBuilder struct { v *xdspb.ClusterLoadAssignment } // NewClusterLoadAssignmentBuilder creates a ClusterLoadAssignmentBuilder. func NewClusterLoadAssignmentBuilder(clusterName string, dropPercents []uint32) *ClusterLoadAssignmentBuilder { var drops []*xdspb.ClusterLoadAssignment_Policy_DropOverload for i, d := range dropPercents { drops = append(drops, &xdspb.ClusterLoadAssignment_Policy_DropOverload{ Category: fmt.Sprintf("test-drop-%d", i), DropPercentage: &typepb.FractionalPercent{ Numerator: d, Denominator: typepb.FractionalPercent_HUNDRED, }, }) } return &ClusterLoadAssignmentBuilder{ v: &xdspb.ClusterLoadAssignment{ ClusterName: clusterName, Policy: &xdspb.ClusterLoadAssignment_Policy{ DropOverloads: drops, }, }, } } // AddLocalityOptions contains options when adding locality to the builder. 
type AddLocalityOptions struct { Health []corepb.HealthStatus Weight []uint32 } // AddLocality adds a locality to the builder. func (clab *ClusterLoadAssignmentBuilder) AddLocality(subzone string, weight uint32, priority uint32, addrsWithPort []string, opts *AddLocalityOptions) { var lbEndPoints []*endpointpb.LbEndpoint for i, a := range addrsWithPort { host, portStr, err := net.SplitHostPort(a) if err != nil { panic("failed to split " + a) } port, err := strconv.Atoi(portStr) if err != nil { panic("failed to atoi " + portStr) } lbe := &endpointpb.LbEndpoint{ HostIdentifier: &endpointpb.LbEndpoint_Endpoint{ Endpoint: &endpointpb.Endpoint{ Address: &corepb.Address{ Address: &corepb.Address_SocketAddress{ SocketAddress: &corepb.SocketAddress{ Protocol: corepb.SocketAddress_TCP, Address: host, PortSpecifier: &corepb.SocketAddress_PortValue{ PortValue: uint32(port)}}}}}}, } if opts != nil { if i < len(opts.Health) { lbe.HealthStatus = opts.Health[i] } if i < len(opts.Weight) { lbe.LoadBalancingWeight = &wrapperspb.UInt32Value{Value: opts.Weight[i]} } } lbEndPoints = append(lbEndPoints, lbe) } var localityID *corepb.Locality if subzone != "" { localityID = &corepb.Locality{ Region: "", Zone: "", SubZone: subzone, } } clab.v.Endpoints = append(clab.v.Endpoints, &endpointpb.LocalityLbEndpoints{ Locality: localityID, LbEndpoints: lbEndPoints, LoadBalancingWeight: &wrapperspb.UInt32Value{Value: weight}, Priority: priority, }) } // Build builds ClusterLoadAssignment. func (clab *ClusterLoadAssignmentBuilder) Build() *xdspb.ClusterLoadAssignment { return clab.v } grpc-go-1.29.1/xds/internal/client/lds.go000066400000000000000000000076231365033716300201620ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package client import ( "fmt" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" httppb "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2" "github.com/golang/protobuf/ptypes" ) // handleLDSResponse processes an LDS response received from the xDS server. On // receipt of a good response, it also invokes the registered watcher callback. func (v2c *v2Client) handleLDSResponse(resp *xdspb.DiscoveryResponse) error { v2c.mu.Lock() defer v2c.mu.Unlock() wi := v2c.watchMap[ldsURL] if wi == nil { return fmt.Errorf("xds: no LDS watcher found when handling LDS response: %+v", resp) } routeName := "" for _, r := range resp.GetResources() { var resource ptypes.DynamicAny if err := ptypes.UnmarshalAny(r, &resource); err != nil { return fmt.Errorf("xds: failed to unmarshal resource in LDS response: %v", err) } lis, ok := resource.Message.(*xdspb.Listener) if !ok { return fmt.Errorf("xds: unexpected resource type: %T in LDS response", resource.Message) } v2c.logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, lis) if lis.GetName() != wi.target[0] { // We ignore listeners we are not watching for because LDS is // special in the sense that there is only one resource we are // interested in, and this resource does not change over the // lifetime of the v2Client. So, we don't have to cache other // listeners which we are not interested in. 
continue } var err error routeName, err = v2c.getRouteConfigNameFromListener(lis) if err != nil { return err } } var err error if routeName == "" { err = fmt.Errorf("xds: LDS target %s not found in received response %+v", wi.target, resp) } wi.stopTimer() wi.ldsCallback(ldsUpdate{routeName: routeName}, err) return nil } // getRouteConfigNameFromListener checks if the provided Listener proto meets // the expected criteria. If so, it returns a non-empty routeConfigName. func (v2c *v2Client) getRouteConfigNameFromListener(lis *xdspb.Listener) (string, error) { if lis.GetApiListener() == nil { return "", fmt.Errorf("xds: no api_listener field in LDS response %+v", lis) } var apiAny ptypes.DynamicAny if err := ptypes.UnmarshalAny(lis.GetApiListener().GetApiListener(), &apiAny); err != nil { return "", fmt.Errorf("xds: failed to unmarshal api_listner in LDS response: %v", err) } apiLis, ok := apiAny.Message.(*httppb.HttpConnectionManager) if !ok { return "", fmt.Errorf("xds: unexpected api_listener type: %T in LDS response", apiAny.Message) } v2c.logger.Infof("Resource with type %T, contains %v", apiLis, apiLis) switch apiLis.RouteSpecifier.(type) { case *httppb.HttpConnectionManager_Rds: name := apiLis.GetRds().GetRouteConfigName() if name == "" { return "", fmt.Errorf("xds: empty route_config_name in LDS response: %+v", lis) } return name, nil case *httppb.HttpConnectionManager_RouteConfig: // TODO: Add support for specifying the RouteConfiguration inline // in the LDS response. return "", fmt.Errorf("xds: LDS response contains RDS config inline. Not supported for now: %+v", apiLis) case nil: return "", fmt.Errorf("xds: no RouteSpecifier in received LDS response: %+v", apiLis) default: return "", fmt.Errorf("xds: unsupported type %T for RouteSpecifier in received LDS response", apiLis.RouteSpecifier) } } grpc-go-1.29.1/xds/internal/client/lds_test.go000066400000000000000000000161641365033716300212210ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package client import ( "errors" "fmt" "testing" "time" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" "google.golang.org/grpc/xds/internal/testutils" ) func (s) TestLDSGetRouteConfig(t *testing.T) { tests := []struct { name string lis *xdspb.Listener wantRoute string wantErr bool }{ { name: "no-apiListener-field", lis: &xdspb.Listener{}, wantRoute: "", wantErr: true, }, { name: "badly-marshaled-apiListener", lis: badAPIListener1, wantRoute: "", wantErr: true, }, { name: "wrong-type-in-apiListener", lis: badResourceListener, wantRoute: "", wantErr: true, }, { name: "empty-httpConnMgr-in-apiListener", lis: listenerWithEmptyHTTPConnMgr, wantRoute: "", wantErr: true, }, { name: "scopedRoutes-routeConfig-in-apiListener", lis: listenerWithScopedRoutesRouteConfig, wantRoute: "", wantErr: true, }, { name: "goodListener1", lis: goodListener1, wantRoute: goodRouteName1, wantErr: false, }, } _, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() for _, test := range tests { t.Run(test.name, func(t *testing.T) { gotRoute, err := v2c.getRouteConfigNameFromListener(test.lis) if gotRoute != test.wantRoute { t.Errorf("getRouteConfigNameFromListener(%+v) = %v, want %v", test.lis, gotRoute, test.wantRoute) } if (err != nil) != test.wantErr { t.Errorf("getRouteConfigNameFromListener(%+v) = %v, want %v", test.lis, err, test.wantErr) } 
}) } } // TestLDSHandleResponse starts a fake xDS server, makes a ClientConn to it, // and creates a v2Client using it. Then, it registers a watchLDS and tests // different LDS responses. func (s) TestLDSHandleResponse(t *testing.T) { fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() tests := []struct { name string ldsResponse *xdspb.DiscoveryResponse wantErr bool wantUpdate *ldsUpdate wantUpdateErr bool }{ // Badly marshaled LDS response. { name: "badly-marshaled-response", ldsResponse: badlyMarshaledLDSResponse, wantErr: true, wantUpdate: nil, wantUpdateErr: false, }, // Response does not contain Listener proto. { name: "no-listener-proto-in-response", ldsResponse: badResourceTypeInLDSResponse, wantErr: true, wantUpdate: nil, wantUpdateErr: false, }, // No APIListener in the response. Just one test case here for a bad // ApiListener, since the others are covered in // TestGetRouteConfigNameFromListener. { name: "no-apiListener-in-response", ldsResponse: noAPIListenerLDSResponse, wantErr: true, wantUpdate: nil, wantUpdateErr: false, }, // Response contains one listener and it is good. { name: "one-good-listener", ldsResponse: goodLDSResponse1, wantErr: false, wantUpdate: &ldsUpdate{routeName: goodRouteName1}, wantUpdateErr: false, }, // Response contains multiple good listeners, including the one we are // interested in. { name: "multiple-good-listener", ldsResponse: ldsResponseWithMultipleResources, wantErr: false, wantUpdate: &ldsUpdate{routeName: goodRouteName1}, wantUpdateErr: false, }, // Response contains two good listeners (one interesting and one // uninteresting), and one badly marshaled listener. { name: "good-bad-ugly-listeners", ldsResponse: goodBadUglyLDSResponse, wantErr: false, wantUpdate: &ldsUpdate{routeName: goodRouteName1}, wantUpdateErr: false, }, // Response contains one listener, but we are not interested in it. 
{ name: "one-uninteresting-listener", ldsResponse: goodLDSResponse2, wantErr: false, wantUpdate: &ldsUpdate{routeName: ""}, wantUpdateErr: true, }, // Response constains no resources. This is the case where the server // does not know about the target we are interested in. { name: "empty-response", ldsResponse: emptyLDSResponse, wantErr: false, wantUpdate: &ldsUpdate{routeName: ""}, wantUpdateErr: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { testWatchHandle(t, &watchHandleTestcase{ responseToHandle: test.ldsResponse, wantHandleErr: test.wantErr, wantUpdate: test.wantUpdate, wantUpdateErr: test.wantUpdateErr, ldsWatch: v2c.watchLDS, watchReqChan: fakeServer.XDSRequestChan, handleXDSResp: v2c.handleLDSResponse, }) }) } } // TestLDSHandleResponseWithoutWatch tests the case where the v2Client receives // an LDS response without a registered watcher. func (s) TestLDSHandleResponseWithoutWatch(t *testing.T) { _, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() if v2c.handleLDSResponse(goodLDSResponse1) == nil { t.Fatal("v2c.handleLDSResponse() succeeded, should have failed") } } // TestLDSWatchExpiryTimer tests the case where the client does not receive an // LDS response for the request that it sends out. We want the watch callback // to be invoked with an error once the watchExpiryTimer fires. 
func (s) TestLDSWatchExpiryTimer(t *testing.T) { oldWatchExpiryTimeout := defaultWatchExpiryTimeout defaultWatchExpiryTimeout = 500 * time.Millisecond defer func() { defaultWatchExpiryTimeout = oldWatchExpiryTimeout }() fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() callbackCh := testutils.NewChannel() v2c.watchLDS(goodLDSTarget1, func(u ldsUpdate, err error) { t.Logf("in v2c.watchLDS callback, ldsUpdate: %+v, err: %v", u, err) if u.routeName != "" { callbackCh.Send(fmt.Errorf("received routeName %v in ldsCallback, wanted empty string", u.routeName)) } if err == nil { callbackCh.Send(errors.New("received nil error in ldsCallback")) } callbackCh.Send(nil) }) // Wait till the request makes it to the fakeServer. This ensures that // the watch request has been processed by the v2Client. if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an LDS request") } waitForNilErr(t, callbackCh) } grpc-go-1.29.1/xds/internal/client/logging.go000066400000000000000000000013521365033716300210170ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package client import ( "fmt" ) const prefix = "[xds-client %p] " func loggingPrefix(p *Client) string { return fmt.Sprintf(prefix, p) } grpc-go-1.29.1/xds/internal/client/rds.go000066400000000000000000000172511365033716300201660ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package client import ( "fmt" "strings" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" routepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" "github.com/golang/protobuf/ptypes" ) // handleRDSResponse processes an RDS response received from the xDS server. On // receipt of a good response, it caches validated resources and also invokes // the registered watcher callback. 
func (v2c *v2Client) handleRDSResponse(resp *xdspb.DiscoveryResponse) error { v2c.mu.Lock() defer v2c.mu.Unlock() if v2c.watchMap[ldsURL] == nil { return fmt.Errorf("xds: unexpected RDS response when no LDS watcher is registered: %+v", resp) } target := v2c.watchMap[ldsURL].target[0] wi := v2c.watchMap[rdsURL] if wi == nil { return fmt.Errorf("xds: no RDS watcher found when handling RDS response: %+v", resp) } returnCluster := "" localCache := make(map[string]string) for _, r := range resp.GetResources() { var resource ptypes.DynamicAny if err := ptypes.UnmarshalAny(r, &resource); err != nil { return fmt.Errorf("xds: failed to unmarshal resource in RDS response: %v", err) } rc, ok := resource.Message.(*xdspb.RouteConfiguration) if !ok { return fmt.Errorf("xds: unexpected resource type: %T in RDS response", resource.Message) } v2c.logger.Infof("Resource with name: %v, type: %T, contains: %v", rc.GetName(), rc, rc) cluster := getClusterFromRouteConfiguration(rc, target) if cluster == "" { return fmt.Errorf("xds: received invalid RouteConfiguration in RDS response: %+v", rc) } // If we get here, it means that this resource was a good one. localCache[rc.GetName()] = cluster v2c.logger.Debugf("Resource with name %v, type %T, value %+v added to cache", rc.GetName(), cluster, cluster) // TODO: remove cache, and only process resources that are interesting. if rc.GetName() == wi.target[0] { returnCluster = cluster } } // Update the cache in the v2Client only after we have confirmed that all // resources in the received response were good. for k, v := range localCache { // TODO: Need to handle deletion of entries from the cache based on LDS // watch calls. Not handling it does not affect correctness, but leads // to unnecessary memory consumption. v2c.rdsCache[k] = v } if returnCluster != "" { // We stop the expiry timer and invoke the callback only when we have // received the resource that we are watching for. 
Since RDS is an // incremental protocol, the fact that we did not receive the resource // that we are watching for in this response does not mean that the // server does not know about it. wi.stopTimer() wi.rdsCallback(rdsUpdate{clusterName: returnCluster}, nil) } return nil } // getClusterFromRouteConfiguration checks if the provided RouteConfiguration // meets the expected criteria. If so, it returns a non-empty clusterName. // // A RouteConfiguration resource is considered valid when only if it contains a // VirtualHost whose domain field matches the server name from the URI passed // to the gRPC channel, and it contains a clusterName. // // The RouteConfiguration includes a list of VirtualHosts, which may have zero // or more elements. We are interested in the element whose domains field // matches the server name specified in the "xds:" URI. The only field in the // VirtualHost proto that the we are interested in is the list of routes. We // only look at the last route in the list (the default route), whose match // field must be empty and whose route field must be set. Inside that route // message, the cluster field will contain the clusterName we are looking for. func getClusterFromRouteConfiguration(rc *xdspb.RouteConfiguration, host string) string { // TODO: return error for better error logging and nack. // // Currently this returns "" on error, and the caller will return an error. // But the error doesn't contain details of why the response is invalid // (mismatch domain or empty route). // // For logging purposes, we can log in line. But if we want to populate // error details for nack, a detailed error needs to be returned. vh := findBestMatchingVirtualHost(host, rc.GetVirtualHosts()) if vh == nil { // No matching virtual host found. return "" } if len(vh.Routes) == 0 { // The matched virtual host has no routes, this is invalid because there // should be at least one default route. 
return "" } dr := vh.Routes[len(vh.Routes)-1] if match := dr.GetMatch(); match == nil || match.GetPrefix() != "" { // The matched virtual host is invalid. return "" } if route := dr.GetRoute(); route != nil { return route.GetCluster() } return "" } type domainMatchType int const ( domainMatchTypeInvalid domainMatchType = iota domainMatchTypeUniversal domainMatchTypePrefix domainMatchTypeSuffix domainMatchTypeExact ) // Exact > Suffix > Prefix > Universal > Invalid. func (t domainMatchType) betterThan(b domainMatchType) bool { return t > b } func matchTypeForDomain(d string) domainMatchType { if d == "" { return domainMatchTypeInvalid } if d == "*" { return domainMatchTypeUniversal } if strings.HasPrefix(d, "*") { return domainMatchTypeSuffix } if strings.HasSuffix(d, "*") { return domainMatchTypePrefix } if strings.Contains(d, "*") { return domainMatchTypeInvalid } return domainMatchTypeExact } func match(domain, host string) (domainMatchType, bool) { switch typ := matchTypeForDomain(domain); typ { case domainMatchTypeInvalid: return typ, false case domainMatchTypeUniversal: return typ, true case domainMatchTypePrefix: // abc.* return typ, strings.HasPrefix(host, strings.TrimSuffix(domain, "*")) case domainMatchTypeSuffix: // *.123 return typ, strings.HasSuffix(host, strings.TrimPrefix(domain, "*")) case domainMatchTypeExact: return typ, domain == host default: return domainMatchTypeInvalid, false } } // findBestMatchingVirtualHost returns the virtual host whose domains field best // matches host // // The domains field support 4 different matching pattern types: // - Exact match // - Suffix match (e.g. “*ABC”) // - Prefix match (e.g. “ABC*) // - Universal match (e.g. 
“*”) // // The best match is defined as: // - A match is better if it’s matching pattern type is better // - Exact match > suffix match > prefix match > universal match // - If two matches are of the same pattern type, the longer match is better // - This is to compare the length of the matching pattern, e.g. “*ABCDE” > // “*ABC” func findBestMatchingVirtualHost(host string, vHosts []*routepb.VirtualHost) *routepb.VirtualHost { var ( matchVh *routepb.VirtualHost matchType = domainMatchTypeInvalid matchLen int ) for _, vh := range vHosts { for _, domain := range vh.GetDomains() { typ, matched := match(domain, host) if typ == domainMatchTypeInvalid { // The rds response is invalid. return nil } if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { // The previous match has better type, or the previous match has // better length, or this domain isn't a match. continue } matchVh = vh matchType = typ matchLen = len(domain) } } return matchVh } grpc-go-1.29.1/xds/internal/client/rds_test.go000066400000000000000000000446171365033716300212330ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package client import ( "errors" "fmt" "testing" "time" discoverypb "github.com/envoyproxy/go-control-plane/envoy/api/v2" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" routepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" "github.com/golang/protobuf/proto" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeserver" ) func (v2c *v2Client) cloneRDSCacheForTesting() map[string]string { v2c.mu.Lock() defer v2c.mu.Unlock() cloneCache := make(map[string]string) for k, v := range v2c.rdsCache { cloneCache[k] = v } return cloneCache } func (s) TestRDSGetClusterFromRouteConfiguration(t *testing.T) { tests := []struct { name string rc *xdspb.RouteConfiguration wantCluster string }{ { name: "no-virtual-hosts-in-rc", rc: emptyRouteConfig, wantCluster: "", }, { name: "no-domains-in-rc", rc: noDomainsInRouteConfig, wantCluster: "", }, { name: "non-matching-domain-in-rc", rc: &xdspb.RouteConfiguration{ VirtualHosts: []*routepb.VirtualHost{ {Domains: []string{uninterestingDomain}}, }, }, wantCluster: "", }, { name: "no-routes-in-rc", rc: &xdspb.RouteConfiguration{ VirtualHosts: []*routepb.VirtualHost{ {Domains: []string{goodLDSTarget1}}, }, }, wantCluster: "", }, { name: "default-route-match-field-is-nil", rc: &xdspb.RouteConfiguration{ VirtualHosts: []*routepb.VirtualHost{ { Domains: []string{goodLDSTarget1}, Routes: []*routepb.Route{ { Action: &routepb.Route_Route{ Route: &routepb.RouteAction{ ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: goodClusterName1}, }, }, }, }, }, }, }, wantCluster: "", }, { name: "default-route-match-field-is-non-nil", rc: &xdspb.RouteConfiguration{ VirtualHosts: []*routepb.VirtualHost{ { Domains: []string{goodLDSTarget1}, Routes: []*routepb.Route{ { Match: &routepb.RouteMatch{}, Action: &routepb.Route_Route{}, }, }, }, }, }, wantCluster: "", }, { name: "default-route-routeaction-field-is-nil", rc: &xdspb.RouteConfiguration{ 
VirtualHosts: []*routepb.VirtualHost{ { Domains: []string{goodLDSTarget1}, Routes: []*routepb.Route{{}}, }, }, }, wantCluster: "", }, { name: "default-route-cluster-field-is-empty", rc: &xdspb.RouteConfiguration{ VirtualHosts: []*routepb.VirtualHost{ { Domains: []string{goodLDSTarget1}, Routes: []*routepb.Route{ { Action: &routepb.Route_Route{ Route: &routepb.RouteAction{ ClusterSpecifier: &routepb.RouteAction_ClusterHeader{}, }, }, }, }, }, }, }, wantCluster: "", }, { name: "good-route-config", rc: goodRouteConfig1, wantCluster: goodClusterName1, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { if gotCluster := getClusterFromRouteConfiguration(test.rc, goodLDSTarget1); gotCluster != test.wantCluster { t.Errorf("getClusterFromRouteConfiguration(%+v, %v) = %v, want %v", test.rc, goodLDSTarget1, gotCluster, test.wantCluster) } }) } } // doLDS makes a LDS watch, and waits for the response and ack to finish. // // This is called by RDS tests to start LDS first, because LDS is a // pre-requirement for RDS, and RDS handle would fail without an existing LDS // watch. func doLDS(t *testing.T, v2c *v2Client, fakeServer *fakeserver.Server) { // Register an LDS watcher, and wait till the request is sent out, the // response is received and the callback is invoked. cbCh := testutils.NewChannel() v2c.watchLDS(goodLDSTarget1, func(u ldsUpdate, err error) { t.Logf("v2c.watchLDS callback, ldsUpdate: %+v, err: %v", u, err) cbCh.Send(err) }) if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout waiting for LDS request: %v", err) } fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} waitForNilErr(t, cbCh) // Read the LDS ack, to clear RequestChan for following tests. if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout waiting for LDS ACK: %v", err) } } // TestRDSHandleResponse starts a fake xDS server, makes a ClientConn to it, // and creates a v2Client using it. 
Then, it registers an LDS and RDS watcher // and tests different RDS responses. func (s) TestRDSHandleResponse(t *testing.T) { fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() doLDS(t, v2c, fakeServer) tests := []struct { name string rdsResponse *xdspb.DiscoveryResponse wantErr bool wantUpdate *rdsUpdate wantUpdateErr bool }{ // Badly marshaled RDS response. { name: "badly-marshaled-response", rdsResponse: badlyMarshaledRDSResponse, wantErr: true, wantUpdate: nil, wantUpdateErr: false, }, // Response does not contain RouteConfiguration proto. { name: "no-route-config-in-response", rdsResponse: badResourceTypeInRDSResponse, wantErr: true, wantUpdate: nil, wantUpdateErr: false, }, // No VirtualHosts in the response. Just one test case here for a bad // RouteConfiguration, since the others are covered in // TestGetClusterFromRouteConfiguration. { name: "no-virtual-hosts-in-response", rdsResponse: noVirtualHostsInRDSResponse, wantErr: true, wantUpdate: nil, wantUpdateErr: false, }, // Response contains one good RouteConfiguration, uninteresting though. { name: "one-uninteresting-route-config", rdsResponse: goodRDSResponse2, wantErr: false, wantUpdate: nil, wantUpdateErr: false, }, // Response contains one good interesting RouteConfiguration. 
{ name: "one-good-route-config", rdsResponse: goodRDSResponse1, wantErr: false, wantUpdate: &rdsUpdate{clusterName: goodClusterName1}, wantUpdateErr: false, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { testWatchHandle(t, &watchHandleTestcase{ responseToHandle: test.rdsResponse, wantHandleErr: test.wantErr, wantUpdate: test.wantUpdate, wantUpdateErr: test.wantUpdateErr, rdsWatch: v2c.watchRDS, watchReqChan: fakeServer.XDSRequestChan, handleXDSResp: v2c.handleRDSResponse, }) }) } } // TestRDSHandleResponseWithoutLDSWatch tests the case where the v2Client // receives an RDS response without a registered LDS watcher. func (s) TestRDSHandleResponseWithoutLDSWatch(t *testing.T) { _, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() if v2c.handleRDSResponse(goodRDSResponse1) == nil { t.Fatal("v2c.handleRDSResponse() succeeded, should have failed") } } // TestRDSHandleResponseWithoutRDSWatch tests the case where the v2Client // receives an RDS response without a registered RDS watcher. func (s) TestRDSHandleResponseWithoutRDSWatch(t *testing.T) { fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() doLDS(t, v2c, fakeServer) if v2c.handleRDSResponse(goodRDSResponse1) == nil { t.Fatal("v2c.handleRDSResponse() succeeded, should have failed") } } // rdsTestOp contains all data related to one particular test operation. Not // all fields make sense for all tests. type rdsTestOp struct { // target is the resource name to watch for. target string // responseToSend is the xDS response sent to the client responseToSend *fakeserver.Response // wantOpErr specfies whether the main operation should return an error. wantOpErr bool // wantRDSCache is the expected rdsCache at the end of an operation. 
wantRDSCache map[string]string // wantWatchCallback specifies if the watch callback should be invoked. wantWatchCallback bool } // testRDSCaching is a helper function which starts a fake xDS server, makes a // ClientConn to it, creates a v2Client using it, registers an LDS watcher and // pushes a good LDS response. It then reads a bunch of test operations to be // performed from rdsTestOps and returns error, if any, on the provided error // channel. This is executed in a separate goroutine. func testRDSCaching(t *testing.T, rdsTestOps []rdsTestOp, errCh *testutils.Channel) { t.Helper() fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() t.Log("Started xds v2Client...") doLDS(t, v2c, fakeServer) callbackCh := make(chan struct{}, 1) for _, rdsTestOp := range rdsTestOps { // Register a watcher if required, and use a channel to signal the // successful invocation of the callback. if rdsTestOp.target != "" { v2c.watchRDS(rdsTestOp.target, func(u rdsUpdate, err error) { t.Logf("Received callback with rdsUpdate {%+v} and error {%v}", u, err) callbackCh <- struct{}{} }) t.Logf("Registered a watcher for RDS target: %v...", rdsTestOp.target) // Wait till the request makes it to the fakeServer. This ensures that // the watch request has been processed by the v2Client. if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { errCh.Send(fmt.Errorf("Timeout waiting for RDS request: %v", err)) } t.Log("FakeServer received request...") } // Directly push the response through a call to handleRDSResponse, // thereby bypassing the fakeServer. 
if rdsTestOp.responseToSend != nil { resp := rdsTestOp.responseToSend.Resp.(*discoverypb.DiscoveryResponse) if err := v2c.handleRDSResponse(resp); (err != nil) != rdsTestOp.wantOpErr { errCh.Send(fmt.Errorf("v2c.handleRDSResponse(%+v) returned err: %v", resp, err)) return } } // If the test needs the callback to be invoked, just verify that // it was invoked. Since we verify the contents of the cache, it's // ok not to verify the contents of the callback. if rdsTestOp.wantWatchCallback { <-callbackCh } if !cmp.Equal(v2c.cloneRDSCacheForTesting(), rdsTestOp.wantRDSCache) { errCh.Send(fmt.Errorf("gotRDSCache: %v, wantRDSCache: %v", v2c.rdsCache, rdsTestOp.wantRDSCache)) return } } t.Log("Completed all test ops successfully...") errCh.Send(nil) } // TestRDSCaching tests some end-to-end RDS flows using a fake xDS server, and // verifies the RDS data cached at the v2Client. func (s) TestRDSCaching(t *testing.T) { ops := []rdsTestOp{ // Add an RDS watch for a resource name (goodRouteName1), which returns one // matching resource in the response. { target: goodRouteName1, responseToSend: &fakeserver.Response{Resp: goodRDSResponse1}, wantRDSCache: map[string]string{goodRouteName1: goodClusterName1}, wantWatchCallback: true, }, // Push an RDS response with a new resource. This resource is considered // good because its domain field matches our LDS watch target, but the // routeConfigName does not match our RDS watch (so the watch callback will // not be invoked). But this should still be cached. { responseToSend: &fakeserver.Response{Resp: goodRDSResponse2}, wantRDSCache: map[string]string{ goodRouteName1: goodClusterName1, goodRouteName2: goodClusterName2, }, }, // Push an uninteresting RDS response. This should cause handleRDSResponse // to return an error. But the watch callback should not be invoked, and // the cache should not be updated. 
{ responseToSend: &fakeserver.Response{Resp: uninterestingRDSResponse}, wantOpErr: true, wantRDSCache: map[string]string{ goodRouteName1: goodClusterName1, goodRouteName2: goodClusterName2, }, }, // Switch the watch target to goodRouteName2, which was already cached. No // response is received from the server (as expected), but we want the // callback to be invoked with the new clusterName. { target: goodRouteName2, wantRDSCache: map[string]string{ goodRouteName1: goodClusterName1, goodRouteName2: goodClusterName2, }, wantWatchCallback: true, }, } errCh := testutils.NewChannel() go testRDSCaching(t, ops, errCh) waitForNilErr(t, errCh) } // TestRDSWatchExpiryTimer tests the case where the client does not receive an // RDS response for the request that it sends out. We want the watch callback // to be invoked with an error once the watchExpiryTimer fires. func (s) TestRDSWatchExpiryTimer(t *testing.T) { oldWatchExpiryTimeout := defaultWatchExpiryTimeout defaultWatchExpiryTimeout = 500 * time.Millisecond defer func() { defaultWatchExpiryTimeout = oldWatchExpiryTimeout }() fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() t.Log("Started xds v2Client...") doLDS(t, v2c, fakeServer) callbackCh := testutils.NewChannel() v2c.watchRDS(goodRouteName1, func(u rdsUpdate, err error) { t.Logf("Received callback with rdsUpdate {%+v} and error {%v}", u, err) if u.clusterName != "" { callbackCh.Send(fmt.Errorf("received clusterName %v in rdsCallback, wanted empty string", u.clusterName)) } if err == nil { callbackCh.Send(errors.New("received nil error in rdsCallback")) } callbackCh.Send(nil) }) // Wait till the request makes it to the fakeServer. This ensures that // the watch request has been processed by the v2Client. 
if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an RDS request") } waitForNilErr(t, callbackCh) } func TestMatchTypeForDomain(t *testing.T) { tests := []struct { d string want domainMatchType }{ {d: "", want: domainMatchTypeInvalid}, {d: "*", want: domainMatchTypeUniversal}, {d: "bar.*", want: domainMatchTypePrefix}, {d: "*.abc.com", want: domainMatchTypeSuffix}, {d: "foo.bar.com", want: domainMatchTypeExact}, {d: "foo.*.com", want: domainMatchTypeInvalid}, } for _, tt := range tests { if got := matchTypeForDomain(tt.d); got != tt.want { t.Errorf("matchTypeForDomain(%q) = %v, want %v", tt.d, got, tt.want) } } } func TestMatch(t *testing.T) { tests := []struct { name string domain string host string wantTyp domainMatchType wantMatched bool }{ {name: "invalid-empty", domain: "", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false}, {name: "invalid", domain: "a.*.b", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false}, {name: "universal", domain: "*", host: "abc.com", wantTyp: domainMatchTypeUniversal, wantMatched: true}, {name: "prefix-match", domain: "abc.*", host: "abc.123", wantTyp: domainMatchTypePrefix, wantMatched: true}, {name: "prefix-no-match", domain: "abc.*", host: "abcd.123", wantTyp: domainMatchTypePrefix, wantMatched: false}, {name: "suffix-match", domain: "*.123", host: "abc.123", wantTyp: domainMatchTypeSuffix, wantMatched: true}, {name: "suffix-no-match", domain: "*.123", host: "abc.1234", wantTyp: domainMatchTypeSuffix, wantMatched: false}, {name: "exact-match", domain: "foo.bar", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: true}, {name: "exact-no-match", domain: "foo.bar.com", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if gotTyp, gotMatched := match(tt.domain, tt.host); gotTyp != tt.wantTyp || gotMatched != tt.wantMatched { t.Errorf("match() = %v, %v, want %v, %v", 
gotTyp, gotMatched, tt.wantTyp, tt.wantMatched) } }) } } func TestFindBestMatchingVirtualHost(t *testing.T) { var ( oneExactMatch = &routepb.VirtualHost{ Name: "one-exact-match", Domains: []string{"foo.bar.com"}, } oneSuffixMatch = &routepb.VirtualHost{ Name: "one-suffix-match", Domains: []string{"*.bar.com"}, } onePrefixMatch = &routepb.VirtualHost{ Name: "one-prefix-match", Domains: []string{"foo.bar.*"}, } oneUniversalMatch = &routepb.VirtualHost{ Name: "one-universal-match", Domains: []string{"*"}, } longExactMatch = &routepb.VirtualHost{ Name: "one-exact-match", Domains: []string{"v2.foo.bar.com"}, } multipleMatch = &routepb.VirtualHost{ Name: "multiple-match", Domains: []string{"pi.foo.bar.com", "314.*", "*.159"}, } vhs = []*routepb.VirtualHost{oneExactMatch, oneSuffixMatch, onePrefixMatch, oneUniversalMatch, longExactMatch, multipleMatch} ) tests := []struct { name string host string vHosts []*routepb.VirtualHost want *routepb.VirtualHost }{ {name: "exact-match", host: "foo.bar.com", vHosts: vhs, want: oneExactMatch}, {name: "suffix-match", host: "123.bar.com", vHosts: vhs, want: oneSuffixMatch}, {name: "prefix-match", host: "foo.bar.org", vHosts: vhs, want: onePrefixMatch}, {name: "universal-match", host: "abc.123", vHosts: vhs, want: oneUniversalMatch}, {name: "long-exact-match", host: "v2.foo.bar.com", vHosts: vhs, want: longExactMatch}, // Matches suffix "*.bar.com" and exact "pi.foo.bar.com". Takes exact. {name: "multiple-match-exact", host: "pi.foo.bar.com", vHosts: vhs, want: multipleMatch}, // Matches suffix "*.159" and prefix "foo.bar.*". Takes suffix. {name: "multiple-match-suffix", host: "foo.bar.159", vHosts: vhs, want: multipleMatch}, // Matches suffix "*.bar.com" and prefix "314.*". Takes suffix. 
{name: "multiple-match-prefix", host: "314.bar.com", vHosts: vhs, want: oneSuffixMatch}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := findBestMatchingVirtualHost(tt.host, tt.vHosts); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) { t.Errorf("findBestMatchingVirtualHost() = %v, want %v", got, tt.want) } }) } } grpc-go-1.29.1/xds/internal/client/testutil_test.go000066400000000000000000000135001365033716300223030ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package client import ( "reflect" "testing" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeserver" ) type watchHandleTestcase struct { responseToHandle *xdspb.DiscoveryResponse wantHandleErr bool wantUpdate interface{} wantUpdateErr bool // Only one of the following should be non-nil. The one corresponding with // typeURL will be called. ldsWatch func(target string, ldsCb ldsCallbackFunc) (cancel func()) rdsWatch func(routeName string, rdsCb rdsCallbackFunc) (cancel func()) cdsWatch func(clusterName string, cdsCb cdsCallbackFunc) (cancel func()) edsWatch func(clusterName string, edsCb edsCallbackFunc) (cancel func()) watchReqChan *testutils.Channel // The request sent for watch will be sent to this channel. 
handleXDSResp func(response *xdspb.DiscoveryResponse) error } // testWatchHandle is called to test response handling for each xDS. // // It starts the xDS watch as configured in test, waits for the fake xds server // to receive the request (so watch callback is installed), and calls // handleXDSResp with responseToHandle (if it's set). It then compares the // update received by watch callback with the expected results. func testWatchHandle(t *testing.T, test *watchHandleTestcase) { type updateErr struct { u interface{} err error } gotUpdateCh := testutils.NewChannel() var cancelWatch func() // Register the watcher, this will also trigger the v2Client to send the xDS // request. switch { case test.ldsWatch != nil: cancelWatch = test.ldsWatch(goodLDSTarget1, func(u ldsUpdate, err error) { t.Logf("in v2c.watchLDS callback, ldsUpdate: %+v, err: %v", u, err) gotUpdateCh.Send(updateErr{u, err}) }) case test.rdsWatch != nil: cancelWatch = test.rdsWatch(goodRouteName1, func(u rdsUpdate, err error) { t.Logf("in v2c.watchRDS callback, rdsUpdate: %+v, err: %v", u, err) gotUpdateCh.Send(updateErr{u, err}) }) case test.cdsWatch != nil: cancelWatch = test.cdsWatch(clusterName1, func(u CDSUpdate, err error) { t.Logf("in v2c.watchCDS callback, cdsUpdate: %+v, err: %v", u, err) gotUpdateCh.Send(updateErr{u, err}) }) case test.edsWatch != nil: cancelWatch = test.edsWatch(goodEDSName, func(u *EDSUpdate, err error) { t.Logf("in v2c.watchEDS callback, edsUpdate: %+v, err: %v", u, err) gotUpdateCh.Send(updateErr{*u, err}) }) default: t.Fatalf("no watch() is set") } defer cancelWatch() // Wait till the request makes it to the fakeServer. This ensures that // the watch request has been processed by the v2Client. if _, err := test.watchReqChan.Receive(); err != nil { t.Fatalf("Timeout waiting for an xDS request: %v", err) } // Directly push the response through a call to handleXDSResp. This bypasses // the fakeServer, so it's only testing the handle logic. 
Client response // processing is covered elsewhere. // // Also note that this won't trigger ACK, so there's no need to clear the // request channel afterwards. if err := test.handleXDSResp(test.responseToHandle); (err != nil) != test.wantHandleErr { t.Fatalf("v2c.handleRDSResponse() returned err: %v, wantErr: %v", err, test.wantHandleErr) } // If the test doesn't expect the callback to be invoked, verify that no // update or error is pushed to the callback. // // Cannot directly compare test.wantUpdate with nil (typed vs non-typed nil: // https://golang.org/doc/faq#nil_error). if c := test.wantUpdate; c == nil || (reflect.ValueOf(c).Kind() == reflect.Ptr && reflect.ValueOf(c).IsNil()) { update, err := gotUpdateCh.Receive() if err == testutils.ErrRecvTimeout { return } t.Fatalf("Unexpected update: +%v", update) } wantUpdate := reflect.ValueOf(test.wantUpdate).Elem().Interface() uErr, err := gotUpdateCh.Receive() if err == testutils.ErrRecvTimeout { t.Fatal("Timeout expecting xDS update") } gotUpdate := uErr.(updateErr).u opt := cmp.AllowUnexported(rdsUpdate{}, ldsUpdate{}, CDSUpdate{}, EDSUpdate{}) if diff := cmp.Diff(gotUpdate, wantUpdate, opt); diff != "" { t.Fatalf("got update : %+v, want %+v, diff: %s", gotUpdate, wantUpdate, diff) } gotUpdateErr := uErr.(updateErr).err if (gotUpdateErr != nil) != test.wantUpdateErr { t.Fatalf("got xDS update error {%v}, wantErr: %v", gotUpdateErr, test.wantUpdateErr) } } // startServerAndGetCC starts a fake XDS server and also returns a ClientConn // connected to it. 
func startServerAndGetCC(t *testing.T) (*fakeserver.Server, *grpc.ClientConn, func()) { t.Helper() fs, sCleanup, err := fakeserver.StartServer() if err != nil { t.Fatalf("Failed to start fake xDS server: %v", err) } cc, ccCleanup, err := fs.XDSClientConn() if err != nil { sCleanup() t.Fatalf("Failed to get a clientConn to the fake xDS server: %v", err) } return fs, cc, func() { sCleanup() ccCleanup() } } // waitForNilErr waits for a nil error value to be received on the // provided channel. func waitForNilErr(t *testing.T, ch *testutils.Channel) { t.Helper() val, err := ch.Receive() if err == testutils.ErrRecvTimeout { t.Fatalf("Timeout expired when expecting update") } if val != nil { if cbErr := val.(error); cbErr != nil { t.Fatal(cbErr) } } } grpc-go-1.29.1/xds/internal/client/types.go000066400000000000000000000053401365033716300205360ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package client import ( "time" adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2" ) type adsStream adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient const ( ldsURL = "type.googleapis.com/envoy.api.v2.Listener" rdsURL = "type.googleapis.com/envoy.api.v2.RouteConfiguration" cdsURL = "type.googleapis.com/envoy.api.v2.Cluster" edsURL = "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment" ) // watchState is an enum to represent the state of a watch call. 
type watchState int const ( watchEnqueued watchState = iota watchCancelled watchStarted ) // watchInfo holds all the information about a watch call. type watchInfo struct { typeURL string target []string state watchState ldsCallback ldsCallbackFunc rdsCallback rdsCallbackFunc cdsCallback cdsCallbackFunc edsCallback edsCallbackFunc expiryTimer *time.Timer } // cancel marks the state as cancelled, and also stops the expiry timer. func (wi *watchInfo) cancel() { wi.state = watchCancelled if wi.expiryTimer != nil { wi.expiryTimer.Stop() } } // stopTimer stops the expiry timer without cancelling the watch. func (wi *watchInfo) stopTimer() { if wi.expiryTimer != nil { wi.expiryTimer.Stop() } } type ackInfo struct { typeURL string version string // NACK if version is an empty string. nonce string // ACK/NACK are tagged with the stream it's for. When the stream is down, // all the ACK/NACK for this stream will be dropped, and the version/nonce // won't be updated. stream adsStream } type ldsUpdate struct { routeName string } type ldsCallbackFunc func(ldsUpdate, error) type rdsUpdate struct { clusterName string } type rdsCallbackFunc func(rdsUpdate, error) // CDSUpdate contains information from a received CDS response, which is of // interest to the registered CDS watcher. type CDSUpdate struct { // ServiceName is the service name corresponding to the clusterName which // is being watched for through CDS. ServiceName string // EnableLRS indicates whether or not load should be reported through LRS. EnableLRS bool } type cdsCallbackFunc func(CDSUpdate, error) type edsCallbackFunc func(*EDSUpdate, error) grpc-go-1.29.1/xds/internal/client/v2client.go000066400000000000000000000460671365033716300211330ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package client import ( "context" "fmt" "sync" "time" "google.golang.org/grpc" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2" ) // The value chosen here is based on the default value of the // initial_fetch_timeout field in corepb.ConfigSource proto. var defaultWatchExpiryTimeout = 15 * time.Second // v2Client performs the actual xDS RPCs using the xDS v2 API. It creates a // single ADS stream on which the different types of xDS requests and responses // are multiplexed. // The reason for splitting this out from the top level xdsClient object is // because there is already an xDS v3Aplha API in development. If and when we // want to switch to that, this separation will ease that process. type v2Client struct { ctx context.Context cancelCtx context.CancelFunc // ClientConn to the xDS gRPC server. Owned by the parent xdsClient. cc *grpc.ClientConn nodeProto *corepb.Node backoff func(int) time.Duration logger *grpclog.PrefixLogger streamCh chan adsStream // sendCh in the channel onto which watchInfo objects are pushed by the // watch API, and it is read and acted upon by the send() goroutine. sendCh *buffer.Unbounded mu sync.Mutex // Message specific watch infos, protected by the above mutex. 
These are // written to, after successfully reading from the update channel, and are // read from when recovering from a broken stream to resend the xDS // messages. When the user of this client object cancels a watch call, // these are set to nil. All accesses to the map protected and any value // inside the map should be protected with the above mutex. watchMap map[string]*watchInfo // versionMap contains the version that was acked (the version in the ack // request that was sent on wire). The key is typeURL, the value is the // version string, becaues the versions for different resource types should // be independent. versionMap map[string]string // nonceMap contains the nonce from the most recent received response. nonceMap map[string]string // rdsCache maintains a mapping of {routeConfigName --> clusterName} from // validated route configurations received in RDS responses. We cache all // valid route configurations, whether or not we are interested in them // when we received them (because we could become interested in them in the // future and the server wont send us those resources again). // Protected by the above mutex. // // TODO: remove RDS cache. The updated spec says client can ignore // unrequested resources. // https://github.com/envoyproxy/envoy/blob/master/api/xds_protocol.rst#resource-hints rdsCache map[string]string // rdsCache maintains a mapping of {clusterName --> CDSUpdate} from // validated cluster configurations received in CDS responses. We cache all // valid cluster configurations, whether or not we are interested in them // when we received them (because we could become interested in them in the // future and the server wont send us those resources again). This is only // to support legacy management servers that do not honor the // resource_names field. As per the latest spec, the server should resend // the response when the request changes, even if it had sent the same // resource earlier (when not asked for). 
Protected by the above mutex. cdsCache map[string]CDSUpdate } // newV2Client creates a new v2Client initialized with the passed arguments. func newV2Client(cc *grpc.ClientConn, nodeProto *corepb.Node, backoff func(int) time.Duration, logger *grpclog.PrefixLogger) *v2Client { v2c := &v2Client{ cc: cc, nodeProto: nodeProto, backoff: backoff, logger: logger, streamCh: make(chan adsStream, 1), sendCh: buffer.NewUnbounded(), watchMap: make(map[string]*watchInfo), versionMap: make(map[string]string), nonceMap: make(map[string]string), rdsCache: make(map[string]string), cdsCache: make(map[string]CDSUpdate), } v2c.ctx, v2c.cancelCtx = context.WithCancel(context.Background()) go v2c.run() return v2c } // close cleans up resources and goroutines allocated by this client. func (v2c *v2Client) close() { v2c.cancelCtx() } // run starts an ADS stream (and backs off exponentially, if the previous // stream failed without receiving a single reply) and runs the sender and // receiver routines to send and receive data from the stream respectively. func (v2c *v2Client) run() { go v2c.send() // TODO: start a goroutine monitoring ClientConn's connectivity state, and // report error (and log) when stats is transient failure. retries := 0 for { select { case <-v2c.ctx.Done(): return default: } if retries != 0 { t := time.NewTimer(v2c.backoff(retries)) select { case <-t.C: case <-v2c.ctx.Done(): if !t.Stop() { <-t.C } return } } retries++ cli := adsgrpc.NewAggregatedDiscoveryServiceClient(v2c.cc) stream, err := cli.StreamAggregatedResources(v2c.ctx, grpc.WaitForReady(true)) if err != nil { v2c.logger.Warningf("xds: ADS stream creation failed: %v", err) continue } v2c.logger.Infof("ADS stream created") select { case <-v2c.streamCh: default: } v2c.streamCh <- stream if v2c.recv(stream) { retries = 0 } } } // sendRequest sends a request for provided typeURL and resource on the provided // stream. 
// // version is the ack version to be sent with the request // - If this is the new request (not an ack/nack), version will be an empty // string // - If this is an ack, version will be the version from the response // - If this is a nack, version will be the previous acked version (from // versionMap). If there was no ack before, it will be an empty string func (v2c *v2Client) sendRequest(stream adsStream, resourceNames []string, typeURL, version, nonce string) bool { req := &xdspb.DiscoveryRequest{ Node: v2c.nodeProto, TypeUrl: typeURL, ResourceNames: resourceNames, VersionInfo: version, ResponseNonce: nonce, // TODO: populate ErrorDetails for nack. } if err := stream.Send(req); err != nil { return false } v2c.logger.Debugf("ADS request sent: %v", req) return true } // sendExisting sends out xDS requests for registered watchers when recovering // from a broken stream. // // We call stream.Send() here with the lock being held. It should be OK to do // that here because the stream has just started and Send() usually returns // quickly (once it pushes the message onto the transport layer) and is only // ever blocked if we don't have enough flow control quota. func (v2c *v2Client) sendExisting(stream adsStream) bool { v2c.mu.Lock() defer v2c.mu.Unlock() // Reset the ack versions when the stream restarts. v2c.versionMap = make(map[string]string) v2c.nonceMap = make(map[string]string) for typeURL, wi := range v2c.watchMap { if !v2c.sendRequest(stream, wi.target, typeURL, "", "") { return false } } return true } // processWatchInfo pulls the fields needed by the request from a watchInfo. // // It also calls callback with cached response, and updates the watch map in // v2c. 
// // If the watch was already canceled, it returns false for send func (v2c *v2Client) processWatchInfo(t *watchInfo) (target []string, typeURL, version, nonce string, send bool) { v2c.mu.Lock() defer v2c.mu.Unlock() if t.state == watchCancelled { return // This returns all zero values, and false for send. } t.state = watchStarted send = true typeURL = t.typeURL target = t.target v2c.checkCacheAndUpdateWatchMap(t) // TODO: if watch is called again with the same resource names, // there's no need to send another request. // We don't reset version or nonce when a new watch is started. The version // and nonce from previous response are carried by the request unless the // stream is recreated. version = v2c.versionMap[typeURL] nonce = v2c.nonceMap[typeURL] return } // processAckInfo pulls the fields needed by the ack request from a ackInfo. // // If no active watch is found for this ack, it returns false for send. func (v2c *v2Client) processAckInfo(t *ackInfo, stream adsStream) (target []string, typeURL, version, nonce string, send bool) { if t.stream != stream { // If ACK's stream isn't the current sending stream, this means the ACK // was pushed to queue before the old stream broke, and a new stream has // been started since. Return immediately here so we don't update the // nonce for the new stream. return } typeURL = t.typeURL v2c.mu.Lock() defer v2c.mu.Unlock() // Update the nonce no matter if we are going to send the ACK request on // wire. We may not send the request if the watch is canceled. But the nonce // needs to be updated so the next request will have the right nonce. nonce = t.nonce v2c.nonceMap[typeURL] = nonce wi, ok := v2c.watchMap[typeURL] if !ok { // We don't send the request ack if there's no active watch (this can be // either the server sends responses before any request, or the watch is // canceled while the ackInfo is in queue), because there's no resource // name. 
And if we send a request with empty resource name list, the // server may treat it as a wild card and send us everything. return nil, "", "", "", false } send = true version = t.version if version == "" { // This is a nack, get the previous acked version. version = v2c.versionMap[typeURL] // version will still be an empty string if typeURL isn't // found in versionMap, this can happen if there wasn't any ack // before. } else { v2c.versionMap[typeURL] = version } target = wi.target return target, typeURL, version, nonce, send } // send is a separate goroutine for sending watch requests on the xds stream. // // It watches the stream channel for new streams, and the request channel for // new requests to send on the stream. // // For each new request (watchInfo), it's // - processed and added to the watch map // - so resend will pick them up when there are new streams) // - sent on the current stream if there's one // - the current stream is cleared when any send on it fails // // For each new stream, all the existing requests will be resent. // // Note that this goroutine doesn't do anything to the old stream when there's a // new one. In fact, there should be only one stream in progress, and new one // should only be created when the old one fails (recv returns an error). func (v2c *v2Client) send() { var stream adsStream for { select { case <-v2c.ctx.Done(): return case newStream := <-v2c.streamCh: stream = newStream if !v2c.sendExisting(stream) { // send failed, clear the current stream. stream = nil } case u := <-v2c.sendCh.Get(): v2c.sendCh.Load() var ( target []string typeURL, version, nonce string send bool ) switch t := u.(type) { case *watchInfo: target, typeURL, version, nonce, send = v2c.processWatchInfo(t) case *ackInfo: target, typeURL, version, nonce, send = v2c.processAckInfo(t, stream) } if !send { continue } if stream == nil { // There's no stream yet. Skip the request. This request // will be resent to the new streams. 
If no stream is // created, the watcher will timeout (same as server not // sending response back). continue } if !v2c.sendRequest(stream, target, typeURL, version, nonce) { // send failed, clear the current stream. stream = nil } } } } // recv receives xDS responses on the provided ADS stream and branches out to // message specific handlers. func (v2c *v2Client) recv(stream adsStream) bool { success := false for { resp, err := stream.Recv() // TODO: call watch callbacks with error when stream is broken. if err != nil { v2c.logger.Warningf("ADS stream is closed with error: %v", err) return success } v2c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) v2c.logger.Debugf("ADS response received: %v", resp) var respHandleErr error switch resp.GetTypeUrl() { case ldsURL: respHandleErr = v2c.handleLDSResponse(resp) case rdsURL: respHandleErr = v2c.handleRDSResponse(resp) case cdsURL: respHandleErr = v2c.handleCDSResponse(resp) case edsURL: respHandleErr = v2c.handleEDSResponse(resp) default: v2c.logger.Warningf("Resource type %v unknown in response from server", resp.GetTypeUrl()) continue } typeURL := resp.GetTypeUrl() if respHandleErr != nil { v2c.sendCh.Put(&ackInfo{ typeURL: typeURL, version: "", nonce: resp.GetNonce(), stream: stream, }) v2c.logger.Warningf("Sending NACK for response type: %v, version: %v, nonce: %v, reason: %v", typeURL, resp.GetVersionInfo(), resp.GetNonce(), respHandleErr) continue } v2c.sendCh.Put(&ackInfo{ typeURL: typeURL, version: resp.GetVersionInfo(), nonce: resp.GetNonce(), stream: stream, }) v2c.logger.Infof("Sending ACK for response type: %v, version: %v, nonce: %v", typeURL, resp.GetVersionInfo(), resp.GetNonce()) success = true } } // watchLDS registers an LDS watcher for the provided target. Updates // corresponding to received LDS responses will be pushed to the provided // callback. The caller can cancel the watch by invoking the returned cancel // function. 
// The provided callback should not block or perform any expensive operations // or call other methods of the v2Client object. func (v2c *v2Client) watchLDS(target string, ldsCb ldsCallbackFunc) (cancel func()) { return v2c.watch(&watchInfo{ typeURL: ldsURL, target: []string{target}, ldsCallback: ldsCb, }) } // watchRDS registers an RDS watcher for the provided routeName. Updates // corresponding to received RDS responses will be pushed to the provided // callback. The caller can cancel the watch by invoking the returned cancel // function. // The provided callback should not block or perform any expensive operations // or call other methods of the v2Client object. func (v2c *v2Client) watchRDS(routeName string, rdsCb rdsCallbackFunc) (cancel func()) { return v2c.watch(&watchInfo{ typeURL: rdsURL, target: []string{routeName}, rdsCallback: rdsCb, }) // TODO: Once a registered RDS watch is cancelled, we should send an RDS // request with no resources. This will let the server know that we are no // longer interested in this resource. } // watchCDS registers an CDS watcher for the provided clusterName. Updates // corresponding to received CDS responses will be pushed to the provided // callback. The caller can cancel the watch by invoking the returned cancel // function. // The provided callback should not block or perform any expensive operations // or call other methods of the v2Client object. func (v2c *v2Client) watchCDS(clusterName string, cdsCb cdsCallbackFunc) (cancel func()) { return v2c.watch(&watchInfo{ typeURL: cdsURL, target: []string{clusterName}, cdsCallback: cdsCb, }) } // watchEDS registers an EDS watcher for the provided clusterName. Updates // corresponding to received EDS responses will be pushed to the provided // callback. The caller can cancel the watch by invoking the returned cancel // function. // The provided callback should not block or perform any expensive operations // or call other methods of the v2Client object. 
func (v2c *v2Client) watchEDS(clusterName string, edsCb edsCallbackFunc) (cancel func()) { return v2c.watch(&watchInfo{ typeURL: edsURL, target: []string{clusterName}, edsCallback: edsCb, }) // TODO: Once a registered EDS watch is cancelled, we should send an EDS // request with no resources. This will let the server know that we are no // longer interested in this resource. } func (v2c *v2Client) watch(wi *watchInfo) (cancel func()) { v2c.sendCh.Put(wi) v2c.logger.Infof("Sending ADS request for new watch of type: %v, resource names: %v", wi.typeURL, wi.target) return func() { v2c.mu.Lock() defer v2c.mu.Unlock() if wi.state == watchEnqueued { wi.state = watchCancelled return } v2c.watchMap[wi.typeURL].cancel() delete(v2c.watchMap, wi.typeURL) // TODO: should we reset ack version string when cancelling the watch? } } // checkCacheAndUpdateWatchMap is called when a new watch call is handled in // send(). If an existing watcher is found, its expiry timer is stopped. If the // watchInfo to be added to the watchMap is found in the cache, the watcher // callback is immediately invoked. // // Caller should hold v2c.mu func (v2c *v2Client) checkCacheAndUpdateWatchMap(wi *watchInfo) { if existing := v2c.watchMap[wi.typeURL]; existing != nil { existing.cancel() } v2c.watchMap[wi.typeURL] = wi switch wi.typeURL { // We need to grab the lock inside of the expiryTimer's afterFunc because // we need to access the watchInfo, which is stored in the watchMap. 
case ldsURL: wi.expiryTimer = time.AfterFunc(defaultWatchExpiryTimeout, func() { v2c.mu.Lock() wi.ldsCallback(ldsUpdate{}, fmt.Errorf("xds: LDS target %s not found, watcher timeout", wi.target)) v2c.mu.Unlock() }) case rdsURL: routeName := wi.target[0] if cluster := v2c.rdsCache[routeName]; cluster != "" { var err error if v2c.watchMap[ldsURL] == nil { cluster = "" err = fmt.Errorf("xds: no LDS watcher found when handling RDS watch for route {%v} from cache", routeName) } v2c.logger.Infof("Resource with name %v, type %v found in cache", routeName, wi.typeURL) wi.rdsCallback(rdsUpdate{clusterName: cluster}, err) return } // Add the watch expiry timer only for new watches we don't find in // the cache, and return from here. wi.expiryTimer = time.AfterFunc(defaultWatchExpiryTimeout, func() { v2c.mu.Lock() wi.rdsCallback(rdsUpdate{}, fmt.Errorf("xds: RDS target %s not found, watcher timeout", wi.target)) v2c.mu.Unlock() }) case cdsURL: clusterName := wi.target[0] if update, ok := v2c.cdsCache[clusterName]; ok { var err error if v2c.watchMap[cdsURL] == nil { err = fmt.Errorf("xds: no CDS watcher found when handling CDS watch for cluster {%v} from cache", clusterName) } v2c.logger.Infof("Resource with name %v, type %v found in cache", clusterName, wi.typeURL) wi.cdsCallback(update, err) return } wi.expiryTimer = time.AfterFunc(defaultWatchExpiryTimeout, func() { v2c.mu.Lock() wi.cdsCallback(CDSUpdate{}, fmt.Errorf("xds: CDS target %s not found, watcher timeout", wi.target)) v2c.mu.Unlock() }) case edsURL: wi.expiryTimer = time.AfterFunc(defaultWatchExpiryTimeout, func() { v2c.mu.Lock() wi.edsCallback(nil, fmt.Errorf("xds: EDS target %s not found, watcher timeout", wi.target)) v2c.mu.Unlock() }) } } grpc-go-1.29.1/xds/internal/client/v2client_ack_test.go000066400000000000000000000346121365033716300230010ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package client import ( "fmt" "strconv" "testing" "time" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/golang/protobuf/proto" anypb "github.com/golang/protobuf/ptypes/any" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeserver" ) // compareXDSRequest reads requests from channel, compare it with want. func compareXDSRequest(ch *testutils.Channel, want *xdspb.DiscoveryRequest, version, nonce string) error { val, err := ch.Receive() if err != nil { return err } req := val.(*fakeserver.Request) if req.Err != nil { return fmt.Errorf("unexpected error from request: %v", req.Err) } wantClone := proto.Clone(want).(*xdspb.DiscoveryRequest) wantClone.VersionInfo = version wantClone.ResponseNonce = nonce if !cmp.Equal(req.Req, wantClone, cmp.Comparer(proto.Equal)) { return fmt.Errorf("received request different from want, diff: %s", cmp.Diff(req.Req, wantClone)) } return nil } func sendXDSRespWithVersion(ch chan<- *fakeserver.Response, respWithoutVersion *xdspb.DiscoveryResponse, version int) (nonce string) { respToSend := proto.Clone(respWithoutVersion).(*xdspb.DiscoveryResponse) respToSend.VersionInfo = strconv.Itoa(version) nonce = strconv.Itoa(int(time.Now().UnixNano())) respToSend.Nonce = nonce ch <- &fakeserver.Response{Resp: respToSend} return } // startXDS calls watch to send the first request. 
It then sends a good response // and checks for ack. func startXDS(t *testing.T, xdsname string, v2c *v2Client, reqChan *testutils.Channel, req *xdspb.DiscoveryRequest, preVersion string, preNonce string) *testutils.Channel { callbackCh := testutils.NewChannel() switch xdsname { case "LDS": v2c.watchLDS(goodLDSTarget1, func(u ldsUpdate, err error) { t.Logf("Received %s callback with ldsUpdate {%+v} and error {%v}", xdsname, u, err) callbackCh.Send(struct{}{}) }) case "RDS": v2c.watchRDS(goodRouteName1, func(u rdsUpdate, err error) { t.Logf("Received %s callback with ldsUpdate {%+v} and error {%v}", xdsname, u, err) callbackCh.Send(struct{}{}) }) case "CDS": v2c.watchCDS(goodClusterName1, func(u CDSUpdate, err error) { t.Logf("Received %s callback with ldsUpdate {%+v} and error {%v}", xdsname, u, err) callbackCh.Send(struct{}{}) }) case "EDS": v2c.watchEDS(goodEDSName, func(u *EDSUpdate, err error) { t.Logf("Received %s callback with ldsUpdate {%+v} and error {%v}", xdsname, u, err) callbackCh.Send(struct{}{}) }) } if err := compareXDSRequest(reqChan, req, preVersion, preNonce); err != nil { t.Fatalf("Failed to receive %s request: %v", xdsname, err) } t.Logf("FakeServer received %s request...", xdsname) return callbackCh } // sendGoodResp sends the good response, with the given version, and a random // nonce. // // It also waits and checks that the ack request contains the given version, and // the generated nonce. // // TODO: make this and other helper function either consistently return error, // and fatal() in the test code, or all call t.Fatal(), and mark them as // helper(). 
func sendGoodResp(t *testing.T, xdsname string, fakeServer *fakeserver.Server, version int, goodResp *xdspb.DiscoveryResponse, wantReq *xdspb.DiscoveryRequest, callbackCh *testutils.Channel) (nonce string) { nonce = sendXDSRespWithVersion(fakeServer.XDSResponseChan, goodResp, version) t.Logf("Good %s response pushed to fakeServer...", xdsname) if err := compareXDSRequest(fakeServer.XDSRequestChan, wantReq, strconv.Itoa(version), nonce); err != nil { t.Errorf("Failed to receive %s request: %v", xdsname, err) } t.Logf("Good %s response acked", xdsname) if _, err := callbackCh.Receive(); err != nil { t.Errorf("Timeout when expecting %s update", xdsname) } t.Logf("Good %s response callback executed", xdsname) return } // sendBadResp sends a bad response with the given version. This response will // be nacked, so we expect a request with the previous version (version-1). // // But the nonce in request should be the new nonce. func sendBadResp(t *testing.T, xdsname string, fakeServer *fakeserver.Server, version int, wantReq *xdspb.DiscoveryRequest) { var typeURL string switch xdsname { case "LDS": typeURL = ldsURL case "RDS": typeURL = rdsURL case "CDS": typeURL = cdsURL case "EDS": typeURL = edsURL } nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{{}}, TypeUrl: typeURL, }, version) t.Logf("Bad %s response pushed to fakeServer...", xdsname) if err := compareXDSRequest(fakeServer.XDSRequestChan, wantReq, strconv.Itoa(version-1), nonce); err != nil { t.Errorf("Failed to receive %s request: %v", xdsname, err) } t.Logf("Bad %s response nacked", xdsname) } // TestV2ClientAck verifies that valid responses are acked, and invalid ones // are nacked. // // This test also verifies the version for different types are independent. 
func (s) TestV2ClientAck(t *testing.T) { var ( versionLDS = 1000 versionRDS = 2000 versionCDS = 3000 versionEDS = 4000 ) fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() t.Log("Started xds v2Client...") // Start the watch, send a good response, and check for ack. cbLDS := startXDS(t, "LDS", v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") sendGoodResp(t, "LDS", fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS) versionLDS++ cbRDS := startXDS(t, "RDS", v2c, fakeServer.XDSRequestChan, goodRDSRequest, "", "") sendGoodResp(t, "RDS", fakeServer, versionRDS, goodRDSResponse1, goodRDSRequest, cbRDS) versionRDS++ cbCDS := startXDS(t, "CDS", v2c, fakeServer.XDSRequestChan, goodCDSRequest, "", "") sendGoodResp(t, "CDS", fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS) versionCDS++ cbEDS := startXDS(t, "EDS", v2c, fakeServer.XDSRequestChan, goodEDSRequest, "", "") sendGoodResp(t, "EDS", fakeServer, versionEDS, goodEDSResponse1, goodEDSRequest, cbEDS) versionEDS++ // Send a bad response, and check for nack. sendBadResp(t, "LDS", fakeServer, versionLDS, goodLDSRequest) versionLDS++ sendBadResp(t, "RDS", fakeServer, versionRDS, goodRDSRequest) versionRDS++ sendBadResp(t, "CDS", fakeServer, versionCDS, goodCDSRequest) versionCDS++ sendBadResp(t, "EDS", fakeServer, versionEDS, goodEDSRequest) versionEDS++ // send another good response, and check for ack, with the new version. 
sendGoodResp(t, "LDS", fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS) versionLDS++ sendGoodResp(t, "RDS", fakeServer, versionRDS, goodRDSResponse1, goodRDSRequest, cbRDS) versionRDS++ sendGoodResp(t, "CDS", fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS) versionCDS++ sendGoodResp(t, "EDS", fakeServer, versionEDS, goodEDSResponse1, goodEDSRequest, cbEDS) versionEDS++ } // Test when the first response is invalid, and is nacked, the nack requests // should have an empty version string. func (s) TestV2ClientAckFirstIsNack(t *testing.T) { var versionLDS = 1000 fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() t.Log("Started xds v2Client...") // Start the watch, send a good response, and check for ack. cbLDS := startXDS(t, "LDS", v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{{}}, TypeUrl: ldsURL, }, versionLDS) t.Logf("Bad response pushed to fakeServer...") // The expected version string is an empty string, because this is the first // response, and it's nacked (so there's no previous ack version). if err := compareXDSRequest(fakeServer.XDSRequestChan, goodLDSRequest, "", nonce); err != nil { t.Errorf("Failed to receive request: %v", err) } t.Logf("Bad response nacked") versionLDS++ sendGoodResp(t, "LDS", fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS) versionLDS++ } // Test when a nack is sent after a new watch, we nack with the previous acked // version (instead of resetting to empty string). 
func (s) TestV2ClientAckNackAfterNewWatch(t *testing.T) { var versionLDS = 1000 fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() t.Log("Started xds v2Client...") // Start the watch, send a good response, and check for ack. cbLDS := startXDS(t, "LDS", v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") nonce := sendGoodResp(t, "LDS", fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS) // Start a new watch. The version in the new request should be the version // from the previous response, thus versionLDS before ++. cbLDS = startXDS(t, "LDS", v2c, fakeServer.XDSRequestChan, goodLDSRequest, strconv.Itoa(versionLDS), nonce) versionLDS++ // This is an invalid response after the new watch. nonce = sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{{}}, TypeUrl: ldsURL, }, versionLDS) t.Logf("Bad response pushed to fakeServer...") // The expected version string is the previous acked version. if err := compareXDSRequest(fakeServer.XDSRequestChan, goodLDSRequest, strconv.Itoa(versionLDS-1), nonce); err != nil { t.Errorf("Failed to receive request: %v", err) } t.Logf("Bad response nacked") versionLDS++ sendGoodResp(t, "LDS", fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS) versionLDS++ } // TestV2ClientAckNewWatchAfterCancel verifies the new request for a new watch // after the previous watch is canceled, has the right version. func (s) TestV2ClientAckNewWatchAfterCancel(t *testing.T) { var versionCDS = 3000 fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() t.Log("Started xds v2Client...") // Start a CDS watch. 
callbackCh := testutils.NewChannel() cancel := v2c.watchCDS(goodClusterName1, func(u CDSUpdate, err error) { t.Logf("Received %s callback with ldsUpdate {%+v} and error {%v}", "CDS", u, err) callbackCh.Send(struct{}{}) }) if err := compareXDSRequest(fakeServer.XDSRequestChan, goodCDSRequest, "", ""); err != nil { t.Fatal(err) } t.Logf("FakeServer received %s request...", "CDS") // Send a good CDS response, this function waits for the ACK with the right // version. nonce := sendGoodResp(t, "CDS", fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, callbackCh) // Cancel the CDS watch, and start a new one. The new watch should have the // version from the response above. cancel() v2c.watchCDS(goodClusterName1, func(u CDSUpdate, err error) { t.Logf("Received %s callback with ldsUpdate {%+v} and error {%v}", "CDS", u, err) callbackCh.Send(struct{}{}) }) if err := compareXDSRequest(fakeServer.XDSRequestChan, goodCDSRequest, strconv.Itoa(versionCDS), nonce); err != nil { t.Fatalf("Failed to receive %s request: %v", "CDS", err) } versionCDS++ // Send a bad response with the next version. sendBadResp(t, "CDS", fakeServer, versionCDS, goodCDSRequest) versionCDS++ // send another good response, and check for ack, with the new version. sendGoodResp(t, "CDS", fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, callbackCh) versionCDS++ } // TestV2ClientAckCancelResponseRace verifies if the response and ACK request // race with cancel (which means the ACK request will not be sent on wire, // because there's no active watch), the nonce will still be updated, and the // new request with the new watch will have the correct nonce. func (s) TestV2ClientAckCancelResponseRace(t *testing.T) { var versionCDS = 3000 fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() t.Log("Started xds v2Client...") // Start a CDS watch. 
callbackCh := testutils.NewChannel() cancel := v2c.watchCDS(goodClusterName1, func(u CDSUpdate, err error) { t.Logf("Received %s callback with ldsUpdate {%+v} and error {%v}", "CDS", u, err) callbackCh.Send(struct{}{}) }) if err := compareXDSRequest(fakeServer.XDSRequestChan, goodCDSRequest, "", ""); err != nil { t.Fatalf("Failed to receive %s request: %v", "CDS", err) } t.Logf("FakeServer received %s request...", "CDS") // send another good response, and check for ack, with the new version. sendGoodResp(t, "CDS", fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, callbackCh) versionCDS++ // Cancel the watch before the next response is sent. This mimics the case // watch is canceled while response is on wire. cancel() // Send a good response. nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, goodCDSResponse1, versionCDS) t.Logf("Good %s response pushed to fakeServer...", "CDS") // Expect no ACK because watch was canceled. if req, err := fakeServer.XDSRequestChan.Receive(); err != testutils.ErrRecvTimeout { t.Fatalf("Got unexpected xds request after watch is canceled: %v", req) } // Start a new watch. The new watch should have the nonce from the response // above, and version from the first good response. v2c.watchCDS(goodClusterName1, func(u CDSUpdate, err error) { t.Logf("Received %s callback with ldsUpdate {%+v} and error {%v}", "CDS", u, err) callbackCh.Send(struct{}{}) }) if err := compareXDSRequest(fakeServer.XDSRequestChan, goodCDSRequest, strconv.Itoa(versionCDS-1), nonce); err != nil { t.Fatalf("Failed to receive %s request: %v", "CDS", err) } // Send a bad response with the next version. sendBadResp(t, "CDS", fakeServer, versionCDS, goodCDSRequest) versionCDS++ // send another good response, and check for ack, with the new version. 
sendGoodResp(t, "CDS", fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, callbackCh) versionCDS++ } grpc-go-1.29.1/xds/internal/client/v2client_test.go000066400000000000000000000414331365033716300221620ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package client import ( "errors" "testing" "time" "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeserver" xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" basepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" routepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" httppb "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2" listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v2" anypb "github.com/golang/protobuf/ptypes/any" structpb "github.com/golang/protobuf/ptypes/struct" ) const ( defaultTestTimeout = 1 * time.Second goodLDSTarget1 = "lds.target.good:1111" goodLDSTarget2 = "lds.target.good:2222" goodRouteName1 = "GoodRouteConfig1" goodRouteName2 = "GoodRouteConfig2" goodEDSName = "GoodClusterAssignment1" uninterestingRouteName = "UninterestingRouteName" uninterestingDomain = "uninteresting.domain" goodClusterName1 = "GoodClusterName1" goodClusterName2 = "GoodClusterName2" 
uninterestingClusterName = "UninterestingClusterName" httpConnManagerURL = "type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager" ) var ( goodNodeProto = &basepb.Node{ Id: "ENVOY_NODE_ID", Metadata: &structpb.Struct{ Fields: map[string]*structpb.Value{ "TRAFFICDIRECTOR_GRPC_HOSTNAME": { Kind: &structpb.Value_StringValue{StringValue: "trafficdirector"}, }, }, }, } goodLDSRequest = &xdspb.DiscoveryRequest{ Node: goodNodeProto, TypeUrl: ldsURL, ResourceNames: []string{goodLDSTarget1}, } goodRDSRequest = &xdspb.DiscoveryRequest{ Node: goodNodeProto, TypeUrl: rdsURL, ResourceNames: []string{goodRouteName1}, } goodCDSRequest = &xdspb.DiscoveryRequest{ Node: goodNodeProto, TypeUrl: cdsURL, ResourceNames: []string{goodClusterName1}, } goodEDSRequest = &xdspb.DiscoveryRequest{ Node: goodNodeProto, TypeUrl: edsURL, ResourceNames: []string{goodEDSName}, } goodHTTPConnManager1 = &httppb.HttpConnectionManager{ RouteSpecifier: &httppb.HttpConnectionManager_Rds{ Rds: &httppb.Rds{ RouteConfigName: goodRouteName1, }, }, } marshaledConnMgr1, _ = proto.Marshal(goodHTTPConnManager1) emptyHTTPConnManager = &httppb.HttpConnectionManager{ RouteSpecifier: &httppb.HttpConnectionManager_Rds{ Rds: &httppb.Rds{}, }, } emptyMarshaledConnMgr, _ = proto.Marshal(emptyHTTPConnManager) connMgrWithScopedRoutes = &httppb.HttpConnectionManager{ RouteSpecifier: &httppb.HttpConnectionManager_ScopedRoutes{}, } marshaledConnMgrWithScopedRoutes, _ = proto.Marshal(connMgrWithScopedRoutes) goodListener1 = &xdspb.Listener{ Name: goodLDSTarget1, ApiListener: &listenerpb.ApiListener{ ApiListener: &anypb.Any{ TypeUrl: httpConnManagerURL, Value: marshaledConnMgr1, }, }, } marshaledListener1, _ = proto.Marshal(goodListener1) goodListener2 = &xdspb.Listener{ Name: goodLDSTarget2, ApiListener: &listenerpb.ApiListener{ ApiListener: &anypb.Any{ TypeUrl: httpConnManagerURL, Value: marshaledConnMgr1, }, }, } marshaledListener2, _ = proto.Marshal(goodListener2) noAPIListener = 
&xdspb.Listener{Name: goodLDSTarget1} marshaledNoAPIListener, _ = proto.Marshal(noAPIListener) badAPIListener1 = &xdspb.Listener{ Name: goodLDSTarget1, ApiListener: &listenerpb.ApiListener{ ApiListener: &anypb.Any{ TypeUrl: httpConnManagerURL, Value: []byte{1, 2, 3, 4}, }, }, } badAPIListener2 = &xdspb.Listener{ Name: goodLDSTarget2, ApiListener: &listenerpb.ApiListener{ ApiListener: &anypb.Any{ TypeUrl: httpConnManagerURL, Value: []byte{1, 2, 3, 4}, }, }, } badlyMarshaledAPIListener2, _ = proto.Marshal(badAPIListener2) badResourceListener = &xdspb.Listener{ Name: goodLDSTarget1, ApiListener: &listenerpb.ApiListener{ ApiListener: &anypb.Any{ TypeUrl: ldsURL, Value: marshaledListener1, }, }, } listenerWithEmptyHTTPConnMgr = &xdspb.Listener{ Name: goodLDSTarget1, ApiListener: &listenerpb.ApiListener{ ApiListener: &anypb.Any{ TypeUrl: httpConnManagerURL, Value: emptyMarshaledConnMgr, }, }, } listenerWithScopedRoutesRouteConfig = &xdspb.Listener{ Name: goodLDSTarget1, ApiListener: &listenerpb.ApiListener{ ApiListener: &anypb.Any{ TypeUrl: httpConnManagerURL, Value: marshaledConnMgrWithScopedRoutes, }, }, } goodLDSResponse1 = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: ldsURL, Value: marshaledListener1, }, }, TypeUrl: ldsURL, } goodLDSResponse2 = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: ldsURL, Value: marshaledListener2, }, }, TypeUrl: ldsURL, } emptyLDSResponse = &xdspb.DiscoveryResponse{TypeUrl: ldsURL} badlyMarshaledLDSResponse = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: ldsURL, Value: []byte{1, 2, 3, 4}, }, }, TypeUrl: ldsURL, } badResourceTypeInLDSResponse = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: httpConnManagerURL, Value: marshaledConnMgr1, }, }, TypeUrl: ldsURL, } ldsResponseWithMultipleResources = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: ldsURL, Value: marshaledListener2, }, { TypeUrl: ldsURL, Value: marshaledListener1, }, }, TypeUrl: ldsURL, } 
noAPIListenerLDSResponse = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: ldsURL, Value: marshaledNoAPIListener, }, }, TypeUrl: ldsURL, } goodBadUglyLDSResponse = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: ldsURL, Value: marshaledListener2, }, { TypeUrl: ldsURL, Value: marshaledListener1, }, { TypeUrl: ldsURL, Value: badlyMarshaledAPIListener2, }, }, TypeUrl: ldsURL, } badlyMarshaledRDSResponse = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: rdsURL, Value: []byte{1, 2, 3, 4}, }, }, TypeUrl: rdsURL, } badResourceTypeInRDSResponse = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: httpConnManagerURL, Value: marshaledConnMgr1, }, }, TypeUrl: rdsURL, } emptyRouteConfig = &xdspb.RouteConfiguration{} marshaledEmptyRouteConfig, _ = proto.Marshal(emptyRouteConfig) noDomainsInRouteConfig = &xdspb.RouteConfiguration{ VirtualHosts: []*routepb.VirtualHost{{}}, } noVirtualHostsInRDSResponse = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: rdsURL, Value: marshaledEmptyRouteConfig, }, }, TypeUrl: rdsURL, } goodRouteConfig1 = &xdspb.RouteConfiguration{ Name: goodRouteName1, VirtualHosts: []*routepb.VirtualHost{ { Domains: []string{uninterestingDomain}, Routes: []*routepb.Route{ { Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: ""}}, Action: &routepb.Route_Route{ Route: &routepb.RouteAction{ ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: uninterestingClusterName}, }, }, }, }, }, { Domains: []string{goodLDSTarget1}, Routes: []*routepb.Route{ { Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: ""}}, Action: &routepb.Route_Route{ Route: &routepb.RouteAction{ ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: goodClusterName1}, }, }, }, }, }, }, } marshaledGoodRouteConfig1, _ = proto.Marshal(goodRouteConfig1) goodRouteConfig2 = &xdspb.RouteConfiguration{ Name: goodRouteName2, VirtualHosts: []*routepb.VirtualHost{ { Domains: 
[]string{uninterestingDomain}, Routes: []*routepb.Route{ { Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: ""}}, Action: &routepb.Route_Route{ Route: &routepb.RouteAction{ ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: uninterestingClusterName}, }, }, }, }, }, { Domains: []string{goodLDSTarget1}, Routes: []*routepb.Route{ { Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: ""}}, Action: &routepb.Route_Route{ Route: &routepb.RouteAction{ ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: goodClusterName2}, }, }, }, }, }, }, } marshaledGoodRouteConfig2, _ = proto.Marshal(goodRouteConfig2) uninterestingRouteConfig = &xdspb.RouteConfiguration{ Name: uninterestingRouteName, VirtualHosts: []*routepb.VirtualHost{ { Domains: []string{uninterestingDomain}, Routes: []*routepb.Route{ { Action: &routepb.Route_Route{ Route: &routepb.RouteAction{ ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: uninterestingClusterName}, }, }, }, }, }, }, } marshaledUninterestingRouteConfig, _ = proto.Marshal(uninterestingRouteConfig) goodRDSResponse1 = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: rdsURL, Value: marshaledGoodRouteConfig1, }, }, TypeUrl: rdsURL, } goodRDSResponse2 = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: rdsURL, Value: marshaledGoodRouteConfig2, }, }, TypeUrl: rdsURL, } uninterestingRDSResponse = &xdspb.DiscoveryResponse{ Resources: []*anypb.Any{ { TypeUrl: rdsURL, Value: marshaledUninterestingRouteConfig, }, }, TypeUrl: rdsURL, } ) // TestV2ClientBackoffAfterRecvError verifies if the v2Client backoffs when it // encounters a Recv error while receiving an LDS response. func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) { fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() // Override the v2Client backoff function with this, so that we can verify // that a backoff actually was triggerred. 
boCh := make(chan int, 1) clientBackoff := func(v int) time.Duration { boCh <- v return 0 } v2c := newV2Client(cc, goodNodeProto, clientBackoff, nil) defer v2c.close() t.Log("Started xds v2Client...") callbackCh := make(chan struct{}) v2c.watchLDS(goodLDSTarget1, func(u ldsUpdate, err error) { close(callbackCh) }) if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an LDS request") } t.Log("FakeServer received request...") fakeServer.XDSResponseChan <- &fakeserver.Response{Err: errors.New("RPC error")} t.Log("Bad LDS response pushed to fakeServer...") timer := time.NewTimer(defaultTestTimeout) select { case <-timer.C: t.Fatal("Timeout when expecting LDS update") case <-boCh: timer.Stop() t.Log("v2Client backed off before retrying...") case <-callbackCh: t.Fatal("Received unexpected LDS callback") } } // TestV2ClientRetriesAfterBrokenStream verifies the case where a stream // encountered a Recv() error, and is expected to send out xDS requests for // registered watchers once it comes back up again. func (s) TestV2ClientRetriesAfterBrokenStream(t *testing.T) { fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() t.Log("Started xds v2Client...") callbackCh := testutils.NewChannel() v2c.watchLDS(goodLDSTarget1, func(u ldsUpdate, err error) { t.Logf("Received LDS callback with ldsUpdate {%+v} and error {%v}", u, err) callbackCh.Send(struct{}{}) }) if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an LDS request") } t.Log("FakeServer received request...") fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} t.Log("Good LDS response pushed to fakeServer...") if _, err := callbackCh.Receive(); err != nil { t.Fatal("Timeout when expecting LDS update") } // Read the ack, so the next request is sent after stream re-creation. 
if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an LDS ACK") } fakeServer.XDSResponseChan <- &fakeserver.Response{Err: errors.New("RPC error")} t.Log("Bad LDS response pushed to fakeServer...") val, err := fakeServer.XDSRequestChan.Receive() if err == testutils.ErrRecvTimeout { t.Fatalf("Timeout expired when expecting LDS update") } gotRequest := val.(*fakeserver.Request) if !proto.Equal(gotRequest.Req, goodLDSRequest) { t.Fatalf("gotRequest: %+v, wantRequest: %+v", gotRequest.Req, goodLDSRequest) } } // TestV2ClientCancelWatch verifies that the registered watch callback is not // invoked if a response is received after the watcher is cancelled. func (s) TestV2ClientCancelWatch(t *testing.T) { fakeServer, cc, cleanup := startServerAndGetCC(t) defer cleanup() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() t.Log("Started xds v2Client...") callbackCh := testutils.NewChannel() cancelFunc := v2c.watchLDS(goodLDSTarget1, func(u ldsUpdate, err error) { t.Logf("Received LDS callback with ldsUpdate {%+v} and error {%v}", u, err) callbackCh.Send(struct{}{}) }) if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an LDS request") } t.Log("FakeServer received request...") fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} t.Log("Good LDS response pushed to fakeServer...") if _, err := callbackCh.Receive(); err != nil { t.Fatal("Timeout when expecting LDS update") } cancelFunc() fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} t.Log("Another good LDS response pushed to fakeServer...") if _, err := callbackCh.Receive(); err != testutils.ErrRecvTimeout { t.Fatalf("Watch callback invoked after the watcher was cancelled") } } func (s) TestV2ClientWatchWithoutStream(t *testing.T) { oldWatchExpiryTimeout := defaultWatchExpiryTimeout defaultWatchExpiryTimeout = 500 * 
time.Millisecond defer func() { defaultWatchExpiryTimeout = oldWatchExpiryTimeout }() fakeServer, sCleanup, err := fakeserver.StartServer() if err != nil { t.Fatalf("Failed to start fake xDS server: %v", err) } defer sCleanup() const scheme = "xds_client_test_whatever" rb := manual.NewBuilderWithScheme(scheme) rb.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: "no.such.server"}}}) cc, err := grpc.Dial(scheme+":///whatever", grpc.WithInsecure(), grpc.WithResolvers(rb)) if err != nil { t.Fatalf("Failed to dial ClientConn: %v", err) } defer cc.Close() v2c := newV2Client(cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) defer v2c.close() t.Log("Started xds v2Client...") callbackCh := testutils.NewChannel() // This watch is started when the xds-ClientConn is in Transient Failure, // and no xds stream is created. v2c.watchLDS(goodLDSTarget1, func(u ldsUpdate, err error) { t.Logf("Received LDS callback with ldsUpdate {%+v} and error {%v}", u, err) if err != nil { callbackCh.Send(err) } callbackCh.Send(u) }) // The watcher should receive an update, with a timeout error in it. if v, err := callbackCh.TimedReceive(time.Second); err != nil { t.Fatal("Timeout when expecting LDS update") } else if _, ok := v.(error); !ok { t.Fatalf("Expect an error from watcher, got %v", v) } // Send the real server address to the ClientConn, the stream should be // created, and the previous watch should be sent. 
rb.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: fakeServer.Address}}, }) if _, err := fakeServer.XDSRequestChan.Receive(); err != nil { t.Fatalf("Timeout expired when expecting an LDS request") } t.Log("FakeServer received request...") fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} t.Log("Good LDS response pushed to fakeServer...") if v, err := callbackCh.Receive(); err != nil { t.Fatal("Timeout when expecting LDS update") } else if _, ok := v.(ldsUpdate); !ok { t.Fatalf("Expect an LDS update from watcher, got %v", v) } } grpc-go-1.29.1/xds/internal/internal.go000066400000000000000000000032241365033716300177270ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package internal import ( "fmt" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" ) type clientID string // XDSClientID is the attributes key used to pass the address of the xdsClient // object shared between the resolver and the balancer. The xdsClient object is // created by the resolver and passed to the balancer. const XDSClientID = clientID("xdsClientID") // Locality is xds.Locality without XXX fields, so it can be used as map // keys. // // xds.Locality cannot be map keys because one of the XXX fields is a slice. // // This struct should only be used as map keys. Use the proto message directly // in all other places. // // TODO: rename to LocalityID. 
type Locality struct { Region string Zone string SubZone string } func (lamk Locality) String() string { return fmt.Sprintf("%s-%s-%s", lamk.Region, lamk.Zone, lamk.SubZone) } // ToProto convert Locality to the proto representation. func (lamk Locality) ToProto() *corepb.Locality { return &corepb.Locality{ Region: lamk.Region, Zone: lamk.Zone, SubZone: lamk.SubZone, } } grpc-go-1.29.1/xds/internal/internal_test.go000066400000000000000000000034241365033716300207700ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package internal import ( "reflect" "strings" "testing" "unicode" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "github.com/google/go-cmp/cmp" "google.golang.org/grpc/internal/grpctest" ) const ignorePrefix = "XXX_" type s struct { grpctest.Tester } func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } func ignore(name string) bool { if !unicode.IsUpper([]rune(name)[0]) { return true } return strings.HasPrefix(name, ignorePrefix) } // A reflection based test to make sure internal.Locality contains all the // fields (expect for XXX_) from the proto message. 
func (s) TestLocalityMatchProtoMessage(t *testing.T) { want1 := make(map[string]string) for ty, i := reflect.TypeOf(Locality{}), 0; i < ty.NumField(); i++ { f := ty.Field(i) if ignore(f.Name) { continue } want1[f.Name] = f.Type.Name() } want2 := make(map[string]string) for ty, i := reflect.TypeOf(corepb.Locality{}), 0; i < ty.NumField(); i++ { f := ty.Field(i) if ignore(f.Name) { continue } want2[f.Name] = f.Type.Name() } if diff := cmp.Diff(want1, want2); diff != "" { t.Fatalf("internal type and proto message have different fields: (-got +want):\n%+v", diff) } } grpc-go-1.29.1/xds/internal/resolver/000077500000000000000000000000001365033716300174245ustar00rootroot00000000000000grpc-go-1.29.1/xds/internal/resolver/logging.go000066400000000000000000000013631365033716300214040ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package resolver import ( "fmt" ) const prefix = "[xds-resolver %p] " func loggingPrefix(p *xdsResolver) string { return fmt.Sprintf(prefix, p) } grpc-go-1.29.1/xds/internal/resolver/xds_resolver.go000066400000000000000000000162331365033716300224770ustar00rootroot00000000000000/* * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Package resolver implements the xds resolver, that does LDS and RDS to find // the cluster to use. package resolver import ( "context" "fmt" "google.golang.org/grpc" "google.golang.org/grpc/attributes" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/resolver" xdsinternal "google.golang.org/grpc/xds/internal" xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/client/bootstrap" ) // xDS balancer name is xds_experimental while resolver scheme is // xds-experimental since "_" is not a valid character in the URL. const xdsScheme = "xds-experimental" // For overriding in unittests. var ( newXDSClient = func(opts xdsclient.Options) (xdsClientInterface, error) { return xdsclient.New(opts) } newXDSConfig = bootstrap.NewConfig ) func init() { resolver.Register(&xdsResolverBuilder{}) } type xdsResolverBuilder struct{} // Build helps implement the resolver.Builder interface. // // The xds bootstrap process is performed (and a new xds client is built) every // time an xds resolver is built. 
func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, rbo resolver.BuildOptions) (resolver.Resolver, error) { config, err := newXDSConfig() if err != nil { return nil, fmt.Errorf("xds: failed to read bootstrap file: %v", err) } r := &xdsResolver{ target: t, cc: cc, updateCh: make(chan suWithError, 1), } r.logger = grpclog.NewPrefixLogger(loggingPrefix(r)) r.logger.Infof("Creating resolver for target: %+v", t) if config.Creds == nil { // TODO: Once we start supporting a mechanism to register credential // types, a failure to find the credential type mentioned in the // bootstrap file should result in a failure, and not in using // credentials from the parent channel (passed through the // resolver.BuildOptions). config.Creds = r.defaultDialCreds(config.BalancerName, rbo) } var dopts []grpc.DialOption if rbo.Dialer != nil { dopts = []grpc.DialOption{grpc.WithContextDialer(rbo.Dialer)} } client, err := newXDSClient(xdsclient.Options{Config: *config, DialOpts: dopts, TargetName: t.Endpoint}) if err != nil { return nil, fmt.Errorf("xds: failed to create xds-client: %v", err) } r.client = client r.ctx, r.cancelCtx = context.WithCancel(context.Background()) cancelWatch := r.client.WatchService(r.target.Endpoint, r.handleServiceUpdate) r.logger.Infof("Watch started on resource name %v with xds-client %p", r.target.Endpoint, r.client) r.cancelWatch = func() { cancelWatch() r.logger.Infof("Watch cancel on resource name %v with xds-client %p", r.target.Endpoint, r.client) } go r.run() return r, nil } // defaultDialCreds builds a DialOption containing the credentials to be used // while talking to the xDS server (this is done only if the xds bootstrap // process does not return any credentials to use). If the parent channel // contains DialCreds, we use it as is. If it contains a CredsBundle, we use // just the transport credentials from the bundle. If we don't find any // credentials on the parent channel, we resort to using an insecure channel. 
func (r *xdsResolver) defaultDialCreds(balancerName string, rbo resolver.BuildOptions) grpc.DialOption { switch { case rbo.DialCreds != nil: if err := rbo.DialCreds.OverrideServerName(balancerName); err != nil { r.logger.Errorf("Failed to override server name in credentials: %v, using Insecure", err) return grpc.WithInsecure() } return grpc.WithTransportCredentials(rbo.DialCreds) case rbo.CredsBundle != nil: return grpc.WithTransportCredentials(rbo.CredsBundle.TransportCredentials()) default: r.logger.Warningf("No credentials available, using Insecure") return grpc.WithInsecure() } } // Name helps implement the resolver.Builder interface. func (*xdsResolverBuilder) Scheme() string { return xdsScheme } // xdsClientInterface contains methods from xdsClient.Client which are used by // the resolver. This will be faked out in unittests. type xdsClientInterface interface { WatchService(string, func(xdsclient.ServiceUpdate, error)) func() Close() } // suWithError wraps the ServiceUpdate and error received through a watch API // callback, so that it can pushed onto the update channel as a single entity. type suWithError struct { su xdsclient.ServiceUpdate err error } // xdsResolver implements the resolver.Resolver interface. // // It registers a watcher for ServiceConfig updates with the xdsClient object // (which performs LDS/RDS queries for the same), and passes the received // updates to the ClientConn. type xdsResolver struct { ctx context.Context cancelCtx context.CancelFunc target resolver.Target cc resolver.ClientConn logger *grpclog.PrefixLogger // The underlying xdsClient which performs all xDS requests and responses. client xdsClientInterface // A channel for the watch API callback to write service updates on to. The // updates are read by the run goroutine and passed on to the ClientConn. updateCh chan suWithError // cancelWatch is the function to cancel the watcher. 
cancelWatch func() } const jsonFormatSC = `{ "loadBalancingConfig":[ { "cds_experimental":{ "Cluster": "%s" } } ] }` // run is a long running goroutine which blocks on receiving service updates // and passes it on the ClientConn. func (r *xdsResolver) run() { for { select { case <-r.ctx.Done(): case update := <-r.updateCh: if update.err != nil { r.logger.Warningf("Watch error on resource %v from xds-client %p, %v", r.target.Endpoint, r.client, update.err) r.cc.ReportError(update.err) continue } sc := fmt.Sprintf(jsonFormatSC, update.su.Cluster) r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.target.Endpoint, r.client, sc) r.cc.UpdateState(resolver.State{ ServiceConfig: r.cc.ParseServiceConfig(sc), Attributes: attributes.New(xdsinternal.XDSClientID, r.client), }) } } } // handleServiceUpdate is the callback which handles service updates. It writes // the received update to the update channel, which is picked by the run // goroutine. func (r *xdsResolver) handleServiceUpdate(su xdsclient.ServiceUpdate, err error) { if r.ctx.Err() != nil { // Do not pass updates to the ClientConn once the resolver is closed. return } r.updateCh <- suWithError{su, err} } // ResolveNow is a no-op at this point. func (*xdsResolver) ResolveNow(o resolver.ResolveNowOptions) {} // Close closes the resolver, and also closes the underlying xdsClient. func (r *xdsResolver) Close() { r.cancelWatch() r.client.Close() r.cancelCtx() r.logger.Infof("Shutdown") } grpc-go-1.29.1/xds/internal/resolver/xds_resolver_test.go000066400000000000000000000301331365033716300235310ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package resolver import ( "context" "errors" "fmt" "net" "testing" "google.golang.org/grpc" "google.golang.org/grpc/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" xdsinternal "google.golang.org/grpc/xds/internal" _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/client/bootstrap" "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" ) const ( targetStr = "target" cluster = "cluster" balancerName = "dummyBalancer" ) var ( validConfig = bootstrap.Config{ BalancerName: balancerName, Creds: grpc.WithInsecure(), NodeProto: &corepb.Node{}, } target = resolver.Target{Endpoint: targetStr} ) // testClientConn is a fake implemetation of resolver.ClientConn. All is does // is to store the state received from the resolver locally and signal that // event through a channel. 
type testClientConn struct { resolver.ClientConn stateCh *testutils.Channel errorCh *testutils.Channel } func (t *testClientConn) UpdateState(s resolver.State) { t.stateCh.Send(s) } func (t *testClientConn) ReportError(err error) { t.errorCh.Send(err) } func (t *testClientConn) ParseServiceConfig(jsonSC string) *serviceconfig.ParseResult { return internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(jsonSC) } func newTestClientConn() *testClientConn { return &testClientConn{ stateCh: testutils.NewChannel(), errorCh: testutils.NewChannel(), } } func getXDSClientMakerFunc(wantOpts xdsclient.Options) func(xdsclient.Options) (xdsClientInterface, error) { return func(gotOpts xdsclient.Options) (xdsClientInterface, error) { if gotOpts.Config.BalancerName != wantOpts.Config.BalancerName { return nil, fmt.Errorf("got balancerName: %s, want: %s", gotOpts.Config.BalancerName, wantOpts.Config.BalancerName) } // We cannot compare two DialOption objects to see if they are equal // because each of these is a function pointer. So, the only thing we // can do here is to check if the got option is nil or not based on // what the want option is. We should be able to do extensive // credential testing in e2e tests. if (gotOpts.Config.Creds != nil) != (wantOpts.Config.Creds != nil) { return nil, fmt.Errorf("got len(creds): %s, want: %s", gotOpts.Config.Creds, wantOpts.Config.Creds) } if len(gotOpts.DialOpts) != len(wantOpts.DialOpts) { return nil, fmt.Errorf("got len(DialOpts): %v, want: %v", len(gotOpts.DialOpts), len(wantOpts.DialOpts)) } return fakeclient.NewClient(), nil } } func errorDialer(_ context.Context, _ string) (net.Conn, error) { return nil, errors.New("dial error") } // TestResolverBuilder tests the xdsResolverBuilder's Build method with // different parameters. 
func TestResolverBuilder(t *testing.T) { tests := []struct { name string rbo resolver.BuildOptions config bootstrap.Config xdsClientFunc func(xdsclient.Options) (xdsClientInterface, error) wantErr bool }{ { name: "empty-config", rbo: resolver.BuildOptions{}, config: bootstrap.Config{}, wantErr: true, }, { name: "no-balancer-name-in-config", rbo: resolver.BuildOptions{}, config: bootstrap.Config{ Creds: grpc.WithInsecure(), NodeProto: &corepb.Node{}, }, wantErr: true, }, { name: "no-creds-in-config", rbo: resolver.BuildOptions{}, config: bootstrap.Config{ BalancerName: balancerName, NodeProto: &corepb.Node{}, }, xdsClientFunc: getXDSClientMakerFunc(xdsclient.Options{Config: validConfig}), wantErr: false, }, { name: "error-dialer-in-rbo", rbo: resolver.BuildOptions{Dialer: errorDialer}, config: validConfig, xdsClientFunc: getXDSClientMakerFunc(xdsclient.Options{ Config: validConfig, DialOpts: []grpc.DialOption{grpc.WithContextDialer(errorDialer)}, }), wantErr: false, }, { name: "simple-good", rbo: resolver.BuildOptions{}, config: validConfig, xdsClientFunc: getXDSClientMakerFunc(xdsclient.Options{Config: validConfig}), wantErr: false, }, { name: "newXDSClient-throws-error", rbo: resolver.BuildOptions{}, config: validConfig, xdsClientFunc: func(_ xdsclient.Options) (xdsClientInterface, error) { return nil, errors.New("newXDSClient-throws-error") }, wantErr: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { // Fake out the bootstrap process by providing our own config. oldConfigMaker := newXDSConfig newXDSConfig = func() (*bootstrap.Config, error) { if test.config.BalancerName == "" { return nil, fmt.Errorf("no balancer name found in config") } return &test.config, nil } // Fake out the xdsClient creation process by providing a fake. 
oldClientMaker := newXDSClient newXDSClient = test.xdsClientFunc defer func() { newXDSConfig = oldConfigMaker newXDSClient = oldClientMaker }() builder := resolver.Get(xdsScheme) if builder == nil { t.Fatalf("resolver.Get(%v) returned nil", xdsScheme) } r, err := builder.Build(target, newTestClientConn(), test.rbo) if (err != nil) != test.wantErr { t.Fatalf("builder.Build(%v) returned err: %v, wantErr: %v", target, err, test.wantErr) } if err != nil { // This is the case where we expect an error and got it. return } r.Close() }) } } type setupOpts struct { config *bootstrap.Config xdsClientFunc func(xdsclient.Options) (xdsClientInterface, error) } func testSetup(t *testing.T, opts setupOpts) (*xdsResolver, *testClientConn, func()) { t.Helper() oldConfigMaker := newXDSConfig newXDSConfig = func() (*bootstrap.Config, error) { return opts.config, nil } oldClientMaker := newXDSClient newXDSClient = opts.xdsClientFunc cancel := func() { newXDSConfig = oldConfigMaker newXDSClient = oldClientMaker } builder := resolver.Get(xdsScheme) if builder == nil { t.Fatalf("resolver.Get(%v) returned nil", xdsScheme) } tcc := newTestClientConn() r, err := builder.Build(target, tcc, resolver.BuildOptions{}) if err != nil { t.Fatalf("builder.Build(%v) returned err: %v", target, err) } return r.(*xdsResolver), tcc, cancel } // waitForWatchService waits for the WatchService method to be called on the // xdsClient within a reasonable amount of time, and also verifies that the // watch is called with the expected target. 
func waitForWatchService(t *testing.T, xdsC *fakeclient.Client, wantTarget string) { t.Helper() gotTarget, err := xdsC.WaitForWatchService() if err != nil { t.Fatalf("xdsClient.WatchService failed with error: %v", err) } if gotTarget != wantTarget { t.Fatalf("xdsClient.WatchService() called with target: %v, want %v", gotTarget, wantTarget) } } // TestXDSResolverWatchCallbackAfterClose tests the case where a service update // from the underlying xdsClient is received after the resolver is closed. func TestXDSResolverWatchCallbackAfterClose(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ config: &validConfig, xdsClientFunc: func(_ xdsclient.Options) (xdsClientInterface, error) { return xdsC, nil }, }) defer cancel() waitForWatchService(t, xdsC, targetStr) // Call the watchAPI callback after closing the resolver, and make sure no // update is triggerred on the ClientConn. xdsR.Close() xdsC.InvokeWatchServiceCallback(cluster, nil) if gotVal, gotErr := tcc.stateCh.Receive(); gotErr != testutils.ErrRecvTimeout { t.Fatalf("ClientConn.UpdateState called after xdsResolver is closed: %v", gotVal) } } // TestXDSResolverBadServiceUpdate tests the case the xdsClient returns a bad // service update. func TestXDSResolverBadServiceUpdate(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ config: &validConfig, xdsClientFunc: func(_ xdsclient.Options) (xdsClientInterface, error) { return xdsC, nil }, }) defer func() { cancel() xdsR.Close() }() waitForWatchService(t, xdsC, targetStr) // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. 
suErr := errors.New("bad serviceupdate") xdsC.InvokeWatchServiceCallback("", suErr) if gotErrVal, gotErr := tcc.errorCh.Receive(); gotErr != nil || gotErrVal != suErr { t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr) } } // TestXDSResolverGoodServiceUpdate tests the happy case where the resolver // gets a good service update from the xdsClient. func TestXDSResolverGoodServiceUpdate(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ config: &validConfig, xdsClientFunc: func(_ xdsclient.Options) (xdsClientInterface, error) { return xdsC, nil }, }) defer func() { cancel() xdsR.Close() }() waitForWatchService(t, xdsC, targetStr) // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. xdsC.InvokeWatchServiceCallback(cluster, nil) gotState, err := tcc.stateCh.Receive() if err != nil { t.Fatalf("ClientConn.UpdateState returned error: %v", err) } rState := gotState.(resolver.State) if gotClient := rState.Attributes.Value(xdsinternal.XDSClientID); gotClient != xdsC { t.Fatalf("ClientConn.UpdateState got xdsClient: %v, want %v", gotClient, xdsC) } if err := rState.ServiceConfig.Err; err != nil { t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) } } // TestXDSResolverUpdates tests the cases where the resolver gets a good update // after an error, and an error after the good update. func TestXDSResolverGoodUpdateAfterError(t *testing.T) { xdsC := fakeclient.NewClient() xdsR, tcc, cancel := testSetup(t, setupOpts{ config: &validConfig, xdsClientFunc: func(_ xdsclient.Options) (xdsClientInterface, error) { return xdsC, nil }, }) defer func() { cancel() xdsR.Close() }() waitForWatchService(t, xdsC, targetStr) // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. 
suErr := errors.New("bad serviceupdate") xdsC.InvokeWatchServiceCallback("", suErr) if gotErrVal, gotErr := tcc.errorCh.Receive(); gotErr != nil || gotErrVal != suErr { t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr) } // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. xdsC.InvokeWatchServiceCallback(cluster, nil) gotState, err := tcc.stateCh.Receive() if err != nil { t.Fatalf("ClientConn.UpdateState returned error: %v", err) } rState := gotState.(resolver.State) if gotClient := rState.Attributes.Value(xdsinternal.XDSClientID); gotClient != xdsC { t.Fatalf("ClientConn.UpdateState got xdsClient: %v, want %v", gotClient, xdsC) } if err := rState.ServiceConfig.Err; err != nil { t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) } // Invoke the watchAPI callback with a bad service update and wait for the // ReportError method to be called on the ClientConn. suErr2 := errors.New("bad serviceupdate 2") xdsC.InvokeWatchServiceCallback("", suErr2) if gotErrVal, gotErr := tcc.errorCh.Receive(); gotErr != nil || gotErrVal != suErr2 { t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr2) } } grpc-go-1.29.1/xds/internal/testutils/000077500000000000000000000000001365033716300176235ustar00rootroot00000000000000grpc-go-1.29.1/xds/internal/testutils/balancer.go000066400000000000000000000235661365033716300217350ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. * */ package testutils import ( "context" "fmt" "sync" "testing" envoy_api_v2_core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" "google.golang.org/grpc" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/wrr" "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/internal" ) const testSubConnsCount = 16 // TestSubConns contains a list of SubConns to be used in tests. var TestSubConns []*TestSubConn func init() { for i := 0; i < testSubConnsCount; i++ { TestSubConns = append(TestSubConns, &TestSubConn{ id: fmt.Sprintf("sc%d", i), }) } } // TestSubConn implements the SubConn interface, to be used in tests. type TestSubConn struct { id string } // UpdateAddresses panics. func (tsc *TestSubConn) UpdateAddresses([]resolver.Address) { panic("not implemented") } // Connect is a no-op. func (tsc *TestSubConn) Connect() {} // String implements stringer to print human friendly error message. func (tsc *TestSubConn) String() string { return tsc.id } // TestClientConn is a mock balancer.ClientConn used in tests. type TestClientConn struct { t *testing.T // For logging only. NewSubConnAddrsCh chan []resolver.Address // the last 10 []Address to create subconn. NewSubConnCh chan balancer.SubConn // the last 10 subconn created. RemoveSubConnCh chan balancer.SubConn // the last 10 subconn removed. NewPickerCh chan balancer.V2Picker // the last picker updated. NewStateCh chan connectivity.State // the last state. subConnIdx int } // NewTestClientConn creates a TestClientConn. 
func NewTestClientConn(t *testing.T) *TestClientConn { return &TestClientConn{ t: t, NewSubConnAddrsCh: make(chan []resolver.Address, 10), NewSubConnCh: make(chan balancer.SubConn, 10), RemoveSubConnCh: make(chan balancer.SubConn, 10), NewPickerCh: make(chan balancer.V2Picker, 1), NewStateCh: make(chan connectivity.State, 1), } } // NewSubConn creates a new SubConn. func (tcc *TestClientConn) NewSubConn(a []resolver.Address, o balancer.NewSubConnOptions) (balancer.SubConn, error) { sc := TestSubConns[tcc.subConnIdx] tcc.subConnIdx++ tcc.t.Logf("testClientConn: NewSubConn(%v, %+v) => %s", a, o, sc) select { case tcc.NewSubConnAddrsCh <- a: default: } select { case tcc.NewSubConnCh <- sc: default: } return sc, nil } // RemoveSubConn removes the SubConn. func (tcc *TestClientConn) RemoveSubConn(sc balancer.SubConn) { tcc.t.Logf("testClientCOnn: RemoveSubConn(%p)", sc) select { case tcc.RemoveSubConnCh <- sc: default: } } // UpdateBalancerState implements balancer.Balancer API. It will be removed when // switching to the new balancer interface. func (tcc *TestClientConn) UpdateBalancerState(s connectivity.State, p balancer.Picker) { tcc.t.Fatal("not implemented") } // UpdateState updates connectivity state and picker. func (tcc *TestClientConn) UpdateState(bs balancer.State) { tcc.t.Logf("testClientConn: UpdateState(%v)", bs) select { case <-tcc.NewStateCh: default: } tcc.NewStateCh <- bs.ConnectivityState select { case <-tcc.NewPickerCh: default: } tcc.NewPickerCh <- bs.Picker } // ResolveNow panics. func (tcc *TestClientConn) ResolveNow(resolver.ResolveNowOptions) { panic("not implemented") } // Target panics. func (tcc *TestClientConn) Target() string { panic("not implemented") } // TestServerLoad is testing Load for testing LRS. type TestServerLoad struct { Name string D float64 } // TestLoadStore is a load store to be used in tests. 
type TestLoadStore struct { CallsStarted []internal.Locality CallsEnded []internal.Locality CallsCost []TestServerLoad } // NewTestLoadStore creates a new TestLoadStore. func NewTestLoadStore() *TestLoadStore { return &TestLoadStore{} } // CallDropped records a call dropped. func (*TestLoadStore) CallDropped(category string) { panic("not implemented") } // CallStarted records a call started. func (tls *TestLoadStore) CallStarted(l internal.Locality) { tls.CallsStarted = append(tls.CallsStarted, l) } // CallFinished records a call finished. func (tls *TestLoadStore) CallFinished(l internal.Locality, err error) { tls.CallsEnded = append(tls.CallsEnded, l) } // CallServerLoad records a call server load. func (tls *TestLoadStore) CallServerLoad(l internal.Locality, name string, d float64) { tls.CallsCost = append(tls.CallsCost, TestServerLoad{Name: name, D: d}) } // ReportTo panics. func (*TestLoadStore) ReportTo(ctx context.Context, cc *grpc.ClientConn, clusterName string, node *envoy_api_v2_core.Node) { panic("not implemented") } // IsRoundRobin checks whether f's return value is roundrobin of elements from // want. But it doesn't check for the order. Note that want can contain // duplicate items, which makes it weight-round-robin. // // Step 1. the return values of f should form a permutation of all elements in // want, but not necessary in the same order. E.g. if want is {a,a,b}, the check // fails if f returns: // - {a,a,a}: third a is returned before b // - {a,b,b}: second b is returned before the second a // // If error is found in this step, the returned error contains only the first // iteration until where it goes wrong. // // Step 2. the return values of f should be repetitions of the same permutation. // E.g. if want is {a,a,b}, the check failes if f returns: // - {a,b,a,b,a,a}: though it satisfies step 1, the second iteration is not // repeating the first iteration. 
// // If error is found in this step, the returned error contains the first // iteration + the second iteration until where it goes wrong. func IsRoundRobin(want []balancer.SubConn, f func() balancer.SubConn) error { wantSet := make(map[balancer.SubConn]int) // SubConn -> count, for weighted RR. for _, sc := range want { wantSet[sc]++ } // The first iteration: makes sure f's return values form a permutation of // elements in want. // // Also keep the returns values in a slice, so we can compare the order in // the second iteration. gotSliceFirstIteration := make([]balancer.SubConn, 0, len(want)) for range want { got := f() gotSliceFirstIteration = append(gotSliceFirstIteration, got) wantSet[got]-- if wantSet[got] < 0 { return fmt.Errorf("non-roundrobin want: %v, result: %v", want, gotSliceFirstIteration) } } // The second iteration should repeat the first iteration. var gotSliceSecondIteration []balancer.SubConn for i := 0; i < 2; i++ { for _, w := range gotSliceFirstIteration { g := f() gotSliceSecondIteration = append(gotSliceSecondIteration, g) if w != g { return fmt.Errorf("non-roundrobin, first iter: %v, second iter: %v", gotSliceFirstIteration, gotSliceSecondIteration) } } } return nil } // testClosure is a test util for TestIsRoundRobin. type testClosure struct { r []balancer.SubConn i int } func (tc *testClosure) next() balancer.SubConn { ret := tc.r[tc.i] tc.i = (tc.i + 1) % len(tc.r) return ret } func init() { balancer.Register(&TestConstBalancerBuilder{}) } // ErrTestConstPicker is error returned by test const picker. var ErrTestConstPicker = fmt.Errorf("const picker error") // TestConstBalancerBuilder is a balancer builder for tests. type TestConstBalancerBuilder struct{} // Build builds a test const balancer. func (*TestConstBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { return &testConstBalancer{cc: cc} } // Name returns test-const-balancer name. 
func (*TestConstBalancerBuilder) Name() string { return "test-const-balancer" } type testConstBalancer struct { cc balancer.ClientConn } func (tb *testConstBalancer) HandleSubConnStateChange(sc balancer.SubConn, state connectivity.State) { tb.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: &TestConstPicker{Err: ErrTestConstPicker}}) } func (tb *testConstBalancer) HandleResolvedAddrs(a []resolver.Address, err error) { if len(a) == 0 { return } tb.cc.NewSubConn(a, balancer.NewSubConnOptions{}) } func (*testConstBalancer) Close() { } // TestConstPicker is a const picker for tests. type TestConstPicker struct { Err error SC balancer.SubConn } // Pick returns the const SubConn or the error. func (tcp *TestConstPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { if tcp.Err != nil { return balancer.PickResult{}, tcp.Err } return balancer.PickResult{SubConn: tcp.SC}, nil } // testWRR is a deterministic WRR implementation. // // The real implementation does random WRR. testWRR makes the balancer behavior // deterministic and easier to test. // // With {a: 2, b: 3}, the Next() results will be {a, a, b, b, b}. type testWRR struct { itemsWithWeight []struct { item interface{} weight int64 } length int mu sync.Mutex idx int // The index of the item that will be picked count int64 // The number of times the current item has been picked. } // NewTestWRR return a WRR for testing. It's deterministic instead random. 
func NewTestWRR() wrr.WRR { return &testWRR{} } func (twrr *testWRR) Add(item interface{}, weight int64) { twrr.itemsWithWeight = append(twrr.itemsWithWeight, struct { item interface{} weight int64 }{item: item, weight: weight}) twrr.length++ } func (twrr *testWRR) Next() interface{} { twrr.mu.Lock() iww := twrr.itemsWithWeight[twrr.idx] twrr.count++ if twrr.count >= iww.weight { twrr.idx = (twrr.idx + 1) % twrr.length twrr.count = 0 } twrr.mu.Unlock() return iww.item } grpc-go-1.29.1/xds/internal/testutils/balancer_test.go000066400000000000000000000067741365033716300227760ustar00rootroot00000000000000/* * * Copyright 2020 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ package testutils import ( "testing" "google.golang.org/grpc/balancer" ) func TestIsRoundRobin(t *testing.T) { var ( sc1 = TestSubConns[0] sc2 = TestSubConns[1] sc3 = TestSubConns[2] ) testCases := []struct { desc string want []balancer.SubConn got []balancer.SubConn pass bool }{ { desc: "0 element", want: []balancer.SubConn{}, got: []balancer.SubConn{}, pass: true, }, { desc: "1 element RR", want: []balancer.SubConn{sc1}, got: []balancer.SubConn{sc1, sc1, sc1, sc1}, pass: true, }, { desc: "1 element not RR", want: []balancer.SubConn{sc1}, got: []balancer.SubConn{sc1, sc2, sc1}, pass: false, }, { desc: "2 elements RR", want: []balancer.SubConn{sc1, sc2}, got: []balancer.SubConn{sc1, sc2, sc1, sc2, sc1, sc2}, pass: true, }, { desc: "2 elements RR different order from want", want: []balancer.SubConn{sc2, sc1}, got: []balancer.SubConn{sc1, sc2, sc1, sc2, sc1, sc2}, pass: true, }, { desc: "2 elements RR not RR, mistake in first iter", want: []balancer.SubConn{sc1, sc2}, got: []balancer.SubConn{sc1, sc1, sc1, sc2, sc1, sc2}, pass: false, }, { desc: "2 elements RR not RR, mistake in second iter", want: []balancer.SubConn{sc1, sc2}, got: []balancer.SubConn{sc1, sc2, sc1, sc1, sc1, sc2}, pass: false, }, { desc: "2 elements weighted RR", want: []balancer.SubConn{sc1, sc1, sc2}, got: []balancer.SubConn{sc1, sc1, sc2, sc1, sc1, sc2}, pass: true, }, { desc: "2 elements weighted RR different order", want: []balancer.SubConn{sc1, sc1, sc2}, got: []balancer.SubConn{sc1, sc2, sc1, sc1, sc2, sc1}, pass: true, }, { desc: "3 elements RR", want: []balancer.SubConn{sc1, sc2, sc3}, got: []balancer.SubConn{sc1, sc2, sc3, sc1, sc2, sc3, sc1, sc2, sc3}, pass: true, }, { desc: "3 elements RR different order", want: []balancer.SubConn{sc1, sc2, sc3}, got: []balancer.SubConn{sc3, sc2, sc1, sc3, sc2, sc1}, pass: true, }, { desc: "3 elements weighted RR", want: []balancer.SubConn{sc1, sc1, sc1, sc2, sc2, sc3}, got: []balancer.SubConn{sc1, sc2, sc3, sc1, sc2, sc1, sc1, sc2, sc3, sc1, sc2, 
sc1}, pass: true, }, { desc: "3 elements weighted RR not RR, mistake in first iter", want: []balancer.SubConn{sc1, sc1, sc1, sc2, sc2, sc3}, got: []balancer.SubConn{sc1, sc2, sc1, sc1, sc2, sc1, sc1, sc2, sc3, sc1, sc2, sc1}, pass: false, }, { desc: "3 elements weighted RR not RR, mistake in second iter", want: []balancer.SubConn{sc1, sc1, sc1, sc2, sc2, sc3}, got: []balancer.SubConn{sc1, sc2, sc3, sc1, sc2, sc1, sc1, sc1, sc3, sc1, sc2, sc1}, pass: false, }, } for _, tC := range testCases { t.Run(tC.desc, func(t *testing.T) { err := IsRoundRobin(tC.want, (&testClosure{r: tC.got}).next) if err == nil != tC.pass { t.Errorf("want pass %v, want %v, got err %v", tC.pass, tC.want, err) } }) } } grpc-go-1.29.1/xds/internal/testutils/channel.go000066400000000000000000000043511365033716300215650ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Package testutils provides utility types, for use in xds tests. package testutils import ( "errors" "time" ) // ErrRecvTimeout is an error to indicate that a receive operation on the // channel timed out. var ErrRecvTimeout = errors.New("timed out when waiting for value on channel") const ( // DefaultChanRecvTimeout is the default timeout for receive operations on the // underlying channel. DefaultChanRecvTimeout = 1 * time.Second // DefaultChanBufferSize is the default buffer size of the underlying channel. 
DefaultChanBufferSize = 1 ) // Channel wraps a generic channel and provides a timed receive operation. type Channel struct { ch chan interface{} } // Send sends value on the underlying channel. func (cwt *Channel) Send(value interface{}) { cwt.ch <- value } // TimedReceive returns the value received on the underlying channel, or // ErrRecvTimeout if timeout amount of time elapsed. func (cwt *Channel) TimedReceive(timeout time.Duration) (interface{}, error) { timer := time.NewTimer(timeout) select { case <-timer.C: return nil, ErrRecvTimeout case got := <-cwt.ch: timer.Stop() return got, nil } } // Receive returns the value received on the underlying channel, or // ErrRecvTimeout if DefaultChanRecvTimeout amount of time elapses. func (cwt *Channel) Receive() (interface{}, error) { return cwt.TimedReceive(DefaultChanRecvTimeout) } // NewChannel returns a new Channel. func NewChannel() *Channel { return NewChannelWithSize(DefaultChanBufferSize) } // NewChannelWithSize returns a new Channel with a buffer of bufSize. func NewChannelWithSize(bufSize int) *Channel { return &Channel{ch: make(chan interface{}, bufSize)} } grpc-go-1.29.1/xds/internal/testutils/fakeclient/000077500000000000000000000000001365033716300217305ustar00rootroot00000000000000grpc-go-1.29.1/xds/internal/testutils/fakeclient/client.go000066400000000000000000000142151365033716300235400ustar00rootroot00000000000000/* * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ // Package fakeclient provides a fake implementation of an xDS client. package fakeclient import ( "sync" "google.golang.org/grpc/xds/internal/balancer/lrs" xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/testutils" ) // Client is a fake implementation of an xds client. It exposes a bunch of // channels to signal the occurrence of various events. type Client struct { name string suWatchCh *testutils.Channel cdsWatchCh *testutils.Channel edsWatchCh *testutils.Channel suCancelCh *testutils.Channel cdsCancelCh *testutils.Channel edsCancelCh *testutils.Channel loadReportCh *testutils.Channel closeCh *testutils.Channel mu sync.Mutex serviceCb func(xdsclient.ServiceUpdate, error) cdsCb func(xdsclient.CDSUpdate, error) edsCb func(*xdsclient.EDSUpdate, error) } // WatchService registers a LDS/RDS watch. func (xdsC *Client) WatchService(target string, callback func(xdsclient.ServiceUpdate, error)) func() { xdsC.mu.Lock() defer xdsC.mu.Unlock() xdsC.serviceCb = callback xdsC.suWatchCh.Send(target) return func() { xdsC.suCancelCh.Send(nil) } } // WaitForWatchService waits for WatchService to be invoked on this client // within a reasonable timeout, and returns the serviceName being watched. func (xdsC *Client) WaitForWatchService() (string, error) { val, err := xdsC.suWatchCh.Receive() if err != nil { return "", err } return val.(string), err } // InvokeWatchServiceCallback invokes the registered service watch callback. func (xdsC *Client) InvokeWatchServiceCallback(cluster string, err error) { xdsC.mu.Lock() defer xdsC.mu.Unlock() xdsC.serviceCb(xdsclient.ServiceUpdate{Cluster: cluster}, err) } // WatchCluster registers a CDS watch. 
func (xdsC *Client) WatchCluster(clusterName string, callback func(xdsclient.CDSUpdate, error)) func() { xdsC.mu.Lock() defer xdsC.mu.Unlock() xdsC.cdsCb = callback xdsC.cdsWatchCh.Send(clusterName) return func() { xdsC.cdsCancelCh.Send(nil) } } // WaitForWatchCluster waits for WatchCluster to be invoked on this client // within a reasonable timeout, and returns the clusterName being watched. func (xdsC *Client) WaitForWatchCluster() (string, error) { val, err := xdsC.cdsWatchCh.Receive() if err != nil { return "", err } return val.(string), err } // InvokeWatchClusterCallback invokes the registered cdsWatch callback. func (xdsC *Client) InvokeWatchClusterCallback(update xdsclient.CDSUpdate, err error) { xdsC.mu.Lock() defer xdsC.mu.Unlock() xdsC.cdsCb(update, err) } // WaitForCancelClusterWatch waits for a CDS watch to be cancelled within a // reasonable timeout, and returns testutils.ErrRecvTimeout otherwise. func (xdsC *Client) WaitForCancelClusterWatch() error { _, err := xdsC.cdsCancelCh.Receive() return err } // WatchEndpoints registers an EDS watch for provided clusterName. func (xdsC *Client) WatchEndpoints(clusterName string, callback func(*xdsclient.EDSUpdate, error)) (cancel func()) { xdsC.mu.Lock() defer xdsC.mu.Unlock() xdsC.edsCb = callback xdsC.edsWatchCh.Send(clusterName) return func() { xdsC.edsCancelCh.Send(nil) } } // WaitForWatchEDS waits for WatchEndpoints to be invoked on this client within a // reasonable timeout, and returns the clusterName being watched. func (xdsC *Client) WaitForWatchEDS() (string, error) { val, err := xdsC.edsWatchCh.Receive() if err != nil { return "", err } return val.(string), err } // InvokeWatchEDSCallback invokes the registered edsWatch callback. func (xdsC *Client) InvokeWatchEDSCallback(update *xdsclient.EDSUpdate, err error) { xdsC.mu.Lock() defer xdsC.mu.Unlock() xdsC.edsCb(update, err) } // ReportLoadArgs wraps the arguments passed to ReportLoad. 
type ReportLoadArgs struct { // Server is the name of the server to which the load is reported. Server string // Cluster is the name of the cluster for which load is reported. Cluster string } // ReportLoad starts reporting load about clusterName to server. func (xdsC *Client) ReportLoad(server string, clusterName string, loadStore lrs.Store) (cancel func()) { xdsC.loadReportCh.Send(ReportLoadArgs{Server: server, Cluster: clusterName}) return func() {} } // WaitForReportLoad waits for ReportLoad to be invoked on this client within a // reasonable timeout, and returns the arguments passed to it. func (xdsC *Client) WaitForReportLoad() (ReportLoadArgs, error) { val, err := xdsC.loadReportCh.Receive() return val.(ReportLoadArgs), err } // Close closes the xds client. func (xdsC *Client) Close() { xdsC.closeCh.Send(nil) } // WaitForClose waits for Close to be invoked on this client within a // reasonable timeout, and returns testutils.ErrRecvTimeout otherwise. func (xdsC *Client) WaitForClose() error { _, err := xdsC.closeCh.Receive() return err } // Name returns the name of the xds client. func (xdsC *Client) Name() string { return xdsC.name } // NewClient returns a new fake xds client. func NewClient() *Client { return NewClientWithName("") } // NewClientWithName returns a new fake xds client with the provided name. This // is used in cases where multiple clients are created in the tests and we need // to make sure the client is created for the expected balancer name. 
func NewClientWithName(name string) *Client {
	return &Client{
		name: name,
		// Watch channels signal watch registrations; cancel channels signal
		// cancellations of previously registered watches.
		suWatchCh:    testutils.NewChannel(),
		cdsWatchCh:   testutils.NewChannel(),
		edsWatchCh:   testutils.NewChannel(),
		suCancelCh:   testutils.NewChannel(),
		cdsCancelCh:  testutils.NewChannel(),
		edsCancelCh:  testutils.NewChannel(),
		loadReportCh: testutils.NewChannel(),
		closeCh:      testutils.NewChannel(),
	}
}
grpc-go-1.29.1/xds/internal/testutils/fakeserver/000077500000000000000000000000001365033716300217605ustar00rootroot00000000000000grpc-go-1.29.1/xds/internal/testutils/fakeserver/server.go000066400000000000000000000135441365033716300236240ustar00rootroot00000000000000/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package fakeserver provides a fake implementation of an xDS server.
package fakeserver

import (
	"context"
	"fmt"
	"io"
	"net"
	"time"

	"github.com/golang/protobuf/proto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/xds/internal/testutils"

	discoverypb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
	adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2"
	lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2"
	lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2"
)

const (
	// TODO: Make this a var or a field in the server if there is a need to use a
	// value other than this default.
	defaultChannelBufferSize = 50
	defaultDialTimeout       = 5 * time.Second
)

// Request wraps the request protobuf (xds/LRS) and error received by the
// Server in a call to stream.Recv().
type Request struct {
	Req proto.Message
	Err error
}

// Response wraps the response protobuf (xds/LRS) and error that the Server
// should send out to the client through a call to stream.Send()
type Response struct {
	Resp proto.Message
	Err  error
}

// Server is a fake implementation of xDS and LRS protocols. It listens on the
// same port for both services and exposes a bunch of channels to send/receive
// messages.
type Server struct {
	// XDSRequestChan is a channel on which received xDS requests are made
	// available to the users of this Server.
	XDSRequestChan *testutils.Channel
	// XDSResponseChan is a channel on which the Server accepts xDS responses
	// to be sent to the client.
	XDSResponseChan chan *Response
	// LRSRequestChan is a channel on which received LRS requests are made
	// available to the users of this Server.
	LRSRequestChan *testutils.Channel
	// LRSResponseChan is a channel on which the Server accepts the LRS
	// response to be sent to the client.
	LRSResponseChan chan *Response
	// Address is the host:port on which the Server is listening for requests.
	Address string

	// The underlying fake implementation of xDS and LRS.
	xdsS *xdsServer
	lrsS *lrsServer
}

// StartServer makes a new Server and gets it to start listening on a local
// port for gRPC requests. The returned cancel function should be invoked by
// the caller upon completion of the test.
func StartServer() (*Server, func(), error) {
	// Port 0 asks the OS for any free local port.
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		return nil, func() {}, fmt.Errorf("net.Listen() failed: %v", err)
	}

	s := &Server{
		XDSRequestChan:  testutils.NewChannelWithSize(defaultChannelBufferSize),
		LRSRequestChan:  testutils.NewChannelWithSize(defaultChannelBufferSize),
		XDSResponseChan: make(chan *Response, defaultChannelBufferSize),
		LRSResponseChan: make(chan *Response, 1), // The server only ever sends one response.
		Address:         lis.Addr().String(),
	}
	s.xdsS = &xdsServer{reqChan: s.XDSRequestChan, respChan: s.XDSResponseChan}
	s.lrsS = &lrsServer{reqChan: s.LRSRequestChan, respChan: s.LRSResponseChan}
	// Both the ADS and LRS services are registered on the same gRPC server,
	// and hence share the same listening port.
	server := grpc.NewServer()
	lrsgrpc.RegisterLoadReportingServiceServer(server, s.lrsS)
	adsgrpc.RegisterAggregatedDiscoveryServiceServer(server, s.xdsS)
	go server.Serve(lis)

	// The returned cleanup function stops the gRPC server (which also closes
	// the listener).
	return s, func() { server.Stop() }, nil
}

// XDSClientConn returns a grpc.ClientConn connected to the fakeServer.
func (xdsS *Server) XDSClientConn() (*grpc.ClientConn, func(), error) {
	ctx, cancel := context.WithTimeout(context.Background(), defaultDialTimeout)
	defer cancel()

	// WithBlock makes Dial wait (up to defaultDialTimeout) until the
	// connection to the fake server is up.
	cc, err := grpc.DialContext(ctx, xdsS.Address, grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		return nil, nil, fmt.Errorf("grpc.DialContext(%s) failed: %v", xdsS.Address, err)
	}
	return cc, func() { cc.Close() }, nil
}

type xdsServer struct {
	reqChan  *testutils.Channel
	respChan chan *Response
}

func (xdsS *xdsServer) StreamAggregatedResources(s adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesServer) error {
	// Buffered with capacity 2 so that each of the two goroutines below can
	// deposit its terminal error without blocking, even though the handler
	// only ever reads one value.
	errCh := make(chan error, 2)

	// Receive goroutine: forwards every request read off the stream to
	// reqChan. The err stored in the forwarded Request is always nil here,
	// since a non-nil Recv error terminates the goroutine instead.
	go func() {
		for {
			req, err := s.Recv()
			if err != nil {
				errCh <- err
				return
			}
			xdsS.reqChan.Send(&Request{req, err})
		}
	}()

	// Send goroutine: writes responses supplied on respChan to the stream,
	// until an error occurs or the stream context is cancelled (which also
	// happens when the handler returns after the receive goroutine errors).
	go func() {
		var retErr error
		defer func() {
			errCh <- retErr
		}()

		for {
			select {
			case r := <-xdsS.respChan:
				if r.Err != nil {
					retErr = r.Err
					return
				}
				if err := s.Send(r.Resp.(*discoverypb.DiscoveryResponse)); err != nil {
					retErr = err
					return
				}
			case <-s.Context().Done():
				retErr = s.Context().Err()
				return
			}
		}
	}()

	// Return on the first error reported by either goroutine.
	if err := <-errCh; err != nil {
		return err
	}
	return nil
}

func (xdsS *xdsServer) DeltaAggregatedResources(adsgrpc.AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error {
	return status.Error(codes.Unimplemented, "")
}

type lrsServer struct {
	reqChan  *testutils.Channel
	respChan chan *Response
}

func (lrsS *lrsServer) StreamLoadStats(s lrsgrpc.LoadReportingService_StreamLoadStatsServer) error {
	// Forward the initial LRS request (err is nil here; a non-nil Recv error
	// returns early).
	req, err := s.Recv()
	if err != nil {
		return err
	}
	lrsS.reqChan.Send(&Request{req, err})

	// Send exactly one response, supplied by the test via respChan, then fall
	// through to draining subsequent load reports.
	select {
	case r := <-lrsS.respChan:
		if r.Err != nil {
			return r.Err
		}
		if err := s.Send(r.Resp.(*lrspb.LoadStatsResponse)); err != nil {
			return err
		}
	case <-s.Context().Done():
		return s.Context().Err()
	}

	// Keep forwarding load reports until the client closes the stream (EOF is
	// a clean termination) or another error occurs.
	for {
		req, err := s.Recv()
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		lrsS.reqChan.Send(&Request{req, err})
	}
}