From cd7199288a8464df326ca39e72b9a9876ee7f3a6 Mon Sep 17 00:00:00 2001 From: bryanl Date: Sun, 29 Mar 2020 20:11:17 -0400 Subject: [PATCH] Create objects using store This change does the following: * Add Create function to store * Add Create support to plugin api Signed-off-by: bryanl --- changelogs/unreleased/802-bryanl | 1 + go.mod | 5 +- go.sum | 12 +- internal/objectstore/dynamic_cache.go | 46 +++ pkg/plugin/api/api_test.go | 14 + pkg/plugin/api/client.go | 18 ++ pkg/plugin/api/fake/mock_dash_service.go | 14 + pkg/plugin/api/fake/mock_dashboard_client.go | 20 ++ pkg/plugin/api/proto/dashboard.pb.go | 224 ++++++++++--- pkg/plugin/api/proto/dashboard.proto | 9 + pkg/plugin/api/proto/generate.sh | 7 +- pkg/plugin/api/server.go | 24 ++ pkg/plugin/dashboard/dashboard.pb.go | 49 ++- pkg/plugin/dashboard/generate.sh | 6 +- pkg/store/fake/mock_store.go | 14 + pkg/store/store.go | 1 + .../golang/protobuf/proto/properties.go | 5 +- .../protobuf/protoc-gen-go/grpc/grpc.go | 61 ++++ .../googleapis/rpc/status/status.pb.go | 26 +- vendor/google.golang.org/grpc/.travis.yml | 27 +- .../google.golang.org/grpc/CODE-OF-CONDUCT.md | 3 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 4 +- vendor/google.golang.org/grpc/GOVERNANCE.md | 1 + vendor/google.golang.org/grpc/MAINTAINERS.md | 27 ++ vendor/google.golang.org/grpc/Makefile | 3 + .../grpc/attributes/attributes.go | 70 ++++ vendor/google.golang.org/grpc/backoff.go | 20 ++ .../google.golang.org/grpc/backoff/backoff.go | 52 +++ .../grpc/balancer/balancer.go | 120 ++++++- .../grpc/balancer/base/balancer.go | 115 +++++-- .../grpc/balancer/base/base.go | 29 ++ .../grpc/balancer/roundrobin/roundrobin.go | 18 +- .../grpc/balancer_conn_wrappers.go | 163 ++++------ .../grpc/balancer_v1_wrapper.go | 34 +- vendor/google.golang.org/grpc/clientconn.go | 303 ++++++++++++------ .../grpc/credentials/credentials.go | 275 ++++++---------- .../grpc/credentials/{tls13.go => go12.go} | 0 .../google.golang.org/grpc/credentials/tls.go | 225 +++++++++++++ vendor/google.golang.org/grpc/dialoptions.go | 82 +++-- .../grpc/encoding/encoding.go | 4 + vendor/google.golang.org/grpc/go.mod | 15 +- vendor/google.golang.org/grpc/go.sum | 36 ++- .../google.golang.org/grpc/grpclog/grpclog.go | 2 +- .../google.golang.org/grpc/health/client.go | 42 +-- .../grpc/health/grpc_health_v1/health.pb.go | 96 +++--- .../google.golang.org/grpc/health/server.go | 10 +- .../grpc/internal/backoff/backoff.go | 27 +- .../grpc/internal/binarylog/binarylog.go | 12 +- .../grpc/internal/binarylog/env_config.go | 4 +- .../grpc/internal/binarylog/sink.go | 2 +- .../grpc/internal/buffer/unbounded.go | 85 +++++ .../grpc/internal/envconfig/envconfig.go | 7 +- .../grpc/internal/internal.go | 15 +- .../resolver/dns/dns_resolver.go | 214 ++++++------- .../grpc/internal/resolver/dns/go113.go | 33 ++ .../resolver/passthrough/passthrough.go | 4 +- .../grpc/internal/transport/controlbuf.go | 12 +- .../grpc/internal/transport/handler_server.go | 10 +- .../grpc/internal/transport/http2_client.go | 203 +++++++----- .../grpc/internal/transport/http2_server.go | 157 +++++---- .../grpc/internal/transport/http_util.go | 1 + .../grpc/internal/transport/transport.go | 70 ++-- .../google.golang.org/grpc/picker_wrapper.go | 172 ++++++---- vendor/google.golang.org/grpc/pickfirst.go | 89 +++-- .../grpc/resolver/resolver.go | 90 +++++- .../grpc/resolver_conn_wrapper.go | 175 +++++++--- vendor/google.golang.org/grpc/rpc_util.go | 56 +++- vendor/google.golang.org/grpc/server.go | 160 +++++---- 
.../google.golang.org/grpc/service_config.go | 39 ++- .../grpc/serviceconfig/serviceconfig.go | 21 +- vendor/google.golang.org/grpc/stats/stats.go | 11 + vendor/google.golang.org/grpc/stream.go | 2 +- vendor/google.golang.org/grpc/trace.go | 3 - vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 87 +++-- vendor/modules.txt | 13 +- 76 files changed, 2836 insertions(+), 1272 deletions(-) create mode 100644 changelogs/unreleased/802-bryanl create mode 100644 vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md create mode 100644 vendor/google.golang.org/grpc/GOVERNANCE.md create mode 100644 vendor/google.golang.org/grpc/MAINTAINERS.md create mode 100644 vendor/google.golang.org/grpc/attributes/attributes.go create mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go rename vendor/google.golang.org/grpc/credentials/{tls13.go => go12.go} (100%) create mode 100644 vendor/google.golang.org/grpc/credentials/tls.go create mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go rename vendor/google.golang.org/grpc/{ => internal}/resolver/dns/dns_resolver.go (72%) create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/go113.go rename vendor/google.golang.org/grpc/{ => internal}/resolver/passthrough/passthrough.go (94%) diff --git a/changelogs/unreleased/802-bryanl b/changelogs/unreleased/802-bryanl new file mode 100644 index 0000000000..6d61db755b --- /dev/null +++ b/changelogs/unreleased/802-bryanl @@ -0,0 +1 @@ +Add support for creating objects diff --git a/go.mod b/go.mod index 809f0a97ce..aed80b8248 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/fsnotify/fsnotify v1.4.7 github.com/gobwas/glob v0.2.3 github.com/golang/mock v1.3.1 - github.com/golang/protobuf v1.3.1 + github.com/golang/protobuf v1.3.2 github.com/google/uuid v1.1.1 github.com/googleapis/gnostic v0.2.0 github.com/gorilla/handlers v1.4.0 @@ -38,8 +38,7 @@ require ( go.opencensus.io v0.22.1 go.uber.org/zap v1.10.0 golang.org/x/sync v0.0.0-20190423024810-112230192c58 - google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 // indirect - google.golang.org/grpc v1.23.0 + google.golang.org/grpc v1.27.0 gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20191016225839-816a9b7df678 k8s.io/apiextensions-apiserver v0.0.0-20181213153335-0fe22c71c476 diff --git a/go.sum b/go.sum index b64d84f7d6..15a9a6d4ad 100644 --- a/go.sum +++ b/go.sum @@ -29,6 +29,7 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -61,6 +62,8 @@ github.com/elazarl/goproxy/ext v0.0.0-20190703090003-6125c262ffb0 h1:ht1Fo9uxmem github.com/elazarl/goproxy/ext v0.0.0-20190703090003-6125c262ffb0/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod 
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -102,6 +105,8 @@ github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= @@ -235,6 +240,7 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -399,8 +405,8 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqn google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= 
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= @@ -410,6 +416,8 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/objectstore/dynamic_cache.go b/internal/objectstore/dynamic_cache.go index a0e89630d7..cc585009ed 100644 --- a/internal/objectstore/dynamic_cache.go +++ b/internal/objectstore/dynamic_cache.go @@ -573,3 +573,49 @@ func (dc *DynamicCache) Update(ctx context.Context, key store.Key, updater func( func (dc *DynamicCache) IsLoading(ctx context.Context, key store.Key) bool { return !dc.informerSynced.hasSynced(key) } + +// Create creates an object in the cluster. +// Note: test coverage of DynamicCache is slim. +func (dc *DynamicCache) Create(ctx context.Context, object *unstructured.Unstructured) error { + _, span := trace.StartSpan(ctx, "dynamicCache:create") + defer span.End() + + key, err := store.KeyFromObject(object) + if err != nil { + return fmt.Errorf("key from object: %w", err) + } + + if dc.isBackingOff(ctx, key) { + return nil + } + + if err := dc.access.HasAccess(ctx, key, "create"); err != nil { + if meta.IsNoMatchError(err) { + return nil + } + if !dc.isBackingOff(ctx, key) { + dc.backoff(ctx, key) + } + return fmt.Errorf("check access to create %s: %w", key, err) + } + + dynamicClient, err := dc.client.DynamicClient() + if err != nil { + return err + } + + gvr, err := dc.client.Resource(key.GroupVersionKind().GroupKind()) + if err != nil { + return err + } + + createOptions := metav1.CreateOptions{} + + if key.Namespace == "" { + _, err := dynamicClient.Resource(gvr).Create(object, createOptions) + return err + } + + _, err = dynamicClient.Resource(gvr).Namespace(key.Namespace).Create(object, createOptions) + return err +} diff --git a/pkg/plugin/api/api_test.go b/pkg/plugin/api/api_test.go index 1b27b49fba..9a6871d8f0 100644 --- a/pkg/plugin/api/api_test.go +++ b/pkg/plugin/api/api_test.go @@ -82,6 +82,20 @@ func TestAPI(t *testing.T) { assert.Equal(t, expected, got) }, }, + { + name: "create", + initFunc: func(t *testing.T, mocks *apiMocks) { + mocks.objectStore.EXPECT(). 
+ Create(gomock.Any(), gomock.Eq(object)).Return(nil) + }, + doFunc: func(t *testing.T, client *api.Client) { + clientCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + err := client.Create(clientCtx, object) + require.NoError(t, err) + }, + }, { name: "get", initFunc: func(t *testing.T, mocks *apiMocks) { diff --git a/pkg/plugin/api/client.go b/pkg/plugin/api/client.go index 787ba14e22..20e94643d9 100644 --- a/pkg/plugin/api/client.go +++ b/pkg/plugin/api/client.go @@ -138,6 +138,24 @@ func (c *Client) Update(ctx context.Context, object *unstructured.Unstructured) return err } +// Create creates an object in the cluster. +func (c *Client) Create(ctx context.Context, object *unstructured.Unstructured) error { + client := c.DashboardConnection.Client() + + data, err := convertFromObject(object) + if err != nil { + return err + } + + req := &proto.CreateRequest{ + Object: data, + } + + _, err = client.Create(ctx, req) + + return err +} + // PortForward creates a port forward. func (c *Client) PortForward(ctx context.Context, req PortForwardRequest) (PortForwardResponse, error) { client := c.DashboardConnection.Client() diff --git a/pkg/plugin/api/fake/mock_dash_service.go b/pkg/plugin/api/fake/mock_dash_service.go index 25f7d12196..f9272494c1 100644 --- a/pkg/plugin/api/fake/mock_dash_service.go +++ b/pkg/plugin/api/fake/mock_dash_service.go @@ -48,6 +48,20 @@ func (mr *MockServiceMockRecorder) CancelPortForward(arg0, arg1 interface{}) *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelPortForward", reflect.TypeOf((*MockService)(nil).CancelPortForward), arg0, arg1) } +// Create mocks base method +func (m *MockService) Create(arg0 context.Context, arg1 *unstructured.Unstructured) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create +func (mr *MockServiceMockRecorder) Create(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockService)(nil).Create), arg0, arg1) +} + // ForceFrontendUpdate mocks base method func (m *MockService) ForceFrontendUpdate(arg0 context.Context) error { m.ctrl.T.Helper() diff --git a/pkg/plugin/api/fake/mock_dashboard_client.go b/pkg/plugin/api/fake/mock_dashboard_client.go index 7d93dd88f2..cdf3cd8484 100644 --- a/pkg/plugin/api/fake/mock_dashboard_client.go +++ b/pkg/plugin/api/fake/mock_dashboard_client.go @@ -55,6 +55,26 @@ func (mr *MockDashboardClientMockRecorder) CancelPortForward(arg0, arg1 interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelPortForward", reflect.TypeOf((*MockDashboardClient)(nil).CancelPortForward), varargs...) } +// Create mocks base method +func (m *MockDashboardClient) Create(arg0 context.Context, arg1 *proto.CreateRequest, arg2 ...grpc.CallOption) (*proto.CreateResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "Create", varargs...) + ret0, _ := ret[0].(*proto.CreateResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Create indicates an expected call of Create +func (mr *MockDashboardClientMockRecorder) Create(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockDashboardClient)(nil).Create), varargs...) +} + // ForceFrontendUpdate mocks base method func (m *MockDashboardClient) ForceFrontendUpdate(arg0 context.Context, arg1 *proto.Empty, arg2 ...grpc.CallOption) (*proto.Empty, error) { m.ctrl.T.Helper() diff --git a/pkg/plugin/api/proto/dashboard.pb.go b/pkg/plugin/api/proto/dashboard.pb.go index ff5798c649..a7a74f22bb 100644 --- a/pkg/plugin/api/proto/dashboard.pb.go +++ b/pkg/plugin/api/proto/dashboard.pb.go @@ -9,6 +9,8 @@ import ( proto "github.com/golang/protobuf/proto" wrappers "github.com/golang/protobuf/ptypes/wrappers" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" math "math" ) @@ -273,6 +275,76 @@ func (m *UpdateResponse) XXX_DiscardUnknown() { var xxx_messageInfo_UpdateResponse proto.InternalMessageInfo +type CreateRequest struct { + Object []byte `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateRequest) Reset() { *m = CreateRequest{} } +func (m *CreateRequest) String() string { return proto.CompactTextString(m) } +func (*CreateRequest) ProtoMessage() {} +func (*CreateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9b97678da3a35dfb, []int{6} +} + +func (m *CreateRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateRequest.Unmarshal(m, b) +} +func (m *CreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateRequest.Marshal(b, m, deterministic) +} +func (m *CreateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateRequest.Merge(m, src) +} +func (m *CreateRequest) XXX_Size() int { + return xxx_messageInfo_CreateRequest.Size(m) +} +func (m *CreateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateRequest proto.InternalMessageInfo + +func (m *CreateRequest) GetObject() []byte { + if m != nil { + return m.Object + } + return nil +} + +type CreateResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateResponse) Reset() { *m = CreateResponse{} } +func (m *CreateResponse) String() string { return proto.CompactTextString(m) } +func (*CreateResponse) ProtoMessage() {} +func (*CreateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9b97678da3a35dfb, []int{7} +} + +func (m *CreateResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateResponse.Unmarshal(m, b) +} +func (m *CreateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateResponse.Marshal(b, m, deterministic) +} +func (m *CreateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateResponse.Merge(m, src) +} +func (m *CreateResponse) XXX_Size() int { + return xxx_messageInfo_CreateResponse.Size(m) +} +func (m *CreateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateResponse proto.InternalMessageInfo + type PortForwardRequest struct { Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` PodName string `protobuf:"bytes,2,opt,name=podName,proto3" json:"podName,omitempty"` @@ -287,7 +359,7 @@ func (m *PortForwardRequest) Reset() { *m = PortForwardRequest{} 
} func (m *PortForwardRequest) String() string { return proto.CompactTextString(m) } func (*PortForwardRequest) ProtoMessage() {} func (*PortForwardRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9b97678da3a35dfb, []int{6} + return fileDescriptor_9b97678da3a35dfb, []int{8} } func (m *PortForwardRequest) XXX_Unmarshal(b []byte) error { @@ -348,7 +420,7 @@ func (m *PortForwardResponse) Reset() { *m = PortForwardResponse{} } func (m *PortForwardResponse) String() string { return proto.CompactTextString(m) } func (*PortForwardResponse) ProtoMessage() {} func (*PortForwardResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9b97678da3a35dfb, []int{7} + return fileDescriptor_9b97678da3a35dfb, []int{9} } func (m *PortForwardResponse) XXX_Unmarshal(b []byte) error { @@ -394,7 +466,7 @@ func (m *CancelPortForwardRequest) Reset() { *m = CancelPortForwardReque func (m *CancelPortForwardRequest) String() string { return proto.CompactTextString(m) } func (*CancelPortForwardRequest) ProtoMessage() {} func (*CancelPortForwardRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9b97678da3a35dfb, []int{8} + return fileDescriptor_9b97678da3a35dfb, []int{10} } func (m *CancelPortForwardRequest) XXX_Unmarshal(b []byte) error { @@ -433,7 +505,7 @@ func (m *NamespacesResponse) Reset() { *m = NamespacesResponse{} } func (m *NamespacesResponse) String() string { return proto.CompactTextString(m) } func (*NamespacesResponse) ProtoMessage() {} func (*NamespacesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9b97678da3a35dfb, []int{9} + return fileDescriptor_9b97678da3a35dfb, []int{11} } func (m *NamespacesResponse) XXX_Unmarshal(b []byte) error { @@ -468,57 +540,63 @@ func init() { proto.RegisterType((*GetResponse)(nil), "proto.GetResponse") proto.RegisterType((*UpdateRequest)(nil), "proto.UpdateRequest") proto.RegisterType((*UpdateResponse)(nil), "proto.UpdateResponse") + proto.RegisterType((*CreateRequest)(nil), "proto.CreateRequest") + proto.RegisterType((*CreateResponse)(nil), "proto.CreateResponse") proto.RegisterType((*PortForwardRequest)(nil), "proto.PortForwardRequest") proto.RegisterType((*PortForwardResponse)(nil), "proto.PortForwardResponse") proto.RegisterType((*CancelPortForwardRequest)(nil), "proto.CancelPortForwardRequest") proto.RegisterType((*NamespacesResponse)(nil), "proto.NamespacesResponse") } -func init() { proto.RegisterFile("dashboard.proto", fileDescriptor_9b97678da3a35dfb) } +func init() { + proto.RegisterFile("dashboard.proto", fileDescriptor_9b97678da3a35dfb) +} var fileDescriptor_9b97678da3a35dfb = []byte{ - // 510 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0xe5, 0xe6, 0x4b, 0x99, 0x24, 0x85, 0x6e, 0x00, 0x19, 0x83, 0x42, 0x64, 0x15, 0x91, - 0x03, 0x72, 0x45, 0x81, 0x03, 0x37, 0x28, 0x21, 0x15, 0x02, 0x45, 0xc8, 0x88, 0x5e, 0x38, 0xad, - 0xed, 0xa1, 0x18, 0x1c, 0xef, 0xb2, 0xbb, 0x51, 0x95, 0xd7, 0xe0, 0xc4, 0x8b, 0xf0, 0x7e, 0x95, - 0x77, 0xd7, 0xb5, 0xdd, 0xa4, 0x52, 0x4f, 0xd9, 0xf9, 0xcf, 0x47, 0x66, 0x7e, 0x33, 0x86, 0x3b, - 0x09, 0x95, 0x3f, 0x23, 0x46, 0x45, 0x12, 0x70, 0xc1, 0x14, 0x23, 0x1d, 0xfd, 0xe3, 0x4d, 0xce, - 0x19, 0x3b, 0xcf, 0xf0, 0x48, 0x5b, 0xd1, 0xfa, 0xc7, 0xd1, 0x85, 0xa0, 0x9c, 0xa3, 0x90, 0x26, - 0xcc, 0xef, 0x41, 0xe7, 0xc3, 0x8a, 0xab, 0x8d, 0xff, 0xdf, 0x01, 0xf8, 0x84, 0x9b, 0x10, 0xff, - 0xac, 0x51, 0x2a, 0xf2, 0x18, 0xfa, 0x39, 0x5d, 0xa1, 0xe4, 0x34, 0x46, 0xd7, 0x99, 0x3a, 0xb3, - 0x7e, 0x58, 
0x09, 0x64, 0x02, 0x40, 0x79, 0x7a, 0x86, 0x42, 0xa6, 0x2c, 0x77, 0xf7, 0xb4, 0xbb, - 0xa6, 0x10, 0x02, 0xed, 0xdf, 0x69, 0x9e, 0xb8, 0x2d, 0xed, 0xd1, 0xef, 0x42, 0x2b, 0x0a, 0xb8, - 0x6d, 0xa3, 0x15, 0x6f, 0xf2, 0x0e, 0x46, 0x19, 0x8d, 0x30, 0xfb, 0x8a, 0x19, 0xc6, 0x8a, 0x09, - 0xb7, 0x33, 0x75, 0x66, 0x83, 0xe3, 0x47, 0x81, 0xe9, 0x3a, 0x28, 0xbb, 0x0e, 0x4e, 0x36, 0x0a, - 0xe5, 0x19, 0xcd, 0xd6, 0x18, 0x36, 0x33, 0xfc, 0x19, 0x0c, 0x3f, 0xa7, 0x52, 0x85, 0x28, 0x39, - 0xcb, 0x25, 0x12, 0x17, 0x7a, 0x2c, 0xfa, 0x85, 0xb1, 0x92, 0xae, 0x33, 0x6d, 0xcd, 0x86, 0x61, - 0x69, 0xfa, 0x4f, 0x61, 0x70, 0x8a, 0x55, 0xe0, 0x03, 0xe8, 0x1a, 0x8f, 0x1e, 0x6f, 0x18, 0x5a, - 0xcb, 0x7f, 0x06, 0xa3, 0x6f, 0x3c, 0xa1, 0x0a, 0x4b, 0x14, 0x37, 0x05, 0xde, 0x85, 0xfd, 0x32, - 0xd0, 0x94, 0xf4, 0xff, 0x3a, 0x40, 0xbe, 0x30, 0xa1, 0x16, 0x4c, 0x5c, 0x50, 0x91, 0xdc, 0x8e, - 0xa5, 0x0b, 0x3d, 0xce, 0x92, 0x65, 0x81, 0xc6, 0x80, 0x2c, 0x4d, 0x72, 0x08, 0xa3, 0x98, 0xe5, - 0x8a, 0xa6, 0x39, 0x0a, 0xed, 0x37, 0x38, 0x9b, 0x62, 0xb1, 0x0b, 0xce, 0x84, 0x5a, 0xae, 0x57, - 0x11, 0x0a, 0x4d, 0x77, 0x14, 0xd6, 0x14, 0xff, 0x3b, 0x8c, 0x1b, 0x3d, 0xd9, 0xf1, 0x0f, 0x61, - 0xc4, 0x2b, 0xf9, 0xe3, 0xdc, 0x36, 0xd6, 0x14, 0xaf, 0x15, 0xdf, 0xdb, 0x2a, 0xfe, 0x16, 0xdc, - 0xf7, 0x34, 0x8f, 0x31, 0xdb, 0x31, 0xf6, 0xad, 0xfe, 0xc1, 0x7f, 0x05, 0x64, 0x59, 0xb2, 0x90, - 0x57, 0xdd, 0x4d, 0x00, 0xae, 0x08, 0x99, 0x45, 0xf6, 0xc3, 0x9a, 0x72, 0xfc, 0xaf, 0x05, 0xfd, - 0x79, 0x79, 0xf1, 0x24, 0x80, 0x76, 0x71, 0x03, 0xe4, 0xc0, 0x1c, 0x4c, 0x50, 0xdd, 0xb1, 0x37, - 0xb6, 0x52, 0xe3, 0x46, 0x9e, 0x43, 0xeb, 0x14, 0x77, 0x86, 0x13, 0x2b, 0xd5, 0x0f, 0xe5, 0x35, - 0x74, 0xcd, 0x9e, 0xc9, 0x3d, 0xeb, 0x6d, 0xdc, 0x87, 0x77, 0xff, 0x9a, 0x6a, 0xd3, 0xe6, 0x30, - 0xa8, 0x41, 0x21, 0x0f, 0x6d, 0xd4, 0x36, 0x28, 0xcf, 0xdb, 0xe5, 0xb2, 0x55, 0x4e, 0xe0, 0x60, - 0x0b, 0x30, 0x79, 0x62, 0x13, 0x6e, 0x42, 0xef, 0x0d, 0x6d, 0x80, 0xfe, 0xb4, 0xc9, 0x1b, 0xd8, - 0x2f, 0xc6, 0xaf, 0x30, 0x93, 0x86, 0xdf, 0x2b, 0x5b, 0xdb, 0xb1, 0x87, 0x17, 0x30, 0x5e, 0x30, - 0x11, 0xe3, 0x42, 0xb0, 0x5c, 0x61, 0x9e, 0x58, 0x10, 0xcd, 0xfc, 0x86, 0x15, 0x75, 0xb5, 0xf1, - 0xf2, 0x32, 0x00, 0x00, 0xff, 0xff, 0x15, 0xd4, 0x9d, 0x8f, 0x92, 0x04, 0x00, 0x00, + // 533 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xdd, 0x6e, 0xd3, 0x30, + 0x14, 0x56, 0xd6, 0x3f, 0xf5, 0xb4, 0x1d, 0xcc, 0x05, 0x14, 0x02, 0x2a, 0x55, 0x34, 0x44, 0x2f, + 0x50, 0x26, 0x06, 0x5c, 0x70, 0x07, 0x5b, 0xe9, 0x84, 0x40, 0x15, 0x0a, 0x62, 0x37, 0x5c, 0x39, + 0xc9, 0x61, 0x14, 0xd2, 0xd8, 0xd8, 0xae, 0xa6, 0xbe, 0x06, 0xef, 0xc2, 0x73, 0xf0, 0x4a, 0x28, + 0xb1, 0xd3, 0xc4, 0x6b, 0x27, 0xf5, 0xaa, 0x39, 0xdf, 0xf9, 0xf1, 0xf1, 0xf7, 0x7d, 0x2e, 0xdc, + 0x49, 0xa8, 0xfc, 0x11, 0x31, 0x2a, 0x92, 0x80, 0x0b, 0xa6, 0x18, 0x69, 0x15, 0x3f, 0xde, 0xe8, + 0x8a, 0xb1, 0xab, 0x14, 0x4f, 0x8a, 0x28, 0x5a, 0x7d, 0x3f, 0xb9, 0x16, 0x94, 0x73, 0x14, 0x52, + 0x97, 0xf9, 0x1d, 0x68, 0xbd, 0x5f, 0x72, 0xb5, 0xf6, 0xff, 0x3a, 0x00, 0x1f, 0x71, 0x1d, 0xe2, + 0xef, 0x15, 0x4a, 0x45, 0x1e, 0x43, 0x37, 0xa3, 0x4b, 0x94, 0x9c, 0xc6, 0xe8, 0x3a, 0x63, 0x67, + 0xd2, 0x0d, 0x2b, 0x80, 0x8c, 0x00, 0x28, 0x5f, 0x5c, 0xa2, 0x90, 0x0b, 0x96, 0xb9, 0x07, 0x45, + 0xba, 0x86, 0x10, 0x02, 0xcd, 0x5f, 0x8b, 0x2c, 0x71, 0x1b, 0x45, 0xa6, 0xf8, 0xce, 0xb1, 0x7c, + 0x80, 0xdb, 0xd4, 0x58, 0xfe, 0x4d, 0xde, 0xc1, 0x20, 0xa5, 0x11, 0xa6, 0x5f, 0x30, 0xc5, 0x58, + 0x31, 0xe1, 0xb6, 0xc6, 0xce, 0xa4, 0x77, 0xfa, 0x28, 0xd0, 0x5b, 0x07, 0xe5, 0xd6, 0xc1, 0xd9, + 
0x5a, 0xa1, 0xbc, 0xa4, 0xe9, 0x0a, 0x43, 0xbb, 0xc3, 0x9f, 0x40, 0xff, 0xd3, 0x42, 0xaa, 0x10, + 0x25, 0x67, 0x99, 0x44, 0xe2, 0x42, 0x87, 0x45, 0x3f, 0x31, 0x56, 0xd2, 0x75, 0xc6, 0x8d, 0x49, + 0x3f, 0x2c, 0x43, 0xff, 0x29, 0xf4, 0x2e, 0xb0, 0x2a, 0x7c, 0x00, 0x6d, 0x9d, 0x29, 0xae, 0xd7, + 0x0f, 0x4d, 0xe4, 0x3f, 0x83, 0xc1, 0x57, 0x9e, 0x50, 0x85, 0x25, 0x15, 0xb7, 0x15, 0xde, 0x85, + 0xc3, 0xb2, 0x50, 0x8f, 0xcc, 0x5b, 0xcf, 0x05, 0xee, 0xd7, 0x5a, 0x16, 0x9a, 0xd6, 0x3f, 0x0e, + 0x90, 0xcf, 0x4c, 0xa8, 0x19, 0x13, 0xd7, 0x54, 0x24, 0xfb, 0xc9, 0xe0, 0x42, 0x87, 0xb3, 0x64, + 0x9e, 0xb3, 0xaa, 0x35, 0x28, 0x43, 0x72, 0x0c, 0x83, 0x98, 0x65, 0x8a, 0x2e, 0x32, 0x14, 0x45, + 0x5e, 0x2b, 0x61, 0x83, 0xb9, 0x8c, 0x9c, 0x09, 0x35, 0x5f, 0x2d, 0x23, 0x14, 0x85, 0x30, 0x83, + 0xb0, 0x86, 0xf8, 0xdf, 0x60, 0x68, 0xed, 0x64, 0x98, 0x3b, 0x86, 0x01, 0xaf, 0xe0, 0x0f, 0x53, + 0xb3, 0x98, 0x0d, 0xde, 0x18, 0x7e, 0xb0, 0x35, 0xfc, 0x2d, 0xb8, 0xe7, 0x34, 0x8b, 0x31, 0xdd, + 0x71, 0xed, 0xbd, 0x4e, 0xf0, 0x5f, 0x01, 0x99, 0x97, 0x5c, 0xc8, 0xcd, 0x76, 0x23, 0x80, 0x0d, + 0x43, 0xda, 0x03, 0xdd, 0xb0, 0x86, 0x9c, 0xfe, 0x6b, 0x40, 0x77, 0x5a, 0x3e, 0x16, 0x12, 0x40, + 0x33, 0xb7, 0x0f, 0x39, 0xd2, 0x5e, 0x0b, 0xaa, 0x27, 0xe0, 0x0d, 0x0d, 0x64, 0xd9, 0xeb, 0x39, + 0x34, 0x2e, 0x70, 0x67, 0x39, 0x31, 0x50, 0xdd, 0x63, 0xaf, 0xa1, 0xad, 0x2d, 0x42, 0xee, 0x99, + 0xac, 0x65, 0x2d, 0xef, 0xfe, 0x0d, 0xb4, 0x6a, 0xd3, 0xf6, 0xd8, 0xb4, 0x59, 0xb6, 0xda, 0xb4, + 0xd9, 0x1e, 0x22, 0x53, 0xe8, 0xd5, 0xb8, 0x24, 0x0f, 0x4d, 0xd5, 0x36, 0xbf, 0x9e, 0xb7, 0x2b, + 0x65, 0xa6, 0x9c, 0xc1, 0xd1, 0x96, 0x2e, 0xe4, 0x49, 0x79, 0xe2, 0x2d, 0x8a, 0x79, 0x7d, 0x53, + 0x50, 0xfc, 0x99, 0x90, 0x37, 0x70, 0x98, 0xb3, 0x56, 0xa9, 0x43, 0xac, 0xbc, 0x57, 0xae, 0xb6, + 0x43, 0xbe, 0x17, 0x30, 0x9c, 0x31, 0x11, 0xe3, 0x4c, 0xb0, 0x4c, 0x61, 0x96, 0x18, 0xfe, 0xec, + 0x7e, 0x2b, 0x8a, 0xda, 0x45, 0xf0, 0xf2, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8f, 0x17, 0xc0, + 0x71, 0x04, 0x05, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +const _ = grpc.SupportPackageIsVersion6 // DashboardClient is the client API for Dashboard service. 
// @@ -527,6 +605,7 @@ type DashboardClient interface { List(ctx context.Context, in *KeyRequest, opts ...grpc.CallOption) (*ListResponse, error) Get(ctx context.Context, in *KeyRequest, opts ...grpc.CallOption) (*GetResponse, error) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) + Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) PortForward(ctx context.Context, in *PortForwardRequest, opts ...grpc.CallOption) (*PortForwardResponse, error) CancelPortForward(ctx context.Context, in *CancelPortForwardRequest, opts ...grpc.CallOption) (*Empty, error) ListNamespaces(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*NamespacesResponse, error) @@ -534,10 +613,10 @@ type DashboardClient interface { } type dashboardClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewDashboardClient(cc *grpc.ClientConn) DashboardClient { +func NewDashboardClient(cc grpc.ClientConnInterface) DashboardClient { return &dashboardClient{cc} } @@ -568,6 +647,15 @@ func (c *dashboardClient) Update(ctx context.Context, in *UpdateRequest, opts .. return out, nil } +func (c *dashboardClient) Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) { + out := new(CreateResponse) + err := c.cc.Invoke(ctx, "/proto.Dashboard/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *dashboardClient) PortForward(ctx context.Context, in *PortForwardRequest, opts ...grpc.CallOption) (*PortForwardResponse, error) { out := new(PortForwardResponse) err := c.cc.Invoke(ctx, "/proto.Dashboard/PortForward", in, out, opts...) @@ -609,12 +697,42 @@ type DashboardServer interface { List(context.Context, *KeyRequest) (*ListResponse, error) Get(context.Context, *KeyRequest) (*GetResponse, error) Update(context.Context, *UpdateRequest) (*UpdateResponse, error) + Create(context.Context, *CreateRequest) (*CreateResponse, error) PortForward(context.Context, *PortForwardRequest) (*PortForwardResponse, error) CancelPortForward(context.Context, *CancelPortForwardRequest) (*Empty, error) ListNamespaces(context.Context, *Empty) (*NamespacesResponse, error) ForceFrontendUpdate(context.Context, *Empty) (*Empty, error) } +// UnimplementedDashboardServer can be embedded to have forward compatible implementations. 
+type UnimplementedDashboardServer struct { +} + +func (*UnimplementedDashboardServer) List(ctx context.Context, req *KeyRequest) (*ListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (*UnimplementedDashboardServer) Get(ctx context.Context, req *KeyRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedDashboardServer) Update(ctx context.Context, req *UpdateRequest) (*UpdateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (*UnimplementedDashboardServer) Create(ctx context.Context, req *CreateRequest) (*CreateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (*UnimplementedDashboardServer) PortForward(ctx context.Context, req *PortForwardRequest) (*PortForwardResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PortForward not implemented") +} +func (*UnimplementedDashboardServer) CancelPortForward(ctx context.Context, req *CancelPortForwardRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelPortForward not implemented") +} +func (*UnimplementedDashboardServer) ListNamespaces(ctx context.Context, req *Empty) (*NamespacesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNamespaces not implemented") +} +func (*UnimplementedDashboardServer) ForceFrontendUpdate(ctx context.Context, req *Empty) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ForceFrontendUpdate not implemented") +} + func RegisterDashboardServer(s *grpc.Server, srv DashboardServer) { s.RegisterService(&_Dashboard_serviceDesc, srv) } @@ -673,6 +791,24 @@ func _Dashboard_Update_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } +func _Dashboard_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DashboardServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Dashboard/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DashboardServer).Create(ctx, req.(*CreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Dashboard_PortForward_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PortForwardRequest) if err := dec(in); err != nil { @@ -761,6 +897,10 @@ var _Dashboard_serviceDesc = grpc.ServiceDesc{ MethodName: "Update", Handler: _Dashboard_Update_Handler, }, + { + MethodName: "Create", + Handler: _Dashboard_Create_Handler, + }, { MethodName: "PortForward", Handler: _Dashboard_PortForward_Handler, diff --git a/pkg/plugin/api/proto/dashboard.proto b/pkg/plugin/api/proto/dashboard.proto index 7a102c7ef6..73e08da9b1 100644 --- a/pkg/plugin/api/proto/dashboard.proto +++ b/pkg/plugin/api/proto/dashboard.proto @@ -29,6 +29,14 @@ message UpdateResponse { } +message CreateRequest { + bytes object = 1; +} + +message CreateResponse { + +} + message PortForwardRequest { string namespace = 1; string podName = 2; @@ -53,6 +61,7 @@ service Dashboard { rpc List(KeyRequest) returns (ListResponse); rpc 
Get(KeyRequest) returns (GetResponse); rpc Update(UpdateRequest) returns (UpdateResponse); + rpc Create(CreateRequest) returns (CreateResponse); rpc PortForward(PortForwardRequest) returns (PortForwardResponse); rpc CancelPortForward(CancelPortForwardRequest) returns (Empty); rpc ListNamespaces(Empty) returns (NamespacesResponse); diff --git a/pkg/plugin/api/proto/generate.sh b/pkg/plugin/api/proto/generate.sh index c2fd8b3f60..66f158f0d5 100755 --- a/pkg/plugin/api/proto/generate.sh +++ b/pkg/plugin/api/proto/generate.sh @@ -1,4 +1,9 @@ #!/bin/sh # generate golang for protobuf -protoc -I$GOPATH/src/github.com/vmware-tanzu/octant/vendor -I$GOPATH/src/github.com/vmware-tanzu/octant -I. --go_out=plugins=grpc:. dashboard.proto +# get directory of this script +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +OCTANT_ROOT=${DIR}/../../../.. + +protoc -I${OCTANT_ROOT}/vendor -I${OCTANT_ROOT} -I. --go_out=plugins=grpc:. dashboard.proto + diff --git a/pkg/plugin/api/server.go b/pkg/plugin/api/server.go index c43a0b02c4..80adf5cb2d 100644 --- a/pkg/plugin/api/server.go +++ b/pkg/plugin/api/server.go @@ -7,6 +7,7 @@ package api import ( "context" + "fmt" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -45,6 +46,7 @@ type Service interface { CancelPortForward(ctx context.Context, id string) ListNamespaces(ctx context.Context) (NamespacesResponse, error) Update(ctx context.Context, object *unstructured.Unstructured) error + Create(ctx context.Context, object *unstructured.Unstructured) error ForceFrontendUpdate(ctx context.Context) error } @@ -101,6 +103,10 @@ func (s *GRPCService) Update(ctx context.Context, object *unstructured.Unstructu }) } +func (s *GRPCService) Create(ctx context.Context, object *unstructured.Unstructured) error { + return s.ObjectStore.Create(ctx, object) +} + // PortForward creates a port forward. func (s *GRPCService) PortForward(ctx context.Context, req PortForwardRequest) (PortForwardResponse, error) { pfResponse, err := s.PortForwarder.Create( @@ -221,6 +227,24 @@ func (c *grpcServer) Update(ctx context.Context, in *proto.UpdateRequest) (*prot return &proto.UpdateResponse{}, nil } +// Create creates an object in the cluster. +func (c *grpcServer) Create(ctx context.Context, in *proto.CreateRequest) (*proto.CreateResponse, error) { + object, err := convertToObject(in.Object) + if err != nil { + return nil, err + } + + if object == nil { + return nil, fmt.Errorf("unable to create a nil object") + } + + if err := c.service.Create(ctx, object); err != nil { + return nil, err + } + + return &proto.CreateResponse{}, nil +} + // PortForward creates a port forward. 
func (c *grpcServer) PortForward(ctx context.Context, in *proto.PortForwardRequest) (*proto.PortForwardResponse, error) { req, err := convertToPortForwardRequest(in) diff --git a/pkg/plugin/dashboard/dashboard.pb.go b/pkg/plugin/dashboard/dashboard.pb.go index eb646477d7..74ff2081f9 100644 --- a/pkg/plugin/dashboard/dashboard.pb.go +++ b/pkg/plugin/dashboard/dashboard.pb.go @@ -8,6 +8,8 @@ import ( fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" math "math" ) @@ -873,7 +875,9 @@ func init() { proto.RegisterType((*WatchRequest)(nil), "dashboard.WatchRequest") } -func init() { proto.RegisterFile("dashboard.proto", fileDescriptor_9b97678da3a35dfb) } +func init() { + proto.RegisterFile("dashboard.proto", fileDescriptor_9b97678da3a35dfb) +} var fileDescriptor_9b97678da3a35dfb = []byte{ // 871 bytes of a gzipped FileDescriptorProto @@ -936,11 +940,11 @@ var fileDescriptor_9b97678da3a35dfb = []byte{ // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +const _ = grpc.SupportPackageIsVersion6 // PluginClient is the client API for Plugin service. // @@ -959,10 +963,10 @@ type PluginClient interface { } type pluginClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewPluginClient(cc *grpc.ClientConn) PluginClient { +func NewPluginClient(cc grpc.ClientConnInterface) PluginClient { return &pluginClient{cc} } @@ -1070,6 +1074,41 @@ type PluginServer interface { WatchDelete(context.Context, *WatchRequest) (*Empty, error) } +// UnimplementedPluginServer can be embedded to have forward compatible implementations. 
+type UnimplementedPluginServer struct { +} + +func (*UnimplementedPluginServer) Content(ctx context.Context, req *ContentRequest) (*ContentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Content not implemented") +} +func (*UnimplementedPluginServer) HandleAction(ctx context.Context, req *HandleActionRequest) (*HandleActionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method HandleAction not implemented") +} +func (*UnimplementedPluginServer) Navigation(ctx context.Context, req *NavigationRequest) (*NavigationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Navigation not implemented") +} +func (*UnimplementedPluginServer) Register(ctx context.Context, req *RegisterRequest) (*RegisterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Register not implemented") +} +func (*UnimplementedPluginServer) Print(ctx context.Context, req *ObjectRequest) (*PrintResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Print not implemented") +} +func (*UnimplementedPluginServer) ObjectStatus(ctx context.Context, req *ObjectRequest) (*ObjectStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ObjectStatus not implemented") +} +func (*UnimplementedPluginServer) PrintTab(ctx context.Context, req *ObjectRequest) (*PrintTabResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PrintTab not implemented") +} +func (*UnimplementedPluginServer) WatchAdd(ctx context.Context, req *WatchRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method WatchAdd not implemented") +} +func (*UnimplementedPluginServer) WatchUpdate(ctx context.Context, req *WatchRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method WatchUpdate not implemented") +} +func (*UnimplementedPluginServer) WatchDelete(ctx context.Context, req *WatchRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method WatchDelete not implemented") +} + func RegisterPluginServer(s *grpc.Server, srv PluginServer) { s.RegisterService(&_Plugin_serviceDesc, srv) } diff --git a/pkg/plugin/dashboard/generate.sh b/pkg/plugin/dashboard/generate.sh index c2fd8b3f60..032de75bfb 100755 --- a/pkg/plugin/dashboard/generate.sh +++ b/pkg/plugin/dashboard/generate.sh @@ -1,4 +1,8 @@ #!/bin/sh # generate golang for protobuf -protoc -I$GOPATH/src/github.com/vmware-tanzu/octant/vendor -I$GOPATH/src/github.com/vmware-tanzu/octant -I. --go_out=plugins=grpc:. dashboard.proto +# get directory of this script +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +OCTANT_ROOT=${DIR}/../../.. + +protoc -I${OCTANT_ROOT}/vendor -I${OCTANT_ROOT} -I. --go_out=plugins=grpc:. 
dashboard.proto diff --git a/pkg/store/fake/mock_store.go b/pkg/store/fake/mock_store.go index 319b56eb25..f696684d3b 100644 --- a/pkg/store/fake/mock_store.go +++ b/pkg/store/fake/mock_store.go @@ -38,6 +38,20 @@ func (m *MockStore) EXPECT() *MockStoreMockRecorder { return m.recorder } +// Create mocks base method +func (m *MockStore) Create(arg0 context.Context, arg1 *unstructured.Unstructured) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create +func (mr *MockStoreMockRecorder) Create(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockStore)(nil).Create), arg0, arg1) +} + // Delete mocks base method func (m *MockStore) Delete(arg0 context.Context, arg1 store.Key) error { m.ctrl.T.Helper() diff --git a/pkg/store/store.go b/pkg/store/store.go index 1eb4fa965c..a3a1042e6b 100644 --- a/pkg/store/store.go +++ b/pkg/store/store.go @@ -37,6 +37,7 @@ type Store interface { RegisterOnUpdate(fn UpdateFn) Update(ctx context.Context, key Key, updater func(*unstructured.Unstructured) error) error IsLoading(ctx context.Context, key Key) bool + Create(ctx context.Context, object *unstructured.Unstructured) error } // Key is a key for the object store. diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index 79668ff5c5..a4b8c0cd3a 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -38,7 +38,6 @@ package proto import ( "fmt" "log" - "os" "reflect" "sort" "strconv" @@ -194,7 +193,7 @@ func (p *Properties) Parse(s string) { // "bytes,49,opt,name=foo,def=hello!" fields := strings.Split(s, ",") // breaks def=, but handled below. if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + log.Printf("proto: tag has too few fields: %q", s) return } @@ -214,7 +213,7 @@ func (p *Properties) Parse(s string) { p.WireType = WireBytes // no numeric converter for non-numeric types default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + log.Printf("proto: tag has unknown wire type: %q", s) return } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go index 1ddfe836f4..5d1e3f0f61 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go @@ -54,6 +54,8 @@ const generatedCodeVersion = 4 const ( contextPkgPath = "context" grpcPkgPath = "google.golang.org/grpc" + codePkgPath = "google.golang.org/grpc/codes" + statusPkgPath = "google.golang.org/grpc/status" ) func init() { @@ -216,6 +218,12 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi g.P("}") g.P() + // Server Unimplemented struct for forward compatability. + if deprecated { + g.P(deprecationComment) + } + g.generateUnimplementedServer(servName, service) + // Server registration. 
if deprecated { g.P(deprecationComment) @@ -269,6 +277,35 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi g.P() } +// generateUnimplementedServer creates the unimplemented server struct +func (g *grpc) generateUnimplementedServer(servName string, service *pb.ServiceDescriptorProto) { + serverType := servName + "Server" + g.P("// Unimplemented", serverType, " can be embedded to have forward compatible implementations.") + g.P("type Unimplemented", serverType, " struct {") + g.P("}") + g.P() + // UnimplementedServer's concrete methods + for _, method := range service.Method { + g.generateServerMethodConcrete(servName, method) + } + g.P() +} + +// generateServerMethodConcrete returns unimplemented methods which ensure forward compatibility +func (g *grpc) generateServerMethodConcrete(servName string, method *pb.MethodDescriptorProto) { + header := g.generateServerSignatureWithParamNames(servName, method) + g.P("func (*Unimplemented", servName, "Server) ", header, " {") + var nilArg string + if !method.GetServerStreaming() && !method.GetClientStreaming() { + nilArg = "nil, " + } + methName := generator.CamelCase(method.GetName()) + statusPkg := string(g.gen.AddImport(statusPkgPath)) + codePkg := string(g.gen.AddImport(codePkgPath)) + g.P("return ", nilArg, statusPkg, `.Errorf(`, codePkg, `.Unimplemented, "method `, methName, ` not implemented")`) + g.P("}") +} + // generateClientSignature returns the client-side signature for a method. func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string { origMethName := method.GetName() @@ -368,6 +405,30 @@ func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar strin } } +// generateServerSignatureWithParamNames returns the server-side signature for a method with parameter names. +func (g *grpc) generateServerSignatureWithParamNames(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + + var reqArgs []string + ret := "error" + if !method.GetServerStreaming() && !method.GetClientStreaming() { + reqArgs = append(reqArgs, "ctx "+contextPkg+".Context") + ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" + } + if !method.GetClientStreaming() { + reqArgs = append(reqArgs, "req *"+g.typeName(method.GetInputType())) + } + if method.GetServerStreaming() || method.GetClientStreaming() { + reqArgs = append(reqArgs, "srv "+servName+"_"+generator.CamelCase(origMethName)+"Server") + } + + return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + // generateServerSignature returns the server-side signature for a method. func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string { origMethName := method.GetName() diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 57ae35f6b5..0b9907f89b 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,12 +1,15 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// source: google/rpc/status.proto -package status // import "google.golang.org/genproto/googleapis/rpc/status" +package status -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import any "github.com/golang/protobuf/ptypes/any" +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -17,7 +20,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // The `Status` type defines a logical error model that is suitable for // different programming environments, including REST APIs and RPC APIs. It is @@ -93,16 +96,17 @@ func (m *Status) Reset() { *m = Status{} } func (m *Status) String() string { return proto.CompactTextString(m) } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_status_ced6ddf76350620b, []int{0} + return fileDescriptor_24d244abaf643bfe, []int{0} } + func (m *Status) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Status.Unmarshal(m, b) } func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Status.Marshal(b, m, deterministic) } -func (dst *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(dst, src) +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) } func (m *Status) XXX_Size() int { return xxx_messageInfo_Status.Size(m) @@ -138,9 +142,9 @@ func init() { proto.RegisterType((*Status)(nil), "google.rpc.Status") } -func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_status_ced6ddf76350620b) } +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) } -var fileDescriptor_status_ced6ddf76350620b = []byte{ +var fileDescriptor_24d244abaf643bfe = []byte{ // 209 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index 024408e646..a11e8cbca6 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -2,18 +2,20 @@ language: go matrix: include: - - go: 1.12.x + - go: 1.13.x env: VET=1 GO111MODULE=on - - go: 1.12.x + - go: 1.13.x env: RACE=1 GO111MODULE=on - - go: 1.12.x + - go: 1.13.x env: RUN386=1 - - go: 1.12.x + - go: 1.13.x env: GRPC_GO_RETRY=on + - go: 1.13.x + env: TESTEXTRAS=1 + - go: 1.12.x + env: GO111MODULE=on - go: 1.11.x env: GO111MODULE=on - - go: 1.10.x - - go: 1.9.x - go: 1.9.x env: GAE=1 @@ -23,17 +25,18 @@ before_install: - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi - - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then VET_SKIP_PROTO=1; fi + - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi install: - try3() { eval "$*" || eval "$*" || 
eval "$*"; } - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - - if [[ "${GAE}" = 1 ]]; then source ./install_gae.sh; make testappenginedeps; fi - - if [[ "${VET}" = 1 ]]; then ./vet.sh -install; fi + - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi + - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi script: - set -e - - if [[ "${VET}" = 1 ]]; then ./vet.sh; fi - - if [[ "${GAE}" = 1 ]]; then make testappengine; exit 0; fi - - if [[ "${RACE}" = 1 ]]; then make testrace; exit 0; fi + - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi + - if [[ -n "${VET}" ]]; then ./vet.sh; fi + - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi + - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi - make test diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md new file mode 100644 index 0000000000..9d4213ebca --- /dev/null +++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 6e69b28c27..4f1567e2f9 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -1,6 +1,8 @@ # How to contribute -We definitely welcome your patches and contributions to gRPC! +We definitely welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) +and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md new file mode 100644 index 0000000000..d6ff267471 --- /dev/null +++ b/vendor/google.golang.org/grpc/GOVERNANCE.md @@ -0,0 +1 @@ +This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md). diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md new file mode 100644 index 0000000000..093c82b3af --- /dev/null +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -0,0 +1,27 @@ +This page lists all active maintainers of this repository. If you were a +maintainer and would like to add your name to the Emeritus list, please send us a +PR. + +See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) +for governance guidelines and how to become a maintainer. +See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) +for general contribution guidelines. 
+ +## Maintainers (in alphabetical order) +- [canguler](https://github.com/canguler), Google LLC +- [cesarghali](https://github.com/cesarghali), Google LLC +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC +- [menghanl](https://github.com/menghanl), Google LLC +- [srini100](https://github.com/srini100), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez), Google LLC +- [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [lyuxuan](https://github.com/lyuxuan), Google LLC +- [makmukhi](https://github.com/makmukhi), Google LLC +- [matt-kwong](https://github.com/matt-kwong), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index db982aabde..410f7d56d4 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -19,6 +19,9 @@ proto: test: testdeps go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... +testsubmodule: testdeps + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + testappengine: testappenginedeps goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 0000000000..68ffc62013 --- /dev/null +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. +// +// All APIs in this package are EXPERIMENTAL. +package attributes + +import "fmt" + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs. Keys must be hashable, and users should define their own +// types for keys. +type Attributes struct { + m map[interface{}]interface{} +} + +// New returns a new Attributes containing all key/value pairs in kvs. If the +// same key appears multiple times, the last value overwrites all previous +// values for that key. Panics if len(kvs) is not even. +func New(kvs ...interface{}) *Attributes { + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} + for i := 0; i < len(kvs)/2; i++ { + a.m[kvs[i*2]] = kvs[i*2+1] + } + return a +} + +// WithValues returns a new Attributes containing all key/value pairs in a and +// kvs. Panics if len(kvs) is not even. If the same key appears multiple +// times, the last value overwrites all previous values for that key. 
To +// remove an existing key, use a nil value. +func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + for k, v := range a.m { + n.m[k] = v + } + for i := 0; i < len(kvs)/2; i++ { + n.m[kvs[i*2]] = kvs[i*2+1] + } + return n +} + +// Value returns the value associated with these attributes for key, or nil if +// no value is associated with key. +func (a *Attributes) Value(key interface{}) interface{} { + return a.m[key] +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index 97c6e2568f..ff7c3ee6f4 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -23,16 +23,36 @@ package grpc import ( "time" + + "google.golang.org/grpc/backoff" ) // DefaultBackoffConfig uses values specified for backoff in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. var DefaultBackoffConfig = BackoffConfig{ MaxDelay: 120 * time.Second, } // BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. type BackoffConfig struct { // MaxDelay is the upper bound of backoff delay. MaxDelay time.Duration } + +// ConnectParams defines the parameters for connecting and retrying. Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This API is EXPERIMENTAL. +type ConnectParams struct { + // Backoff specifies the configuration options for connection backoff. + Backoff backoff.Config + // MinConnectTimeout is the minimum amount of time we are willing to give a + // connection to complete. + MinConnectTimeout time.Duration +} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 0000000000..0787d0b50c --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff provides configuration options for backoff. +// +// More details can be found at: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// All APIs in this package are experimental. +package backoff + +import "time" + +// Config defines the configuration options for backoff. +type Config struct { + // BaseDelay is the amount of time to backoff after the first failure. + BaseDelay time.Duration + // Multiplier is the factor with which to multiply backoffs after a + // failed retry. Should ideally be greater than 1. 
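As an aside on the new attributes package introduced above: it is a small immutable key/value store that resolvers and balancers can hang metadata on. The following is a minimal usage sketch, not part of the patch; regionKey and weightKey are made-up key types, since the package asks callers to define their own hashable key types.

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// Hypothetical key types; defining distinct unexported types avoids collisions.
type regionKey struct{}
type weightKey struct{}

func main() {
	a := attributes.New(regionKey{}, "us-east-1") // immutable set of pairs
	b := a.WithValues(weightKey{}, 10)            // copy with an extra pair

	fmt.Println(b.Value(regionKey{}), b.Value(weightKey{})) // us-east-1 10
	fmt.Println(a.Value(weightKey{}))                       // <nil>: a itself is unchanged
}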
+ Multiplier float64 + // Jitter is the factor with which backoffs are randomized. + Jitter float64 + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// DefaultConfig is a backoff configuration with the default values specfied +// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This should be useful for callers who want to configure backoff with +// non-default values only for a subset of the options. +var DefaultConfig = Config{ + BaseDelay: 1.0 * time.Second, + Multiplier: 1.6, + Jitter: 0.2, + MaxDelay: 120 * time.Second, +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index c266f4ec10..9258858ed7 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -117,6 +117,15 @@ type NewSubConnOptions struct { HealthCheckEnabled bool } +// State contains the balancer's state relevant to the gRPC ClientConn. +type State struct { + // State contains the connectivity state of the balancer, which is used to + // determine the state of the ClientConn. + ConnectivityState connectivity.State + // Picker is used to choose connections (SubConns) for RPCs. + Picker V2Picker +} + // ClientConn represents a gRPC ClientConn. // // This interface is to be implemented by gRPC. Users should not need a @@ -137,10 +146,19 @@ type ClientConn interface { // // gRPC will update the connectivity state of the ClientConn, and will call pick // on the new picker to pick new SubConn. + // + // Deprecated: use UpdateState instead UpdateBalancerState(s connectivity.State, p Picker) + // UpdateState notifies gRPC that the balancer's internal state has + // changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConns. + UpdateState(State) + // ResolveNow is called by balancer to notify gRPC to do a name resolving. - ResolveNow(resolver.ResolveNowOption) + ResolveNow(resolver.ResolveNowOptions) // Target returns the dial target for this ClientConn. // @@ -185,11 +203,14 @@ type ConfigParser interface { ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) } -// PickOptions contains addition information for the Pick operation. -type PickOptions struct { +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { // FullMethodName is the method name that NewClientStream() is called // with. The canonical format is /service/Method. FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context } // DoneInfo contains additional information for done. @@ -215,7 +236,7 @@ var ( ErrNoSubConnAvailable = errors.New("no SubConn is available") // ErrTransientFailure indicates all SubConns are in TransientFailure. // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. - ErrTransientFailure = errors.New("all SubConns are in TransientFailure") + ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure")) ) // Picker is used by gRPC to pick a SubConn to send an RPC. @@ -223,6 +244,8 @@ var ( // internal state has changed. // // The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +// +// Deprecated: use V2Picker instead type Picker interface { // Pick returns the SubConn to be used to send the RPC. 
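The new backoff package and the ConnectParams struct above replace the deprecated BackoffConfig. A rough dialing sketch, assuming the WithConnectParams dial option shipped with this grpc version and a placeholder target address:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Start from DefaultConfig and override only what differs.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 5 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}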
// The returned SubConn must be one returned by NewSubConn(). @@ -243,18 +266,76 @@ type Picker interface { // // If the returned error is not nil: // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() - // - If the error is ErrTransientFailure: + // - If the error is ErrTransientFailure or implements IsTransientFailure() + // bool, returning true: // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() // is called to pick again; // - Otherwise, RPC will fail with unavailable error. // - Else (error is other non-nil error): - // - The RPC will fail with unavailable error. + // - The RPC will fail with the error's status code, or Unknown if it is + // not a status error. // // The returned done() function will be called once the rpc has finished, // with the final status of that RPC. If the SubConn returned is not a // valid SubConn type, done may not be called. done may be nil if balancer // doesn't care about the RPC status. - Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) + Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error) +} + +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { + // SubConn is the connection to use for this pick, if its state is Ready. + // If the state is not Ready, gRPC will block the RPC until a new Picker is + // provided by the balancer (using ClientConn.UpdateState). The SubConn + // must be one returned by ClientConn.NewSubConn. + SubConn SubConn + + // Done is called when the RPC is completed. If the SubConn is not ready, + // this will be called with a nil parameter. If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) +} + +type transientFailureError struct { + error +} + +func (e *transientFailureError) IsTransientFailure() bool { return true } + +// TransientFailureError wraps err in an error implementing +// IsTransientFailure() bool, returning true. +func TransientFailureError(err error) error { + return &transientFailureError{error: err} +} + +// V2Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +type V2Picker interface { + // Pick returns the connection to use for this RPC and related information. + // + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). + // + // If an error is returned: + // + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). + // + // - If the error implements IsTransientFailure() bool, returning true, + // wait for ready RPCs will wait, but non-wait for ready RPCs will be + // terminated with this error's Error() string and status code + // Unavailable. + // + // - Any other errors terminate all RPCs with the code and message + // provided. If the error is not a status error, it will be converted by + // gRPC to a status error with code Unknown. 
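The error contract described for V2Picker can be illustrated with a toy picker; this is only a sketch of the three error classes, and the mode field is hypothetical:

package sketch

import (
	"errors"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// errorModePicker demonstrates the three error classes a V2Picker may return;
// it never actually picks a SubConn.
type errorModePicker struct {
	mode int
}

func (p *errorModePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	switch p.mode {
	case 0:
		// Queue the RPC until the balancer pushes a fresh picker via UpdateState.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	case 1:
		// Wait-for-ready RPCs keep waiting; others fail with code Unavailable.
		return balancer.PickResult{}, balancer.TransientFailureError(errors.New("all backends unreachable"))
	default:
		// Any other error fails the RPC with its status code (Unknown if not a status error).
		return balancer.PickResult{}, status.Error(codes.ResourceExhausted, "client-side limit reached")
	}
}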
+ Pick(info PickInfo) (PickResult, error) } // Balancer takes input from gRPC, manages SubConns, and collects and aggregates @@ -292,8 +373,11 @@ type Balancer interface { // SubConnState describes the state of a SubConn. type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. ConnectivityState connectivity.State - // TODO: add last connection error + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error } // ClientConnState describes the state of a ClientConn relevant to the @@ -305,14 +389,23 @@ type ClientConnState struct { BalancerConfig serviceconfig.LoadBalancingConfig } +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") + // V2Balancer is defined for documentation purposes. If a Balancer also // implements V2Balancer, its UpdateClientConnState method will be called // instead of HandleResolvedAddrs and its UpdateSubConnState will be called // instead of HandleSubConnStateChange. type V2Balancer interface { // UpdateClientConnState is called by gRPC when the state of the ClientConn - // changes. - UpdateClientConnState(ClientConnState) + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. UpdateSubConnState(SubConn, SubConnState) @@ -326,9 +419,8 @@ type V2Balancer interface { // // It's not thread safe. type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transientFailure. + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. 
} // RecordTransition records state change happening in subConn and based on that @@ -348,8 +440,6 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne cse.numReady += updateVal case connectivity.Connecting: cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal } } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 1af88f0a3f..d952f09f34 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -20,6 +20,7 @@ package base import ( "context" + "errors" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" @@ -28,34 +29,44 @@ import ( ) type baseBuilder struct { - name string - pickerBuilder PickerBuilder - config Config + name string + pickerBuilder PickerBuilder + v2PickerBuilder V2PickerBuilder + config Config } func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &baseBalancer{ - cc: cc, - pickerBuilder: bb.pickerBuilder, + bal := &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + v2PickerBuilder: bb.v2PickerBuilder, subConns: make(map[resolver.Address]balancer.SubConn), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, - // Initialize picker to a picker that always return - // ErrNoSubConnAvailable, because when state of a SubConn changes, we - // may call UpdateBalancerState with this picker. - picker: NewErrPicker(balancer.ErrNoSubConnAvailable), - config: bb.config, + config: bb.config, } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. + if bb.pickerBuilder != nil { + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) + } else { + bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable) + } + return bal } func (bb *baseBuilder) Name() string { return bb.name } +var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer + type baseBalancer struct { - cc balancer.ClientConn - pickerBuilder PickerBuilder + cc balancer.ClientConn + pickerBuilder PickerBuilder + v2PickerBuilder V2PickerBuilder csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State @@ -63,6 +74,7 @@ type baseBalancer struct { subConns map[resolver.Address]balancer.SubConn scStates map[balancer.SubConn]connectivity.State picker balancer.Picker + v2Picker balancer.V2Picker config Config } @@ -70,7 +82,18 @@ func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) panic("not implemented") } -func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) { +func (b *baseBalancer) ResolverError(err error) { + switch b.state { + case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: + if b.picker != nil { + b.picker = NewErrPicker(err) + } else { + b.v2Picker = NewErrPickerV2(err) + } + } +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // TODO: handle s.ResolverState.Err (log if not nil) once implemented. // TODO: handle s.ResolverState.ServiceConfig? if grpclog.V(2) { @@ -101,26 +124,51 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) { // The entry will be deleted in HandleSubConnStateChange. 
} } + return nil } // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is // - errPicker with ErrTransientFailure if the balancer is in TransientFailure, // - built by the pickerBuilder with all READY SubConns otherwise. -func (b *baseBalancer) regeneratePicker() { +func (b *baseBalancer) regeneratePicker(err error) { if b.state == connectivity.TransientFailure { - b.picker = NewErrPicker(balancer.ErrTransientFailure) + if b.pickerBuilder != nil { + b.picker = NewErrPicker(balancer.ErrTransientFailure) + } else { + if err != nil { + b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(err)) + } else { + // This means the last subchannel transition was not to + // TransientFailure (otherwise err must be set), but the + // aggregate state of the balancer is TransientFailure, meaning + // there are no other addresses. + b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(errors.New("resolver returned no addresses"))) + } + } return } - readySCs := make(map[resolver.Address]balancer.SubConn) + if b.pickerBuilder != nil { + readySCs := make(map[resolver.Address]balancer.SubConn) - // Filter out all ready SCs from full subConn map. - for addr, sc := range b.subConns { - if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[addr] = sc + // Filter out all ready SCs from full subConn map. + for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[addr] = sc + } } + b.picker = b.pickerBuilder.Build(readySCs) + } else { + readySCs := make(map[balancer.SubConn]SubConnInfo) + + // Filter out all ready SCs from full subConn map. + for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} + } + } + b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } - b.picker = b.pickerBuilder.Build(readySCs) } func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { @@ -159,10 +207,14 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su // - the aggregated state of balancer became non-TransientFailure from TransientFailure if (s == connectivity.Ready) != (oldS == connectivity.Ready) || (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { - b.regeneratePicker() + b.regeneratePicker(state.ConnectionError) } - b.cc.UpdateBalancerState(b.state, b.picker) + if b.picker != nil { + b.cc.UpdateBalancerState(b.state, b.picker) + } else { + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker}) + } } // Close is a nop because base balancer doesn't have internal state to clean up, @@ -179,6 +231,19 @@ type errPicker struct { err error // Pick() always returns this err. } -func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { +func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { return nil, nil, p.err } + +// NewErrPickerV2 returns a V2Picker that always returns err on Pick(). +func NewErrPickerV2(err error) balancer.V2Picker { + return &errPickerV2{err: err} +} + +type errPickerV2 struct { + err error // Pick() always returns this err. 
+} + +func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go index 34b1f2994a..4192918b9e 100644 --- a/vendor/google.golang.org/grpc/balancer/base/base.go +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -42,6 +42,26 @@ type PickerBuilder interface { Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker } +// V2PickerBuilder creates balancer.V2Picker. +type V2PickerBuilder interface { + // Build returns a picker that will be used by gRPC to pick a SubConn. + Build(info PickerBuildInfo) balancer.V2Picker +} + +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. +type PickerBuildInfo struct { + // ReadySCs is a map from all ready SubConns to the Addresses used to + // create them. + ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. +type SubConnInfo struct { + Address resolver.Address // the address used to create this SubConn +} + // NewBalancerBuilder returns a balancer builder. The balancers // built by this builder will use the picker builder to build pickers. func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { @@ -62,3 +82,12 @@ func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) config: config, } } + +// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config. +func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + v2PickerBuilder: pb, + config: config, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 29f7a4ddd6..d4d645501c 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,14 +22,12 @@ package roundrobin import ( - "context" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/resolver" ) // Name is the name of round_robin balancer. @@ -37,7 +35,7 @@ const Name = "round_robin" // newBuilder creates a new roundrobin balancer builder. 
func newBuilder() balancer.Builder { - return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) + return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) } func init() { @@ -46,13 +44,13 @@ func init() { type rrPickerBuilder struct{} -func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker { - grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs) - if len(readySCs) == 0 { - return base.NewErrPicker(balancer.ErrNoSubConnAvailable) +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker { + grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info) + if len(info.ReadySCs) == 0 { + return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable) } var scs []balancer.SubConn - for _, sc := range readySCs { + for sc := range info.ReadySCs { scs = append(scs, sc) } return &rrPicker{ @@ -74,10 +72,10 @@ type rrPicker struct { next int } -func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { p.mu.Lock() sc := p.subConns[p.next] p.next = (p.next + 1) % len(p.subConns) p.mu.Unlock() - return sc, nil, nil + return balancer.PickResult{SubConn: sc}, nil } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 8df4095ca9..824f28e740 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -25,6 +25,8 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) @@ -32,64 +34,17 @@ import ( type scStateUpdate struct { sc balancer.SubConn state connectivity.State -} - -// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple. -// TODO make a general purpose buffer that uses interface{}. -type scStateUpdateBuffer struct { - c chan *scStateUpdate - mu sync.Mutex - backlog []*scStateUpdate -} - -func newSCStateUpdateBuffer() *scStateUpdateBuffer { - return &scStateUpdateBuffer{ - c: make(chan *scStateUpdate, 1), - } -} - -func (b *scStateUpdateBuffer) put(t *scStateUpdate) { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) == 0 { - select { - case b.c <- t: - return - default: - } - } - b.backlog = append(b.backlog, t) -} - -func (b *scStateUpdateBuffer) load() { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) > 0 { - select { - case b.c <- b.backlog[0]: - b.backlog[0] = nil - b.backlog = b.backlog[1:] - default: - } - } -} - -// get returns the channel that the scStateUpdate will be sent to. -// -// Upon receiving, the caller should call load to send another -// scStateChangeTuple onto the channel if there is any. -func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { - return b.c + err error } // ccBalancerWrapper is a wrapper on top of cc for balancers. // It implements balancer.ClientConn interface. 
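The V2PickerBuilder and NewBalancerBuilderV2 additions give third-party balancers the same scaffolding round_robin now uses above. A sketch of a hypothetical "first ready SubConn" policy built on them (the policy name and package are made up, not registered by this patch):

package firstpick

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// Name is a hypothetical policy name.
const Name = "first_ready_sketch"

func init() {
	balancer.Register(base.NewBalancerBuilderV2(Name, &pickerBuilder{}, base.Config{HealthCheck: true}))
}

type pickerBuilder struct{}

func (*pickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
	}
	for sc := range info.ReadySCs {
		return &picker{sc: sc} // any ready SubConn will do for this sketch
	}
	return nil // unreachable
}

type picker struct {
	sc balancer.SubConn
}

func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}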
type ccBalancerWrapper struct { - cc *ClientConn - balancer balancer.Balancer - stateChangeQueue *scStateUpdateBuffer - ccUpdateCh chan *balancer.ClientConnState - done chan struct{} + cc *ClientConn + balancerMu sync.Mutex // synchronizes calls to the balancer + balancer balancer.Balancer + scBuffer *buffer.Unbounded + done *grpcsync.Event mu sync.Mutex subConns map[*acBalancerWrapper]struct{} @@ -97,11 +52,10 @@ type ccBalancerWrapper struct { func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ - cc: cc, - stateChangeQueue: newSCStateUpdateBuffer(), - ccUpdateCh: make(chan *balancer.ClientConnState, 1), - done: make(chan struct{}), - subConns: make(map[*acBalancerWrapper]struct{}), + cc: cc, + scBuffer: buffer.NewUnbounded(), + done: grpcsync.NewEvent(), + subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() ccb.balancer = b.Build(ccb, bopts) @@ -113,36 +67,23 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.stateChangeQueue.get(): - ccb.stateChangeQueue.load() - select { - case <-ccb.done: - ccb.balancer.Close() - return - default: + case t := <-ccb.scBuffer.Get(): + ccb.scBuffer.Load() + if ccb.done.HasFired() { + break } + ccb.balancerMu.Lock() + su := t.(*scStateUpdate) if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state}) + ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) } else { - ccb.balancer.HandleSubConnStateChange(t.sc, t.state) + ccb.balancer.HandleSubConnStateChange(su.sc, su.state) } - case s := <-ccb.ccUpdateCh: - select { - case <-ccb.done: - ccb.balancer.Close() - return - default: - } - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateClientConnState(*s) - } else { - ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil) - } - case <-ccb.done: + ccb.balancerMu.Unlock() + case <-ccb.done.Done(): } - select { - case <-ccb.done: + if ccb.done.HasFired() { ccb.balancer.Close() ccb.mu.Lock() scs := ccb.subConns @@ -151,19 +92,17 @@ func (ccb *ccBalancerWrapper) watcher() { for acbw := range scs { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } - ccb.UpdateBalancerState(connectivity.Connecting, nil) + ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) return - default: } - ccb.cc.firstResolveEvent.Fire() } } func (ccb *ccBalancerWrapper) close() { - close(ccb.done) + ccb.done.Fire() } -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be // created. tearDown() generates a state change with Shutdown state, we @@ -174,30 +113,29 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co if sc == nil { return } - ccb.stateChangeQueue.put(&scStateUpdate{ + ccb.scBuffer.Put(&scStateUpdate{ sc: sc, state: s, + err: err, }) } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) { - if ccb.cc.curBalancerName != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. 
- s := &ccs.ResolverState - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.balancerMu.Lock() + defer ccb.balancerMu.Unlock() + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + return ub.UpdateClientConnState(*ccs) } - select { - case <-ccb.ccUpdateCh: - default: + ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil) + return nil +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + ccb.balancerMu.Lock() + ub.ResolverError(err) + ccb.balancerMu.Unlock() } - ccb.ccUpdateCh <- ccs } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { @@ -250,7 +188,22 @@ func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balanc ccb.cc.csMgr.updateState(s) } -func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) { +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + ccb.cc.blockingpicker.updatePickerV2(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { ccb.cc.resolveNow(o) } diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go index 66e9a44ac4..db04b08b84 100644 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -19,7 +19,6 @@ package grpc import ( - "context" "sync" "google.golang.org/grpc/balancer" @@ -49,7 +48,7 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B csEvltr: &balancer.ConnectivityStateEvaluator{}, state: connectivity.Idle, } - cc.UpdateBalancerState(connectivity.Idle, bw) + cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw}) go bw.lbWatcher() return bw } @@ -243,7 +242,7 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne if bw.state != sa { bw.state = sa } - bw.cc.UpdateBalancerState(bw.state, bw) + bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw}) if s == connectivity.Shutdown { // Remove state for this sc. delete(bw.connSt, sc) @@ -275,17 +274,17 @@ func (bw *balancerWrapper) Close() { // The picker is the balancerWrapper itself. // It either blocks or returns error, consistent with v1 balancer Get(). -func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) { +func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) { failfast := true // Default failfast is true. 
- if ss, ok := rpcInfoFromContext(ctx); ok { + if ss, ok := rpcInfoFromContext(info.Ctx); ok { failfast = ss.failfast } - a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast}) + a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast}) if err != nil { - return nil, nil, err + return balancer.PickResult{}, toRPCErr(err) } if p != nil { - done = func(balancer.DoneInfo) { p() } + result.Done = func(balancer.DoneInfo) { p() } defer func() { if err != nil { p() @@ -297,38 +296,39 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) defer bw.mu.Unlock() if bw.pickfirst { // Get the first sc in conns. - for _, sc := range bw.conns { - return sc, done, nil + for _, result.SubConn = range bw.conns { + return result, nil } - return nil, nil, balancer.ErrNoSubConnAvailable + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } - sc, ok1 := bw.conns[resolver.Address{ + var ok1 bool + result.SubConn, ok1 = bw.conns[resolver.Address{ Addr: a.Addr, Type: resolver.Backend, ServerName: "", Metadata: a.Metadata, }] - s, ok2 := bw.connSt[sc] + s, ok2 := bw.connSt[result.SubConn] if !ok1 || !ok2 { // This can only happen due to a race where Get() returned an address // that was subsequently removed by Notify. In this case we should // retry always. - return nil, nil, balancer.ErrNoSubConnAvailable + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } switch s.s { case connectivity.Ready, connectivity.Idle: - return sc, done, nil + return result, nil case connectivity.Shutdown, connectivity.TransientFailure: // If the returned sc has been shut down or is in transient failure, // return error, and this RPC will fail or wait for another picker (if // non-failfast). - return nil, nil, balancer.ErrTransientFailure + return balancer.PickResult{}, balancer.ErrTransientFailure default: // For other states (connecting or unknown), the v1 balancer would // traditionally wait until ready and then issue the RPC. Returning // ErrNoSubConnAvailable will be a slight improvement in that it will // allow the balancer to choose another address in case others are // connected. - return nil, nil, balancer.ErrNoSubConnAvailable + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index a7643df7d2..f58740b250 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -31,7 +31,7 @@ import ( "time" "google.golang.org/grpc/balancer" - _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" @@ -42,10 +42,12 @@ import ( "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" - _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. - _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" + + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. 
) const ( @@ -186,11 +188,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } if cc.dopts.defaultServiceConfigRawJSON != nil { - sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) - if err != nil { - return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err) + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) } - cc.dopts.defaultServiceConfig = sc + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) } cc.mkp = cc.dopts.copts.KeepaliveParams @@ -235,29 +237,28 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } } if cc.dopts.bs == nil { - cc.dopts.bs = backoff.Exponential{ - MaxDelay: DefaultBackoffConfig.MaxDelay, + cc.dopts.bs = backoff.DefaultExponential + } + + // Determine the resolver to use. + cc.parsedTarget = parseTarget(cc.target) + grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) + resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + // If resolver builder is still nil, the parsed target's scheme is + // not registered. Fallback to default resolver and set Endpoint to + // the original target. + grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + cc.parsedTarget = resolver.Target{ + Scheme: resolver.GetDefaultScheme(), + Endpoint: target, } - } - if cc.dopts.resolverBuilder == nil { - // Only try to parse target when resolver builder is not already set. - cc.parsedTarget = parseTarget(cc.target) - grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) - if cc.dopts.resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original target. - grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) + resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) } - } else { - cc.parsedTarget = resolver.Target{Endpoint: target} } + creds := cc.dopts.copts.TransportCredentials if creds != nil && creds.Info().ServerName != "" { cc.authority = creds.Info().ServerName @@ -297,14 +298,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc) + rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) if err != nil { return nil, fmt.Errorf("failed to build resolver: %v", err) } - cc.mu.Lock() cc.resolverWrapper = rWrapper cc.mu.Unlock() + // A blocking dial blocks until the clientConn is ready. if cc.dopts.block { for { @@ -443,7 +444,32 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { return csm.notifyChan } -// ClientConn represents a client connection to an RPC server. +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. 
+type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. + Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. type ClientConn struct { ctx context.Context cancel context.CancelFunc @@ -532,58 +558,104 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { } } -func (cc *ClientConn) updateResolverState(s resolver.State) error { +var emptyServiceConfig *ServiceConfig + +func init() { + cfg := parseServiceConfig("{}") + if cfg.Err != nil { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { + if cc.sc != nil { + cc.applyServiceConfigAndBalancer(cc.sc, addrs) + return + } + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs) + } else { + cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs) + } +} + +func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + defer cc.firstResolveEvent.Fire() cc.mu.Lock() - defer cc.mu.Unlock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. if cc.conns == nil { + cc.mu.Unlock() return nil } - if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { - if cc.dopts.defaultServiceConfig != nil && cc.sc == nil { - cc.applyServiceConfig(cc.dopts.defaultServiceConfig) + if err != nil { + // May need to apply the initial service config in case the resolver + // doesn't support service configs, or doesn't provide a service config + // with the new addresses. + cc.maybeApplyDefaultServiceConfig(nil) + + if cc.balancerWrapper != nil { + cc.balancerWrapper.resolverError(err) } - } else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok { - cc.applyServiceConfig(sc) + + // No addresses are valid with err set; return early. + cc.mu.Unlock() + return balancer.ErrBadResolverState } - var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. 
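The ClientConnInterface seam defined above lets hand-written wrappers (and generated code) accept anything that can Invoke and NewStream, which makes them easy to fake in tests. A small sketch; the Greeter type and method path are hypothetical:

package client

import (
	"context"

	"google.golang.org/grpc"
)

// Greeter depends only on ClientConnInterface, so tests can pass a fake
// instead of a real *grpc.ClientConn.
type Greeter struct {
	cc grpc.ClientConnInterface
}

func NewGreeter(cc grpc.ClientConnInterface) *Greeter { return &Greeter{cc: cc} }

func (g *Greeter) SayHello(ctx context.Context, in, out interface{}) error {
	// Invoke performs the unary RPC; the method path is a placeholder.
	return g.cc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out)
}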
- var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - balCfg = cc.sc.lbConfig.cfg + var ret error + if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig(s.Addresses) + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? + } else { + if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { + cc.applyServiceConfigAndBalancer(sc, s.Addresses) } else { - var isGRPCLB bool - for _, a := range s.Addresses { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break + ret = balancer.ErrBadResolverState + if cc.balancerWrapper == nil { + var err error + if s.ServiceConfig.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) + } else { + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) + cc.mu.Unlock() + return ret } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) } - cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) - return nil + var balCfg serviceconfig.LoadBalancingConfig + if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + balCfg = cc.sc.lbConfig.cfg + } + + cbn := cc.curBalancerName + bw := cc.balancerWrapper + cc.mu.Unlock() + if cbn != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + for i := 0; i < len(s.Addresses); { + if s.Addresses[i].Type == resolver.GRPCLB { + copy(s.Addresses[i:], s.Addresses[i+1:]) + s.Addresses = s.Addresses[:len(s.Addresses)-1] + continue + } + i++ + } + } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + if ret == nil { + ret = uccsErr // prefer ErrBadResolver state since any other error is + // currently meaningless to the caller. + } + return ret } // switchBalancer starts the switching from current balancer to the balancer @@ -631,7 +703,7 @@ func (cc *ClientConn) switchBalancer(name string) { cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) } -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() @@ -639,7 +711,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi } // TODO(bar switching) send updates to all balancer wrappers when balancer // gracefully switching is supported. 
- cc.balancerWrapper.handleSubConnStateChange(sc, s) + cc.balancerWrapper.handleSubConnStateChange(sc, s, err) cc.mu.Unlock() } @@ -736,7 +808,7 @@ func (ac *addrConn) connect() error { } // Update connectivity state within the lock to prevent subsequent or // concurrent calls from resetting the transport more than once. - ac.updateConnectivityState(connectivity.Connecting) + ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() // Start a goroutine connecting to the server asynchronously. @@ -822,7 +894,8 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{ + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, FullMethodName: method, }) if err != nil { @@ -831,10 +904,10 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st return t, done, nil } -func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error { +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) { if sc == nil { // should never reach here. - return fmt.Errorf("got nil pointer for service config") + return } cc.sc = sc @@ -850,10 +923,38 @@ func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error { cc.retryThrottler.Store((*retryThrottler)(nil)) } - return nil + if cc.dopts.balancerBuilder == nil { + // Only look at balancer types and switch balancer if balancer dial + // option is not set. + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } + } + cc.switchBalancer(newBalancerName) + } else if cc.balancerWrapper == nil { + // Balancer dial option was set, and this is the first time handling + // resolved addresses. Build a balancer with dopts.balancerBuilder. + cc.curBalancerName = cc.dopts.balancerBuilder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + } } -func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() r := cc.resolverWrapper cc.mu.RUnlock() @@ -875,8 +976,9 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { // This API is EXPERIMENTAL. func (cc *ClientConn) ResetConnectBackoff() { cc.mu.Lock() - defer cc.mu.Unlock() - for ac := range cc.conns { + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { ac.resetConnectBackoff() } } @@ -962,7 +1064,7 @@ type addrConn struct { } // Note: this requires a lock on ac.mu. 
-func (ac *addrConn) updateConnectivityState(s connectivity.State) { +func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { if ac.state == s { return } @@ -975,7 +1077,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State) { Severity: channelz.CtINFO, }) } - ac.cc.handleSubConnStateChange(ac.acbw, s) + ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } // adjustParams updates parameters used to create transports upon @@ -995,7 +1097,7 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { func (ac *addrConn) resetTransport() { for i := 0; ; i++ { if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOption{}) + ac.cc.resolveNow(resolver.ResolveNowOptions{}) } ac.mu.Lock() @@ -1024,7 +1126,7 @@ func (ac *addrConn) resetTransport() { // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm connectDeadline := time.Now().Add(dialDuration) - ac.updateConnectivityState(connectivity.Connecting) + ac.updateConnectivityState(connectivity.Connecting, nil) ac.transport = nil ac.mu.Unlock() @@ -1037,7 +1139,7 @@ func (ac *addrConn) resetTransport() { ac.mu.Unlock() return } - ac.updateConnectivityState(connectivity.TransientFailure) + ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. b := ac.resetBackoff @@ -1093,6 +1195,7 @@ func (ac *addrConn) resetTransport() { // first successful one. It returns the transport, the address and a Event in // the successful case. The Event fires when the returned transport disconnects. func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { + var firstConnErr error for _, addr := range addrs { ac.mu.Lock() if ac.state == connectivity.Shutdown { @@ -1121,11 +1224,14 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T if err == nil { return newTr, addr, reconnect, nil } + if firstConnErr == nil { + firstConnErr = err + } ac.cc.blockingpicker.updateConnectionError(err) } // Couldn't connect to any address. - return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address") + return nil, resolver.Address{}, nil, firstConnErr } // createTransport creates a connection to addr. It returns the transport and a @@ -1136,10 +1242,16 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne onCloseCalled := make(chan struct{}) reconnect := grpcsync.NewEvent() + authority := ac.cc.authority + // addr.ServerName takes precedent over ClientConn authority, if present. + if addr.ServerName != "" { + authority = addr.ServerName + } + target := transport.TargetInfo{ Addr: addr.Addr, Metadata: addr.Metadata, - Authority: ac.cc.authority, + Authority: authority, } once := sync.Once{} @@ -1152,7 +1264,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // state to Connecting. // // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting) + ac.updateConnectivityState(connectivity.Connecting, nil) } }) ac.mu.Unlock() @@ -1167,7 +1279,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // state to Connecting. // // TODO: this should be Idle when grpc-go properly supports it. 
- ac.updateConnectivityState(connectivity.Connecting) + ac.updateConnectivityState(connectivity.Connecting, nil) } }) ac.mu.Unlock() @@ -1193,7 +1305,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne } select { - case <-time.After(connectDeadline.Sub(time.Now())): + case <-time.After(time.Until(connectDeadline)): // We didn't get the preface in time. newTr.Close() grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) @@ -1224,7 +1336,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { var healthcheckManagingState bool defer func() { if !healthcheckManagingState { - ac.updateConnectivityState(connectivity.Ready) + ac.updateConnectivityState(connectivity.Ready, nil) } }() @@ -1260,13 +1372,13 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { ac.mu.Unlock() return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) } - setConnectivityState := func(s connectivity.State) { + setConnectivityState := func(s connectivity.State, lastErr error) { ac.mu.Lock() defer ac.mu.Unlock() if ac.transport != currentTr { return } - ac.updateConnectivityState(s) + ac.updateConnectivityState(s, lastErr) } // Start the health checking stream. go func() { @@ -1331,8 +1443,8 @@ func (ac *addrConn) tearDown(err error) { curTr := ac.transport ac.transport = nil // We have to set the state to Shutdown before anything else to prevent races - // between setting the state and logic that waits on context cancelation / etc. - ac.updateConnectivityState(connectivity.Shutdown) + // between setting the state and logic that waits on context cancellation / etc. + ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} if err == errConnDrain && curTr != nil { @@ -1355,7 +1467,7 @@ func (ac *addrConn) tearDown(err error) { }, }) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity beng deleted, and thus prevent it from being deleted right away. + // the entity being deleted, and thus prevent it from being deleted right away. channelz.RemoveEntry(ac.channelzID) } ac.mu.Unlock() @@ -1445,3 +1557,12 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { // Deprecated: This error is never returned by grpc and should not be // referenced by users. var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if cc.parsedTarget.Scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(cc.parsedTarget.Scheme) +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 8ea3d4a1dc..845ce5d216 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -24,16 +24,12 @@ package credentials // import "google.golang.org/grpc/credentials" import ( "context" - "crypto/tls" - "crypto/x509" "errors" "fmt" - "io/ioutil" "net" - "strings" "github.com/golang/protobuf/proto" - "google.golang.org/grpc/credentials/internal" + "google.golang.org/grpc/internal" ) // PerRPCCredentials defines the common interface for the credentials which need to @@ -45,7 +41,8 @@ type PerRPCCredentials interface { // context. 
If a status code is returned, it will be used as the status // for the RPC. uri is the URI of the entry point for the request. // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. + // timeout and cancellation. Additionally, RequestInfo data will be + // available via ctx to this call. // TODO(zhaoq): Define the set of the qualified keys instead of leaving // it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) @@ -54,6 +51,48 @@ type PerRPCCredentials interface { RequireTransportSecurity() bool } +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( + // NoSecurity indicates a connection is insecure. + // The zero SecurityLevel value is invalid for backward compatibility. + NoSecurity SecurityLevel = iota + 1 + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. +func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo { + return c +} + // ProtocolInfo provides information regarding the gRPC wire protocol version, // security protocol, security protocol version in use, server name, etc. type ProtocolInfo struct { @@ -68,6 +107,8 @@ type ProtocolInfo struct { } // AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. type AuthInfo interface { AuthType() string } @@ -82,7 +123,8 @@ type TransportCredentials interface { // ClientHandshake does the authentication handshake specified by the corresponding // authentication protocol on rawConn for clients. It returns the authenticated // connection and the corresponding auth information about the connection. - // Implementations must use the provided context to implement timely cancellation. + // The auth information should embed CommonAuthInfo to return additional information about + // the credentials. Implementations must use the provided context to implement timely cancellation. // gRPC will try to reconnect if the error returned is a temporary error // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). // If the returned error is a wrapper error, implementations should make sure that @@ -92,7 +134,8 @@ type TransportCredentials interface { ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns // the authenticated connection and the corresponding auth information about - // the connection. + // the connection. 
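A custom transport credentials implementation can opt into the new security-level plumbing by embedding CommonAuthInfo in the AuthInfo it returns from its handshakes. Everything in this sketch (the type name, the auth type string) is hypothetical:

package mycreds

import "google.golang.org/grpc/credentials"

// sketchAuthInfo is a hypothetical AuthInfo returned by a custom
// TransportCredentials' ClientHandshake/ServerHandshake. Embedding
// CommonAuthInfo exposes the connection's SecurityLevel to helpers such as
// the CheckSecurityLevel function added below.
type sketchAuthInfo struct {
	credentials.CommonAuthInfo
}

func (*sketchAuthInfo) AuthType() string { return "sketch-transport" }

// A handshake over a fully protected connection would return something like:
var _ credentials.AuthInfo = &sketchAuthInfo{
	CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity},
}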
The auth information should embed CommonAuthInfo to return additional information + // about the credentials. // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) @@ -125,145 +168,63 @@ type Bundle interface { NewWithMode(mode string) (Bundle, error) } -// TLSInfo contains the auth information for a TLS authenticated connection. -// It implements the AuthInfo interface. -type TLSInfo struct { - State tls.ConnectionState -} - -// AuthType returns the type of TLSInfo as a string. -func (t TLSInfo) AuthType() string { - return "tls" +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. +// +// This API is experimental. +type RequestInfo struct { + // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") + Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo } -// GetSecurityValue returns security info requested by channelz. -func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { - v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], - } - // Currently there's no way to get LocalCertificate info from tls package. - if len(t.State.PeerCertificates) > 0 { - v.RemoteCertificate = t.State.PeerCertificates[0].Raw - } - return v -} +// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object. +type requestInfoKey struct{} -// tlsCreds is the credentials required for authenticating a connection using TLS. -type tlsCreds struct { - // TLS configuration - config *tls.Config +// RequestInfoFromContext extracts the RequestInfo from the context if it exists. +// +// This API is experimental. +func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { + ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) + return } -func (c tlsCreds) Info() ProtocolInfo { - return ProtocolInfo{ - SecurityProtocol: "tls", - SecurityVersion: "1.2", - ServerName: c.config.ServerName, +// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. +// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. +// +// This API is experimental. 
+func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { + type internalInfo interface { + GetCommonAuthInfo() *CommonAuthInfo } -} - -func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { - // use local cfg to avoid clobbering ServerName if using multiple endpoints - cfg := cloneTLSConfig(c.config) - if cfg.ServerName == "" { - colonPos := strings.LastIndex(authority, ":") - if colonPos == -1 { - colonPos = len(authority) - } - cfg.ServerName = authority[:colonPos] + ri, _ := RequestInfoFromContext(ctx) + if ri.AuthInfo == nil { + return errors.New("unable to obtain SecurityLevel from context") } - conn := tls.Client(rawConn, cfg) - errChannel := make(chan error, 1) - go func() { - errChannel <- conn.Handshake() - }() - select { - case err := <-errChannel: - if err != nil { - return nil, nil, err + if ci, ok := ri.AuthInfo.(internalInfo); ok { + // CommonAuthInfo.SecurityLevel has an invalid value. + if ci.GetCommonAuthInfo().SecurityLevel == 0 { + return nil } - case <-ctx.Done(): - return nil, nil, ctx.Err() - } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil -} - -func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { - conn := tls.Server(rawConn, c.config) - if err := conn.Handshake(); err != nil { - return nil, nil, err - } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil -} - -func (c *tlsCreds) Clone() TransportCredentials { - return NewTLS(c.config) -} - -func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { - c.config.ServerName = serverNameOverride - return nil -} - -const alpnProtoStrH2 = "h2" - -func appendH2ToNextProtos(ps []string) []string { - for _, p := range ps { - if p == alpnProtoStrH2 { - return ps + if ci.GetCommonAuthInfo().SecurityLevel < level { + return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) } } - ret := make([]string, 0, len(ps)+1) - ret = append(ret, ps...) - return append(ret, alpnProtoStrH2) -} - -// NewTLS uses c to construct a TransportCredentials based on TLS. -func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{cloneTLSConfig(c)} - tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) - return tc -} - -// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. -func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { - return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) -} - -// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. 
-func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) - if err != nil { - return nil, err - } - cp := x509.NewCertPool() - if !cp.AppendCertsFromPEM(b) { - return nil, fmt.Errorf("credentials: failed to append certificates") - } - return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil -} - -// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. -func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { - return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) + // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. + return nil } -// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key -// file for server. -func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, err +func init() { + internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) } - return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil } // ChannelzSecurityInfo defines the interface that security protocols should implement // in order to provide security info to channelz. +// +// This API is experimental. type ChannelzSecurityInfo interface { GetSecurityValue() ChannelzSecurityValue } @@ -271,66 +232,20 @@ type ChannelzSecurityInfo interface { // ChannelzSecurityValue defines the interface that GetSecurityValue() return value // should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue // and *OtherChannelzSecurityValue. +// +// This API is experimental. type ChannelzSecurityValue interface { isChannelzSecurityValue() } -// TLSChannelzSecurityValue defines the struct that TLS protocol should return -// from GetSecurityValue(), containing security info like cipher and certificate used. -type TLSChannelzSecurityValue struct { - ChannelzSecurityValue - StandardName string - LocalCertificate []byte - RemoteCertificate []byte -} - // OtherChannelzSecurityValue defines the struct that non-TLS protocol should return // from GetSecurityValue(), which contains protocol specific security info. Note // the Value field will be sent to users of channelz requesting channel info, and // thus sensitive info should better be avoided. +// +// This API is experimental. 
type OtherChannelzSecurityValue struct { ChannelzSecurityValue Name string Value proto.Message } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", -} - -// cloneTLSConfig returns a shallow clone of the exported -// fields of cfg, ignoring the unexported sync.Once, which -// contains a mutex and must not be copied. -// -// If cfg is nil, a new zero tls.Config is returned. -// -// TODO: inline this function if possible. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - - return cfg.Clone() -} diff --git a/vendor/google.golang.org/grpc/credentials/tls13.go b/vendor/google.golang.org/grpc/credentials/go12.go similarity index 100% rename from vendor/google.golang.org/grpc/credentials/tls13.go rename to vendor/google.golang.org/grpc/credentials/go12.go diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go new file mode 100644 index 0000000000..28b4f6232d --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -0,0 +1,225 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package credentials + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + + "google.golang.org/grpc/credentials/internal" +) + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState + CommonAuthInfo +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := cloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +const alpnProtoStrH2 = "h2" + +func appendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{cloneTLSConfig(c)} + tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. 
+func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// This API is EXPERIMENTAL. +type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + 
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +} + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index e8f34d0d6e..63f5ae21df 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -24,11 +24,12 @@ import ( "net" "time" + "google.golang.org/grpc/backoff" "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" + internalbackoff "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -47,7 +48,7 @@ type dialOptions struct { cp Compressor dc Decompressor - bs backoff.Strategy + bs internalbackoff.Strategy block bool insecure bool timeout time.Duration @@ -57,9 +58,7 @@ type dialOptions struct { callOptions []CallOption // This is used by v1 balancer dial option WithBalancer to support v1 // balancer, and also by WithBalancerName dial option. - balancerBuilder balancer.Builder - // This is to support grpclb. - resolverBuilder resolver.Builder + balancerBuilder balancer.Builder channelzParentID int64 disableServiceConfig bool disableRetry bool @@ -68,6 +67,11 @@ type dialOptions struct { minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string + // This is used by ccResolverWrapper to backoff between successive calls to + // resolver.ResolveNow(). The user will have no need to configure this, but + // we need to be able to configure this in tests. + resolveNowBackoff func(int) time.Duration + resolvers []resolver.Builder } // DialOption configures how we set up the connection. @@ -226,13 +230,6 @@ func WithBalancerName(balancerName string) DialOption { }) } -// withResolverBuilder is only for grpclb. -func withResolverBuilder(b resolver.Builder) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.resolverBuilder = b - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -246,8 +243,28 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption { }) } +// WithConnectParams configures the dialer to use the provided ConnectParams. +// +// The backoff configuration specified as part of the ConnectParams overrides +// all defaults specified in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider +// using the backoff.DefaultConfig as a base, in cases where you want to +// override only a subset of the backoff configuration. +// +// This API is EXPERIMENTAL. 
+func WithConnectParams(p ConnectParams) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = internalbackoff.Exponential{Config: p.Backoff} + o.minConnectTimeout = func() time.Duration { + return p.MinConnectTimeout + } + }) +} + // WithBackoffMaxDelay configures the dialer to use the provided maximum delay // when backing off after failed connection attempts. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. func WithBackoffMaxDelay(md time.Duration) DialOption { return WithBackoffConfig(BackoffConfig{MaxDelay: md}) } @@ -255,19 +272,18 @@ func WithBackoffMaxDelay(md time.Duration) DialOption { // WithBackoffConfig configures the dialer to use the provided backoff // parameters after connection failures. // -// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up -// for use. +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. func WithBackoffConfig(b BackoffConfig) DialOption { - return withBackoff(backoff.Exponential{ - MaxDelay: b.MaxDelay, - }) + bc := backoff.DefaultConfig + bc.MaxDelay = b.MaxDelay + return withBackoff(internalbackoff.Exponential{Config: bc}) } // withBackoff sets the backoff strategy used for connectRetryNum after a failed // connection attempt. // // This can be exported if arbitrary backoff strategies are allowed by gRPC. -func withBackoff(bs backoff.Strategy) DialOption { +func withBackoff(bs internalbackoff.Strategy) DialOption { return newFuncDialOption(func(o *dialOptions) { o.bs = bs }) @@ -322,8 +338,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption { // WithTimeout returns a DialOption that configures a timeout for dialing a // ClientConn initially. This is valid if and only if WithBlock() is present. // -// Deprecated: use DialContext and context.WithTimeout instead. Will be -// supported throughout 1.x. +// Deprecated: use DialContext instead of Dial and context.WithTimeout +// instead. Will be supported throughout 1.x. func WithTimeout(d time.Duration) DialOption { return newFuncDialOption(func(o *dialOptions) { o.timeout = d @@ -341,7 +357,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp } func init() { - internal.WithResolverBuilder = withResolverBuilder internal.WithHealthCheckFunc = withHealthCheckFunc } @@ -455,6 +470,8 @@ func WithAuthority(a string) DialOption { // WithChannelzParentID returns a DialOption that specifies the channelz ID of // current ClientConn's parent. This function is used in nested channel creation // (e.g. grpclb dial). +// +// This API is EXPERIMENTAL. func WithChannelzParentID(id int64) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id @@ -539,6 +556,7 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, }, + resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, } } @@ -552,3 +570,25 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { o.minConnectTimeout = f }) } + +// withResolveNowBackoff specifies the function that clientconn uses to backoff +// between successive calls to resolver.ResolveNow(). +// +// For testing purpose only. 
+func withResolveNowBackoff(f func(int) time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolveNowBackoff = f + }) +} + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register. They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// This API is EXPERIMENTAL. +func WithResolvers(rs ...resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolvers = append(o.resolvers, rs...) + }) +} diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 30a75da99d..195e8448b6 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -46,6 +46,10 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string + // EXPERIMENTAL: if a Compressor implements + // DecompressedSize(compressedBytes []byte) int, gRPC will call it + // to determine the size of the buffer allocated for the result of decompression. + // Return -1 to indicate unknown size. } var registeredCompressor = make(map[string]Compressor) diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index c1a8340c5b..2378361302 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -1,19 +1,16 @@ module google.golang.org/grpc +go 1.11 + require ( - cloud.google.com/go v0.26.0 // indirect - github.com/BurntSushi/toml v0.3.1 // indirect - github.com/client9/misspell v0.3.4 + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 + github.com/envoyproxy/protoc-gen-validate v0.1.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/mock v1.1.1 - github.com/golang/protobuf v1.2.0 + github.com/golang/protobuf v1.3.2 github.com/google/go-cmp v0.2.0 - golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 golang.org/x/net v0.0.0-20190311183353-d8887717615a golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a - golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 - google.golang.org/appengine v1.1.0 // indirect - google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 - honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 ) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index 741677d2e8..dd5d0cee7a 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -1,37 +1,53 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 
h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text 
v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 51bb9457cd..874ea6d98a 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -89,7 +89,7 @@ func Fatal(args ...interface{}) { } // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. -// It calles os.Exit() with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalf(format string, args ...interface{}) { logger.Fatalf(format, args...) // Make sure fatal logs will exit. 
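The credentials changes above introduce the experimental SecurityLevel, CommonAuthInfo, RequestInfo and CheckSecurityLevel APIs. As a rough, hypothetical sketch (not part of this change), a PerRPCCredentials implementation could use them to refuse to attach a token unless the connection provides both privacy and integrity; the tokenCreds type and its field are illustrative names only:

package tokencreds

import (
	"context"

	"google.golang.org/grpc/credentials"
)

// tokenCreds only sends its token over connections that provide both privacy
// and integrity, relying on the RequestInfo that gRPC attaches to ctx.
type tokenCreds struct {
	token string
}

func (c tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	// CheckSecurityLevel reads the handshake AuthInfo out of the RequestInfo
	// in ctx and compares its CommonAuthInfo.SecurityLevel against the target level.
	if err := credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil {
		return nil, err
	}
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

func (c tokenCreds) RequireTransportSecurity() bool { return true }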
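The dialoptions.go changes above add WithConnectParams, which supersedes the deprecated WithBackoffMaxDelay/WithBackoffConfig options, and WithResolvers for registering per-ClientConn resolver builders. A minimal dial sketch under assumed values (the target address, delays, and use of WithInsecure are placeholders, not part of this change):

package dialexample

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func dial() (*grpc.ClientConn, error) {
	// Start from backoff.DefaultConfig and override only the cap, as the
	// WithConnectParams documentation suggests.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 2 * time.Minute

	return grpc.Dial("example.test:443",
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 20 * time.Second,
		}),
	)
}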
diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go index b43746e616..b5bee48380 100644 --- a/vendor/google.golang.org/grpc/health/client.go +++ b/vendor/google.golang.org/grpc/health/client.go @@ -33,20 +33,20 @@ import ( "google.golang.org/grpc/status" ) -const maxDelay = 120 * time.Second - -var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay} -var backoffFunc = func(ctx context.Context, retries int) bool { - d := backoffStrategy.Backoff(retries) - timer := time.NewTimer(d) - select { - case <-timer.C: - return true - case <-ctx.Done(): - timer.Stop() - return false +var ( + backoffStrategy = backoff.DefaultExponential + backoffFunc = func(ctx context.Context, retries int) bool { + d := backoffStrategy.Backoff(retries) + timer := time.NewTimer(d) + select { + case <-timer.C: + return true + case <-ctx.Done(): + timer.Stop() + return false + } } -} +) func init() { internal.HealthCheckFunc = clientHealthCheck @@ -56,7 +56,7 @@ const healthCheckMethod = "/grpc.health.v1.Health/Watch" // This function implements the protocol defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), service string) error { +func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error { tryCnt := 0 retryConnection: @@ -70,7 +70,7 @@ retryConnection: if ctx.Err() != nil { return nil } - setConnectivityState(connectivity.Connecting) + setConnectivityState(connectivity.Connecting, nil) rawS, err := newStream(healthCheckMethod) if err != nil { continue retryConnection @@ -79,7 +79,7 @@ retryConnection: s, ok := rawS.(grpc.ClientStream) // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes. if !ok { - setConnectivityState(connectivity.Ready) + setConnectivityState(connectivity.Ready, nil) return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) } @@ -95,22 +95,22 @@ retryConnection: // Reports healthy for the LBing purposes if health check is not implemented in the server. if status.Code(err) == codes.Unimplemented { - setConnectivityState(connectivity.Ready) + setConnectivityState(connectivity.Ready, nil) return err } // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED. if err != nil { - setConnectivityState(connectivity.TransientFailure) + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err)) continue retryConnection } - // As a message has been received, removes the need for backoff for the next retry by reseting the try count. + // As a message has been received, removes the need for backoff for the next retry by resetting the try count. tryCnt = 0 if resp.Status == healthpb.HealthCheckResponse_SERVING { - setConnectivityState(connectivity.Ready) + setConnectivityState(connectivity.Ready, nil) } else { - setConnectivityState(connectivity.TransientFailure) + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. 
status=%s", resp.Status)) } } } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index c2f2c7729d..c99e27ae5b 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -1,15 +1,16 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc/health/v1/health.proto -package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package grpc_health_v1 import ( - context "golang.org/x/net/context" + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. @@ -21,7 +22,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type HealthCheckResponse_ServingStatus int32 @@ -38,6 +39,7 @@ var HealthCheckResponse_ServingStatus_name = map[int32]string{ 2: "NOT_SERVING", 3: "SERVICE_UNKNOWN", } + var HealthCheckResponse_ServingStatus_value = map[string]int32{ "UNKNOWN": 0, "SERVING": 1, @@ -48,8 +50,9 @@ var HealthCheckResponse_ServingStatus_value = map[string]int32{ func (x HealthCheckResponse_ServingStatus) String() string { return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) } + func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0} + return fileDescriptor_e265fd9d4e077217, []int{1, 0} } type HealthCheckRequest struct { @@ -63,16 +66,17 @@ func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } func (*HealthCheckRequest) ProtoMessage() {} func (*HealthCheckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_health_6b1a06aa67f91efd, []int{0} + return fileDescriptor_e265fd9d4e077217, []int{0} } + func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b) } func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic) } -func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_HealthCheckRequest.Merge(dst, src) +func (m *HealthCheckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HealthCheckRequest.Merge(m, src) } func (m *HealthCheckRequest) XXX_Size() int { return xxx_messageInfo_HealthCheckRequest.Size(m) @@ -101,16 +105,17 @@ func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } func (*HealthCheckResponse) ProtoMessage() {} func (*HealthCheckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_health_6b1a06aa67f91efd, []int{1} + return fileDescriptor_e265fd9d4e077217, []int{1} } + func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b) } 
func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic) } -func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_HealthCheckResponse.Merge(dst, src) +func (m *HealthCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HealthCheckResponse.Merge(m, src) } func (m *HealthCheckResponse) XXX_Size() int { return xxx_messageInfo_HealthCheckResponse.Size(m) @@ -129,9 +134,34 @@ func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { } func init() { + proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") - proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_e265fd9d4e077217) } + +var fileDescriptor_e265fd9d4e077217 = []byte{ + // 297 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, + 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2, + 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f, + 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82, + 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, + 0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8, + 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5, + 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d, + 0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f, + 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8, + 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3, + 0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac, + 0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10, + 0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc, + 0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4, + 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, + 0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20, + 0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff, + 0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -239,6 +269,17 @@ type HealthServer interface { Watch(*HealthCheckRequest, Health_WatchServer) error } +// UnimplementedHealthServer can be embedded to have forward compatible implementations. 
+type UnimplementedHealthServer struct { +} + +func (*UnimplementedHealthServer) Check(ctx context.Context, req *HealthCheckRequest) (*HealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") +} +func (*UnimplementedHealthServer) Watch(req *HealthCheckRequest, srv Health_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} + func RegisterHealthServer(s *grpc.Server, srv HealthServer) { s.RegisterService(&_Health_serviceDesc, srv) } @@ -300,28 +341,3 @@ var _Health_serviceDesc = grpc.ServiceDesc{ }, Metadata: "grpc/health/v1/health.proto", } - -func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) } - -var fileDescriptor_health_6b1a06aa67f91efd = []byte{ - // 297 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, - 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2, - 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f, - 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82, - 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, - 0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8, - 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5, - 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d, - 0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f, - 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8, - 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3, - 0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac, - 0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10, - 0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc, - 0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4, - 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, - 0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20, - 0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff, - 0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go index c79f9d2ab8..2262607f88 100644 --- a/vendor/google.golang.org/grpc/health/server.go +++ b/vendor/google.golang.org/grpc/health/server.go @@ -35,7 +35,7 @@ import ( // Server implements `service Health`. type Server struct { - mu sync.Mutex + mu sync.RWMutex // If shutdown is true, it's expected all serving status is NOT_SERVING, and // will stay in NOT_SERVING. shutdown bool @@ -54,8 +54,8 @@ func NewServer() *Server { // Check implements `service Health`. 
func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { - s.mu.Lock() - defer s.mu.Unlock() + s.mu.RLock() + defer s.mu.RUnlock() if servingStatus, ok := s.statusMap[in.Service]; ok { return &healthpb.HealthCheckResponse{ Status: servingStatus, @@ -139,7 +139,7 @@ func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.H // Shutdown sets all serving status to NOT_SERVING, and configures the server to // ignore all future status changes. // -// This changes serving status for all services. To set status for a perticular +// This changes serving status for all services. To set status for a particular // services, call SetServingStatus(). func (s *Server) Shutdown() { s.mu.Lock() @@ -153,7 +153,7 @@ func (s *Server) Shutdown() { // Resume sets all serving status to SERVING, and configures the server to // accept all future status changes. // -// This changes serving status for all services. To set status for a perticular +// This changes serving status for all services. To set status for a particular // services, call SetServingStatus(). func (s *Server) Resume() { s.mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 1bd0cce5ab..5fc0ee3da5 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,44 +25,39 @@ package backoff import ( "time" + grpcbackoff "google.golang.org/grpc/backoff" "google.golang.org/grpc/internal/grpcrand" ) // Strategy defines the methodology for backing off after a grpc connection // failure. -// type Strategy interface { // Backoff returns the amount of time to wait before the next retry given // the number of consecutive failures. Backoff(retries int) time.Duration } -const ( - // baseDelay is the amount of time to wait before retrying after the first - // failure. - baseDelay = 1.0 * time.Second - // factor is applied to the backoff after each retry. - factor = 1.6 - // jitter provides a range to randomize backoff delays. - jitter = 0.2 -) +// DefaultExponential is an exponential backoff implementation using the +// default values for all the configurable knobs defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} // Exponential implements exponential backoff algorithm as defined in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. type Exponential struct { - // MaxDelay is the upper bound of backoff delay. - MaxDelay time.Duration + // Config contains all options to configure the backoff algorithm. + Config grpcbackoff.Config } // Backoff returns the amount of time to wait before the next retry given the // number of retries. func (bc Exponential) Backoff(retries int) time.Duration { if retries == 0 { - return baseDelay + return bc.Config.BaseDelay } - backoff, max := float64(baseDelay), float64(bc.MaxDelay) + backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) for backoff < max && retries > 0 { - backoff *= factor + backoff *= bc.Config.Multiplier retries-- } if backoff > max { @@ -70,7 +65,7 @@ func (bc Exponential) Backoff(retries int) time.Duration { } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. 
- backoff *= 1 + jitter*(grpcrand.Float64()*2-1) + backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) if backoff < 0 { return 0 } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index fee6aecd08..8b10516749 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -34,7 +34,7 @@ type Logger interface { } // binLogger is the global binary logger for the binary. One of this should be -// built at init time from the configuration (environment varialbe or flags). +// built at init time from the configuration (environment variable or flags). // // It is used to get a methodLogger for each individual method. var binLogger Logger @@ -98,7 +98,7 @@ func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { // New methodLogger with same service overrides the old one. func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { if _, ok := l.services[service]; ok { - return fmt.Errorf("conflicting rules for service %v found", service) + return fmt.Errorf("conflicting service rules for service %v found", service) } if l.services == nil { l.services = make(map[string]*methodLoggerConfig) @@ -112,10 +112,10 @@ func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) // New methodLogger with same method overrides the old one. func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { if _, ok := l.blacklist[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) + return fmt.Errorf("conflicting blacklist rules for method %v found", method) } if _, ok := l.methods[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) + return fmt.Errorf("conflicting method rules for method %v found", method) } if l.methods == nil { l.methods = make(map[string]*methodLoggerConfig) @@ -127,10 +127,10 @@ func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) er // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { if _, ok := l.blacklist[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) + return fmt.Errorf("conflicting blacklist rules for method %v found", method) } if _, ok := l.methods[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) + return fmt.Errorf("conflicting method rules for method %v found", method) } if l.blacklist == nil { l.blacklist = make(map[string]struct{}) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index 4cc2525df7..be30d0e65e 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -43,7 +43,7 @@ import ( // Foo. // // If two configs exist for one certain method or service, the one specified -// later overrides the privous config. +// later overrides the previous config. 
func NewLoggerFromConfigString(s string) Logger { if s == "" { return nil @@ -74,7 +74,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid config: %q, %v", config, err) } if m == "*" { - return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config") + return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") } if suffix != "" { return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index 20d044f0fd..a2e7c346dd 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -63,7 +63,7 @@ func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. // -// Write() marshalls the proto message and writes it to the given writer. Each +// Write() marshals the proto message and writes it to the given writer. Each // message is prefixed with a 4 byte big endian unsigned integer as the length. // // No buffer is done, Close() doesn't try to close the writer. diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 0000000000..9f6a0c1200 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,85 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. +package buffer + +import "sync" + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `interface{}`. This means that a call to Put() incurs an extra memory +// allocation, and also that users need a type assertion while reading. For +// performance critical code paths, using Unbounded is strongly discouraged and +// defining a new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { + c chan interface{} + mu sync.Mutex + backlog []interface{} +} + +// NewUnbounded returns a new instance of Unbounded. +func NewUnbounded() *Unbounded { + return &Unbounded{c: make(chan interface{}, 1)} +} + +// Put adds t to the unbounded buffer. 
+func (b *Unbounded) Put(t interface{}) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, t) + b.mu.Unlock() +} + +// Load sends the earliest buffered data, if any, onto the read channel +// returned by Get(). Users are expected to call this every time they read a +// value from the read channel. +func (b *Unbounded) Load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// Get returns a read channel on which values added to the buffer, via Put(), +// are sent on. +// +// Upon reading a value from this channel, users are expected to call Load() to +// send the next buffered value onto the channel if there is any. +func (b *Unbounded) Get() <-chan interface{} { + return b.c +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 3ee8740f1f..ae6c8972fd 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -25,11 +25,14 @@ import ( ) const ( - prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" + prefix = "GRPC_GO_" + retryStr = prefix + "RETRY" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" ) var ( // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index bc1f99ac80..0912f0bf4c 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -28,9 +28,7 @@ import ( ) var ( - // WithResolverBuilder is exported by dialoptions.go - WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption - // WithHealthCheckFunc is not exported by dialoptions.go + // WithHealthCheckFunc is set by dialoptions.go WithHealthCheckFunc interface{} // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker @@ -39,14 +37,17 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // ParseServiceConfig is a function to parse JSON service configs into - // opaque data structures. - ParseServiceConfig func(sc string) (interface{}, error) // StatusRawProto is exported by status/status.go. This func returns a // pointer to the wrapped Status proto for a given status.Status without a // call to proto.Clone(). The returned Status proto should not be mutated by // the caller. StatusRawProto interface{} // func (*status.Status) *spb.Status + // NewRequestInfoContext creates a new context based on the argument context attaching + // the passed in RequestInfo to the new context. 
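The new internal/buffer.Unbounded type above has a small contract: Put never blocks, Get exposes a read channel, and the reader calls Load after every receive to move the next backlog item onto that channel. A minimal consumer sketch of that contract follows; it imports an internal gRPC package, so it only builds inside the grpc module and is shown purely for illustration:

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer"
)

func main() {
	b := buffer.NewUnbounded()

	// Producer: Put never blocks; items beyond the first go to the backlog.
	for i := 0; i < 3; i++ {
		b.Put(fmt.Sprintf("update-%d", i))
	}

	// Consumer: read one value, then call Load() so the next buffered value
	// (if any) is moved onto the channel returned by Get().
	for i := 0; i < 3; i++ {
		v := <-b.Get()
		b.Load()
		fmt.Println(v.(string)) // values come back as interface{}, so assert
	}
}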
+ NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context + // ParseServiceConfigForTesting is for creating a fake + // ClientConn for resolver testing only + ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -57,7 +58,7 @@ var ( // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go similarity index 72% rename from vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go rename to vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 297492e87a..c368db62ea 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -33,18 +33,22 @@ import ( "time" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" ) +// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB +// addresses from SRV records. Must not be changed after init time. +var EnableSRVLookups = false + func init() { resolver.Register(NewBuilder()) } const ( defaultPort = "443" - defaultFreq = time.Minute * 30 defaultDNSSvrPort = "53" golang = "GO" // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. @@ -94,47 +98,33 @@ var customAuthorityResolver = func(authority string) (netResolver, error) { // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. func NewBuilder() resolver.Builder { - return &dnsBuilder{minFreq: defaultFreq} + return &dnsBuilder{} } -type dnsBuilder struct { - // minimum frequency of polling the DNS server. - minFreq time.Duration -} +type dnsBuilder struct{} // Build creates and starts a DNS resolver that watches the name resolution of the target. -func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { host, port, err := parseTarget(target.Endpoint, defaultPort) if err != nil { return nil, err } // IP address. - if net.ParseIP(host) != nil { - host, _ = formatIP(host) - addr := []resolver.Address{{Addr: host + ":" + port}} - i := &ipResolver{ - cc: cc, - ip: addr, - rn: make(chan struct{}, 1), - q: make(chan struct{}), - } - cc.NewAddress(addr) - go i.watcher() - return i, nil + if ipAddr, ok := formatIP(host); ok { + addr := []resolver.Address{{Addr: ipAddr + ":" + port}} + cc.UpdateState(resolver.State{Addresses: addr}) + return deadResolver{}, nil } // DNS address (non-IP). 
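In the Build hunk above, an IP-literal target now short-circuits: the address is pushed once via UpdateState and a no-op deadResolver is returned, instead of spinning up the old ipResolver watcher. formatIP itself is not reproduced in this hunk; the sketch below only approximates its behaviour for IPv4 and IPv6 literals (names are illustrative):

package main

import (
	"fmt"
	"net"
)

// asHostPort approximates the resolver's formatIP helper: reject non-IP input
// and wrap IPv6 literals in brackets so "host:port" stays parseable.
func asHostPort(addr, port string) (string, bool) {
	ip := net.ParseIP(addr)
	if ip == nil {
		return "", false
	}
	if ip.To4() != nil {
		return addr + ":" + port, true
	}
	return "[" + addr + "]:" + port, true
}

func main() {
	fmt.Println(asHostPort("10.0.0.1", "443"))    // 10.0.0.1:443 true
	fmt.Println(asHostPort("2001:db8::1", "443")) // [2001:db8::1]:443 true
	fmt.Println(asHostPort("example.com", "443")) // "" false -> fall through to DNS
}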
ctx, cancel := context.WithCancel(context.Background()) d := &dnsResolver{ - freq: b.minFreq, - backoff: backoff.Exponential{MaxDelay: b.minFreq}, host: host, port: port, ctx: ctx, cancel: cancel, cc: cc, - t: time.NewTimer(0), rn: make(chan struct{}, 1), disableServiceConfig: opts.DisableServiceConfig, } @@ -150,6 +140,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts d.wg.Add(1) go d.watcher() + d.ResolveNow(resolver.ResolveNowOptions{}) return d, nil } @@ -164,53 +155,23 @@ type netResolver interface { LookupTXT(ctx context.Context, name string) (txts []string, err error) } -// ipResolver watches for the name resolution update for an IP address. -type ipResolver struct { - cc resolver.ClientConn - ip []resolver.Address - // rn channel is used by ResolveNow() to force an immediate resolution of the target. - rn chan struct{} - q chan struct{} -} - -// ResolveNow resend the address it stores, no resolution is needed. -func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) { - select { - case i.rn <- struct{}{}: - default: - } -} +// deadResolver is a resolver that does nothing. +type deadResolver struct{} -// Close closes the ipResolver. -func (i *ipResolver) Close() { - close(i.q) -} +func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {} -func (i *ipResolver) watcher() { - for { - select { - case <-i.rn: - i.cc.NewAddress(i.ip) - case <-i.q: - return - } - } -} +func (deadResolver) Close() {} // dnsResolver watches for the name resolution update for a non-IP target. type dnsResolver struct { - freq time.Duration - backoff backoff.Exponential - retryCount int - host string - port string - resolver netResolver - ctx context.Context - cancel context.CancelFunc - cc resolver.ClientConn + host string + port string + resolver netResolver + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn // rn channel is used by ResolveNow() to force an immediate resolution of the target. rn chan struct{} - t *time.Timer // wg is used to enforce Close() to return after the watcher() goroutine has finished. // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we // replace the real lookup functions with mocked ones to facilitate testing. @@ -222,7 +183,7 @@ type dnsResolver struct { } // ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. -func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) { +func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { select { case d.rn <- struct{}{}: default: @@ -233,7 +194,6 @@ func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) { func (d *dnsResolver) Close() { d.cancel() d.wg.Wait() - d.t.Stop() } func (d *dnsResolver) watcher() { @@ -242,27 +202,15 @@ func (d *dnsResolver) watcher() { select { case <-d.ctx.Done(): return - case <-d.t.C: case <-d.rn: - if !d.t.Stop() { - // Before resetting a timer, it should be stopped to prevent racing with - // reads on it's channel. - <-d.t.C - } } - result, sc := d.lookup() - // Next lookup should happen within an interval defined by d.freq. It may be - // more often due to exponential retry on empty address list. - if len(result) == 0 { - d.retryCount++ - d.t.Reset(d.backoff.Backoff(d.retryCount)) + state, err := d.lookup() + if err != nil { + d.cc.ReportError(err) } else { - d.retryCount = 0 - d.t.Reset(d.freq) + d.cc.UpdateState(*state) } - d.cc.NewServiceConfig(sc) - d.cc.NewAddress(result) // Sleep to prevent excessive re-resolutions. 
Incoming resolution requests // will be queued in d.rn. @@ -276,37 +224,68 @@ func (d *dnsResolver) watcher() { } } -func (d *dnsResolver) lookupSRV() []resolver.Address { +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + if !EnableSRVLookups { + return nil, nil + } var newAddrs []resolver.Address _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) if err != nil { - grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) - return nil + err = handleDNSError(err, "SRV") // may become nil + return nil, err } for _, s := range srvs { lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) if err != nil { - grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err) - continue + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. + continue + } + return nil, err } for _, a := range lbAddrs { - a, ok := formatIP(a) + ip, ok := formatIP(a) if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) } - addr := a + ":" + strconv.Itoa(int(s.Port)) + addr := ip + ":" + strconv.Itoa(int(s.Port)) newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) } } - return newAddrs + return newAddrs, nil +} + +var filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). + return nil + } + return err +} + +func handleDNSError(err error, lookupType string) error { + err = filterError(err) + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + grpclog.Infoln(err) + } + return err } -func (d *dnsResolver) lookupTXT() string { +func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) if err != nil { - grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err) - return "" + if envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { + return &serviceconfig.ParseResult{Err: err} + } + return nil } var res string for _, s := range ss { @@ -315,40 +294,45 @@ func (d *dnsResolver) lookupTXT() string { // TXT record must have "grpc_config=" attribute in order to be used as service config. if !strings.HasPrefix(res, txtAttribute) { - grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute) - return "" + grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + // This is not an error; it is the equivalent of not having a service config. 
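The new filterError/handleDNSError pair above only surfaces DNS failures that are worth retrying (timeouts and temporary errors) and drops the rest, since a missing record is not an error for this resolver. A standalone sketch of that classification; the function name is invented:

package main

import (
	"fmt"
	"net"
)

// worthRetrying mirrors the idea above: keep timeouts and temporary failures
// so the caller can back off and retry, and drop everything else.
func worthRetrying(err error) error {
	if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
		return nil // e.g. NXDOMAIN: treat as "no record", not an error
	}
	return err
}

func main() {
	nxdomain := &net.DNSError{Err: "no such host", Name: "example.invalid", IsNotFound: true}
	timeout := &net.DNSError{Err: "i/o timeout", Name: "example.com", IsTimeout: true}

	fmt.Println(worthRetrying(nxdomain)) // <nil>: suppressed
	fmt.Println(worthRetrying(timeout))  // kept: should be reported and retried
}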
+ return nil } - return strings.TrimPrefix(res, txtAttribute) + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) } -func (d *dnsResolver) lookupHost() []resolver.Address { +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { var newAddrs []resolver.Address addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { - grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) - return nil + err = handleDNSError(err, "A") + return nil, err } for _, a := range addrs { - a, ok := formatIP(a) + ip, ok := formatIP(a) if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) } - addr := a + ":" + d.port + addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) } - return newAddrs + return newAddrs, nil } -func (d *dnsResolver) lookup() ([]resolver.Address, string) { - newAddrs := d.lookupSRV() - // Support fallback to non-balancer address. - newAddrs = append(newAddrs, d.lookupHost()...) - if d.disableServiceConfig { - return newAddrs, "" +func (d *dnsResolver) lookup() (*resolver.State, error) { + srv, srvErr := d.lookupSRV() + addrs, hostErr := d.lookupHost() + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + state := &resolver.State{ + Addresses: append(addrs, srv...), + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT() } - sc := d.lookupTXT() - return newAddrs, canaryingSC(sc) + return state, nil } // formatIP returns ok = false if addr is not a valid textual representation of an IP address. @@ -434,12 +418,12 @@ func canaryingSC(js string) string { var rcs []rawChoice err := json.Unmarshal([]byte(js), &rcs) if err != nil { - grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) + grpclog.Warningf("dns: error parsing service config json: %v", err) return "" } cliHostname, err := os.Hostname() if err != nil { - grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) + grpclog.Warningf("dns: error getting client hostname: %v", err) return "" } var sc string diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go new file mode 100644 index 0000000000..8783a8cf82 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go @@ -0,0 +1,33 @@ +// +build go1.13 + +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import "net" + +func init() { + filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { + // The name does not exist; not an error. 
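The lookup rewrite above folds addresses and the parsed service config into a single resolver.State handed to the ClientConn via UpdateState (or ReportError on failure), and the option structs are renamed to BuildOptions/ResolveNowOptions, as the passthrough resolver diff below also shows. A minimal custom resolver against that surface; the scheme and addresses are invented for the sketch:

package main

import "google.golang.org/grpc/resolver"

const exampleScheme = "static-example" // illustrative scheme name

type staticBuilder struct{ addrs []string }

func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	var state resolver.State
	for _, a := range b.addrs {
		state.Addresses = append(state.Addresses, resolver.Address{Addr: a})
	}
	// One-shot update; a real resolver would re-resolve in ResolveNow.
	cc.UpdateState(state)
	return &staticResolver{}, nil
}

func (b *staticBuilder) Scheme() string { return exampleScheme }

type staticResolver struct{}

func (*staticResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*staticResolver) Close()                                {}

func main() {
	resolver.Register(&staticBuilder{addrs: []string{"127.0.0.1:9000", "127.0.0.1:9001"}})
}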
+ return nil + } + return err + } +} diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go similarity index 94% rename from vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go rename to vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 893d5d12cb..520d9229e1 100644 --- a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -26,7 +26,7 @@ const scheme = "passthrough" type passthroughBuilder struct{} -func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { r := &passthroughResolver{ target: target, cc: cc, @@ -48,7 +48,7 @@ func (r *passthroughResolver) start() { r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) } -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {} +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} func (*passthroughResolver) Close() {} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index b8e0aa4db2..ddee20b6be 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -107,8 +107,8 @@ func (*registerStream) isTransportResponseFrame() bool { return false } type headerFrame struct { streamID uint32 hf []hpack.HeaderField - endStream bool // Valid on server side. - initStream func(uint32) (bool, error) // Used only on the client side. + endStream bool // Valid on server side. + initStream func(uint32) error // Used only on the client side. onWrite func() wq *writeQuota // write quota for the stream created. cleanup *cleanupStream // Valid on the server side. @@ -637,21 +637,17 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { func (l *loopyWriter) originateStream(str *outStream) error { hdr := str.itl.dequeue().(*headerFrame) - sendPing, err := hdr.initStream(str.id) - if err != nil { + if err := hdr.initStream(str.id); err != nil { if err == ErrConnClosing { return err } // Other errors(errStreamDrain) need not close transport. 
return nil } - if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err } l.estdStreams[str.id] = str - if sendPing { - return l.pingHandler(&ping{data: [8]byte{}}) - } return nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 78f9ddc3d3..c3c32dafe9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -227,7 +227,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro if err == nil { // transport has not been closed if ht.stats != nil { - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) } } ht.Close() @@ -289,7 +291,9 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { if err == nil { if ht.stats != nil { - ht.stats.HandleRPC(s.Context(), &stats.OutHeader{}) + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + }) } } return err @@ -334,7 +338,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace Addr: ht.RemoteAddr(), } if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{credentials.PrivacyAndIntegrity}} } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 41a79c5670..2d6feeb1be 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -35,6 +35,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/keepalive" @@ -44,8 +45,14 @@ import ( "google.golang.org/grpc/status" ) +// clientConnectionCounter counts the number of connections a client has +// initiated (equal to the number of http2Clients created). Must be accessed +// atomically. +var clientConnectionCounter uint64 + // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. ctx context.Context cancel context.CancelFunc ctxDone <-chan struct{} // Cache the ctx.Done() chan. @@ -62,8 +69,6 @@ type http2Client struct { // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. goAway chan struct{} - // awakenKeepalive is used to wake up keepalive when after it has gone dormant. - awakenKeepalive chan struct{} framer *framer // controlBuf delivers all the control related tasks (e.g., window @@ -77,9 +82,6 @@ type http2Client struct { perRPCCreds []credentials.PerRPCCredentials - // Boolean to keep track of reading activity on transport. - // 1 is true and 0 is false. - activity uint32 // Accessed atomically. 
kp keepalive.ClientParameters keepaliveEnabled bool @@ -110,6 +112,16 @@ type http2Client struct { // goAwayReason records the http2.ErrCode and debug data received with the // GoAway frame. goAwayReason GoAwayReason + // A condition variable used to signal when the keepalive goroutine should + // go dormant. The condition for dormancy is based on the number of active + // streams and the `PermitWithoutStream` keepalive client parameter. And + // since the number of active streams is guarded by the above mutex, we use + // the same for this condition variable as well. + kpDormancyCond *sync.Cond + // A boolean to track whether the keepalive goroutine is dormant or not. + // This is checked before attempting to signal the above condition + // variable. + kpDormant bool // Fields below are for channelz metric collection. channelzID int64 // channelz unique identification number @@ -119,6 +131,8 @@ type http2Client struct { onClose func() bufferPool *bufferPool + + connectionID uint64 } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { @@ -232,7 +246,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - awakenKeepalive: make(chan struct{}, 1), framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, @@ -264,9 +277,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne updateFlowControl: t.updateFlowControl, } } - // Make sure awakenKeepalive can't be written upon. - // keepalive routine will make it writable, if need be. - t.awakenKeepalive <- struct{}{} if t.statsHandler != nil { t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, @@ -281,6 +291,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) } if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } // Start the reader goroutine for incoming message. Each transport has @@ -325,6 +336,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne } } + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + if err := t.framer.writer.Flush(); err != nil { return nil, err } @@ -347,6 +360,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &Stream{ + ct: t, done: make(chan struct{}), method: callHdr.Method, sendCompress: callHdr.SendCompress, @@ -380,23 +394,24 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { } func (t *http2Client) getPeer() *peer.Peer { - pr := &peer.Peer{ - Addr: t.remoteAddr, + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, } - // Attach Auth info if there is any. 
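The kpDormancyCond/kpDormant fields added above let the keepalive goroutine park itself when there are no active streams and PermitWithoutStream is false, while NewStream and Close can always wake it again. A stripped-down sketch of that park/wake pattern with sync.Cond; names and timings are illustrative:

package main

import (
	"fmt"
	"sync"
	"time"
)

// pinger stands in for the transport: a background goroutine that goes
// dormant while there is nothing to ping and must always be wakeable by close.
type pinger struct {
	mu      sync.Mutex
	cond    *sync.Cond
	dormant bool
	active  int  // stand-in for the number of active streams
	closing bool // stand-in for the transport's closing state
}

func newPinger() *pinger {
	p := &pinger{}
	p.cond = sync.NewCond(&p.mu)
	return p
}

func (p *pinger) loop() {
	for {
		p.mu.Lock()
		for p.active == 0 && !p.closing {
			// Park; Wait releases p.mu and reacquires it when signalled.
			p.dormant = true
			p.cond.Wait()
		}
		p.dormant = false
		if p.closing {
			p.mu.Unlock()
			return
		}
		p.mu.Unlock()
		fmt.Println("would send a keepalive ping here")
		time.Sleep(10 * time.Millisecond)
	}
}

func (p *pinger) addStream() {
	p.mu.Lock()
	p.active++
	if p.dormant {
		p.cond.Signal() // wake the parked goroutine, mirroring NewStream
	}
	p.mu.Unlock()
}

func (p *pinger) close() {
	p.mu.Lock()
	p.closing = true
	if p.dormant {
		p.cond.Signal() // never leave the goroutine blocked forever, mirroring Close
	}
	p.mu.Unlock()
}

func main() {
	p := newPinger()
	go p.loop()
	p.addStream()
	time.Sleep(30 * time.Millisecond)
	p.close()
	time.Sleep(20 * time.Millisecond)
}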
- if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - return pr } func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { aud := t.createAudience(callHdr) - authData, err := t.getTrAuthData(ctx, aud) + ri := credentials.RequestInfo{ + Method: callHdr.Method, + AuthInfo: t.authInfo, + } + ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) + authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err } - callAuthData, err := t.getCallAuthData(ctx, aud, callHdr) + callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) if err != nil { return nil, err } @@ -419,6 +434,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. @@ -564,7 +580,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) (bool, error) { + initStream: func(id uint32) error { t.mu.Lock() if state := t.state; state != reachable { t.mu.Unlock() @@ -574,29 +590,19 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea err = ErrConnClosing } cleanup(err) - return false, err + return err } t.activeStreams[id] = s if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) } - var sendPing bool - // If the number of active streams change from 0 to 1, then check if keepalive - // has gone dormant. If so, wake it up. - if len(t.activeStreams) == 1 && t.keepaliveEnabled { - select { - case t.awakenKeepalive <- struct{}{}: - sendPing = true - // Fill the awakenKeepalive channel again as this channel must be - // kept non-writable except at the point that the keepalive() - // goroutine is waiting either to be awaken or shutdown. - t.awakenKeepalive <- struct{}{} - default: - } + // If the keepalive goroutine has gone dormant, wake it up. + if t.kpDormant { + t.kpDormancyCond.Signal() } t.mu.Unlock() - return sendPing, nil + return nil }, onOrphaned: cleanup, wq: s.wq, @@ -674,12 +680,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } } if t.statsHandler != nil { + header, _, _ := metadata.FromOutgoingContextRaw(ctx) outHeader := &stats.OutHeader{ Client: true, FullMethod: callHdr.Method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, Compression: callHdr.SendCompress, + Header: header.Copy(), } t.statsHandler.HandleRPC(s.ctx, outHeader) } @@ -778,6 +786,11 @@ func (t *http2Client) Close() error { t.state = closing streams := t.activeStreams t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. 
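The createHeaderFields change above attaches a credentials.RequestInfo (method name plus transport AuthInfo) to the context before any per-RPC credentials run, so token providers can see what they are authorizing. For context, a trivial PerRPCCredentials implementation wired in at dial time; the token and address are placeholders:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// staticTokenCreds attaches a fixed bearer token to every RPC. After this
// change, the ctx passed to GetRequestMetadata also carries a
// credentials.RequestInfo for the method being called (not inspected here).
type staticTokenCreds struct{ token string }

func (c *staticTokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

func (c *staticTokenCreds) RequireTransportSecurity() bool { return false }

func main() {
	conn, err := grpc.Dial("127.0.0.1:50051",
		grpc.WithInsecure(),
		grpc.WithPerRPCCredentials(&staticTokenCreds{token: "example-token"}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}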
+ t.kpDormancyCond.Signal() + } t.mu.Unlock() t.controlBuf.finish() t.cancel() @@ -853,11 +866,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e return t.controlBuf.put(df) } -func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { +func (t *http2Client) getStream(f http2.Frame) *Stream { t.mu.Lock() - defer t.mu.Unlock() - s, ok := t.activeStreams[f.Header().StreamID] - return s, ok + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s } // adjustWindow sends out extra window update over the initial window size @@ -937,8 +950,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) { t.controlBuf.put(bdpPing) } // Select the right stream to dispatch. - s, ok := t.getStream(f) - if !ok { + s := t.getStream(f) + if s == nil { return } if size > 0 { @@ -969,8 +982,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) { } func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { - s, ok := t.getStream(f) - if !ok { + s := t.getStream(f) + if s == nil { return } if f.ErrCode == http2.ErrCodeRefusedStream { @@ -1147,8 +1160,8 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { // operateHeaders takes action on the decoded headers. func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { - s, ok := t.getStream(frame) - if !ok { + s := t.getStream(frame) + if s == nil { return } endStream := frame.StreamEnded() @@ -1177,12 +1190,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), + Header: s.header.Copy(), } t.statsHandler.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), + Trailer: s.trailer.Copy(), } t.statsHandler.HandleRPC(s.ctx, inTrailer) } @@ -1191,6 +1206,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { // If headerChan hasn't been closed yet if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true if !endStream { // HEADERS frame block carries a Response-Headers. isHeader = true @@ -1233,7 +1249,7 @@ func (t *http2Client) reader() { } t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) if t.keepaliveEnabled { - atomic.CompareAndSwapUint32(&t.activity, 0, 1) + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) } sf, ok := frame.(*http2.SettingsFrame) if !ok { @@ -1248,7 +1264,7 @@ func (t *http2Client) reader() { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() if t.keepaliveEnabled { - atomic.CompareAndSwapUint32(&t.activity, 0, 1) + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) } if err != nil { // Abort an active stream if the http2.Framer returns a @@ -1292,56 +1308,83 @@ func (t *http2Client) reader() { } } +func minTime(a, b time.Duration) time.Duration { + if a < b { + return a + } + return b +} + // keepalive running in a separate goroutune makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + timeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. 
+ prevNano := time.Now().UnixNano() timer := time.NewTimer(t.kp.Time) for { select { case <-timer.C: - if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { - timer.Reset(t.kp.Time) + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were here. + outstandingPing = false + // Next timer should fire at kp.Time seconds from lastRead time. + timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead continue } - // Check if keepalive should go dormant. + if outstandingPing && timeoutLeft <= 0 { + t.Close() + return + } t.mu.Lock() - if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { - // Make awakenKeepalive writable. - <-t.awakenKeepalive - t.mu.Unlock() - select { - case <-t.awakenKeepalive: - // If the control gets here a ping has been sent - // need to reset the timer with keepalive.Timeout. - case <-t.ctx.Done(): - return - } - } else { + if t.state == closing { + // If the transport is closing, we should exit from the + // keepalive goroutine here. If not, we could have a race + // between the call to Signal() from Close() and the call to + // Wait() here, whereby the keepalive goroutine ends up + // blocking on the condition variable which will never be + // signalled again. t.mu.Unlock() + return + } + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // If a ping was sent out previously (because there were active + // streams at that point) which wasn't acked and its timeout + // hadn't fired, but we got here and are about to go dormant, + // we should make sure that we unconditionally send a ping once + // we awaken. + outstandingPing = false + t.kpDormant = true + t.kpDormancyCond.Wait() + } + t.kpDormant = false + t.mu.Unlock() + + // We get here either because we were dormant and a new stream was + // created which unblocked the Wait() call, or because the + // keepalive timer expired. In both cases, we need to send a ping. + if !outstandingPing { if channelz.IsOn() { atomic.AddInt64(&t.czData.kpCount, 1) } - // Send ping. t.controlBuf.put(p) + timeoutLeft = t.kp.Timeout + outstandingPing = true } - - // By the time control gets here a ping has been sent one way or the other. - timer.Reset(t.kp.Timeout) - select { - case <-timer.C: - if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { - timer.Reset(t.kp.Time) - continue - } - infof("transport: closing client transport due to idleness.") - t.Close() - return - case <-t.ctx.Done(): - if !timer.Stop() { - <-timer.C - } - return - } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) case <-t.ctx.Done(): if !timer.Stop() { <-timer.C diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 83439b5627..8b04b0392a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -62,11 +62,15 @@ var ( statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status) ) +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. 
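The rewritten client keepalive loop above tracks a lastRead timestamp and an outstanding-ping deadline instead of the old activity flag and awakenKeepalive channel, but it is still driven by the same user-facing knobs. A minimal dial-time configuration sketch; the address and durations are placeholders:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Ping after 30s of inactivity, give the server 10s to answer, and only
	// ping while RPCs are active so the keepalive goroutine can go dormant.
	kp := keepalive.ClientParameters{
		Time:                30 * time.Second,
		Timeout:             10 * time.Second,
		PermitWithoutStream: false,
	}
	conn, err := grpc.Dial("127.0.0.1:50051", grpc.WithInsecure(), grpc.WithKeepaliveParams(kp))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}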
+var serverConnectionCounter uint64 + // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. ctx context.Context - ctxDone <-chan struct{} // Cache the context.Done() chan - cancel context.CancelFunc + done chan struct{} conn net.Conn loopy *loopyWriter readerDone chan struct{} // sync point to enable testing. @@ -84,12 +88,8 @@ type http2Server struct { controlBuf *controlBuffer fc *trInFlow stats stats.Handler - // Flag to keep track of reading activity on transport. - // 1 is true and 0 is false. - activity uint32 // Accessed atomically. // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters - // Keepalive enforcement policy. kep keepalive.EnforcementPolicy // The time instance last ping was received. @@ -125,6 +125,8 @@ type http2Server struct { channelzID int64 // channelz unique identification number czData *channelzData bufferPool *bufferPool + + connectionID uint64 } // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is @@ -138,7 +140,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err } framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) // Send initial settings as connection preface to client. - var isettings []http2.Setting + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. maxStreams := config.MaxStreams @@ -172,6 +177,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err Val: *config.MaxHeaderListSize, }) } + if config.HeaderTableSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingHeaderTableSize, + Val: *config.HeaderTableSize, + }) + } if err := framer.fr.WriteSettings(isettings...); err != nil { return nil, connectionErrorf(false, err, "transport: %v", err) } @@ -203,11 +214,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } - ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) t := &http2Server{ - ctx: ctx, - cancel: cancel, - ctxDone: ctx.Done(), + ctx: context.Background(), + done: done, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -228,7 +238,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err czData: new(channelzData), bufferPool: newBufferPool(), } - t.controlBuf = newControlBuffer(t.ctxDone) + t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, @@ -246,6 +256,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if channelz.IsOn() { t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) + t.framer.writer.Flush() defer func() { @@ -270,7 +283,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if err != nil { return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) } - atomic.StoreUint32(&t.activity, 1) + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) sf, ok := frame.(*http2.SettingsFrame) if !ok { return nil, 
connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) @@ -359,12 +372,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeRefusedStream, onWrite: func() {}, }) + s.cancel() return false } } t.mu.Lock() if t.state != reachable { t.mu.Unlock() + s.cancel() return false } if uint32(len(t.activeStreams)) >= t.maxStreams { @@ -375,12 +390,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeRefusedStream, onWrite: func() {}, }) + s.cancel() return false } if streamID%2 != 1 || streamID <= t.maxStreamID { t.mu.Unlock() // illegal gRPC stream id. errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + s.cancel() return true } t.maxStreamID = streamID @@ -405,6 +422,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), + Header: metadata.MD(state.data.mdata).Copy(), } t.stats.HandleRPC(s.ctx, inHeader) } @@ -438,7 +456,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() - atomic.StoreUint32(&t.activity, 1) + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) @@ -746,7 +764,7 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } -// WriteHeader sends the header metedata md back to the client. +// WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { if s.updateHeaderSent() || s.getState() == streamDone { return ErrIllegalHeaderWrite @@ -797,7 +815,9 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { if t.stats != nil { // Note: WireLength is not set in outHeader. // TODO(mmukhi): Revisit this later, if needed. - outHeader := &stats.OutHeader{} + outHeader := &stats.OutHeader{ + Header: s.header.Copy(), + } t.stats.HandleRPC(s.Context(), outHeader) } return nil @@ -860,7 +880,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) if t.stats != nil { - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) } return nil } @@ -882,7 +904,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e // TODO(mmukhi, dfawley): Should the server write also return io.EOF? s.cancel() select { - case <-t.ctx.Done(): + case <-t.done: return ErrConnClosing default: } @@ -904,7 +926,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { select { - case <-t.ctx.Done(): + case <-t.done: return ErrConnClosing default: } @@ -921,32 +943,35 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e // after an additional duration of keepalive.Timeout. 
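The server transport gets the matching treatment: the initial SETTINGS frame now always advertises a max frame size (and a header table size when configured), stats events carry copied header/trailer metadata, and the keepalive loop that follows is rebuilt around lastRead just like the client's. For reference, the server-side knobs those loops enforce are configured like this; the values are illustrative:

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer(
		// Close idle connections and ping clients on the schedule the
		// keepalive() goroutine in http2_server.go enforces.
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle: 5 * time.Minute,
			Time:              2 * time.Minute,
			Timeout:           20 * time.Second,
		}),
		// Reject clients that ping more often than once a minute.
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime: time.Minute,
		}),
	)
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}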
func (t *http2Server) keepalive() { p := &ping{} - var pingSent bool - maxIdle := time.NewTimer(t.kp.MaxConnectionIdle) - maxAge := time.NewTimer(t.kp.MaxConnectionAge) - keepalive := time.NewTimer(t.kp.Time) - // NOTE: All exit paths of this function should reset their - // respective timers. A failure to do so will cause the - // following clean-up to deadlock and eventually leak. + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + kpTimeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + // Initialize the different timers to their default values. + idleTimer := time.NewTimer(t.kp.MaxConnectionIdle) + ageTimer := time.NewTimer(t.kp.MaxConnectionAge) + kpTimer := time.NewTimer(t.kp.Time) defer func() { - if !maxIdle.Stop() { - <-maxIdle.C - } - if !maxAge.Stop() { - <-maxAge.C - } - if !keepalive.Stop() { - <-keepalive.C - } + // We need to drain the underlying channel in these timers after a call + // to Stop(), only if we are interested in resetting them. Clearly we + // are not interested in resetting them here. + idleTimer.Stop() + ageTimer.Stop() + kpTimer.Stop() }() + for { select { - case <-maxIdle.C: + case <-idleTimer.C: t.mu.Lock() idle := t.idle if idle.IsZero() { // The connection is non-idle. t.mu.Unlock() - maxIdle.Reset(t.kp.MaxConnectionIdle) + idleTimer.Reset(t.kp.MaxConnectionIdle) continue } val := t.kp.MaxConnectionIdle - time.Since(idle) @@ -955,44 +980,52 @@ func (t *http2Server) keepalive() { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. t.drain(http2.ErrCodeNo, []byte{}) - // Resetting the timer so that the clean-up doesn't deadlock. - maxIdle.Reset(infinity) return } - maxIdle.Reset(val) - case <-maxAge.C: + idleTimer.Reset(val) + case <-ageTimer.C: t.drain(http2.ErrCodeNo, []byte{}) - maxAge.Reset(t.kp.MaxConnectionAgeGrace) + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { - case <-maxAge.C: + case <-ageTimer.C: // Close the connection after grace period. infof("transport: closing server transport due to maximum connection age.") t.Close() - // Resetting the timer so that the clean-up doesn't deadlock. - maxAge.Reset(infinity) - case <-t.ctx.Done(): + case <-t.done: } return - case <-keepalive.C: - if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { - pingSent = false - keepalive.Reset(t.kp.Time) + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead continue } - if pingSent { + if outstandingPing && kpTimeoutLeft <= 0 { infof("transport: closing server transport due to idleness.") t.Close() - // Resetting the timer so that the clean-up doesn't deadlock. 
- keepalive.Reset(infinity) return } - pingSent = true - if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true } - t.controlBuf.put(p) - keepalive.Reset(t.kp.Timeout) - case <-t.ctx.Done(): + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) + case <-t.done: return } } @@ -1012,7 +1045,7 @@ func (t *http2Server) Close() error { t.activeStreams = nil t.mu.Unlock() t.controlBuf.finish() - t.cancel() + close(t.done) err := t.conn.Close() if channelz.IsOn() { channelz.RemoveEntry(t.channelzID) @@ -1152,7 +1185,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { select { case <-t.drainChan: case <-timer.C: - case <-t.ctx.Done(): + case <-t.done: return } t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) @@ -1202,7 +1235,7 @@ func (t *http2Server) getOutFlowWindow() int64 { select { case sz := <-resp: return int64(sz) - case <-t.ctxDone: + case <-t.done: return -1 case <-timer.C: return -2 diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 9d212867ce..8f5f3349d9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -667,6 +667,7 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList writer: w, fr: http2.NewFramer(w, r), } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) // Opt-in to Frame reuse API on framer to reduce garbage. // Frames aren't safe to read from after a subsequent call to ReadFrame. f.fr.SetReuseFrames() diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 1c1d106709..a30da9eb32 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -73,10 +73,11 @@ type recvMsg struct { } // recvBuffer is an unbounded channel of recvMsg structs. -// Note recvBuffer differs from controlBuffer only in that recvBuffer -// holds a channel of only recvMsg structs instead of objects implementing "item" interface. -// recvBuffer is written to much more often than -// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put" +// +// Note: recvBuffer differs from buffer.Unbounded only in the fact that it +// holds a channel of recvMsg structs instead of objects implementing "item" +// interface. recvBuffer is written to much more often and using strict recvMsg +// structs helps avoid allocation in "recvBuffer.put" type recvBuffer struct { c chan recvMsg mu sync.Mutex @@ -233,6 +234,7 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream + ct *http2Client // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. 
@@ -251,6 +253,10 @@ type Stream struct { headerChan chan struct{} // closed to indicate the end of header metadata. headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). Not valid on server side. + headerValid bool // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -303,34 +309,28 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } -func (s *Stream) waitOnHeader() error { +func (s *Stream) waitOnHeader() { if s.headerChan == nil { // On the server headerChan is always nil since a stream originates // only after having received headers. - return nil + return } select { case <-s.ctx.Done(): - // We prefer success over failure when reading messages because we delay - // context error in stream.Read(). To keep behavior consistent, we also - // prefer success here. - select { - case <-s.headerChan: - return nil - default: - } - return ContextErr(s.ctx.Err()) + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.ct.CloseStream(s, ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan case <-s.headerChan: - return nil } } // RecvCompress returns the compression algorithm applied to the inbound // message. It is empty string if there is no compression applied. func (s *Stream) RecvCompress() string { - if err := s.waitOnHeader(); err != nil { - return "" - } + s.waitOnHeader() return s.recvCompress } @@ -351,36 +351,27 @@ func (s *Stream) Done() <-chan struct{} { // available. It blocks until i) the metadata is ready or ii) there is no header // metadata or iii) the stream is canceled/expired. // -// On server side, it returns the out header after t.WriteHeader is called. +// On server side, it returns the out header after t.WriteHeader is called. It +// does not block and must not be called until after WriteHeader. func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil && s.header != nil { + if s.headerChan == nil { // On server side, return the header in stream. It will be the out // header after t.WriteHeader is called. return s.header.Copy(), nil } - err := s.waitOnHeader() - // Even if the stream is closed, header is returned if available. - select { - case <-s.headerChan: - if s.header == nil { - return nil, nil - } - return s.header.Copy(), nil - default: + s.waitOnHeader() + if !s.headerValid { + return nil, s.status.Err() } - return nil, err + return s.header.Copy(), nil } // TrailersOnly blocks until a header or trailers-only frame is received and // then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. If a context error happens -// first, returns it as a status error. Client-side only. -func (s *Stream) TrailersOnly() (bool, error) { - err := s.waitOnHeader() - if err != nil { - return false, err - } - return s.noHeaders, nil +// before headers are received, returns true, nil. Client-side only. +func (s *Stream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders } // Trailer returns the cached trailer metedata. 
Note that if it is not called @@ -534,6 +525,7 @@ type ServerConfig struct { ReadBufferSize int ChannelzParentID int64 MaxHeaderListSize *uint32 + HeaderTableSize *uint32 } // NewServerTransport creates a ServerTransport with conn or non-nil error diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 45baa2ae13..00447894f0 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -20,6 +20,7 @@ package grpc import ( "context" + "fmt" "io" "sync" @@ -31,49 +32,78 @@ import ( "google.golang.org/grpc/status" ) +// v2PickerWrapper wraps a balancer.Picker while providing the +// balancer.V2Picker API. It requires a pickerWrapper to generate errors +// including the latest connectionError. To be deleted when balancer.Picker is +// updated to the balancer.V2Picker API. +type v2PickerWrapper struct { + picker balancer.Picker + connErr *connErr +} + +func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + sc, done, err := v.picker.Pick(info.Ctx, info) + if err != nil { + if err == balancer.ErrTransientFailure { + return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError())) + } + return balancer.PickResult{}, err + } + return balancer.PickResult{SubConn: sc, Done: done}, nil +} + // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { mu sync.Mutex done bool blockingCh chan struct{} - picker balancer.Picker + picker balancer.V2Picker - // The latest connection happened. - connErrMu sync.Mutex - connErr error + // The latest connection error. TODO: remove when V1 picker is deprecated; + // balancer should be responsible for providing the error. + *connErr } -func newPickerWrapper() *pickerWrapper { - bp := &pickerWrapper{blockingCh: make(chan struct{})} - return bp +type connErr struct { + mu sync.Mutex + err error } -func (bp *pickerWrapper) updateConnectionError(err error) { - bp.connErrMu.Lock() - bp.connErr = err - bp.connErrMu.Unlock() +func (c *connErr) updateConnectionError(err error) { + c.mu.Lock() + c.err = err + c.mu.Unlock() } -func (bp *pickerWrapper) connectionError() error { - bp.connErrMu.Lock() - err := bp.connErr - bp.connErrMu.Unlock() +func (c *connErr) connectionError() error { + c.mu.Lock() + err := c.err + c.mu.Unlock() return err } +func newPickerWrapper() *pickerWrapper { + return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}} +} + // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. -func (bp *pickerWrapper) updatePicker(p balancer.Picker) { - bp.mu.Lock() - if bp.done { - bp.mu.Unlock() +func (pw *pickerWrapper) updatePicker(p balancer.Picker) { + pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr}) +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) { + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() return } - bp.picker = p - // bp.blockingCh should never be nil. - close(bp.blockingCh) - bp.blockingCh = make(chan struct{}) - bp.mu.Unlock() + pw.picker = p + // pw.blockingCh should never be nil. 
+ close(pw.blockingCh) + pw.blockingCh = make(chan struct{}) + pw.mu.Unlock() } func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { @@ -100,83 +130,85 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { var ch chan struct{} + var lastPickErr error for { - bp.mu.Lock() - if bp.done { - bp.mu.Unlock() + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() return nil, nil, ErrClientConnClosing } - if bp.picker == nil { - ch = bp.blockingCh + if pw.picker == nil { + ch = pw.blockingCh } - if ch == bp.blockingCh { + if ch == pw.blockingCh { // This could happen when either: - // - bp.picker is nil (the previous if condition), or + // - pw.picker is nil (the previous if condition), or // - has called pick on the current picker. - bp.mu.Unlock() + pw.mu.Unlock() select { case <-ctx.Done(): - if connectionErr := bp.connectionError(); connectionErr != nil { - switch ctx.Err() { - case context.DeadlineExceeded: - return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr) - case context.Canceled: - return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr) - } + var errStr string + if lastPickErr != nil { + errStr = "latest balancer error: " + lastPickErr.Error() + } else if connectionErr := pw.connectionError(); connectionErr != nil { + errStr = "latest connection error: " + connectionErr.Error() + } else { + errStr = ctx.Err().Error() + } + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + case context.Canceled: + return nil, nil, status.Error(codes.Canceled, errStr) } - return nil, nil, ctx.Err() case <-ch: } continue } - ch = bp.blockingCh - p := bp.picker - bp.mu.Unlock() + ch = pw.blockingCh + p := pw.picker + pw.mu.Unlock() - subConn, done, err := p.Pick(ctx, opts) + pickResult, err := p.Pick(info) if err != nil { - switch err { - case balancer.ErrNoSubConnAvailable: + if err == balancer.ErrNoSubConnAvailable { continue - case balancer.ErrTransientFailure: + } + if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() { if !failfast { + lastPickErr = err continue } - return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError()) - case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return nil, nil, status.Error(codes.Canceled, err.Error()) - default: - if _, ok := status.FromError(err); ok { - return nil, nil, err - } - // err is some other error. - return nil, nil, status.Error(codes.Unknown, err.Error()) + return nil, nil, status.Error(codes.Unavailable, err.Error()) } + if _, ok := status.FromError(err); ok { + return nil, nil, err + } + // err is some other error. 
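The pick loop above now remembers the most recent balancer error and folds either it or the latest connection error into the status returned when the RPC's context expires, instead of returning the bare context error. A standalone sketch of that status composition; the helper name is invented:

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// pickFailure mirrors the shape of the logic above: prefer the last balancer
// error, fall back to the last connection error, then to the bare ctx error,
// and choose the status code from the kind of context error.
func pickFailure(ctx context.Context, lastPickErr, connErr error) error {
	var msg string
	switch {
	case lastPickErr != nil:
		msg = "latest balancer error: " + lastPickErr.Error()
	case connErr != nil:
		msg = "latest connection error: " + connErr.Error()
	default:
		msg = ctx.Err().Error()
	}
	switch ctx.Err() {
	case context.DeadlineExceeded:
		return status.Error(codes.DeadlineExceeded, msg)
	case context.Canceled:
		return status.Error(codes.Canceled, msg)
	}
	return status.Error(codes.Unknown, msg)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	<-ctx.Done()
	fmt.Println(pickFailure(ctx, nil, fmt.Errorf("connection refused")))
}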
+ return nil, nil, status.Error(codes.Unknown, err.Error()) } - acw, ok := subConn.(*acBalancerWrapper) + acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { grpclog.Error("subconn returned from pick is not *acBalancerWrapper") continue } if t, ok := acw.getAddrConn().getReadyTransport(); ok { if channelz.IsOn() { - return t, doneChannelzWrapper(acw, done), nil + return t, doneChannelzWrapper(acw, pickResult.Done), nil } - return t, done, nil + return t, pickResult.Done, nil } - if done != nil { + if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. // DoneInfo with default value works. - done(balancer.DoneInfo{}) + pickResult.Done(balancer.DoneInfo{}) } grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") // If ok == false, ac.state is not READY. @@ -186,12 +218,12 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer. } } -func (bp *pickerWrapper) close() { - bp.mu.Lock() - defer bp.mu.Unlock() - if bp.done { +func (pw *pickerWrapper) close() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { return } - bp.done = true - close(bp.blockingCh) + pw.done = true + close(pw.blockingCh) } diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index ed05b02ed9..c43dac9ad8 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -19,12 +19,14 @@ package grpc import ( - "context" + "errors" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) // PickFirstBalancerName is the name of the pick_first balancer. @@ -45,35 +47,67 @@ func (*pickfirstBuilder) Name() string { } type pickfirstBalancer struct { - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + sc balancer.SubConn } +var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2 + func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { if err != nil { - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) - } + b.ResolverError(err) return } + b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s}) +} + +func (b *pickfirstBalancer) ResolverError(err error) { + switch b.state { + case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: + // Set a failing picker if we don't have a good picker. 
+ b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}}, + ) + } + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err) + } +} + +func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { + if len(cs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } if b.sc == nil { - b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + var err error + b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) if err != nil { - //TODO(yuxuanli): why not change the cc state to Idle? if grpclog.V(2) { grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } - return + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}}, + ) + return balancer.ErrBadResolverState } - b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) b.sc.Connect() } else { - b.sc.UpdateAddresses(addrs) + b.sc.UpdateAddresses(cs.ResolverState.Addresses) b.sc.Connect() } + return nil } -func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { +func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { if grpclog.V(2) { grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) } @@ -83,18 +117,28 @@ func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s conn } return } - if s == connectivity.Shutdown { + b.state = s.ConnectivityState + if s.ConnectivityState == connectivity.Shutdown { b.sc = nil return } - switch s { + switch s.ConnectivityState { case connectivity.Ready, connectivity.Idle: - b.cc.UpdateBalancerState(s, &picker{sc: sc}) + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) case connectivity.Connecting: - b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) case connectivity.TransientFailure: - b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) + err := balancer.ErrTransientFailure + // TODO: this can be unconditional after the V1 API is removed, as + // SubConnState will always contain a connection error. 
+ if s.ConnectionError != nil { + err = balancer.TransientFailureError(s.ConnectionError) + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: s.ConnectivityState, + Picker: &picker{err: err}, + }) } } @@ -102,15 +146,12 @@ func (b *pickfirstBalancer) Close() { } type picker struct { - err error - sc balancer.SubConn + result balancer.PickResult + err error } -func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - if p.err != nil { - return nil, nil, p.err - } - return p.sc, nil, nil +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err } func init() { diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index e83da346a5..fe14b2fb98 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -21,6 +21,11 @@ package resolver import ( + "context" + "net" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/serviceconfig" ) @@ -69,12 +74,18 @@ func GetDefaultScheme() string { } // AddressType indicates the address type returned by name resolution. +// +// Deprecated: use Attributes in Address instead. type AddressType uint8 const ( // Backend indicates the address is for a backend server. + // + // Deprecated: use Attributes in Address instead. Backend AddressType = iota // GRPCLB indicates the address is for a grpclb load balancer. + // + // Deprecated: use Attributes in Address instead. GRPCLB ) @@ -83,33 +94,75 @@ const ( type Address struct { // Addr is the server address on which a connection will be established. Addr string - // Type is the type of this address. - Type AddressType + // ServerName is the name of this address. + // If non-empty, the ServerName is used as the transport certification authority for + // the address, instead of the hostname from the Dial target string. In most cases, + // this should not be set. // - // e.g. if Type is GRPCLB, ServerName should be the name of the remote load + // If Type is GRPCLB, ServerName should be the name of the remote load // balancer, not the name of the backend. + // + // WARNING: ServerName must only be populated with trusted values. It + // is insecure to populate it with data from untrusted inputs since untrusted + // values could be used to bypass the authority checks performed by TLS. ServerName string + + // Attributes contains arbitrary data about this address intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes + + // Type is the type of this address. + // + // Deprecated: use Attributes instead. + Type AddressType + // Metadata is the information associated with Addr, which may be used // to make load balancing decision. + // + // Deprecated: use Attributes instead. Metadata interface{} } -// BuildOption includes additional information for the builder to create +// BuildOptions includes additional information for the builder to create // the resolver. -type BuildOption struct { - // DisableServiceConfig indicates whether resolver should fetch service config data. +type BuildOptions struct { + // DisableServiceConfig indicates whether a resolver implementation should + // fetch service config data. DisableServiceConfig bool + // DialCreds is the transport credentials used by the ClientConn for + // communicating with the target gRPC service (set via + // WithTransportCredentials). 
In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle used by the ClientConn for + // communicating with the target gRPC service (set via + // WithCredentialsBundle). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + CredsBundle credentials.Bundle + // Dialer is the custom dialer used by the ClientConn for dialling the + // target gRPC service (set via WithDialer). In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) } // State contains the current Resolver state relevant to the ClientConn. type State struct { - Addresses []Address // Resolved addresses for the target - // ServiceConfig is the parsed service config; obtained from - // serviceconfig.Parse. - ServiceConfig serviceconfig.Config + // Addresses is the latest set of resolved addresses for the target. + Addresses []Address + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. + ServiceConfig *serviceconfig.ParseResult - // TODO: add Err error + // Attributes contains arbitrary data about the resolver intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes } // ClientConn contains the callbacks for resolver to notify any updates @@ -122,6 +175,10 @@ type State struct { type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. UpdateState(State) + // ReportError notifies the ClientConn that the Resolver encountered an + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. + ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. // The address list should be the complete list of resolved addresses. @@ -133,6 +190,9 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewServiceConfig(serviceConfig string) + // ParseServiceConfig parses the provided service config and returns an + // object that provides the parsed config. + ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult } // Target represents a target for gRPC, as specified in: @@ -164,14 +224,14 @@ type Builder interface { // // gRPC dial calls Build synchronously, and fails if the returned error is // not nil. - Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) // Scheme returns the scheme supported by this resolver. // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. Scheme() string } -// ResolveNowOption includes additional information for ResolveNow. -type ResolveNowOption struct{} +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} // Resolver watches for the updates on the specified target. // Updates include address updates and service config updates. 
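As a point of reference for the interface changes above, a custom resolver written against the updated API stays small. The sketch below is illustrative only (the staticBuilder and staticResolver names are hypothetical, not part of this change); it assumes grpc-go at the vendored version and exercises the new Build(target, cc, BuildOptions) and ResolveNow(ResolveNowOptions) signatures together with ClientConn.UpdateState:

    package staticresolver

    import "google.golang.org/grpc/resolver"

    // staticBuilder builds resolvers that always report a fixed address list.
    type staticBuilder struct {
        addrs []string
    }

    var _ resolver.Builder = (*staticBuilder)(nil)

    func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
        r := &staticResolver{cc: cc, addrs: b.addrs}
        // gRPC calls Build synchronously during Dial; push the initial state now.
        r.ResolveNow(resolver.ResolveNowOptions{})
        return r, nil
    }

    func (b *staticBuilder) Scheme() string { return "static" }

    // staticResolver reports the same addresses on every resolution request.
    type staticResolver struct {
        cc    resolver.ClientConn
        addrs []string
    }

    func (r *staticResolver) ResolveNow(resolver.ResolveNowOptions) {
        state := resolver.State{}
        for _, a := range r.addrs {
            state.Addresses = append(state.Addresses, resolver.Address{Addr: a})
        }
        // UpdateState replaces the deprecated NewAddress/NewServiceConfig calls.
        // A real resolver would call cc.ReportError(err) on lookup failure so
        // the ClientConn can start exponential-backoff re-resolution.
        r.cc.UpdateState(state)
    }

    func (r *staticResolver) Close() {}

Registering such a builder with resolver.Register and dialing a static:/// target would drive the BuildOptions path shown in the diff above.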
@@ -180,7 +240,7 @@ type Resolver interface { // again. It's just a hint, resolver can ignore this if it's not necessary. // // It could be called multiple times concurrently. - ResolveNow(ResolveNowOption) + ResolveNow(ResolveNowOptions) // Close closes the resolver. Close() } diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 6934905b0f..3eaf724cd6 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -21,22 +21,29 @@ package grpc import ( "fmt" "strings" - "sync/atomic" + "sync" + "time" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" ) // ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConnection interface. +// It implements resolver.ClientConn interface. type ccResolverWrapper struct { - cc *ClientConn - resolver resolver.Resolver - addrCh chan []resolver.Address - scCh chan string - done uint32 // accessed atomically; set to 1 when closed. - curState resolver.State + cc *ClientConn + resolverMu sync.Mutex + resolver resolver.Resolver + done *grpcsync.Event + curState resolver.State + + pollingMu sync.Mutex + polling chan struct{} } // split2 returns the values from strings.SplitN(s, sep, 2). @@ -67,60 +74,126 @@ func parseTarget(target string) (ret resolver.Target) { return ret } -// newCCResolverWrapper parses cc.target for scheme and gets the resolver -// builder for this scheme and builds the resolver. The monitoring goroutine -// for it is not started yet and can be created by calling start(). -// -// If withResolverBuilder dial option is set, the specified resolver will be -// used instead. -func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { - rb := cc.dopts.resolverBuilder - if rb == nil { - return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. +func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { + ccr := &ccResolverWrapper{ + cc: cc, + done: grpcsync.NewEvent(), } - ccr := &ccResolverWrapper{ - cc: cc, - addrCh: make(chan []resolver.Address, 1), - scCh: make(chan string, 1), + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + rbo := resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, } var err error - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig}) + // We need to hold the lock here while we assign to the ccr.resolver field + // to guard against a data race caused by the following code path, + // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up + // accessing ccr.resolver which is being assigned here. 
+ ccr.resolverMu.Lock() + defer ccr.resolverMu.Unlock() + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) if err != nil { return nil, err } return ccr, nil } -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) { - ccr.resolver.ResolveNow(o) +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.resolverMu.Lock() + if !ccr.done.HasFired() { + ccr.resolver.ResolveNow(o) + } + ccr.resolverMu.Unlock() } func (ccr *ccResolverWrapper) close() { + ccr.resolverMu.Lock() ccr.resolver.Close() - atomic.StoreUint32(&ccr.done, 1) + ccr.done.Fire() + ccr.resolverMu.Unlock() } -func (ccr *ccResolverWrapper) isDone() bool { - return atomic.LoadUint32(&ccr.done) == 1 +// poll begins or ends asynchronous polling of the resolver based on whether +// err is ErrBadResolverState. +func (ccr *ccResolverWrapper) poll(err error) { + ccr.pollingMu.Lock() + defer ccr.pollingMu.Unlock() + if err != balancer.ErrBadResolverState { + // stop polling + if ccr.polling != nil { + close(ccr.polling) + ccr.polling = nil + } + return + } + if ccr.polling != nil { + // already polling + return + } + p := make(chan struct{}) + ccr.polling = p + go func() { + for i := 0; ; i++ { + ccr.resolveNow(resolver.ResolveNowOptions{}) + t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i)) + select { + case <-p: + t.Stop() + return + case <-ccr.done.Done(): + // Resolver has been closed. + t.Stop() + return + case <-t.C: + select { + case <-p: + return + default: + } + // Timer expired; re-resolve. + } + } + }() } func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { - if ccr.isDone() { + if ccr.done.HasFired() { return } grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s) if channelz.IsOn() { ccr.addChannelzTraceEvent(s) } - ccr.cc.updateResolverState(s) ccr.curState = s + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +func (ccr *ccResolverWrapper) ReportError(err error) { + if ccr.done.HasFired() { + return + } + grpclog.Warningf("ccResolverWrapper: reporting error to cc: %v", err) + if channelz.IsOn() { + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Resolver reported error: %v", err), + Severity: channelz.CtWarning, + }) + } + ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) } // NewAddress is called by the resolver implementation to send addresses to gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - if ccr.isDone() { + if ccr.done.HasFired() { return } grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) @@ -128,31 +201,53 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) } ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState) + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. 
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - if ccr.isDone() { + if ccr.done.HasFired() { return } grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) - c, err := parseServiceConfig(sc) - if err != nil { + if ccr.cc.dopts.disableServiceConfig { + grpclog.Infof("Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + grpclog.Warningf("ccResolverWrapper: error parsing service config: %v", scpr.Err) + if channelz.IsOn() { + channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Error parsing service config: %v", scpr.Err), + Severity: channelz.CtWarning, + }) + } + ccr.poll(balancer.ErrBadResolverState) return } if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c}) + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) } - ccr.curState.ServiceConfig = c - ccr.cc.updateResolverState(ccr.curState) + ccr.curState.ServiceConfig = scpr + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) } func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string - oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig) - newSC, newOK := s.ServiceConfig.(*ServiceConfig) + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { updates = append(updates, "service config updated") } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 088c3f1b25..d3a4adc5ee 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -648,35 +648,58 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, st.Err() } + var size int if pf == compressionMade { // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { d, err = dc.Do(bytes.NewReader(d)) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } + size = len(d) } else { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } + d, size, err = decompress(compressor, d, maxReceiveMessageSize) } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } else { + size = len(d) } - if len(d) > maxReceiveMessageSize { + if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. 
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) } return d, nil } +// Using compressor, decompress d, returning data and size. +// Optionally, if data will be over maxReceiveMessageSize, just return the size. +func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return nil, 0, err + } + if sizer, ok := compressor.(interface { + DecompressedSize(compressedBytes []byte) int + }); ok { + if size := sizer.DecompressedSize(d); size >= 0 { + if size > maxReceiveMessageSize { + return nil, size, nil + } + // size is used as an estimate to size the buffer, but we + // will read more data if available. + // +MinRead so ReadFrom will not reallocate if size is correct. + buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return buf.Bytes(), int(bytesRead), err + } + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return d, len(d), err +} + // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? @@ -848,7 +871,7 @@ type channelzData struct { // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest -// support package version is 5. +// support package version is 6. // // Older versions are kept for compatibility. They may be removed if // compatibility cannot be maintained. @@ -858,6 +881,7 @@ const ( SupportPackageIsVersion3 = true SupportPackageIsVersion4 = true SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true ) const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index f064b73e55..0d75cb109a 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -130,6 +130,7 @@ type serverOptions struct { readBufferSize int connectionTimeout time.Duration maxHeaderListSize *uint32 + headerTableSize *uint32 } var defaultServerOptions = serverOptions{ @@ -343,8 +344,8 @@ func StatsHandler(h stats.Handler) ServerOption { // unknown service handler. The provided method is a bidi-streaming RPC service // handler that will be invoked instead of returning the "unimplemented" gRPC // error whenever a request is received for an unregistered service or method. -// The handling function has full access to the Context of the request and the -// stream, and the invocation bypasses interceptors. +// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. 
func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.unknownStreamDesc = &StreamDesc{ @@ -377,6 +378,16 @@ func MaxHeaderListSize(s uint32) ServerOption { }) } +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// This API is EXPERIMENTAL. +func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { @@ -686,6 +697,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr ReadBufferSize: s.opts.readBufferSize, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, } st, err := transport.NewServerTransport("http2", c, config) if err != nil { @@ -853,41 +865,58 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str } func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { - if channelz.IsOn() { - s.incrCallsStarted() - defer func() { - if err != nil && err != io.EOF { - s.incrCallsFailed() - } else { - s.incrCallsSucceeded() - } - }() - } sh := s.opts.statsHandler - if sh != nil { - beginTime := time.Now() - begin := &stats.Begin{ - BeginTime: beginTime, + if sh != nil || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() } - sh.HandleRPC(stream.Context(), begin) - defer func() { - end := &stats.End{ + var statsBegin *stats.Begin + if sh != nil { + beginTime := time.Now() + statsBegin = &stats.Begin{ BeginTime: beginTime, - EndTime: time.Now(), - } - if err != nil && err != io.EOF { - end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) - }() - } - if trInfo != nil { - defer trInfo.tr.Finish() - trInfo.tr.LazyLog(&trInfo.firstLine, false) + sh.HandleRPC(stream.Context(), statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. 
defer func() { - if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } } }() } @@ -1087,31 +1116,15 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() - defer func() { - if err != nil && err != io.EOF { - s.incrCallsFailed() - } else { - s.incrCallsSucceeded() - } - }() } sh := s.opts.statsHandler + var statsBegin *stats.Begin if sh != nil { beginTime := time.Now() - begin := &stats.Begin{ + statsBegin = &stats.Begin{ BeginTime: beginTime, } - sh.HandleRPC(stream.Context(), begin) - defer func() { - end := &stats.End{ - BeginTime: beginTime, - EndTime: time.Now(), - } - if err != nil && err != io.EOF { - end.Error = toRPCErr(err) - } - sh.HandleRPC(stream.Context(), end) - }() + sh.HandleRPC(stream.Context(), statsBegin) } ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ @@ -1126,6 +1139,41 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp statsHandler: sh, } + if sh != nil || trInfo != nil || channelz.IsOn() { + // See comment in processUnaryRPC on defers. + defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + ss.binlog = binarylog.GetMethodLogger(stream.Method()) if ss.binlog != nil { md, _ := metadata.FromIncomingContext(ctx) @@ -1179,16 +1227,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) - defer func() { - ss.mu.Lock() - if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - ss.trInfo.tr.Finish() - ss.trInfo.tr = nil - ss.mu.Unlock() - }() } var appErr error var server interface{} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index d0787f1e2a..5a80a575a5 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -136,9 +136,9 @@ type retryPolicy struct { maxAttempts int // Exponential backoff parameters. The initial retry attempt will occur at - // random(0, initialBackoffMS). In general, the nth attempt will occur at + // random(0, initialBackoff). 
In general, the nth attempt will occur at // random(0, - // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)). + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). // // These fields are required and must be greater than zero. initialBackoff time.Duration @@ -261,20 +261,17 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfig = func(sc string) (interface{}, error) { - return parseServiceConfig(sc) - } + internal.ParseServiceConfigForTesting = parseServiceConfig } - -func parseServiceConfig(js string) (*ServiceConfig, error) { +func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { - return nil, fmt.Errorf("no JSON service config provided") + return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} } var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err + return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ LB: rsc.LoadBalancingPolicy, @@ -288,7 +285,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { if len(lbcfg) != 1 { err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) grpclog.Warningf(err.Error()) - return nil, err + return &serviceconfig.ParseResult{Err: err} } var name string var jsonCfg json.RawMessage @@ -303,17 +300,25 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { var err error sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg) if err != nil { - return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)} } } else if string(jsonCfg) != "{}" { grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) } break } + if sc.lbConfig == nil { + // We had a loadBalancingConfig field but did not encounter a + // supported policy. The config is considered invalid in this + // case. 
+ err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found") + grpclog.Warningf(err.Error()) + return &serviceconfig.ParseResult{Err: err} + } } if rsc.MethodConfig == nil { - return &sc, nil + return &serviceconfig.ParseResult{Config: &sc} } for _, m := range *rsc.MethodConfig { if m.Name == nil { @@ -322,7 +327,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { d, err := parseDuration(m.Timeout) if err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err + return &serviceconfig.ParseResult{Err: err} } mc := MethodConfig{ @@ -331,7 +336,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { } if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err + return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { if *m.MaxRequestMessageBytes > int64(maxInt) { @@ -356,13 +361,13 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { if sc.retryThrottling != nil { if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { - return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt) + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} } if tr := sc.retryThrottling.TokenRatio; tr <= 0 { - return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr) + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} } } - return &sc, nil + return &serviceconfig.ParseResult{Config: &sc} } func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go index 53b27875a1..187c304421 100644 --- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -22,27 +22,20 @@ // This package is EXPERIMENTAL. package serviceconfig -import ( - "google.golang.org/grpc/internal" -) - // Config represents an opaque data structure holding a service config. type Config interface { - isConfig() + isServiceConfig() } // LoadBalancingConfig represents an opaque data structure holding a load -// balancer config. +// balancing config. type LoadBalancingConfig interface { isLoadBalancingConfig() } -// Parse parses the JSON service config provided into an internal form or -// returns an error if the config is invalid. -func Parse(ServiceConfigJSON string) (Config, error) { - c, err := internal.ParseServiceConfig(ServiceConfigJSON) - if err != nil { - return nil, err - } - return c.(Config), err +// ParseResult contains a service config or an error. Exactly one must be +// non-nil. +type ParseResult struct { + Config Config + Err error } diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index f3f593c844..9e22c393f1 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -91,6 +91,8 @@ type InHeader struct { LocalAddr net.Addr // Compression is the compression algorithm used for the RPC. Compression string + // Header contains the header metadata received. 
+ Header metadata.MD } // IsClient indicates if the stats information is from client side. @@ -104,6 +106,9 @@ type InTrailer struct { Client bool // WireLength is the wire length of trailer. WireLength int + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this InTrailer is from the client side. + Trailer metadata.MD } // IsClient indicates if the stats information is from client side. @@ -146,6 +151,8 @@ type OutHeader struct { LocalAddr net.Addr // Compression is the compression algorithm used for the RPC. Compression string + // Header contains the header metadata sent. + Header metadata.MD } // IsClient indicates if this stats information is from client side. @@ -159,6 +166,9 @@ type OutTrailer struct { Client bool // WireLength is the wire length of trailer. WireLength int + // Trailer contains the trailer metadata sent to the client. This + // field is only valid if this OutTrailer is from the server side. + Trailer metadata.MD } // IsClient indicates if this stats information is from client side. @@ -176,6 +186,7 @@ type End struct { EndTime time.Time // Trailer contains the trailer metadata received from the server. This // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. Trailer metadata.MD // Error is the error the RPC ended with. It is an error generated from // status.Status and can be converted back to status.Status using diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 134a624a15..bb99940e36 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -488,7 +488,7 @@ func (cs *clientStream) shouldRetry(err error) error { pushback := 0 hasPushback := false if cs.attempt.s != nil { - if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to { + if !cs.attempt.s.TrailersOnly() { return err } diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 0a57b99948..07a2d26b3e 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -41,9 +41,6 @@ func methodFamily(m string) string { if i := strings.Index(m, "/"); i >= 0 { m = m[:i] // remove everything from second slash } - if i := strings.LastIndex(m, "."); i >= 0 { - m = m[i+1:] // cut down to last dotted component - } return m } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 5411a73a22..cf193820f3 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.23.0" +const Version = "1.27.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 661e1e1de9..0e73707278 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -31,12 +31,15 @@ PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" if [[ "$1" = "-install" ]]; then # Check for module support if go help mod >& /dev/null; then + # Install the pinned versions as defined in module tools. + pushd ./test/tools go install \ golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell \ github.com/golang/protobuf/protoc-gen-go + popd else # Ye olde `go get` incantation. # Note: this gets the latest version of all tools (vs. 
the pinned versions @@ -67,18 +70,21 @@ elif [[ "$#" -ne 0 ]]; then fi # - Ensure all source files contain a copyright message. -git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | fail_on_output +(! git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go') # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. (! grep 'func Test[^(]' *_test.go) (! grep 'func Test[^(]' test/*.go) +# - Do not import x/net/context. +(! git grep -l 'x/net/context' -- "*.go") + # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|wrr_test') +git grep -l '"math/rand"' -- "*.go" 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test') # - Ensure all ptypes proto packages are renamed when importing. -git ls-files "*.go" | (! xargs grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/") +(! git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go") # - Check imports that are illegal in appengine (until Go 1.11). # TODO: Remove when we drop Go 1.10 support @@ -86,10 +92,12 @@ go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go # - gofmt, goimports, golint (with exceptions for generated code), go vet. gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") | fail_on_output +goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go") golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") go vet -all . +misspell -error . + # - Check that generated proto files are up to date. if [[ -z "${VET_SKIP_PROTO}" ]]; then PATH="/home/travis/bin:${PATH}" make proto && \ @@ -105,30 +113,47 @@ if go help mod >& /dev/null; then fi # - Collection of static analysis checks -# TODO(dfawley): don't use deprecated functions in examples. -staticcheck -go 1.9 -checks 'inherit,-ST1015' -ignore ' -google.golang.org/grpc/balancer.go:SA1019 -google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go:SA1019 -google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go:SA1019 -google.golang.org/grpc/xds/internal/balancer/edsbalancer/balancergroup.go:SA1019 -google.golang.org/grpc/xds/internal/balancer/xds.go:SA1019 -google.golang.org/grpc/xds/internal/balancer/xds_client.go:SA1019 -google.golang.org/grpc/balancer_conn_wrappers.go:SA1019 -google.golang.org/grpc/balancer_test.go:SA1019 -google.golang.org/grpc/benchmark/benchmain/main.go:SA1019 -google.golang.org/grpc/benchmark/worker/benchmark_client.go:SA1019 -google.golang.org/grpc/clientconn.go:S1024 -google.golang.org/grpc/clientconn_state_transition_test.go:SA1019 -google.golang.org/grpc/clientconn_test.go:SA1019 -google.golang.org/grpc/examples/features/debugging/client/main.go:SA1019 -google.golang.org/grpc/examples/features/load_balancing/client/main.go:SA1019 -google.golang.org/grpc/internal/transport/handler_server.go:SA1019 -google.golang.org/grpc/internal/transport/handler_server_test.go:SA1019 -google.golang.org/grpc/resolver/dns/dns_resolver.go:SA1019 -google.golang.org/grpc/stats/stats_test.go:SA1019 -google.golang.org/grpc/test/balancer_test.go:SA1019 -google.golang.org/grpc/test/channelz_test.go:SA1019 -google.golang.org/grpc/test/end2end_test.go:SA1019 -google.golang.org/grpc/test/healthcheck_test.go:SA1019 -' ./... -misspell -error . +# +# TODO(dfawley): don't use deprecated functions in examples or first-party +# plugins. 
+SC_OUT="$(mktemp)" +staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +# Error if anything other than deprecation warnings are printed. +(! grep -v "is deprecated:.*SA1019" "${SC_OUT}") +# Only ignore the following deprecated types/fields/functions. +(! grep -Fv '.HandleResolvedAddrs +.HandleSubConnStateChange +.HeaderMap +.NewAddress +.NewServiceConfig +.Metadata is deprecated: use Attributes +.Type is deprecated: use Attributes +.UpdateBalancerState +balancer.Picker +grpc.CallCustomCodec +grpc.Code +grpc.Compressor +grpc.Decompressor +grpc.MaxMsgSize +grpc.MethodConfig +grpc.NewGZIPCompressor +grpc.NewGZIPDecompressor +grpc.RPCCompressor +grpc.RPCDecompressor +grpc.RoundRobin +grpc.ServiceConfig +grpc.WithBalancer +grpc.WithBalancerName +grpc.WithCompressor +grpc.WithDecompressor +grpc.WithDialer +grpc.WithMaxMsgSize +grpc.WithServiceConfig +grpc.WithTimeout +http.CloseNotifier +naming.Resolver +naming.Update +naming.Watcher +resolver.Backend +resolver.GRPCLB' "${SC_OUT}" +) diff --git a/vendor/modules.txt b/vendor/modules.txt index 8aee922d4c..86ee94c547 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -52,7 +52,7 @@ github.com/golang/groupcache/lru github.com/golang/mock/gomock github.com/golang/mock/mockgen github.com/golang/mock/mockgen/model -# github.com/golang/protobuf v1.3.1 +# github.com/golang/protobuf v1.3.2 github.com/golang/protobuf/proto github.com/golang/protobuf/protoc-gen-go github.com/golang/protobuf/protoc-gen-go/descriptor @@ -233,10 +233,12 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 +# google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.23.0 +# google.golang.org/grpc v1.27.0 google.golang.org/grpc +google.golang.org/grpc/attributes +google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/roundrobin @@ -254,10 +256,13 @@ google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog +google.golang.org/grpc/internal/buffer google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/keepalive @@ -265,8 +270,6 @@ google.golang.org/grpc/metadata google.golang.org/grpc/naming google.golang.org/grpc/peer google.golang.org/grpc/resolver -google.golang.org/grpc/resolver/dns -google.golang.org/grpc/resolver/passthrough google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status
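The picker side of the balancer migration in this update follows the same shape. A minimal V2-style picker, sketched below purely for illustration (the fixedPicker type is hypothetical and not part of the vendored code), shows the Pick(PickInfo) (PickResult, error) contract that pickerWrapper and pickfirst now operate on:

    package fixedpicker

    import "google.golang.org/grpc/balancer"

    // fixedPicker hands every RPC the single SubConn it was built with.
    type fixedPicker struct {
        sc balancer.SubConn
    }

    var _ balancer.V2Picker = (*fixedPicker)(nil)

    func (p *fixedPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
        if p.sc == nil {
            // Sentinel error: gRPC blocks the RPC and retries once a new
            // picker is pushed.
            return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
        }
        return balancer.PickResult{SubConn: p.sc}, nil
    }

A balancer would hand such a picker to gRPC through cc.UpdateState(balancer.State{ConnectivityState: ..., Picker: ...}), as the rewritten pickfirst balancer does above.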