From a6cff59b556e356a6020d2393752c5a1ac4769ca Mon Sep 17 00:00:00 2001
From: Dongsu Park
Date: Fri, 26 Aug 2016 15:52:26 +0200
Subject: [PATCH 1/3] vendor: update grpc to master 2016-08-26

Update vendor/google.golang.org/grpc to the most recent master,
as of 2016-08-26.
---
 glide.lock | 7 +-
 glide.yaml | 1 +
 .../coreos/go-systemd/activation/listeners.go | 2 -
 .../github.com/gogo/protobuf/jsonpb/jsonpb.go | 713 ++++++
 .../jsonpb_test_proto/more_test_objects.pb.go | 199 ++
 .../jsonpb_test_proto/test_objects.pb.go | 617 +++++
 .../protobuf/proto/proto3_proto/proto3.pb.go | 184 ++
 .../golang/protobuf/jsonpb/jsonpb.go | 832 +++++++
 .../jsonpb_test_proto/more_test_objects.pb.go | 200 ++
 .../jsonpb_test_proto/test_objects.pb.go | 769 ++++++
 .../protobuf/proto/proto3_proto/proto3.pb.go | 199 ++
 .../protoc-gen-go/descriptor/descriptor.pb.go | 2076 +++++++++++++++++
 .../protoc-gen-go/plugin/plugin.pb.go | 229 ++
 .../golang/protobuf/ptypes/any/any.pb.go | 155 ++
 .../protobuf/ptypes/duration/duration.pb.go | 114 +
 .../golang/protobuf/ptypes/empty/empty.pb.go | 69 +
 .../protobuf/ptypes/struct/struct.pb.go | 382 +++
 .../protobuf/ptypes/timestamp/timestamp.pb.go | 127 +
 .../protobuf/ptypes/wrappers/wrappers.pb.go | 200 ++
 vendor/google.golang.org/grpc/backoff.go | 80 +
 vendor/google.golang.org/grpc/balancer.go | 385 +++
 vendor/google.golang.org/grpc/call.go | 98 +-
 vendor/google.golang.org/grpc/clientconn.go | 858 ++++---
 .../grpc/credentials/credentials.go | 87 +-
 .../grpc/credentials/credentials_util_go17.go | 76 +
 .../credentials/credentials_util_pre_go17.go | 74 +
 .../grpc/credentials/oauth/oauth.go | 33 +-
 vendor/google.golang.org/grpc/doc.go | 2 +-
 .../health.pb.go | 72 +-
 .../health/grpc_health_v1alpha/health.proto | 20 -
 .../google.golang.org/grpc/health/health.go | 17 +-
 vendor/google.golang.org/grpc/interceptor.go | 74 +
 .../grpc/internal/internal.go | 49 +
 .../grpc/interop/client/client.go | 16 +-
 .../grpc/interop/grpc_testing/test.pb.go | 61 +-
 .../grpc/interop/grpc_testing/test.proto | 140 --
 .../grpc/interop/test_utils.go | 15 -
 .../grpc/metadata/metadata.go | 14 +-
 .../google.golang.org/grpc/naming/naming.go | 3 +-
 vendor/google.golang.org/grpc/picker.go | 243 --
 .../grpc_reflection_v1alpha/reflection.pb.go | 694 ++++++
 .../grpc/reflection/grpc_testing/proto2.pb.go | 56 +
 .../reflection/grpc_testing/proto2_ext.pb.go | 88 +
 .../grpc/reflection/grpc_testing/test.pb.go | 220 ++
 .../grpc/reflection/serverreflection.go | 395 ++++
 vendor/google.golang.org/grpc/rpc_util.go | 155 +-
 vendor/google.golang.org/grpc/server.go | 294 ++-
 vendor/google.golang.org/grpc/stream.go | 193 +-
 .../grpc/stress/client/main.go | 298 +++
 .../grpc/stress/grpc_testing/metrics.pb.go | 361 +++
 .../grpc/stress/metrics_client/main.go | 97 +
 vendor/google.golang.org/grpc/trace.go | 3 +-
 .../grpc/transport/control.go | 89 +-
 .../google.golang.org/grpc/transport/go16.go | 46 +
 .../google.golang.org/grpc/transport/go17.go | 46 +
 .../grpc/transport/handler_server.go | 65 +-
 .../grpc/transport/http2_client.go | 489 ++--
 .../grpc/transport/http2_server.go | 249 +-
 .../grpc/transport/http_util.go | 264 ++-
 .../grpc/transport/pre_go16.go | 51 +
 .../grpc/transport/transport.go | 130 +-
 61 files changed, 12221 insertions(+), 1554 deletions(-)
 create mode 100644 vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go
 create mode 100644 vendor/github.com/gogo/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go
 create mode 100644 vendor/github.com/gogo/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
 create mode 100644 vendor/github.com/gogo/protobuf/proto/proto3_proto/proto3.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
 create mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
 create mode 100644 vendor/google.golang.org/grpc/backoff.go
 create mode 100644 vendor/google.golang.org/grpc/balancer.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
 rename vendor/google.golang.org/grpc/health/{grpc_health_v1alpha => grpc_health_v1}/health.pb.go (58%)
 delete mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto
 create mode 100644 vendor/google.golang.org/grpc/interceptor.go
 create mode 100644 vendor/google.golang.org/grpc/internal/internal.go
 delete mode 100644 vendor/google.golang.org/grpc/interop/grpc_testing/test.proto
 delete mode 100644 vendor/google.golang.org/grpc/picker.go
 create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
 create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go
 create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go
 create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go
 create mode 100644 vendor/google.golang.org/grpc/reflection/serverreflection.go
 create mode 100644 vendor/google.golang.org/grpc/stress/client/main.go
 create mode 100644 vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go
 create mode 100644 vendor/google.golang.org/grpc/stress/metrics_client/main.go
 create mode 100644 vendor/google.golang.org/grpc/transport/go16.go
 create mode 100644 vendor/google.golang.org/grpc/transport/go17.go
 create mode 100644 vendor/google.golang.org/grpc/transport/pre_go16.go

diff --git a/glide.lock b/glide.lock
index bfcd8ec1a..a99cb1122 100644
--- a/glide.lock
+++ b/glide.lock
@@ -1,5 +1,5 @@
-hash: c585fb40a2a91ff24a0324b16d891616815fcb086e784e1a3417cf9324f4cb5e
-updated: 2016-08-25T12:55:03.672545229+02:00
+hash: 330b9b6c73a80bab434112ebf538765e010a5ccfb0f86e0b2d1a0ba1fe47eada
+updated: 2016-08-26T15:46:26.653614146+02:00
 imports:
 - name: github.com/beorn7/perks
   version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
@@ -105,11 +105,12 @@ imports:
   - googleapi
   - googleapi/internal/uritemplates
 - name: google.golang.org/grpc
-  version: 178b68e2819d3ed711040301c12f3f97b6c270ce
+  version: b7aa4e95cbb5ec9d07c52a7541ce178ef9626316
   subpackages:
   - codes
   - credentials
   - grpclog
+  - internal
   - metadata
   - naming
   - peer
diff --git a/glide.yaml b/glide.yaml
index ba4ba2f9c..6291e874d 100644
--- a/glide.yaml
+++ b/glide.yaml
@@ -35,3 +35,4 @@ import:
 - package: google.golang.org/api
   subpackages:
   - googleapi
+- package: github.com/spf13/viper
diff --git a/vendor/github.com/coreos/go-systemd/activation/listeners.go b/vendor/github.com/coreos/go-systemd/activation/listeners.go
index df27c29e9..fd5dfc709 100644
--- a/vendor/github.com/coreos/go-systemd/activation/listeners.go
+++ b/vendor/github.com/coreos/go-systemd/activation/listeners.go
@@ -48,8 +48,6 @@ func TLSListeners(unsetEnv bool, tlsConfig *tls.Config) ([]net.Listener, error)
 	}
 
 	if tlsConfig != nil && err == nil {
-		tlsConfig.NextProtos = []string{"http/1.1"}
-
 		for i, l := range listeners {
 			// Activate TLS only for TCP sockets
 			if l.Addr().Network() == "tcp" {
diff --git a/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go
new file mode 100644
index 000000000..3fe315cbd
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go
@@ -0,0 +1,713 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
+It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
+
+This package produces a different output than the standard "encoding/json" package,
+which does not operate correctly on protocol buffers.
+*/
+package jsonpb
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+// Marshaler is a configurable object for converting between
+// protocol buffer objects and a JSON representation for them.
+type Marshaler struct {
+	// Whether to render enum values as integers, as opposed to string values.
+	EnumsAsInts bool
+
+	// Whether to render fields with zero values.
+	EmitDefaults bool
+
+	// A string to indent each level by.
The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// marshalObject writes a struct to the Writer. +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent string) error { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + s := reflect.ValueOf(v).Elem() + firstField := true + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. + sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + // If the map value is a cast type, it may not implement proto.Message, therefore + // allow the struct tag to declare the underlying message type. Instead of changing + // the signatures of the child types (and because prop.mvalue is not public), use + // CustomType as a passer. + if value.Kind() == reflect.Map { + if tag := valueField.Tag.Get("protobuf"); tag != "" { + for _, v := range strings.Split(tag, ",") { + if !strings.HasPrefix(v, "castvaluetype=") { + continue + } + v = strings.TrimPrefix(v, "castvaluetype=") + prop.CustomType = v + break + } + } + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. 
+ ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + + v = reflect.Indirect(v) + + // Handle repeated elements. + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle enumerations. + if !m.EnumsAsInts && prop.Enum != "" { + // Unknown enum values will are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. + enumStr := v.Interface().(fmt.Stringer).String() + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + enumStr = string(data) + enumStr, err = strconv.Unquote(enumStr) + if err != nil { + return err + } + } + + isKnownEnum := enumStr != valStr + + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages. 
+ if v.Kind() == reflect.Struct { + i := v + if v.CanAddr() { + i = v.Addr() + } else { + i = reflect.New(v.Type()) + i.Elem().Set(v) + } + iface := i.Interface() + if iface == nil { + out.write(`null`) + return out.err + } + pm, ok := iface.(proto.Message) + if !ok { + if prop.CustomType == "" { + return fmt.Errorf("%v does not implement proto.Message", v.Type()) + } + t := proto.MessageType(prop.CustomType) + if t == nil || !i.Type().ConvertibleTo(t) { + return fmt.Errorf("%v declared custom type %s but it is not convertible to %v", v.Type(), prop.CustomType, t) + } + pm = i.Convert(t).Interface().(proto.Message) + } + return m.marshalObject(out, pm, indent+m.Indent) + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. + if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + if err := m.marshalValue(out, prop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. 
+func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. +func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + target.Set(reflect.New(targetType.Elem())) + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + target.SetInt(int64(n)) + return nil + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. + var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. 
+ var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays + if targetType.Kind() == reflect.Slice { + if targetType.Elem().Kind() == reflect.Uint8 { + outRef := reflect.New(targetType) + outVal := outRef.Interface() + //CustomType with underlying type []byte + if _, ok := outVal.(interface { + UnmarshalJSON([]byte) error + }); ok { + if err := json.Unmarshal(inputValue, outVal); err != nil { + return err + } + target.Set(outRef.Elem()) + return nil + } + // Special case for encoded bytes. Pre-go1.5 doesn't support unmarshalling + // strings into aliased []byte types. + // https://github.com/golang/go/commit/4302fd0409da5e4f1d71471a6770dacdc3301197 + // https://github.com/golang/go/commit/c60707b14d6be26bf4213114d13070bff00d0b0a + var out []byte + if err := json.Unmarshal(inputValue, &out); err != nil { + return err + } + target.SetBytes(out) + return nil + } + + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + len := len(slc) + target.Set(reflect.MakeSlice(targetType, len, len)) + for i := 0; i < len; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + target.Set(reflect.MakeMap(targetType)) + var keyprop, valprop *proto.Properties + if prop != nil { + // These could still be nil if the protobuf metadata is broken somehow. + // TODO: This won't work because the fields are unexported. + // We should probably just reparse them. + //keyprop, valprop = prop.mkeyprop, prop.mvalprop + } + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { + return err + } + } + + if !k.Type().AssignableTo(targetType.Key()) { + k = k.Convert(targetType.Key()) + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + if err := u.unmarshalValue(v, raw, valprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + return nil + } + + // 64-bit integers can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. 
+func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. +type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} diff --git a/vendor/github.com/gogo/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go new file mode 100644 index 000000000..15902e699 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go @@ -0,0 +1,199 @@ +// Code generated by protoc-gen-gogo. +// source: more_test_objects.proto +// DO NOT EDIT! + +/* +Package jsonpb is a generated protocol buffer package. + +It is generated from these files: + more_test_objects.proto + test_objects.proto + +It has these top-level messages: + Simple3 + Mappy + Simple + Repeats + Widget + Maps + MsgWithOneof + Real + Complex +*/ +package jsonpb + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Numeral int32 + +const ( + Numeral_UNKNOWN Numeral = 0 + Numeral_ARABIC Numeral = 1 + Numeral_ROMAN Numeral = 2 +) + +var Numeral_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ARABIC", + 2: "ROMAN", +} +var Numeral_value = map[string]int32{ + "UNKNOWN": 0, + "ARABIC": 1, + "ROMAN": 2, +} + +func (x Numeral) String() string { + return proto.EnumName(Numeral_name, int32(x)) +} +func (Numeral) EnumDescriptor() ([]byte, []int) { return fileDescriptorMoreTestObjects, []int{0} } + +type Simple3 struct { + Dub float64 `protobuf:"fixed64,1,opt,name=dub,proto3" json:"dub,omitempty"` +} + +func (m *Simple3) Reset() { *m = Simple3{} } +func (m *Simple3) String() string { return proto.CompactTextString(m) } +func (*Simple3) ProtoMessage() {} +func (*Simple3) Descriptor() ([]byte, []int) { return fileDescriptorMoreTestObjects, []int{0} } + +type Mappy struct { + Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + Strry map[string]string `protobuf:"bytes,2,rep,name=strry" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` + Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=jsonpb.Numeral"` + S32Booly map[int32]bool `protobuf:"bytes,7,rep,name=s32booly" json:"s32booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + S64Booly map[int64]bool `protobuf:"bytes,8,rep,name=s64booly" json:"s64booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + U32Booly map[uint32]bool `protobuf:"bytes,9,rep,name=u32booly" json:"u32booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + U64Booly map[uint64]bool `protobuf:"bytes,10,rep,name=u64booly" json:"u64booly,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` +} + +func (m *Mappy) Reset() { *m = Mappy{} } +func (m *Mappy) String() string { return proto.CompactTextString(m) } +func (*Mappy) ProtoMessage() {} +func (*Mappy) Descriptor() ([]byte, []int) { return fileDescriptorMoreTestObjects, []int{1} } + +func (m *Mappy) GetNummy() map[int64]int32 { + if m != nil { + return m.Nummy + } + return nil +} + +func (m *Mappy) GetStrry() map[string]string { + if m != nil { + return m.Strry + } + return nil +} + +func (m *Mappy) GetObjjy() map[int32]*Simple3 { + if m != nil { + return m.Objjy + } + return nil +} + +func (m *Mappy) GetBuggy() map[int64]string { + if m != nil { + return m.Buggy + } + return nil +} + +func (m *Mappy) GetBooly() map[bool]bool { + if m != nil { + return m.Booly + } + return nil +} + +func (m *Mappy) GetEnumy() map[string]Numeral { + if m != nil { + 
return m.Enumy + } + return nil +} + +func (m *Mappy) GetS32Booly() map[int32]bool { + if m != nil { + return m.S32Booly + } + return nil +} + +func (m *Mappy) GetS64Booly() map[int64]bool { + if m != nil { + return m.S64Booly + } + return nil +} + +func (m *Mappy) GetU32Booly() map[uint32]bool { + if m != nil { + return m.U32Booly + } + return nil +} + +func (m *Mappy) GetU64Booly() map[uint64]bool { + if m != nil { + return m.U64Booly + } + return nil +} + +func init() { + proto.RegisterType((*Simple3)(nil), "jsonpb.Simple3") + proto.RegisterType((*Mappy)(nil), "jsonpb.Mappy") + proto.RegisterEnum("jsonpb.Numeral", Numeral_name, Numeral_value) +} + +func init() { proto.RegisterFile("more_test_objects.proto", fileDescriptorMoreTestObjects) } + +var fileDescriptorMoreTestObjects = []byte{ + // 444 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x94, 0xc1, 0x6b, 0xdb, 0x30, + 0x14, 0x87, 0xe7, 0xa4, 0x4e, 0xec, 0x17, 0xba, 0x19, 0x31, 0x98, 0x58, 0x2f, 0xa1, 0x30, 0x08, + 0x83, 0xf9, 0x90, 0x8c, 0xad, 0x6c, 0xa7, 0x74, 0xf4, 0x50, 0x46, 0x1d, 0x70, 0x09, 0x3b, 0x96, + 0x78, 0x13, 0x65, 0x9e, 0x6d, 0x19, 0xdb, 0x1a, 0xe8, 0x8f, 0x1f, 0x8c, 0x27, 0xcb, 0xb5, 0x6c, + 0x14, 0xd2, 0x9b, 0xcc, 0xef, 0xfb, 0xf2, 0x9e, 0xf4, 0x1e, 0x81, 0x37, 0x39, 0xaf, 0xd8, 0x43, + 0xc3, 0xea, 0xe6, 0x81, 0x27, 0x29, 0xfb, 0xd9, 0xd4, 0x61, 0x59, 0xf1, 0x86, 0x93, 0x59, 0x5a, + 0xf3, 0xa2, 0x4c, 0x2e, 0x2f, 0x60, 0x7e, 0xff, 0x3b, 0x2f, 0x33, 0xb6, 0x21, 0x01, 0x4c, 0x7f, + 0x89, 0x84, 0x3a, 0x4b, 0x67, 0xe5, 0xc4, 0x78, 0xbc, 0xfc, 0xe7, 0x81, 0x7b, 0x77, 0x28, 0x4b, + 0x49, 0x42, 0x70, 0x0b, 0x91, 0xe7, 0x92, 0x3a, 0xcb, 0xe9, 0x6a, 0xb1, 0xa6, 0x61, 0xab, 0x87, + 0x2a, 0x0d, 0x23, 0x8c, 0x6e, 0x8a, 0xa6, 0x92, 0x71, 0x8b, 0x21, 0x5f, 0x37, 0x55, 0x25, 0xe9, + 0xc4, 0xc6, 0xdf, 0x63, 0xa4, 0x79, 0x85, 0x21, 0xcf, 0x93, 0x34, 0x95, 0x74, 0x6a, 0xe3, 0x77, + 0x18, 0x69, 0x5e, 0x61, 0xc8, 0x27, 0xe2, 0xf1, 0x51, 0xd2, 0x33, 0x1b, 0x7f, 0x8d, 0x91, 0xe6, + 0x15, 0xa6, 0x78, 0xce, 0x33, 0x49, 0x5d, 0x2b, 0x8f, 0x51, 0xc7, 0xe3, 0x19, 0x79, 0x56, 0x88, + 0x5c, 0xd2, 0x99, 0x8d, 0xbf, 0xc1, 0x48, 0xf3, 0x0a, 0x23, 0x9f, 0xc1, 0xab, 0x37, 0xeb, 0xb6, + 0xc4, 0x5c, 0x29, 0x17, 0xa3, 0x2b, 0xeb, 0xb4, 0xb5, 0x9e, 0x60, 0x25, 0x7e, 0xfa, 0xd8, 0x8a, + 0x9e, 0x55, 0xd4, 0x69, 0x27, 0xea, 0x4f, 0x14, 0x45, 0x57, 0xd1, 0xb7, 0x89, 0xfb, 0x61, 0x45, + 0x61, 0x54, 0x14, 0x5d, 0x45, 0xb0, 0x8a, 0xc3, 0x8a, 0x1d, 0xfc, 0xf6, 0x0a, 0xa0, 0x1f, 0x34, + 0x6e, 0xcb, 0x1f, 0x26, 0xd5, 0xb6, 0x4c, 0x63, 0x3c, 0x92, 0xd7, 0xe0, 0xfe, 0x3d, 0x64, 0x82, + 0xd1, 0xc9, 0xd2, 0x59, 0xb9, 0x71, 0xfb, 0xf1, 0x65, 0x72, 0xe5, 0xa0, 0xd9, 0x8f, 0xdc, 0x34, + 0x7d, 0x8b, 0xe9, 0x9b, 0xe6, 0x2d, 0x40, 0x3f, 0x7c, 0xd3, 0x74, 0x5b, 0xf3, 0x9d, 0x69, 0x2e, + 0xd6, 0xaf, 0xba, 0x9b, 0xe8, 0x9d, 0x1e, 0x35, 0xd1, 0xef, 0xc5, 0xa9, 0xf6, 0xfd, 0xb1, 0xf9, + 0xf4, 0x20, 0xa6, 0xe9, 0x59, 0x4c, 0x6f, 0xd4, 0x7e, 0xbf, 0x2b, 0x96, 0x8b, 0x0f, 0xda, 0x7f, + 0xd9, 0xb7, 0x1f, 0x89, 0x9c, 0x55, 0x87, 0xcc, 0xfc, 0xa9, 0xaf, 0x70, 0x3e, 0xd8, 0x21, 0xcb, + 0x63, 0x1c, 0xef, 0x03, 0x65, 0x73, 0xaa, 0xa7, 0xae, 0x3f, 0x96, 0xf7, 0xc7, 0x2a, 0x9f, 0x3f, + 0x47, 0x3e, 0x56, 0xf9, 0xec, 0x84, 0xfc, 0xfe, 0x03, 0xcc, 0xf5, 0x4b, 0x90, 0x05, 0xcc, 0xf7, + 0xd1, 0xf7, 0x68, 0xf7, 0x23, 0x0a, 0x5e, 0x10, 0x80, 0xd9, 0x36, 0xde, 0x5e, 0xdf, 0x7e, 0x0b, + 0x1c, 0xe2, 0x83, 0x1b, 0xef, 0xee, 0xb6, 0x51, 0x30, 0x49, 0x66, 0xea, 0xaf, 0x6d, 0xf3, 0x3f, + 0x00, 0x00, 0xff, 0xff, 0xa2, 0x4b, 0xe1, 0x77, 0xf5, 
0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go new file mode 100644 index 000000000..edea373ed --- /dev/null +++ b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go @@ -0,0 +1,617 @@ +// Code generated by protoc-gen-gogo. +// source: test_objects.proto +// DO NOT EDIT! + +package jsonpb + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import gogoproto "gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Widget_Color int32 + +const ( + Widget_RED Widget_Color = 0 + Widget_GREEN Widget_Color = 1 + Widget_BLUE Widget_Color = 2 +) + +var Widget_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Widget_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Widget_Color) Enum() *Widget_Color { + p := new(Widget_Color) + *p = x + return p +} +func (x Widget_Color) String() string { + return proto.EnumName(Widget_Color_name, int32(x)) +} +func (x *Widget_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Widget_Color_value, data, "Widget_Color") + if err != nil { + return err + } + *x = Widget_Color(value) + return nil +} +func (Widget_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptorTestObjects, []int{2, 0} } + +// Test message for holding primitive types. +type Simple struct { + OBool *bool `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"` + OInt32 *int32 `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"` + OInt64 *int64 `protobuf:"varint,3,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"` + OUint32 *uint32 `protobuf:"varint,4,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"` + OUint64 *uint64 `protobuf:"varint,5,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"` + OSint32 *int32 `protobuf:"zigzag32,6,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"` + OSint64 *int64 `protobuf:"zigzag64,7,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"` + OFloat *float32 `protobuf:"fixed32,8,opt,name=o_float,json=oFloat" json:"o_float,omitempty"` + ODouble *float64 `protobuf:"fixed64,9,opt,name=o_double,json=oDouble" json:"o_double,omitempty"` + OString *string `protobuf:"bytes,10,opt,name=o_string,json=oString" json:"o_string,omitempty"` + OBytes []byte `protobuf:"bytes,11,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"` + OCastBytes Bytes `protobuf:"bytes,12,opt,name=o_cast_bytes,json=oCastBytes,casttype=Bytes" json:"o_cast_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Simple) Reset() { *m = Simple{} } +func (m *Simple) String() string { return proto.CompactTextString(m) } +func (*Simple) ProtoMessage() {} +func (*Simple) Descriptor() ([]byte, []int) { return fileDescriptorTestObjects, []int{0} } + +func (m *Simple) GetOBool() bool { + if m != nil && m.OBool != nil { + return *m.OBool + } + return false +} + +func (m *Simple) GetOInt32() int32 { + if m != nil && m.OInt32 != nil { + return *m.OInt32 + } + return 0 +} + +func (m *Simple) GetOInt64() int64 { + if m != nil && m.OInt64 != nil { + return *m.OInt64 + } + return 0 +} + +func (m *Simple) GetOUint32() uint32 { + if m != nil && m.OUint32 != nil { + return *m.OUint32 + } + return 0 +} + +func (m *Simple) 
GetOUint64() uint64 { + if m != nil && m.OUint64 != nil { + return *m.OUint64 + } + return 0 +} + +func (m *Simple) GetOSint32() int32 { + if m != nil && m.OSint32 != nil { + return *m.OSint32 + } + return 0 +} + +func (m *Simple) GetOSint64() int64 { + if m != nil && m.OSint64 != nil { + return *m.OSint64 + } + return 0 +} + +func (m *Simple) GetOFloat() float32 { + if m != nil && m.OFloat != nil { + return *m.OFloat + } + return 0 +} + +func (m *Simple) GetODouble() float64 { + if m != nil && m.ODouble != nil { + return *m.ODouble + } + return 0 +} + +func (m *Simple) GetOString() string { + if m != nil && m.OString != nil { + return *m.OString + } + return "" +} + +func (m *Simple) GetOBytes() []byte { + if m != nil { + return m.OBytes + } + return nil +} + +func (m *Simple) GetOCastBytes() Bytes { + if m != nil { + return m.OCastBytes + } + return nil +} + +// Test message for holding repeated primitives. +type Repeats struct { + RBool []bool `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"` + RInt32 []int32 `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"` + RInt64 []int64 `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"` + RUint32 []uint32 `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"` + RUint64 []uint64 `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"` + RSint32 []int32 `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"` + RSint64 []int64 `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"` + RFloat []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"` + RDouble []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"` + RString []string `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"` + RBytes [][]byte `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Repeats) Reset() { *m = Repeats{} } +func (m *Repeats) String() string { return proto.CompactTextString(m) } +func (*Repeats) ProtoMessage() {} +func (*Repeats) Descriptor() ([]byte, []int) { return fileDescriptorTestObjects, []int{1} } + +func (m *Repeats) GetRBool() []bool { + if m != nil { + return m.RBool + } + return nil +} + +func (m *Repeats) GetRInt32() []int32 { + if m != nil { + return m.RInt32 + } + return nil +} + +func (m *Repeats) GetRInt64() []int64 { + if m != nil { + return m.RInt64 + } + return nil +} + +func (m *Repeats) GetRUint32() []uint32 { + if m != nil { + return m.RUint32 + } + return nil +} + +func (m *Repeats) GetRUint64() []uint64 { + if m != nil { + return m.RUint64 + } + return nil +} + +func (m *Repeats) GetRSint32() []int32 { + if m != nil { + return m.RSint32 + } + return nil +} + +func (m *Repeats) GetRSint64() []int64 { + if m != nil { + return m.RSint64 + } + return nil +} + +func (m *Repeats) GetRFloat() []float32 { + if m != nil { + return m.RFloat + } + return nil +} + +func (m *Repeats) GetRDouble() []float64 { + if m != nil { + return m.RDouble + } + return nil +} + +func (m *Repeats) GetRString() []string { + if m != nil { + return m.RString + } + return nil +} + +func (m *Repeats) GetRBytes() [][]byte { + if m != nil { + return m.RBytes + } + return nil +} + +// Test message for holding enums and nested messages. 
+type Widget struct { + Color *Widget_Color `protobuf:"varint,1,opt,name=color,enum=jsonpb.Widget_Color" json:"color,omitempty"` + RColor []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb.Widget_Color" json:"r_color,omitempty"` + Simple *Simple `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"` + RSimple []*Simple `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"` + Repeats *Repeats `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"` + RRepeats []*Repeats `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Widget) Reset() { *m = Widget{} } +func (m *Widget) String() string { return proto.CompactTextString(m) } +func (*Widget) ProtoMessage() {} +func (*Widget) Descriptor() ([]byte, []int) { return fileDescriptorTestObjects, []int{2} } + +func (m *Widget) GetColor() Widget_Color { + if m != nil && m.Color != nil { + return *m.Color + } + return Widget_RED +} + +func (m *Widget) GetRColor() []Widget_Color { + if m != nil { + return m.RColor + } + return nil +} + +func (m *Widget) GetSimple() *Simple { + if m != nil { + return m.Simple + } + return nil +} + +func (m *Widget) GetRSimple() []*Simple { + if m != nil { + return m.RSimple + } + return nil +} + +func (m *Widget) GetRepeats() *Repeats { + if m != nil { + return m.Repeats + } + return nil +} + +func (m *Widget) GetRRepeats() []*Repeats { + if m != nil { + return m.RRepeats + } + return nil +} + +type Maps struct { + MInt64Str map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MBoolSimple map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Maps) Reset() { *m = Maps{} } +func (m *Maps) String() string { return proto.CompactTextString(m) } +func (*Maps) ProtoMessage() {} +func (*Maps) Descriptor() ([]byte, []int) { return fileDescriptorTestObjects, []int{3} } + +func (m *Maps) GetMInt64Str() map[int64]string { + if m != nil { + return m.MInt64Str + } + return nil +} + +func (m *Maps) GetMBoolSimple() map[bool]*Simple { + if m != nil { + return m.MBoolSimple + } + return nil +} + +type MsgWithOneof struct { + // Types that are valid to be assigned to Union: + // *MsgWithOneof_Title + // *MsgWithOneof_Salary + // *MsgWithOneof_Country + Union isMsgWithOneof_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MsgWithOneof) Reset() { *m = MsgWithOneof{} } +func (m *MsgWithOneof) String() string { return proto.CompactTextString(m) } +func (*MsgWithOneof) ProtoMessage() {} +func (*MsgWithOneof) Descriptor() ([]byte, []int) { return fileDescriptorTestObjects, []int{4} } + +type isMsgWithOneof_Union interface { + isMsgWithOneof_Union() +} + +type MsgWithOneof_Title struct { + Title string `protobuf:"bytes,1,opt,name=title,oneof"` +} +type MsgWithOneof_Salary struct { + Salary int64 `protobuf:"varint,2,opt,name=salary,oneof"` +} +type MsgWithOneof_Country struct { + Country string `protobuf:"bytes,3,opt,name=Country,json=country,oneof"` +} + +func (*MsgWithOneof_Title) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Salary) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Country) isMsgWithOneof_Union() {} + +func (m *MsgWithOneof) GetUnion() 
isMsgWithOneof_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *MsgWithOneof) GetTitle() string { + if x, ok := m.GetUnion().(*MsgWithOneof_Title); ok { + return x.Title + } + return "" +} + +func (m *MsgWithOneof) GetSalary() int64 { + if x, ok := m.GetUnion().(*MsgWithOneof_Salary); ok { + return x.Salary + } + return 0 +} + +func (m *MsgWithOneof) GetCountry() string { + if x, ok := m.GetUnion().(*MsgWithOneof_Country); ok { + return x.Country + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _MsgWithOneof_OneofMarshaler, _MsgWithOneof_OneofUnmarshaler, _MsgWithOneof_OneofSizer, []interface{}{ + (*MsgWithOneof_Title)(nil), + (*MsgWithOneof_Salary)(nil), + (*MsgWithOneof_Country)(nil), + } +} + +func _MsgWithOneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*MsgWithOneof) + // union + switch x := m.Union.(type) { + case *MsgWithOneof_Title: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Title) + case *MsgWithOneof_Salary: + _ = b.EncodeVarint(2<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Salary)) + case *MsgWithOneof_Country: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Country) + case nil: + default: + return fmt.Errorf("MsgWithOneof.Union has unexpected type %T", x) + } + return nil +} + +func _MsgWithOneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*MsgWithOneof) + switch tag { + case 1: // union.title + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_Title{x} + return true, err + case 2: // union.salary + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &MsgWithOneof_Salary{int64(x)} + return true, err + case 3: // union.Country + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_Country{x} + return true, err + default: + return false, nil + } +} + +func _MsgWithOneof_OneofSizer(msg proto.Message) (n int) { + m := msg.(*MsgWithOneof) + // union + switch x := m.Union.(type) { + case *MsgWithOneof_Title: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Title))) + n += len(x.Title) + case *MsgWithOneof_Salary: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Salary)) + case *MsgWithOneof_Country: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Country))) + n += len(x.Country) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Real struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Real) Reset() { *m = Real{} } +func (m *Real) String() string { return proto.CompactTextString(m) } +func (*Real) ProtoMessage() {} +func (*Real) Descriptor() ([]byte, []int) { return fileDescriptorTestObjects, []int{5} } + +var extRange_Real = []proto.ExtensionRange{ + {Start: 100, End: 536870911}, +} + +func (*Real) ExtensionRangeArray() []proto.ExtensionRange { + 
return extRange_Real +} + +func (m *Real) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Complex struct { + Imaginary *float64 `protobuf:"fixed64,1,opt,name=imaginary" json:"imaginary,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Complex) Reset() { *m = Complex{} } +func (m *Complex) String() string { return proto.CompactTextString(m) } +func (*Complex) ProtoMessage() {} +func (*Complex) Descriptor() ([]byte, []int) { return fileDescriptorTestObjects, []int{6} } + +var extRange_Complex = []proto.ExtensionRange{ + {Start: 100, End: 536870911}, +} + +func (*Complex) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Complex +} + +func (m *Complex) GetImaginary() float64 { + if m != nil && m.Imaginary != nil { + return *m.Imaginary + } + return 0 +} + +var E_Complex_RealExtension = &proto.ExtensionDesc{ + ExtendedType: (*Real)(nil), + ExtensionType: (*Complex)(nil), + Field: 123, + Name: "jsonpb.Complex.real_extension", + Tag: "bytes,123,opt,name=real_extension,json=realExtension", +} + +var E_Name = &proto.ExtensionDesc{ + ExtendedType: (*Real)(nil), + ExtensionType: (*string)(nil), + Field: 124, + Name: "jsonpb.name", + Tag: "bytes,124,opt,name=name", +} + +func init() { + proto.RegisterType((*Simple)(nil), "jsonpb.Simple") + proto.RegisterType((*Repeats)(nil), "jsonpb.Repeats") + proto.RegisterType((*Widget)(nil), "jsonpb.Widget") + proto.RegisterType((*Maps)(nil), "jsonpb.Maps") + proto.RegisterType((*MsgWithOneof)(nil), "jsonpb.MsgWithOneof") + proto.RegisterType((*Real)(nil), "jsonpb.Real") + proto.RegisterType((*Complex)(nil), "jsonpb.Complex") + proto.RegisterEnum("jsonpb.Widget_Color", Widget_Color_name, Widget_Color_value) + proto.RegisterExtension(E_Complex_RealExtension) + proto.RegisterExtension(E_Name) +} + +func init() { proto.RegisterFile("test_objects.proto", fileDescriptorTestObjects) } + +var fileDescriptorTestObjects = []byte{ + // 797 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x94, 0xd1, 0x6e, 0xe3, 0x44, + 0x14, 0x86, 0x6b, 0x4f, 0x6c, 0xc7, 0x27, 0x69, 0x09, 0xa3, 0x2e, 0x98, 0x02, 0xc2, 0x8a, 0x60, + 0x65, 0x16, 0x08, 0x52, 0xa8, 0x2a, 0xb4, 0x70, 0x43, 0xda, 0xc0, 0x22, 0xd1, 0x45, 0x9a, 0x6a, + 0xb5, 0xdc, 0x45, 0x4e, 0x3b, 0x1b, 0xbc, 0x38, 0x9e, 0xe8, 0x78, 0x02, 0x5b, 0xc1, 0x45, 0x1f, + 0x82, 0x67, 0xe0, 0x11, 0x78, 0x10, 0x1e, 0x80, 0x07, 0xe1, 0x0a, 0x9d, 0x19, 0xdb, 0x93, 0xb6, + 0xda, 0xab, 0xcc, 0x99, 0xf3, 0xff, 0x7f, 0xc6, 0xdf, 0x1c, 0x0d, 0x70, 0x2d, 0x6b, 0xbd, 0x50, + 0xcb, 0x97, 0xf2, 0x52, 0xd7, 0x93, 0x0d, 0x2a, 0xad, 0x78, 0xf8, 0xb2, 0x56, 0xd5, 0x66, 0x79, + 0x74, 0xb8, 0x52, 0x2b, 0x65, 0xb6, 0x3e, 0xa7, 0x95, 0xed, 0x8e, 0xff, 0xf1, 0x21, 0xbc, 0x28, + 0xd6, 0x9b, 0x52, 0xf2, 0x07, 0x10, 0xaa, 0xc5, 0x52, 0xa9, 0x32, 0xf1, 0x52, 0x2f, 0xeb, 0x8b, + 0x40, 0xcd, 0x94, 0x2a, 0xf9, 0xdb, 0x10, 0xa9, 0x45, 0x51, 0xe9, 0x2f, 0xa6, 0x89, 0x9f, 0x7a, + 0x59, 0x20, 0x42, 0xf5, 0x3d, 0x55, 0x5d, 0xe3, 0xe4, 0x38, 0x61, 0xa9, 0x97, 0x31, 0xdb, 0x38, + 0x39, 0xe6, 0xef, 0x40, 0x5f, 0x2d, 0xb6, 0xd6, 0xd2, 0x4b, 0xbd, 0x6c, 0x5f, 0x44, 0xea, 0x99, + 0x29, 0x5d, 0xeb, 0xe4, 0x38, 0x09, 0x52, 0x2f, 0xeb, 0x35, 0xad, 0xd6, 0x55, 0x5b, 0x57, 0x98, + 0x7a, 0xd9, 0x9b, 0x22, 0x52, 0x17, 0x3b, 0xae, 0xda, 0xba, 0xa2, 0xd4, 0xcb, 0x78, 0xd3, 0x3a, + 0x39, 0xb6, 0x87, 0x78, 0x51, 0xaa, 0x5c, 0x27, 0xfd, 0xd4, 0xcb, 0x7c, 0x11, 0xaa, 0x6f, 0xa9, + 0xb2, 0x9e, 0x2b, 0xb5, 0x5d, 
0x96, 0x32, 0x89, 0x53, 0x2f, 0xf3, 0x44, 0xa4, 0xce, 0x4c, 0xd9, + 0xc4, 0x69, 0x2c, 0xaa, 0x55, 0x02, 0xa9, 0x97, 0xc5, 0x14, 0x67, 0x4a, 0x1b, 0xb7, 0xbc, 0xd6, + 0xb2, 0x4e, 0x06, 0xa9, 0x97, 0x0d, 0x45, 0xa8, 0x66, 0x54, 0xf1, 0x4f, 0x60, 0xa8, 0x16, 0x97, + 0x79, 0xad, 0x9b, 0xee, 0x90, 0xba, 0xb3, 0xf8, 0xbf, 0x7f, 0x3f, 0x08, 0x8c, 0x40, 0x80, 0x3a, + 0xcd, 0x6b, 0x6d, 0xd6, 0xe3, 0xbf, 0x7c, 0x88, 0x84, 0xdc, 0xc8, 0x5c, 0xd7, 0x44, 0x15, 0x5b, + 0xaa, 0x8c, 0xa8, 0x62, 0x4b, 0x15, 0x3b, 0xaa, 0x8c, 0xa8, 0x62, 0x47, 0x15, 0x3b, 0xaa, 0x8c, + 0xa8, 0x62, 0x47, 0x15, 0x1d, 0x55, 0x46, 0x54, 0xd1, 0x51, 0x45, 0x47, 0x95, 0x11, 0x55, 0x74, + 0x54, 0xd1, 0x51, 0x65, 0x44, 0x15, 0x2f, 0x76, 0x5c, 0x1d, 0x55, 0x46, 0x54, 0xd1, 0x51, 0xc5, + 0x8e, 0x2a, 0x23, 0xaa, 0xd8, 0x51, 0x45, 0x47, 0x95, 0x11, 0x55, 0x74, 0x54, 0xd1, 0x51, 0x65, + 0x44, 0x15, 0x1d, 0x55, 0xec, 0xa8, 0x32, 0xa2, 0x8a, 0x16, 0xd4, 0xdf, 0x3e, 0x84, 0xcf, 0x8b, + 0xab, 0x95, 0xd4, 0xfc, 0x11, 0x04, 0x97, 0xaa, 0x54, 0x68, 0x86, 0xef, 0x60, 0x7a, 0x38, 0xb1, + 0x63, 0x3b, 0xb1, 0xed, 0xc9, 0x29, 0xf5, 0x84, 0x95, 0xf0, 0xcf, 0x28, 0xcf, 0xaa, 0x09, 0xde, + 0xeb, 0xd4, 0x21, 0x9a, 0x5f, 0xfe, 0x10, 0xc2, 0xda, 0x8c, 0xb8, 0xb9, 0xed, 0xc1, 0xf4, 0xa0, + 0x55, 0xdb, 0xc1, 0x17, 0x4d, 0x97, 0x7f, 0x6c, 0x81, 0x18, 0x25, 0x9d, 0xf3, 0xbe, 0x92, 0x00, + 0x35, 0xd2, 0x08, 0xed, 0x05, 0x27, 0x87, 0x26, 0xf3, 0x8d, 0x56, 0xd9, 0xdc, 0xbb, 0x68, 0xfb, + 0xfc, 0x53, 0x88, 0x71, 0xd1, 0x8a, 0x1f, 0x98, 0xd8, 0x7b, 0xe2, 0x3e, 0x36, 0xab, 0xf1, 0x47, + 0x10, 0xd8, 0x43, 0x47, 0xc0, 0xc4, 0xfc, 0x6c, 0xb4, 0xc7, 0x63, 0x08, 0xbe, 0x13, 0xf3, 0xf9, + 0xd3, 0x91, 0xc7, 0xfb, 0xd0, 0x9b, 0xfd, 0xf0, 0x6c, 0x3e, 0xf2, 0xc7, 0x7f, 0xfa, 0xd0, 0x3b, + 0xcf, 0x37, 0x35, 0xff, 0x0a, 0x06, 0x6b, 0x3b, 0x2e, 0xc4, 0xde, 0xcc, 0xd8, 0x60, 0xfa, 0x6e, + 0x9b, 0x4f, 0x92, 0xc9, 0xb9, 0x99, 0x9f, 0x0b, 0x8d, 0xf3, 0x4a, 0xe3, 0xb5, 0x88, 0xd7, 0x6d, + 0xcd, 0xbf, 0x81, 0xfd, 0xb5, 0x99, 0xcd, 0xf6, 0xab, 0x7d, 0x63, 0x7f, 0xff, 0xb6, 0x9d, 0xe6, + 0xd5, 0x7e, 0xb6, 0x0d, 0x18, 0xac, 0xdd, 0xce, 0xd1, 0xd7, 0x70, 0x70, 0x3b, 0x9f, 0x8f, 0x80, + 0xfd, 0x22, 0xaf, 0xcd, 0x35, 0x32, 0x41, 0x4b, 0x7e, 0x08, 0xc1, 0xaf, 0x79, 0xb9, 0x95, 0xe6, + 0xfd, 0x88, 0x85, 0x2d, 0x1e, 0xfb, 0x5f, 0x7a, 0x47, 0x4f, 0x61, 0x74, 0x37, 0x7e, 0xd7, 0xdf, + 0xb7, 0xfe, 0x0f, 0x77, 0xfd, 0xf7, 0x2f, 0xc5, 0xe5, 0x8d, 0x25, 0x0c, 0xcf, 0xeb, 0xd5, 0xf3, + 0x42, 0xff, 0xfc, 0x63, 0x25, 0xd5, 0x0b, 0xfe, 0x16, 0x04, 0xba, 0xd0, 0xa5, 0x34, 0x69, 0xf1, + 0x93, 0x3d, 0x61, 0x4b, 0x9e, 0x40, 0x58, 0xe7, 0x65, 0x8e, 0xd7, 0x26, 0x92, 0x3d, 0xd9, 0x13, + 0x4d, 0xcd, 0x8f, 0x20, 0x3a, 0x55, 0x5b, 0x3a, 0x88, 0x79, 0xd4, 0xc8, 0x13, 0x5d, 0xda, 0x8d, + 0x59, 0x04, 0xc1, 0xb6, 0x2a, 0x54, 0x35, 0x7e, 0x08, 0x3d, 0x21, 0xf3, 0xd2, 0x7d, 0x98, 0x67, + 0x1e, 0x18, 0x5b, 0x3c, 0xea, 0xf7, 0xaf, 0x46, 0x37, 0x37, 0x37, 0x37, 0xfe, 0xf8, 0x37, 0x0a, + 0xa3, 0x33, 0xbe, 0xe2, 0xef, 0x41, 0x5c, 0xac, 0xf3, 0x55, 0x51, 0xd1, 0x9f, 0x5a, 0xb9, 0xdb, + 0x70, 0x96, 0xe9, 0x19, 0x1c, 0xa0, 0xcc, 0xcb, 0x85, 0x7c, 0xa5, 0x65, 0x55, 0x17, 0xaa, 0xe2, + 0x43, 0x37, 0x2c, 0x79, 0x99, 0xfc, 0x7e, 0x7b, 0xda, 0x9a, 0x78, 0xb1, 0x4f, 0xa6, 0x79, 0xeb, + 0x79, 0x9c, 0x42, 0xaf, 0xca, 0xd7, 0xf2, 0x8e, 0xf7, 0x0f, 0x83, 0xdf, 0x74, 0x7e, 0xda, 0xfb, + 0x3f, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x95, 0x80, 0x0d, 0x2c, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/proto/proto3_proto/proto3.pb.go b/vendor/github.com/gogo/protobuf/proto/proto3_proto/proto3.pb.go new file mode 100644 index 
000000000..f1835be36 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/proto3_proto/proto3.pb.go @@ -0,0 +1,184 @@ +// Code generated by protoc-gen-gogo. +// source: proto3_proto/proto3.proto +// DO NOT EDIT! + +/* +Package proto3_proto is a generated protocol buffer package. + +It is generated from these files: + proto3_proto/proto3.proto + +It has these top-level messages: + Message + Nested + MessageWithMap +*/ +package proto3_proto + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import testdata "github.com/gogo/protobuf/proto/testdata" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Message_Humour int32 + +const ( + Message_UNKNOWN Message_Humour = 0 + Message_PUNS Message_Humour = 1 + Message_SLAPSTICK Message_Humour = 2 + Message_BILL_BAILEY Message_Humour = 3 +) + +var Message_Humour_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PUNS", + 2: "SLAPSTICK", + 3: "BILL_BAILEY", +} +var Message_Humour_value = map[string]int32{ + "UNKNOWN": 0, + "PUNS": 1, + "SLAPSTICK": 2, + "BILL_BAILEY": 3, +} + +func (x Message_Humour) String() string { + return proto.EnumName(Message_Humour_name, int32(x)) +} +func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptorProto3, []int{0, 0} } + +type Message struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,proto3,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm,proto3" json:"height_in_cm,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount,proto3" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman,proto3" json:"true_scotsman,omitempty"` + Score float32 `protobuf:"fixed32,9,opt,name=score,proto3" json:"score,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` + Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + RFunny []Message_Humour `protobuf:"varint,16,rep,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"` + Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"` + Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorProto3, []int{0} } + +func (m *Message) GetNested() *Nested { + if m != nil 
{ + return m.Nested + } + return nil +} + +func (m *Message) GetTerrain() map[string]*Nested { + if m != nil { + return m.Terrain + } + return nil +} + +func (m *Message) GetProto2Field() *testdata.SubDefaults { + if m != nil { + return m.Proto2Field + } + return nil +} + +func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { + if m != nil { + return m.Proto2Value + } + return nil +} + +type Nested struct { + Bunny string `protobuf:"bytes,1,opt,name=bunny,proto3" json:"bunny,omitempty"` + Cute bool `protobuf:"varint,2,opt,name=cute,proto3" json:"cute,omitempty"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m *Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} +func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptorProto3, []int{1} } + +type MessageWithMap struct { + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptorProto3, []int{2} } + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func init() { + proto.RegisterType((*Message)(nil), "proto3_proto.Message") + proto.RegisterType((*Nested)(nil), "proto3_proto.Nested") + proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap") + proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) +} + +func init() { proto.RegisterFile("proto3_proto/proto3.proto", fileDescriptorProto3) } + +var fileDescriptorProto3 = []byte{ + // 585 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x52, 0xef, 0x6a, 0xdb, 0x3e, + 0x14, 0xfd, 0x29, 0x4e, 0x9d, 0xf4, 0xda, 0xe9, 0xcf, 0x88, 0x0e, 0xb4, 0xb0, 0x0f, 0x5a, 0x06, + 0xc3, 0xec, 0x8f, 0x0b, 0x19, 0x83, 0x32, 0xc6, 0x46, 0xdb, 0xb5, 0x2c, 0x34, 0xcd, 0x82, 0xd2, + 0xae, 0xec, 0x93, 0xb1, 0x53, 0xc5, 0x31, 0x8b, 0xed, 0x60, 0x4b, 0x03, 0x3f, 0xc5, 0xde, 0x61, + 0x4f, 0x3a, 0x2c, 0x39, 0xad, 0x5b, 0xb2, 0xed, 0x93, 0xaf, 0x8e, 0xcf, 0xbd, 0xe7, 0xe8, 0x5c, + 0xc1, 0xe3, 0x75, 0x9e, 0x89, 0xec, 0x8d, 0xaf, 0x3e, 0x07, 0xfa, 0xe0, 0xa9, 0x0f, 0xb6, 0x9b, + 0xbf, 0xfa, 0xc3, 0x28, 0x16, 0x4b, 0x19, 0x7a, 0xf3, 0x2c, 0x39, 0x88, 0xb2, 0xa8, 0xe6, 0x86, + 0x72, 0xa1, 0x8b, 0x03, 0xc1, 0x0b, 0x71, 0x13, 0x88, 0x40, 0x15, 0x7a, 0xc2, 0xe0, 0xa7, 0x09, + 0x9d, 0x0b, 0x5e, 0x14, 0x41, 0xc4, 0x31, 0x86, 0x76, 0x1a, 0x24, 0x9c, 0x20, 0x8a, 0xdc, 0x5d, + 0xa6, 0x6a, 0x7c, 0x08, 0xdd, 0x65, 0xbc, 0x0a, 0xf2, 0x58, 0x94, 0xa4, 0x45, 0x91, 0xbb, 0x37, + 0x7c, 0xe2, 0x35, 0x45, 0xbd, 0xba, 0xd9, 0xfb, 0x2c, 0x93, 0x4c, 0xe6, 0xec, 0x96, 0x8d, 0x29, + 0xd8, 0x4b, 0x1e, 0x47, 0x4b, 0xe1, 0xc7, 0xa9, 0x3f, 0x4f, 0x88, 0x41, 0x91, 0xdb, 0x63, 0xa0, + 0xb1, 0x51, 0x7a, 0x92, 0x54, 0x7a, 0x95, 0x1d, 0xd2, 0xa6, 0xc8, 0xb5, 0x99, 0xaa, 0xf1, 0x53, + 0xb0, 0x73, 0x5e, 0xc8, 0x95, 0xf0, 0xe7, 0x99, 0x4c, 0x05, 0xe9, 0x50, 0xe4, 0x1a, 0xcc, 0xd2, + 0xd8, 0x49, 0x05, 0xe1, 0x67, 0xd0, 0x13, 0xb9, 0xe4, 0x7e, 0x31, 0xcf, 0x44, 0x91, 0x04, 0x29, + 0xe9, 0x52, 0xe4, 0x76, 0x99, 0x5d, 0x81, 0xb3, 0x1a, 0xc3, 0xfb, 0xb0, 0x53, 0xcc, 0xb3, 0x9c, + 0x93, 0x5d, 0x8a, 0xdc, 
0x16, 0xd3, 0x07, 0xec, 0x80, 0xf1, 0x9d, 0x97, 0x64, 0x87, 0x1a, 0x6e, + 0x9b, 0x55, 0x25, 0x7e, 0x05, 0x66, 0xca, 0x0b, 0xc1, 0x6f, 0x88, 0x49, 0x91, 0x6b, 0x0d, 0xf7, + 0xef, 0xdf, 0x6e, 0xa2, 0xfe, 0xb1, 0x9a, 0x83, 0xdf, 0x42, 0x27, 0xf7, 0x17, 0x32, 0x4d, 0x4b, + 0xe2, 0x50, 0xe3, 0x9f, 0x61, 0x98, 0xf9, 0x59, 0xc5, 0xc5, 0xef, 0xa1, 0x23, 0x78, 0x9e, 0x07, + 0x71, 0x4a, 0x80, 0x1a, 0xae, 0x35, 0x1c, 0x6c, 0x6f, 0xbb, 0xd4, 0xa4, 0xd3, 0x54, 0xe4, 0x25, + 0xdb, 0xb4, 0xe0, 0x43, 0xd0, 0x6b, 0x1e, 0xfa, 0x8b, 0x98, 0xaf, 0x6e, 0x88, 0xa5, 0x8c, 0x3e, + 0xf2, 0x36, 0xeb, 0xf4, 0x66, 0x32, 0xfc, 0xc4, 0x17, 0x81, 0x5c, 0x89, 0x82, 0x59, 0x9a, 0x7a, + 0x56, 0x31, 0xf1, 0xe8, 0xb6, 0xf3, 0x47, 0xb0, 0x92, 0x9c, 0xf4, 0x94, 0xf8, 0xf3, 0xed, 0xe2, + 0x53, 0xc5, 0xfc, 0x5a, 0x11, 0xb5, 0x81, 0x7a, 0x94, 0x42, 0xfa, 0x53, 0xb0, 0x9b, 0xee, 0x36, + 0x49, 0xea, 0xa7, 0xa2, 0x92, 0x7c, 0x01, 0x3b, 0x5a, 0xa5, 0xf5, 0x97, 0x20, 0x35, 0xe5, 0x5d, + 0xeb, 0x10, 0xf5, 0xaf, 0xc0, 0x79, 0x28, 0xb9, 0x65, 0xea, 0xcb, 0xfb, 0x53, 0xff, 0x70, 0xeb, + 0xbb, 0xb1, 0x83, 0x8f, 0x60, 0xea, 0xf4, 0xb1, 0x05, 0x9d, 0xab, 0xc9, 0xf9, 0xe4, 0xcb, 0xf5, + 0xc4, 0xf9, 0x0f, 0x77, 0xa1, 0x3d, 0xbd, 0x9a, 0xcc, 0x1c, 0x84, 0x7b, 0xb0, 0x3b, 0x1b, 0x1f, + 0x4d, 0x67, 0x97, 0xa3, 0x93, 0x73, 0xa7, 0x85, 0xff, 0x07, 0xeb, 0x78, 0x34, 0x1e, 0xfb, 0xc7, + 0x47, 0xa3, 0xf1, 0xe9, 0x37, 0xc7, 0x18, 0x0c, 0xc1, 0xd4, 0x66, 0xab, 0x37, 0x14, 0xaa, 0x5d, + 0x6b, 0x3f, 0xfa, 0x50, 0xbd, 0xda, 0xb9, 0x14, 0xda, 0x50, 0x97, 0xa9, 0x7a, 0xf0, 0x0b, 0xc1, + 0x5e, 0x9d, 0xe3, 0x75, 0x2c, 0x96, 0x17, 0xc1, 0x1a, 0x4f, 0xc1, 0x0e, 0x4b, 0xc1, 0xfd, 0x24, + 0x58, 0xaf, 0xe3, 0x34, 0x22, 0x48, 0x65, 0xff, 0x7a, 0x6b, 0xf6, 0x75, 0x8f, 0x77, 0x5c, 0x0a, + 0x7e, 0xa1, 0xf9, 0xf5, 0x0a, 0xc2, 0x3b, 0xa4, 0xff, 0x01, 0x9c, 0x87, 0x84, 0x66, 0x60, 0x5d, + 0x1d, 0xd8, 0x7e, 0x33, 0x30, 0xbb, 0x91, 0x4c, 0x68, 0x6a, 0xe9, 0xdf, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x75, 0x74, 0x87, 0xb4, 0x50, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go new file mode 100644 index 000000000..38f86c5f7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -0,0 +1,832 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" +) + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type wkt interface { + XXX_WellKnownType() string +} + +// marshalObject writes a struct to the Writer. +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + // "Generated output always contains 3, 6, or 9 fractional digits, + // depending on required precision." 
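+			// For example, a 3s duration marshals as "3.000s", 1.5s as "1.500s",
+			// and 1ns as "0.000000001s": the seconds value is printed with nine
+			// fractional digits and trailing groups of three zeros are trimmed below.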
+ s, ns := s.Field(0).Int(), s.Field(1).Int() + d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond + x := fmt.Sprintf("%.9f", d.Seconds()) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct": + // Let marshalValue handle the `fields` map. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. + sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. 
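+		// Go randomizes map iteration order, so collect the extension IDs that
+		// are actually set and emit them in ascending numeric order.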
+ ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + // Only the part of type_url after the last slash is relevant. + mname := turl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return fmt.Errorf("unknown message type %q", mname) + } + msg := reflect.New(mt.Elem()).Interface().(proto.Message) + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(wkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + + var err error + v = reflect.Indirect(v) + + // Handle repeated elements. 
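+	// []byte is deliberately excluded here: bytes fields fall through to the
+	// encoding/json default at the bottom of this function, which emits a
+	// base64-encoded string rather than a JSON array.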
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). + type wkt interface { + XXX_WellKnownType() string + } + if wkt, ok := v.Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + + // Handle enumerations. + if !m.EnumsAsInts && prop.Enum != "" { + // Unknown enum values will are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. + enumStr := v.Interface().(fmt.Stringer).String() + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + isKnownEnum := enumStr != valStr + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages. + if v.Kind() == reflect.Struct { + return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. + if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + if err := m.marshalValue(out, prop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. 
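+// A minimal sketch of draining a stream of concatenated JSON objects
+// (myMessage stands in for any generated proto.Message type):
+//
+//	u := &Unmarshaler{}
+//	dec := json.NewDecoder(r)
+//	for {
+//		msg := &myMessage{}
+//		if err := u.UnmarshalNext(dec, msg); err == io.EOF {
+//			break
+//		} else if err != nil {
+//			return err
+//		}
+//	}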
+func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. +func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + target.Set(reflect.New(targetType.Elem())) + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + // Handle well-known types. + type wkt interface { + XXX_WellKnownType() string + } + if wkt, ok := target.Addr().Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, except that null is allowed." + // encoding/json will turn JSON `null` into Go `nil`, + // so we don't have to do any extra work. + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + return fmt.Errorf("unmarshaling Any not supported yet") + case "Duration": + unq, err := strconv.Unquote(string(inputValue)) + if err != nil { + return err + } + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := strconv.Unquote(string(inputValue)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + ns := t.UnixNano() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + } + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. 
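+	// For example, an enum with a value GREEN = 1 may arrive either as the
+	// string "GREEN" (resolved through proto.EnumValueMap below) or as the
+	// bare number 1.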
+ if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + target.SetInt(int64(n)) + return nil + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. + var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. + var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays (which aren't encoded bytes) + if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + len := len(slc) + target.Set(reflect.MakeSlice(targetType, len, len)) + for i := 0; i < len; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + target.Set(reflect.MakeMap(targetType)) + var keyprop, valprop *proto.Properties + if prop != nil { + // These could still be nil if the protobuf metadata is broken somehow. + // TODO: This won't work because the fields are unexported. + // We should probably just reparse them. + //keyprop, valprop = prop.mkeyprop, prop.mvalprop + } + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. 
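+		// e.g. for a map<int64, string> field the JSON key "42" reaches this
+		// point as the plain string 42 and is re-parsed below into the int64
+		// key type.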
+ var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { + return err + } + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + if err := u.unmarshalValue(v, raw, valprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + return nil + } + + // 64-bit integers can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. +func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. +type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go new file mode 100644 index 000000000..cd6ff1cde --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. +// source: more_test_objects.proto +// DO NOT EDIT! + +/* +Package jsonpb is a generated protocol buffer package. + +It is generated from these files: + more_test_objects.proto + test_objects.proto + +It has these top-level messages: + Simple3 + Mappy + Simple + Repeats + Widget + Maps + MsgWithOneof + Real + Complex + KnownTypes +*/ +package jsonpb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Numeral int32 + +const ( + Numeral_UNKNOWN Numeral = 0 + Numeral_ARABIC Numeral = 1 + Numeral_ROMAN Numeral = 2 +) + +var Numeral_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ARABIC", + 2: "ROMAN", +} +var Numeral_value = map[string]int32{ + "UNKNOWN": 0, + "ARABIC": 1, + "ROMAN": 2, +} + +func (x Numeral) String() string { + return proto.EnumName(Numeral_name, int32(x)) +} +func (Numeral) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type Simple3 struct { + Dub float64 `protobuf:"fixed64,1,opt,name=dub" json:"dub,omitempty"` +} + +func (m *Simple3) Reset() { *m = Simple3{} } +func (m *Simple3) String() string { return proto.CompactTextString(m) } +func (*Simple3) ProtoMessage() {} +func (*Simple3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type Mappy struct { + Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Strry map[string]string `protobuf:"bytes,2,rep,name=strry" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=jsonpb.Numeral"` + S32Booly map[int32]bool `protobuf:"bytes,7,rep,name=s32booly" json:"s32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + S64Booly map[int64]bool `protobuf:"bytes,8,rep,name=s64booly" json:"s64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + U32Booly map[uint32]bool `protobuf:"bytes,9,rep,name=u32booly" json:"u32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + U64Booly map[uint64]bool `protobuf:"bytes,10,rep,name=u64booly" json:"u64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *Mappy) Reset() { *m = Mappy{} } +func (m *Mappy) String() string { return proto.CompactTextString(m) } +func (*Mappy) ProtoMessage() {} +func (*Mappy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Mappy) GetNummy() map[int64]int32 { + if m != nil { + return m.Nummy + } + return nil +} + +func (m *Mappy) GetStrry() map[string]string { + if m != nil { + return m.Strry + } + return nil +} + +func (m *Mappy) GetObjjy() map[int32]*Simple3 { + if m != nil { + return m.Objjy + } + return nil +} + +func (m *Mappy) GetBuggy() map[int64]string { + if m != nil { + return m.Buggy + } + return nil +} + +func (m *Mappy) GetBooly() map[bool]bool 
{ + if m != nil { + return m.Booly + } + return nil +} + +func (m *Mappy) GetEnumy() map[string]Numeral { + if m != nil { + return m.Enumy + } + return nil +} + +func (m *Mappy) GetS32Booly() map[int32]bool { + if m != nil { + return m.S32Booly + } + return nil +} + +func (m *Mappy) GetS64Booly() map[int64]bool { + if m != nil { + return m.S64Booly + } + return nil +} + +func (m *Mappy) GetU32Booly() map[uint32]bool { + if m != nil { + return m.U32Booly + } + return nil +} + +func (m *Mappy) GetU64Booly() map[uint64]bool { + if m != nil { + return m.U64Booly + } + return nil +} + +func init() { + proto.RegisterType((*Simple3)(nil), "jsonpb.Simple3") + proto.RegisterType((*Mappy)(nil), "jsonpb.Mappy") + proto.RegisterEnum("jsonpb.Numeral", Numeral_name, Numeral_value) +} + +func init() { proto.RegisterFile("more_test_objects.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 444 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x94, 0xc1, 0x6b, 0xdb, 0x30, + 0x14, 0x87, 0xe7, 0xa4, 0x4e, 0xec, 0x17, 0xba, 0x19, 0x31, 0x98, 0x58, 0x2f, 0xa1, 0x30, 0x08, + 0x83, 0xf9, 0x90, 0x8c, 0xad, 0x6c, 0xa7, 0x74, 0xf4, 0x50, 0x46, 0x1d, 0x70, 0x09, 0x3b, 0x96, + 0x78, 0x13, 0x65, 0x9e, 0x6d, 0x19, 0xdb, 0x1a, 0xe8, 0x8f, 0x1f, 0x8c, 0x27, 0xcb, 0xb5, 0x6c, + 0x14, 0xd2, 0x9b, 0xcc, 0xef, 0xfb, 0xf2, 0x9e, 0xf4, 0x1e, 0x81, 0x37, 0x39, 0xaf, 0xd8, 0x43, + 0xc3, 0xea, 0xe6, 0x81, 0x27, 0x29, 0xfb, 0xd9, 0xd4, 0x61, 0x59, 0xf1, 0x86, 0x93, 0x59, 0x5a, + 0xf3, 0xa2, 0x4c, 0x2e, 0x2f, 0x60, 0x7e, 0xff, 0x3b, 0x2f, 0x33, 0xb6, 0x21, 0x01, 0x4c, 0x7f, + 0x89, 0x84, 0x3a, 0x4b, 0x67, 0xe5, 0xc4, 0x78, 0xbc, 0xfc, 0xe7, 0x81, 0x7b, 0x77, 0x28, 0x4b, + 0x49, 0x42, 0x70, 0x0b, 0x91, 0xe7, 0x92, 0x3a, 0xcb, 0xe9, 0x6a, 0xb1, 0xa6, 0x61, 0xab, 0x87, + 0x2a, 0x0d, 0x23, 0x8c, 0x6e, 0x8a, 0xa6, 0x92, 0x71, 0x8b, 0x21, 0x5f, 0x37, 0x55, 0x25, 0xe9, + 0xc4, 0xc6, 0xdf, 0x63, 0xa4, 0x79, 0x85, 0x21, 0xcf, 0x93, 0x34, 0x95, 0x74, 0x6a, 0xe3, 0x77, + 0x18, 0x69, 0x5e, 0x61, 0xc8, 0x27, 0xe2, 0xf1, 0x51, 0xd2, 0x33, 0x1b, 0x7f, 0x8d, 0x91, 0xe6, + 0x15, 0xa6, 0x78, 0xce, 0x33, 0x49, 0x5d, 0x2b, 0x8f, 0x51, 0xc7, 0xe3, 0x19, 0x79, 0x56, 0x88, + 0x5c, 0xd2, 0x99, 0x8d, 0xbf, 0xc1, 0x48, 0xf3, 0x0a, 0x23, 0x9f, 0xc1, 0xab, 0x37, 0xeb, 0xb6, + 0xc4, 0x5c, 0x29, 0x17, 0xa3, 0x2b, 0xeb, 0xb4, 0xb5, 0x9e, 0x60, 0x25, 0x7e, 0xfa, 0xd8, 0x8a, + 0x9e, 0x55, 0xd4, 0x69, 0x27, 0xea, 0x4f, 0x14, 0x45, 0x57, 0xd1, 0xb7, 0x89, 0xfb, 0x61, 0x45, + 0x61, 0x54, 0x14, 0x5d, 0x45, 0xb0, 0x8a, 0xc3, 0x8a, 0x1d, 0xfc, 0xf6, 0x0a, 0xa0, 0x1f, 0x34, + 0x6e, 0xcb, 0x1f, 0x26, 0xd5, 0xb6, 0x4c, 0x63, 0x3c, 0x92, 0xd7, 0xe0, 0xfe, 0x3d, 0x64, 0x82, + 0xd1, 0xc9, 0xd2, 0x59, 0xb9, 0x71, 0xfb, 0xf1, 0x65, 0x72, 0xe5, 0xa0, 0xd9, 0x8f, 0xdc, 0x34, + 0x7d, 0x8b, 0xe9, 0x9b, 0xe6, 0x2d, 0x40, 0x3f, 0x7c, 0xd3, 0x74, 0x5b, 0xf3, 0x9d, 0x69, 0x2e, + 0xd6, 0xaf, 0xba, 0x9b, 0xe8, 0x9d, 0x1e, 0x35, 0xd1, 0xef, 0xc5, 0xa9, 0xf6, 0xfd, 0xb1, 0xf9, + 0xf4, 0x20, 0xa6, 0xe9, 0x59, 0x4c, 0x6f, 0xd4, 0x7e, 0xbf, 0x2b, 0x96, 0x8b, 0x0f, 0xda, 0x7f, + 0xd9, 0xb7, 0x1f, 0x89, 0x9c, 0x55, 0x87, 0xcc, 0xfc, 0xa9, 0xaf, 0x70, 0x3e, 0xd8, 0x21, 0xcb, + 0x63, 0x1c, 0xef, 0x03, 0x65, 0x73, 0xaa, 0xa7, 0xae, 0x3f, 0x96, 0xf7, 0xc7, 0x2a, 0x9f, 0x3f, + 0x47, 0x3e, 0x56, 0xf9, 0xec, 0x84, 0xfc, 0xfe, 0x03, 0xcc, 0xf5, 0x4b, 0x90, 0x05, 0xcc, 0xf7, + 0xd1, 0xf7, 0x68, 0xf7, 0x23, 0x0a, 0x5e, 0x10, 0x80, 0xd9, 0x36, 0xde, 0x5e, 0xdf, 0x7e, 0x0b, + 0x1c, 0xe2, 0x83, 0x1b, 0xef, 0xee, 0xb6, 0x51, 0x30, 0x49, 
0x66, 0xea, 0xaf, 0x6d, 0xf3, 0x3f, + 0x00, 0x00, 0xff, 0xff, 0xa2, 0x4b, 0xe1, 0x77, 0xf5, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go new file mode 100644 index 000000000..104a308d8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go @@ -0,0 +1,769 @@ +// Code generated by protoc-gen-go. +// source: test_objects.proto +// DO NOT EDIT! + +package jsonpb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" +import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf2 "github.com/golang/protobuf/ptypes/struct" +import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" +import google_protobuf4 "github.com/golang/protobuf/ptypes/wrappers" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Widget_Color int32 + +const ( + Widget_RED Widget_Color = 0 + Widget_GREEN Widget_Color = 1 + Widget_BLUE Widget_Color = 2 +) + +var Widget_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Widget_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Widget_Color) Enum() *Widget_Color { + p := new(Widget_Color) + *p = x + return p +} +func (x Widget_Color) String() string { + return proto.EnumName(Widget_Color_name, int32(x)) +} +func (x *Widget_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Widget_Color_value, data, "Widget_Color") + if err != nil { + return err + } + *x = Widget_Color(value) + return nil +} +func (Widget_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{2, 0} } + +// Test message for holding primitive types. 
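+// As proto2-generated code, every optional scalar field below is a pointer
+// (bytes fields are slices), and each Get accessor returns the zero value
+// when the field or the message itself is nil.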
+type Simple struct { + OBool *bool `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"` + OInt32 *int32 `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"` + OInt64 *int64 `protobuf:"varint,3,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"` + OUint32 *uint32 `protobuf:"varint,4,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"` + OUint64 *uint64 `protobuf:"varint,5,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"` + OSint32 *int32 `protobuf:"zigzag32,6,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"` + OSint64 *int64 `protobuf:"zigzag64,7,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"` + OFloat *float32 `protobuf:"fixed32,8,opt,name=o_float,json=oFloat" json:"o_float,omitempty"` + ODouble *float64 `protobuf:"fixed64,9,opt,name=o_double,json=oDouble" json:"o_double,omitempty"` + OString *string `protobuf:"bytes,10,opt,name=o_string,json=oString" json:"o_string,omitempty"` + OBytes []byte `protobuf:"bytes,11,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Simple) Reset() { *m = Simple{} } +func (m *Simple) String() string { return proto.CompactTextString(m) } +func (*Simple) ProtoMessage() {} +func (*Simple) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Simple) GetOBool() bool { + if m != nil && m.OBool != nil { + return *m.OBool + } + return false +} + +func (m *Simple) GetOInt32() int32 { + if m != nil && m.OInt32 != nil { + return *m.OInt32 + } + return 0 +} + +func (m *Simple) GetOInt64() int64 { + if m != nil && m.OInt64 != nil { + return *m.OInt64 + } + return 0 +} + +func (m *Simple) GetOUint32() uint32 { + if m != nil && m.OUint32 != nil { + return *m.OUint32 + } + return 0 +} + +func (m *Simple) GetOUint64() uint64 { + if m != nil && m.OUint64 != nil { + return *m.OUint64 + } + return 0 +} + +func (m *Simple) GetOSint32() int32 { + if m != nil && m.OSint32 != nil { + return *m.OSint32 + } + return 0 +} + +func (m *Simple) GetOSint64() int64 { + if m != nil && m.OSint64 != nil { + return *m.OSint64 + } + return 0 +} + +func (m *Simple) GetOFloat() float32 { + if m != nil && m.OFloat != nil { + return *m.OFloat + } + return 0 +} + +func (m *Simple) GetODouble() float64 { + if m != nil && m.ODouble != nil { + return *m.ODouble + } + return 0 +} + +func (m *Simple) GetOString() string { + if m != nil && m.OString != nil { + return *m.OString + } + return "" +} + +func (m *Simple) GetOBytes() []byte { + if m != nil { + return m.OBytes + } + return nil +} + +// Test message for holding repeated primitives. 
+type Repeats struct { + RBool []bool `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"` + RInt32 []int32 `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"` + RInt64 []int64 `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"` + RUint32 []uint32 `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"` + RUint64 []uint64 `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"` + RSint32 []int32 `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"` + RSint64 []int64 `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"` + RFloat []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"` + RDouble []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"` + RString []string `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"` + RBytes [][]byte `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Repeats) Reset() { *m = Repeats{} } +func (m *Repeats) String() string { return proto.CompactTextString(m) } +func (*Repeats) ProtoMessage() {} +func (*Repeats) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *Repeats) GetRBool() []bool { + if m != nil { + return m.RBool + } + return nil +} + +func (m *Repeats) GetRInt32() []int32 { + if m != nil { + return m.RInt32 + } + return nil +} + +func (m *Repeats) GetRInt64() []int64 { + if m != nil { + return m.RInt64 + } + return nil +} + +func (m *Repeats) GetRUint32() []uint32 { + if m != nil { + return m.RUint32 + } + return nil +} + +func (m *Repeats) GetRUint64() []uint64 { + if m != nil { + return m.RUint64 + } + return nil +} + +func (m *Repeats) GetRSint32() []int32 { + if m != nil { + return m.RSint32 + } + return nil +} + +func (m *Repeats) GetRSint64() []int64 { + if m != nil { + return m.RSint64 + } + return nil +} + +func (m *Repeats) GetRFloat() []float32 { + if m != nil { + return m.RFloat + } + return nil +} + +func (m *Repeats) GetRDouble() []float64 { + if m != nil { + return m.RDouble + } + return nil +} + +func (m *Repeats) GetRString() []string { + if m != nil { + return m.RString + } + return nil +} + +func (m *Repeats) GetRBytes() [][]byte { + if m != nil { + return m.RBytes + } + return nil +} + +// Test message for holding enums and nested messages. 
+type Widget struct { + Color *Widget_Color `protobuf:"varint,1,opt,name=color,enum=jsonpb.Widget_Color" json:"color,omitempty"` + RColor []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb.Widget_Color" json:"r_color,omitempty"` + Simple *Simple `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"` + RSimple []*Simple `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"` + Repeats *Repeats `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"` + RRepeats []*Repeats `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Widget) Reset() { *m = Widget{} } +func (m *Widget) String() string { return proto.CompactTextString(m) } +func (*Widget) ProtoMessage() {} +func (*Widget) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *Widget) GetColor() Widget_Color { + if m != nil && m.Color != nil { + return *m.Color + } + return Widget_RED +} + +func (m *Widget) GetRColor() []Widget_Color { + if m != nil { + return m.RColor + } + return nil +} + +func (m *Widget) GetSimple() *Simple { + if m != nil { + return m.Simple + } + return nil +} + +func (m *Widget) GetRSimple() []*Simple { + if m != nil { + return m.RSimple + } + return nil +} + +func (m *Widget) GetRepeats() *Repeats { + if m != nil { + return m.Repeats + } + return nil +} + +func (m *Widget) GetRRepeats() []*Repeats { + if m != nil { + return m.RRepeats + } + return nil +} + +type Maps struct { + MInt64Str map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MBoolSimple map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Maps) Reset() { *m = Maps{} } +func (m *Maps) String() string { return proto.CompactTextString(m) } +func (*Maps) ProtoMessage() {} +func (*Maps) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *Maps) GetMInt64Str() map[int64]string { + if m != nil { + return m.MInt64Str + } + return nil +} + +func (m *Maps) GetMBoolSimple() map[bool]*Simple { + if m != nil { + return m.MBoolSimple + } + return nil +} + +type MsgWithOneof struct { + // Types that are valid to be assigned to Union: + // *MsgWithOneof_Title + // *MsgWithOneof_Salary + // *MsgWithOneof_Country + // *MsgWithOneof_HomeAddress + Union isMsgWithOneof_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MsgWithOneof) Reset() { *m = MsgWithOneof{} } +func (m *MsgWithOneof) String() string { return proto.CompactTextString(m) } +func (*MsgWithOneof) ProtoMessage() {} +func (*MsgWithOneof) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +type isMsgWithOneof_Union interface { + isMsgWithOneof_Union() +} + +type MsgWithOneof_Title struct { + Title string `protobuf:"bytes,1,opt,name=title,oneof"` +} +type MsgWithOneof_Salary struct { + Salary int64 `protobuf:"varint,2,opt,name=salary,oneof"` +} +type MsgWithOneof_Country struct { + Country string `protobuf:"bytes,3,opt,name=Country,json=country,oneof"` +} +type MsgWithOneof_HomeAddress struct { + HomeAddress string `protobuf:"bytes,4,opt,name=home_address,json=homeAddress,oneof"` +} + +func (*MsgWithOneof_Title) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Salary) 
isMsgWithOneof_Union() {} +func (*MsgWithOneof_Country) isMsgWithOneof_Union() {} +func (*MsgWithOneof_HomeAddress) isMsgWithOneof_Union() {} + +func (m *MsgWithOneof) GetUnion() isMsgWithOneof_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *MsgWithOneof) GetTitle() string { + if x, ok := m.GetUnion().(*MsgWithOneof_Title); ok { + return x.Title + } + return "" +} + +func (m *MsgWithOneof) GetSalary() int64 { + if x, ok := m.GetUnion().(*MsgWithOneof_Salary); ok { + return x.Salary + } + return 0 +} + +func (m *MsgWithOneof) GetCountry() string { + if x, ok := m.GetUnion().(*MsgWithOneof_Country); ok { + return x.Country + } + return "" +} + +func (m *MsgWithOneof) GetHomeAddress() string { + if x, ok := m.GetUnion().(*MsgWithOneof_HomeAddress); ok { + return x.HomeAddress + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _MsgWithOneof_OneofMarshaler, _MsgWithOneof_OneofUnmarshaler, _MsgWithOneof_OneofSizer, []interface{}{ + (*MsgWithOneof_Title)(nil), + (*MsgWithOneof_Salary)(nil), + (*MsgWithOneof_Country)(nil), + (*MsgWithOneof_HomeAddress)(nil), + } +} + +func _MsgWithOneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*MsgWithOneof) + // union + switch x := m.Union.(type) { + case *MsgWithOneof_Title: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Title) + case *MsgWithOneof_Salary: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Salary)) + case *MsgWithOneof_Country: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Country) + case *MsgWithOneof_HomeAddress: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.HomeAddress) + case nil: + default: + return fmt.Errorf("MsgWithOneof.Union has unexpected type %T", x) + } + return nil +} + +func _MsgWithOneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*MsgWithOneof) + switch tag { + case 1: // union.title + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_Title{x} + return true, err + case 2: // union.salary + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &MsgWithOneof_Salary{int64(x)} + return true, err + case 3: // union.Country + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_Country{x} + return true, err + case 4: // union.home_address + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &MsgWithOneof_HomeAddress{x} + return true, err + default: + return false, nil + } +} + +func _MsgWithOneof_OneofSizer(msg proto.Message) (n int) { + m := msg.(*MsgWithOneof) + // union + switch x := m.Union.(type) { + case *MsgWithOneof_Title: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Title))) + n += len(x.Title) + case *MsgWithOneof_Salary: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Salary)) + case *MsgWithOneof_Country: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Country))) + n += len(x.Country) + 
case *MsgWithOneof_HomeAddress: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.HomeAddress))) + n += len(x.HomeAddress) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Real struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Real) Reset() { *m = Real{} } +func (m *Real) String() string { return proto.CompactTextString(m) } +func (*Real) ProtoMessage() {} +func (*Real) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +var extRange_Real = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Real) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Real +} + +func (m *Real) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Complex struct { + Imaginary *float64 `protobuf:"fixed64,1,opt,name=imaginary" json:"imaginary,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Complex) Reset() { *m = Complex{} } +func (m *Complex) String() string { return proto.CompactTextString(m) } +func (*Complex) ProtoMessage() {} +func (*Complex) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +var extRange_Complex = []proto.ExtensionRange{ + {100, 536870911}, +} + +func (*Complex) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_Complex +} + +func (m *Complex) GetImaginary() float64 { + if m != nil && m.Imaginary != nil { + return *m.Imaginary + } + return 0 +} + +var E_Complex_RealExtension = &proto.ExtensionDesc{ + ExtendedType: (*Real)(nil), + ExtensionType: (*Complex)(nil), + Field: 123, + Name: "jsonpb.Complex.real_extension", + Tag: "bytes,123,opt,name=real_extension,json=realExtension", +} + +type KnownTypes struct { + An *google_protobuf.Any `protobuf:"bytes,14,opt,name=an" json:"an,omitempty"` + Dur *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=dur" json:"dur,omitempty"` + St *google_protobuf2.Struct `protobuf:"bytes,12,opt,name=st" json:"st,omitempty"` + Ts *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=ts" json:"ts,omitempty"` + Dbl *google_protobuf4.DoubleValue `protobuf:"bytes,3,opt,name=dbl" json:"dbl,omitempty"` + Flt *google_protobuf4.FloatValue `protobuf:"bytes,4,opt,name=flt" json:"flt,omitempty"` + I64 *google_protobuf4.Int64Value `protobuf:"bytes,5,opt,name=i64" json:"i64,omitempty"` + U64 *google_protobuf4.UInt64Value `protobuf:"bytes,6,opt,name=u64" json:"u64,omitempty"` + I32 *google_protobuf4.Int32Value `protobuf:"bytes,7,opt,name=i32" json:"i32,omitempty"` + U32 *google_protobuf4.UInt32Value `protobuf:"bytes,8,opt,name=u32" json:"u32,omitempty"` + Bool *google_protobuf4.BoolValue `protobuf:"bytes,9,opt,name=bool" json:"bool,omitempty"` + Str *google_protobuf4.StringValue `protobuf:"bytes,10,opt,name=str" json:"str,omitempty"` + Bytes *google_protobuf4.BytesValue `protobuf:"bytes,11,opt,name=bytes" json:"bytes,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *KnownTypes) Reset() { *m = KnownTypes{} } +func (m *KnownTypes) String() string { return proto.CompactTextString(m) } +func (*KnownTypes) ProtoMessage() {} +func (*KnownTypes) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *KnownTypes) GetAn() *google_protobuf.Any { + if m != nil { + return m.An + } + return nil +} + +func (m *KnownTypes) GetDur() *google_protobuf1.Duration { 
+ if m != nil { + return m.Dur + } + return nil +} + +func (m *KnownTypes) GetSt() *google_protobuf2.Struct { + if m != nil { + return m.St + } + return nil +} + +func (m *KnownTypes) GetTs() *google_protobuf3.Timestamp { + if m != nil { + return m.Ts + } + return nil +} + +func (m *KnownTypes) GetDbl() *google_protobuf4.DoubleValue { + if m != nil { + return m.Dbl + } + return nil +} + +func (m *KnownTypes) GetFlt() *google_protobuf4.FloatValue { + if m != nil { + return m.Flt + } + return nil +} + +func (m *KnownTypes) GetI64() *google_protobuf4.Int64Value { + if m != nil { + return m.I64 + } + return nil +} + +func (m *KnownTypes) GetU64() *google_protobuf4.UInt64Value { + if m != nil { + return m.U64 + } + return nil +} + +func (m *KnownTypes) GetI32() *google_protobuf4.Int32Value { + if m != nil { + return m.I32 + } + return nil +} + +func (m *KnownTypes) GetU32() *google_protobuf4.UInt32Value { + if m != nil { + return m.U32 + } + return nil +} + +func (m *KnownTypes) GetBool() *google_protobuf4.BoolValue { + if m != nil { + return m.Bool + } + return nil +} + +func (m *KnownTypes) GetStr() *google_protobuf4.StringValue { + if m != nil { + return m.Str + } + return nil +} + +func (m *KnownTypes) GetBytes() *google_protobuf4.BytesValue { + if m != nil { + return m.Bytes + } + return nil +} + +var E_Name = &proto.ExtensionDesc{ + ExtendedType: (*Real)(nil), + ExtensionType: (*string)(nil), + Field: 124, + Name: "jsonpb.name", + Tag: "bytes,124,opt,name=name", +} + +func init() { + proto.RegisterType((*Simple)(nil), "jsonpb.Simple") + proto.RegisterType((*Repeats)(nil), "jsonpb.Repeats") + proto.RegisterType((*Widget)(nil), "jsonpb.Widget") + proto.RegisterType((*Maps)(nil), "jsonpb.Maps") + proto.RegisterType((*MsgWithOneof)(nil), "jsonpb.MsgWithOneof") + proto.RegisterType((*Real)(nil), "jsonpb.Real") + proto.RegisterType((*Complex)(nil), "jsonpb.Complex") + proto.RegisterType((*KnownTypes)(nil), "jsonpb.KnownTypes") + proto.RegisterEnum("jsonpb.Widget_Color", Widget_Color_name, Widget_Color_value) + proto.RegisterExtension(E_Complex_RealExtension) + proto.RegisterExtension(E_Name) +} + +func init() { proto.RegisterFile("test_objects.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 1055 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x95, 0x51, 0x73, 0xdb, 0x44, + 0x10, 0xc7, 0x23, 0xc9, 0x96, 0xec, 0x73, 0x12, 0xcc, 0x4d, 0x4a, 0x15, 0x13, 0x40, 0x63, 0x4a, + 0x11, 0x85, 0xba, 0x83, 0xe2, 0xf1, 0x30, 0x85, 0x97, 0xa4, 0x31, 0x94, 0x81, 0x94, 0x99, 0x4b, + 0x43, 0x1f, 0x3d, 0x72, 0x7c, 0x71, 0x55, 0x64, 0x9d, 0xe7, 0xee, 0x44, 0xea, 0x81, 0x87, 0x3c, + 0xf3, 0xcc, 0x57, 0x80, 0x8f, 0xc0, 0x27, 0xe2, 0x83, 0x74, 0x76, 0x4f, 0xb2, 0x12, 0x3b, 0x7e, + 0x8a, 0xf7, 0xf6, 0xbf, 0xff, 0x9c, 0x7e, 0xb7, 0x77, 0x4b, 0xa8, 0xe6, 0x4a, 0x8f, 0xc4, 0xf8, + 0x0d, 0xbf, 0xd0, 0xaa, 0x37, 0x97, 0x42, 0x0b, 0xea, 0xbe, 0x51, 0x22, 0x9b, 0x8f, 0x3b, 0xfb, + 0x53, 0x21, 0xa6, 0x29, 0x7f, 0x82, 0xab, 0xe3, 0xfc, 0xf2, 0x49, 0x9c, 0x2d, 0x8c, 0xa4, 0xf3, + 0xf1, 0x6a, 0x6a, 0x92, 0xcb, 0x58, 0x27, 0x22, 0x2b, 0xf2, 0x07, 0xab, 0x79, 0xa5, 0x65, 0x7e, + 0xa1, 0x8b, 0xec, 0x27, 0xab, 0x59, 0x9d, 0xcc, 0xb8, 0xd2, 0xf1, 0x6c, 0xbe, 0xc9, 0xfe, 0x4a, + 0xc6, 0xf3, 0x39, 0x97, 0xc5, 0x0e, 0xbb, 0xff, 0xd8, 0xc4, 0x3d, 0x4b, 0x66, 0xf3, 0x94, 0xd3, + 0x7b, 0xc4, 0x15, 0xa3, 0xb1, 0x10, 0xa9, 0x6f, 0x05, 0x56, 0xd8, 0x60, 0x75, 0x71, 0x2c, 0x44, + 0x4a, 0xef, 0x13, 0x4f, 0x8c, 0x92, 0x4c, 0x1f, 0x46, 0xbe, 0x1d, 0x58, 0x61, 0x9d, 0xb9, 
0xe2, + 0x47, 0x88, 0x96, 0x89, 0x41, 0xdf, 0x77, 0x02, 0x2b, 0x74, 0x4c, 0x62, 0xd0, 0xa7, 0xfb, 0xa4, + 0x21, 0x46, 0xb9, 0x29, 0xa9, 0x05, 0x56, 0xb8, 0xc3, 0x3c, 0x71, 0x8e, 0x61, 0x95, 0x1a, 0xf4, + 0xfd, 0x7a, 0x60, 0x85, 0xb5, 0x22, 0x55, 0x56, 0x29, 0x53, 0xe5, 0x06, 0x56, 0xf8, 0x3e, 0xf3, + 0xc4, 0xd9, 0x8d, 0x2a, 0x65, 0xaa, 0xbc, 0xc0, 0x0a, 0x69, 0x91, 0x1a, 0xf4, 0xcd, 0x26, 0x2e, + 0x53, 0x11, 0x6b, 0xbf, 0x11, 0x58, 0xa1, 0xcd, 0x5c, 0xf1, 0x3d, 0x44, 0xa6, 0x66, 0x22, 0xf2, + 0x71, 0xca, 0xfd, 0x66, 0x60, 0x85, 0x16, 0xf3, 0xc4, 0x09, 0x86, 0x85, 0x9d, 0x96, 0x49, 0x36, + 0xf5, 0x49, 0x60, 0x85, 0x4d, 0xb0, 0xc3, 0xd0, 0xd8, 0x8d, 0x17, 0x9a, 0x2b, 0xbf, 0x15, 0x58, + 0xe1, 0x36, 0x73, 0xc5, 0x31, 0x44, 0xdd, 0x7f, 0x6d, 0xe2, 0x31, 0x3e, 0xe7, 0xb1, 0x56, 0x00, + 0x4a, 0x96, 0xa0, 0x1c, 0x00, 0x25, 0x4b, 0x50, 0x72, 0x09, 0xca, 0x01, 0x50, 0x72, 0x09, 0x4a, + 0x2e, 0x41, 0x39, 0x00, 0x4a, 0x2e, 0x41, 0xc9, 0x0a, 0x94, 0x03, 0xa0, 0x64, 0x05, 0x4a, 0x56, + 0xa0, 0x1c, 0x00, 0x25, 0x2b, 0x50, 0xb2, 0x02, 0xe5, 0x00, 0x28, 0x79, 0x76, 0xa3, 0x6a, 0x09, + 0xca, 0x01, 0x50, 0xb2, 0x02, 0x25, 0x97, 0xa0, 0x1c, 0x00, 0x25, 0x97, 0xa0, 0x64, 0x05, 0xca, + 0x01, 0x50, 0xb2, 0x02, 0x25, 0x2b, 0x50, 0x0e, 0x80, 0x92, 0x15, 0x28, 0xb9, 0x04, 0xe5, 0x00, + 0x28, 0x69, 0x40, 0xfd, 0x67, 0x13, 0xf7, 0x55, 0x32, 0x99, 0x72, 0x4d, 0x1f, 0x91, 0xfa, 0x85, + 0x48, 0x85, 0xc4, 0x7e, 0xda, 0x8d, 0xf6, 0x7a, 0xe6, 0x36, 0xf4, 0x4c, 0xba, 0xf7, 0x0c, 0x72, + 0xcc, 0x48, 0xe8, 0x63, 0xf0, 0x33, 0x6a, 0x80, 0xb7, 0x49, 0xed, 0x4a, 0xfc, 0x4b, 0x1f, 0x12, + 0x57, 0x61, 0xd7, 0xe2, 0x01, 0xb6, 0xa2, 0xdd, 0x52, 0x6d, 0x7a, 0x99, 0x15, 0x59, 0xfa, 0x85, + 0x01, 0x82, 0x4a, 0xd8, 0xe7, 0xba, 0x12, 0x00, 0x15, 0x52, 0x4f, 0x9a, 0x03, 0xf6, 0xf7, 0xd0, + 0xf3, 0xbd, 0x52, 0x59, 0x9c, 0x3b, 0x2b, 0xf3, 0xf4, 0x2b, 0xd2, 0x94, 0xa3, 0x52, 0x7c, 0x0f, + 0x6d, 0xd7, 0xc4, 0x0d, 0x59, 0xfc, 0xea, 0x7e, 0x46, 0xea, 0x66, 0xd3, 0x1e, 0x71, 0xd8, 0xf0, + 0xa4, 0xbd, 0x45, 0x9b, 0xa4, 0xfe, 0x03, 0x1b, 0x0e, 0x5f, 0xb4, 0x2d, 0xda, 0x20, 0xb5, 0xe3, + 0x9f, 0xcf, 0x87, 0x6d, 0xbb, 0xfb, 0xb7, 0x4d, 0x6a, 0xa7, 0xf1, 0x5c, 0xd1, 0x6f, 0x49, 0x6b, + 0x66, 0xda, 0x05, 0xd8, 0x63, 0x8f, 0xb5, 0xa2, 0x0f, 0x4b, 0x7f, 0x90, 0xf4, 0x4e, 0xb1, 0x7f, + 0xce, 0xb4, 0x1c, 0x66, 0x5a, 0x2e, 0x58, 0x73, 0x56, 0xc6, 0xf4, 0x88, 0xec, 0xcc, 0xb0, 0x37, + 0xcb, 0xaf, 0xb6, 0xb1, 0xfc, 0xa3, 0xdb, 0xe5, 0xd0, 0xaf, 0xe6, 0xb3, 0x8d, 0x41, 0x6b, 0x56, + 0xad, 0x74, 0xbe, 0x23, 0xbb, 0xb7, 0xfd, 0x69, 0x9b, 0x38, 0xbf, 0xf1, 0x05, 0x1e, 0xa3, 0xc3, + 0xe0, 0x27, 0xdd, 0x23, 0xf5, 0xdf, 0xe3, 0x34, 0xe7, 0xf8, 0x24, 0x34, 0x99, 0x09, 0x9e, 0xda, + 0xdf, 0x58, 0x9d, 0x17, 0xa4, 0xbd, 0x6a, 0x7f, 0xb3, 0xbe, 0x61, 0xea, 0x1f, 0xdc, 0xac, 0x5f, + 0x3f, 0x94, 0xca, 0xaf, 0xfb, 0x97, 0x45, 0xb6, 0x4f, 0xd5, 0xf4, 0x55, 0xa2, 0x5f, 0xff, 0x92, + 0x71, 0x71, 0x49, 0x3f, 0x20, 0x75, 0x9d, 0xe8, 0x94, 0xa3, 0x5d, 0xf3, 0xf9, 0x16, 0x33, 0x21, + 0xf5, 0x89, 0xab, 0xe2, 0x34, 0x96, 0x0b, 0xf4, 0x74, 0x9e, 0x6f, 0xb1, 0x22, 0xa6, 0x1d, 0xe2, + 0x3d, 0x13, 0x39, 0xec, 0x04, 0x1f, 0x2a, 0xa8, 0xf1, 0x2e, 0xcc, 0x02, 0xfd, 0x94, 0x6c, 0xbf, + 0x16, 0x33, 0x3e, 0x8a, 0x27, 0x13, 0xc9, 0x95, 0xc2, 0xf7, 0x0a, 0x04, 0x2d, 0x58, 0x3d, 0x32, + 0x8b, 0xc7, 0x1e, 0xa9, 0xe7, 0x59, 0x22, 0xb2, 0xee, 0x43, 0x52, 0x63, 0x3c, 0x4e, 0xab, 0xcf, + 0xb7, 0xf0, 0x65, 0x31, 0xc1, 0xa3, 0x46, 0x63, 0xd2, 0xbe, 0xbe, 0xbe, 0xbe, 0xb6, 0xbb, 0x57, + 0xf0, 0x1f, 0xe1, 0x4b, 0xde, 0xd2, 0x03, 0xd2, 0x4c, 0x66, 0xf1, 0x34, 0xc9, 0x60, 0x67, 0x46, + 0x5e, 0x2d, 0x54, 
0x25, 0xd1, 0x09, 0xd9, 0x95, 0x3c, 0x4e, 0x47, 0xfc, 0xad, 0xe6, 0x99, 0x4a, + 0x44, 0x46, 0xb7, 0xab, 0x96, 0x8a, 0x53, 0xff, 0x8f, 0xdb, 0x3d, 0x59, 0xd8, 0xb3, 0x1d, 0x28, + 0x1a, 0x96, 0x35, 0xdd, 0xff, 0x6b, 0x84, 0xfc, 0x94, 0x89, 0xab, 0xec, 0xe5, 0x62, 0xce, 0x15, + 0x7d, 0x40, 0xec, 0x38, 0xf3, 0x77, 0xb1, 0x74, 0xaf, 0x67, 0x46, 0x41, 0xaf, 0x1c, 0x05, 0xbd, + 0xa3, 0x6c, 0xc1, 0xec, 0x38, 0xa3, 0x5f, 0x12, 0x67, 0x92, 0x9b, 0x5b, 0xda, 0x8a, 0xf6, 0xd7, + 0x64, 0x27, 0xc5, 0x40, 0x62, 0xa0, 0xa2, 0x9f, 0x13, 0x5b, 0x69, 0x7f, 0x1b, 0xb5, 0xf7, 0xd7, + 0xb4, 0x67, 0x38, 0x9c, 0x98, 0xad, 0xe0, 0xf6, 0xdb, 0x5a, 0x15, 0xe7, 0xdb, 0x59, 0x13, 0xbe, + 0x2c, 0xe7, 0x14, 0xb3, 0xb5, 0xa2, 0x3d, 0xe2, 0x4c, 0xc6, 0x29, 0x9e, 0x4e, 0x2b, 0x3a, 0x58, + 0xdf, 0x01, 0x3e, 0x47, 0xbf, 0x02, 0x64, 0x06, 0x42, 0xfa, 0x98, 0x38, 0x97, 0xa9, 0xc6, 0xc3, + 0x82, 0xab, 0xb1, 0xaa, 0xc7, 0x87, 0xad, 0x90, 0x5f, 0xa6, 0x1a, 0xe4, 0x49, 0x31, 0x70, 0xee, + 0x92, 0x63, 0xb3, 0x17, 0xf2, 0x64, 0xd0, 0x87, 0xdd, 0xe4, 0x83, 0x3e, 0x0e, 0xa1, 0xbb, 0x76, + 0x73, 0x7e, 0x53, 0x9f, 0x0f, 0xfa, 0x68, 0x7f, 0x18, 0xe1, 0x64, 0xda, 0x60, 0x7f, 0x18, 0x95, + 0xf6, 0x87, 0x11, 0xda, 0x1f, 0x46, 0x38, 0xae, 0x36, 0xd9, 0x2f, 0xf5, 0x39, 0xea, 0x6b, 0x38, + 0x6c, 0x9a, 0x1b, 0x50, 0xc2, 0x6d, 0x33, 0x72, 0xd4, 0x81, 0x3f, 0xbc, 0x1b, 0x64, 0x83, 0xbf, + 0x79, 0xc0, 0x0b, 0x7f, 0xa5, 0x25, 0xfd, 0x9a, 0xd4, 0xab, 0x89, 0x77, 0xd7, 0x07, 0xe0, 0xc3, + 0x6e, 0x0a, 0x8c, 0xf2, 0x69, 0x40, 0x6a, 0x59, 0x3c, 0xe3, 0x2b, 0x2d, 0xfa, 0x27, 0xbe, 0x05, + 0x98, 0x79, 0x17, 0x00, 0x00, 0xff, 0xff, 0xda, 0x8b, 0x4a, 0x7a, 0x0e, 0x09, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go new file mode 100644 index 000000000..be386f1d5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go @@ -0,0 +1,199 @@ +// Code generated by protoc-gen-go. +// source: proto3_proto/proto3.proto +// DO NOT EDIT! + +/* +Package proto3_proto is a generated protocol buffer package. + +It is generated from these files: + proto3_proto/proto3.proto + +It has these top-level messages: + Message + Nested + MessageWithMap +*/ +package proto3_proto + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" +import testdata "github.com/golang/protobuf/proto/testdata" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.ProtoPackageIsVersion1 + +type Message_Humour int32 + +const ( + Message_UNKNOWN Message_Humour = 0 + Message_PUNS Message_Humour = 1 + Message_SLAPSTICK Message_Humour = 2 + Message_BILL_BAILEY Message_Humour = 3 +) + +var Message_Humour_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PUNS", + 2: "SLAPSTICK", + 3: "BILL_BAILEY", +} +var Message_Humour_value = map[string]int32{ + "UNKNOWN": 0, + "PUNS": 1, + "SLAPSTICK": 2, + "BILL_BAILEY": 3, +} + +func (x Message_Humour) String() string { + return proto.EnumName(Message_Humour_name, int32(x)) +} +func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +type Message struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` + HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"` + Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` + Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + RFunny []Message_Humour `protobuf:"varint,16,rep,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"` + Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"` + Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"` + ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Message) GetNested() *Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *Message) GetTerrain() map[string]*Nested { + if m != nil { + return m.Terrain + } + return nil +} + +func (m *Message) GetProto2Field() *testdata.SubDefaults { + if m != nil { + return m.Proto2Field + } + return nil +} + +func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { + if m != nil { + return m.Proto2Value + } + return nil +} + +func (m *Message) GetAnything() *google_protobuf.Any { + if m != nil { + return m.Anything + } + return nil +} + +func (m *Message) GetManyThings() []*google_protobuf.Any { + if m != nil { + return m.ManyThings + } + return nil +} + +type Nested struct { + Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` + Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m 
*Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} +func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type MessageWithMap struct { + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func init() { + proto.RegisterType((*Message)(nil), "proto3_proto.Message") + proto.RegisterType((*Nested)(nil), "proto3_proto.Nested") + proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap") + proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) +} + +var fileDescriptor0 = []byte{ + // 621 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x5b, 0x6f, 0xd3, 0x4c, + 0x10, 0xfd, 0x72, 0xa9, 0xe3, 0x8e, 0x9d, 0xd6, 0xda, 0xaf, 0x48, 0xdb, 0x88, 0x87, 0x12, 0x24, + 0x54, 0x71, 0x71, 0x51, 0x10, 0x52, 0x85, 0x10, 0xa8, 0x2d, 0xad, 0x88, 0x9a, 0x86, 0x68, 0xd3, + 0x52, 0xf1, 0x64, 0xad, 0xd3, 0x4d, 0x62, 0x11, 0xaf, 0x23, 0x7b, 0x8d, 0xe4, 0xbf, 0xc3, 0xaf, + 0xe4, 0x91, 0xbd, 0x38, 0xad, 0x5b, 0x05, 0x78, 0xf2, 0xee, 0xcc, 0x39, 0x33, 0xb3, 0xe7, 0x8c, + 0x61, 0x77, 0x99, 0x26, 0x22, 0x79, 0x13, 0xe8, 0xcf, 0x81, 0xb9, 0xf8, 0xfa, 0x83, 0xdc, 0x6a, + 0xaa, 0xb3, 0x3b, 0x4b, 0x92, 0xd9, 0x82, 0x19, 0x48, 0x98, 0x4f, 0x0f, 0x28, 0x2f, 0x0c, 0xb0, + 0xf3, 0xbf, 0x60, 0x99, 0xb8, 0xa1, 0x82, 0x1e, 0xa8, 0x83, 0x09, 0x76, 0x7f, 0x59, 0xd0, 0xba, + 0x60, 0x59, 0x46, 0x67, 0x0c, 0x21, 0x68, 0x72, 0x1a, 0x33, 0x5c, 0xdb, 0xab, 0xed, 0x6f, 0x12, + 0x7d, 0x46, 0x87, 0x60, 0xcf, 0xa3, 0x05, 0x4d, 0x23, 0x51, 0xe0, 0xba, 0x8c, 0x6f, 0xf5, 0x1e, + 0xfb, 0xd5, 0x86, 0x7e, 0x49, 0xf6, 0x3f, 0xe7, 0x71, 0x92, 0xa7, 0xe4, 0x16, 0x8d, 0xf6, 0xc0, + 0x9d, 0xb3, 0x68, 0x36, 0x17, 0x41, 0xc4, 0x83, 0x49, 0x8c, 0x1b, 0x92, 0xdd, 0x26, 0x60, 0x62, + 0x7d, 0x7e, 0x12, 0xab, 0x7e, 0x6a, 0x1c, 0xdc, 0x94, 0x19, 0x97, 0xe8, 0x33, 0x7a, 0x02, 0x6e, + 0xca, 0xb2, 0x7c, 0x21, 0x82, 0x49, 0x92, 0x73, 0x81, 0x5b, 0x32, 0xd7, 0x20, 0x8e, 0x89, 0x9d, + 0xa8, 0x10, 0x7a, 0x0a, 0x6d, 0x91, 0xe6, 0x2c, 0xc8, 0x26, 0x89, 0xc8, 0x62, 0xca, 0xb1, 0x2d, + 0x31, 0x36, 0x71, 0x55, 0x70, 0x5c, 0xc6, 0xd0, 0x0e, 0x6c, 0xc8, 0x7c, 0xca, 0xf0, 0xa6, 0x4c, + 0xd6, 0x89, 0xb9, 0x20, 0x0f, 0x1a, 0xdf, 0x59, 0x81, 0x37, 0xf6, 0x1a, 0xfb, 0x4d, 0xa2, 0x8e, + 0xe8, 0x25, 0x58, 0x5c, 0xaa, 0xc1, 0x6e, 0xb0, 0x25, 0x81, 0x4e, 0x6f, 0xe7, 0xfe, 0xeb, 0x86, + 0x3a, 0x47, 0x4a, 0x0c, 0x7a, 0x0b, 0xad, 0x34, 0x98, 0xe6, 0x9c, 0x17, 0xd8, 0x93, 0x35, 0xfe, + 0x25, 0x86, 0x95, 0x9e, 0x29, 0x2c, 0x7a, 0x0f, 0x2d, 0xc1, 0xd2, 0x94, 0x46, 0x1c, 0x83, 0xa4, + 0x39, 0xbd, 0xee, 0x7a, 0xda, 0xa5, 0x01, 0x9d, 0x72, 0x91, 0x16, 0x64, 0x45, 0x91, 0x16, 0x18, + 0x8b, 0x7b, 0xc1, 0x34, 0x62, 0x8b, 0x1b, 0xec, 0xe8, 0x41, 0x1f, 0xf9, 0x2b, 0x3b, 0xfd, 0x71, + 0x1e, 0x7e, 0x62, 0x53, 0x2a, 0x05, 0xca, 0x88, 0x63, 0xa0, 0x67, 0x0a, 0x89, 0xfa, 0xb7, 0xcc, + 0x1f, 0x74, 0x91, 0x33, 0xdc, 0xd6, 0xcd, 0x9f, 0xad, 0x6f, 0x3e, 0xd2, 0xc8, 0xaf, 0x0a, 
0x68, + 0x06, 0x28, 0x4b, 0xe9, 0x08, 0x7a, 0x0d, 0xb6, 0xdc, 0x24, 0x31, 0x8f, 0xf8, 0x0c, 0x6f, 0x95, + 0x4a, 0x99, 0x55, 0xf3, 0x57, 0xab, 0xe6, 0x1f, 0xf1, 0x82, 0xdc, 0xa2, 0xa4, 0x56, 0x8e, 0x34, + 0xa2, 0x08, 0xf4, 0x2d, 0xc3, 0xdb, 0xba, 0xf7, 0x7a, 0x12, 0x28, 0xe0, 0xa5, 0xc6, 0x75, 0x46, + 0xe0, 0x56, 0x65, 0x58, 0x59, 0x66, 0x76, 0x52, 0x5b, 0xf6, 0x1c, 0x36, 0xcc, 0x73, 0xea, 0x7f, + 0x71, 0xcc, 0x40, 0xde, 0xd5, 0x0f, 0x6b, 0x9d, 0x2b, 0xf0, 0x1e, 0xbe, 0x6d, 0x4d, 0xd5, 0x17, + 0xf7, 0xab, 0xfe, 0x41, 0xde, 0xbb, 0xb2, 0xdd, 0x8f, 0x60, 0x19, 0x9b, 0x91, 0x03, 0xad, 0xab, + 0xe1, 0xf9, 0xf0, 0xcb, 0xf5, 0xd0, 0xfb, 0x0f, 0xd9, 0xd0, 0x1c, 0x5d, 0x0d, 0xc7, 0x5e, 0x0d, + 0xb5, 0x61, 0x73, 0x3c, 0x38, 0x1a, 0x8d, 0x2f, 0xfb, 0x27, 0xe7, 0x5e, 0x1d, 0x6d, 0x83, 0x73, + 0xdc, 0x1f, 0x0c, 0x82, 0xe3, 0xa3, 0xfe, 0xe0, 0xf4, 0x9b, 0xd7, 0xe8, 0xf6, 0xc0, 0x32, 0xc3, + 0xaa, 0x65, 0x0d, 0xf5, 0x52, 0x99, 0x79, 0xcc, 0x45, 0xfd, 0x1e, 0x93, 0x5c, 0x98, 0x81, 0x6c, + 0xa2, 0xcf, 0xdd, 0x9f, 0x35, 0xd8, 0x2a, 0x0d, 0xbb, 0x8e, 0xc4, 0xfc, 0x82, 0x2e, 0x91, 0x14, + 0x2c, 0x2c, 0x04, 0x0b, 0x62, 0xba, 0x5c, 0x2a, 0x77, 0x6a, 0x5a, 0xe8, 0x57, 0x6b, 0x4d, 0x2e, + 0x39, 0xfe, 0xb1, 0x24, 0x5c, 0x18, 0x7c, 0xe9, 0x75, 0x78, 0x17, 0xe9, 0x7c, 0x00, 0xef, 0x21, + 0xa0, 0x2a, 0x98, 0x6d, 0x04, 0xdb, 0xa9, 0x0a, 0xe6, 0x56, 0x94, 0x09, 0x2d, 0xd3, 0xfa, 0x77, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x78, 0xc9, 0x75, 0x36, 0xb5, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 000000000..2e06cb3a9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,2076 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/descriptor.proto +// DO NOT EDIT! + +/* +Package descriptor is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ +package descriptor + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. 
Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} } + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } 
+ *x = FieldDescriptorProto_Label(value) + return nil +} +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{3, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} } + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 1} } + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 0} +} + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. 
+ JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. +type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. 
+type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. +type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. 
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // If set true, then the Java code generator will generate equals() and + // hashCode() methods for all messages defined in the .proto file. + // This increases generated code size, potentially substantially for large + // protos, which may harm a memory-constrained application. + // - In the full runtime this is a speed optimization, as the + // AbstractMessage base class includes reflection-based implementations of + // these methods. + // - In the lite runtime, setting this option changes the semantics of + // equals() and hashCode() to more closely match those of the full runtime; + // the generated methods compute their results based on field values rather + // than object identity. (Implementations should not assume that hashcodes + // will be consistent across runtimes or versions of the protocol compiler.) + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + // Is this file deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +var extRange_FileOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaGenerateEqualsAndHash bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return Default_FileOptions_JavaGenerateEqualsAndHash +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) 
GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. 
+ // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +var extRange_MessageOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). By default these types are + // represented as JavaScript strings. This avoids loss of precision that can + // happen when a large value is converted to a floating point JavaScript + // numbers. 
Specifying JS_NUMBER for the jstype causes the generated + // JavaScript code to use the JavaScript "number" type instead of strings. + // This option is an enum to permit additional types to be added, + // e.g. goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outher message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +var extRange_FieldOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
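Every *Options message in this generated file follows the convention the FileOptions and FieldOptions accessors above make explicit: optional fields get a Default_* constant, and the generated getter is safe to call on a nil receiver, falling back to that default. Before the EnumOptions fields continue below, here is a minimal sketch of how calling code leans on that contract; it is illustrative only and not part of the vendored patch:

package main

import (
	"fmt"

	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// A nil *FieldOptions is the common case: most fields set no options.
	var opts *descriptor.FieldOptions

	// The generated getters tolerate a nil receiver and fall back to the
	// Default_FieldOptions_* constants, so callers never nil-check options.
	fmt.Println(opts.GetCtype())      // STRING (Default_FieldOptions_Ctype)
	fmt.Println(opts.GetLazy())       // false  (Default_FieldOptions_Lazy)
	fmt.Println(opts.GetDeprecated()) // false  (Default_FieldOptions_Deprecated)
}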
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +var extRange_EnumOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +var extRange_MethodOptions = []proto.ExtensionRange{ + {1000, 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +const Default_MethodOptions_Deprecated bool = false + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". +type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{17, 0} +} + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. 
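A quick aside before the SourceCodeInfo message that the comment above introduces: the NamePart convention just described, where {["foo", false], ["bar.baz", true], ["qux", false]} stands for "foo.(bar.baz).qux", can be reassembled in a few lines of Go. The helper below is hypothetical, for illustration only, and is not provided by the vendored code:

package example

import (
	"strings"

	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// optionName rebuilds the dotted name of an UninterpretedOption from its
// NamePart segments, parenthesizing extension segments, e.g.
// {["foo", false], ["bar.baz", true], ["qux", false]} -> "foo.(bar.baz).qux".
func optionName(opt *descriptor.UninterpretedOption) string {
	parts := make([]string, 0, len(opt.GetName()))
	for _, p := range opt.GetName() {
		if p.GetIsExtension() {
			parts = append(parts, "("+p.GetNamePart()+")")
		} else {
			parts = append(parts, p.GetNamePart())
		}
	}
	return strings.Join(parts, ".")
}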
+type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. 
For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. 
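The span encoding described above (three or four zero-based values packed into one repeated field) is easy to get wrong by one, so here is a hedged sketch of the conversion a consuming tool might perform before the remaining Location fields below; the type and function names are made up for illustration and are not part of the vendored file:

package example

// editorRange holds 1-based positions suitable for display to a user.
type editorRange struct {
	StartLine, StartCol, EndLine, EndCol int32
}

// spanToRange interprets a SourceCodeInfo_Location span: three elements mean
// start line, start column, end column (end line same as start); four mean
// start line, start column, end line, end column. All inputs are zero-based.
func spanToRange(span []int32) (editorRange, bool) {
	switch len(span) {
	case 3:
		return editorRange{span[0] + 1, span[1] + 1, span[0] + 1, span[2] + 1}, true
	case 4:
		return editorRange{span[0] + 1, span[1] + 1, span[2] + 1, span[3] + 1}, true
	default:
		return editorRange{}, false // malformed span
	}
}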
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} } + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{19, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", 
FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) +} + +func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2287 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0xcd, 0x73, 0xdb, 0xc6, + 0x15, 0x0f, 0xf8, 0x25, 0xf2, 0x91, 0xa2, 0x56, 0x2b, 0xc5, 0x86, 0xe5, 0x38, 0x96, 0x19, 0x3b, + 0x96, 0xed, 0x96, 0xce, 0xc8, 0x1f, 0x71, 0x94, 0x4e, 0x3a, 0x94, 0x08, 0x2b, 0xf4, 0x50, 0x22, + 0x0b, 0x4a, 0xad, 0x93, 0x1e, 0x30, 0x2b, 0x60, 0x49, 0xc1, 0x06, 0x17, 0x28, 0x00, 0xda, 0x56, + 0x4e, 0x9e, 0xe9, 0xa9, 0xc7, 0xde, 0x3a, 0x6d, 0xa7, 0xd3, 0xc9, 0x25, 0x33, 0xfd, 0x03, 0x7a, + 0xe8, 0xbd, 0xd7, 0xce, 0xf4, 0xde, 0x63, 0x67, 0xda, 0xff, 0xa0, 0xd7, 0xce, 0xee, 0x02, 0x20, + 0xf8, 0x15, 0xab, 0x99, 0x49, 0xd2, 0x93, 0xb8, 0xbf, 0xf7, 0x7b, 0x0f, 0x6f, 0xdf, 0x3e, 0xbc, + 0xf7, 0xb0, 0x82, 0xcd, 0x81, 0xeb, 0x0e, 0x1c, 0x7a, 0xd7, 0xf3, 0xdd, 0xd0, 0x3d, 0x19, 0xf5, + 0xef, 0x5a, 0x34, 0x30, 0x7d, 0xdb, 0x0b, 0x5d, 0xbf, 0x2e, 0x30, 0xbc, 0x22, 0x19, 0xf5, 0x98, + 0x51, 0x3b, 0x80, 0xd5, 0xc7, 0xb6, 0x43, 0x9b, 0x09, 0xb1, 0x47, 0x43, 0xfc, 0x08, 0x72, 0x7d, + 0xdb, 0xa1, 0xaa, 0xb2, 0x99, 0xdd, 0x2a, 0x6f, 0x5f, 0xaf, 0x4f, 0x29, 0xd5, 0x27, 0x35, 0xba, + 0x1c, 0xd6, 0x85, 0x46, 0xed, 0x9f, 0x39, 0x58, 0x9b, 0x23, 0xc5, 0x18, 0x72, 0x8c, 0x0c, 0xb9, + 0x45, 0x65, 0xab, 0xa4, 0x8b, 0xdf, 0x58, 0x85, 0x25, 0x8f, 0x98, 0xcf, 0xc9, 0x80, 0xaa, 0x19, + 0x01, 0xc7, 0x4b, 0xfc, 0x2e, 0x80, 0x45, 0x3d, 0xca, 0x2c, 0xca, 0xcc, 0x33, 0x35, 0xbb, 0x99, + 0xdd, 0x2a, 0xe9, 0x29, 0x04, 0xdf, 0x81, 0x55, 0x6f, 0x74, 0xe2, 0xd8, 0xa6, 0x91, 0xa2, 0xc1, + 0x66, 0x76, 0x2b, 0xaf, 0x23, 0x29, 0x68, 0x8e, 0xc9, 0x37, 0x61, 0xe5, 0x25, 0x25, 0xcf, 0xd3, + 0xd4, 0xb2, 0xa0, 0x56, 0x39, 0x9c, 0x22, 0xee, 0x41, 0x65, 0x48, 0x83, 0x80, 0x0c, 0xa8, 0x11, + 0x9e, 0x79, 0x54, 0xcd, 0x89, 0xdd, 0x6f, 0xce, 0xec, 0x7e, 0x7a, 0xe7, 0xe5, 0x48, 0xeb, 0xe8, + 0xcc, 0xa3, 0xb8, 0x01, 0x25, 0xca, 0x46, 0x43, 0x69, 0x21, 0xbf, 0x20, 0x7e, 0x1a, 0x1b, 0x0d, + 0xa7, 0xad, 0x14, 0xb9, 0x5a, 0x64, 0x62, 0x29, 0xa0, 0xfe, 0x0b, 0xdb, 0xa4, 0x6a, 0x41, 0x18, + 0xb8, 0x39, 0x63, 0xa0, 0x27, 0xe5, 0xd3, 0x36, 0x62, 0x3d, 0xbc, 0x07, 0x25, 0xfa, 0x2a, 0xa4, + 0x2c, 0xb0, 0x5d, 0xa6, 0x2e, 0x09, 0x23, 0x37, 0xe6, 0x9c, 0x22, 0x75, 0xac, 0x69, 0x13, 0x63, + 0x3d, 0xfc, 0x10, 0x96, 0x5c, 0x2f, 0xb4, 0x5d, 0x16, 0xa8, 0xc5, 0x4d, 0x65, 0xab, 0xbc, 0xfd, + 0xce, 0xdc, 0x44, 0xe8, 0x48, 0x8e, 0x1e, 0x93, 0x71, 0x0b, 0x50, 0xe0, 0x8e, 0x7c, 0x93, 0x1a, + 0xa6, 0x6b, 0x51, 0xc3, 0x66, 0x7d, 0x57, 0x2d, 0x09, 0x03, 0x57, 0x67, 0x37, 0x22, 0x88, 0x7b, + 0xae, 0x45, 0x5b, 0xac, 0xef, 0xea, 0xd5, 0x60, 0x62, 0x8d, 0x2f, 0x40, 0x21, 0x38, 0x63, 0x21, + 0x79, 0xa5, 0x56, 0x44, 0x86, 0x44, 0xab, 0xda, 0x7f, 0xf2, 0xb0, 0x72, 0x9e, 0x14, 0xfb, 0x18, + 0xf2, 0x7d, 0xbe, 0x4b, 0x35, 0xf3, 0xbf, 0xc4, 0x40, 0xea, 0x4c, 0x06, 0xb1, 0xf0, 0x0d, 0x83, + 0xd8, 0x80, 0x32, 0xa3, 0x41, 0x48, 0x2d, 0x99, 0x11, 0xd9, 0x73, 0xe6, 0x14, 0x48, 0xa5, 0xd9, + 0x94, 0xca, 0x7d, 0xa3, 0x94, 0x7a, 0x0a, 0x2b, 0x89, 0x4b, 0x86, 0x4f, 0xd8, 0x20, 0xce, 0xcd, + 0xbb, 0x6f, 0xf2, 0xa4, 0xae, 0xc5, 0x7a, 0x3a, 0x57, 0xd3, 0xab, 0x74, 0x62, 0x8d, 0x9b, 0x00, + 0x2e, 0xa3, 0x6e, 0xdf, 0xb0, 0xa8, 0xe9, 0xa8, 
0xc5, 0x05, 0x51, 0xea, 0x70, 0xca, 0x4c, 0x94, + 0x5c, 0x89, 0x9a, 0x0e, 0xfe, 0x68, 0x9c, 0x6a, 0x4b, 0x0b, 0x32, 0xe5, 0x40, 0xbe, 0x64, 0x33, + 0xd9, 0x76, 0x0c, 0x55, 0x9f, 0xf2, 0xbc, 0xa7, 0x56, 0xb4, 0xb3, 0x92, 0x70, 0xa2, 0xfe, 0xc6, + 0x9d, 0xe9, 0x91, 0x9a, 0xdc, 0xd8, 0xb2, 0x9f, 0x5e, 0xe2, 0xf7, 0x20, 0x01, 0x0c, 0x91, 0x56, + 0x20, 0xaa, 0x50, 0x25, 0x06, 0x0f, 0xc9, 0x90, 0x6e, 0x3c, 0x82, 0xea, 0x64, 0x78, 0xf0, 0x3a, + 0xe4, 0x83, 0x90, 0xf8, 0xa1, 0xc8, 0xc2, 0xbc, 0x2e, 0x17, 0x18, 0x41, 0x96, 0x32, 0x4b, 0x54, + 0xb9, 0xbc, 0xce, 0x7f, 0x6e, 0x7c, 0x08, 0xcb, 0x13, 0x8f, 0x3f, 0xaf, 0x62, 0xed, 0x37, 0x05, + 0x58, 0x9f, 0x97, 0x73, 0x73, 0xd3, 0xff, 0x02, 0x14, 0xd8, 0x68, 0x78, 0x42, 0x7d, 0x35, 0x2b, + 0x2c, 0x44, 0x2b, 0xdc, 0x80, 0xbc, 0x43, 0x4e, 0xa8, 0xa3, 0xe6, 0x36, 0x95, 0xad, 0xea, 0xf6, + 0x9d, 0x73, 0x65, 0x75, 0xbd, 0xcd, 0x55, 0x74, 0xa9, 0x89, 0x3f, 0x81, 0x5c, 0x54, 0xe2, 0xb8, + 0x85, 0xdb, 0xe7, 0xb3, 0xc0, 0x73, 0x51, 0x17, 0x7a, 0xf8, 0x32, 0x94, 0xf8, 0x5f, 0x19, 0xdb, + 0x82, 0xf0, 0xb9, 0xc8, 0x01, 0x1e, 0x57, 0xbc, 0x01, 0x45, 0x91, 0x66, 0x16, 0x8d, 0x5b, 0x43, + 0xb2, 0xe6, 0x07, 0x63, 0xd1, 0x3e, 0x19, 0x39, 0xa1, 0xf1, 0x82, 0x38, 0x23, 0x2a, 0x12, 0xa6, + 0xa4, 0x57, 0x22, 0xf0, 0xa7, 0x1c, 0xc3, 0x57, 0xa1, 0x2c, 0xb3, 0xd2, 0x66, 0x16, 0x7d, 0x25, + 0xaa, 0x4f, 0x5e, 0x97, 0x89, 0xda, 0xe2, 0x08, 0x7f, 0xfc, 0xb3, 0xc0, 0x65, 0xf1, 0xd1, 0x8a, + 0x47, 0x70, 0x40, 0x3c, 0xfe, 0xc3, 0xe9, 0xc2, 0x77, 0x65, 0xfe, 0xf6, 0xa6, 0x73, 0xb1, 0xf6, + 0xe7, 0x0c, 0xe4, 0xc4, 0xfb, 0xb6, 0x02, 0xe5, 0xa3, 0xcf, 0xba, 0x9a, 0xd1, 0xec, 0x1c, 0xef, + 0xb6, 0x35, 0xa4, 0xe0, 0x2a, 0x80, 0x00, 0x1e, 0xb7, 0x3b, 0x8d, 0x23, 0x94, 0x49, 0xd6, 0xad, + 0xc3, 0xa3, 0x87, 0xf7, 0x51, 0x36, 0x51, 0x38, 0x96, 0x40, 0x2e, 0x4d, 0xb8, 0xb7, 0x8d, 0xf2, + 0x18, 0x41, 0x45, 0x1a, 0x68, 0x3d, 0xd5, 0x9a, 0x0f, 0xef, 0xa3, 0xc2, 0x24, 0x72, 0x6f, 0x1b, + 0x2d, 0xe1, 0x65, 0x28, 0x09, 0x64, 0xb7, 0xd3, 0x69, 0xa3, 0x62, 0x62, 0xb3, 0x77, 0xa4, 0xb7, + 0x0e, 0xf7, 0x51, 0x29, 0xb1, 0xb9, 0xaf, 0x77, 0x8e, 0xbb, 0x08, 0x12, 0x0b, 0x07, 0x5a, 0xaf, + 0xd7, 0xd8, 0xd7, 0x50, 0x39, 0x61, 0xec, 0x7e, 0x76, 0xa4, 0xf5, 0x50, 0x65, 0xc2, 0xad, 0x7b, + 0xdb, 0x68, 0x39, 0x79, 0x84, 0x76, 0x78, 0x7c, 0x80, 0xaa, 0x78, 0x15, 0x96, 0xe5, 0x23, 0x62, + 0x27, 0x56, 0xa6, 0xa0, 0x87, 0xf7, 0x11, 0x1a, 0x3b, 0x22, 0xad, 0xac, 0x4e, 0x00, 0x0f, 0xef, + 0x23, 0x5c, 0xdb, 0x83, 0xbc, 0xc8, 0x2e, 0x8c, 0xa1, 0xda, 0x6e, 0xec, 0x6a, 0x6d, 0xa3, 0xd3, + 0x3d, 0x6a, 0x75, 0x0e, 0x1b, 0x6d, 0xa4, 0x8c, 0x31, 0x5d, 0xfb, 0xc9, 0x71, 0x4b, 0xd7, 0x9a, + 0x28, 0x93, 0xc6, 0xba, 0x5a, 0xe3, 0x48, 0x6b, 0xa2, 0x6c, 0xcd, 0x84, 0xf5, 0x79, 0x75, 0x66, + 0xee, 0x9b, 0x91, 0x3a, 0xe2, 0xcc, 0x82, 0x23, 0x16, 0xb6, 0x66, 0x8e, 0xf8, 0x4b, 0x05, 0xd6, + 0xe6, 0xd4, 0xda, 0xb9, 0x0f, 0xf9, 0x31, 0xe4, 0x65, 0x8a, 0xca, 0xee, 0x73, 0x6b, 0x6e, 0xd1, + 0x16, 0x09, 0x3b, 0xd3, 0x81, 0x84, 0x5e, 0xba, 0x03, 0x67, 0x17, 0x74, 0x60, 0x6e, 0x62, 0xc6, + 0xc9, 0x5f, 0x2a, 0xa0, 0x2e, 0xb2, 0xfd, 0x86, 0x42, 0x91, 0x99, 0x28, 0x14, 0x1f, 0x4f, 0x3b, + 0x70, 0x6d, 0xf1, 0x1e, 0x66, 0xbc, 0xf8, 0x4a, 0x81, 0x0b, 0xf3, 0x07, 0x95, 0xb9, 0x3e, 0x7c, + 0x02, 0x85, 0x21, 0x0d, 0x4f, 0xdd, 0xb8, 0x59, 0xbf, 0x3f, 0xa7, 0x05, 0x70, 0xf1, 0x74, 0xac, + 0x22, 0xad, 0x74, 0x0f, 0xc9, 0x2e, 0x9a, 0x36, 0xa4, 0x37, 0x33, 0x9e, 0xfe, 0x2a, 0x03, 0x6f, + 0xcf, 0x35, 0x3e, 0xd7, 0xd1, 0x2b, 0x00, 0x36, 0xf3, 0x46, 0xa1, 0x6c, 0xc8, 0xb2, 0x3e, 0x95, + 0x04, 0x22, 0xde, 0x7d, 0x5e, 0x7b, 0x46, 0x61, 0x22, 0xcf, 0x0a, 0x39, 
0x48, 0x48, 0x10, 0x1e, + 0x8d, 0x1d, 0xcd, 0x09, 0x47, 0xdf, 0x5d, 0xb0, 0xd3, 0x99, 0x5e, 0xf7, 0x01, 0x20, 0xd3, 0xb1, + 0x29, 0x0b, 0x8d, 0x20, 0xf4, 0x29, 0x19, 0xda, 0x6c, 0x20, 0x0a, 0x70, 0x71, 0x27, 0xdf, 0x27, + 0x4e, 0x40, 0xf5, 0x15, 0x29, 0xee, 0xc5, 0x52, 0xae, 0x21, 0xba, 0x8c, 0x9f, 0xd2, 0x28, 0x4c, + 0x68, 0x48, 0x71, 0xa2, 0x51, 0xfb, 0xf5, 0x12, 0x94, 0x53, 0x63, 0x1d, 0xbe, 0x06, 0x95, 0x67, + 0xe4, 0x05, 0x31, 0xe2, 0x51, 0x5d, 0x46, 0xa2, 0xcc, 0xb1, 0x6e, 0x34, 0xae, 0x7f, 0x00, 0xeb, + 0x82, 0xe2, 0x8e, 0x42, 0xea, 0x1b, 0xa6, 0x43, 0x82, 0x40, 0x04, 0xad, 0x28, 0xa8, 0x98, 0xcb, + 0x3a, 0x5c, 0xb4, 0x17, 0x4b, 0xf0, 0x03, 0x58, 0x13, 0x1a, 0xc3, 0x91, 0x13, 0xda, 0x9e, 0x43, + 0x0d, 0xfe, 0xf1, 0x10, 0x88, 0x42, 0x9c, 0x78, 0xb6, 0xca, 0x19, 0x07, 0x11, 0x81, 0x7b, 0x14, + 0xe0, 0x7d, 0xb8, 0x22, 0xd4, 0x06, 0x94, 0x51, 0x9f, 0x84, 0xd4, 0xa0, 0xbf, 0x18, 0x11, 0x27, + 0x30, 0x08, 0xb3, 0x8c, 0x53, 0x12, 0x9c, 0xaa, 0xeb, 0x69, 0x03, 0x97, 0x38, 0x77, 0x3f, 0xa2, + 0x6a, 0x82, 0xd9, 0x60, 0xd6, 0xa7, 0x24, 0x38, 0xc5, 0x3b, 0x70, 0x41, 0x18, 0x0a, 0x42, 0xdf, + 0x66, 0x03, 0xc3, 0x3c, 0xa5, 0xe6, 0x73, 0x63, 0x14, 0xf6, 0x1f, 0xa9, 0x97, 0xd3, 0x16, 0x84, + 0x93, 0x3d, 0xc1, 0xd9, 0xe3, 0x94, 0xe3, 0xb0, 0xff, 0x08, 0xf7, 0xa0, 0xc2, 0xcf, 0x63, 0x68, + 0x7f, 0x41, 0x8d, 0xbe, 0xeb, 0x8b, 0xe6, 0x52, 0x9d, 0xf3, 0x72, 0xa7, 0x82, 0x58, 0xef, 0x44, + 0x0a, 0x07, 0xae, 0x45, 0x77, 0xf2, 0xbd, 0xae, 0xa6, 0x35, 0xf5, 0x72, 0x6c, 0xe5, 0xb1, 0xeb, + 0xf3, 0x9c, 0x1a, 0xb8, 0x49, 0x8c, 0xcb, 0x32, 0xa7, 0x06, 0x6e, 0x1c, 0xe1, 0x07, 0xb0, 0x66, + 0x9a, 0x72, 0xdb, 0xb6, 0x69, 0x44, 0x53, 0x7e, 0xa0, 0xa2, 0x89, 0x78, 0x99, 0xe6, 0xbe, 0x24, + 0x44, 0x69, 0x1e, 0xe0, 0x8f, 0xe0, 0xed, 0x71, 0xbc, 0xd2, 0x8a, 0xab, 0x33, 0xbb, 0x9c, 0x56, + 0x7d, 0x00, 0x6b, 0xde, 0xd9, 0xac, 0x22, 0x9e, 0x78, 0xa2, 0x77, 0x36, 0xad, 0x76, 0x43, 0x7c, + 0xb9, 0xf9, 0xd4, 0x24, 0x21, 0xb5, 0xd4, 0x8b, 0x69, 0x76, 0x4a, 0x80, 0xef, 0x02, 0x32, 0x4d, + 0x83, 0x32, 0x72, 0xe2, 0x50, 0x83, 0xf8, 0x94, 0x91, 0x40, 0xbd, 0x9a, 0x26, 0x57, 0x4d, 0x53, + 0x13, 0xd2, 0x86, 0x10, 0xe2, 0xdb, 0xb0, 0xea, 0x9e, 0x3c, 0x33, 0x65, 0x72, 0x19, 0x9e, 0x4f, + 0xfb, 0xf6, 0x2b, 0xf5, 0xba, 0x08, 0xd3, 0x0a, 0x17, 0x88, 0xd4, 0xea, 0x0a, 0x18, 0xdf, 0x02, + 0x64, 0x06, 0xa7, 0xc4, 0xf7, 0x44, 0x77, 0x0f, 0x3c, 0x62, 0x52, 0xf5, 0x86, 0xa4, 0x4a, 0xfc, + 0x30, 0x86, 0xf1, 0x53, 0x58, 0x1f, 0x31, 0x9b, 0x85, 0xd4, 0xf7, 0x7c, 0xca, 0x87, 0x74, 0xf9, + 0xa6, 0xa9, 0xff, 0x5a, 0x5a, 0x30, 0x66, 0x1f, 0xa7, 0xd9, 0xf2, 0x74, 0xf5, 0xb5, 0xd1, 0x2c, + 0x58, 0xdb, 0x81, 0x4a, 0xfa, 0xd0, 0x71, 0x09, 0xe4, 0xb1, 0x23, 0x85, 0xf7, 0xd0, 0xbd, 0x4e, + 0x93, 0x77, 0xbf, 0xcf, 0x35, 0x94, 0xe1, 0x5d, 0xb8, 0xdd, 0x3a, 0xd2, 0x0c, 0xfd, 0xf8, 0xf0, + 0xa8, 0x75, 0xa0, 0xa1, 0xec, 0xed, 0x52, 0xf1, 0xdf, 0x4b, 0xe8, 0xf5, 0xeb, 0xd7, 0xaf, 0x33, + 0x4f, 0x72, 0xc5, 0xf7, 0xd1, 0xcd, 0xda, 0x5f, 0x33, 0x50, 0x9d, 0x9c, 0x7f, 0xf1, 0x8f, 0xe0, + 0x62, 0xfc, 0xb1, 0x1a, 0xd0, 0xd0, 0x78, 0x69, 0xfb, 0x22, 0x1b, 0x87, 0x44, 0x4e, 0x90, 0x49, + 0x20, 0xd7, 0x23, 0x56, 0x8f, 0x86, 0x3f, 0xb3, 0x7d, 0x9e, 0x6b, 0x43, 0x12, 0xe2, 0x36, 0x5c, + 0x65, 0xae, 0x11, 0x84, 0x84, 0x59, 0xc4, 0xb7, 0x8c, 0xf1, 0x35, 0x81, 0x41, 0x4c, 0x93, 0x06, + 0x81, 0x2b, 0x1b, 0x41, 0x62, 0xe5, 0x1d, 0xe6, 0xf6, 0x22, 0xf2, 0xb8, 0x42, 0x36, 0x22, 0xea, + 0xd4, 0xa1, 0x67, 0x17, 0x1d, 0xfa, 0x65, 0x28, 0x0d, 0x89, 0x67, 0x50, 0x16, 0xfa, 0x67, 0x62, + 0x6a, 0x2b, 0xea, 0xc5, 0x21, 0xf1, 0x34, 0xbe, 0xfe, 0xf6, 0x4e, 0x22, 0x15, 0xcd, 0xda, 0x3f, + 
0xb2, 0x50, 0x49, 0x4f, 0x6e, 0x7c, 0x10, 0x36, 0x45, 0x95, 0x56, 0xc4, 0x4b, 0xfc, 0xde, 0xd7, + 0xce, 0x79, 0xf5, 0x3d, 0x5e, 0xbe, 0x77, 0x0a, 0x72, 0x9e, 0xd2, 0xa5, 0x26, 0x6f, 0x9d, 0xfc, + 0xb5, 0xa5, 0x72, 0x4a, 0x2f, 0xea, 0xd1, 0x0a, 0xef, 0x43, 0xe1, 0x59, 0x20, 0x6c, 0x17, 0x84, + 0xed, 0xeb, 0x5f, 0x6f, 0xfb, 0x49, 0x4f, 0x18, 0x2f, 0x3d, 0xe9, 0x19, 0x87, 0x1d, 0xfd, 0xa0, + 0xd1, 0xd6, 0x23, 0x75, 0x7c, 0x09, 0x72, 0x0e, 0xf9, 0xe2, 0x6c, 0xb2, 0xd0, 0x0b, 0xe8, 0xbc, + 0x81, 0xbf, 0x04, 0xb9, 0x97, 0x94, 0x3c, 0x9f, 0x2c, 0xaf, 0x02, 0xfa, 0x16, 0x5f, 0x80, 0xbb, + 0x90, 0x17, 0xf1, 0xc2, 0x00, 0x51, 0xc4, 0xd0, 0x5b, 0xb8, 0x08, 0xb9, 0xbd, 0x8e, 0xce, 0x5f, + 0x02, 0x04, 0x15, 0x89, 0x1a, 0xdd, 0x96, 0xb6, 0xa7, 0xa1, 0x4c, 0xed, 0x01, 0x14, 0x64, 0x10, + 0xf8, 0x0b, 0x92, 0x84, 0x01, 0xbd, 0x15, 0x2d, 0x23, 0x1b, 0x4a, 0x2c, 0x3d, 0x3e, 0xd8, 0xd5, + 0x74, 0x94, 0x49, 0x1f, 0x6f, 0x00, 0x95, 0xf4, 0xd0, 0xf6, 0xdd, 0xe4, 0xd4, 0x5f, 0x14, 0x28, + 0xa7, 0x86, 0x30, 0xde, 0xfe, 0x89, 0xe3, 0xb8, 0x2f, 0x0d, 0xe2, 0xd8, 0x24, 0x88, 0x92, 0x02, + 0x04, 0xd4, 0xe0, 0xc8, 0x79, 0x0f, 0xed, 0x3b, 0x71, 0xfe, 0x0f, 0x0a, 0xa0, 0xe9, 0x01, 0x6e, + 0xca, 0x41, 0xe5, 0x7b, 0x75, 0xf0, 0xf7, 0x0a, 0x54, 0x27, 0xa7, 0xb6, 0x29, 0xf7, 0xae, 0x7d, + 0xaf, 0xee, 0xfd, 0x4e, 0x81, 0xe5, 0x89, 0x59, 0xed, 0xff, 0xca, 0xbb, 0xdf, 0x66, 0x61, 0x6d, + 0x8e, 0x1e, 0x6e, 0x44, 0x43, 0xad, 0x9c, 0xb3, 0x7f, 0x78, 0x9e, 0x67, 0xd5, 0x79, 0xcf, 0xec, + 0x12, 0x3f, 0x8c, 0x66, 0xe0, 0x5b, 0x80, 0x6c, 0x8b, 0xb2, 0xd0, 0xee, 0xdb, 0xd4, 0x8f, 0x3e, + 0xc4, 0xe5, 0xa4, 0xbb, 0x32, 0xc6, 0xe5, 0xb7, 0xf8, 0x0f, 0x00, 0x7b, 0x6e, 0x60, 0x87, 0xf6, + 0x0b, 0x6a, 0xd8, 0x2c, 0xfe, 0x6a, 0xe7, 0x93, 0x6f, 0x4e, 0x47, 0xb1, 0xa4, 0xc5, 0xc2, 0x84, + 0xcd, 0xe8, 0x80, 0x4c, 0xb1, 0x79, 0xed, 0xcb, 0xea, 0x28, 0x96, 0x24, 0xec, 0x6b, 0x50, 0xb1, + 0xdc, 0x11, 0x1f, 0x22, 0x24, 0x8f, 0x97, 0x5a, 0x45, 0x2f, 0x4b, 0x2c, 0xa1, 0x44, 0x53, 0xde, + 0xf8, 0xba, 0xa0, 0xa2, 0x97, 0x25, 0x26, 0x29, 0x37, 0x61, 0x85, 0x0c, 0x06, 0x3e, 0x37, 0x1e, + 0x1b, 0x92, 0xa3, 0x6b, 0x35, 0x81, 0x05, 0x71, 0xe3, 0x09, 0x14, 0xe3, 0x38, 0xf0, 0x6e, 0xc6, + 0x23, 0x61, 0x78, 0xf2, 0xd2, 0x26, 0xb3, 0x55, 0xd2, 0x8b, 0x2c, 0x16, 0x5e, 0x83, 0x8a, 0x1d, + 0x18, 0xe3, 0xdb, 0xc3, 0xcc, 0x66, 0x66, 0xab, 0xa8, 0x97, 0xed, 0x20, 0xb9, 0x2e, 0xaa, 0x7d, + 0x95, 0x81, 0xea, 0xe4, 0xed, 0x27, 0x6e, 0x42, 0xd1, 0x71, 0x4d, 0x22, 0x12, 0x41, 0x5e, 0xbd, + 0x6f, 0xbd, 0xe1, 0xc2, 0xb4, 0xde, 0x8e, 0xf8, 0x7a, 0xa2, 0xb9, 0xf1, 0x37, 0x05, 0x8a, 0x31, + 0x8c, 0x2f, 0x40, 0xce, 0x23, 0xe1, 0xa9, 0x30, 0x97, 0xdf, 0xcd, 0x20, 0x45, 0x17, 0x6b, 0x8e, + 0x07, 0x1e, 0x61, 0x22, 0x05, 0x22, 0x9c, 0xaf, 0xf9, 0xb9, 0x3a, 0x94, 0x58, 0x62, 0x28, 0x76, + 0x87, 0x43, 0xca, 0xc2, 0x20, 0x3e, 0xd7, 0x08, 0xdf, 0x8b, 0x60, 0x7c, 0x07, 0x56, 0x43, 0x9f, + 0xd8, 0xce, 0x04, 0x37, 0x27, 0xb8, 0x28, 0x16, 0x24, 0xe4, 0x1d, 0xb8, 0x14, 0xdb, 0xb5, 0x68, + 0x48, 0xcc, 0x53, 0x6a, 0x8d, 0x95, 0x0a, 0xe2, 0x6a, 0xed, 0x62, 0x44, 0x68, 0x46, 0xf2, 0x58, + 0xb7, 0xf6, 0x77, 0x05, 0x56, 0xe3, 0x31, 0xde, 0x4a, 0x82, 0x75, 0x00, 0x40, 0x18, 0x73, 0xc3, + 0x74, 0xb8, 0x66, 0x53, 0x79, 0x46, 0xaf, 0xde, 0x48, 0x94, 0xf4, 0x94, 0x81, 0x8d, 0x21, 0xc0, + 0x58, 0xb2, 0x30, 0x6c, 0x57, 0xa1, 0x1c, 0x5d, 0x6d, 0x8b, 0xff, 0x8f, 0xc8, 0x6f, 0x3f, 0x90, + 0x10, 0x9f, 0xf7, 0xf1, 0x3a, 0xe4, 0x4f, 0xe8, 0xc0, 0x66, 0xd1, 0x85, 0x9b, 0x5c, 0xc4, 0xd7, + 0x78, 0xb9, 0xe4, 0x1a, 0x6f, 0xf7, 0xe7, 0xb0, 0x66, 0xba, 0xc3, 0x69, 0x77, 0x77, 0xd1, 0xd4, + 0xf7, 0x67, 0xf0, 0xa9, 
0xf2, 0x39, 0x8c, 0xa7, 0xb3, 0x3f, 0x2a, 0xca, 0x97, 0x99, 0xec, 0x7e, + 0x77, 0xf7, 0x4f, 0x99, 0x8d, 0x7d, 0xa9, 0xda, 0x8d, 0x77, 0xaa, 0xd3, 0xbe, 0x43, 0x4d, 0xee, + 0xfd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x54, 0xc8, 0x7d, 0x07, 0x1a, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go new file mode 100644 index 000000000..0ff4e13a8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go @@ -0,0 +1,229 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/compiler/plugin.proto +// DO NOT EDIT! + +/* +Package plugin_go is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/compiler/plugin.proto + +It has these top-level messages: + CodeGeneratorRequest + CodeGeneratorResponse +*/ +package plugin_go + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. 
+ ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } +func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorRequest) ProtoMessage() {} +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *CodeGeneratorRequest) GetFileToGenerate() []string { + if m != nil { + return m.FileToGenerate + } + return nil +} + +func (m *CodeGeneratorRequest) GetParameter() string { + if m != nil && m.Parameter != nil { + return *m.Parameter + } + return "" +} + +func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { + if m != nil { + return m.ProtoFile + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } +func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse) ProtoMessage() {} +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *CodeGeneratorResponse) GetError() string { + if m != nil && m.Error != nil { + return *m.Error + } + return "" +} + +func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if m != nil { + return m.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. 
The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. 
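Taken together, the two messages in this file describe the whole protoc plugin protocol: an encoded CodeGeneratorRequest arrives on stdin, an encoded CodeGeneratorResponse goes back on stdout, and .proto-level problems are reported through the Error field rather than a non-zero exit. A minimal plugin skeleton under those assumptions follows (the emitted file name and content are placeholders, not anything this patch generates); the Content field and remaining accessors of CodeGeneratorResponse_File continue right after it:

package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	// protoc writes an encoded CodeGeneratorRequest to the plugin's stdin.
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatalf("reading stdin: %v", err)
	}
	req := &plugin.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		log.Fatalf("parsing CodeGeneratorRequest: %v", err)
	}

	// Emit one placeholder file per file explicitly listed on the command line.
	resp := &plugin.CodeGeneratorResponse{}
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".example.txt"),
			Content: proto.String("// generated from " + name + "\n"),
		})
	}

	// The plugin writes an encoded CodeGeneratorResponse to stdout and exits
	// with status zero; errors in the .proto files would go in resp.Error.
	out, err := proto.Marshal(resp)
	if err != nil {
		log.Fatalf("marshaling CodeGeneratorResponse: %v", err)
	}
	if _, err := os.Stdout.Write(out); err != nil {
		log.Fatalf("writing stdout: %v", err)
	}
}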
+ Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } +func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} +func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +func (m *CodeGeneratorResponse_File) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { + if m != nil && m.InsertionPoint != nil { + return *m.InsertionPoint + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetContent() string { + if m != nil && m.Content != nil { + return *m.Content + } + return "" +} + +func init() { + proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") + proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") + proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") +} + +func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 310 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x51, 0xc1, 0x4a, 0xc3, 0x40, + 0x10, 0x25, 0xb6, 0x22, 0x19, 0xa5, 0x95, 0xa5, 0xc2, 0x52, 0x7a, 0x08, 0x45, 0x31, 0xa7, 0x14, + 0x44, 0xf0, 0xde, 0x8a, 0x7a, 0x2c, 0xc1, 0x93, 0x20, 0x21, 0xa6, 0xd3, 0xb0, 0x90, 0xec, 0xac, + 0xb3, 0xdb, 0x2f, 0xf2, 0x9f, 0xfc, 0x1e, 0xd9, 0x4d, 0x5b, 0xa5, 0xd8, 0xdb, 0xce, 0x7b, 0x6f, + 0xe6, 0xbd, 0x9d, 0x81, 0x9b, 0x9a, 0xa8, 0x6e, 0x70, 0x66, 0x98, 0x1c, 0x7d, 0x6c, 0xd6, 0xb3, + 0x8a, 0x5a, 0xa3, 0x1a, 0xe4, 0x99, 0x69, 0x36, 0xb5, 0xd2, 0x59, 0x20, 0x84, 0xec, 0x64, 0xd9, + 0x4e, 0x96, 0xed, 0x64, 0xe3, 0xe4, 0x70, 0xc0, 0x0a, 0x6d, 0xc5, 0xca, 0x38, 0xe2, 0x4e, 0x3d, + 0xfd, 0x8a, 0x60, 0xb4, 0xa0, 0x15, 0x3e, 0xa3, 0x46, 0x2e, 0x1d, 0x71, 0x8e, 0x9f, 0x1b, 0xb4, + 0x4e, 0xa4, 0x70, 0xb9, 0x56, 0x0d, 0x16, 0x8e, 0x8a, 0xba, 0xe3, 0x50, 0x46, 0x49, 0x2f, 0x8d, + 0xf3, 0x81, 0xc7, 0x5f, 0x69, 0xdb, 0x81, 0x62, 0x02, 0xb1, 0x29, 0xb9, 0x6c, 0xd1, 0x21, 0xcb, + 0x93, 0x24, 0x4a, 0xe3, 0xfc, 0x17, 0x10, 0x0b, 0x80, 0xe0, 0x54, 0xf8, 0x2e, 0x39, 0x4c, 0x7a, + 0xe9, 0xf9, 0xdd, 0x75, 0x76, 0x98, 0xf8, 0x49, 0x35, 0xf8, 0xb8, 0xcf, 0xb6, 0xf4, 0x70, 0x1e, + 0x07, 0xd6, 0x33, 0xd3, 0xef, 0x08, 0xae, 0x0e, 0x52, 0x5a, 0x43, 0xda, 0xa2, 0x18, 0xc1, 0x29, + 0x32, 0x13, 0xcb, 0x28, 0x18, 0x77, 0x85, 0x78, 0x81, 0xfe, 0x1f, 0xbb, 0xfb, 0xec, 0xd8, 0x82, + 0xb2, 0x7f, 0x87, 0x86, 0x34, 0x79, 0x98, 0x30, 0x7e, 0x87, 0xbe, 0xaf, 0x84, 0x80, 0xbe, 0x2e, + 0x5b, 0xdc, 0xda, 0x84, 0xb7, 0xb8, 0x85, 0xa1, 0xd2, 0x16, 0xd9, 0x29, 0xd2, 0x85, 0x21, 0xa5, + 0xdd, 0xf6, 0xfb, 0x83, 0x3d, 0xbc, 0xf4, 0xa8, 0x90, 0x70, 0x56, 0x91, 0x76, 0xa8, 0x9d, 0x1c, + 0x06, 0xc1, 0xae, 0x9c, 0x3f, 0xc0, 0xa4, 0xa2, 0xf6, 0x68, 0xbe, 0xf9, 0xc5, 0x32, 0x1c, 0x3a, + 0x2c, 0xc4, 0xbe, 0xc5, 0xdd, 0xd9, 0x8b, 0x9a, 0x7e, 0x02, 0x00, 0x00, 0xff, 0xff, 0x83, 0x7b, + 0x5c, 0x7c, 0x1b, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 000000000..f2c6906b9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 
+1,155 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/any/any.proto +// DO NOT EDIT! + +/* +Package any is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/any/any.proto + +It has these top-level messages: + Any +*/ +package any + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. 
+ // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 187 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, + 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, + 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, + 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, + 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, + 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, + 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, + 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, + 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, + 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 000000000..569748346 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,114 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/duration/duration.proto +// DO NOT EDIT! + +/* +Package duration is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/duration/duration.proto + +It has these top-level messages: + Duration +*/ +package duration + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
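// A minimal usage sketch for the Any message defined above. Only the
// generated type is vendored here (not a helper package), so the type URL is
// written by hand; fullName would be a fully qualified message name such as
// "google.protobuf.Duration". This is an illustrative sketch, not part of the
// generated file.
func packIntoAny(msg proto.Message, fullName string) (*Any, error) {
	b, err := proto.Marshal(msg)
	if err != nil {
		return nil, err
	}
	// Follow the documented convention: "type.googleapis.com/" plus the
	// fully qualified message name.
	return &Any{TypeUrl: "type.googleapis.com/" + fullName, Value: b}, nil
}

func unpackFromAny(a *Any, msg proto.Message) error {
	// Callers should verify that a.TypeUrl names the concrete type of msg
	// before unmarshaling the raw bytes.
	return proto.Unmarshal(a.Value, msg)
}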
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 189 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29, + 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, + 0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8, + 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, + 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, + 0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98, + 0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13, + 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9, + 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 000000000..46c765a96 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,69 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/empty/empty.proto +// DO NOT EDIT! + +/* +Package empty is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/empty/empty.proto + +It has these top-level messages: + Empty +*/ +package empty + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. 
+type Empty struct { +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Empty) XXX_WellKnownType() string { return "Empty" } + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/empty/empty.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 150 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x32, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcd, + 0x2d, 0x28, 0xa9, 0x84, 0x90, 0x7a, 0x60, 0x39, 0x21, 0xfe, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, + 0x3d, 0x98, 0x4a, 0x25, 0x76, 0x2e, 0x56, 0x57, 0x90, 0xbc, 0x53, 0x25, 0x97, 0x70, 0x72, 0x7e, + 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x27, 0xd2, + 0xce, 0x05, 0x8c, 0x8c, 0x3f, 0x18, 0x19, 0x17, 0x31, 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, + 0x73, 0x87, 0x18, 0x1a, 0x00, 0x55, 0xaa, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, + 0x17, 0x02, 0xd2, 0x92, 0xc4, 0x06, 0x36, 0xc3, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xbb, + 0xf4, 0x0e, 0xd2, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 000000000..197042ed5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,382 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/struct/struct.proto +// DO NOT EDIT! + +/* +Package structpb is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/struct/struct.proto + +It has these top-level messages: + Struct + Value + ListValue +*/ +package structpb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} +func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. 
For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. + // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*Value) XXX_WellKnownType() string { return "Value" } + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m 
*Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_StructValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 8 + case *Value_StringValue: + n += 
proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/struct/struct.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 416 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0x80, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa0, 0xa1, 0x7b, 0x09, + 0x22, 0x09, 0x56, 0x04, 0x31, 0x5e, 0x0c, 0xac, 0xbb, 0x60, 0x58, 0x62, 0x74, 0x57, 0xf0, 0x52, + 0x9a, 0x34, 0x8d, 0xa1, 0xd3, 0x99, 0x90, 0xcc, 0x28, 0x3d, 0xfa, 0x2f, 0x3c, 0x8a, 0x47, 0x8f, + 0xfe, 0x42, 0x99, 0x99, 0x24, 0x4a, 0x4b, 0xc1, 0xd3, 0xf4, 0xbd, 0xf9, 0xde, 0x37, 0xef, 0xbd, + 0x06, 0x9e, 0x97, 0x15, 0xff, 0x2c, 0x32, 0x3f, 0x67, 0x9b, 0xa0, 0x64, 0x64, 0x41, 0xcb, 0xa0, + 0x6e, 0x18, 0x67, 0x99, 0x58, 0x05, 0x35, 0xdf, 0xd6, 0x45, 0x1b, 0xb4, 0xbc, 0x11, 0x39, 0xef, + 0x0e, 0x5f, 0xdd, 0xe2, 0x3b, 0x25, 0x63, 0x25, 0x29, 0xfc, 0x9e, 0x9d, 0x7e, 0x47, 0x60, 0xbd, + 0x57, 0x04, 0x0e, 0xc1, 0x5a, 0x55, 0x05, 0x59, 0xb6, 0x13, 0xe4, 0x9a, 0x9e, 0x33, 0x3b, 0xf3, + 0x77, 0x60, 0x5f, 0x83, 0xfe, 0x1b, 0x45, 0x9d, 0x53, 0xde, 0x6c, 0xd3, 0xae, 0xe4, 0xf4, 0x1d, + 0x38, 0xff, 0xa4, 0xf1, 0x09, 0x98, 0xeb, 0x62, 0x3b, 0x41, 0x2e, 0xf2, 0xec, 0x54, 0xfe, 0xc4, + 0x4f, 0x60, 0xfc, 0x65, 0x41, 0x44, 0x31, 0x31, 0x5c, 0xe4, 0x39, 0xb3, 0x7b, 0x7b, 0xf2, 0x1b, + 0x79, 0x9b, 0x6a, 0xe8, 0xa5, 0xf1, 0x02, 0x4d, 0x7f, 0x1b, 0x30, 0x56, 0x49, 0x1c, 0x02, 0x50, + 0x41, 0xc8, 0x5c, 0x0b, 0xa4, 0xf4, 0x78, 0x76, 0xba, 0x27, 0xb8, 0x12, 0x84, 0x28, 0xfe, 0x72, + 0x94, 0xda, 0xb4, 0x0f, 0xf0, 0x19, 0xdc, 0xa6, 0x62, 0x93, 0x15, 0xcd, 0xfc, 0xef, 0xfb, 0xe8, + 0x72, 0x94, 0x3a, 0x3a, 0x3b, 0x40, 0x2d, 0x6f, 0x2a, 0x5a, 0x76, 0x90, 0x29, 0x1b, 0x97, 0x90, + 0xce, 0x6a, 0xe8, 0x11, 0x40, 0xc6, 0x58, 0xdf, 0xc6, 0x91, 0x8b, 0xbc, 0x5b, 0xf2, 0x29, 0x99, + 0xd3, 0xc0, 0x2b, 0x65, 0x11, 0x39, 0xef, 0x90, 0xb1, 0x1a, 0xf5, 0xfe, 0x81, 0x3d, 
0x76, 0x7a, + 0x91, 0xf3, 0x61, 0x4a, 0x52, 0xb5, 0x7d, 0xad, 0xa5, 0x6a, 0xf7, 0xa7, 0x8c, 0xab, 0x96, 0x0f, + 0x53, 0x92, 0x3e, 0x88, 0x2c, 0x38, 0x5a, 0x57, 0x74, 0x39, 0x0d, 0xc1, 0x1e, 0x08, 0xec, 0x83, + 0xa5, 0x64, 0xfd, 0x3f, 0x7a, 0x68, 0xe9, 0x1d, 0xf5, 0xf8, 0x01, 0xd8, 0xc3, 0x12, 0xf1, 0x31, + 0xc0, 0xd5, 0x75, 0x1c, 0xcf, 0x6f, 0x5e, 0xc7, 0xd7, 0xe7, 0x27, 0xa3, 0xe8, 0x1b, 0x82, 0xbb, + 0x39, 0xdb, 0xec, 0x2a, 0x22, 0x47, 0x4f, 0x93, 0xc8, 0x38, 0x41, 0x9f, 0x9e, 0xfe, 0xef, 0x87, + 0x19, 0xea, 0xa3, 0xce, 0x7e, 0x20, 0xf4, 0xd3, 0x30, 0x2f, 0x92, 0xe8, 0x97, 0xf1, 0xf0, 0x42, + 0xcb, 0x93, 0xbe, 0xbf, 0x8f, 0x05, 0x21, 0x6f, 0x29, 0xfb, 0x4a, 0x3f, 0xc8, 0xca, 0xcc, 0x52, + 0xaa, 0x67, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xbc, 0xcf, 0x6d, 0x50, 0xfe, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 000000000..ffcc51594 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,127 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +// DO NOT EDIT! + +/* +Package timestamp is a generated protocol buffer package. + +It is generated from these files: + github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +It has these top-level messages: + Timestamp +*/ +package timestamp + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
+// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 194 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, + 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, + 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, + 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, + 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, + 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27, + 0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1, + 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03, + 0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92, + 0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, + 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go new file mode 100644 index 000000000..5e52a81c7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. +// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto +// DO NOT EDIT! + +/* +Package wrappers is a generated protocol buffer package. 
+ +It is generated from these files: + github.com/golang/protobuf/ptypes/wrappers/wrappers.proto + +It has these top-level messages: + DoubleValue + FloatValue + Int64Value + UInt64Value + Int32Value + UInt32Value + BoolValue + StringValue + BytesValue +*/ +package wrappers + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { + proto.RegisterFile("github.com/golang/protobuf/ptypes/wrappers/wrappers.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 260 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, + 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x2f, + 0x4a, 0x2c, 0x28, 0x48, 0x2d, 0x42, 0x30, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, 0xd3, + 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0x94, 0xb9, 0xb8, 0x5d, 0xf2, 0x4b, 0x93, 0x72, 0x52, 0xc3, + 0x12, 0x73, 0x4a, 0x53, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x40, 0x0c, 0x09, 0x46, 0x05, 0x46, 0x0d, + 0xc6, 0x20, 0x08, 0x47, 0x49, 0x89, 0x8b, 0xcb, 0x2d, 0x27, 0x3f, 0xb1, 0x04, 0x8b, 0x1a, 0x26, + 0x24, 0x35, 0x9e, 0x79, 0x25, 0x66, 0x26, 0x58, 0xd4, 0x30, 0xc3, 0xd4, 0x28, 0x73, 0x71, 0x87, + 0xe2, 0x52, 0xc4, 0x82, 0x6a, 0x90, 0xb1, 0x11, 0x16, 0x35, 0xac, 0x68, 0x06, 0x61, 0x55, 0xc4, + 0x0b, 0x53, 0xa4, 0xc8, 0xc5, 0xe9, 0x94, 0x9f, 0x9f, 0x83, 0x45, 0x09, 0x07, 0x92, 0x39, 0xc1, + 0x25, 0x45, 0x99, 0x79, 0xe9, 0x58, 0x14, 0x71, 0x22, 0x39, 0xc8, 0xa9, 0xb2, 0x24, 0xb5, 0x18, + 0x8b, 0x1a, 0x1e, 0xa8, 0x1a, 0xa7, 0x7a, 0x2e, 0xe1, 0xe4, 0xfc, 0x5c, 0x3d, 0xb4, 0xd0, 0x75, + 0xe2, 0x0d, 0x87, 0x06, 0x7f, 0x00, 0x48, 0x24, 0x80, 0x31, 0x4a, 0x8b, 0xf8, 0xa8, 0x5b, 0xc0, + 0xc8, 0xf8, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, + 0xd1, 0x01, 0x50, 0xd5, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, + 0x5d, 0x49, 0x6c, 0x60, 0x63, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xdf, 0x64, 0x4b, + 0x1c, 0x02, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 000000000..52f4f10fc --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,80 @@ +package grpc + +import ( + "math/rand" + "time" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var ( + DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, + baseDelay: 1.0 * time.Second, + factor: 1.6, + jitter: 0.2, + } +) + +// backoffStrategy defines the methodology for backing off after a grpc +// connection failure. 
+// +// This is unexported until the gRPC project decides whether or not to allow +// alternative backoff strategies. Once a decision is made, this type and its +// method may be exported. +type backoffStrategy interface { + // backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + backoff(retries int) time.Duration +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration + + // TODO(stevvooe): The following fields are not exported, as allowing + // changes would violate the current gRPC specification for backoff. If + // gRPC decides to allow more interesting backoff strategies, these fields + // may be opened up in the future. + + // baseDelay is the amount of time to wait before retrying after the first + // failure. + baseDelay time.Duration + + // factor is applied to the backoff after each retry. + factor float64 + + // jitter provides a range to randomize backoff delays. + jitter float64 +} + +func setDefaults(bc *BackoffConfig) { + md := bc.MaxDelay + *bc = DefaultBackoffConfig + + if md > 0 { + bc.MaxDelay = md + } +} + +func (bc BackoffConfig) backoff(retries int) (t time.Duration) { + if retries == 0 { + return bc.baseDelay + } + backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.factor + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + bc.jitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go new file mode 100644 index 000000000..419e21461 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer.go @@ -0,0 +1,385 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "fmt" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BalancerGetOptions configures a Get call. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type BalancerGetOptions struct { + // BlockingWait specifies whether Get should block when there is no + // connected address. + BlockingWait bool +} + +// Balancer chooses network addresses for RPCs. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Balancer interface { + // Start does the initialization work to bootstrap a Balancer. For example, + // this function may start the name resolution and watch the updates. It will + // be called when dialing. + Start(target string) error + // Up informs the Balancer that gRPC has a connection to the server at + // addr. It returns down which is called once the connection to addr gets + // lost or closed. + // TODO: It is not clear how to construct and take advantage the meaningful error + // parameter for down. Need realistic demands to guide. + Up(addr Address) (down func(error)) + // Get gets the address of a server for the RPC corresponding to ctx. + // i) If it returns a connected address, gRPC internals issues the RPC on the + // connection to this address; + // ii) If it returns an address on which the connection is under construction + // (initiated by Notify(...)) but not connected, gRPC internals + // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or + // Shutdown state; + // or + // * issues RPC on the connection otherwise. + // iii) If it returns an address on which the connection does not exist, gRPC + // internals treats it as an error and will fail the corresponding RPC. + // + // Therefore, the following is the recommended rule when writing a custom Balancer. + // If opts.BlockingWait is true, it should return a connected address or + // block if there is no connected address. It should respect the timeout or + // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast + // RPCs), it should return an address it has notified via Notify(...) immediately + // instead of blocking. + // + // The function returns put which is called once the rpc has completed or failed. + // put can collect and report RPC stats to a remote load balancer. + // + // This function should only return the errors Balancer cannot recover by itself. + // gRPC internals will fail the RPC if an error is returned. 
+ Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) + // Notify returns a channel that is used by gRPC internals to watch the addresses + // gRPC needs to connect. The addresses might be from a name resolver or remote + // load balancer. gRPC internals will compare it with the existing connected + // addresses. If the address Balancer notified is not in the existing connected + // addresses, gRPC starts to connect the address. If an address in the existing + // connected addresses is not in the notification list, the corresponding connection + // is shutdown gracefully. Otherwise, there are no operations to take. Note that + // the Address slice must be the full list of the Addresses which should be connected. + // It is NOT delta. + Notify() <-chan []Address + // Close shuts down the balancer. + Close() error +} + +// downErr implements net.Error. It is constructed by gRPC internals and passed to the down +// call of Balancer. +type downErr struct { + timeout bool + temporary bool + desc string +} + +func (e downErr) Error() string { return e.desc } +func (e downErr) Timeout() bool { return e.timeout } +func (e downErr) Temporary() bool { return e.temporary } + +func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr { + return downErr{ + timeout: timeout, + temporary: temporary, + desc: fmt.Sprintf(format, a...), + } +} + +// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch +// the name resolution updates and updates the addresses available correspondingly. +func RoundRobin(r naming.Resolver) Balancer { + return &roundRobin{r: r} +} + +type addrInfo struct { + addr Address + connected bool +} + +type roundRobin struct { + r naming.Resolver + w naming.Watcher + addrs []*addrInfo // all the addresses the client should potentially connect + mu sync.Mutex + addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. + next int // index of the next address to return for Get() + waitCh chan struct{} // the channel to block when there is no connected address available + done bool // The Balancer is closed. +} + +func (rr *roundRobin) watchAddrUpdates() error { + updates, err := rr.w.Next() + if err != nil { + grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) + return err + } + rr.mu.Lock() + defer rr.mu.Unlock() + for _, update := range updates { + addr := Address{ + Addr: update.Addr, + Metadata: update.Metadata, + } + switch update.Op { + case naming.Add: + var exist bool + for _, v := range rr.addrs { + if addr == v.addr { + exist = true + grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr) + break + } + } + if exist { + continue + } + rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) + case naming.Delete: + for i, v := range rr.addrs { + if addr == v.addr { + copy(rr.addrs[i:], rr.addrs[i+1:]) + rr.addrs = rr.addrs[:len(rr.addrs)-1] + break + } + } + default: + grpclog.Println("Unknown update.Op ", update.Op) + } + } + // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. + open := make([]Address, len(rr.addrs)) + for i, v := range rr.addrs { + open[i] = v.addr + } + if rr.done { + return ErrClientConnClosing + } + rr.addrCh <- open + return nil +} + +func (rr *roundRobin) Start(target string) error { + if rr.r == nil { + // If there is no name resolver installed, it is not needed to + // do name resolution. 
In this case, target is added into rr.addrs + // as the only address available and rr.addrCh stays nil. + rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) + return nil + } + w, err := rr.r.Resolve(target) + if err != nil { + return err + } + rr.w = w + rr.addrCh = make(chan []Address) + go func() { + for { + if err := rr.watchAddrUpdates(); err != nil { + return + } + } + }() + return nil +} + +// Up sets the connected state of addr and sends notification if there are pending +// Get() calls. +func (rr *roundRobin) Up(addr Address) func(error) { + rr.mu.Lock() + defer rr.mu.Unlock() + var cnt int + for _, a := range rr.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected { + cnt++ + } + } + // addr is only one which is connected. Notify the Get() callers who are blocking. + if cnt == 1 && rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + return func(err error) { + rr.down(addr, err) + } +} + +// down unsets the connected state of addr. +func (rr *roundRobin) down(addr Address, err error) { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, a := range rr.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +// Get returns the next addr in the rotation. +func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + if !opts.BlockingWait { + if len(rr.addrs) == 0 { + rr.mu.Unlock() + err = fmt.Errorf("there is no address available") + return + } + // Returns the next addr on rr.addrs for failfast RPCs. + addr = rr.addrs[rr.next].addr + rr.next++ + rr.mu.Unlock() + return + } + // Wait on rr.waitCh for non-failfast RPCs. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-ch: + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + // The newly added addr got removed by Down() again. 
+ if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + } + } +} + +func (rr *roundRobin) Notify() <-chan []Address { + return rr.addrCh +} + +func (rr *roundRobin) Close() error { + rr.mu.Lock() + defer rr.mu.Unlock() + rr.done = true + if rr.w != nil { + rr.w.Close() + } + if rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + if rr.addrCh != nil { + close(rr.addrCh) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index d4ae68bee..fea07998d 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -36,6 +36,7 @@ package grpc import ( "bytes" "io" + "math" "time" "golang.org/x/net/context" @@ -51,13 +52,20 @@ import ( func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { // Try to acquire header metadata from the server if there is any. var err error + defer func() { + if err != nil { + if _, ok := err.(transport.ConnectionError); !ok { + t.CloseStream(stream, err) + } + } + }() c.headerMD, err = stream.Header() if err != nil { return err } - p := &parser{s: stream} + p := &parser{r: stream} for { - if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil { + if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32); err != nil { if err == io.EOF { break } @@ -76,6 +84,7 @@ func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHd } defer func() { if err != nil { + // If err is connection error, t will be closed, no need to close stream here. if _, ok := err.(transport.ConnectionError); !ok { t.CloseStream(stream, err) } @@ -90,17 +99,21 @@ func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHd return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err) } err = t.Write(stream, outBuf, opts) - if err != nil { + // t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method + // does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following + // recvResponse to get the final status. + if err != nil && err != io.EOF { return nil, err } // Sent successfully. return stream, nil } -// Invoke is called by the generated code. It sends the RPC request on the -// wire and returns after response is received. +// Invoke sends the RPC request on the wire and returns after response is received. +// Invoke is called by generated code. Also users can call Invoke directly when it +// is really needed in their use cases. func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { - var c callInfo + c := defaultCallInfo for _, o := range opts { if err := o.before(&c); err != nil { return toRPCErr(err) @@ -131,19 +144,16 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli Last: true, Delay: false, } - var ( - lastErr error // record the error that happened - ) for { var ( err error t transport.ClientTransport stream *transport.Stream + // Record the put handler from Balancer.Get(...). It is called once the + // RPC has completed or failed. + put func() ) - // TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs. - if lastErr != nil && c.failFast { - return toRPCErr(lastErr) - } + // TODO(zhaoq): Need a formal spec of fail-fast. 
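// A minimal sketch of how the round-robin Balancer above could be installed
// on a client connection, assuming a naming.Resolver implementation r is
// available; WithBalancer is the dial option introduced later in this patch.
// Illustrative only, not part of the vendored file.
func dialRoundRobin(target string, r naming.Resolver) (*ClientConn, error) {
	// RoundRobin watches r for address updates and rotates Get() across the
	// currently connected addresses.
	return Dial(target, WithInsecure(), WithBalancer(RoundRobin(r)))
}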
callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, @@ -151,40 +161,66 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() } - t, err = cc.dopts.picker.Pick(ctx) + gopts := BalancerGetOptions{ + BlockingWait: !c.failFast, + } + t, put, err = cc.getTransport(ctx, gopts) if err != nil { - if lastErr != nil { - // This was a retry; return the error from the last attempt. - return toRPCErr(lastErr) + // TODO(zhaoq): Probably revisit the error handling. + if _, ok := err.(*rpcError); ok { + return err } - return toRPCErr(err) + if err == errConnClosing || err == errConnUnavailable { + if c.failFast { + return Errorf(codes.Unavailable, "%v", err) + } + continue + } + // All the other errors are treated as Internal errors. + return Errorf(codes.Internal, "%v", err) } if c.traceInfo.tr != nil { c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) } stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts) if err != nil { - if _, ok := err.(transport.ConnectionError); ok { - lastErr = err - continue + if put != nil { + put() + put = nil } - if lastErr != nil { - return toRPCErr(lastErr) + // Retry a non-failfast RPC when + // i) there is a connection error; or + // ii) the server started to drain before this RPC was initiated. + if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { + if c.failFast { + return toRPCErr(err) + } + continue } return toRPCErr(err) } - // Receive the response - lastErr = recvResponse(cc.dopts, t, &c, stream, reply) - if _, ok := lastErr.(transport.ConnectionError); ok { - continue + err = recvResponse(cc.dopts, t, &c, stream, reply) + if err != nil { + if put != nil { + put() + put = nil + } + if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { + if c.failFast { + return toRPCErr(err) + } + continue + } + return toRPCErr(err) } if c.traceInfo.tr != nil { c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) } - t.CloseStream(stream, lastErr) - if lastErr != nil { - return toRPCErr(lastErr) + t.CloseStream(stream, nil) + if put != nil { + put() + put = nil } - return Errorf(stream.StatusCode(), stream.StatusDesc()) + return Errorf(stream.StatusCode(), "%s", stream.StatusDesc()) } } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 28e74da8b..1d3b46c60 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -49,22 +49,33 @@ import ( ) var ( - // ErrUnspecTarget indicates that the target address is unspecified. - ErrUnspecTarget = errors.New("grpc: target is unspecified") - // ErrNoTransportSecurity indicates that there is no transport security - // being set for ClientConn. Users should either set one or explicityly - // call WithInsecure DialOption to disable security. - ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") - // ErrCredentialsMisuse indicates that users want to transmit security infomation - // (e.g., oauth2 token) which requires secure connection on an insecure - // connection. - ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)") // ErrClientConnClosing indicates that the operation is illegal because - // the session is closing. + // the ClientConn is closing. 
ErrClientConnClosing = errors.New("grpc: the client connection is closing") - // ErrClientConnTimeout indicates that the connection could not be - // established or re-established within the specified timeout. - ErrClientConnTimeout = errors.New("grpc: timed out trying to connect") + // ErrClientConnTimeout indicates that the ClientConn cannot establish the + // underlying connections within the specified timeout. + ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredentialsMissing indicates that users want to transmit security + // information (e.g., oauth2 token) which requires secure connection on an insecure + // connection. + errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") + // errCredentialsConflict indicates that grpc.WithTransportCredentials() + // and grpc.WithInsecure() are both called for a connection. + errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") + // errNetworkIO indicates that the connection is down due to some network I/O error. + errNetworkIO = errors.New("grpc: failed with network I/O error") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errConnUnavailable indicates that the connection is unavailable. + errConnUnavailable = errors.New("grpc: the connection is unavailable") + errNoAddr = errors.New("grpc: there is no address available to dial") // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second ) @@ -75,9 +86,11 @@ type dialOptions struct { codec Codec cp Compressor dc Decompressor - picker Picker + bs backoffStrategy + balancer Balancer block bool insecure bool + timeout time.Duration copts transport.ConnectOptions } @@ -107,10 +120,38 @@ func WithDecompressor(dc Decompressor) DialOption { } } -// WithPicker returns a DialOption which sets a picker for connection selection. -func WithPicker(p Picker) DialOption { +// WithBalancer returns a DialOption which sets a load balancer. +func WithBalancer(b Balancer) DialOption { + return func(o *dialOptions) { + o.balancer = b + } +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up +// for use. +func WithBackoffConfig(b BackoffConfig) DialOption { + // Set defaults to ensure that provided BackoffConfig is valid and + // unexported fields get default values. 
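For illustration, a minimal client-side sketch combining the new balancer and backoff options. grpc.RoundRobin comes from balancer.go rather than this hunk, and the target address is a placeholder; passing a nil naming.Resolver makes the round-robin balancer treat the dial target as its only address, per the rr.r == nil branch of roundRobin.Start above.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// RoundRobin is the stock Balancer constructor from balancer.go (assumed here,
	// not shown in this hunk). A nil naming.Resolver means the dial target itself
	// becomes the only address, matching the rr.r == nil branch of roundRobin.Start.
	conn, err := grpc.Dial("localhost:50051", // placeholder target
		grpc.WithInsecure(),
		grpc.WithBalancer(grpc.RoundRobin(nil)),
		grpc.WithBackoffMaxDelay(10*time.Second), // cap the reconnect backoff configured above
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}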
+ setDefaults(&b) + return withBackoff(b) +} + +// withBackoff sets the backoff strategy used for retries after a +// failed connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs backoffStrategy) DialOption { return func(o *dialOptions) { - o.picker = p + o.bs = bs } } @@ -133,31 +174,37 @@ func WithInsecure() DialOption { // WithTransportCredentials returns a DialOption which configures a // connection level security credentials (e.g., TLS/SSL). -func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption { +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { return func(o *dialOptions) { - o.copts.AuthOptions = append(o.copts.AuthOptions, creds) + o.copts.TransportCredentials = creds } } // WithPerRPCCredentials returns a DialOption which sets // credentials which will place auth state on each outbound RPC. -func WithPerRPCCredentials(creds credentials.Credentials) DialOption { +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { return func(o *dialOptions) { - o.copts.AuthOptions = append(o.copts.AuthOptions, creds) + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) } } -// WithTimeout returns a DialOption that configures a timeout for dialing a client connection. +// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn +// initially. This is valid if and only if WithBlock() is present. func WithTimeout(d time.Duration) DialOption { return func(o *dialOptions) { - o.copts.Timeout = d + o.timeout = d } } // WithDialer returns a DialOption that specifies a function to use for dialing network addresses. -func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption { +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { return func(o *dialOptions) { - o.copts.Dialer = f + o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, deadline.Sub(time.Now())) + } + return f(addr, 0) + } } } @@ -168,25 +215,96 @@ func WithUserAgent(s string) DialOption { } } -// Dial creates a client connection the given target. +// Dial creates a client connection to the given target. func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext creates a client connection to the given target. ctx can be used to +// cancel or expire the pending connecting. Once this function returns, the +// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close +// to terminate all the pending operations after this function returns. +// This is the EXPERIMENTAL API. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, + conns: make(map[Address]*addrConn), } + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + defer func() { + select { + case <-ctx.Done(): + conn, err = nil, ctx.Err() + default: + } + + if err != nil { + cc.Close() + } + }() + for _, opt := range opts { opt(&cc.dopts) } + + // Set defaults. if cc.dopts.codec == nil { - // Set the default codec. 
cc.dopts.codec = protoCodec{} } - if cc.dopts.picker == nil { - cc.dopts.picker = &unicastPicker{ - target: target, + if cc.dopts.bs == nil { + cc.dopts.bs = DefaultBackoffConfig + } + + var ( + ok bool + addrs []Address + ) + if cc.dopts.balancer == nil { + // Connect to target directly if balancer is nil. + addrs = append(addrs, Address{Addr: target}) + } else { + if err := cc.dopts.balancer.Start(target); err != nil { + return nil, err + } + ch := cc.dopts.balancer.Notify() + if ch == nil { + // There is no name resolver installed. + addrs = append(addrs, Address{Addr: target}) + } else { + addrs, ok = <-ch + if !ok || len(addrs) == 0 { + return nil, errNoAddr + } } } - if err := cc.dopts.picker.Init(cc); err != nil { - return nil, err + waitC := make(chan error, 1) + go func() { + for _, a := range addrs { + if err := cc.resetAddrConn(a, false, nil); err != nil { + waitC <- err + return + } + } + close(waitC) + }() + var timeoutCh <-chan time.Time + if cc.dopts.timeout > 0 { + timeoutCh = time.After(cc.dopts.timeout) + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case err := <-waitC: + if err != nil { + return nil, err + } + case <-timeoutCh: + return nil, ErrClientConnTimeout + } + // If balancer is nil or balancer.Notify() is nil, ok will be false here. + // The lbWatcher goroutine will not be created. + if ok { + go cc.lbWatcher() } colonPos := strings.LastIndex(target, ":") if colonPos == -1 { @@ -229,361 +347,511 @@ func (s ConnectivityState) String() string { } } -// ClientConn represents a client connection to an RPC service. +// ClientConn represents a client connection to an RPC server. type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + target string authority string dopts dialOptions -} -// State returns the connectivity state of cc. -// This is EXPERIMENTAL API. -func (cc *ClientConn) State() (ConnectivityState, error) { - return cc.dopts.picker.State() + mu sync.RWMutex + conns map[Address]*addrConn } -// WaitForStateChange blocks until the state changes to something other than the sourceState. -// It returns the new state or error. -// This is EXPERIMENTAL API. -func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - return cc.dopts.picker.WaitForStateChange(ctx, sourceState) -} - -// Close starts to tear down the ClientConn. -func (cc *ClientConn) Close() error { - return cc.dopts.picker.Close() -} - -// Conn is a client connection to a single destination. -type Conn struct { - target string - dopts dialOptions - resetChan chan int - shutdownChan chan struct{} - events trace.EventLog - - mu sync.Mutex - state ConnectivityState - stateCV *sync.Cond - // ready is closed and becomes nil when a new transport is up or failed - // due to timeout. - ready chan struct{} - transport transport.ClientTransport +func (cc *ClientConn) lbWatcher() { + for addrs := range cc.dopts.balancer.Notify() { + var ( + add []Address // Addresses need to setup connections. + del []*addrConn // Connections need to tear down. + ) + cc.mu.Lock() + for _, a := range addrs { + if _, ok := cc.conns[a]; !ok { + add = append(add, a) + } + } + for k, c := range cc.conns { + var keep bool + for _, a := range addrs { + if k == a { + keep = true + break + } + } + if !keep { + del = append(del, c) + delete(cc.conns, c.addr) + } + } + cc.mu.Unlock() + for _, a := range add { + cc.resetAddrConn(a, true, nil) + } + for _, c := range del { + c.tearDown(errConnDrain) + } + } } -// NewConn creates a Conn. 
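A usage sketch for the new DialContext entry point (target and timeout are placeholders). Pairing it with WithBlock, analogous to the WithTimeout/WithBlock note above, lets the context deadline bound the whole connection attempt.

package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// WithBlock makes the dial wait for the first transport, so the context
	// deadline really bounds connection establishment; without it DialContext
	// returns immediately and connects in the background.
	conn, err := grpc.DialContext(ctx, "localhost:50051", // placeholder target
		grpc.WithInsecure(),
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}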
-func NewConn(cc *ClientConn) (*Conn, error) { - if cc.target == "" { - return nil, ErrUnspecTarget - } - c := &Conn{ - target: cc.target, - dopts: cc.dopts, - resetChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - } +// resetAddrConn creates an addrConn for addr and adds it to cc.conns. +// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason. +// If tearDownErr is nil, errConnDrain will be used instead. +func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error { + ac := &addrConn{ + cc: cc, + addr: addr, + dopts: cc.dopts, + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + ac.stateCV = sync.NewCond(&ac.mu) if EnableTracing { - c.events = trace.NewEventLog("grpc.ClientConn", c.target) + ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr) } - if !c.dopts.insecure { - var ok bool - for _, cd := range c.dopts.copts.AuthOptions { - if _, ok := cd.(credentials.TransportAuthenticator); !ok { - continue - } - ok = true - } - if !ok { - return nil, ErrNoTransportSecurity + if !ac.dopts.insecure { + if ac.dopts.copts.TransportCredentials == nil { + return errNoTransportSecurity } } else { - for _, cd := range c.dopts.copts.AuthOptions { + if ac.dopts.copts.TransportCredentials != nil { + return errCredentialsConflict + } + for _, cd := range ac.dopts.copts.PerRPCCredentials { if cd.RequireTransportSecurity() { - return nil, ErrCredentialsMisuse + return errTransportCredentialsMissing } } } - c.stateCV = sync.NewCond(&c.mu) - if c.dopts.block { - if err := c.resetTransport(false); err != nil { - c.Close() - return nil, err + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + stale := cc.conns[ac.addr] + cc.conns[ac.addr] = ac + cc.mu.Unlock() + if stale != nil { + // There is an addrConn alive on ac.addr already. This could be due to + // 1) a buggy Balancer notifies duplicated Addresses; + // 2) goaway was received, a new ac will replace the old ac. + // The old ac should be deleted from cc.conns, but the + // underlying transport should drain rather than close. + if tearDownErr == nil { + // tearDownErr is nil if resetAddrConn is called by + // 1) Dial + // 2) lbWatcher + // In both cases, the stale ac should drain, not close. + stale.tearDown(errConnDrain) + } else { + stale.tearDown(tearDownErr) + } + } + // skipWait may overwrite the decision in ac.dopts.block. + if ac.dopts.block && !skipWait { + if err := ac.resetTransport(false); err != nil { + if err != errConnClosing { + // Tear down ac and delete it from cc.conns. + cc.mu.Lock() + delete(cc.conns, ac.addr) + cc.mu.Unlock() + ac.tearDown(err) + } + if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { + return e.Origin() + } + return err } // Start to monitor the error status of transport. - go c.transportMonitor() + go ac.transportMonitor() } else { // Start a goroutine connecting to the server asynchronously. go func() { - if err := c.resetTransport(false); err != nil { - grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err) - c.Close() + if err := ac.resetTransport(false); err != nil { + grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. 
+ ac.tearDown(err) + } return } - c.transportMonitor() + ac.transportMonitor() }() } - return c, nil + return nil +} + +func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { + var ( + ac *addrConn + ok bool + put func() + ) + if cc.dopts.balancer == nil { + // If balancer is nil, there should be only one addrConn available. + cc.mu.RLock() + if cc.conns == nil { + cc.mu.RUnlock() + return nil, nil, toRPCErr(ErrClientConnClosing) + } + for _, ac = range cc.conns { + // Break after the first iteration to get the first addrConn. + ok = true + break + } + cc.mu.RUnlock() + } else { + var ( + addr Address + err error + ) + addr, put, err = cc.dopts.balancer.Get(ctx, opts) + if err != nil { + return nil, nil, toRPCErr(err) + } + cc.mu.RLock() + if cc.conns == nil { + cc.mu.RUnlock() + return nil, nil, toRPCErr(ErrClientConnClosing) + } + ac, ok = cc.conns[addr] + cc.mu.RUnlock() + } + if !ok { + if put != nil { + put() + } + return nil, nil, errConnClosing + } + t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait) + if err != nil { + if put != nil { + put() + } + return nil, nil, err + } + return t, put, nil } -// printf records an event in cc's event log, unless cc has been closed. -// REQUIRES cc.mu is held. -func (cc *Conn) printf(format string, a ...interface{}) { - if cc.events != nil { - cc.events.Printf(format, a...) +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { + cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.mu.Unlock() + if cc.dopts.balancer != nil { + cc.dopts.balancer.Close() + } + for _, ac := range conns { + ac.tearDown(ErrClientConnClosing) } + return nil } -// errorf records an error in cc's event log, unless cc has been closed. -// REQUIRES cc.mu is held. -func (cc *Conn) errorf(format string, a ...interface{}) { - if cc.events != nil { - cc.events.Errorf(format, a...) +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + addr Address + dopts dialOptions + events trace.EventLog + + mu sync.Mutex + state ConnectivityState + stateCV *sync.Cond + down func(error) // the handler called when a connection is down. + // ready is closed and becomes nil when a new transport is up or failed + // due to timeout. + ready chan struct{} + transport transport.ClientTransport + + // The reason this addrConn is torn down. + tearDownErr error +} + +// printf records an event in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) printf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Printf(format, a...) } } -// State returns the connectivity state of the Conn -func (cc *Conn) State() ConnectivityState { - cc.mu.Lock() - defer cc.mu.Unlock() - return cc.state +// errorf records an error in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) errorf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Errorf(format, a...) + } } -// WaitForStateChange blocks until the state changes to something other than the sourceState. 
-func (cc *Conn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - cc.mu.Lock() - defer cc.mu.Unlock() - if sourceState != cc.state { - return cc.state, nil +// getState returns the connectivity state of the Conn +func (ac *addrConn) getState() ConnectivityState { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} + +// waitForStateChange blocks until the state changes to something other than the sourceState. +func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if sourceState != ac.state { + return ac.state, nil } done := make(chan struct{}) var err error go func() { select { case <-ctx.Done(): - cc.mu.Lock() + ac.mu.Lock() err = ctx.Err() - cc.stateCV.Broadcast() - cc.mu.Unlock() + ac.stateCV.Broadcast() + ac.mu.Unlock() case <-done: } }() defer close(done) - for sourceState == cc.state { - cc.stateCV.Wait() + for sourceState == ac.state { + ac.stateCV.Wait() if err != nil { - return cc.state, err + return ac.state, err } } - return cc.state, nil + return ac.state, nil } -// NotifyReset tries to signal the underlying transport needs to be reset due to -// for example a name resolution change in flight. -func (cc *Conn) NotifyReset() { - select { - case cc.resetChan <- 0: - default: - } -} - -func (cc *Conn) resetTransport(closeTransport bool) error { - var retries int - start := time.Now() - for { - cc.mu.Lock() - cc.printf("connecting") - if cc.state == Shutdown { - // cc.Close() has been invoked. - cc.mu.Unlock() - return ErrClientConnClosing - } - cc.state = Connecting - cc.stateCV.Broadcast() - cc.mu.Unlock() - if closeTransport { - cc.transport.Close() +func (ac *addrConn) resetTransport(closeTransport bool) error { + for retries := 0; ; retries++ { + ac.mu.Lock() + ac.printf("connecting") + if ac.state == Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + return errConnClosing } - // Adjust timeout for the current try. - copts := cc.dopts.copts - if copts.Timeout < 0 { - cc.Close() - return ErrClientConnTimeout + if ac.down != nil { + ac.down(downErrorf(false, true, "%v", errNetworkIO)) + ac.down = nil } - if copts.Timeout > 0 { - copts.Timeout -= time.Since(start) - if copts.Timeout <= 0 { - cc.Close() - return ErrClientConnTimeout - } + ac.state = Connecting + ac.stateCV.Broadcast() + t := ac.transport + ac.mu.Unlock() + if closeTransport && t != nil { + t.Close() } - sleepTime := backoff(retries) - timeout := sleepTime - if timeout < minConnectTimeout { - timeout = minConnectTimeout - } - if copts.Timeout == 0 || copts.Timeout > timeout { - copts.Timeout = timeout + sleepTime := ac.dopts.bs.backoff(retries) + timeout := minConnectTimeout + if timeout < sleepTime { + timeout = sleepTime } + ctx, cancel := context.WithTimeout(ac.ctx, timeout) connectTime := time.Now() - addr, err := cc.dopts.picker.PickAddr() - var newTransport transport.ClientTransport - if err == nil { - newTransport, err = transport.NewClientTransport(addr, &copts) - } + newTransport, err := transport.NewClientTransport(ctx, ac.addr.Addr, ac.dopts.copts) if err != nil { - cc.mu.Lock() - if cc.state == Shutdown { - // cc.Close() has been invoked. 
- cc.mu.Unlock() - return ErrClientConnClosing - } - cc.errorf("transient failure: %v", err) - cc.state = TransientFailure - cc.stateCV.Broadcast() - if cc.ready != nil { - close(cc.ready) - cc.ready = nil + cancel() + + if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { + return err } - cc.mu.Unlock() - sleepTime -= time.Since(connectTime) - if sleepTime < 0 { - sleepTime = 0 + grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, ac.addr) + ac.mu.Lock() + if ac.state == Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + return errConnClosing } - // Fail early before falling into sleep. - if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) { - cc.mu.Lock() - cc.errorf("connection timeout") - cc.mu.Unlock() - cc.Close() - return ErrClientConnTimeout + ac.errorf("transient failure: %v", err) + ac.state = TransientFailure + ac.stateCV.Broadcast() + if ac.ready != nil { + close(ac.ready) + ac.ready = nil } + ac.mu.Unlock() closeTransport = false - time.Sleep(sleepTime) - retries++ - grpclog.Printf("grpc: Conn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target) + select { + case <-time.After(sleepTime - time.Since(connectTime)): + case <-ac.ctx.Done(): + return ac.ctx.Err() + } continue } - cc.mu.Lock() - cc.printf("ready") - if cc.state == Shutdown { - // cc.Close() has been invoked. - cc.mu.Unlock() + ac.mu.Lock() + ac.printf("ready") + if ac.state == Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() newTransport.Close() - return ErrClientConnClosing + return errConnClosing } - cc.state = Ready - cc.stateCV.Broadcast() - cc.transport = newTransport - if cc.ready != nil { - close(cc.ready) - cc.ready = nil + ac.state = Ready + ac.stateCV.Broadcast() + ac.transport = newTransport + if ac.ready != nil { + close(ac.ready) + ac.ready = nil } - cc.mu.Unlock() + if ac.cc.dopts.balancer != nil { + ac.down = ac.cc.dopts.balancer.Up(ac.addr) + } + ac.mu.Unlock() return nil } } -func (cc *Conn) reconnect() bool { - cc.mu.Lock() - if cc.state == Shutdown { - // cc.Close() has been invoked. - cc.mu.Unlock() - return false - } - cc.state = TransientFailure - cc.stateCV.Broadcast() - cc.mu.Unlock() - if err := cc.resetTransport(true); err != nil { - // The ClientConn is closing. - cc.mu.Lock() - cc.printf("transport exiting: %v", err) - cc.mu.Unlock() - grpclog.Printf("grpc: Conn.transportMonitor exits due to: %v", err) - return false - } - return true -} - // Run in a goroutine to track the error in transport and create the // new transport if an error happens. It returns when the channel is closing. -func (cc *Conn) transportMonitor() { +func (ac *addrConn) transportMonitor() { for { + ac.mu.Lock() + t := ac.transport + ac.mu.Unlock() select { - // shutdownChan is needed to detect the teardown when - // the ClientConn is idle (i.e., no RPC in flight). - case <-cc.shutdownChan: + // This is needed to detect the teardown when + // the addrConn is idle (i.e., no RPC in flight). + case <-ac.ctx.Done(): + select { + case <-t.Error(): + t.Close() + default: + } return - case <-cc.resetChan: - if !cc.reconnect() { + case <-t.GoAway(): + // If GoAway happens without any network I/O error, ac is closed without shutting down the + // underlying transport (the transport will be closed when all the pending RPCs finished or + // failed.). 
+ // If GoAway and some network I/O error happen concurrently, ac and its underlying transport + // are closed. + // In both cases, a new ac is created. + select { + case <-t.Error(): + ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) + default: + ac.cc.resetAddrConn(ac.addr, true, errConnDrain) + } + return + case <-t.Error(): + select { + case <-ac.ctx.Done(): + t.Close() return + case <-t.GoAway(): + ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) + return + default: } - case <-cc.transport.Error(): - if !cc.reconnect() { + ac.mu.Lock() + if ac.state == Shutdown { + // ac has been shutdown. + ac.mu.Unlock() return } - // Tries to drain reset signal if there is any since it is out-dated. - select { - case <-cc.resetChan: - default: + ac.state = TransientFailure + ac.stateCV.Broadcast() + ac.mu.Unlock() + if err := ac.resetTransport(true); err != nil { + ac.mu.Lock() + ac.printf("transport exiting: %v", err) + ac.mu.Unlock() + grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) + } + return } } } } -// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed. -func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) { +// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or +// iv) transport is in TransientFailure and there's no balancer/failfast is true. +func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { for { - cc.mu.Lock() + ac.mu.Lock() switch { - case cc.state == Shutdown: - cc.mu.Unlock() - return nil, ErrClientConnClosing - case cc.state == Ready: - cc.mu.Unlock() - return cc.transport, nil - default: - ready := cc.ready - if ready == nil { - ready = make(chan struct{}) - cc.ready = ready + case ac.state == Shutdown: + if failfast || !hasBalancer { + // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr. + err := ac.tearDownErr + ac.mu.Unlock() + return nil, err } - cc.mu.Unlock() - select { - case <-ctx.Done(): - return nil, transport.ContextErr(ctx.Err()) - // Wait until the new transport is ready or failed. - case <-ready: + ac.mu.Unlock() + return nil, errConnClosing + case ac.state == Ready: + ct := ac.transport + ac.mu.Unlock() + return ct, nil + case ac.state == TransientFailure: + if failfast || hasBalancer { + ac.mu.Unlock() + return nil, errConnUnavailable } } + ready := ac.ready + if ready == nil { + ready = make(chan struct{}) + ac.ready = ready + } + ac.mu.Unlock() + select { + case <-ctx.Done(): + return nil, toRPCErr(ctx.Err()) + // Wait until the new transport is ready or failed. + case <-ready: + } } } -// Close starts to tear down the Conn. Returns ErrClientConnClosing if -// it has been closed (mostly due to dial time-out). +// tearDown starts to tear down the addrConn. // TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many ClientConn's in a +// some edge cases (e.g., the caller opens and closes many addrConn's in a // tight loop. 
-func (cc *Conn) Close() error { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.state == Shutdown { - return ErrClientConnClosing - } - cc.state = Shutdown - cc.stateCV.Broadcast() - if cc.events != nil { - cc.events.Finish() - cc.events = nil - } - if cc.ready != nil { - close(cc.ready) - cc.ready = nil - } - if cc.transport != nil { - cc.transport.Close() - } - if cc.shutdownChan != nil { - close(cc.shutdownChan) - } - return nil +// tearDown doesn't remove ac from ac.cc.conns. +func (ac *addrConn) tearDown(err error) { + ac.cancel() + + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.down != nil { + ac.down(downErrorf(false, false, "%v", err)) + ac.down = nil + } + if err == errConnDrain && ac.transport != nil { + // GracefulClose(...) may be executed multiple times when + // i) receiving multiple GoAway frames from the server; or + // ii) there are concurrent name resolver/Balancer triggered + // address removal and GoAway. + ac.transport.GracefulClose() + } + if ac.state == Shutdown { + return + } + ac.state = Shutdown + ac.tearDownErr = err + ac.stateCV.Broadcast() + if ac.events != nil { + ac.events.Finish() + ac.events = nil + } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + if ac.transport != nil && err != errConnDrain { + ac.transport.Close() + } + return } diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 681f64e44..3f17b7062 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -44,7 +44,6 @@ import ( "io/ioutil" "net" "strings" - "time" "golang.org/x/net/context" ) @@ -54,9 +53,9 @@ var ( alpnProtoStr = []string{"h2"} ) -// Credentials defines the common interface all supported credentials must -// implement. -type Credentials interface { +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { // GetRequestMetadata gets the current request metadata, refreshing // tokens if required. This should be called by the transport layer on // each request, and the data should be populated in headers or other @@ -66,7 +65,7 @@ type Credentials interface { // TODO(zhaoq): Define the set of the qualified keys instead of leaving // it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) - // RequireTransportSecurity indicates whether the credentails requires + // RequireTransportSecurity indicates whether the credentials requires // transport security. RequireTransportSecurity() bool } @@ -87,20 +86,20 @@ type AuthInfo interface { AuthType() string } -// TransportAuthenticator defines the common interface for all the live gRPC wire +// TransportCredentials defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). -type TransportAuthenticator interface { +type TransportCredentials interface { // ClientHandshake does the authentication handshake specified by the corresponding // authentication protocol on rawConn for clients. It returns the authenticated // connection and the corresponding auth information about the connection. - ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error) + // Implementations must use the provided context to implement timely cancellation. 
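Since splitting Credentials into PerRPCCredentials and TransportCredentials is the core of this change, a sketch of a custom PerRPCCredentials implementation against the interface above; the header name and token scheme are illustrative only.

package example

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/credentials"
)

// staticTokenCreds attaches a fixed bearer token to every outbound RPC.
// The header name and token scheme are illustrative, not part of the patch.
type staticTokenCreds struct {
	token string
}

// Compile-time check that the type satisfies the new interface.
var _ credentials.PerRPCCredentials = staticTokenCreds{}

func (c staticTokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

// RequireTransportSecurity makes gRPC reject this credential on insecure
// connections (compare errTransportCredentialsMissing above).
func (c staticTokenCreds) RequireTransportSecurity() bool {
	return true
}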
+ ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns // the authenticated connection and the corresponding auth information about // the connection. - ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) - // Info provides the ProtocolInfo of this TransportAuthenticator. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. Info() ProtocolInfo - Credentials } // TLSInfo contains the auth information for a TLS authenticated connection. @@ -109,6 +108,7 @@ type TLSInfo struct { State tls.ConnectionState } +// AuthType returns the type of TLSInfo as a string. func (t TLSInfo) AuthType() string { return "tls" } @@ -116,7 +116,7 @@ func (t TLSInfo) AuthType() string { // tlsCreds is the credentials required for authenticating a connection using TLS. type tlsCreds struct { // TLS configuration - config tls.Config + config *tls.Config } func (c tlsCreds) Info() ProtocolInfo { @@ -136,40 +136,28 @@ func (c *tlsCreds) RequireTransportSecurity() bool { return true } -type timeoutError struct{} - -func (timeoutError) Error() string { return "credentials: Dial timed out" } -func (timeoutError) Timeout() bool { return true } -func (timeoutError) Temporary() bool { return true } - -func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) { - // borrow some code from tls.DialWithDialer - var errChannel chan error - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- timeoutError{} - }) - } - if c.config.ServerName == "" { +func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := cloneTLSConfig(c.config) + if cfg.ServerName == "" { colonPos := strings.LastIndex(addr, ":") if colonPos == -1 { colonPos = len(addr) } - c.config.ServerName = addr[:colonPos] - } - conn := tls.Client(rawConn, &c.config) - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - err = <-errChannel + cfg.ServerName = addr[:colonPos] } - if err != nil { - rawConn.Close() - return nil, nil, err + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + }() + select { + case err := <-errChannel: + if err != nil { + return nil, nil, err + } + case <-ctx.Done(): + return nil, nil, ctx.Err() } // TODO(zhaoq): Omit the auth info for client now. It is more for // information than anything else. @@ -177,28 +165,27 @@ func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.D } func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { - conn := tls.Server(rawConn, &c.config) + conn := tls.Server(rawConn, c.config) if err := conn.Handshake(); err != nil { - rawConn.Close() return nil, nil, err } return conn, TLSInfo{conn.ConnectionState()}, nil } -// NewTLS uses c to construct a TransportAuthenticator based on TLS. -func NewTLS(c *tls.Config) TransportAuthenticator { - tc := &tlsCreds{*c} +// NewTLS uses c to construct a TransportCredentials based on TLS. 
+func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{cloneTLSConfig(c)} tc.config.NextProtos = alpnProtoStr return tc } // NewClientTLSFromCert constructs a TLS from the input certificate for client. -func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator { +func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportCredentials { return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}) } // NewClientTLSFromFile constructs a TLS from the input certificate file for client. -func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) { +func NewClientTLSFromFile(certFile, serverName string) (TransportCredentials, error) { b, err := ioutil.ReadFile(certFile) if err != nil { return nil, err @@ -211,13 +198,13 @@ func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, } // NewServerTLSFromCert constructs a TLS from the input certificate for server. -func NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator { +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) } // NewServerTLSFromFile constructs a TLS from the input certificate file and key // file for server. -func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) { +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { cert, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { return nil, err diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go new file mode 100644 index 000000000..9647b9ec8 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go @@ -0,0 +1,76 @@ +// +build go1.7 + +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
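A short client sketch of the renamed TransportCredentials API using the constructors above; the CA file and server name are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// NewClientTLSFromFile builds a TransportCredentials that verifies the server
	// against the given CA bundle and server name (both placeholders here).
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "server.example.com")
	if err != nil {
		log.Fatalf("failed to load CA certificate: %v", err)
	}
	conn, err := grpc.Dial("server.example.com:443",
		grpc.WithTransportCredentials(creds), // replaces the old TransportAuthenticator option
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}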
+ * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO replace this function with official clone function. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, + Renegotiation: cfg.Renegotiation, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go new file mode 100644 index 000000000..09b8d12c7 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go @@ -0,0 +1,74 @@ +// +build !go1.7 + +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO replace this function with official clone function. 
+func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go index 04943fdf0..8e68c4d73 100644 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -45,7 +45,7 @@ import ( "google.golang.org/grpc/credentials" ) -// TokenSource supplies credentials from an oauth2.TokenSource. +// TokenSource supplies PerRPCCredentials from an oauth2.TokenSource. type TokenSource struct { oauth2.TokenSource } @@ -57,10 +57,11 @@ func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (ma return nil, err } return map[string]string{ - "authorization": token.TokenType + " " + token.AccessToken, + "authorization": token.Type() + " " + token.AccessToken, }, nil } +// RequireTransportSecurity indicates whether the credentails requires transport security. func (ts TokenSource) RequireTransportSecurity() bool { return true } @@ -69,7 +70,8 @@ type jwtAccess struct { jsonKey []byte } -func NewJWTAccessFromFile(keyFile string) (credentials.Credentials, error) { +// NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. +func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { jsonKey, err := ioutil.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) @@ -77,7 +79,8 @@ func NewJWTAccessFromFile(keyFile string) (credentials.Credentials, error) { return NewJWTAccessFromKey(jsonKey) } -func NewJWTAccessFromKey(jsonKey []byte) (credentials.Credentials, error) { +// NewJWTAccessFromKey creates PerRPCCredentials from the given jsonKey. +func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) { return jwtAccess{jsonKey}, nil } @@ -99,13 +102,13 @@ func (j jwtAccess) RequireTransportSecurity() bool { return true } -// oauthAccess supplies credentials from a given token. +// oauthAccess supplies PerRPCCredentials from a given token. type oauthAccess struct { token oauth2.Token } -// NewOauthAccess constructs the credentials using a given token. -func NewOauthAccess(token *oauth2.Token) credentials.Credentials { +// NewOauthAccess constructs the PerRPCCredentials using a given token. +func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { return oauthAccess{token: *token} } @@ -119,15 +122,15 @@ func (oa oauthAccess) RequireTransportSecurity() bool { return true } -// NewComputeEngine constructs the credentials that fetches access tokens from +// NewComputeEngine constructs the PerRPCCredentials that fetches access tokens from // Google Compute Engine (GCE)'s metadata server. 
It is only valid to use this // if your program is running on a GCE instance. // TODO(dsymonds): Deprecate and remove this. -func NewComputeEngine() credentials.Credentials { +func NewComputeEngine() credentials.PerRPCCredentials { return TokenSource{google.ComputeTokenSource("")} } -// serviceAccount represents credentials via JWT signing key. +// serviceAccount represents PerRPCCredentials via JWT signing key. type serviceAccount struct { config *jwt.Config } @@ -146,9 +149,9 @@ func (s serviceAccount) RequireTransportSecurity() bool { return true } -// NewServiceAccountFromKey constructs the credentials using the JSON key slice +// NewServiceAccountFromKey constructs the PerRPCCredentials using the JSON key slice // from a Google Developers service account. -func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.Credentials, error) { +func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerRPCCredentials, error) { config, err := google.JWTConfigFromJSON(jsonKey, scope...) if err != nil { return nil, err @@ -156,9 +159,9 @@ func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.Cred return serviceAccount{config: config}, nil } -// NewServiceAccountFromFile constructs the credentials using the JSON key file +// NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file // of a Google Developers service account. -func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.Credentials, error) { +func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { jsonKey, err := ioutil.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) @@ -168,7 +171,7 @@ func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.Cre // NewApplicationDefault returns "Application Default Credentials". For more // detail, see https://developers.google.com/accounts/docs/application-default-credentials. -func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.Credentials, error) { +func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.PerRPCCredentials, error) { t, err := google.DefaultTokenSource(ctx, scope...) if err != nil { return nil, err diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index c63847745..a35f21885 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -1,6 +1,6 @@ /* Package grpc implements an RPC system called gRPC. -See https://github.com/grpc/grpc for more information about gRPC. +See www.grpc.io for more information about gRPC. */ package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1alpha/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go similarity index 58% rename from vendor/google.golang.org/grpc/health/grpc_health_v1alpha/health.pb.go rename to vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 8e89dc9f2..0e6a9100a 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1alpha/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -3,7 +3,7 @@ // DO NOT EDIT! /* -Package grpc_health_v1alpha is a generated protocol buffer package. +Package grpc_health_v1 is a generated protocol buffer package. 
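A sketch of attaching the oauth-based PerRPCCredentials above to a connection. The token is a placeholder, and because RequireTransportSecurity returns true the dial must also supply TransportCredentials.

package main

import (
	"log"

	"golang.org/x/oauth2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	// Every outbound RPC gets an "authorization" header derived from this token
	// via the PerRPCCredentials interface; the token value is a placeholder.
	perRPC := oauth.NewOauthAccess(&oauth2.Token{AccessToken: "placeholder-token"})
	conn, err := grpc.Dial("server.example.com:443", // placeholder target
		// A nil CertPool falls back to the host's root CAs.
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "server.example.com")),
		grpc.WithPerRPCCredentials(perRPC),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}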
It is generated from these files: health.proto @@ -12,7 +12,7 @@ It has these top-level messages: HealthCheckRequest HealthCheckResponse */ -package grpc_health_v1alpha +package grpc_health_v1 import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -30,7 +30,9 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.ProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type HealthCheckResponse_ServingStatus int32 @@ -68,7 +70,7 @@ func (*HealthCheckRequest) ProtoMessage() {} func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } type HealthCheckResponse struct { - Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1alpha.HealthCheckResponse_ServingStatus" json:"status,omitempty"` + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` } func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } @@ -77,15 +79,19 @@ func (*HealthCheckResponse) ProtoMessage() {} func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func init() { - proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1alpha.HealthCheckRequest") - proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1alpha.HealthCheckResponse") - proto.RegisterEnum("grpc.health.v1alpha.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) + proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") + proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + // Client API for Health service type HealthClient interface { @@ -102,7 +108,7 @@ func NewHealthClient(cc *grpc.ClientConn) HealthClient { func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { out := new(HealthCheckResponse) - err := grpc.Invoke(ctx, "/grpc.health.v1alpha.Health/Check", in, out, c.cc, opts...) + err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...) 
if err != nil { return nil, err } @@ -119,20 +125,26 @@ func RegisterHealthServer(s *grpc.Server, srv HealthServer) { s.RegisterService(&_Health_serviceDesc, srv) } -func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(HealthCheckRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(HealthServer).Check(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) } var _Health_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.health.v1alpha.Health", + ServiceName: "grpc.health.v1.Health", HandlerType: (*HealthServer)(nil), Methods: []grpc.MethodDesc{ { @@ -140,23 +152,25 @@ var _Health_serviceDesc = grpc.ServiceDesc{ Handler: _Health_Check_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: fileDescriptor0, } +func init() { proto.RegisterFile("health.proto", fileDescriptor0) } + var fileDescriptor0 = []byte{ - // 209 bytes of a gzipped FileDescriptorProto + // 204 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc, - 0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4e, 0x2f, 0x2a, 0x48, 0xd6, 0x83, - 0x0a, 0x95, 0x19, 0x26, 0xe6, 0x14, 0x64, 0x24, 0x2a, 0xe9, 0x71, 0x09, 0x79, 0x80, 0x45, 0x9c, - 0x33, 0x52, 0x93, 0xb3, 0x83, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x24, 0xb8, 0xd8, 0x8b, - 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x60, 0x5c, 0xa5, - 0x85, 0x8c, 0x5c, 0xc2, 0x28, 0x1a, 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xfc, 0xb8, 0xd8, - 0x8a, 0x4b, 0x12, 0x4b, 0x4a, 0x8b, 0xc1, 0x1a, 0xf8, 0x8c, 0xcc, 0xf4, 0xb0, 0xd8, 0xa6, 0x87, - 0x45, 0xa7, 0x5e, 0x30, 0xc8, 0xe4, 0xbc, 0xf4, 0x60, 0xb0, 0xee, 0x20, 0xa8, 0x29, 0x4a, 0x56, - 0x5c, 0xbc, 0x28, 0x12, 0x42, 0xdc, 0x5c, 0xec, 0xa1, 0x7e, 0xde, 0x7e, 0xfe, 0xe1, 0x7e, 0x02, - 0x0c, 0x20, 0x4e, 0xb0, 0x6b, 0x50, 0x98, 0xa7, 0x9f, 0xbb, 0x00, 0xa3, 0x10, 0x3f, 0x17, 0xb7, - 0x9f, 0x7f, 0x48, 0x3c, 0x4c, 0x80, 0xc9, 0x28, 0x85, 0x8b, 0x0d, 0x62, 0x91, 0x50, 0x14, 0x17, - 0x2b, 0xd8, 0x32, 0x21, 0x75, 0xc2, 0xce, 0x01, 0xfb, 0x5c, 0x4a, 0x83, 0x58, 0x77, 0x27, 0xb1, - 0x81, 0x43, 0xd5, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe1, 0x3f, 0xd0, 0xe1, 0x65, 0x01, 0x00, - 0x00, + 0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4b, 0x2f, 0x2a, 0x48, 0xd6, 0x83, + 0x0a, 0x95, 0x19, 0x2a, 0xe9, 0x71, 0x09, 0x79, 0x80, 0x39, 0xce, 0x19, 0xa9, 0xc9, 0xd9, 0x41, + 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0x45, 0x65, 0x99, 0xc9, + 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0xd2, 0x1c, 0x46, 0x2e, 0x61, 0x14, + 0x0d, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x42, 0x9e, 0x5c, 0x6c, 0xc5, 0x25, 0x89, 0x25, 0xa5, + 0xc5, 0x60, 0x0d, 0x7c, 0x46, 0x86, 0x7a, 0xa8, 0x16, 0xe9, 0x61, 0xd1, 0xa4, 0x17, 0x0c, 0x32, + 0x34, 0x2f, 0x3d, 0x18, 0xac, 0x31, 0x08, 0x6a, 0x80, 0x92, 0x15, 0x17, 0x2f, 0x8a, 0x84, 0x10, + 0x37, 
0x17, 0x7b, 0xa8, 0x9f, 0xb7, 0x9f, 0x7f, 0xb8, 0x9f, 0x00, 0x03, 0x88, 0x13, 0xec, 0x1a, + 0x14, 0xe6, 0xe9, 0xe7, 0x2e, 0xc0, 0x28, 0xc4, 0xcf, 0xc5, 0xed, 0xe7, 0x1f, 0x12, 0x0f, 0x13, + 0x60, 0x32, 0x8a, 0xe2, 0x62, 0x83, 0x58, 0x24, 0x14, 0xc0, 0xc5, 0x0a, 0xb6, 0x4c, 0x48, 0x09, + 0xaf, 0x4b, 0xc0, 0xfe, 0x95, 0x52, 0x26, 0xc2, 0xb5, 0x49, 0x6c, 0xe0, 0x10, 0x34, 0x06, 0x04, + 0x00, 0x00, 0xff, 0xff, 0xac, 0x56, 0x2a, 0xcb, 0x51, 0x01, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto deleted file mode 100644 index 91c7f06e9..000000000 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1alpha/health.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package grpc.health.v1alpha; - -message HealthCheckRequest { - string service = 1; -} - -message HealthCheckResponse { - enum ServingStatus { - UNKNOWN = 0; - SERVING = 1; - NOT_SERVING = 2; - } - ServingStatus status = 1; -} - -service Health{ - rpc Check(HealthCheckRequest) returns (HealthCheckResponse); -} diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go index 7930bde7f..342552986 100644 --- a/vendor/google.golang.org/grpc/health/health.go +++ b/vendor/google.golang.org/grpc/health/health.go @@ -8,22 +8,25 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" - healthpb "google.golang.org/grpc/health/grpc_health_v1alpha" + healthpb "google.golang.org/grpc/health/grpc_health_v1" ) -type HealthServer struct { +// Server implements `service Health`. +type Server struct { mu sync.Mutex - // statusMap stores the serving status of the services this HealthServer monitors. + // statusMap stores the serving status of the services this Server monitors. statusMap map[string]healthpb.HealthCheckResponse_ServingStatus } -func NewHealthServer() *HealthServer { - return &HealthServer{ +// NewServer returns a new Server. +func NewServer() *Server { + return &Server{ statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus), } } -func (s *HealthServer) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { +// Check implements `service Health`. +func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { s.mu.Lock() defer s.mu.Unlock() if in.Service == "" { @@ -42,7 +45,7 @@ func (s *HealthServer) Check(ctx context.Context, in *healthpb.HealthCheckReques // SetServingStatus is called when need to reset the serving status of a service // or insert a new service entry into the statusMap. -func (s *HealthServer) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) { +func (s *Server) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) { s.mu.Lock() s.statusMap[service] = status s.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 000000000..588f59e5a --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. 
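For context on the health API rename above, where the grpc_health_v1alpha package becomes grpc_health_v1 and HealthServer/NewHealthServer become Server/NewServer, a minimal usage sketch follows. The listener address and the "example.Echo" service name are illustrative placeholders, not values taken from this patch.

package main

import (
    "log"
    "net"

    "google.golang.org/grpc"
    "google.golang.org/grpc/health"
    healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
    lis, err := net.Listen("tcp", ":50051") // illustrative address
    if err != nil {
        log.Fatalf("listen: %v", err)
    }

    s := grpc.NewServer()

    // health.NewServer replaces the former NewHealthServer constructor, and the
    // generated bindings now live under grpc_health_v1.
    hs := health.NewServer()
    healthpb.RegisterHealthServer(s, hs)

    // Mark a hypothetical service name as serving; clients poll it via Health.Check.
    hs.SetServingStatus("example.Echo", healthpb.HealthCheckResponse_SERVING)

    if err := s.Serve(lis); err != nil {
        log.Fatalf("serve: %v", err)
    }
}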
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package grpc + +import ( + "golang.org/x/net/context" +) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on. And handler is the +// service method implementation. It is the responsibility of the interceptor to invoke handler to +// complete the RPC. 
+type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 000000000..5489143a8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,49 @@ +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// Package internal contains gRPC-internal code for testing, to avoid polluting +// the godoc of the top-level grpc package. +package internal + +// TestingCloseConns closes all existing transports but keeps +// grpcServer.lis accepting new connections. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingCloseConns func(grpcServer interface{}) + +// TestingUseHandlerImpl enables the http.Handler-based server implementation. +// It must be called before Serve and requires TLS credentials. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. 
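The UnaryServerInterceptor and StreamServerInterceptor hooks introduced in interceptor.go above are what the regenerated handler stubs now call when an interceptor is installed. A minimal logging interceptor sketch follows; it assumes the grpc.UnaryInterceptor ServerOption that accompanies this API in the updated server.go, and the loggingInterceptor name is illustrative.

package main

import (
    "log"
    "time"

    "golang.org/x/net/context"
    "google.golang.org/grpc"
)

// loggingInterceptor logs each unary RPC and then invokes the wrapped handler,
// as the interceptor contract requires.
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
    start := time.Now()
    resp, err := handler(ctx, req)
    log.Printf("method=%s duration=%s err=%v", info.FullMethod, time.Since(start), err)
    return resp, err
}

func main() {
    // grpc.UnaryInterceptor is assumed to be the ServerOption exposed by the
    // updated server.go for installing a UnaryServerInterceptor.
    s := grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
    // Generated service registration and s.Serve(lis) would follow as usual.
    _ = s
}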
+var TestingUseHandlerImpl func(grpcServer interface{}) diff --git a/vendor/google.golang.org/grpc/interop/client/client.go b/vendor/google.golang.org/grpc/interop/client/client.go index deb9c3d84..98f6cfec3 100644 --- a/vendor/google.golang.org/grpc/interop/client/client.go +++ b/vendor/google.golang.org/grpc/interop/client/client.go @@ -85,7 +85,7 @@ func main() { if *tlsServerName != "" { sn = *tlsServerName } - var creds credentials.TransportAuthenticator + var creds credentials.TransportCredentials if *testCA { var err error creds, err = credentials.NewClientTLSFromFile(testCAFile, sn) @@ -125,47 +125,61 @@ func main() { switch *testCase { case "empty_unary": interop.DoEmptyUnaryCall(tc) + grpclog.Println("EmptyUnaryCall done") case "large_unary": interop.DoLargeUnaryCall(tc) + grpclog.Println("LargeUnaryCall done") case "client_streaming": interop.DoClientStreaming(tc) + grpclog.Println("ClientStreaming done") case "server_streaming": interop.DoServerStreaming(tc) + grpclog.Println("ServerStreaming done") case "ping_pong": interop.DoPingPong(tc) + grpclog.Println("Pingpong done") case "empty_stream": interop.DoEmptyStream(tc) + grpclog.Println("Emptystream done") case "timeout_on_sleeping_server": interop.DoTimeoutOnSleepingServer(tc) + grpclog.Println("TimeoutOnSleepingServer done") case "compute_engine_creds": if !*useTLS { grpclog.Fatalf("TLS is not enabled. TLS is required to execute compute_engine_creds test case.") } interop.DoComputeEngineCreds(tc, *defaultServiceAccount, *oauthScope) + grpclog.Println("ComputeEngineCreds done") case "service_account_creds": if !*useTLS { grpclog.Fatalf("TLS is not enabled. TLS is required to execute service_account_creds test case.") } interop.DoServiceAccountCreds(tc, *serviceAccountKeyFile, *oauthScope) + grpclog.Println("ServiceAccountCreds done") case "jwt_token_creds": if !*useTLS { grpclog.Fatalf("TLS is not enabled. TLS is required to execute jwt_token_creds test case.") } interop.DoJWTTokenCreds(tc, *serviceAccountKeyFile) + grpclog.Println("JWTtokenCreds done") case "per_rpc_creds": if !*useTLS { grpclog.Fatalf("TLS is not enabled. TLS is required to execute per_rpc_creds test case.") } interop.DoPerRPCCreds(tc, *serviceAccountKeyFile, *oauthScope) + grpclog.Println("PerRPCCreds done") case "oauth2_auth_token": if !*useTLS { grpclog.Fatalf("TLS is not enabled. TLS is required to execute oauth2_auth_token test case.") } interop.DoOauth2TokenCreds(tc, *serviceAccountKeyFile, *oauthScope) + grpclog.Println("Oauth2TokenCreds done") case "cancel_after_begin": interop.DoCancelAfterBegin(tc) + grpclog.Println("CancelAfterBegin done") case "cancel_after_first_response": interop.DoCancelAfterFirstResponse(tc) + grpclog.Println("CancelAfterFirstResponse done") default: grpclog.Fatal("Unsupported test case: ", *testCase) } diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go index 7b0803f55..0ceb12df5 100755 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go +++ b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go @@ -37,7 +37,9 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.ProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
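The interop client hunk above also tracks the credentials rename from TransportAuthenticator to TransportCredentials. A hedged sketch of the corresponding client dial path follows; the certificate path, server name, and target address are placeholders rather than values from this patch.

package main

import (
    "log"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials"
)

func main() {
    // NewClientTLSFromFile now returns the renamed credentials.TransportCredentials.
    creds, err := credentials.NewClientTLSFromFile("ca.pem", "example.com")
    if err != nil {
        log.Fatalf("load TLS credentials: %v", err)
    }
    conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()
    // A generated client constructor, e.g. NewTestServiceClient(conn), would be used here.
}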
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // The type of payload that should be returned. type PayloadType int32 @@ -121,16 +123,16 @@ func (m *Payload) GetBody() []byte { type SimpleRequest struct { // Desired payload type in the response from the server. // If response_type is RANDOM, server randomly chooses one from other formats. - ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Desired payload size in the response from the server. // If response_type is COMPRESSABLE, this denotes the size before compression. - ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size" json:"response_size,omitempty"` + ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize" json:"response_size,omitempty"` // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` // Whether SimpleResponse should include username. - FillUsername *bool `protobuf:"varint,4,opt,name=fill_username" json:"fill_username,omitempty"` + FillUsername *bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername" json:"fill_username,omitempty"` // Whether SimpleResponse should include OAuth scope. - FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope" json:"fill_oauth_scope,omitempty"` + FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope" json:"fill_oauth_scope,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -182,7 +184,7 @@ type SimpleResponse struct { // successful when the client expected it. Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` // OAuth scope. - OauthScope *string `protobuf:"bytes,3,opt,name=oauth_scope" json:"oauth_scope,omitempty"` + OauthScope *string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope" json:"oauth_scope,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -234,7 +236,7 @@ func (m *StreamingInputCallRequest) GetPayload() *Payload { // Client-streaming response. type StreamingInputCallResponse struct { // Aggregated size of payloads received from the client. - AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size" json:"aggregated_payload_size,omitempty"` + AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize" json:"aggregated_payload_size,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -257,7 +259,7 @@ type ResponseParameters struct { Size *int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` // Desired interval between consecutive responses in the response stream in // microseconds. - IntervalUs *int32 `protobuf:"varint,2,opt,name=interval_us" json:"interval_us,omitempty"` + IntervalUs *int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs" json:"interval_us,omitempty"` XXX_unrecognized []byte `json:"-"` } @@ -286,9 +288,9 @@ type StreamingOutputCallRequest struct { // If response_type is RANDOM, the payload from each response in the stream // might be of different types. This is to simulate a mixed type of payload // stream. 
- ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` + ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` // Configuration for each expected response message. - ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters" json:"response_parameters,omitempty"` + ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters" json:"response_parameters,omitempty"` // Optional input payload sent along with the request. Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -356,6 +358,10 @@ func init() { var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + // Client API for TestService service type TestServiceClient interface { @@ -564,28 +570,40 @@ func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { s.RegisterService(&_TestService_serviceDesc, srv) } -func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { return nil, err } - out, err := srv.(TestServiceServer).EmptyCall(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TestServiceServer).EmptyCall(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.TestService/EmptyCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) } -func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SimpleRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(TestServiceServer).UnaryCall(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(TestServiceServer).UnaryCall(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.TestService/UnaryCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) + } + return interceptor(ctx, in, info, handler) } func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { @@ -724,8 +742,11 @@ var _TestService_serviceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, + Metadata: fileDescriptor0, } +func init() { proto.RegisterFile("test.proto", fileDescriptor0) } + var fileDescriptor0 = []byte{ // 567 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x54, 0x51, 0x6f, 0xd2, 0x50, diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto 
b/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto deleted file mode 100644 index b5bfe0537..000000000 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto +++ /dev/null @@ -1,140 +0,0 @@ -// An integration test service that covers all the method signature permutations -// of unary/streaming requests/responses. -syntax = "proto2"; - -package grpc.testing; - -message Empty {} - -// The type of payload that should be returned. -enum PayloadType { - // Compressable text format. - COMPRESSABLE = 0; - - // Uncompressable binary format. - UNCOMPRESSABLE = 1; - - // Randomly chosen from all other formats defined in this enum. - RANDOM = 2; -} - -// A block of data, to simply increase gRPC message size. -message Payload { - // The type of data in body. - optional PayloadType type = 1; - // Primary contents of payload. - optional bytes body = 2; -} - -// Unary request. -message SimpleRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. - optional PayloadType response_type = 1; - - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - optional int32 response_size = 2; - - // Optional input payload sent along with the request. - optional Payload payload = 3; - - // Whether SimpleResponse should include username. - optional bool fill_username = 4; - - // Whether SimpleResponse should include OAuth scope. - optional bool fill_oauth_scope = 5; -} - -// Unary response, as configured by the request. -message SimpleResponse { - // Payload to increase message size. - optional Payload payload = 1; - - // The user the request came from, for verifying authentication was - // successful when the client expected it. - optional string username = 2; - - // OAuth scope. - optional string oauth_scope = 3; -} - -// Client-streaming request. -message StreamingInputCallRequest { - // Optional input payload sent along with the request. - optional Payload payload = 1; - - // Not expecting any payload from the response. -} - -// Client-streaming response. -message StreamingInputCallResponse { - // Aggregated size of payloads received from the client. - optional int32 aggregated_payload_size = 1; -} - -// Configuration for a particular response. -message ResponseParameters { - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - optional int32 size = 1; - - // Desired interval between consecutive responses in the response stream in - // microseconds. - optional int32 interval_us = 2; -} - -// Server-streaming request. -message StreamingOutputCallRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. - optional PayloadType response_type = 1; - - // Configuration for each expected response message. - repeated ResponseParameters response_parameters = 2; - - // Optional input payload sent along with the request. - optional Payload payload = 3; -} - -// Server-streaming response, as configured by the request and parameters. -message StreamingOutputCallResponse { - // Payload to increase response size. - optional Payload payload = 1; -} - -// A simple service to test the various types of RPCs and experiment with -// performance with various types of payload. 
-service TestService { - // One empty request followed by one empty response. - rpc EmptyCall(Empty) returns (Empty); - - // One request followed by one response. - // The server returns the client payload as-is. - rpc UnaryCall(SimpleRequest) returns (SimpleResponse); - - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - rpc StreamingOutputCall(StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - rpc StreamingInputCall(stream StreamingInputCallRequest) - returns (StreamingInputCallResponse); - - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - rpc FullDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - rpc HalfDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); -} diff --git a/vendor/google.golang.org/grpc/interop/test_utils.go b/vendor/google.golang.org/grpc/interop/test_utils.go index 6a73d6f45..23340ab81 100644 --- a/vendor/google.golang.org/grpc/interop/test_utils.go +++ b/vendor/google.golang.org/grpc/interop/test_utils.go @@ -85,7 +85,6 @@ func DoEmptyUnaryCall(tc testpb.TestServiceClient) { if !proto.Equal(&testpb.Empty{}, reply) { grpclog.Fatalf("/TestService/EmptyCall receives %v, want %v", reply, testpb.Empty{}) } - grpclog.Println("EmptyUnaryCall done") } // DoLargeUnaryCall performs a unary RPC with large payload in the request and response. @@ -105,7 +104,6 @@ func DoLargeUnaryCall(tc testpb.TestServiceClient) { if t != testpb.PayloadType_COMPRESSABLE || s != largeRespSize { grpclog.Fatalf("Got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize) } - grpclog.Println("LargeUnaryCall done") } // DoClientStreaming performs a client streaming RPC. @@ -134,7 +132,6 @@ func DoClientStreaming(tc testpb.TestServiceClient) { if reply.GetAggregatedPayloadSize() != int32(sum) { grpclog.Fatalf("%v.CloseAndRecv().GetAggregatePayloadSize() = %v; want %v", stream, reply.GetAggregatedPayloadSize(), sum) } - grpclog.Println("ClientStreaming done") } // DoServerStreaming performs a server streaming RPC. @@ -179,7 +176,6 @@ func DoServerStreaming(tc testpb.TestServiceClient) { if respCnt != len(respSizes) { grpclog.Fatalf("Got %d reply, want %d", len(respSizes), respCnt) } - grpclog.Println("ServerStreaming done") } // DoPingPong performs ping-pong style bi-directional streaming RPC. @@ -224,7 +220,6 @@ func DoPingPong(tc testpb.TestServiceClient) { if _, err := stream.Recv(); err != io.EOF { grpclog.Fatalf("%v failed to complele the ping pong test: %v", stream, err) } - grpclog.Println("Pingpong done") } // DoEmptyStream sets up a bi-directional streaming with zero message. 
@@ -239,7 +234,6 @@ func DoEmptyStream(tc testpb.TestServiceClient) { if _, err := stream.Recv(); err != io.EOF { grpclog.Fatalf("%v failed to complete the empty stream test: %v", stream, err) } - grpclog.Println("Emptystream done") } // DoTimeoutOnSleepingServer performs an RPC on a sleep server which causes RPC timeout. @@ -248,7 +242,6 @@ func DoTimeoutOnSleepingServer(tc testpb.TestServiceClient) { stream, err := tc.FullDuplexCall(ctx) if err != nil { if grpc.Code(err) == codes.DeadlineExceeded { - grpclog.Println("TimeoutOnSleepingServer done") return } grpclog.Fatalf("%v.FullDuplexCall(_) = _, %v", tc, err) @@ -264,7 +257,6 @@ func DoTimeoutOnSleepingServer(tc testpb.TestServiceClient) { if _, err := stream.Recv(); grpc.Code(err) != codes.DeadlineExceeded { grpclog.Fatalf("%v.Recv() = _, %v, want error code %d", stream, err, codes.DeadlineExceeded) } - grpclog.Println("TimeoutOnSleepingServer done") } // DoComputeEngineCreds performs a unary RPC with compute engine auth. @@ -289,7 +281,6 @@ func DoComputeEngineCreds(tc testpb.TestServiceClient, serviceAccount, oauthScop if !strings.Contains(oauthScope, scope) { grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) } - grpclog.Println("ComputeEngineCreds done") } func getServiceAccountJSONKey(keyFile string) []byte { @@ -323,7 +314,6 @@ func DoServiceAccountCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, o if !strings.Contains(oauthScope, scope) { grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) } - grpclog.Println("ServiceAccountCreds done") } // DoJWTTokenCreds performs a unary RPC with JWT token auth. @@ -344,7 +334,6 @@ func DoJWTTokenCreds(tc testpb.TestServiceClient, serviceAccountKeyFile string) if !strings.Contains(string(jsonKey), user) { grpclog.Fatalf("Got user name %q which is NOT a substring of %q.", user, jsonKey) } - grpclog.Println("JWTtokenCreds done") } // GetToken obtains an OAUTH token from the input. @@ -384,7 +373,6 @@ func DoOauth2TokenCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oaut if !strings.Contains(oauthScope, scope) { grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) } - grpclog.Println("Oauth2TokenCreds done") } // DoPerRPCCreds performs a unary RPC with per RPC OAUTH2 token. @@ -413,7 +401,6 @@ func DoPerRPCCreds(tc testpb.TestServiceClient, serviceAccountKeyFile, oauthScop if !strings.Contains(oauthScope, scope) { grpclog.Fatalf("Got OAuth scope %q which is NOT a substring of %q.", scope, oauthScope) } - grpclog.Println("PerRPCCreds done") } var ( @@ -435,7 +422,6 @@ func DoCancelAfterBegin(tc testpb.TestServiceClient) { if grpc.Code(err) != codes.Canceled { grpclog.Fatalf("%v.CloseAndRecv() got error code %d, want %d", stream, grpc.Code(err), codes.Canceled) } - grpclog.Println("CancelAfterBegin done") } // DoCancelAfterFirstResponse cancels the RPC after receiving the first message from the server. 
@@ -466,7 +452,6 @@ func DoCancelAfterFirstResponse(tc testpb.TestServiceClient) { if _, err := stream.Recv(); grpc.Code(err) != codes.Canceled { grpclog.Fatalf("%v compleled with error code %d, want %d", stream, grpc.Code(err), codes.Canceled) } - grpclog.Println("CancelAfterFirstResponse done") } type testServer struct { diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 52070dbec..954c0f77c 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -60,15 +60,21 @@ func encodeKeyValue(k, v string) (string, string) { // DecodeKeyValue returns the original key and value corresponding to the // encoded data in k, v. +// If k is a binary header and v contains comma, v is split on comma before decoded, +// and the decoded v will be joined with comma before returned. func DecodeKeyValue(k, v string) (string, string, error) { if !strings.HasSuffix(k, binHdrSuffix) { return k, v, nil } - val, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return "", "", err + vvs := strings.Split(v, ",") + for i, vv := range vvs { + val, err := base64.StdEncoding.DecodeString(vv) + if err != nil { + return "", "", err + } + vvs[i] = string(val) } - return k, string(val), nil + return k, strings.Join(vvs, ","), nil } // MD is a mapping from metadata keys to values. Users should use the following diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go index 06605607c..c2e0871e6 100644 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -66,7 +66,8 @@ type Resolver interface { // Watcher watches for the updates on the specified target. type Watcher interface { // Next blocks until an update or error happens. It may return one or more - // updates. The first call should get the full set of the results. + // updates. The first call should get the full set of the results. It should + // return an error if and only if Watcher cannot recover. Next() ([]*Update, error) // Close closes the Watcher. Close() diff --git a/vendor/google.golang.org/grpc/picker.go b/vendor/google.golang.org/grpc/picker.go deleted file mode 100644 index 50f315b44..000000000 --- a/vendor/google.golang.org/grpc/picker.go +++ /dev/null @@ -1,243 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
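The DecodeKeyValue change in metadata.go above makes binary ("-bin") metadata keys comma-aware: the value is split on commas, each piece is base64-decoded, and the decoded pieces are rejoined with commas. A small sketch of the observable behaviour follows; the "trace-bin" key and the payloads are made up for illustration.

package main

import (
    "encoding/base64"
    "fmt"
    "strings"

    "google.golang.org/grpc/metadata"
)

func main() {
    // A comma-joined value of two base64-encoded payloads, as a sender might
    // have produced for a binary ("-bin") metadata key.
    joined := strings.Join([]string{
        base64.StdEncoding.EncodeToString([]byte("v1")),
        base64.StdEncoding.EncodeToString([]byte("v2")),
    }, ",")

    k, v, err := metadata.DecodeKeyValue("trace-bin", joined)
    if err != nil {
        fmt.Println("decode error:", err)
        return
    }
    fmt.Println(k, v) // expected output: trace-bin v1,v2
}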
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "container/list" - "fmt" - "sync" - - "golang.org/x/net/context" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/naming" - "google.golang.org/grpc/transport" -) - -// Picker picks a Conn for RPC requests. -// This is EXPERIMENTAL and please do not implement your own Picker for now. -type Picker interface { - // Init does initial processing for the Picker, e.g., initiate some connections. - Init(cc *ClientConn) error - // Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC - // or some error happens. - Pick(ctx context.Context) (transport.ClientTransport, error) - // PickAddr picks a peer address for connecting. This will be called repeated for - // connecting/reconnecting. - PickAddr() (string, error) - // State returns the connectivity state of the underlying connections. - State() (ConnectivityState, error) - // WaitForStateChange blocks until the state changes to something other than - // the sourceState. It returns the new state or error. - WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) - // Close closes all the Conn's owned by this Picker. - Close() error -} - -// unicastPicker is the default Picker which is used when there is no custom Picker -// specified by users. It always picks the same Conn. -type unicastPicker struct { - target string - conn *Conn -} - -func (p *unicastPicker) Init(cc *ClientConn) error { - c, err := NewConn(cc) - if err != nil { - return err - } - p.conn = c - return nil -} - -func (p *unicastPicker) Pick(ctx context.Context) (transport.ClientTransport, error) { - return p.conn.Wait(ctx) -} - -func (p *unicastPicker) PickAddr() (string, error) { - return p.target, nil -} - -func (p *unicastPicker) State() (ConnectivityState, error) { - return p.conn.State(), nil -} - -func (p *unicastPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - return p.conn.WaitForStateChange(ctx, sourceState) -} - -func (p *unicastPicker) Close() error { - if p.conn != nil { - return p.conn.Close() - } - return nil -} - -// unicastNamingPicker picks an address from a name resolver to set up the connection. -type unicastNamingPicker struct { - cc *ClientConn - resolver naming.Resolver - watcher naming.Watcher - mu sync.Mutex - // The list of the addresses are obtained from watcher. - addrs *list.List - // It tracks the current picked addr by PickAddr(). The next PickAddr may - // push it forward on addrs. - pickedAddr *list.Element - conn *Conn -} - -// NewUnicastNamingPicker creates a Picker to pick addresses from a name resolver -// to connect. -func NewUnicastNamingPicker(r naming.Resolver) Picker { - return &unicastNamingPicker{ - resolver: r, - addrs: list.New(), - } -} - -type addrInfo struct { - addr string - // Set to true if this addrInfo needs to be deleted in the next PickAddrr() call. 
- deleting bool -} - -// processUpdates calls Watcher.Next() once and processes the obtained updates. -func (p *unicastNamingPicker) processUpdates() error { - updates, err := p.watcher.Next() - if err != nil { - return err - } - for _, update := range updates { - switch update.Op { - case naming.Add: - p.mu.Lock() - p.addrs.PushBack(&addrInfo{ - addr: update.Addr, - }) - p.mu.Unlock() - // Initial connection setup - if p.conn == nil { - conn, err := NewConn(p.cc) - if err != nil { - return err - } - p.conn = conn - } - case naming.Delete: - p.mu.Lock() - for e := p.addrs.Front(); e != nil; e = e.Next() { - if update.Addr == e.Value.(*addrInfo).addr { - if e == p.pickedAddr { - // Do not remove the element now if it is the current picked - // one. We leave the deletion to the next PickAddr() call. - e.Value.(*addrInfo).deleting = true - // Notify Conn to close it. All the live RPCs on this connection - // will be aborted. - p.conn.NotifyReset() - } else { - p.addrs.Remove(e) - } - } - } - p.mu.Unlock() - default: - grpclog.Println("Unknown update.Op ", update.Op) - } - } - return nil -} - -// monitor runs in a standalone goroutine to keep watching name resolution updates until the watcher -// is closed. -func (p *unicastNamingPicker) monitor() { - for { - if err := p.processUpdates(); err != nil { - return - } - } -} - -func (p *unicastNamingPicker) Init(cc *ClientConn) error { - w, err := p.resolver.Resolve(cc.target) - if err != nil { - return err - } - p.watcher = w - p.cc = cc - // Get the initial name resolution. - if err := p.processUpdates(); err != nil { - return err - } - go p.monitor() - return nil -} - -func (p *unicastNamingPicker) Pick(ctx context.Context) (transport.ClientTransport, error) { - return p.conn.Wait(ctx) -} - -func (p *unicastNamingPicker) PickAddr() (string, error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.pickedAddr == nil { - p.pickedAddr = p.addrs.Front() - } else { - pa := p.pickedAddr - p.pickedAddr = pa.Next() - if pa.Value.(*addrInfo).deleting { - p.addrs.Remove(pa) - } - if p.pickedAddr == nil { - p.pickedAddr = p.addrs.Front() - } - } - if p.pickedAddr == nil { - return "", fmt.Errorf("there is no address available to pick") - } - return p.pickedAddr.Value.(*addrInfo).addr, nil -} - -func (p *unicastNamingPicker) State() (ConnectivityState, error) { - return 0, fmt.Errorf("State() is not supported for unicastNamingPicker") -} - -func (p *unicastNamingPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - return 0, fmt.Errorf("WaitForStateChange is not supported for unicastNamingPciker") -} - -func (p *unicastNamingPicker) Close() error { - p.watcher.Close() - p.conn.Close() - return nil -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go new file mode 100644 index 000000000..da90479e7 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -0,0 +1,694 @@ +// Code generated by protoc-gen-go. +// source: reflection.proto +// DO NOT EDIT! + +/* +Package grpc_reflection_v1alpha is a generated protocol buffer package. 
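The Picker interface deleted above was the experimental hook for choosing client connections; in this revision of gRPC it is superseded upstream by the Balancer API. A hedged sketch of round-robin dialing against a naming.Resolver follows, assuming the grpc.RoundRobin and grpc.WithBalancer helpers from that API; the target string is a placeholder, and the nil resolver is only to keep the sketch compiling (a real client would pass a concrete Resolver implementation).

package main

import (
    "log"

    "google.golang.org/grpc"
    "google.golang.org/grpc/naming"
)

// dialRoundRobin covers what unicastNamingPicker used to do: addresses come
// from a naming.Resolver and the round-robin balancer spreads RPCs across them.
func dialRoundRobin(target string, r naming.Resolver) (*grpc.ClientConn, error) {
    return grpc.Dial(target,
        grpc.WithInsecure(), // illustrative; real clients would normally pass transport credentials
        grpc.WithBalancer(grpc.RoundRobin(r)),
    )
}

func main() {
    var r naming.Resolver // a DNS- or etcd-backed Resolver would be constructed here
    conn, err := dialRoundRobin("example-service:50051", r)
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()
}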
+ +It is generated from these files: + reflection.proto + +It has these top-level messages: + ServerReflectionRequest + ExtensionRequest + ServerReflectionResponse + FileDescriptorResponse + ExtensionNumberResponse + ListServiceResponse + ServiceResponse + ErrorResponse +*/ +package grpc_reflection_v1alpha + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + Host string `protobuf:"bytes,1,opt,name=host" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + // + // Types that are valid to be assigned to MessageRequest: + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (m *ServerReflectionRequest) Reset() { *m = ServerReflectionRequest{} } +func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) } +func (*ServerReflectionRequest) ProtoMessage() {} +func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,oneof"` +} +type ServerReflectionRequest_FileContainingSymbol struct { + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,oneof"` +} +type ServerReflectionRequest_FileContainingExtension struct { + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,oneof"` +} +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,oneof"` +} +type ServerReflectionRequest_ListServices struct { + ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {} +func (*ServerReflectionRequest_ListServices) 
isServerReflectionRequest_MessageRequest() {} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (m *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (m *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (m *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (m *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (m *ServerReflectionRequest) GetListServices() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ServerReflectionRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ServerReflectionRequest_OneofMarshaler, _ServerReflectionRequest_OneofUnmarshaler, _ServerReflectionRequest_OneofSizer, []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } +} + +func _ServerReflectionRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ServerReflectionRequest) + // message_request + switch x := m.MessageRequest.(type) { + case *ServerReflectionRequest_FileByFilename: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.FileByFilename) + case *ServerReflectionRequest_FileContainingSymbol: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.FileContainingSymbol) + case *ServerReflectionRequest_FileContainingExtension: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FileContainingExtension); err != nil { + return err + } + case *ServerReflectionRequest_AllExtensionNumbersOfType: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.AllExtensionNumbersOfType) + case *ServerReflectionRequest_ListServices: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ListServices) + case nil: + default: + return fmt.Errorf("ServerReflectionRequest.MessageRequest has unexpected type %T", x) + } + return nil +} + +func _ServerReflectionRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ServerReflectionRequest) + switch tag { + case 3: // message_request.file_by_filename + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.MessageRequest = &ServerReflectionRequest_FileByFilename{x} + return true, err + case 4: // message_request.file_containing_symbol + if wire != proto.WireBytes { + return true, 
proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.MessageRequest = &ServerReflectionRequest_FileContainingSymbol{x} + return true, err + case 5: // message_request.file_containing_extension + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ExtensionRequest) + err := b.DecodeMessage(msg) + m.MessageRequest = &ServerReflectionRequest_FileContainingExtension{msg} + return true, err + case 6: // message_request.all_extension_numbers_of_type + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.MessageRequest = &ServerReflectionRequest_AllExtensionNumbersOfType{x} + return true, err + case 7: // message_request.list_services + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.MessageRequest = &ServerReflectionRequest_ListServices{x} + return true, err + default: + return false, nil + } +} + +func _ServerReflectionRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ServerReflectionRequest) + // message_request + switch x := m.MessageRequest.(type) { + case *ServerReflectionRequest_FileByFilename: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.FileByFilename))) + n += len(x.FileByFilename) + case *ServerReflectionRequest_FileContainingSymbol: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.FileContainingSymbol))) + n += len(x.FileContainingSymbol) + case *ServerReflectionRequest_FileContainingExtension: + s := proto.Size(x.FileContainingExtension) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServerReflectionRequest_AllExtensionNumbersOfType: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.AllExtensionNumbersOfType))) + n += len(x.AllExtensionNumbersOfType) + case *ServerReflectionRequest_ListServices: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ListServices))) + n += len(x.ListServices) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + // Fully-qualified type name. The format should be . + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber" json:"extension_number,omitempty"` +} + +func (m *ExtensionRequest) Reset() { *m = ExtensionRequest{} } +func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionRequest) ProtoMessage() {} +func (*ExtensionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +// The message sent by the server to answer ServerReflectionInfo method. +type ServerReflectionResponse struct { + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"` + // The server set one of the following fields accroding to the message_request + // in the request. 
+ // + // Types that are valid to be assigned to MessageResponse: + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (m *ServerReflectionResponse) Reset() { *m = ServerReflectionResponse{} } +func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) } +func (*ServerReflectionResponse) ProtoMessage() {} +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,oneof"` +} +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,oneof"` +} +type ServerReflectionResponse_ListServicesResponse struct { + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,oneof"` +} +type ServerReflectionResponse_ErrorResponse struct { + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {} +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if m != nil { + return m.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ServerReflectionResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ServerReflectionResponse_OneofMarshaler, _ServerReflectionResponse_OneofUnmarshaler, _ServerReflectionResponse_OneofSizer, []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } +} + +func _ServerReflectionResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ServerReflectionResponse) + // message_response + switch x := m.MessageResponse.(type) { + case *ServerReflectionResponse_FileDescriptorResponse: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FileDescriptorResponse); err != nil { + return err + } + case *ServerReflectionResponse_AllExtensionNumbersResponse: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AllExtensionNumbersResponse); err != nil { + return err + } + case *ServerReflectionResponse_ListServicesResponse: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListServicesResponse); err != nil { + return err + } + case *ServerReflectionResponse_ErrorResponse: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ErrorResponse); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ServerReflectionResponse.MessageResponse has unexpected type %T", x) + } + return nil +} + +func _ServerReflectionResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ServerReflectionResponse) + switch tag { + case 4: // message_response.file_descriptor_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileDescriptorResponse) + err := b.DecodeMessage(msg) + m.MessageResponse = &ServerReflectionResponse_FileDescriptorResponse{msg} + return true, err + case 5: // message_response.all_extension_numbers_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ExtensionNumberResponse) + err := b.DecodeMessage(msg) + m.MessageResponse = &ServerReflectionResponse_AllExtensionNumbersResponse{msg} + return true, err + case 6: // message_response.list_services_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListServiceResponse) + err := b.DecodeMessage(msg) + m.MessageResponse = &ServerReflectionResponse_ListServicesResponse{msg} + return true, err + case 7: // message_response.error_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ErrorResponse) + err := b.DecodeMessage(msg) + m.MessageResponse = &ServerReflectionResponse_ErrorResponse{msg} + return true, err + default: + return false, nil + } +} + +func _ServerReflectionResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ServerReflectionResponse) + // message_response + switch x := m.MessageResponse.(type) { + case *ServerReflectionResponse_FileDescriptorResponse: + s := proto.Size(x.FileDescriptorResponse) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServerReflectionResponse_AllExtensionNumbersResponse: + s := proto.Size(x.AllExtensionNumbersResponse) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + 
n += proto.SizeVarint(uint64(s)) + n += s + case *ServerReflectionResponse_ListServicesResponse: + s := proto.Size(x.ListServicesResponse) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServerReflectionResponse_ErrorResponse: + s := proto.Size(x.ErrorResponse) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (m *FileDescriptorResponse) Reset() { *m = FileDescriptorResponse{} } +func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorResponse) ProtoMessage() {} +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +type ExtensionNumberResponse struct { + // Full name of the base type, including the package name. The format + // is <package>.<type> + BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,name=extension_number,json=extensionNumber" json:"extension_number,omitempty"` +} + +func (m *ExtensionNumberResponse) Reset() { *m = ExtensionNumberResponse{} } +func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionNumberResponse) ProtoMessage() {} +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service" json:"service,omitempty"` +} + +func (m *ListServiceResponse) Reset() { *m = ListServiceResponse{} } +func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ListServiceResponse) ProtoMessage() {} +func (*ListServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ListServiceResponse) GetService() []*ServiceResponse { + if m != nil { + return m.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +type ServiceResponse struct { + // Full name of a registered service, including its package name. The format + // is <package>.<service>
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *ServiceResponse) Reset() { *m = ServiceResponse{} } +func (m *ServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ServiceResponse) ProtoMessage() {} +func (*ServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage" json:"error_message,omitempty"` +} + +func (m *ErrorResponse) Reset() { *m = ErrorResponse{} } +func (m *ErrorResponse) String() string { return proto.CompactTextString(m) } +func (*ErrorResponse) ProtoMessage() {} +func (*ErrorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func init() { + proto.RegisterType((*ServerReflectionRequest)(nil), "grpc.reflection.v1alpha.ServerReflectionRequest") + proto.RegisterType((*ExtensionRequest)(nil), "grpc.reflection.v1alpha.ExtensionRequest") + proto.RegisterType((*ServerReflectionResponse)(nil), "grpc.reflection.v1alpha.ServerReflectionResponse") + proto.RegisterType((*FileDescriptorResponse)(nil), "grpc.reflection.v1alpha.FileDescriptorResponse") + proto.RegisterType((*ExtensionNumberResponse)(nil), "grpc.reflection.v1alpha.ExtensionNumberResponse") + proto.RegisterType((*ListServiceResponse)(nil), "grpc.reflection.v1alpha.ListServiceResponse") + proto.RegisterType((*ServiceResponse)(nil), "grpc.reflection.v1alpha.ServiceResponse") + proto.RegisterType((*ErrorResponse)(nil), "grpc.reflection.v1alpha.ErrorResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for ServerReflection service + +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc *grpc.ClientConn +} + +func NewServerReflectionClient(cc *grpc.ClientConn) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := grpc.NewClientStream(ctx, &_ServerReflection_serviceDesc.Streams[0], c.cc, "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) 
+ if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for ServerReflection service + +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +func RegisterServerReflectionServer(s *grpc.Server, srv ServerReflectionServer) { + s.RegisterService(&_ServerReflection_serviceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _ServerReflection_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1alpha.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("reflection.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 646 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xd3, 0x4c, + 0x10, 0xfd, 0xd2, 0xe6, 0x47, 0x99, 0xfc, 0xf9, 0xdb, 0x86, 0xc4, 0x05, 0x15, 0x21, 0x43, 0x21, + 0x45, 0x28, 0xb4, 0x46, 0xe2, 0x01, 0x52, 0x40, 0x45, 0x2a, 0x2d, 0x72, 0xb8, 0x41, 0x5c, 0x58, + 0x8e, 0xb3, 0x4e, 0x0d, 0x8e, 0xd7, 0xec, 0xba, 0x81, 0x5c, 0xf1, 0x10, 0x3c, 0x14, 0xaf, 0xc4, + 0x25, 0xbb, 0xeb, 0x9f, 0x38, 0xae, 0x0d, 0xea, 0x95, 0xad, 0xb3, 0x33, 0x7b, 0x66, 0xe6, 0x9c, + 0x59, 0x50, 0x28, 0x76, 0x3c, 0x6c, 0x87, 0x2e, 0xf1, 0xc7, 0x01, 0x25, 0x21, 0x41, 0xc3, 0x05, + 0x0d, 0xec, 0x71, 0x06, 0x5e, 0x9d, 0x58, 0x5e, 0x70, 0x65, 0x69, 0xbf, 0x77, 0x60, 0x38, 0xc5, + 0x74, 0x85, 0xa9, 0x91, 0x1e, 0x1a, 0xf8, 0xeb, 0x35, 0x66, 0x21, 0x42, 0x50, 0xbd, 0x22, 0x2c, + 0x54, 0x2b, 0x0f, 0x2a, 0xa3, 0xa6, 0x21, 0xff, 0xd1, 0x53, 0x50, 0x1c, 0xd7, 0xc3, 0xe6, 0x6c, + 0x6d, 0x8a, 0xaf, 0x6f, 0x2d, 0xb1, 0xba, 0x2b, 0xce, 0xcf, 0xfe, 0x33, 0xba, 0x02, 
0x99, 0xac, + 0xdf, 0xc4, 0x38, 0x7a, 0x09, 0x03, 0x19, 0x6b, 0x13, 0x3f, 0xb4, 0x5c, 0xdf, 0xf5, 0x17, 0x26, + 0x5b, 0x2f, 0x67, 0xc4, 0x53, 0xab, 0x71, 0x46, 0x5f, 0x9c, 0x9f, 0xa6, 0xc7, 0x53, 0x79, 0x8a, + 0x16, 0xb0, 0x9f, 0xcf, 0xc3, 0xdf, 0x43, 0xec, 0x33, 0x5e, 0x9b, 0x5a, 0xe3, 0xa9, 0x2d, 0xfd, + 0x68, 0x5c, 0xd2, 0xd0, 0xf8, 0x75, 0x12, 0x19, 0x77, 0xc1, 0x59, 0x86, 0xdb, 0x2c, 0x69, 0x04, + 0x9a, 0xc0, 0x81, 0xe5, 0x79, 0x9b, 0xcb, 0x4d, 0xff, 0x7a, 0x39, 0xc3, 0x94, 0x99, 0xc4, 0x31, + 0xc3, 0x75, 0x80, 0xd5, 0x7a, 0x5c, 0xe7, 0x3e, 0x0f, 0x4b, 0xd3, 0x2e, 0xa2, 0xa0, 0x4b, 0xe7, + 0x03, 0x0f, 0x41, 0x87, 0xd0, 0xf1, 0x5c, 0x16, 0x9a, 0x8c, 0x0f, 0xd1, 0xb5, 0x31, 0x53, 0x1b, + 0x71, 0x4e, 0x5b, 0xc0, 0xd3, 0x18, 0x9d, 0xfc, 0x0f, 0xbd, 0x25, 0x66, 0xcc, 0x5a, 0x60, 0x93, + 0x46, 0x85, 0x69, 0x0e, 0x28, 0xf9, 0x62, 0xd1, 0x13, 0xe8, 0x65, 0xba, 0x96, 0x35, 0x44, 0xd3, + 0xef, 0x6e, 0x60, 0x49, 0x7b, 0x04, 0x4a, 0xbe, 0x6c, 0x75, 0x87, 0x47, 0xd6, 0x8c, 0x1e, 0xde, + 0x2e, 0x54, 0xfb, 0x55, 0x05, 0xf5, 0xa6, 0xc4, 0x2c, 0x20, 0x3e, 0xc3, 0xe8, 0x00, 0x60, 0x65, + 0x79, 0xee, 0xdc, 0xcc, 0x28, 0xdd, 0x94, 0xc8, 0x99, 0x90, 0xfb, 0x13, 0x28, 0x84, 0xba, 0x0b, + 0xd7, 0xb7, 0xbc, 0xa4, 0x6e, 0x49, 0xd3, 0xd2, 0x8f, 0x4b, 0x15, 0x28, 0xb1, 0x93, 0xd1, 0x4b, + 0x6e, 0x4a, 0x9a, 0xfd, 0x02, 0xaa, 0xd4, 0x79, 0x8e, 0x99, 0x4d, 0xdd, 0x20, 0x24, 0x94, 0x73, + 0x44, 0x75, 0x49, 0x87, 0xb4, 0xf4, 0xe7, 0xa5, 0x24, 0xc2, 0x64, 0xaf, 0xd2, 0xbc, 0xa4, 0x1d, + 0x3e, 0x76, 0x69, 0xb9, 0x9b, 0x27, 0xe8, 0x1b, 0xdc, 0x2f, 0xd6, 0x3a, 0xa5, 0xac, 0xfd, 0xa3, + 0xaf, 0x9c, 0x01, 0x32, 0x9c, 0xf7, 0x0a, 0xec, 0x91, 0x12, 0xcf, 0x61, 0xb0, 0x65, 0x90, 0x0d, + 0x61, 0x5d, 0x12, 0x3e, 0x2b, 0x25, 0x3c, 0xdf, 0x18, 0x28, 0x43, 0xd6, 0xcf, 0xfa, 0x2a, 0x65, + 0xb9, 0x84, 0x2e, 0xa6, 0x34, 0x3b, 0xc1, 0x86, 0xbc, 0xfd, 0x71, 0x79, 0x3b, 0x22, 0x3c, 0x73, + 0x6f, 0x07, 0x67, 0x81, 0x09, 0x02, 0x65, 0x63, 0xd8, 0x08, 0xd3, 0xce, 0x61, 0x50, 0x3c, 0x77, + 0xa4, 0xc3, 0x9d, 0xbc, 0x94, 0xf2, 0xe1, 0xe1, 0x8e, 0xda, 0x1d, 0xb5, 0x8d, 0xbd, 0x6d, 0x51, + 0xde, 0x8b, 0x23, 0xed, 0x33, 0x0c, 0x4b, 0x46, 0x8a, 0x1e, 0x41, 0x77, 0x66, 0x31, 0x2c, 0x17, + 0xc0, 0x94, 0x6f, 0x4c, 0xe4, 0xcc, 0xb6, 0x40, 0x85, 0xff, 0x2f, 0xc4, 0xfb, 0x52, 0xbc, 0x03, + 0xbb, 0x45, 0x3b, 0xf0, 0x11, 0xf6, 0x0a, 0xa6, 0xc9, 0x1f, 0x80, 0x46, 0x2c, 0x8b, 0x2c, 0xb4, + 0xa5, 0x8f, 0xfe, 0xea, 0xea, 0x4c, 0xaa, 0x91, 0x24, 0x6a, 0x87, 0xd0, 0xcb, 0x5f, 0xcb, 0x1f, + 0xce, 0x4c, 0xd1, 0xf2, 0x5f, 0x9b, 0x42, 0x67, 0x6b, 0xe2, 0x62, 0xf3, 0x22, 0xc5, 0x6c, 0x32, + 0x8f, 0x42, 0x6b, 0x46, 0x53, 0x22, 0xa7, 0x1c, 0x40, 0x0f, 0x21, 0x12, 0xc4, 0x8c, 0x55, 0x90, + 0x6b, 0xc7, 0x27, 0x20, 0xc1, 0x77, 0x11, 0xa6, 0xff, 0xac, 0x80, 0x92, 0x5f, 0x37, 0xf4, 0x03, + 0xfa, 0x79, 0xec, 0xad, 0xef, 0x10, 0x74, 0xeb, 0x8d, 0xbd, 0x7b, 0x72, 0x8b, 0x8c, 0xa8, 0xab, + 0x51, 0xe5, 0xb8, 0x32, 0xab, 0x4b, 0xe9, 0x5f, 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x3f, + 0x7b, 0x08, 0x87, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go new file mode 100644 index 000000000..ac49de4aa --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go @@ -0,0 +1,56 @@ +// Code generated by protoc-gen-go. +// source: proto2.proto +// DO NOT EDIT! 
+ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type ToBeExtened struct { + Foo *int32 `protobuf:"varint,1,req,name=foo" json:"foo,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ToBeExtened) Reset() { *m = ToBeExtened{} } +func (m *ToBeExtened) String() string { return proto.CompactTextString(m) } +func (*ToBeExtened) ProtoMessage() {} +func (*ToBeExtened) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +var extRange_ToBeExtened = []proto.ExtensionRange{ + {10, 20}, +} + +func (*ToBeExtened) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ToBeExtened +} + +func (m *ToBeExtened) GetFoo() int32 { + if m != nil && m.Foo != nil { + return *m.Foo + } + return 0 +} + +func init() { + proto.RegisterType((*ToBeExtened)(nil), "grpc.testing.ToBeExtened") +} + +func init() { proto.RegisterFile("proto2.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 85 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x28, 0xca, 0x2f, + 0xc9, 0x37, 0xd2, 0x03, 0x53, 0x42, 0x3c, 0xe9, 0x45, 0x05, 0xc9, 0x7a, 0x25, 0xa9, 0xc5, 0x25, + 0x99, 0x79, 0xe9, 0x4a, 0xaa, 0x5c, 0xdc, 0x21, 0xf9, 0x4e, 0xa9, 0xae, 0x15, 0x25, 0xa9, 0x79, + 0xa9, 0x29, 0x42, 0x02, 0x5c, 0xcc, 0x69, 0xf9, 0xf9, 0x12, 0x8c, 0x0a, 0x4c, 0x1a, 0xac, 0x41, + 0x20, 0xa6, 0x16, 0x0b, 0x07, 0x97, 0x80, 0x28, 0x20, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xed, 0xbc, + 0xc2, 0x43, 0x00, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go new file mode 100644 index 000000000..0120ca972 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go @@ -0,0 +1,88 @@ +// Code generated by protoc-gen-go. +// source: proto2_ext.proto +// DO NOT EDIT! + +/* +Package grpc_testing is a generated protocol buffer package. + +It is generated from these files: + proto2_ext.proto + proto2.proto + test.proto + +It has these top-level messages: + Extension + ToBeExtened + SearchResponse + SearchRequest +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Extension struct { + Baz *int32 `protobuf:"varint,1,opt,name=baz" json:"baz,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Extension) Reset() { *m = Extension{} } +func (m *Extension) String() string { return proto.CompactTextString(m) } +func (*Extension) ProtoMessage() {} +func (*Extension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Extension) GetBaz() int32 { + if m != nil && m.Baz != nil { + return *m.Baz + } + return 0 +} + +var E_Bar = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtened)(nil), + ExtensionType: (*int32)(nil), + Field: 13, + Name: "grpc.testing.bar", + Tag: "varint,13,opt,name=bar", +} + +var E_Baz = &proto.ExtensionDesc{ + ExtendedType: (*ToBeExtened)(nil), + ExtensionType: (*Extension)(nil), + Field: 17, + Name: "grpc.testing.baz", + Tag: "bytes,17,opt,name=baz", +} + +func init() { + proto.RegisterType((*Extension)(nil), "grpc.testing.Extension") + proto.RegisterExtension(E_Bar) + proto.RegisterExtension(E_Baz) +} + +func init() { proto.RegisterFile("proto2_ext.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 130 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x28, 0x28, 0xca, 0x2f, + 0xc9, 0x37, 0x8a, 0x4f, 0xad, 0x28, 0xd1, 0x03, 0x33, 0x85, 0x78, 0xd2, 0x8b, 0x0a, 0x92, 0xf5, + 0x4a, 0x52, 0x8b, 0x4b, 0x32, 0xf3, 0xd2, 0xa5, 0x78, 0x20, 0xf2, 0x10, 0x39, 0x25, 0x59, 0x2e, + 0x4e, 0xd7, 0x8a, 0x92, 0xd4, 0xbc, 0xe2, 0xcc, 0xfc, 0x3c, 0x21, 0x01, 0x2e, 0xe6, 0xa4, 0xc4, + 0x2a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xd6, 0x20, 0x10, 0xd3, 0x4a, 0x1b, 0x24, 0x52, 0x24, 0x24, + 0xa9, 0x87, 0x6c, 0x84, 0x5e, 0x48, 0xbe, 0x53, 0x2a, 0x58, 0x57, 0x6a, 0x8a, 0x04, 0x2f, 0x4c, + 0x71, 0x91, 0x95, 0x0b, 0x58, 0x3b, 0x3e, 0xc5, 0x82, 0x40, 0xc5, 0xdc, 0x46, 0xe2, 0xa8, 0x0a, + 0xe0, 0xf6, 0x83, 0xad, 0x04, 0x04, 0x00, 0x00, 0xff, 0xff, 0x59, 0xfa, 0x16, 0xbc, 0xc0, 0x00, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go new file mode 100644 index 000000000..add7abd09 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go @@ -0,0 +1,220 @@ +// Code generated by protoc-gen-go. +// source: test.proto +// DO NOT EDIT! + +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type SearchResponse struct { + Results []*SearchResponse_Result `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` +} + +func (m *SearchResponse) Reset() { *m = SearchResponse{} } +func (m *SearchResponse) String() string { return proto.CompactTextString(m) } +func (*SearchResponse) ProtoMessage() {} +func (*SearchResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *SearchResponse) GetResults() []*SearchResponse_Result { + if m != nil { + return m.Results + } + return nil +} + +type SearchResponse_Result struct { + Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + Snippets []string `protobuf:"bytes,3,rep,name=snippets" json:"snippets,omitempty"` +} + +func (m *SearchResponse_Result) Reset() { *m = SearchResponse_Result{} } +func (m *SearchResponse_Result) String() string { return proto.CompactTextString(m) } +func (*SearchResponse_Result) ProtoMessage() {} +func (*SearchResponse_Result) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 0} } + +type SearchRequest struct { + Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` +} + +func (m *SearchRequest) Reset() { *m = SearchRequest{} } +func (m *SearchRequest) String() string { return proto.CompactTextString(m) } +func (*SearchRequest) ProtoMessage() {} +func (*SearchRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func init() { + proto.RegisterType((*SearchResponse)(nil), "grpc.testing.SearchResponse") + proto.RegisterType((*SearchResponse_Result)(nil), "grpc.testing.SearchResponse.Result") + proto.RegisterType((*SearchRequest)(nil), "grpc.testing.SearchRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for SearchService service + +type SearchServiceClient interface { + Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) + StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchService_StreamingSearchClient, error) +} + +type searchServiceClient struct { + cc *grpc.ClientConn +} + +func NewSearchServiceClient(cc *grpc.ClientConn) SearchServiceClient { + return &searchServiceClient{cc} +} + +func (c *searchServiceClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { + out := new(SearchResponse) + err := grpc.Invoke(ctx, "/grpc.testing.SearchService/Search", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *searchServiceClient) StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchService_StreamingSearchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_SearchService_serviceDesc.Streams[0], c.cc, "/grpc.testing.SearchService/StreamingSearch", opts...) 
+ if err != nil { + return nil, err + } + x := &searchServiceStreamingSearchClient{stream} + return x, nil +} + +type SearchService_StreamingSearchClient interface { + Send(*SearchRequest) error + Recv() (*SearchResponse, error) + grpc.ClientStream +} + +type searchServiceStreamingSearchClient struct { + grpc.ClientStream +} + +func (x *searchServiceStreamingSearchClient) Send(m *SearchRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *searchServiceStreamingSearchClient) Recv() (*SearchResponse, error) { + m := new(SearchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for SearchService service + +type SearchServiceServer interface { + Search(context.Context, *SearchRequest) (*SearchResponse, error) + StreamingSearch(SearchService_StreamingSearchServer) error +} + +func RegisterSearchServiceServer(s *grpc.Server, srv SearchServiceServer) { + s.RegisterService(&_SearchService_serviceDesc, srv) +} + +func _SearchService_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SearchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SearchServiceServer).Search(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.SearchService/Search", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SearchServiceServer).Search(ctx, req.(*SearchRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SearchService_StreamingSearch_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SearchServiceServer).StreamingSearch(&searchServiceStreamingSearchServer{stream}) +} + +type SearchService_StreamingSearchServer interface { + Send(*SearchResponse) error + Recv() (*SearchRequest, error) + grpc.ServerStream +} + +type searchServiceStreamingSearchServer struct { + grpc.ServerStream +} + +func (x *searchServiceStreamingSearchServer) Send(m *SearchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *searchServiceStreamingSearchServer) Recv() (*SearchRequest, error) { + m := new(SearchRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _SearchService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.SearchService", + HandlerType: (*SearchServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Search", + Handler: _SearchService_Search_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamingSearch", + Handler: _SearchService_StreamingSearch_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: fileDescriptor2, +} + +func init() { proto.RegisterFile("test.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 227 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e, + 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48, 0xd6, 0x03, 0x09, 0x64, + 0xe6, 0xa5, 0x2b, 0xcd, 0x65, 0xe4, 0xe2, 0x0b, 0x4e, 0x4d, 0x2c, 0x4a, 0xce, 0x08, 0x4a, 0x2d, + 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x15, 0xb2, 0xe5, 0x62, 0x2f, 0x4a, 0x2d, 0x2e, 0xcd, 0x29, 0x29, + 0x96, 0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0x52, 0xd6, 0x43, 0xd6, 0xa2, 0x87, 0xaa, 0x5c, 0x2f, + 0x08, 0xac, 0x36, 0x08, 0xa6, 0x47, 0xca, 0x87, 0x8b, 0x0d, 0x22, 0x24, 0x24, 0xc0, 
0xc5, 0x5c, + 0x5a, 0x94, 0x03, 0x34, 0x84, 0x51, 0x83, 0x33, 0x08, 0xc4, 0x14, 0x12, 0xe1, 0x62, 0x2d, 0xc9, + 0x2c, 0xc9, 0x49, 0x95, 0x60, 0x02, 0x8b, 0x41, 0x38, 0x42, 0x52, 0x5c, 0x1c, 0xc5, 0x79, 0x99, + 0x05, 0x05, 0xa9, 0x40, 0x1b, 0x99, 0x81, 0x36, 0x72, 0x06, 0xc1, 0xf9, 0x4a, 0xaa, 0x5c, 0xbc, + 0x30, 0xfb, 0x0a, 0x4b, 0x81, 0x0e, 0x00, 0x19, 0x01, 0x64, 0x14, 0x55, 0x42, 0x8d, 0x85, 0x70, + 0x8c, 0x96, 0x31, 0xc2, 0xd4, 0x05, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x0a, 0x39, 0x73, 0xb1, + 0x41, 0x04, 0x84, 0xa4, 0xb1, 0x3b, 0x1f, 0x6c, 0x9c, 0x94, 0x0c, 0x3e, 0xbf, 0x09, 0x05, 0x70, + 0xf1, 0x07, 0x97, 0x14, 0xa5, 0x26, 0xe6, 0x02, 0xe5, 0x28, 0x36, 0x4d, 0x83, 0xd1, 0x80, 0x31, + 0x89, 0x0d, 0x1c, 0x09, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x20, 0xd6, 0x09, 0xb8, 0x92, + 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go new file mode 100644 index 000000000..686090aa5 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -0,0 +1,395 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* +Package reflection implements server reflection service. + +The service implemented is defined in: +https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: + import "google.golang.org/grpc/reflection" + + s := grpc.NewServer() + pb.RegisterYourOwnServer(s, &server{}) + + // Register reflection service on gRPC server. 
+ reflection.Register(s) + + s.Serve(lis) + +*/ +package reflection // import "google.golang.org/grpc/reflection" + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +type serverReflectionServer struct { + s *grpc.Server + // TODO add more cache if necessary + serviceInfo map[string]grpc.ServiceInfo // cache for s.GetServiceInfo() +} + +// Register registers the server reflection service on the given gRPC server. +func Register(s *grpc.Server) { + rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ + s: s, + }) +} + +// protoMessage is used for type assertion on proto messages. +// Generated proto message implements function Descriptor(), but Descriptor() +// is not part of interface proto.Message. This interface is needed to +// call Descriptor(). +type protoMessage interface { + Descriptor() ([]byte, []int) +} + +// fileDescForType gets the file descriptor for the given type. +// The given type should be a proto message. +func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + enc, _ := m.Descriptor() + + return s.decodeFileDesc(enc) +} + +// decodeFileDesc does decompression and unmarshalling on the given +// file descriptor byte slice. +func (s *serverReflectionServer) decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { + raw, err := decompress(enc) + if err != nil { + return nil, fmt.Errorf("failed to decompress enc: %v", err) + } + + fd := new(dpb.FileDescriptorProto) + if err := proto.Unmarshal(raw, fd); err != nil { + return nil, fmt.Errorf("bad descriptor: %v", err) + } + return fd, nil +} + +// decompress does gzip decompression. 
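// Editor's illustrative sketch, not part of the vendored package: with a server
// that has called reflection.Register (as in the package example above), a client
// can list the registered services over the generated v1alpha stream. The target
// address, the insecure dial option, and the ListServices field name follow the
// usual protoc-gen-go conventions and are assumptions made for brevity.
//
//	import (
//		"fmt"
//		"log"
//
//		"golang.org/x/net/context"
//		"google.golang.org/grpc"
//		rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
//	)
//
//	func listServices() {
//		conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
//		if err != nil {
//			log.Fatalf("dial: %v", err)
//		}
//		defer conn.Close()
//		stream, err := rpb.NewServerReflectionClient(conn).ServerReflectionInfo(context.Background())
//		if err != nil {
//			log.Fatalf("reflection stream: %v", err)
//		}
//		req := &rpb.ServerReflectionRequest{
//			MessageRequest: &rpb.ServerReflectionRequest_ListServices{ListServices: ""},
//		}
//		if err := stream.Send(req); err != nil {
//			log.Fatalf("send: %v", err)
//		}
//		resp, err := stream.Recv()
//		if err != nil {
//			log.Fatalf("recv: %v", err)
//		}
//		if r, ok := resp.MessageResponse.(*rpb.ServerReflectionResponse_ListServicesResponse); ok {
//			for _, svc := range r.ListServicesResponse.Service {
//				fmt.Println(svc.Name)
//			}
//		}
//	}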
+func decompress(b []byte) ([]byte, error) { + r, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v\n", err) + } + out, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v\n", err) + } + return out, nil +} + +func (s *serverReflectionServer) typeForName(name string) (reflect.Type, error) { + pt := proto.MessageType(name) + if pt == nil { + return nil, fmt.Errorf("unknown type: %q", name) + } + st := pt.Elem() + + return st, nil +} + +func (s *serverReflectionServer) fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + + var extDesc *proto.ExtensionDesc + for id, desc := range proto.RegisteredExtensions(m) { + if id == ext { + extDesc = desc + break + } + } + + if extDesc == nil { + return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) + } + + extT := reflect.TypeOf(extDesc.ExtensionType).Elem() + + return s.fileDescForType(extT) +} + +func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + + exts := proto.RegisteredExtensions(m) + out := make([]int32, 0, len(exts)) + for id := range exts { + out = append(out, id) + } + return out, nil +} + +// fileDescEncodingByFilename finds the file descriptor for given filename, +// does marshalling on it and returns the marshalled result. +func (s *serverReflectionServer) fileDescEncodingByFilename(name string) ([]byte, error) { + enc := proto.FileDescriptor(name) + if enc == nil { + return nil, fmt.Errorf("unknown file: %v", name) + } + fd, err := s.decodeFileDesc(enc) + if err != nil { + return nil, err + } + return proto.Marshal(fd) +} + +// serviceMetadataForSymbol finds the metadata for name in s.serviceInfo. +// name should be a service name or a method name. +func (s *serverReflectionServer) serviceMetadataForSymbol(name string) (interface{}, error) { + if s.serviceInfo == nil { + s.serviceInfo = s.s.GetServiceInfo() + } + + // Check if it's a service name. + if info, ok := s.serviceInfo[name]; ok { + return info.Metadata, nil + } + + // Check if it's a method name. + pos := strings.LastIndex(name, ".") + // Not a valid method name. + if pos == -1 { + return nil, fmt.Errorf("unknown symbol: %v", name) + } + + info, ok := s.serviceInfo[name[:pos]] + // Substring before last "." is not a service name. + if !ok { + return nil, fmt.Errorf("unknown symbol: %v", name) + } + + // Search the method name in info.Methods. + var found bool + for _, m := range info.Methods { + if m.Name == name[pos+1:] { + found = true + break + } + } + if found { + return info.Metadata, nil + } + + return nil, fmt.Errorf("unknown symbol: %v", name) +} + +// fileDescEncodingContainingSymbol finds the file descriptor containing the given symbol, +// does marshalling on it and returns the marshalled result. +// The given symbol can be a type, a service or a method. +func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string) ([]byte, error) { + var ( + fd *dpb.FileDescriptorProto + ) + // Check if it's a type name. 
+ if st, err := s.typeForName(name); err == nil { + fd, err = s.fileDescForType(st) + if err != nil { + return nil, err + } + } else { // Check if it's a service name or a method name. + meta, err := s.serviceMetadataForSymbol(name) + + // Metadata not found. + if err != nil { + return nil, err + } + + // Metadata not valid. + enc, ok := meta.([]byte) + if !ok { + return nil, fmt.Errorf("invalid file descriptor for symbol: %v", name) + } + + fd, err = s.decodeFileDesc(enc) + if err != nil { + return nil, err + } + } + + return proto.Marshal(fd) +} + +// fileDescEncodingContainingExtension finds the file descriptor containing given extension, +// does marshalling on it and returns the marshalled result. +func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32) ([]byte, error) { + st, err := s.typeForName(typeName) + if err != nil { + return nil, err + } + fd, err := s.fileDescContainingExtension(st, extNum) + if err != nil { + return nil, err + } + return proto.Marshal(fd) +} + +// allExtensionNumbersForTypeName returns all extension numbers for the given type. +func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { + st, err := s.typeForName(name) + if err != nil { + return nil, err + } + extNums, err := s.allExtensionNumbersForType(st) + if err != nil { + return nil, err + } + return extNums, nil +} + +// ServerReflectionInfo is the reflection service handler. +func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + out := &rpb.ServerReflectionResponse{ + ValidHost: in.Host, + OriginalRequest: in, + } + switch req := in.MessageRequest.(type) { + case *rpb.ServerReflectionRequest_FileByFilename: + b, err := s.fileDescEncodingByFilename(req.FileByFilename) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_FileContainingSymbol: + b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_FileContainingExtension: + typeName := req.FileContainingExtension.ContainingType + extNum := req.FileContainingExtension.ExtensionNumber + b, err := s.fileDescEncodingContainingExtension(typeName, extNum) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType: + extNums, err := 
s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ + BaseTypeName: req.AllExtensionNumbersOfType, + ExtensionNumber: extNums, + }, + } + } + case *rpb.ServerReflectionRequest_ListServices: + if s.serviceInfo == nil { + s.serviceInfo = s.s.GetServiceInfo() + } + serviceResponses := make([]*rpb.ServiceResponse, 0, len(s.serviceInfo)) + for n := range s.serviceInfo { + serviceResponses = append(serviceResponses, &rpb.ServiceResponse{ + Name: n, + }) + } + out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &rpb.ListServiceResponse{ + Service: serviceResponses, + }, + } + default: + return grpc.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) + } + + if err := stream.Send(out); err != nil { + return err + } + } +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index fadf3394d..35ac9cc7b 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -41,9 +41,7 @@ import ( "io" "io/ioutil" "math" - "math/rand" "os" - "time" "github.com/golang/protobuf/proto" "golang.org/x/net/context" @@ -63,7 +61,7 @@ type Codec interface { String() string } -// protoCodec is a Codec implemetation with protobuf. It is the default codec for gRPC. +// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC. type protoCodec struct{} func (protoCodec) Marshal(v interface{}) ([]byte, error) { @@ -143,6 +141,8 @@ type callInfo struct { traceInfo traceInfo // in trace.go } +var defaultCallInfo = callInfo{failFast: true} + // CallOption configures a Call before it starts or extracts information from // a Call after it completes. type CallOption interface { @@ -181,6 +181,19 @@ func Trailer(md *metadata.MD) CallOption { }) } +// FailFast configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If failfast is true, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will retry +// the call if it fails due to a transient error. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md +func FailFast(failFast bool) CallOption { + return beforeCall(func(c *callInfo) error { + c.failFast = failFast + return nil + }) +} + // The format of the payload: compressed or not? type payloadFormat uint8 @@ -189,32 +202,49 @@ const ( compressionMade ) -// parser reads complelete gRPC messages from the underlying reader. +// parser reads complete gRPC messages from the underlying reader. type parser struct { - s io.Reader -} + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader -// recvMsg is to read a complete gRPC message from the stream. It is blocking if -// the message has not been complete yet. It returns the message and its type, -// EOF is returned with nil msg and 0 pf if the entire stream is done. Other -// non-nil error is returned if something is wrong on reading. 
-func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) { // The header of a gRPC message. Find more detail // at http://www.grpc.io/docs/guides/wire.html. - var buf [5]byte + header [5]byte +} - if _, err := io.ReadFull(p.s, buf[:]); err != nil { +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := io.ReadFull(p.r, p.header[:]); err != nil { return 0, nil, err } - pf = payloadFormat(buf[0]) - length := binary.BigEndian.Uint32(buf[1:]) + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { return pf, nil, nil } + if length > uint32(maxMsgSize) { + return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: msg = make([]byte, int(length)) - if _, err := io.ReadFull(p.s, msg); err != nil { + if _, err := io.ReadFull(p.r, msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } @@ -272,20 +302,17 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) er switch pf { case compressionNone: case compressionMade: - if recvCompress == "" { - return transport.StreamErrorf(codes.InvalidArgument, "grpc: invalid grpc-encoding %q with compression enabled", recvCompress) - } if dc == nil || recvCompress != dc.Type() { - return transport.StreamErrorf(codes.InvalidArgument, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + return transport.StreamErrorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } default: - return transport.StreamErrorf(codes.InvalidArgument, "grpc: received unexpected payload format %d", pf) + return transport.StreamErrorf(codes.Internal, "grpc: received unexpected payload format %d", pf) } return nil } -func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}) error { - pf, d, err := p.recvMsg() +func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int) error { + pf, d, err := p.recvMsg(maxMsgSize) if err != nil { return err } @@ -295,11 +322,16 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{ if pf == compressionMade { d, err = dc.Do(bytes.NewReader(d)) if err != nil { - return transport.StreamErrorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } } + if len(d) > maxMsgSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. 
+ return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize) + } if err := c.Unmarshal(d, m); err != nil { - return transport.StreamErrorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) } return nil } @@ -310,8 +342,8 @@ type rpcError struct { desc string } -func (e rpcError) Error() string { - return fmt.Sprintf("rpc error: code = %d desc = %q", e.code, e.desc) +func (e *rpcError) Error() string { + return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc) } // Code returns the error code for err if it was produced by the rpc system. @@ -320,7 +352,7 @@ func Code(err error) codes.Code { if err == nil { return codes.OK } - if e, ok := err.(rpcError); ok { + if e, ok := err.(*rpcError); ok { return e.code } return codes.Unknown @@ -332,7 +364,7 @@ func ErrorDesc(err error) string { if err == nil { return "" } - if e, ok := err.(rpcError); ok { + if e, ok := err.(*rpcError); ok { return e.desc } return err.Error() @@ -344,7 +376,7 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { if c == codes.OK { return nil } - return rpcError{ + return &rpcError{ code: c, desc: fmt.Sprintf(format, a...), } @@ -353,18 +385,37 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { // toRPCErr converts an error into a rpcError. func toRPCErr(err error) error { switch e := err.(type) { - case rpcError: + case *rpcError: return err case transport.StreamError: - return rpcError{ + return &rpcError{ code: e.Code, desc: e.Desc, } case transport.ConnectionError: - return rpcError{ + return &rpcError{ code: codes.Internal, desc: e.Desc, } + default: + switch err { + case context.DeadlineExceeded: + return &rpcError{ + code: codes.DeadlineExceeded, + desc: err.Error(), + } + case context.Canceled: + return &rpcError{ + code: codes.Canceled, + desc: err.Error(), + } + case ErrClientConnClosing: + return &rpcError{ + code: codes.FailedPrecondition, + desc: err.Error(), + } + } + } return Errorf(codes.Unknown, "%v", err) } @@ -397,34 +448,10 @@ func convertCode(err error) codes.Code { return codes.Unknown } -const ( - // how long to wait after the first failure before retrying - baseDelay = 1.0 * time.Second - // upper bound of backoff delay - maxDelay = 120 * time.Second - // backoff increases by this factor on each retry - backoffFactor = 1.6 - // backoff is randomized downwards by this factor - backoffJitter = 0.2 -) - -func backoff(retries int) (t time.Duration) { - if retries == 0 { - return baseDelay - } - backoff, max := float64(baseDelay), float64(maxDelay) - for backoff < max && retries > 0 { - backoff *= backoffFactor - retries-- - } - if backoff > max { - backoff = max - } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + backoffJitter*(rand.Float64()*2-1) - if backoff < 0 { - return 0 - } - return time.Duration(backoff) -} +// SupportPackageIsVersion3 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the grpc package. +// +// This constant may be renamed in the future if a change in the generated code +// requires a synchronised update of grpc-go and protoc-gen-go. This constant +// should not be referenced from any other code. 
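// Editor's illustrative sketch, not part of the vendored package: it shows how the
// new FailFast call option and the error helpers above (Code, ErrorDesc) combine on
// the client side, reusing the SearchService test client generated earlier in this
// patch. The existing *grpc.ClientConn and the one-second deadline are assumptions.
//
//	import (
//		"log"
//		"time"
//
//		"golang.org/x/net/context"
//		"google.golang.org/grpc"
//		"google.golang.org/grpc/codes"
//		testpb "google.golang.org/grpc/reflection/grpc_testing"
//	)
//
//	func search(cc *grpc.ClientConn, query string) {
//		client := testpb.NewSearchServiceClient(cc)
//		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
//		defer cancel()
//		// FailFast(false) lets the call wait for a usable connection instead of
//		// failing immediately while the connection is temporarily unavailable.
//		_, err := client.Search(ctx, &testpb.SearchRequest{Query: query}, grpc.FailFast(false))
//		switch grpc.Code(err) {
//		case codes.OK:
//			// success
//		case codes.DeadlineExceeded:
//			// toRPCErr maps context.DeadlineExceeded to codes.DeadlineExceeded.
//			log.Print("search timed out")
//		default:
//			log.Printf("search failed: %s", grpc.ErrorDesc(err))
//		}
//	}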
+const SupportPackageIsVersion3 = true diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index eb56b3443..1ed8aac9e 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -52,11 +52,12 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/transport" ) -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -72,6 +73,7 @@ type ServiceDesc struct { HandlerType interface{} Methods []MethodDesc Streams []StreamDesc + Metadata interface{} } // service consists of the information of the server serving this service and @@ -80,28 +82,38 @@ type service struct { server interface{} // the server for service methods md map[string]*MethodDesc sd map[string]*StreamDesc + mdata interface{} } // Server is a gRPC server to serve RPC requests. type Server struct { opts options - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[io.Closer]bool + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[io.Closer]bool + drain bool + // A CondVar to let GracefulStop() blocks until all the pending RPCs are finished + // and all the transport goes away. + cv *sync.Cond m map[string]*service // service name -> service info events trace.EventLog } type options struct { - creds credentials.Credentials + creds credentials.TransportCredentials codec Codec cp Compressor dc Decompressor + maxMsgSize int + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor maxConcurrentStreams uint32 useHandlerImpl bool // use http.Handler-based server } +var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit + // A ServerOption sets options. type ServerOption func(*options) @@ -112,18 +124,28 @@ func CustomCodec(codec Codec) ServerOption { } } +// RPCCompressor returns a ServerOption that sets a compressor for outbound messages. func RPCCompressor(cp Compressor) ServerOption { return func(o *options) { o.cp = cp } } +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages. func RPCDecompressor(dc Decompressor) ServerOption { return func(o *options) { o.dc = dc } } +// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound mesages. +// If this is not set, gRPC uses the default 4MB. +func MaxMsgSize(m int) ServerOption { + return func(o *options) { + o.maxMsgSize = m + } +} + // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { @@ -133,16 +155,40 @@ func MaxConcurrentStreams(n uint32) ServerOption { } // Creds returns a ServerOption that sets credentials for server connections. -func Creds(c credentials.Credentials) ServerOption { +func Creds(c credentials.TransportCredentials) ServerOption { return func(o *options) { o.creds = c } } +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. 
The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return func(o *options) { + if o.unaryInt != nil { + panic("The unary server interceptor has been set.") + } + o.unaryInt = i + } +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return func(o *options) { + if o.streamInt != nil { + panic("The stream server interceptor has been set.") + } + o.streamInt = i + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { var opts options + opts.maxMsgSize = defaultMaxMsgSize for _, o := range opt { o(&opts) } @@ -156,6 +202,7 @@ func NewServer(opt ...ServerOption) *Server { conns: make(map[io.Closer]bool), m: make(map[string]*service), } + s.cv = sync.NewCond(&s.mu) if EnableTracing { _, file, line, _ := runtime.Caller(1) s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) @@ -202,6 +249,7 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { server: ss, md: make(map[string]*MethodDesc), sd: make(map[string]*StreamDesc), + mdata: sd.Metadata, } for i := range sd.Methods { d := &sd.Methods[i] @@ -214,6 +262,52 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) { s.m[sd.ServiceName] = srv } +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC methid info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata interface{} +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.m { + methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) + for m := range srv.md { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.sd { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + var ( // ErrServerStopped indicates that the operation is now illegal because of // the server being stopped. @@ -221,33 +315,35 @@ var ( ) func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - creds, ok := s.opts.creds.(credentials.TransportAuthenticator) - if !ok { + if s.opts.creds == nil { return rawConn, nil, nil } - return creds.ServerHandshake(rawConn) + return s.opts.creds.ServerHandshake(rawConn) } // Serve accepts incoming connections on the listener lis, creating a new // ServerTransport and service goroutine for each. 
The service goroutines // read gRPC requests and then call the registered handlers to reply to them. -// Service returns when lis.Accept fails. +// Service returns when lis.Accept fails. lis will be closed when +// this method returns. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") if s.lis == nil { s.mu.Unlock() + lis.Close() return ErrServerStopped } s.lis[lis] = true s.mu.Unlock() defer func() { - lis.Close() s.mu.Lock() - delete(s.lis, lis) + if s.lis != nil && s.lis[lis] { + lis.Close() + delete(s.lis, lis) + } s.mu.Unlock() }() - listenerAddr := lis.Addr() for { rawConn, err := lis.Accept() if err != nil { @@ -258,19 +354,19 @@ func (s *Server) Serve(lis net.Listener) error { } // Start a new goroutine to deal with rawConn // so we don't stall this Accept loop goroutine. - go s.handleRawConn(listenerAddr, rawConn) + go s.handleRawConn(rawConn) } } // handleRawConn is run in its own goroutine and handles a just-accepted // connection that has not had any I/O performed on it yet. -func (s *Server) handleRawConn(listenerAddr net.Addr, rawConn net.Conn) { +func (s *Server) handleRawConn(rawConn net.Conn) { conn, authInfo, err := s.useTransportAuthenticator(rawConn) if err != nil { s.mu.Lock() s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() - grpclog.Println("grpc: Server.Serve failed to complete security handshake.") + grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) rawConn.Close() return } @@ -284,7 +380,7 @@ func (s *Server) handleRawConn(listenerAddr net.Addr, rawConn net.Conn) { s.mu.Unlock() if s.opts.useHandlerImpl { - s.serveUsingHandler(listenerAddr, conn) + s.serveUsingHandler(conn) } else { s.serveNewHTTP2Transport(conn, authInfo) } @@ -340,29 +436,18 @@ var _ http.Handler = (*Server)(nil) // method as one of the environment types. // // conn is the *tls.Conn that's already been authenticated. -func (s *Server) serveUsingHandler(listenerAddr net.Addr, conn net.Conn) { +func (s *Server) serveUsingHandler(conn net.Conn) { if !s.addConn(conn) { conn.Close() return } defer s.removeConn(conn) - connDone := make(chan struct{}) - hs := &http.Server{ - Handler: s, - ConnState: func(c net.Conn, cs http.ConnState) { - if cs == http.StateClosed { - close(connDone) - } - }, - } - if err := http2.ConfigureServer(hs, &http2.Server{ + h2s := &http2.Server{ MaxConcurrentStreams: s.opts.maxConcurrentStreams, - }); err != nil { - grpclog.Fatalf("grpc: http2.ConfigureServer: %v", err) - return } - hs.Serve(&singleConnListener{addr: listenerAddr, conn: conn}) - <-connDone + h2s.ServeConn(conn, &http2.ServeConnOpts{ + Handler: s, + }) } func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -400,7 +485,7 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea func (s *Server) addConn(c io.Closer) bool { s.mu.Lock() defer s.mu.Unlock() - if s.conns == nil { + if s.conns == nil || s.drain { return false } s.conns[c] = true @@ -412,6 +497,7 @@ func (s *Server) removeConn(c io.Closer) { defer s.mu.Unlock() if s.conns != nil { delete(s.conns, c) + s.cv.Signal() } } @@ -446,15 +532,26 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } }() } - p := &parser{s: stream} + if s.opts.cp != nil { + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. 
+ stream.SetSendCompress(s.opts.cp.Type()) + } + p := &parser{r: stream} for { - pf, req, err := p.recvMsg() + pf, req, err := p.recvMsg(s.opts.maxMsgSize) if err == io.EOF { // The entire stream is done (for unary RPC only). return err } + if err == io.ErrUnexpectedEOF { + err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"} + } if err != nil { switch err := err.(type) { + case *rpcError: + if err := t.WriteStatus(stream, err.code, err.desc); err != nil { + grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) + } case transport.ConnectionError: // Nothing to do here. case transport.StreamError: @@ -494,6 +591,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return err } } + if len(req) > s.opts.maxMsgSize { + // TODO: Revisit the error code. Currently keep it consistent with + // java implementation. + statusCode = codes.Internal + statusDesc = fmt.Sprintf("grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize) + } if err := s.opts.codec.Unmarshal(req, v); err != nil { return err } @@ -502,9 +605,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - reply, appErr := md.Handler(srv.server, stream.Context(), df) + reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) if appErr != nil { - if err, ok := appErr.(rpcError); ok { + if err, ok := appErr.(*rpcError); ok { statusCode = err.code statusDesc = err.desc } else { @@ -528,9 +631,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Last: true, Delay: false, } - if s.opts.cp != nil { - stream.SetSendCompress(s.opts.cp.Type()) - } if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { switch err := err.(type) { case transport.ConnectionError: @@ -556,13 +656,14 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp stream.SetSendCompress(s.opts.cp.Type()) } ss := &serverStream{ - t: t, - s: stream, - p: &parser{s: stream}, - codec: s.opts.codec, - cp: s.opts.cp, - dc: s.opts.dc, - trInfo: trInfo, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.opts.codec, + cp: s.opts.cp, + dc: s.opts.dc, + maxMsgSize: s.opts.maxMsgSize, + trInfo: trInfo, } if ss.cp != nil { ss.cbuf = new(bytes.Buffer) @@ -580,8 +681,19 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() }() } - if appErr := sd.Handler(srv.server, ss); appErr != nil { - if err, ok := appErr.(rpcError); ok { + var appErr error + if s.opts.streamInt == nil { + appErr = sd.Handler(srv.server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler) + } + if appErr != nil { + if err, ok := appErr.(*rpcError); ok { ss.statusCode = err.code ss.statusDesc = err.desc } else if err, ok := appErr.(transport.StreamError); ok { @@ -683,14 +795,16 @@ func (s *Server) Stop() { s.mu.Lock() listeners := s.lis s.lis = nil - cs := s.conns + st := s.conns s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. 
+ s.cv.Signal() s.mu.Unlock() for lis := range listeners { lis.Close() } - for c := range cs { + for c := range st { c.Close() } @@ -702,10 +816,44 @@ func (s *Server) Stop() { s.mu.Unlock() } -// TestingCloseConns closes all exiting transports but keeps s.lis accepting new -// connections. -// This is only for tests and is subject to removal. -func (s *Server) TestingCloseConns() { +// GracefulStop stops the gRPC server gracefully. It stops the server to accept new +// connections and RPCs and blocks until all the pending RPCs are finished. +func (s *Server) GracefulStop() { + s.mu.Lock() + defer s.mu.Unlock() + if s.drain == true || s.conns == nil { + return + } + s.drain = true + for lis := range s.lis { + lis.Close() + } + s.lis = nil + for c := range s.conns { + c.(transport.ServerTransport).Drain() + } + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } +} + +func init() { + internal.TestingCloseConns = func(arg interface{}) { + arg.(*Server).testingCloseConns() + } + internal.TestingUseHandlerImpl = func(arg interface{}) { + arg.(*Server).opts.useHandlerImpl = true + } +} + +// testingCloseConns closes all existing transports but keeps s.lis +// accepting new connections. +func (s *Server) testingCloseConns() { s.mu.Lock() for c := range s.conns { c.Close() @@ -714,13 +862,6 @@ func (s *Server) TestingCloseConns() { s.mu.Unlock() } -// TestingUseHandlerImpl enables the http.Handler-based server implementation. -// It must be called before Serve and requires TLS credentials. -// This is only for tests and is subject to removal. -func (s *Server) TestingUseHandlerImpl() { - s.opts.useHandlerImpl = true -} - // SendHeader sends header metadata. It may be called at most once from a unary // RPC handler. The ctx is the RPC handler's Context or one derived from it. func SendHeader(ctx context.Context, md metadata.MD) error { @@ -751,30 +892,3 @@ func SetTrailer(ctx context.Context, md metadata.MD) error { } return stream.SetTrailer(md) } - -// singleConnListener is a net.Listener that yields a single conn. -type singleConnListener struct { - mu sync.Mutex - addr net.Addr - conn net.Conn // nil if done -} - -func (ln *singleConnListener) Addr() net.Addr { return ln.addr } - -func (ln *singleConnListener) Close() error { - ln.mu.Lock() - defer ln.mu.Unlock() - ln.conn = nil - return nil -} - -func (ln *singleConnListener) Accept() (net.Conn, error) { - ln.mu.Lock() - defer ln.mu.Unlock() - c := ln.conn - if c == nil { - return nil, io.EOF - } - ln.conn = nil - return c, nil -} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index ea685cc15..51df3f01d 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -37,6 +37,7 @@ import ( "bytes" "errors" "io" + "math" "sync" "time" @@ -47,12 +48,14 @@ import ( "google.golang.org/grpc/transport" ) -type streamHandler func(srv interface{}, stream ServerStream) error +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. type StreamDesc struct { StreamName string - Handler streamHandler + Handler StreamHandler // At least one of these is true. ServerStreams bool @@ -67,7 +70,8 @@ type Stream interface { // breaks. // On error, it aborts the stream and returns an RPC status on client // side. 
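To illustrate the GracefulStop method added above, here is a minimal usage sketch (not part of this patch): the server drains in-flight RPCs on interrupt and falls back to Stop after a deadline, relying on the note above that a concurrent Stop interrupts GracefulStop. The listen address, signal handling, and 10-second timeout are assumptions for the example only.

package main

import (
	"log"
	"net"
	"os"
	"os/signal"
	"time"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s := grpc.NewServer()
	// Generated services would be registered here.
	go func() {
		if err := s.Serve(lis); err != nil {
			log.Printf("Serve returned: %v", err)
		}
	}()

	// Wait for an interrupt, then drain pending RPCs.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	<-sig

	done := make(chan struct{})
	go func() {
		s.GracefulStop() // stop accepting new connections/RPCs, wait for pending ones
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(10 * time.Second):
		s.Stop() // hard stop; also interrupts the GracefulStop above
	}
}
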
On server side, it simply returns the error to the caller. - // SendMsg is called by generated code. + // SendMsg is called by generated code. Also Users can call SendMsg + // directly when it is really needed in their use cases. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message or the stream is // done. On client side, it returns io.EOF when the stream is done. On @@ -76,17 +80,14 @@ type Stream interface { RecvMsg(m interface{}) error } -// ClientStream defines the interface a client stream has to satify. +// ClientStream defines the interface a client stream has to satisfy. type ClientStream interface { - // Header returns the header metedata received from the server if there + // Header returns the header metadata received from the server if there // is any. It blocks if the metadata is not ready to read. Header() (metadata.MD, error) - // Trailer returns the trailer metadata from the server. It must be called - // after stream.Recv() returns non-nil error (including io.EOF) for - // bi-directional streaming and server streaming or stream.CloseAndRecv() - // returns for client streaming in order to receive trailer metadata if - // present. Otherwise, it could returns an empty MD even though trailer - // is present. + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). Trailer() metadata.MD // CloseSend closes the send direction of the stream. It closes the stream // when non-nil error is met. @@ -96,58 +97,118 @@ type ClientStream interface { // NewClientStream creates a new Stream for the client side. This is called // by generated code. -func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { var ( t transport.ClientTransport - err error + s *transport.Stream + put func() ) - t, err = cc.dopts.picker.Pick(ctx) - if err != nil { - return nil, toRPCErr(err) + c := defaultCallInfo + for _, o := range opts { + if err := o.before(&c); err != nil { + return nil, toRPCErr(err) + } } - // TODO(zhaoq): CallOption is omitted. Add support when it is needed. callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, - Flush: desc.ServerStreams&&desc.ClientStreams, + Flush: desc.ServerStreams && desc.ClientStreams, } if cc.dopts.cp != nil { callHdr.SendCompress = cc.dopts.cp.Type() } + var trInfo traceInfo + if EnableTracing { + trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + trInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + defer func() { + if err != nil { + // Need to call tr.finish() if error is returned. + // Because tr will not be returned to caller. + trInfo.tr.LazyPrintf("RPC: [%v]", err) + trInfo.tr.SetError() + trInfo.tr.Finish() + } + }() + } + gopts := BalancerGetOptions{ + BlockingWait: !c.failFast, + } + for { + t, put, err = cc.getTransport(ctx, gopts) + if err != nil { + // TODO(zhaoq): Probably revisit the error handling. 
+ if _, ok := err.(*rpcError); ok { + return nil, err + } + if err == errConnClosing || err == errConnUnavailable { + if c.failFast { + return nil, Errorf(codes.Unavailable, "%v", err) + } + continue + } + // All the other errors are treated as Internal errors. + return nil, Errorf(codes.Internal, "%v", err) + } + + s, err = t.NewStream(ctx, callHdr) + if err != nil { + if put != nil { + put() + put = nil + } + if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { + if c.failFast { + return nil, toRPCErr(err) + } + continue + } + return nil, toRPCErr(err) + } + break + } cs := &clientStream{ - desc: desc, - codec: cc.dopts.codec, - cp: cc.dopts.cp, - dc: cc.dopts.dc, + opts: opts, + c: c, + desc: desc, + codec: cc.dopts.codec, + cp: cc.dopts.cp, + dc: cc.dopts.dc, + + put: put, + t: t, + s: s, + p: &parser{r: s}, + tracing: EnableTracing, + trInfo: trInfo, } if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() cs.cbuf = new(bytes.Buffer) } - if cs.tracing { - cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) - cs.trInfo.firstLine.client = true - if deadline, ok := ctx.Deadline(); ok { - cs.trInfo.firstLine.deadline = deadline.Sub(time.Now()) - } - cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false) - ctx = trace.NewContext(ctx, cs.trInfo.tr) - } - s, err := t.NewStream(ctx, callHdr) - if err != nil { - cs.finish(err) - return nil, toRPCErr(err) - } - cs.t = t - cs.s = s - cs.p = &parser{s: s} - // Listen on ctx.Done() to detect cancellation when there is no pending - // I/O operations on this stream. + // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination + // when there is no pending I/O operations on this stream. go func() { select { case <-t.Error(): // Incur transport error, simply exit. + case <-s.Done(): + // TODO: The trace of the RPC is terminated here when there is no pending + // I/O, which is probably not the optimal solution. + if s.StatusCode() == codes.OK { + cs.finish(nil) + } else { + cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc())) + } + cs.closeTransportStream(nil) + case <-s.GoAway(): + cs.finish(errConnDrain) + cs.closeTransportStream(errConnDrain) case <-s.Context().Done(): err := s.Context().Err() cs.finish(err) @@ -159,6 +220,8 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth // clientStream implements a client side Stream. type clientStream struct { + opts []CallOption + c callInfo t transport.ClientTransport s *transport.Stream p *parser @@ -171,6 +234,7 @@ type clientStream struct { tracing bool // set to EnableTracing when the clientStream is created. mu sync.Mutex + put func() closed bool // trInfo.tr is set when the clientStream is created (if EnableTracing is true), // and is set to nil when the clientStream's finish method is called. @@ -204,7 +268,20 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { cs.mu.Unlock() } defer func() { - if err == nil || err == io.EOF { + if err != nil { + cs.finish(err) + } + if err == nil { + return + } + if err == io.EOF { + // Specialize the process for server streaming. SendMesg is only called + // once when creating the stream object. io.EOF needs to be skipped when + // the rpc is early finished (before the stream object is created.). + // TODO: It is probably better to move this into the generated code. 
+ if !cs.desc.ClientStreams && cs.desc.ServerStreams { + err = nil + } return } if _, ok := err.(transport.ConnectionError); !ok { @@ -225,7 +302,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { } func (cs *clientStream) RecvMsg(m interface{}) (err error) { - err = recv(cs.p, cs.codec, cs.s, cs.dc, m) + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32) defer func() { // err != nil indicates the termination of the stream. if err != nil { @@ -244,16 +321,17 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { return } // Special handling for client streaming rpc. - err = recv(cs.p, cs.codec, cs.s, cs.dc, m) + err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32) cs.closeTransportStream(err) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } if err == io.EOF { if cs.s.StatusCode() == codes.OK { + cs.finish(err) return nil } - return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) + return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) } return toRPCErr(err) } @@ -265,15 +343,20 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) { // Returns io.EOF to indicate the end of the stream. return } - return Errorf(cs.s.StatusCode(), cs.s.StatusDesc()) + return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) } return toRPCErr(err) } func (cs *clientStream) CloseSend() (err error) { err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) + defer func() { + if err != nil { + cs.finish(err) + } + }() if err == nil || err == io.EOF { - return + return nil } if _, ok := err.(transport.ConnectionError); !ok { cs.closeTransportStream(err) @@ -294,11 +377,18 @@ func (cs *clientStream) closeTransportStream(err error) { } func (cs *clientStream) finish(err error) { + cs.mu.Lock() + defer cs.mu.Unlock() + for _, o := range cs.opts { + o.after(&cs.c) + } + if cs.put != nil { + cs.put() + cs.put = nil + } if !cs.tracing { return } - cs.mu.Lock() - defer cs.mu.Unlock() if cs.trInfo.tr != nil { if err == nil || err == io.EOF { cs.trInfo.tr.LazyPrintf("RPC: [OK]") @@ -332,6 +422,7 @@ type serverStream struct { cp Compressor dc Decompressor cbuf *bytes.Buffer + maxMsgSize int statusCode codes.Code statusDesc string trInfo *traceInfo @@ -398,5 +489,5 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { ss.mu.Unlock() } }() - return recv(ss.p, ss.codec, ss.s, ss.dc, m) + return recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize) } diff --git a/vendor/google.golang.org/grpc/stress/client/main.go b/vendor/google.golang.org/grpc/stress/client/main.go new file mode 100644 index 000000000..4579aab46 --- /dev/null +++ b/vendor/google.golang.org/grpc/stress/client/main.go @@ -0,0 +1,298 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +// client starts an interop client to do stress test and a metrics server to report qps. +package main + +import ( + "flag" + "fmt" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/interop" + testpb "google.golang.org/grpc/interop/grpc_testing" + metricspb "google.golang.org/grpc/stress/grpc_testing" +) + +var ( + serverAddresses = flag.String("server_addresses", "localhost:8080", "a list of server addresses") + testCases = flag.String("test_cases", "", "a list of test cases along with the relative weights") + testDurationSecs = flag.Int("test_duration_secs", -1, "test duration in seconds") + numChannelsPerServer = flag.Int("num_channels_per_server", 1, "Number of channels (i.e connections) to each server") + numStubsPerChannel = flag.Int("num_stubs_per_channel", 1, "Number of client stubs per each connection to server") + metricsPort = flag.Int("metrics_port", 8081, "The port at which the stress client exposes QPS metrics") +) + +// testCaseWithWeight contains the test case type and its weight. +type testCaseWithWeight struct { + name string + weight int +} + +// parseTestCases converts test case string to a list of struct testCaseWithWeight. +func parseTestCases(testCaseString string) []testCaseWithWeight { + testCaseStrings := strings.Split(testCaseString, ",") + testCases := make([]testCaseWithWeight, len(testCaseStrings)) + for i, str := range testCaseStrings { + testCase := strings.Split(str, ":") + if len(testCase) != 2 { + panic(fmt.Sprintf("invalid test case with weight: %s", str)) + } + // Check if test case is supported. + switch testCase[0] { + case + "empty_unary", + "large_unary", + "client_streaming", + "server_streaming", + "empty_stream": + default: + panic(fmt.Sprintf("unknown test type: %s", testCase[0])) + } + testCases[i].name = testCase[0] + w, err := strconv.Atoi(testCase[1]) + if err != nil { + panic(fmt.Sprintf("%v", err)) + } + testCases[i].weight = w + } + return testCases +} + +// weightedRandomTestSelector defines a weighted random selector for test case types. +type weightedRandomTestSelector struct { + tests []testCaseWithWeight + totalWeight int +} + +// newWeightedRandomTestSelector constructs a weightedRandomTestSelector with the given list of testCaseWithWeight. 
+func newWeightedRandomTestSelector(tests []testCaseWithWeight) *weightedRandomTestSelector { + var totalWeight int + for _, t := range tests { + totalWeight += t.weight + } + rand.Seed(time.Now().UnixNano()) + return &weightedRandomTestSelector{tests, totalWeight} +} + +func (selector weightedRandomTestSelector) getNextTest() string { + random := rand.Intn(selector.totalWeight) + var weightSofar int + for _, test := range selector.tests { + weightSofar += test.weight + if random < weightSofar { + return test.name + } + } + panic("no test case selected by weightedRandomTestSelector") +} + +// gauge stores the qps of one interop client (one stub). +type gauge struct { + mutex sync.RWMutex + val int64 +} + +func (g *gauge) set(v int64) { + g.mutex.Lock() + defer g.mutex.Unlock() + g.val = v +} + +func (g *gauge) get() int64 { + g.mutex.RLock() + defer g.mutex.RUnlock() + return g.val +} + +// server implements metrics server functions. +type server struct { + mutex sync.RWMutex + // gauges is a map from /stress_test/server_/channel_/stub_/qps to its qps gauge. + gauges map[string]*gauge +} + +// newMetricsServer returns a new metrics server. +func newMetricsServer() *server { + return &server{gauges: make(map[string]*gauge)} +} + +// GetAllGauges returns all gauges. +func (s *server) GetAllGauges(in *metricspb.EmptyMessage, stream metricspb.MetricsService_GetAllGaugesServer) error { + s.mutex.RLock() + defer s.mutex.RUnlock() + + for name, gauge := range s.gauges { + if err := stream.Send(&metricspb.GaugeResponse{Name: name, Value: &metricspb.GaugeResponse_LongValue{LongValue: gauge.get()}}); err != nil { + return err + } + } + return nil +} + +// GetGauge returns the gauge for the given name. +func (s *server) GetGauge(ctx context.Context, in *metricspb.GaugeRequest) (*metricspb.GaugeResponse, error) { + s.mutex.RLock() + defer s.mutex.RUnlock() + + if g, ok := s.gauges[in.Name]; ok { + return &metricspb.GaugeResponse{Name: in.Name, Value: &metricspb.GaugeResponse_LongValue{LongValue: g.get()}}, nil + } + return nil, grpc.Errorf(codes.InvalidArgument, "gauge with name %s not found", in.Name) +} + +// createGauge creates a guage using the given name in metrics server. +func (s *server) createGauge(name string) *gauge { + s.mutex.Lock() + defer s.mutex.Unlock() + + if _, ok := s.gauges[name]; ok { + // gauge already exists. + panic(fmt.Sprintf("gauge %s already exists", name)) + } + var g gauge + s.gauges[name] = &g + return &g +} + +func startServer(server *server, port int) { + lis, err := net.Listen("tcp", ":"+strconv.Itoa(port)) + if err != nil { + grpclog.Fatalf("failed to listen: %v", err) + } + + s := grpc.NewServer() + metricspb.RegisterMetricsServiceServer(s, server) + s.Serve(lis) + +} + +// performRPCs uses weightedRandomTestSelector to select test case and runs the tests. 
+func performRPCs(gauge *gauge, conn *grpc.ClientConn, selector *weightedRandomTestSelector, stop <-chan bool) { + client := testpb.NewTestServiceClient(conn) + var numCalls int64 + startTime := time.Now() + for { + done := make(chan bool, 1) + go func() { + test := selector.getNextTest() + switch test { + case "empty_unary": + interop.DoEmptyUnaryCall(client) + case "large_unary": + interop.DoLargeUnaryCall(client) + case "client_streaming": + interop.DoClientStreaming(client) + case "server_streaming": + interop.DoServerStreaming(client) + case "empty_stream": + interop.DoEmptyStream(client) + } + done <- true + }() + select { + case <-stop: + return + case <-done: + numCalls++ + gauge.set(int64(float64(numCalls) / time.Since(startTime).Seconds())) + } + } +} + +func logParameterInfo(addresses []string, tests []testCaseWithWeight) { + grpclog.Printf("server_addresses: %s", *serverAddresses) + grpclog.Printf("test_cases: %s", *testCases) + grpclog.Printf("test_duration-secs: %d", *testDurationSecs) + grpclog.Printf("num_channels_per_server: %d", *numChannelsPerServer) + grpclog.Printf("num_stubs_per_channel: %d", *numStubsPerChannel) + grpclog.Printf("metrics_port: %d", *metricsPort) + + grpclog.Println("addresses:") + for i, addr := range addresses { + grpclog.Printf("%d. %s\n", i+1, addr) + } + grpclog.Println("tests:") + for i, test := range tests { + grpclog.Printf("%d. %v\n", i+1, test) + } +} + +func main() { + flag.Parse() + addresses := strings.Split(*serverAddresses, ",") + tests := parseTestCases(*testCases) + logParameterInfo(addresses, tests) + testSelector := newWeightedRandomTestSelector(tests) + metricsServer := newMetricsServer() + + var wg sync.WaitGroup + wg.Add(len(addresses) * *numChannelsPerServer * *numStubsPerChannel) + stop := make(chan bool) + + for serverIndex, address := range addresses { + for connIndex := 0; connIndex < *numChannelsPerServer; connIndex++ { + conn, err := grpc.Dial(address, grpc.WithInsecure()) + if err != nil { + grpclog.Fatalf("Fail to dial: %v", err) + } + defer conn.Close() + for clientIndex := 0; clientIndex < *numStubsPerChannel; clientIndex++ { + name := fmt.Sprintf("/stress_test/server_%d/channel_%d/stub_%d/qps", serverIndex+1, connIndex+1, clientIndex+1) + go func() { + defer wg.Done() + g := metricsServer.createGauge(name) + performRPCs(g, conn, testSelector, stop) + }() + } + + } + } + go startServer(metricsServer, *metricsPort) + if *testDurationSecs > 0 { + time.Sleep(time.Duration(*testDurationSecs) * time.Second) + close(stop) + } + wg.Wait() + grpclog.Printf(" ===== ALL DONE ===== ") + +} diff --git a/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go new file mode 100644 index 000000000..4ad4ccdb2 --- /dev/null +++ b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go @@ -0,0 +1,361 @@ +// Code generated by protoc-gen-go. +// source: metrics.proto +// DO NOT EDIT! + +/* +Package grpc_testing is a generated protocol buffer package. + +It is generated from these files: + metrics.proto + +It has these top-level messages: + GaugeResponse + GaugeRequest + EmptyMessage +*/ +package grpc_testing + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Reponse message containing the gauge name and value +type GaugeResponse struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Types that are valid to be assigned to Value: + // *GaugeResponse_LongValue + // *GaugeResponse_DoubleValue + // *GaugeResponse_StringValue + Value isGaugeResponse_Value `protobuf_oneof:"value"` +} + +func (m *GaugeResponse) Reset() { *m = GaugeResponse{} } +func (m *GaugeResponse) String() string { return proto.CompactTextString(m) } +func (*GaugeResponse) ProtoMessage() {} +func (*GaugeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isGaugeResponse_Value interface { + isGaugeResponse_Value() +} + +type GaugeResponse_LongValue struct { + LongValue int64 `protobuf:"varint,2,opt,name=long_value,json=longValue,oneof"` +} +type GaugeResponse_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,oneof"` +} +type GaugeResponse_StringValue struct { + StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,oneof"` +} + +func (*GaugeResponse_LongValue) isGaugeResponse_Value() {} +func (*GaugeResponse_DoubleValue) isGaugeResponse_Value() {} +func (*GaugeResponse_StringValue) isGaugeResponse_Value() {} + +func (m *GaugeResponse) GetValue() isGaugeResponse_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *GaugeResponse) GetLongValue() int64 { + if x, ok := m.GetValue().(*GaugeResponse_LongValue); ok { + return x.LongValue + } + return 0 +} + +func (m *GaugeResponse) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*GaugeResponse_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *GaugeResponse) GetStringValue() string { + if x, ok := m.GetValue().(*GaugeResponse_StringValue); ok { + return x.StringValue + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*GaugeResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GaugeResponse_OneofMarshaler, _GaugeResponse_OneofUnmarshaler, _GaugeResponse_OneofSizer, []interface{}{ + (*GaugeResponse_LongValue)(nil), + (*GaugeResponse_DoubleValue)(nil), + (*GaugeResponse_StringValue)(nil), + } +} + +func _GaugeResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GaugeResponse) + // value + switch x := m.Value.(type) { + case *GaugeResponse_LongValue: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.LongValue)) + case *GaugeResponse_DoubleValue: + b.EncodeVarint(3<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.DoubleValue)) + case *GaugeResponse_StringValue: + b.EncodeVarint(4<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case nil: + default: + return fmt.Errorf("GaugeResponse.Value has unexpected type %T", x) + } + return nil +} + +func _GaugeResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GaugeResponse) + switch tag { + case 2: // value.long_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &GaugeResponse_LongValue{int64(x)} + return true, err + case 3: // value.double_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = &GaugeResponse_DoubleValue{math.Float64frombits(x)} + return true, err + case 4: // value.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &GaugeResponse_StringValue{x} + return true, err + default: + return false, nil + } +} + +func _GaugeResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GaugeResponse) + // value + switch x := m.Value.(type) { + case *GaugeResponse_LongValue: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.LongValue)) + case *GaugeResponse_DoubleValue: + n += proto.SizeVarint(3<<3 | proto.WireFixed64) + n += 8 + case *GaugeResponse_StringValue: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Request message containing the gauge name +type GaugeRequest struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GaugeRequest) Reset() { *m = GaugeRequest{} } +func (m *GaugeRequest) String() string { return proto.CompactTextString(m) } +func (*GaugeRequest) ProtoMessage() {} +func (*GaugeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type EmptyMessage struct { +} + +func (m *EmptyMessage) Reset() { *m = EmptyMessage{} } +func (m *EmptyMessage) String() string { return proto.CompactTextString(m) } +func (*EmptyMessage) ProtoMessage() {} +func (*EmptyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func init() { + proto.RegisterType((*GaugeResponse)(nil), "grpc.testing.GaugeResponse") + proto.RegisterType((*GaugeRequest)(nil), "grpc.testing.GaugeRequest") + proto.RegisterType((*EmptyMessage)(nil), "grpc.testing.EmptyMessage") +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + +// Client API for MetricsService service + +type MetricsServiceClient interface { + // Returns the values of all the gauges that are currently being maintained by + // the service + GetAllGauges(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (MetricsService_GetAllGaugesClient, error) + // Returns the value of one gauge + GetGauge(ctx context.Context, in *GaugeRequest, opts ...grpc.CallOption) (*GaugeResponse, error) +} + +type metricsServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { + return &metricsServiceClient{cc} +} + +func (c *metricsServiceClient) GetAllGauges(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (MetricsService_GetAllGaugesClient, error) { + stream, err := grpc.NewClientStream(ctx, &_MetricsService_serviceDesc.Streams[0], c.cc, "/grpc.testing.MetricsService/GetAllGauges", opts...) + if err != nil { + return nil, err + } + x := &metricsServiceGetAllGaugesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type MetricsService_GetAllGaugesClient interface { + Recv() (*GaugeResponse, error) + grpc.ClientStream +} + +type metricsServiceGetAllGaugesClient struct { + grpc.ClientStream +} + +func (x *metricsServiceGetAllGaugesClient) Recv() (*GaugeResponse, error) { + m := new(GaugeResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *metricsServiceClient) GetGauge(ctx context.Context, in *GaugeRequest, opts ...grpc.CallOption) (*GaugeResponse, error) { + out := new(GaugeResponse) + err := grpc.Invoke(ctx, "/grpc.testing.MetricsService/GetGauge", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for MetricsService service + +type MetricsServiceServer interface { + // Returns the values of all the gauges that are currently being maintained by + // the service + GetAllGauges(*EmptyMessage, MetricsService_GetAllGaugesServer) error + // Returns the value of one gauge + GetGauge(context.Context, *GaugeRequest) (*GaugeResponse, error) +} + +func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { + s.RegisterService(&_MetricsService_serviceDesc, srv) +} + +func _MetricsService_GetAllGauges_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(EmptyMessage) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(MetricsServiceServer).GetAllGauges(m, &metricsServiceGetAllGaugesServer{stream}) +} + +type MetricsService_GetAllGaugesServer interface { + Send(*GaugeResponse) error + grpc.ServerStream +} + +type metricsServiceGetAllGaugesServer struct { + grpc.ServerStream +} + +func (x *metricsServiceGetAllGaugesServer) Send(m *GaugeResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _MetricsService_GetGauge_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GaugeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServiceServer).GetGauge(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.MetricsService/GetGauge", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServiceServer).GetGauge(ctx, req.(*GaugeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.MetricsService", + HandlerType: (*MetricsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetGauge", + Handler: _MetricsService_GetGauge_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetAllGauges", + Handler: _MetricsService_GetAllGauges_Handler, + ServerStreams: true, + }, + }, + Metadata: fileDescriptor0, +} + +func init() { proto.RegisterFile("metrics.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 253 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0x4d, 0x2d, 0x29, + 0xca, 0x4c, 0x2e, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48, 0xd6, + 0x2b, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0x57, 0x9a, 0xce, 0xc8, 0xc5, 0xeb, 0x9e, 0x58, 0x9a, + 0x9e, 0x1a, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x24, 0xc4, 0xc5, 0x92, 0x97, 0x98, + 0x9b, 0x2a, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x66, 0x0b, 0xc9, 0x73, 0x71, 0xe5, 0xe4, + 0xe7, 0xa5, 0xc7, 0x97, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x01, 0x65, 0x98, 0x3d, 0x18, 0x82, + 0x38, 0x41, 0x62, 0x61, 0x20, 0x21, 0x21, 0x65, 0x2e, 0x9e, 0x94, 0xfc, 0xd2, 0xa4, 0x9c, 0x54, + 0xa8, 0x12, 0x66, 0xa0, 0x12, 0x46, 0xa0, 0x12, 0x6e, 0x88, 0x28, 0x5c, 0x51, 0x31, 0xd0, 0x25, + 0x70, 0x73, 0x58, 0x40, 0x36, 0x80, 0x14, 0x41, 0x44, 0xc1, 0x8a, 0x9c, 0xd8, 0xb9, 0x58, 0xc1, + 0xb2, 0x4a, 0x4a, 0x5c, 0x3c, 0x50, 0x87, 0x15, 0x96, 0x02, 0x1d, 0x8b, 0xcd, 0x5d, 0x4a, 0x7c, + 0x5c, 0x3c, 0xae, 0xb9, 0x05, 0x25, 0x95, 0xbe, 0xa9, 0xc5, 0xc5, 0x89, 0xe9, 0xa9, 0x46, 0x0b, + 0x18, 0xb9, 0xf8, 0x7c, 0x21, 0xbe, 0x0d, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x15, 0xf2, 0x04, + 
0x1a, 0x93, 0x5a, 0xe2, 0x98, 0x93, 0x03, 0x36, 0xac, 0x58, 0x48, 0x4a, 0x0f, 0xd9, 0xff, 0x7a, + 0xc8, 0xda, 0xa5, 0xa4, 0x51, 0xe5, 0x50, 0xc2, 0xc5, 0x80, 0x51, 0xc8, 0x99, 0x8b, 0x03, 0x68, + 0x14, 0x58, 0x14, 0xdd, 0x18, 0x64, 0x97, 0xe2, 0x35, 0x26, 0x89, 0x0d, 0x1c, 0x0b, 0xc6, 0x80, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x7d, 0xb2, 0xc9, 0x96, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/stress/metrics_client/main.go b/vendor/google.golang.org/grpc/stress/metrics_client/main.go new file mode 100644 index 000000000..983a8ff24 --- /dev/null +++ b/vendor/google.golang.org/grpc/stress/metrics_client/main.go @@ -0,0 +1,97 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +package main + +import ( + "flag" + "fmt" + "io" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + metricspb "google.golang.org/grpc/stress/grpc_testing" +) + +var ( + metricsServerAddress = flag.String("metrics_server_address", "", "The metrics server addresses in the fomrat :") + totalOnly = flag.Bool("total_only", false, "If true, this prints only the total value of all gauges") +) + +func printMetrics(client metricspb.MetricsServiceClient, totalOnly bool) { + stream, err := client.GetAllGauges(context.Background(), &metricspb.EmptyMessage{}) + if err != nil { + grpclog.Fatalf("failed to call GetAllGuages: %v", err) + } + + var ( + overallQPS int64 + rpcStatus error + ) + for { + gaugeResponse, err := stream.Recv() + if err != nil { + rpcStatus = err + break + } + if _, ok := gaugeResponse.GetValue().(*metricspb.GaugeResponse_LongValue); !ok { + panic(fmt.Sprintf("gauge %s is not a long value", gaugeResponse.Name)) + } + v := gaugeResponse.GetLongValue() + if !totalOnly { + grpclog.Printf("%s: %d", gaugeResponse.Name, v) + } + overallQPS += v + } + if rpcStatus != io.EOF { + grpclog.Fatalf("failed to finish server streaming: %v", rpcStatus) + } + grpclog.Printf("overall qps: %d", overallQPS) +} + +func main() { + flag.Parse() + if *metricsServerAddress == "" { + grpclog.Fatalf("Metrics server address is empty.") + } + + conn, err := grpc.Dial(*metricsServerAddress, grpc.WithInsecure()) + if err != nil { + grpclog.Fatalf("cannot connect to metrics server: %v", err) + } + defer conn.Close() + + c := metricspb.NewMetricsServiceClient(conn) + printMetrics(c, *totalOnly) +} diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index cde04fbfc..f6747e1df 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -101,9 +101,8 @@ type payload struct { func (p payload) String() string { if p.sent { return fmt.Sprintf("sent: %v", p.msg) - } else { - return fmt.Sprintf("recv: %v", p.msg) } + return fmt.Sprintf("recv: %v", p.msg) } type fmtStringer struct { diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go index f6b38a5a6..4ef0830b5 100644 --- a/vendor/google.golang.org/grpc/transport/control.go +++ b/vendor/google.golang.org/grpc/transport/control.go @@ -56,43 +56,38 @@ type windowUpdate struct { increment uint32 } -func (windowUpdate) isItem() bool { - return true -} +func (*windowUpdate) item() {} type settings struct { ack bool ss []http2.Setting } -func (settings) isItem() bool { - return true -} +func (*settings) item() {} type resetStream struct { streamID uint32 code http2.ErrCode } -func (resetStream) isItem() bool { - return true +func (*resetStream) item() {} + +type goAway struct { } +func (*goAway) item() {} + type flushIO struct { } -func (flushIO) isItem() bool { - return true -} +func (*flushIO) item() {} type ping struct { ack bool data [8]byte } -func (ping) isItem() bool { - return true -} +func (*ping) item() {} // quotaPool is a pool which accumulates the quota and sends it to acquire() // when it is available. @@ -172,10 +167,6 @@ func (qb *quotaPool) acquire() <-chan int { type inFlow struct { // The inbound flow control limit for pending data. limit uint32 - // conn points to the shared connection-level inFlow that is shared - // by all streams on that conn. It is nil for the inFlow on the conn - // directly. 
- conn *inFlow mu sync.Mutex // pendingData is the overall data which have been received but not been @@ -186,75 +177,39 @@ type inFlow struct { pendingUpdate uint32 } -// onData is invoked when some data frame is received. It increments not only its -// own pendingData but also that of the associated connection-level flow. +// onData is invoked when some data frame is received. It updates pendingData. func (f *inFlow) onData(n uint32) error { - if n == 0 { - return nil - } f.mu.Lock() defer f.mu.Unlock() - if f.pendingData+f.pendingUpdate+n > f.limit { - return fmt.Errorf("recieved %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate+n, f.limit) - } - if f.conn != nil { - if err := f.conn.onData(n); err != nil { - return ConnectionErrorf("%v", err) - } - } f.pendingData += n - return nil -} - -// connOnRead updates the connection level states when the application consumes data. -func (f *inFlow) connOnRead(n uint32) uint32 { - if n == 0 || f.conn != nil { - return 0 + if f.pendingData+f.pendingUpdate > f.limit { + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) } - f.mu.Lock() - defer f.mu.Unlock() - f.pendingData -= n - f.pendingUpdate += n - if f.pendingUpdate >= f.limit/4 { - ret := f.pendingUpdate - f.pendingUpdate = 0 - return ret - } - return 0 + return nil } -// onRead is invoked when the application reads the data. It returns the window updates -// for both stream and connection level. -func (f *inFlow) onRead(n uint32) (swu, cwu uint32) { - if n == 0 { - return - } +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { f.mu.Lock() defer f.mu.Unlock() if f.pendingData == 0 { - // pendingData has been adjusted by restoreConn. - return + return 0 } f.pendingData -= n f.pendingUpdate += n if f.pendingUpdate >= f.limit/4 { - swu = f.pendingUpdate + wu := f.pendingUpdate f.pendingUpdate = 0 + return wu } - cwu = f.conn.connOnRead(n) - return + return 0 } -// restoreConn is invoked when a stream is terminated. It removes its stake in -// the connection-level flow and resets its own state. -func (f *inFlow) restoreConn() uint32 { - if f.conn == nil { - return 0 - } +func (f *inFlow) resetPendingData() uint32 { f.mu.Lock() defer f.mu.Unlock() n := f.pendingData f.pendingData = 0 - f.pendingUpdate = 0 - return f.conn.connOnRead(n) + return n } diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go new file mode 100644 index 000000000..ee1c46bad --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/go16.go @@ -0,0 +1,46 @@ +// +build go1.6,!go1.7 + +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "net" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go new file mode 100644 index 000000000..356f13ff1 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/go17.go @@ -0,0 +1,46 @@ +// +build go1.7 + +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "net" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. 
+func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go index 8bfbf9703..30e21ac0f 100644 --- a/vendor/google.golang.org/grpc/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -40,6 +40,7 @@ package transport import ( "errors" "fmt" + "io" "net" "net/http" "strings" @@ -64,7 +65,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr if r.Method != "POST" { return nil, errors.New("invalid gRPC request method") } - if !strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { + if !validContentType(r.Header.Get("Content-Type")) { return nil, errors.New("invalid gRPC request content-type") } if _, ok := w.(http.Flusher); !ok { @@ -82,7 +83,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr } if v := r.Header.Get("grpc-timeout"); v != "" { - to, err := timeoutDecode(v) + to, err := decodeTimeout(v) if err != nil { return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err) } @@ -91,9 +92,12 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr } var metakv []string + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } for k, vv := range r.Header { k = strings.ToLower(k) - if isReservedHeader(k) { + if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) { continue } for _, v := range vv { @@ -107,7 +111,6 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr } } metakv = append(metakv, k, v) - } } st.headerMD = metadata.Pairs(metakv...) @@ -117,7 +120,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr // serverHandlerTransport is an implementation of ServerTransport // which replies to exactly one gRPC request (exactly one HTTP request), -// using the net/http.Handler interface. This http.Handler is guranteed +// using the net/http.Handler interface. This http.Handler is guaranteed // at this point to be speaking over HTTP/2, so it's able to speak valid // gRPC. type serverHandlerTransport struct { @@ -191,10 +194,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, h := ht.rw.Header() h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode)) if statusDesc != "" { - h.Set("Grpc-Message", statusDesc) + h.Set("Grpc-Message", encodeGrpcMessage(statusDesc)) } if md := s.Trailer(); len(md) > 0 { for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } for _, v := range vv { // http2 ResponseWriter mechanism to // send undeclared Trailers after the @@ -248,6 +255,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { ht.writeCommonHeaders(s) h := ht.rw.Header() for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
+ if isReservedHeader(k) { + continue + } for _, v := range vv { h.Add(k, v) } @@ -301,7 +312,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { Addr: ht.RemoteAddr(), } if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{*req.TLS} + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} } ctx = metadata.NewContext(ctx, ht.headerMD) ctx = peer.NewContext(ctx, pr) @@ -312,16 +323,22 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { readerDone := make(chan struct{}) go func() { defer close(readerDone) - for { - buf := make([]byte, 1024) // TODO: minimize garbage, optimize recvBuffer code/ownership + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { n, err := req.Body.Read(buf) if n > 0 { - s.buf.put(&recvMsg{data: buf[:n]}) + s.buf.put(&recvMsg{data: buf[:n:n]}) + buf = buf[n:] } if err != nil { - s.buf.put(&recvMsg{err: err}) + s.buf.put(&recvMsg{err: mapRecvMsgError(err)}) return } + if len(buf) == 0 { + buf = make([]byte, readSize) + } } }() @@ -352,3 +369,29 @@ func (ht *serverHandlerTransport) runStream() { } } } + +func (ht *serverHandlerTransport) Drain() { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. +// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return StreamError{ + Code: code, + Desc: se.Error(), + } + } + } + return ConnectionError{Desc: err.Error()} +} diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go index 7cf700fe3..5e7c8c25b 100644 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -35,7 +35,7 @@ package transport import ( "bytes" - "errors" + "fmt" "io" "math" "net" @@ -72,6 +72,9 @@ type http2Client struct { shutdownChan chan struct{} // errorChan is closed to notify the I/O error to the caller. errorChan chan struct{} + // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) + // that the server sent GoAway on this transport. + goAway chan struct{} framer *framer hBuf *bytes.Buffer // the buffer for HPACK encoding @@ -89,7 +92,7 @@ type http2Client struct { // The scheme used: https if TLS is on, http otherwise. scheme string - authCreds []credentials.Credentials + creds []credentials.PerRPCCredentials mu sync.Mutex // guard the following variables state transportState // the state of underlying connection @@ -98,70 +101,48 @@ type http2Client struct { maxStreams int // the per-stream outbound flow control window size set by the peer. streamSendQuota uint32 + // goAwayID records the Last-Stream-ID in the GoAway frame from the server. + goAwayID uint32 + // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. 
+ prevGoAwayID uint32 +} + +func dial(fn func(context.Context, string) (net.Conn, error), ctx context.Context, addr string) (net.Conn, error) { + if fn != nil { + return fn(ctx, addr) + } + return dialContext(ctx, "tcp", addr) } // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err error) { - if opts.Dialer == nil { - // Set the default Dialer. - opts.Dialer = func(addr string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout("tcp", addr, timeout) - } - } +func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ ClientTransport, err error) { scheme := "http" - startT := time.Now() - timeout := opts.Timeout - conn, connErr := opts.Dialer(addr, timeout) - if connErr != nil { - return nil, ConnectionErrorf("transport: %v", connErr) - } - var authInfo credentials.AuthInfo - for _, c := range opts.AuthOptions { - if ccreds, ok := c.(credentials.TransportAuthenticator); ok { - scheme = "https" - // TODO(zhaoq): Now the first TransportAuthenticator is used if there are - // multiple ones provided. Revisit this if it is not appropriate. Probably - // place the ClientTransport construction into a separate function to make - // things clear. - if timeout > 0 { - timeout -= time.Since(startT) - } - conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout) - break - } - } + conn, connErr := dial(opts.Dialer, ctx, addr) if connErr != nil { - return nil, ConnectionErrorf("transport: %v", connErr) + return nil, ConnectionErrorf(true, connErr, "transport: %v", connErr) } - defer func() { + // Any further errors will close the underlying connection + defer func(conn net.Conn) { if err != nil { conn.Close() } - }() - // Send connection preface to server. - n, err := conn.Write(clientPreface) - if err != nil { - return nil, ConnectionErrorf("transport: %v", err) - } - if n != len(clientPreface) { - return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - } - framer := newFramer(conn) - if initialWindowSize != defaultWindowSize { - err = framer.writeSettings(true, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) - } else { - err = framer.writeSettings(true) - } - if err != nil { - return nil, ConnectionErrorf("transport: %v", err) + }(conn) + var authInfo credentials.AuthInfo + if creds := opts.TransportCredentials; creds != nil { + scheme = "https" + conn, authInfo, connErr = creds.ClientHandshake(ctx, addr, conn) } - // Adjust the connection flow control window if needed. - if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, ConnectionErrorf("transport: %v", err) + if connErr != nil { + // Credentials handshake error is not a temporary error (unless the error + // was the connection closing or deadline exceeded). 
+ var temp bool + switch connErr { + case io.EOF, context.DeadlineExceeded: + temp = true } + return nil, ConnectionErrorf(temp, connErr, "transport: %v", connErr) } ua := primaryUA if opts.UserAgent != "" { @@ -178,7 +159,8 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e writableChan: make(chan int, 1), shutdownChan: make(chan struct{}), errorChan: make(chan struct{}), - framer: framer, + goAway: make(chan struct{}), + framer: newFramer(conn), hBuf: &buf, hEnc: hpack.NewEncoder(&buf), controlBuf: newRecvBuffer(), @@ -187,32 +169,58 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e scheme: scheme, state: reachable, activeStreams: make(map[uint32]*Stream), - authCreds: opts.AuthOptions, + creds: opts.PerRPCCredentials, maxStreams: math.MaxInt32, streamSendQuota: defaultWindowSize, } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, ConnectionErrorf(true, err, "transport: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, ConnectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + if initialWindowSize != defaultWindowSize { + err = t.framer.writeSettings(true, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(initialWindowSize), + }) + } else { + err = t.framer.writeSettings(true) + } + if err != nil { + t.Close() + return nil, ConnectionErrorf(true, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { + if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil { + t.Close() + return nil, ConnectionErrorf(true, err, "transport: %v", err) + } + } go t.controller() t.writableChan <- 0 - // Start the reader goroutine for incoming message. The threading model - // on receiving is that each transport has a dedicated goroutine which - // reads HTTP2 frame from network. Then it dispatches the frame to the - // corresponding stream entity. - go t.reader() return t, nil } func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { - fc := &inFlow{ - limit: initialWindowSize, - conn: t.fc, - } // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &Stream{ id: t.nextID, + done: make(chan struct{}), + goAway: make(chan struct{}), method: callHdr.Method, sendCompress: callHdr.SendCompress, buf: newRecvBuffer(), - fc: fc, + fc: &inFlow{limit: initialWindowSize}, sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), headerChan: make(chan struct{}), } @@ -223,8 +231,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { // Make a stream be able to cancel the pending operations by itself. 
s.ctx, s.cancel = context.WithCancel(ctx) s.dec = &recvBufferReader{ - ctx: s.ctx, - recv: s.buf, + ctx: s.ctx, + goAway: s.goAway, + recv: s.buf, } return s } @@ -236,9 +245,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea var timeout time.Duration if dl, ok := ctx.Deadline(); ok { timeout = dl.Sub(time.Now()) - if timeout <= 0 { - return nil, ContextErr(context.DeadlineExceeded) - } + } + select { + case <-ctx.Done(): + return nil, ContextErr(ctx.Err()) + default: } pr := &peer.Peer{ Addr: t.conn.RemoteAddr(), @@ -249,7 +260,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } ctx = peer.NewContext(ctx, pr) authData := make(map[string]string) - for _, c := range t.authCreds { + for _, c := range t.creds { // Construct URI required to get auth request metadata. var port string if pos := strings.LastIndex(t.target, ":"); pos != -1 { @@ -272,6 +283,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } } t.mu.Lock() + if t.activeStreams == nil { + t.mu.Unlock() + return nil, ErrConnClosing + } + if t.state == draining { + t.mu.Unlock() + return nil, ErrStreamDrain + } if t.state != reachable { t.mu.Unlock() return nil, ErrConnClosing @@ -279,7 +298,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea checkStreamsQuota := t.streamsQuota != nil t.mu.Unlock() if checkStreamsQuota { - sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire()) + sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) if err != nil { return nil, err } @@ -288,11 +307,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.streamsQuota.add(sq - 1) } } - if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil { - // t.streamsQuota will be updated when t.CloseStream is invoked. + if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { + // Return the quota back now because there is no stream returned to the caller. + if _, ok := err.(StreamError); ok && checkStreamsQuota { + t.streamsQuota.add(1) + } return nil, err } t.mu.Lock() + if t.state == draining { + t.mu.Unlock() + if checkStreamsQuota { + t.streamsQuota.add(1) + } + // Need to make t writable again so that the rpc in flight can still proceed. + t.writableChan <- 0 + return nil, ErrStreamDrain + } if t.state != reachable { t.mu.Unlock() return nil, ErrConnClosing @@ -327,9 +358,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) } if timeout > 0 { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)}) + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) } for k, v := range authData { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) } var ( @@ -339,6 +372,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea if md, ok := metadata.FromContext(ctx); ok { hasMD = true for k, v := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. 
+ if isReservedHeader(k) { + continue + } for _, entry := range v { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) } @@ -376,7 +413,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } if err != nil { t.notifyError(err) - return nil, ConnectionErrorf("transport: %v", err) + return nil, ConnectionErrorf(true, err, "transport: %v", err) } } t.writableChan <- 0 @@ -388,22 +425,29 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea func (t *http2Client) CloseStream(s *Stream, err error) { var updateStreams bool t.mu.Lock() + if t.activeStreams == nil { + t.mu.Unlock() + return + } if t.streamsQuota != nil { updateStreams = true } delete(t.activeStreams, s.id) + if t.state == draining && len(t.activeStreams) == 0 { + // The transport is draining and s is the last live stream on t. + t.mu.Unlock() + t.Close() + return + } t.mu.Unlock() if updateStreams { t.streamsQuota.add(1) } - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), the caller needs - // to call cancel on the stream to interrupt the blocking on - // other goroutines. - s.cancel() s.mu.Lock() - if q := s.fc.restoreConn(); q > 0 { - t.controlBuf.put(&windowUpdate{0, q}) + if q := s.fc.resetPendingData(); q > 0 { + if n := t.fc.onRead(q); n > 0 { + t.controlBuf.put(&windowUpdate{0, n}) + } } if s.state == streamDone { s.mu.Unlock() @@ -427,7 +471,10 @@ func (t *http2Client) Close() (err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return errors.New("transport: Close() was already called") + return + } + if t.state == reachable || t.state == draining { + close(t.errorChan) } t.state = closing t.mu.Unlock() @@ -450,6 +497,50 @@ func (t *http2Client) Close() (err error) { return } +func (t *http2Client) GracefulClose() error { + t.mu.Lock() + switch t.state { + case unreachable: + // The server may close the connection concurrently. t is not available for + // any streams. Close it now. + t.mu.Unlock() + t.Close() + return nil + case closing: + t.mu.Unlock() + return nil + } + // Notify the streams which were initiated after the server sent GOAWAY. + select { + case <-t.goAway: + n := t.prevGoAwayID + if n == 0 && t.nextID > 1 { + n = t.nextID - 2 + } + m := t.goAwayID + 2 + if m == 2 { + m = 1 + } + for i := m; i <= n; i += 2 { + if s, ok := t.activeStreams[i]; ok { + close(s.goAway) + } + } + default: + } + if t.state == draining { + t.mu.Unlock() + return nil + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + return t.Close() + } + return nil +} + // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. // TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later @@ -462,15 +553,15 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { size := http2MaxFrameLen s.sendQuotaPool.add(0) // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) + sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire()) if err != nil { return err } t.sendQuotaPool.add(0) // Wait until the transport has some quota to send the data. 
- tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) + tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire()) if err != nil { - if _, ok := err.(StreamError); ok { + if _, ok := err.(StreamError); ok || err == io.EOF { t.sendQuotaPool.cancel() } return err @@ -502,7 +593,11 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { // Indicate there is a writer who is about to write a data frame. t.framer.adjustNumWriters(1) // Got some quota. Try to acquire writing privilege on the transport. - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.writableChan); err != nil { + if _, ok := err.(StreamError); ok || err == io.EOF { + // Return the connection quota back. + t.sendQuotaPool.add(len(p)) + } if t.framer.adjustNumWriters(-1) == 0 { // This writer is the last one in this batch and has the // responsibility to flush the buffered frames. It queues @@ -512,6 +607,16 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { } return err } + select { + case <-s.ctx.Done(): + t.sendQuotaPool.add(len(p)) + if t.framer.adjustNumWriters(-1) == 0 { + t.controlBuf.put(&flushIO{}) + } + t.writableChan <- 0 + return ContextErr(s.ctx.Err()) + default: + } if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { // Do a force flush iff this is last frame for the entire gRPC message // and the caller is the only writer at this moment. @@ -522,7 +627,7 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { // invoked. if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { t.notifyError(err) - return ConnectionErrorf("transport: %v", err) + return ConnectionErrorf(true, err, "transport: %v", err) } if t.framer.adjustNumWriters(-1) == 0 { t.framer.flushWrite() @@ -537,11 +642,7 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { } s.mu.Lock() if s.state != streamDone { - if s.state == streamReadDone { - s.state = streamDone - } else { - s.state = streamWriteDone - } + s.state = streamWriteDone } s.mu.Unlock() return nil @@ -550,55 +651,62 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { t.mu.Lock() defer t.mu.Unlock() - if t.activeStreams == nil { - // The transport is closing. - return nil, false - } - if s, ok := t.activeStreams[f.Header().StreamID]; ok { - return s, true - } - return nil, false + s, ok := t.activeStreams[f.Header().StreamID] + return s, ok } // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. func (t *http2Client) updateWindow(s *Stream, n uint32) { - swu, cwu := s.fc.onRead(n) - if swu > 0 { - t.controlBuf.put(&windowUpdate{s.id, swu}) + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := t.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) } - if cwu > 0 { - t.controlBuf.put(&windowUpdate{0, cwu}) + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w}) } } func (t *http2Client) handleData(f *http2.DataFrame) { + size := len(f.Data()) + if err := t.fc.onData(uint32(size)); err != nil { + t.notifyError(ConnectionErrorf(true, err, "%v", err)) + return + } // Select the right stream to dispatch. 
s, ok := t.getStream(f) if !ok { + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } return } - size := len(f.Data()) if size > 0 { - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - t.notifyError(err) - return - } - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + // The stream has been closed. Release the corresponding quota. + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) } + return + } + if err := s.fc.onData(uint32(size)); err != nil { s.state = streamDone s.statusCode = codes.Internal s.statusDesc = err.Error() + close(s.done) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } + s.mu.Unlock() // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? @@ -610,13 +718,14 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // the read direction is closed, and set the status appropriately. if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { s.mu.Lock() - if s.state == streamWriteDone { - s.state = streamDone - } else { - s.state = streamReadDone + if s.state == streamDone { + s.mu.Unlock() + return } + s.state = streamDone s.statusCode = codes.Internal s.statusDesc = "server closed the stream without sending trailers" + close(s.done) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } @@ -637,10 +746,13 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { close(s.headerChan) s.headerDone = true } - s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)] + s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)] if !ok { grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) + s.statusCode = codes.Unknown } + s.statusDesc = fmt.Sprintf("stream terminated by RST_STREAM with error code: %d", f.ErrCode) + close(s.done) s.mu.Unlock() s.write(recvMsg{err: io.EOF}) } @@ -665,7 +777,32 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { } func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { - // TODO(zhaoq): GoAwayFrame handler to be implemented + t.mu.Lock() + if t.state == reachable || t.state == draining { + if f.LastStreamID > 0 && f.LastStreamID%2 != 1 { + t.mu.Unlock() + t.notifyError(ConnectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID)) + return + } + select { + case <-t.goAway: + id := t.goAwayID + // t.goAway has been closed (i.e.,multiple GoAways). + if id < f.LastStreamID { + t.mu.Unlock() + t.notifyError(ConnectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStramID %d, currently recv %d", id, f.LastStreamID)) + return + } + t.prevGoAwayID = id + t.goAwayID = f.LastStreamID + t.mu.Unlock() + return + default: + } + t.goAwayID = f.LastStreamID + close(t.goAway) + } + t.mu.Unlock() } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -680,54 +817,59 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { } } -// operateHeader takes action on the decoded headers. It returns the current -// stream if there are remaining headers on the wire (in the following -// Continuation frame). 
-func (t *http2Client) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool) (pendingStream *Stream) { - defer func() { - if pendingStream == nil { - hDec.state = decodeState{} - } - }() - endHeaders, err := hDec.decodeClientHTTP2Headers(frame) - if s == nil { - // s has been closed. - return nil +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s, ok := t.getStream(frame) + if !ok { + return } - if err != nil { - s.write(recvMsg{err: err}) - // Something wrong. Stops reading even when there is remaining. - return nil + var state decodeState + for _, hf := range frame.Fields { + state.processHeaderField(hf) } - if !endHeaders { - return s + if state.err != nil { + s.write(recvMsg{err: state.err}) + // Something wrong. Stops reading even when there is remaining. + return } + + endStream := frame.StreamEnded() + s.mu.Lock() if !endStream { - s.recvCompress = hDec.state.encoding + s.recvCompress = state.encoding } if !s.headerDone { - if !endStream && len(hDec.state.mdata) > 0 { - s.header = hDec.state.mdata + if !endStream && len(state.mdata) > 0 { + s.header = state.mdata } close(s.headerChan) s.headerDone = true } if !endStream || s.state == streamDone { s.mu.Unlock() - return nil + return } - if len(hDec.state.mdata) > 0 { - s.trailer = hDec.state.mdata + if len(state.mdata) > 0 { + s.trailer = state.mdata } + s.statusCode = state.statusCode + s.statusDesc = state.statusDesc + close(s.done) s.state = streamDone - s.statusCode = hDec.state.statusCode - s.statusDesc = hDec.state.statusDesc s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) - return nil +} + +func handleMalformedHTTP2(s *Stream, err error) { + s.mu.Lock() + if !s.headerDone { + close(s.headerChan) + s.headerDone = true + } + s.mu.Unlock() + s.write(recvMsg{err: err}) } // reader runs as a separate goroutine in charge of reading data from network @@ -750,25 +892,31 @@ func (t *http2Client) reader() { } t.handleSettings(sf) - hDec := newHPACKDecoder() - var curStream *Stream // loop to keep reading incoming messages on this transport. for { frame, err := t.framer.readFrame() if err != nil { - t.notifyError(err) - return + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + handleMalformedHTTP2(s, StreamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail())) + } + continue + } else { + // Transport error. + t.notifyError(err) + return + } } switch frame := frame.(type) { - case *http2.HeadersFrame: - // operateHeaders has to be invoked regardless the value of curStream - // because the HPACK decoder needs to be updated using the received - // headers. 
- curStream, _ = t.getStream(frame) - endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) - curStream = t.operateHeaders(hDec, curStream, frame, endStream) - case *http2.ContinuationFrame: - curStream = t.operateHeaders(hDec, curStream, frame, frame.HeadersEnded()) + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: @@ -863,13 +1011,22 @@ func (t *http2Client) Error() <-chan struct{} { return t.errorChan } +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + func (t *http2Client) notifyError(err error) { t.mu.Lock() - defer t.mu.Unlock() // make sure t.errorChan is closed only once. + if t.state == draining { + t.mu.Unlock() + t.Close() + return + } if t.state == reachable { t.state = unreachable close(t.errorChan) grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) } + t.mu.Unlock() } diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go index b9d4959fe..16010d55f 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -100,18 +100,23 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthI if maxStreams == 0 { maxStreams = math.MaxUint32 } else { - settings = append(settings, http2.Setting{http2.SettingMaxConcurrentStreams, maxStreams}) + settings = append(settings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) } if initialWindowSize != defaultWindowSize { - settings = append(settings, http2.Setting{http2.SettingInitialWindowSize, uint32(initialWindowSize)}) + settings = append(settings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(initialWindowSize)}) } if err := framer.writeSettings(true, settings...); err != nil { - return nil, ConnectionErrorf("transport: %v", err) + return nil, ConnectionErrorf(true, err, "transport: %v", err) } // Adjust the connection flow control window if needed. if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, ConnectionErrorf("transport: %v", err) + return nil, ConnectionErrorf(true, err, "transport: %v", err) } } var buf bytes.Buffer @@ -136,37 +141,34 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthI return t, nil } -// operateHeader takes action on the decoded headers. It returns the current -// stream if there are remaining headers on the wire (in the following -// Continuation frame). -func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream)) (pendingStream *Stream) { - defer func() { - if pendingStream == nil { - hDec.state = decodeState{} - } - }() - endHeaders, err := hDec.decodeServerHTTP2Headers(frame) - if s == nil { - // s has been closed. - return nil +// operateHeader takes action on the decoded headers. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) (close bool) { + buf := newRecvBuffer() + s := &Stream{ + id: frame.Header().StreamID, + st: t, + buf: buf, + fc: &inFlow{limit: initialWindowSize}, } - if err != nil { - grpclog.Printf("transport: http2Server.operateHeader found %v", err) + + var state decodeState + for _, hf := range frame.Fields { + state.processHeaderField(hf) + } + if err := state.err; err != nil { if se, ok := err.(StreamError); ok { t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) } - return nil + return } - if endStream { + + if frame.StreamEnded() { // s is just created by the caller. No lock needed. s.state = streamReadDone } - if !endHeaders { - return s - } - s.recvCompress = hDec.state.encoding - if hDec.state.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout) + s.recvCompress = state.encoding + if state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout) } else { s.ctx, s.cancel = context.WithCancel(context.TODO()) } @@ -183,26 +185,33 @@ func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame header // back to the client (unary call only). s.ctx = newContextWithStream(s.ctx, s) // Attach the received metadata to the context. - if len(hDec.state.mdata) > 0 { - s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata) + if len(state.mdata) > 0 { + s.ctx = metadata.NewContext(s.ctx, state.mdata) } s.dec = &recvBufferReader{ ctx: s.ctx, recv: s.buf, } - s.recvCompress = hDec.state.encoding - s.method = hDec.state.method + s.recvCompress = state.encoding + s.method = state.method t.mu.Lock() if t.state != reachable { t.mu.Unlock() - return nil + return } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) - return nil + return + } + if s.id%2 != 1 || s.id <= t.maxStreamID { + t.mu.Unlock() + // illegal gRPC stream id. + grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", s.id) + return true } + t.maxStreamID = s.id s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) t.activeStreams[s.id] = s t.mu.Unlock() @@ -210,7 +219,7 @@ func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame header t.updateWindow(s, uint32(n)) } handle(s) - return nil + return } // HandleStreams receives incoming streams using the given handler. This is @@ -230,6 +239,10 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { } frame, err := t.framer.readFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } if err != nil { grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) t.Close() @@ -243,39 +256,33 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { } t.handleSettings(sf) - hDec := newHPACKDecoder() - var curStream *Stream for { frame, err := t.framer.readFrame() if err != nil { + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s) + } + t.controlBuf.put(&resetStream{se.StreamID, se.Code}) + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) t.Close() return } switch frame := frame.(type) { - case *http2.HeadersFrame: - id := frame.Header().StreamID - if id%2 != 1 || id <= t.maxStreamID { - // illegal gRPC stream id. 
- grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id) + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle) { t.Close() break } - t.maxStreamID = id - buf := newRecvBuffer() - fc := &inFlow{ - limit: initialWindowSize, - conn: t.fc, - } - curStream = &Stream{ - id: frame.Header().StreamID, - st: t, - buf: buf, - fc: fc, - } - endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream) - curStream = t.operateHeaders(hDec, curStream, frame, endStream, handle) - case *http2.ContinuationFrame: - curStream = t.operateHeaders(hDec, curStream, frame, frame.HeadersEnded(), handle) case *http2.DataFrame: t.handleData(frame) case *http2.RSTStreamFrame: @@ -287,7 +294,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) case *http2.GoAwayFrame: - break + // TODO: Handle GoAway from the client appropriately. default: grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) } @@ -313,33 +320,51 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. func (t *http2Server) updateWindow(s *Stream, n uint32) { - swu, cwu := s.fc.onRead(n) - if swu > 0 { - t.controlBuf.put(&windowUpdate{s.id, swu}) + s.mu.Lock() + defer s.mu.Unlock() + if s.state == streamDone { + return + } + if w := t.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) } - if cwu > 0 { - t.controlBuf.put(&windowUpdate{0, cwu}) + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&windowUpdate{s.id, w}) } } func (t *http2Server) handleData(f *http2.DataFrame) { + size := len(f.Data()) + if err := t.fc.onData(uint32(size)); err != nil { + grpclog.Printf("transport: http2Server %v", err) + t.Close() + return + } // Select the right stream to dispatch. s, ok := t.getStream(f) if !ok { + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } return } - size := len(f.Data()) if size > 0 { - if err := s.fc.onData(uint32(size)); err != nil { - if _, ok := err.(ConnectionError); ok { - grpclog.Printf("transport: http2Server %v", err) - t.Close() - return + s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + // The stream has been closed. Release the corresponding quota. + if w := t.fc.onRead(uint32(size)); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) } + return + } + if err := s.fc.onData(uint32(size)); err != nil { + s.mu.Unlock() t.closeStream(s) t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) return } + s.mu.Unlock() // TODO(bradfitz, zhaoq): A copy is required here because there is no // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? @@ -351,11 +376,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // Received the end of stream from the client. 
s.mu.Lock() if s.state != streamDone { - if s.state == streamWriteDone { - s.state = streamDone - } else { - s.state = streamReadDone - } + s.state = streamReadDone } s.mu.Unlock() s.write(recvMsg{err: io.EOF}) @@ -427,7 +448,7 @@ func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) e } if err != nil { t.Close() - return ConnectionErrorf("transport: %v", err) + return ConnectionErrorf(true, err, "transport: %v", err) } } return nil @@ -442,7 +463,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } s.headerOk = true s.mu.Unlock() - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { return err } t.hBuf.Reset() @@ -452,6 +473,10 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } for k, v := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + continue + } for _, entry := range v { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) } @@ -478,7 +503,7 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s headersSent = true } s.mu.Unlock() - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { return err } t.hBuf.Reset() @@ -491,9 +516,13 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s Name: "grpc-status", Value: strconv.Itoa(int(statusCode)), }) - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc}) + t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(statusDesc)}) // Attach the trailer metadata. for k, v := range s.trailer { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } for _, entry := range v { t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) } @@ -513,13 +542,17 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { // TODO(zhaoq): Support multi-writers for a single stream. var writeHeaderFrame bool s.mu.Lock() + if s.state == streamDone { + s.mu.Unlock() + return StreamErrorf(codes.Unknown, "the stream has been done") + } if !s.headerOk { writeHeaderFrame = true s.headerOk = true } s.mu.Unlock() if writeHeaderFrame { - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { return err } t.hBuf.Reset() @@ -535,7 +568,7 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { } if err := t.framer.writeHeaders(false, p); err != nil { t.Close() - return ConnectionErrorf("transport: %v", err) + return ConnectionErrorf(true, err, "transport: %v", err) } t.writableChan <- 0 } @@ -547,13 +580,13 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { size := http2MaxFrameLen s.sendQuotaPool.add(0) // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire()) + sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire()) if err != nil { return err } t.sendQuotaPool.add(0) // Wait until the transport has some quota to send the data. 
- tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire()) + tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire()) if err != nil { if _, ok := err.(StreamError); ok { t.sendQuotaPool.cancel() @@ -579,7 +612,11 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { t.framer.adjustNumWriters(1) // Got some quota. Try to acquire writing privilege on the // transport. - if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil { + if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { + if _, ok := err.(StreamError); ok { + // Return the connection quota back. + t.sendQuotaPool.add(ps) + } if t.framer.adjustNumWriters(-1) == 0 { // This writer is the last one in this batch and has the // responsibility to flush the buffered frames. It queues @@ -589,13 +626,23 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { } return err } + select { + case <-s.ctx.Done(): + t.sendQuotaPool.add(ps) + if t.framer.adjustNumWriters(-1) == 0 { + t.controlBuf.put(&flushIO{}) + } + t.writableChan <- 0 + return ContextErr(s.ctx.Err()) + default: + } var forceFlush bool if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { forceFlush = true } if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil { t.Close() - return ConnectionErrorf("transport: %v", err) + return ConnectionErrorf(true, err, "transport: %v", err) } if t.framer.adjustNumWriters(-1) == 0 { t.framer.flushWrite() @@ -640,6 +687,17 @@ func (t *http2Server) controller() { } case *resetStream: t.framer.writeRSTStream(true, i.streamID, i.code) + case *goAway: + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + // The transport is closing. + return + } + sid := t.maxStreamID + t.state = draining + t.mu.Unlock() + t.framer.writeGoAway(true, sid, http2.ErrCodeNo, nil) case *flushIO: t.framer.flushWrite() case *ping: @@ -685,23 +743,32 @@ func (t *http2Server) Close() (err error) { func (t *http2Server) closeStream(s *Stream) { t.mu.Lock() delete(t.activeStreams, s.id) - t.mu.Unlock() - if q := s.fc.restoreConn(); q > 0 { - t.controlBuf.put(&windowUpdate{0, q}) + if t.state == draining && len(t.activeStreams) == 0 { + defer t.Close() } + t.mu.Unlock() + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() s.mu.Lock() + if q := s.fc.resetPendingData(); q > 0 { + if w := t.fc.onRead(q); w > 0 { + t.controlBuf.put(&windowUpdate{0, w}) + } + } if s.state == streamDone { s.mu.Unlock() return } s.state = streamDone s.mu.Unlock() - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. 
- s.cancel() } func (t *http2Server) RemoteAddr() net.Addr { return t.conn.RemoteAddr() } + +func (t *http2Server) Drain() { + t.controlBuf.put(&goAway{}) +} diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go index 251731184..79da51264 100644 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -35,6 +35,7 @@ package transport import ( "bufio" + "bytes" "fmt" "io" "net" @@ -52,7 +53,7 @@ import ( const ( // The primary user agent - primaryUA = "grpc-go/0.11" + primaryUA = "grpc-go/1.0" // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame // http://http2.github.io/http2-spec/#SettingValues @@ -62,13 +63,14 @@ const ( ) var ( - clientPreface = []byte(http2.ClientPreface) - http2RSTErrConvTab = map[http2.ErrCode]codes.Code{ + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ http2.ErrCodeNo: codes.Internal, http2.ErrCodeProtocol: codes.Internal, http2.ErrCodeInternal: codes.Internal, http2.ErrCodeFlowControl: codes.ResourceExhausted, http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, http2.ErrCodeFrameSize: codes.Internal, http2.ErrCodeRefusedStream: codes.Unavailable, http2.ErrCodeCancel: codes.Canceled, @@ -76,6 +78,7 @@ var ( http2.ErrCodeConnect: codes.Internal, http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.FailedPrecondition, } statusCodeConvTab = map[codes.Code]http2.ErrCode{ codes.Internal: http2.ErrCodeInternal, @@ -89,6 +92,8 @@ var ( // Records the states during HPACK decoding. Must be reset once the // decoding of the entire headers are finished. type decodeState struct { + err error // first error encountered decoding + encoding string // statusCode caches the stream status received from the trailer // the server sent. Client side only. @@ -102,20 +107,6 @@ type decodeState struct { mdata map[string][]string } -// An hpackDecoder decodes HTTP2 headers which may span multiple frames. -type hpackDecoder struct { - h *hpack.Decoder - state decodeState - err error // The err when decoding -} - -// A headerFrame is either a http2.HeaderFrame or http2.ContinuationFrame. -type headerFrame interface { - Header() http2.FrameHeader - HeaderBlockFragment() []byte - HeadersEnded() bool -} - // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. 
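For orientation, a minimal sketch of how the reserved-header filtering introduced throughout this patch surfaces to callers: keys owned by the protocol (for example "content-type" or "grpc-timeout") are dropped by the client and server transports before HPACK encoding, while ordinary application keys are forwarded as HTTP/2 headers. The snippet is illustrative only, the key names are hypothetical, and it assumes the metadata package already vendored in this tree.

// Sketch only (hypothetical example, not part of the vendored sources).
package example

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/metadata"
)

func contextWithAppMetadata(ctx context.Context) context.Context {
	md := metadata.Pairs(
		"x-request-id", "42", // ordinary key: forwarded as an HTTP/2 header
		"grpc-timeout", "1S", // reserved key: skipped by the isReservedHeader check
	)
	return metadata.NewContext(ctx, md)
}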
@@ -137,100 +128,86 @@ func isReservedHeader(hdr string) bool { } } -func newHPACKDecoder() *hpackDecoder { - d := &hpackDecoder{} - d.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) { - switch f.Name { - case "content-type": - if !strings.Contains(f.Value, "application/grpc") { - d.err = StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value) - return - } - case "grpc-encoding": - d.state.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.err = StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return - } - d.state.statusCode = codes.Code(code) - case "grpc-message": - d.state.statusDesc = f.Value - case "grpc-timeout": - d.state.timeoutSet = true - var err error - d.state.timeout, err = timeoutDecode(f.Value) - if err != nil { - d.err = StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err) - return - } - case ":path": - d.state.method = f.Value - default: - if !isReservedHeader(f.Name) { - if f.Name == "user-agent" { - i := strings.LastIndex(f.Value, " ") - if i == -1 { - // There is no application user agent string being set. - return - } - // Extract the application user agent string. - f.Value = f.Value[:i] - } - if d.state.mdata == nil { - d.state.mdata = make(map[string][]string) - } - k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) - if err != nil { - grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) - return - } - d.state.mdata[k] = append(d.state.mdata[k], v) - } - } - }) - return d +// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders +// that should be propagated into metadata visible to users. +func isWhitelistedPseudoHeader(hdr string) bool { + switch hdr { + case ":authority": + return true + default: + return false + } } -func (d *hpackDecoder) decodeClientHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { - d.err = nil - _, err = d.h.Write(frame.HeaderBlockFragment()) - if err != nil { - err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) +func (d *decodeState) setErr(err error) { + if d.err == nil { + d.err = err } +} - if frame.HeadersEnded() { - if closeErr := d.h.Close(); closeErr != nil && err == nil { - err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) - } - endHeaders = true +func validContentType(t string) bool { + e := "application/grpc" + if !strings.HasPrefix(t, e) { + return false } - - if err == nil && d.err != nil { - err = d.err + // Support variations on the content-type + // (e.g. "application/grpc+blah", "application/grpc;blah"). 
+ if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' { + return false } - return + return true } -func (d *hpackDecoder) decodeServerHTTP2Headers(frame headerFrame) (endHeaders bool, err error) { - d.err = nil - _, err = d.h.Write(frame.HeaderBlockFragment()) - if err != nil { - err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err) - } - - if frame.HeadersEnded() { - if closeErr := d.h.Close(); closeErr != nil && err == nil { - err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr) +func (d *decodeState) processHeaderField(f hpack.HeaderField) { + switch f.Name { + case "content-type": + if !validContentType(f.Value) { + d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)) + return + } + case "grpc-encoding": + d.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)) + return + } + d.statusCode = codes.Code(code) + case "grpc-message": + d.statusDesc = decodeGrpcMessage(f.Value) + case "grpc-timeout": + d.timeoutSet = true + var err error + d.timeout, err = decodeTimeout(f.Value) + if err != nil { + d.setErr(StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err)) + return + } + case ":path": + d.method = f.Value + default: + if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) { + if f.Name == "user-agent" { + i := strings.LastIndex(f.Value, " ") + if i == -1 { + // There is no application user agent string being set. + return + } + // Extract the application user agent string. + f.Value = f.Value[:i] + } + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) + if err != nil { + grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) + return + } + d.mdata[k] = append(d.mdata[k], v) } - endHeaders = true - } - - if err == nil && d.err != nil { - err = d.err } - return } type timeoutUnit uint8 @@ -275,7 +252,7 @@ func div(d, r time.Duration) int64 { } // TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. -func timeoutEncode(t time.Duration) string { +func encodeTimeout(t time.Duration) string { if d := div(t, time.Nanosecond); d <= maxTimeoutValue { return strconv.FormatInt(d, 10) + "n" } @@ -295,7 +272,7 @@ func timeoutEncode(t time.Duration) string { return strconv.FormatInt(div(t, time.Hour), 10) + "H" } -func timeoutDecode(s string) (time.Duration, error) { +func decodeTimeout(s string) (time.Duration, error) { size := len(s) if size < 2 { return 0, fmt.Errorf("transport: timeout string is too short: %q", s) @@ -312,6 +289,80 @@ func timeoutDecode(s string) (time.Duration, error) { return d * time.Duration(t), nil } +const ( + spaceByte = ' ' + tildaByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". +// It checks to see if each individual byte in msg is an +// allowable byte, and then either percent encoding or passing it through. +// When percent encoding, the byte is converted into hexadecimal notation +// with a '%' prepended. 
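A hypothetical round trip of the two helpers defined below (illustrative, not taken from the upstream sources): only '%' and bytes outside the printable range 0x20-0x7D are percent-escaped as "%XX", so typical status messages pass through unchanged.

	msg := "deadline exceeded after 100%\n"
	enc := encodeGrpcMessage(msg) // "deadline exceeded after 100%25%0A"
	dec := decodeGrpcMessage(enc) // dec == msg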
+func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c < tildaByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c >= spaceByte && c < tildaByte && c != percentByte { + buf.WriteByte(c) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", c)) + } + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseInt(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + type framer struct { numWriters int32 reader io.Reader @@ -321,10 +372,11 @@ type framer struct { func newFramer(conn net.Conn) *framer { f := &framer{ - reader: conn, + reader: bufio.NewReaderSize(conn, http2IOBufSize), writer: bufio.NewWriterSize(conn, http2IOBufSize), } f.fr = http2.NewFramer(f.writer, f.reader) + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } @@ -452,3 +504,7 @@ func (f *framer) flushWrite() error { func (f *framer) readFrame() (http2.Frame, error) { return f.fr.ReadFrame() } + +func (f *framer) errorDetail() error { + return f.fr.ErrorDetail() +} diff --git a/vendor/google.golang.org/grpc/transport/pre_go16.go b/vendor/google.golang.org/grpc/transport/pre_go16.go new file mode 100644 index 000000000..33d91c17c --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/pre_go16.go @@ -0,0 +1,51 @@ +// +build !go1.6 + +/* + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +package transport + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + var dialer net.Dialer + if deadline, ok := ctx.Deadline(); ok { + dialer.Timeout = deadline.Sub(time.Now()) + } + return dialer.Dial(network, address) +} diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index 3b934b4dc..d59e51137 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -44,7 +44,6 @@ import ( "io" "net" "sync" - "time" "golang.org/x/net/context" "golang.org/x/net/trace" @@ -63,13 +62,11 @@ type recvMsg struct { err error } -func (recvMsg) isItem() bool { - return true -} +func (*recvMsg) item() {} // All items in an out of a recvBuffer should be the same type. type item interface { - isItem() bool + item() } // recvBuffer is an unbounded channel of item. @@ -89,12 +86,14 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r item) { b.mu.Lock() defer b.mu.Unlock() - b.backlog = append(b.backlog, r) - select { - case b.c <- b.backlog[0]: - b.backlog = b.backlog[1:] - default: + if len(b.backlog) == 0 { + select { + case b.c <- r: + return + default: + } } + b.backlog = append(b.backlog, r) } func (b *recvBuffer) load() { @@ -120,10 +119,11 @@ func (b *recvBuffer) get() <-chan item { // recvBufferReader implements io.Reader interface to read the data from // recvBuffer. type recvBufferReader struct { - ctx context.Context - recv *recvBuffer - last *bytes.Reader // Stores the remaining data in the previous calls. - err error + ctx context.Context + goAway chan struct{} + recv *recvBuffer + last *bytes.Reader // Stores the remaining data in the previous calls. + err error } // Read reads the next len(p) bytes from last. If last is drained, it tries to @@ -141,6 +141,8 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) { select { case <-r.ctx.Done(): return 0, ContextErr(r.ctx.Err()) + case <-r.goAway: + return 0, ErrStreamDrain case i := <-r.recv.get(): r.recv.load() m := i.(*recvMsg) @@ -158,7 +160,7 @@ const ( streamActive streamState = iota streamWriteDone // EndStream sent streamReadDone // EndStream received - streamDone // sendDone and recvDone or RSTStreamFrame is sent or received. + streamDone // the entire stream is finished. ) // Stream represents an RPC in the transport layer. @@ -169,6 +171,10 @@ type Stream struct { // ctx is the associated context of the stream. ctx context.Context cancel context.CancelFunc + // done is closed when the final status arrives. + done chan struct{} + // goAway is closed when the server sent GoAways signal before this stream was initiated. + goAway chan struct{} // method records the associated RPC method of the stream. 
method string recvCompress string @@ -214,6 +220,18 @@ func (s *Stream) SetSendCompress(str string) { s.sendCompress = str } +// Done returns a chanel which is closed when it receives the final status +// from the server. +func (s *Stream) Done() <-chan struct{} { + return s.done +} + +// GoAway returns a channel which is closed when the server sent GoAways signal +// before this stream was initiated. +func (s *Stream) GoAway() <-chan struct{} { + return s.goAway +} + // Header acquires the key-value pairs of header metadata once it // is available. It blocks until i) the metadata is ready or ii) there is no // header metadata or iii) the stream is cancelled/expired. @@ -221,6 +239,8 @@ func (s *Stream) Header() (metadata.MD, error) { select { case <-s.ctx.Done(): return nil, ContextErr(s.ctx.Err()) + case <-s.goAway: + return nil, ErrStreamDrain case <-s.headerChan: return s.header.Copy(), nil } @@ -299,20 +319,18 @@ func (s *Stream) Read(p []byte) (n int, err error) { return } -type key int - // The key to save transport.Stream in the context. -const streamKey = key(0) +type streamKey struct{} // newContextWithStream creates a new context from ctx and attaches stream // to it. func newContextWithStream(ctx context.Context, stream *Stream) context.Context { - return context.WithValue(ctx, streamKey, stream) + return context.WithValue(ctx, streamKey{}, stream) } // StreamFromContext returns the stream saved in ctx. func StreamFromContext(ctx context.Context) (s *Stream, ok bool) { - s, ok = ctx.Value(streamKey).(*Stream) + s, ok = ctx.Value(streamKey{}).(*Stream) return } @@ -323,6 +341,7 @@ const ( reachable transportState = iota unreachable closing + draining ) // NewServerTransport creates a ServerTransport with conn or non-nil error @@ -336,17 +355,17 @@ type ConnectOptions struct { // UserAgent is the application user agent. UserAgent string // Dialer specifies how to dial a network address. - Dialer func(string, time.Duration) (net.Conn, error) - // AuthOptions stores the credentials required to setup a client connection and/or issue RPCs. - AuthOptions []credentials.Credentials - // Timeout specifies the timeout for dialing a client connection. - Timeout time.Duration + Dialer func(context.Context, string) (net.Conn, error) + // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. + PerRPCCredentials []credentials.PerRPCCredentials + // TransportCredentials stores the Authenticator required to setup a client connection. + TransportCredentials credentials.TransportCredentials } // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, error) { - return newHTTP2Client(target, opts) +func NewClientTransport(ctx context.Context, target string, opts ConnectOptions) (ClientTransport, error) { + return newHTTP2Client(ctx, target, opts) } // Options provides additional hints and information for message @@ -393,6 +412,10 @@ type ClientTransport interface { // is called only once. Close() error + // GracefulClose starts to tear down the transport. It stops accepting + // new RPCs and wait the completion of the pending RPCs. + GracefulClose() error + // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. Write(s *Stream, data []byte, opts *Options) error @@ -412,6 +435,11 @@ type ClientTransport interface { // and create a new one) in error case. 
It should not return nil // once the transport is initiated. Error() <-chan struct{} + + // GoAway returns a channel that is closed when ClientTranspor + // receives the draining signal from the server (e.g., GOAWAY frame in + // HTTP/2). + GoAway() <-chan struct{} } // ServerTransport is the common interface for all gRPC server-side transport @@ -443,6 +471,9 @@ type ServerTransport interface { // RemoteAddr returns the remote network address. RemoteAddr() net.Addr + + // Drain notifies the client this ServerTransport stops accepting new RPCs. + Drain() } // StreamErrorf creates an StreamError with the specified error code and description. @@ -454,9 +485,11 @@ func StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError { } // ConnectionErrorf creates an ConnectionError with the specified error description. -func ConnectionErrorf(format string, a ...interface{}) ConnectionError { +func ConnectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), + temp: temp, + err: e, } } @@ -464,14 +497,36 @@ func ConnectionErrorf(format string, a ...interface{}) ConnectionError { // entire connection and the retry of all the active streams. type ConnectionError struct { Desc string + temp bool + err error } func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: desc = %q", e.Desc) } -// Define some common ConnectionErrors. -var ErrConnClosing = ConnectionError{Desc: "transport is closing"} +// Temporary indicates if this connection error is temporary or fatal. +func (e ConnectionError) Temporary() bool { + return e.temp +} + +// Origin returns the original error of this connection error. +func (e ConnectionError) Origin() error { + // Never return nil error here. + // If the original error is nil, return itself. + if e.err == nil { + return e + } + return e.err +} + +var ( + // ErrConnClosing indicates that the transport is closing. + ErrConnClosing = ConnectionError{Desc: "transport is closing", temp: true} + // ErrStreamDrain indicates that the stream is rejected by the server because + // the server stops accepting new RPCs. + ErrStreamDrain = StreamErrorf(codes.Unavailable, "the server stops accepting new RPCs") +) // StreamError is an error that only affects one stream within a connection. type StreamError struct { @@ -496,12 +551,25 @@ func ContextErr(err error) StreamError { // wait blocks until it can receive from ctx.Done, closing, or proceed. // If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err. +// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise +// it return the StreamError for ctx.Err. +// If it receives from goAway, it returns 0, ErrStreamDrain. // If it receives from closing, it returns 0, ErrConnClosing. // If it receives from proceed, it returns the received integer, nil. -func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) { +func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) { select { case <-ctx.Done(): return 0, ContextErr(ctx.Err()) + case <-done: + // User cancellation has precedence. 
+		select {
+		case <-ctx.Done():
+			return 0, ContextErr(ctx.Err())
+		default:
+		}
+		return 0, io.EOF
+	case <-goAway:
+		return 0, ErrStreamDrain
 	case <-closing:
 		return 0, ErrConnClosing
 	case i := <-proceed:

From 423e3901f0c58b2733387127b15ec93ace9272d0 Mon Sep 17 00:00:00 2001
From: Dongsu Park
Date: Fri, 26 Aug 2016 15:54:56 +0200
Subject: [PATCH 2/3] protobuf: regenerate fleet.pb.go

Now that gRPC and the other dependencies have been updated, fleet.pb.go
needs to be regenerated as well.
---
 protobuf/fleet.pb.go | 2247 +++++++++++++-----------------------------
 1 file changed, 699 insertions(+), 1548 deletions(-)

diff --git a/protobuf/fleet.pb.go b/protobuf/fleet.pb.go
index 9eb28e3bf..1aaab48fc 100644
--- a/protobuf/fleet.pb.go
+++ b/protobuf/fleet.pb.go
@@ -1,14 +1,16 @@
 // Code generated by protoc-gen-gogo.
-// source: github.com/coreos/fleet/rpc/fleet.proto
+// source: fleet.proto
 // DO NOT EDIT!
 
 /*
 Package rpc is a generated protocol buffer package.
 
 It is generated from these files:
-	github.com/coreos/fleet/rpc/fleet.proto
+	fleet.proto
 
 It has these top-level messages:
+	HealthCheckRequest
+	HealthCheckResponse
 	MachineProperties
 	UpdatedState
 	UnitStateFilter
@@ -33,23 +35,20 @@
 */
 package rpc
 
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
+import (
+	"fmt"
 
-import strconv "strconv"
+	proto "github.com/gogo/protobuf/proto"
 
-import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import reflect "reflect"
+	math "math"
 
-import (
 	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
 )
 
+import _ "github.com/gogo/protobuf/gogoproto"
+
 import io "io"
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -57,6 +56,10 @@ var _ = proto.Marshal
 var _ = fmt.Errorf
 var _ = math.Inf
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+const _ = proto.GoGoProtoPackageIsVersion1 + type TargetState int32 const ( @@ -76,54 +79,115 @@ var TargetState_value = map[string]int32{ "LAUNCHED": 2, } +func (x TargetState) String() string { + return proto.EnumName(TargetState_name, int32(x)) +} +func (TargetState) EnumDescriptor() ([]byte, []int) { return fileDescriptorFleet, []int{0} } + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptorFleet, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{0} } + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=rpc.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{1} } + type MachineProperties struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } -func (m *MachineProperties) Reset() { *m = MachineProperties{} } -func (*MachineProperties) ProtoMessage() {} +func (m *MachineProperties) Reset() { *m = MachineProperties{} } +func (m *MachineProperties) String() string { return proto.CompactTextString(m) } +func (*MachineProperties) ProtoMessage() {} +func (*MachineProperties) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{2} } type UpdatedState struct { - UnitIds []string `protobuf:"bytes,1,rep,name=unit_ids" json:"unit_ids,omitempty"` + UnitIds []string `protobuf:"bytes,1,rep,name=unit_ids,json=unitIds" json:"unit_ids,omitempty"` } -func (m *UpdatedState) Reset() { *m = UpdatedState{} } -func (*UpdatedState) ProtoMessage() {} +func (m *UpdatedState) Reset() { *m = UpdatedState{} } +func (m *UpdatedState) String() string { return proto.CompactTextString(m) } +func (*UpdatedState) ProtoMessage() {} +func (*UpdatedState) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{3} } type UnitStateFilter struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` - LoadState string `protobuf:"bytes,3,opt,name=load_state,proto3" json:"load_state,omitempty"` - ActiveState string `protobuf:"bytes,4,opt,name=active_state,proto3" json:"active_state,omitempty"` - SubState string 
`protobuf:"bytes,5,opt,name=sub_state,proto3" json:"sub_state,omitempty"` - MachineID string `protobuf:"bytes,6,opt,name=machine_id,proto3" json:"machine_id,omitempty"` + LoadState string `protobuf:"bytes,3,opt,name=load_state,json=loadState,proto3" json:"load_state,omitempty"` + ActiveState string `protobuf:"bytes,4,opt,name=active_state,json=activeState,proto3" json:"active_state,omitempty"` + SubState string `protobuf:"bytes,5,opt,name=sub_state,json=subState,proto3" json:"sub_state,omitempty"` + MachineID string `protobuf:"bytes,6,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"` } -func (m *UnitStateFilter) Reset() { *m = UnitStateFilter{} } -func (*UnitStateFilter) ProtoMessage() {} +func (m *UnitStateFilter) Reset() { *m = UnitStateFilter{} } +func (m *UnitStateFilter) String() string { return proto.CompactTextString(m) } +func (*UnitStateFilter) ProtoMessage() {} +func (*UnitStateFilter) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{4} } type UnitFilter struct { - MachineID string `protobuf:"bytes,1,opt,name=machine_id,proto3" json:"machine_id,omitempty"` + MachineID string `protobuf:"bytes,1,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"` } -func (m *UnitFilter) Reset() { *m = UnitFilter{} } -func (*UnitFilter) ProtoMessage() {} +func (m *UnitFilter) Reset() { *m = UnitFilter{} } +func (m *UnitFilter) String() string { return proto.CompactTextString(m) } +func (*UnitFilter) ProtoMessage() {} +func (*UnitFilter) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{5} } type ScheduleUnitRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - MachineID string `protobuf:"bytes,2,opt,name=machine_id,proto3" json:"machine_id,omitempty"` + MachineID string `protobuf:"bytes,2,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"` } -func (m *ScheduleUnitRequest) Reset() { *m = ScheduleUnitRequest{} } -func (*ScheduleUnitRequest) ProtoMessage() {} +func (m *ScheduleUnitRequest) Reset() { *m = ScheduleUnitRequest{} } +func (m *ScheduleUnitRequest) String() string { return proto.CompactTextString(m) } +func (*ScheduleUnitRequest) ProtoMessage() {} +func (*ScheduleUnitRequest) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{6} } type UnscheduleUnitRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - MachineID string `protobuf:"bytes,2,opt,name=machine_id,proto3" json:"machine_id,omitempty"` + MachineID string `protobuf:"bytes,2,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"` } -func (m *UnscheduleUnitRequest) Reset() { *m = UnscheduleUnitRequest{} } -func (*UnscheduleUnitRequest) ProtoMessage() {} +func (m *UnscheduleUnitRequest) Reset() { *m = UnscheduleUnitRequest{} } +func (m *UnscheduleUnitRequest) String() string { return proto.CompactTextString(m) } +func (*UnscheduleUnitRequest) ProtoMessage() {} +func (*UnscheduleUnitRequest) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{7} } type SaveUnitStateRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -131,8 +195,10 @@ type SaveUnitStateRequest struct { TTL int32 `protobuf:"varint,3,opt,name=ttl,proto3" json:"ttl,omitempty"` } -func (m *SaveUnitStateRequest) Reset() { *m = SaveUnitStateRequest{} } -func (*SaveUnitStateRequest) ProtoMessage() {} +func (m *SaveUnitStateRequest) Reset() { *m = SaveUnitStateRequest{} } +func (m *SaveUnitStateRequest) String() string { 
return proto.CompactTextString(m) } +func (*SaveUnitStateRequest) ProtoMessage() {} +func (*SaveUnitStateRequest) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{8} } func (m *SaveUnitStateRequest) GetState() *UnitState { if m != nil { @@ -143,25 +209,31 @@ func (m *SaveUnitStateRequest) GetState() *UnitState { type Heartbeat struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - MachineID string `protobuf:"bytes,2,opt,name=machine_id,proto3" json:"machine_id,omitempty"` + MachineID string `protobuf:"bytes,2,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"` TTL int32 `protobuf:"varint,3,opt,name=ttl,proto3" json:"ttl,omitempty"` } -func (m *Heartbeat) Reset() { *m = Heartbeat{} } -func (*Heartbeat) ProtoMessage() {} +func (m *Heartbeat) Reset() { *m = Heartbeat{} } +func (m *Heartbeat) String() string { return proto.CompactTextString(m) } +func (*Heartbeat) ProtoMessage() {} +func (*Heartbeat) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{9} } type GenericReply struct { } -func (m *GenericReply) Reset() { *m = GenericReply{} } -func (*GenericReply) ProtoMessage() {} +func (m *GenericReply) Reset() { *m = GenericReply{} } +func (m *GenericReply) String() string { return proto.CompactTextString(m) } +func (*GenericReply) ProtoMessage() {} +func (*GenericReply) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{10} } type Units struct { Units []Unit `protobuf:"bytes,1,rep,name=units" json:"units"` } -func (m *Units) Reset() { *m = Units{} } -func (*Units) ProtoMessage() {} +func (m *Units) Reset() { *m = Units{} } +func (m *Units) String() string { return proto.CompactTextString(m) } +func (*Units) ProtoMessage() {} +func (*Units) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{11} } func (m *Units) GetUnits() []Unit { if m != nil { @@ -171,11 +243,13 @@ func (m *Units) GetUnits() []Unit { } type UnitStates struct { - UnitStates []*UnitState `protobuf:"bytes,1,rep,name=unit_states" json:"unit_states,omitempty"` + UnitStates []*UnitState `protobuf:"bytes,1,rep,name=unit_states,json=unitStates" json:"unit_states,omitempty"` } -func (m *UnitStates) Reset() { *m = UnitStates{} } -func (*UnitStates) ProtoMessage() {} +func (m *UnitStates) Reset() { *m = UnitStates{} } +func (m *UnitStates) String() string { return proto.CompactTextString(m) } +func (*UnitStates) ProtoMessage() {} +func (*UnitStates) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{12} } func (m *UnitStates) GetUnitStates() []*UnitState { if m != nil { @@ -187,21 +261,25 @@ func (m *UnitStates) GetUnitStates() []*UnitState { type UnitState struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` - LoadState string `protobuf:"bytes,3,opt,name=load_state,proto3" json:"load_state,omitempty"` - ActiveState string `protobuf:"bytes,4,opt,name=active_state,proto3" json:"active_state,omitempty"` - SubState string `protobuf:"bytes,5,opt,name=sub_state,proto3" json:"sub_state,omitempty"` - MachineID string `protobuf:"bytes,6,opt,name=machine_id,proto3" json:"machine_id,omitempty"` + LoadState string `protobuf:"bytes,3,opt,name=load_state,json=loadState,proto3" json:"load_state,omitempty"` + ActiveState string `protobuf:"bytes,4,opt,name=active_state,json=activeState,proto3" json:"active_state,omitempty"` + SubState string `protobuf:"bytes,5,opt,name=sub_state,json=subState,proto3" 
json:"sub_state,omitempty"` + MachineID string `protobuf:"bytes,6,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"` } -func (m *UnitState) Reset() { *m = UnitState{} } -func (*UnitState) ProtoMessage() {} +func (m *UnitState) Reset() { *m = UnitState{} } +func (m *UnitState) String() string { return proto.CompactTextString(m) } +func (*UnitState) ProtoMessage() {} +func (*UnitState) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{13} } type ScheduledUnits struct { Units []ScheduledUnit `protobuf:"bytes,1,rep,name=units" json:"units"` } -func (m *ScheduledUnits) Reset() { *m = ScheduledUnits{} } -func (*ScheduledUnits) ProtoMessage() {} +func (m *ScheduledUnits) Reset() { *m = ScheduledUnits{} } +func (m *ScheduledUnits) String() string { return proto.CompactTextString(m) } +func (*ScheduledUnits) ProtoMessage() {} +func (*ScheduledUnits) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{14} } func (m *ScheduledUnits) GetUnits() []ScheduledUnit { if m != nil { @@ -212,28 +290,34 @@ func (m *ScheduledUnits) GetUnits() []ScheduledUnit { type ScheduledUnit struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - CurrentState TargetState `protobuf:"varint,2,opt,name=current_state,proto3,enum=rpc.TargetState" json:"current_state,omitempty"` - MachineID string `protobuf:"bytes,3,opt,name=machine_id,proto3" json:"machine_id,omitempty"` + CurrentState TargetState `protobuf:"varint,2,opt,name=current_state,json=currentState,proto3,enum=rpc.TargetState" json:"current_state,omitempty"` + MachineID string `protobuf:"bytes,3,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"` } -func (m *ScheduledUnit) Reset() { *m = ScheduledUnit{} } -func (*ScheduledUnit) ProtoMessage() {} +func (m *ScheduledUnit) Reset() { *m = ScheduledUnit{} } +func (m *ScheduledUnit) String() string { return proto.CompactTextString(m) } +func (*ScheduledUnit) ProtoMessage() {} +func (*ScheduledUnit) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{15} } type UnitName struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (m *UnitName) Reset() { *m = UnitName{} } -func (*UnitName) ProtoMessage() {} +func (m *UnitName) Reset() { *m = UnitName{} } +func (m *UnitName) String() string { return proto.CompactTextString(m) } +func (*UnitName) ProtoMessage() {} +func (*UnitName) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{16} } type Unit struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Unit UnitFile `protobuf:"bytes,2,opt,name=unit" json:"unit"` - DesiredState TargetState `protobuf:"varint,3,opt,name=desired_state,proto3,enum=rpc.TargetState" json:"desired_state,omitempty"` + DesiredState TargetState `protobuf:"varint,3,opt,name=desired_state,json=desiredState,proto3,enum=rpc.TargetState" json:"desired_state,omitempty"` } -func (m *Unit) Reset() { *m = Unit{} } -func (*Unit) ProtoMessage() {} +func (m *Unit) Reset() { *m = Unit{} } +func (m *Unit) String() string { return proto.CompactTextString(m) } +func (*Unit) ProtoMessage() {} +func (*Unit) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{17} } func (m *Unit) GetUnit() UnitFile { if m != nil { @@ -249,12 +333,13 @@ type MaybeScheduledUnit struct { IsScheduled isMaybeScheduledUnit_IsScheduled `protobuf_oneof:"is_scheduled"` } -func (m *MaybeScheduledUnit) Reset() { *m = MaybeScheduledUnit{} } -func (*MaybeScheduledUnit) ProtoMessage() {} +func (m 
*MaybeScheduledUnit) Reset() { *m = MaybeScheduledUnit{} } +func (m *MaybeScheduledUnit) String() string { return proto.CompactTextString(m) } +func (*MaybeScheduledUnit) ProtoMessage() {} +func (*MaybeScheduledUnit) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{18} } type isMaybeScheduledUnit_IsScheduled interface { isMaybeScheduledUnit_IsScheduled() - Equal(interface{}) bool MarshalTo([]byte) (int, error) Size() int } @@ -291,8 +376,8 @@ func (m *MaybeScheduledUnit) GetNotfound() *NotFound { } // XXX_OneofFuncs is for the internal use of the proto package. -func (*MaybeScheduledUnit) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _MaybeScheduledUnit_OneofMarshaler, _MaybeScheduledUnit_OneofUnmarshaler, []interface{}{ +func (*MaybeScheduledUnit) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _MaybeScheduledUnit_OneofMarshaler, _MaybeScheduledUnit_OneofUnmarshaler, _MaybeScheduledUnit_OneofSizer, []interface{}{ (*MaybeScheduledUnit_Unit)(nil), (*MaybeScheduledUnit_Notfound)(nil), } @@ -343,6 +428,27 @@ func _MaybeScheduledUnit_OneofUnmarshaler(msg proto.Message, tag, wire int, b *p } } +func _MaybeScheduledUnit_OneofSizer(msg proto.Message) (n int) { + m := msg.(*MaybeScheduledUnit) + // is_scheduled + switch x := m.IsScheduled.(type) { + case *MaybeScheduledUnit_Unit: + s := proto.Size(x.Unit) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *MaybeScheduledUnit_Notfound: + s := proto.Size(x.Notfound) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + type MaybeUnit struct { // Types that are valid to be assigned to HasUnit: // *MaybeUnit_Unit @@ -350,12 +456,13 @@ type MaybeUnit struct { HasUnit isMaybeUnit_HasUnit `protobuf_oneof:"has_unit"` } -func (m *MaybeUnit) Reset() { *m = MaybeUnit{} } -func (*MaybeUnit) ProtoMessage() {} +func (m *MaybeUnit) Reset() { *m = MaybeUnit{} } +func (m *MaybeUnit) String() string { return proto.CompactTextString(m) } +func (*MaybeUnit) ProtoMessage() {} +func (*MaybeUnit) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{19} } type isMaybeUnit_HasUnit interface { isMaybeUnit_HasUnit() - Equal(interface{}) bool MarshalTo([]byte) (int, error) Size() int } @@ -392,8 +499,8 @@ func (m *MaybeUnit) GetNotfound() *NotFound { } // XXX_OneofFuncs is for the internal use of the proto package. 
-func (*MaybeUnit) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), []interface{}) { - return _MaybeUnit_OneofMarshaler, _MaybeUnit_OneofUnmarshaler, []interface{}{ +func (*MaybeUnit) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _MaybeUnit_OneofMarshaler, _MaybeUnit_OneofUnmarshaler, _MaybeUnit_OneofSizer, []interface{}{ (*MaybeUnit_Unit)(nil), (*MaybeUnit_Notfound)(nil), } @@ -444,18 +551,43 @@ func _MaybeUnit_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buff } } +func _MaybeUnit_OneofSizer(msg proto.Message) (n int) { + m := msg.(*MaybeUnit) + // has_unit + switch x := m.HasUnit.(type) { + case *MaybeUnit_Unit: + s := proto.Size(x.Unit) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *MaybeUnit_Notfound: + s := proto.Size(x.Notfound) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + type NotFound struct { } -func (m *NotFound) Reset() { *m = NotFound{} } -func (*NotFound) ProtoMessage() {} +func (m *NotFound) Reset() { *m = NotFound{} } +func (m *NotFound) String() string { return proto.CompactTextString(m) } +func (*NotFound) ProtoMessage() {} +func (*NotFound) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{20} } type UnitFile struct { - UnitOptions []UnitOption `protobuf:"bytes,1,rep,name=unit_options" json:"unit_options"` + UnitOptions []UnitOption `protobuf:"bytes,1,rep,name=unit_options,json=unitOptions" json:"unit_options"` } -func (m *UnitFile) Reset() { *m = UnitFile{} } -func (*UnitFile) ProtoMessage() {} +func (m *UnitFile) Reset() { *m = UnitFile{} } +func (m *UnitFile) String() string { return proto.CompactTextString(m) } +func (*UnitFile) ProtoMessage() {} +func (*UnitFile) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{21} } func (m *UnitFile) GetUnitOptions() []UnitOption { if m != nil { @@ -470,55 +602,14 @@ type UnitOption struct { Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` } -func (m *UnitOption) Reset() { *m = UnitOption{} } -func (*UnitOption) ProtoMessage() {} - -// Health chechk for the Registry serviceName -type HealthCheckResponse_ServingStatus int32 - -const ( - HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 - HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 - HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 -) - -var HealthCheckResponse_ServingStatus_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SERVING", - 2: "NOT_SERVING", -} -var HealthCheckResponse_ServingStatus_value = map[string]int32{ - "UNKNOWN": 0, - "SERVING": 1, - "NOT_SERVING": 2, -} - -func (x HealthCheckResponse_ServingStatus) String() string { - return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) -} -func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{1, 0} -} - -type HealthCheckRequest struct { - Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` -} - -func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } -func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } -func 
(*HealthCheckRequest) ProtoMessage() {} -func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type HealthCheckResponse struct { - Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=rpc.HealthCheckResponse_ServingStatus" json:"status,omitempty"` -} - -func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } -func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } -func (*HealthCheckResponse) ProtoMessage() {} -func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *UnitOption) Reset() { *m = UnitOption{} } +func (m *UnitOption) String() string { return proto.CompactTextString(m) } +func (*UnitOption) ProtoMessage() {} +func (*UnitOption) Descriptor() ([]byte, []int) { return fileDescriptorFleet, []int{22} } func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "rpc.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "rpc.HealthCheckResponse") proto.RegisterType((*MachineProperties)(nil), "rpc.MachineProperties") proto.RegisterType((*UpdatedState)(nil), "rpc.UpdatedState") proto.RegisterType((*UnitStateFilter)(nil), "rpc.UnitStateFilter") @@ -541,1046 +632,17 @@ func init() { proto.RegisterType((*UnitFile)(nil), "rpc.UnitFile") proto.RegisterType((*UnitOption)(nil), "rpc.UnitOption") proto.RegisterEnum("rpc.TargetState", TargetState_name, TargetState_value) - - proto.RegisterType((*HealthCheckRequest)(nil), "rpc.HealthCheckRequest") - proto.RegisterType((*HealthCheckResponse)(nil), "rpc.HealthCheckResponse") proto.RegisterEnum("rpc.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) } -func (x TargetState) String() string { - s, ok := TargetState_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (this *MachineProperties) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*MachineProperties) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if this.Id != that1.Id { - return false - } - return true -} -func (this *UpdatedState) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*UpdatedState) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if len(this.UnitIds) != len(that1.UnitIds) { - return false - } - for i := range this.UnitIds { - if this.UnitIds[i] != that1.UnitIds[i] { - return false - } - } - return true -} -func (this *UnitStateFilter) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*UnitStateFilter) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if this.Hash != that1.Hash { - return false - } - if this.LoadState != that1.LoadState { - return false - } - if this.ActiveState != that1.ActiveState { - return false - } - if this.SubState != that1.SubState { - return false - } - if this.MachineID != that1.MachineID { - return false - } - return true -} -func (this *UnitFilter) Equal(that interface{}) bool { - if that == nil { - if 
this == nil { - return true - } - return false - } - - that1, ok := that.(*UnitFilter) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if this.MachineID != that1.MachineID { - return false - } - return true -} -func (this *ScheduleUnitRequest) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*ScheduleUnitRequest) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if this.MachineID != that1.MachineID { - return false - } - return true -} -func (this *UnscheduleUnitRequest) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*UnscheduleUnitRequest) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if this.MachineID != that1.MachineID { - return false - } - return true -} -func (this *SaveUnitStateRequest) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*SaveUnitStateRequest) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if !this.State.Equal(that1.State) { - return false - } - if this.TTL != that1.TTL { - return false - } - return true -} -func (this *Heartbeat) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*Heartbeat) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if this.MachineID != that1.MachineID { - return false - } - if this.TTL != that1.TTL { - return false - } - return true -} -func (this *GenericReply) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*GenericReply) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - return true -} -func (this *Units) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*Units) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if len(this.Units) != len(that1.Units) { - return false - } - for i := range this.Units { - if !this.Units[i].Equal(&that1.Units[i]) { - return false - } - } - return true -} -func (this *UnitStates) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*UnitStates) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if len(this.UnitStates) != len(that1.UnitStates) { - return false - } - for i := range this.UnitStates { - if !this.UnitStates[i].Equal(that1.UnitStates[i]) { - return false - } - } - return true 
-} -func (this *UnitState) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*UnitState) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if this.Hash != that1.Hash { - return false - } - if this.LoadState != that1.LoadState { - return false - } - if this.ActiveState != that1.ActiveState { - return false - } - if this.SubState != that1.SubState { - return false - } - if this.MachineID != that1.MachineID { - return false - } - return true -} -func (this *ScheduledUnits) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*ScheduledUnits) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if len(this.Units) != len(that1.Units) { - return false - } - for i := range this.Units { - if !this.Units[i].Equal(&that1.Units[i]) { - return false - } - } - return true -} -func (this *ScheduledUnit) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*ScheduledUnit) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if this.CurrentState != that1.CurrentState { - return false - } - if this.MachineID != that1.MachineID { - return false - } - return true -} -func (this *UnitName) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*UnitName) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - return true -} -func (this *Unit) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*Unit) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if !this.Unit.Equal(&that1.Unit) { - return false - } - if this.DesiredState != that1.DesiredState { - return false - } - return true -} -func (this *MaybeScheduledUnit) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*MaybeScheduledUnit) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if that1.IsScheduled == nil { - if this.IsScheduled != nil { - return false - } - } else if this.IsScheduled == nil { - return false - } else if !this.IsScheduled.Equal(that1.IsScheduled) { - return false - } - return true -} -func (this *MaybeScheduledUnit_Unit) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*MaybeScheduledUnit_Unit) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if !this.Unit.Equal(that1.Unit) { - return false - } - return true -} -func (this 
*MaybeScheduledUnit_Notfound) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*MaybeScheduledUnit_Notfound) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if !this.Notfound.Equal(that1.Notfound) { - return false - } - return true -} -func (this *MaybeUnit) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*MaybeUnit) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if that1.HasUnit == nil { - if this.HasUnit != nil { - return false - } - } else if this.HasUnit == nil { - return false - } else if !this.HasUnit.Equal(that1.HasUnit) { - return false - } - return true -} -func (this *MaybeUnit_Unit) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*MaybeUnit_Unit) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if !this.Unit.Equal(that1.Unit) { - return false - } - return true -} -func (this *MaybeUnit_Notfound) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*MaybeUnit_Notfound) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if !this.Notfound.Equal(that1.Notfound) { - return false - } - return true -} -func (this *NotFound) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*NotFound) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - return true -} -func (this *UnitFile) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*UnitFile) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if len(this.UnitOptions) != len(that1.UnitOptions) { - return false - } - for i := range this.UnitOptions { - if !this.UnitOptions[i].Equal(&that1.UnitOptions[i]) { - return false - } - } - return true -} -func (this *UnitOption) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*UnitOption) - if !ok { - return false - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if this.Section != that1.Section { - return false - } - if this.Name != that1.Name { - return false - } - if this.Value != that1.Value { - return false - } - return true -} -func (this *MachineProperties) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&rpc.MachineProperties{") - s = append(s, "Id: "+fmt.Sprintf("%#v", this.Id)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UpdatedState) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&rpc.UpdatedState{") - s = append(s, "UnitIds: "+fmt.Sprintf("%#v", 
this.UnitIds)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UnitStateFilter) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&rpc.UnitStateFilter{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Hash: "+fmt.Sprintf("%#v", this.Hash)+",\n") - s = append(s, "LoadState: "+fmt.Sprintf("%#v", this.LoadState)+",\n") - s = append(s, "ActiveState: "+fmt.Sprintf("%#v", this.ActiveState)+",\n") - s = append(s, "SubState: "+fmt.Sprintf("%#v", this.SubState)+",\n") - s = append(s, "MachineID: "+fmt.Sprintf("%#v", this.MachineID)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UnitFilter) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&rpc.UnitFilter{") - s = append(s, "MachineID: "+fmt.Sprintf("%#v", this.MachineID)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ScheduleUnitRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&rpc.ScheduleUnitRequest{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "MachineID: "+fmt.Sprintf("%#v", this.MachineID)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UnscheduleUnitRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&rpc.UnscheduleUnitRequest{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "MachineID: "+fmt.Sprintf("%#v", this.MachineID)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *SaveUnitStateRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&rpc.SaveUnitStateRequest{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - if this.State != nil { - s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") - } - s = append(s, "TTL: "+fmt.Sprintf("%#v", this.TTL)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Heartbeat) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&rpc.Heartbeat{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "MachineID: "+fmt.Sprintf("%#v", this.MachineID)+",\n") - s = append(s, "TTL: "+fmt.Sprintf("%#v", this.TTL)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GenericReply) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&rpc.GenericReply{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Units) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&rpc.Units{") - if this.Units != nil { - s = append(s, "Units: "+strings.Replace(fmt.Sprintf("%#v", this.Units), `&`, ``, 1)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UnitStates) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&rpc.UnitStates{") - if this.UnitStates != nil { - s = append(s, "UnitStates: "+fmt.Sprintf("%#v", this.UnitStates)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UnitState) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&rpc.UnitState{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, 
"Hash: "+fmt.Sprintf("%#v", this.Hash)+",\n") - s = append(s, "LoadState: "+fmt.Sprintf("%#v", this.LoadState)+",\n") - s = append(s, "ActiveState: "+fmt.Sprintf("%#v", this.ActiveState)+",\n") - s = append(s, "SubState: "+fmt.Sprintf("%#v", this.SubState)+",\n") - s = append(s, "MachineID: "+fmt.Sprintf("%#v", this.MachineID)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ScheduledUnits) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&rpc.ScheduledUnits{") - if this.Units != nil { - s = append(s, "Units: "+strings.Replace(fmt.Sprintf("%#v", this.Units), `&`, ``, 1)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ScheduledUnit) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&rpc.ScheduledUnit{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "CurrentState: "+fmt.Sprintf("%#v", this.CurrentState)+",\n") - s = append(s, "MachineID: "+fmt.Sprintf("%#v", this.MachineID)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UnitName) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&rpc.UnitName{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Unit) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&rpc.Unit{") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Unit: "+strings.Replace(this.Unit.GoString(), `&`, ``, 1)+",\n") - s = append(s, "DesiredState: "+fmt.Sprintf("%#v", this.DesiredState)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MaybeScheduledUnit) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&rpc.MaybeScheduledUnit{") - if this.IsScheduled != nil { - s = append(s, "IsScheduled: "+fmt.Sprintf("%#v", this.IsScheduled)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MaybeScheduledUnit_Unit) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&rpc.MaybeScheduledUnit_Unit{` + - `Unit:` + fmt.Sprintf("%#v", this.Unit) + `}`}, ", ") - return s -} -func (this *MaybeScheduledUnit_Notfound) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&rpc.MaybeScheduledUnit_Notfound{` + - `Notfound:` + fmt.Sprintf("%#v", this.Notfound) + `}`}, ", ") - return s -} -func (this *MaybeUnit) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&rpc.MaybeUnit{") - if this.HasUnit != nil { - s = append(s, "HasUnit: "+fmt.Sprintf("%#v", this.HasUnit)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MaybeUnit_Unit) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&rpc.MaybeUnit_Unit{` + - `Unit:` + fmt.Sprintf("%#v", this.Unit) + `}`}, ", ") - return s -} -func (this *MaybeUnit_Notfound) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&rpc.MaybeUnit_Notfound{` + - `Notfound:` + fmt.Sprintf("%#v", this.Notfound) + `}`}, ", ") - return s -} -func (this *NotFound) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&rpc.NotFound{") - s = append(s, "}") - return strings.Join(s, "") -} -func (this 
*UnitFile) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&rpc.UnitFile{") - if this.UnitOptions != nil { - s = append(s, "UnitOptions: "+strings.Replace(fmt.Sprintf("%#v", this.UnitOptions), `&`, ``, 1)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UnitOption) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&rpc.UnitOption{") - s = append(s, "Section: "+fmt.Sprintf("%#v", this.Section)+",\n") - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringFleet(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func extensionToGoStringFleet(e map[int32]github_com_gogo_protobuf_proto.Extension) string { - if e == nil { - return "nil" - } - s := "map[int32]proto.Extension{" - keys := make([]int, 0, len(e)) - for k := range e { - keys = append(keys, int(k)) - } - sort.Ints(keys) - ss := []string{} - for _, k := range keys { - ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) - } - s += strings.Join(ss, ",") + "}" - return s -} // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion3 + // Client API for Registry service type RegistryClient interface { @@ -1605,7 +667,7 @@ type RegistryClient interface { SetUnitTargetState(ctx context.Context, in *ScheduledUnit, opts ...grpc.CallOption) (*GenericReply, error) UnscheduleUnit(ctx context.Context, in *UnscheduleUnitRequest, opts ...grpc.CallOption) (*GenericReply, error) AgentEvents(ctx context.Context, in *MachineProperties, opts ...grpc.CallOption) (Registry_AgentEventsClient, error) - + // Health check Status(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) } @@ -1767,15 +829,6 @@ func (c *registryClient) AgentEvents(ctx context.Context, in *MachineProperties, return x, nil } -func (c *registryClient) Status(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { - out := new(HealthCheckResponse) - err := grpc.Invoke(ctx, "/rpc.Registry/Status", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - type Registry_AgentEventsClient interface { Recv() (*UpdatedState, error) grpc.ClientStream @@ -1793,6 +846,15 @@ func (x *registryAgentEventsClient) Recv() (*UpdatedState, error) { return m, nil } +func (c *registryClient) Status(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/rpc.Registry/Status", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // Server API for Registry service type RegistryServer interface { @@ -1817,7 +879,7 @@ type RegistryServer interface { SetUnitTargetState(context.Context, *ScheduledUnit) (*GenericReply, error) UnscheduleUnit(context.Context, *UnscheduleUnitRequest) (*GenericReply, error) AgentEvents(*MachineProperties, Registry_AgentEventsServer) error - + // Health check Status(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) } @@ -1825,184 +887,274 @@ func RegisterRegistryServer(s *grpc.Server, srv RegistryServer) { s.RegisterService(&_Registry_serviceDesc, srv) } -func _Registry_GetScheduledUnits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_GetScheduledUnits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UnitFilter) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).GetScheduledUnits(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).GetScheduledUnits(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/GetScheduledUnits", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).GetScheduledUnits(ctx, req.(*UnitFilter)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_GetScheduledUnit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_GetScheduledUnit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UnitName) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).GetScheduledUnit(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).GetScheduledUnit(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/GetScheduledUnit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).GetScheduledUnit(ctx, req.(*UnitName)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_GetUnit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_GetUnit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UnitName) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).GetUnit(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).GetUnit(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/GetUnit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).GetUnit(ctx, req.(*UnitName)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_GetUnits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_GetUnits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UnitFilter) if err := dec(in); err != nil { 
return nil, err } - out, err := srv.(RegistryServer).GetUnits(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).GetUnits(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/GetUnits", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).GetUnits(ctx, req.(*UnitFilter)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_GetUnitState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_GetUnitState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UnitName) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).GetUnitState(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).GetUnitState(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/GetUnitState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).GetUnitState(ctx, req.(*UnitName)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_GetUnitStates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_GetUnitStates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UnitStateFilter) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).GetUnitStates(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).GetUnitStates(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/GetUnitStates", } - return out, nil + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).GetUnitStates(ctx, req.(*UnitStateFilter)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_ClearUnitHeartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_ClearUnitHeartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UnitName) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).ClearUnitHeartbeat(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).ClearUnitHeartbeat(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/ClearUnitHeartbeat", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).ClearUnitHeartbeat(ctx, req.(*UnitName)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_CreateUnit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_CreateUnit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Unit) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).CreateUnit(ctx, in) - if err != nil { - return nil, err + if interceptor == nil 
{ + return srv.(RegistryServer).CreateUnit(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/CreateUnit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).CreateUnit(ctx, req.(*Unit)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_DestroyUnit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_DestroyUnit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UnitName) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).DestroyUnit(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).DestroyUnit(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/DestroyUnit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).DestroyUnit(ctx, req.(*UnitName)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_UnitHeartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_UnitHeartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Heartbeat) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).UnitHeartbeat(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).UnitHeartbeat(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/UnitHeartbeat", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).UnitHeartbeat(ctx, req.(*Heartbeat)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_RemoveUnitState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_RemoveUnitState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UnitName) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).RemoveUnitState(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).RemoveUnitState(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/RemoveUnitState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).RemoveUnitState(ctx, req.(*UnitName)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_SaveUnitState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_SaveUnitState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SaveUnitStateRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).SaveUnitState(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).SaveUnitState(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/rpc.Registry/SaveUnitState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).SaveUnitState(ctx, req.(*SaveUnitStateRequest)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_ScheduleUnit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_ScheduleUnit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ScheduleUnitRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).ScheduleUnit(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).ScheduleUnit(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/ScheduleUnit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).ScheduleUnit(ctx, req.(*ScheduleUnitRequest)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_SetUnitTargetState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_SetUnitTargetState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ScheduledUnit) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).SetUnitTargetState(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).SetUnitTargetState(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/SetUnitTargetState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).SetUnitTargetState(ctx, req.(*ScheduledUnit)) + } + return interceptor(ctx, in, info, handler) } -func _Registry_UnscheduleUnit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { +func _Registry_UnscheduleUnit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UnscheduleUnitRequest) if err := dec(in); err != nil { return nil, err } - out, err := srv.(RegistryServer).UnscheduleUnit(ctx, in) - if err != nil { - return nil, err + if interceptor == nil { + return srv.(RegistryServer).UnscheduleUnit(ctx, in) } - return out, nil + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/UnscheduleUnit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).UnscheduleUnit(ctx, req.(*UnscheduleUnitRequest)) + } + return interceptor(ctx, in, info, handler) } func _Registry_AgentEvents_Handler(srv interface{}, stream grpc.ServerStream) error { @@ -2013,18 +1165,6 @@ func _Registry_AgentEvents_Handler(srv interface{}, stream grpc.ServerStream) er return srv.(RegistryServer).AgentEvents(m, ®istryAgentEventsServer{stream}) } -func _Registry_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { - in := new(HealthCheckRequest) - if err := dec(in); err != nil { - return nil, err - } - out, err := srv.(RegistryServer).Status(ctx, in) - if err != nil { - return nil, err - } - return out, nil -} - type Registry_AgentEventsServer interface { Send(*UpdatedState) error 
grpc.ServerStream @@ -2038,6 +1178,24 @@ func (x *registryAgentEventsServer) Send(m *UpdatedState) error { return x.ServerStream.SendMsg(m) } +func _Registry_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RegistryServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/rpc.Registry/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RegistryServer).Status(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Registry_serviceDesc = grpc.ServiceDesc{ ServiceName: "rpc.Registry", HandlerType: (*RegistryServer)(nil), @@ -2114,6 +1272,54 @@ var _Registry_serviceDesc = grpc.ServiceDesc{ ServerStreams: true, }, }, + Metadata: fileDescriptorFleet, +} + +func (m *HealthCheckRequest) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HealthCheckRequest) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Service) > 0 { + data[i] = 0xa + i++ + i = encodeVarintFleet(data, i, uint64(len(m.Service))) + i += copy(data[i:], m.Service) + } + return i, nil +} + +func (m *HealthCheckResponse) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HealthCheckResponse) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Status != 0 { + data[i] = 0x8 + i++ + i = encodeVarintFleet(data, i, uint64(m.Status)) + } + return i, nil } func (m *MachineProperties) Marshal() (data []byte, err error) { @@ -2860,6 +2066,25 @@ func encodeVarintFleet(data []byte, offset int, v uint64) int { data[offset] = uint8(v) return offset + 1 } +func (m *HealthCheckRequest) Size() (n int) { + var l int + _ = l + l = len(m.Service) + if l > 0 { + n += 1 + l + sovFleet(uint64(l)) + } + return n +} + +func (m *HealthCheckResponse) Size() (n int) { + var l int + _ = l + if m.Status != 0 { + n += 1 + sovFleet(uint64(m.Status)) + } + return n +} + func (m *MachineProperties) Size() (n int) { var l int _ = l @@ -3201,283 +2426,153 @@ func sovFleet(x uint64) (n int) { func sozFleet(x uint64) (n int) { return sovFleet(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *MachineProperties) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MachineProperties{`, - `Id:` + fmt.Sprintf("%v", this.Id) + `,`, - `}`, - }, "") - return s -} -func (this *UpdatedState) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdatedState{`, - `UnitIds:` + fmt.Sprintf("%v", this.UnitIds) + `,`, - `}`, - }, "") - return s -} -func (this *UnitStateFilter) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UnitStateFilter{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Hash:` + fmt.Sprintf("%v", this.Hash) + `,`, - `LoadState:` + fmt.Sprintf("%v", this.LoadState) + `,`, - `ActiveState:` + fmt.Sprintf("%v", this.ActiveState) + `,`, - `SubState:` + fmt.Sprintf("%v", this.SubState) + `,`, - `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, - `}`, - }, "") 
- return s -} -func (this *UnitFilter) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UnitFilter{`, - `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, - `}`, - }, "") - return s -} -func (this *ScheduleUnitRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScheduleUnitRequest{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, - `}`, - }, "") - return s -} -func (this *UnscheduleUnitRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UnscheduleUnitRequest{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, - `}`, - }, "") - return s -} -func (this *SaveUnitStateRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SaveUnitStateRequest{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `State:` + strings.Replace(fmt.Sprintf("%v", this.State), "UnitState", "UnitState", 1) + `,`, - `TTL:` + fmt.Sprintf("%v", this.TTL) + `,`, - `}`, - }, "") - return s -} -func (this *Heartbeat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Heartbeat{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, - `TTL:` + fmt.Sprintf("%v", this.TTL) + `,`, - `}`, - }, "") - return s -} -func (this *GenericReply) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GenericReply{`, - `}`, - }, "") - return s -} -func (this *Units) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Units{`, - `Units:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Units), "Unit", "Unit", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *UnitStates) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UnitStates{`, - `UnitStates:` + strings.Replace(fmt.Sprintf("%v", this.UnitStates), "UnitState", "UnitState", 1) + `,`, - `}`, - }, "") - return s -} -func (this *UnitState) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UnitState{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Hash:` + fmt.Sprintf("%v", this.Hash) + `,`, - `LoadState:` + fmt.Sprintf("%v", this.LoadState) + `,`, - `ActiveState:` + fmt.Sprintf("%v", this.ActiveState) + `,`, - `SubState:` + fmt.Sprintf("%v", this.SubState) + `,`, - `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, - `}`, - }, "") - return s -} -func (this *ScheduledUnits) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScheduledUnits{`, - `Units:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Units), "ScheduledUnit", "ScheduledUnit", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScheduledUnit) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScheduledUnit{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `CurrentState:` + fmt.Sprintf("%v", this.CurrentState) + `,`, - `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, - `}`, - }, "") - return s -} -func (this *UnitName) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UnitName{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *Unit) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&Unit{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Unit:` + strings.Replace(strings.Replace(this.Unit.String(), "UnitFile", "UnitFile", 1), `&`, ``, 1) + `,`, - `DesiredState:` + fmt.Sprintf("%v", this.DesiredState) + `,`, - `}`, - }, "") - return s -} -func (this *MaybeScheduledUnit) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MaybeScheduledUnit{`, - `IsScheduled:` + fmt.Sprintf("%v", this.IsScheduled) + `,`, - `}`, - }, "") - return s -} -func (this *MaybeScheduledUnit_Unit) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MaybeScheduledUnit_Unit{`, - `Unit:` + strings.Replace(fmt.Sprintf("%v", this.Unit), "ScheduledUnit", "ScheduledUnit", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MaybeScheduledUnit_Notfound) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MaybeScheduledUnit_Notfound{`, - `Notfound:` + strings.Replace(fmt.Sprintf("%v", this.Notfound), "NotFound", "NotFound", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MaybeUnit) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MaybeUnit{`, - `HasUnit:` + fmt.Sprintf("%v", this.HasUnit) + `,`, - `}`, - }, "") - return s -} -func (this *MaybeUnit_Unit) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MaybeUnit_Unit{`, - `Unit:` + strings.Replace(fmt.Sprintf("%v", this.Unit), "Unit", "Unit", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MaybeUnit_Notfound) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MaybeUnit_Notfound{`, - `Notfound:` + strings.Replace(fmt.Sprintf("%v", this.Notfound), "NotFound", "NotFound", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NotFound) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NotFound{`, - `}`, - }, "") - return s -} -func (this *UnitFile) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UnitFile{`, - `UnitOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UnitOptions), "UnitOption", "UnitOption", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *UnitOption) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UnitOption{`, - `Section:` + fmt.Sprintf("%v", this.Section) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func valueToStringFleet(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) +func (m *HealthCheckRequest) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFleet + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthCheckRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFleet + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFleet + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFleet(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFleet + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HealthCheckResponse) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFleet + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthCheckResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFleet + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Status |= (HealthCheckResponse_ServingStatus(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipFleet(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFleet + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } func (m *MachineProperties) Unmarshal(data []byte) error { l := len(data) @@ -5864,21 +4959,77 @@ func skipFleet(data []byte) (n int, err error) { var ( ErrInvalidLengthFleet = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowFleet = fmt.Errorf("proto: integer overflow") - fileDescriptor0 = []byte{ - // 209 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc, - 0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4e, 0x2f, 0x2a, 0x48, 0xd6, 0x83, - 0x0a, 0x95, 0x19, 0x26, 0xe6, 0x14, 0x64, 0x24, 0x2a, 0xe9, 0x71, 0x09, 0x79, 0x80, 0x45, 0x9c, - 0x33, 0x52, 0x93, 0xb3, 0x83, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x24, 0xb8, 0xd8, 0x8b, - 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x60, 0x5c, 0xa5, - 0x85, 0x8c, 0x5c, 0xc2, 0x28, 0x1a, 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xfc, 0xb8, 0xd8, - 0x8a, 0x4b, 0x12, 0x4b, 0x4a, 0x8b, 0xc1, 0x1a, 0xf8, 0x8c, 0xcc, 0xf4, 0xb0, 0xd8, 0xa6, 0x87, - 0x45, 0xa7, 0x5e, 0x30, 0xc8, 0xe4, 0xbc, 0xf4, 0x60, 0xb0, 0xee, 0x20, 0xa8, 0x29, 0x4a, 0x56, - 0x5c, 0xbc, 0x28, 0x12, 0x42, 0xdc, 0x5c, 0xec, 0xa1, 0x7e, 0xde, 0x7e, 0xfe, 0xe1, 0x7e, 0x02, - 0x0c, 0x20, 0x4e, 0xb0, 0x6b, 0x50, 
0x98, 0xa7, 0x9f, 0xbb, 0x00, 0xa3, 0x10, 0x3f, 0x17, 0xb7, - 0x9f, 0x7f, 0x48, 0x3c, 0x4c, 0x80, 0xc9, 0x28, 0x85, 0x8b, 0x0d, 0x62, 0x91, 0x50, 0x14, 0x17, - 0x2b, 0xd8, 0x32, 0x21, 0x75, 0xc2, 0xce, 0x01, 0xfb, 0x5c, 0x4a, 0x83, 0x58, 0x77, 0x27, 0xb1, - 0x81, 0x43, 0xd5, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe1, 0x3f, 0xd0, 0xe1, 0x65, 0x01, 0x00, - 0x00, - } ) + +var fileDescriptorFleet = []byte{ + // 1093 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x56, 0xd9, 0x6e, 0x1b, 0x55, + 0x18, 0xce, 0xd8, 0xb1, 0x63, 0xff, 0x5e, 0x73, 0x12, 0xa8, 0x13, 0x44, 0x02, 0xc3, 0xd2, 0x52, + 0xc0, 0x41, 0xae, 0x8a, 0x20, 0x55, 0x81, 0x24, 0xce, 0x62, 0x91, 0x3a, 0xd5, 0x38, 0x69, 0xc5, + 0x95, 0x35, 0x9e, 0xf9, 0x63, 0x8f, 0xea, 0xcc, 0x98, 0x39, 0x67, 0x22, 0x05, 0x5e, 0x80, 0x5b, + 0xde, 0x88, 0x2b, 0xd4, 0xcb, 0x3e, 0x41, 0x04, 0x7e, 0x0f, 0x24, 0x74, 0x96, 0x19, 0xcf, 0xb8, + 0xd3, 0xa6, 0x42, 0x70, 0xc1, 0xdd, 0xf9, 0x97, 0xef, 0xfc, 0xdb, 0x77, 0x16, 0x28, 0x9d, 0x8f, + 0x11, 0x59, 0x73, 0xe2, 0x7b, 0xcc, 0x23, 0x59, 0x7f, 0x62, 0xad, 0x7f, 0x3e, 0x74, 0xd8, 0x28, + 0x18, 0x34, 0x2d, 0xef, 0x62, 0x6b, 0xe8, 0x0d, 0xbd, 0x2d, 0x61, 0x1b, 0x04, 0xe7, 0x42, 0x12, + 0x82, 0x58, 0x49, 0x8c, 0xde, 0x04, 0x72, 0x84, 0xe6, 0x98, 0x8d, 0xf6, 0x46, 0x68, 0x3d, 0x33, + 0xf0, 0xc7, 0x00, 0x29, 0x23, 0x0d, 0x58, 0xa2, 0xe8, 0x5f, 0x3a, 0x16, 0x36, 0xb4, 0xf7, 0xb4, + 0x3b, 0x45, 0x23, 0x14, 0xf5, 0x5f, 0x35, 0x58, 0x49, 0x00, 0xe8, 0xc4, 0x73, 0x29, 0x92, 0x6f, + 0x20, 0x4f, 0x99, 0xc9, 0x02, 0x2a, 0x00, 0xd5, 0xd6, 0xc7, 0x4d, 0x7f, 0x62, 0x35, 0x53, 0x3c, + 0x9b, 0x3d, 0xbe, 0x93, 0x3b, 0xec, 0x09, 0x6f, 0x43, 0xa1, 0xf4, 0x6d, 0xa8, 0x24, 0x0c, 0xa4, + 0x04, 0x4b, 0x67, 0xdd, 0xef, 0xbb, 0x27, 0x4f, 0xbb, 0xf5, 0x05, 0x2e, 0xf4, 0xf6, 0x8d, 0x27, + 0x9d, 0xee, 0x61, 0x5d, 0x23, 0x35, 0x28, 0x75, 0x4f, 0x4e, 0xfb, 0xa1, 0x22, 0xa3, 0x7f, 0x00, + 0xcb, 0x8f, 0x4c, 0x6b, 0xe4, 0xb8, 0xf8, 0xd8, 0xf7, 0x26, 0xe8, 0x33, 0x07, 0x29, 0xa9, 0x42, + 0xc6, 0xb1, 0x55, 0xf6, 0x19, 0xc7, 0xd6, 0x3f, 0x81, 0xf2, 0xd9, 0xc4, 0x36, 0x19, 0xda, 0x3c, + 0x00, 0x92, 0x35, 0x28, 0x04, 0xae, 0xc3, 0xfa, 0x8e, 0xcd, 0x53, 0xce, 0xf2, 0x1a, 0xb9, 0xdc, + 0xb1, 0xa9, 0xfe, 0xbb, 0x06, 0xb5, 0x33, 0xd7, 0x61, 0xc2, 0xf1, 0xc0, 0x19, 0x33, 0xf4, 0x09, + 0x81, 0x45, 0xd7, 0xbc, 0x08, 0xdb, 0x21, 0xd6, 0x5c, 0x37, 0x32, 0xe9, 0xa8, 0x91, 0x91, 0x3a, + 0xbe, 0x26, 0xef, 0x02, 0x8c, 0x3d, 0xd3, 0xee, 0xf3, 0xb2, 0xb0, 0x91, 0x15, 0x96, 0x22, 0xd7, + 0xc8, 0xa8, 0xef, 0x43, 0xd9, 0xb4, 0x98, 0x73, 0x89, 0xca, 0x61, 0x51, 0x38, 0x94, 0xa4, 0x4e, + 0xba, 0xbc, 0x03, 0x45, 0x1a, 0x0c, 0x94, 0x3d, 0x27, 0xec, 0x05, 0x1a, 0x0c, 0xa4, 0xf1, 0x33, + 0x80, 0x0b, 0x59, 0x6a, 0xdf, 0xb1, 0x1b, 0x79, 0x6e, 0xdd, 0xad, 0x4c, 0xaf, 0x37, 0x8b, 0xaa, + 0x01, 0x9d, 0xb6, 0x51, 0x54, 0x0e, 0x1d, 0x5b, 0xdf, 0x06, 0xe0, 0x75, 0xa8, 0x12, 0x92, 0x58, + 0xed, 0x06, 0xec, 0x53, 0x58, 0xe9, 0x59, 0x23, 0xb4, 0x83, 0x31, 0xf2, 0x3d, 0x42, 0x66, 0xa4, + 0xf5, 0x21, 0xb9, 0x71, 0xe6, 0x86, 0x8d, 0x7f, 0x80, 0xb7, 0xce, 0x5c, 0xfa, 0x9f, 0x6c, 0xfd, + 0x0c, 0x56, 0x7b, 0xe6, 0x25, 0x46, 0xb3, 0x7b, 0xdd, 0xce, 0x1f, 0x42, 0x4e, 0xb6, 0x98, 0x6f, + 0x5a, 0x6a, 0x55, 0x05, 0x5f, 0x67, 0x48, 0x69, 0x24, 0x6b, 0x90, 0x65, 0x6c, 0x2c, 0xe6, 0x98, + 0xdb, 0x5d, 0x9a, 0x5e, 0x6f, 0x66, 0x4f, 0x4f, 0x8f, 0x0d, 0xae, 0xd3, 0x47, 0x50, 0x3c, 0x42, + 0xd3, 0x67, 0x03, 0x34, 0xff, 0x85, 0xdc, 0x5f, 0x17, 0xa9, 0x0a, 0xe5, 0x43, 0x74, 0xd1, 0x77, + 0x2c, 0x03, 0x27, 0xe3, 0x2b, 0xbd, 0x09, 0x39, 0x9e, 0x28, 
0x25, 0x1f, 0x41, 0x8e, 0x73, 0x56, + 0x12, 0xb8, 0xd4, 0x2a, 0x46, 0x35, 0xec, 0x2e, 0x3e, 0xbf, 0xde, 0x5c, 0x30, 0xa4, 0x55, 0x7f, + 0x28, 0x69, 0x20, 0x0a, 0xa3, 0x64, 0x0b, 0x4a, 0x82, 0xf8, 0xa2, 0xc0, 0x10, 0x3a, 0x5f, 0x3e, + 0x04, 0x11, 0x40, 0xff, 0x4d, 0x83, 0x62, 0x64, 0xf9, 0x7f, 0x1e, 0x84, 0xef, 0xa0, 0x1a, 0x92, + 0xd9, 0x96, 0xad, 0x6b, 0x26, 0x5b, 0x47, 0x44, 0xfd, 0x09, 0x9f, 0x64, 0x0f, 0x7f, 0xd1, 0xa0, + 0x92, 0x30, 0xa7, 0x36, 0xe2, 0x3e, 0x54, 0xac, 0xc0, 0xf7, 0xd1, 0x55, 0xed, 0x15, 0x1d, 0xa9, + 0xb6, 0xea, 0x62, 0xf7, 0x53, 0xd3, 0x1f, 0xa2, 0xea, 0x6f, 0x59, 0xb9, 0xa5, 0x15, 0x93, 0xbd, + 0xa1, 0x98, 0x0d, 0x28, 0xf0, 0x04, 0xba, 0xaa, 0xf3, 0xf3, 0x49, 0xe8, 0x3f, 0xc1, 0xe2, 0x2b, + 0x13, 0xbc, 0x0d, 0x8b, 0xbc, 0x1e, 0x45, 0xfa, 0x4a, 0x34, 0xf5, 0x03, 0x67, 0x8c, 0xaa, 0x60, + 0xe1, 0xc0, 0x2b, 0xb1, 0x91, 0x3a, 0x3e, 0xc6, 0x27, 0x98, 0x5a, 0x89, 0x72, 0x13, 0x92, 0xfe, + 0x33, 0x90, 0x47, 0xe6, 0xd5, 0x00, 0x93, 0xad, 0xba, 0xa3, 0xa2, 0x6a, 0x22, 0x6a, 0x4a, 0xaf, + 0x8f, 0xc2, 0xb0, 0x9f, 0x42, 0xc1, 0xf5, 0xd8, 0xb9, 0x17, 0xb8, 0x76, 0x22, 0xc7, 0xae, 0xc7, + 0x0e, 0xb8, 0xf2, 0x68, 0xc1, 0x88, 0x1c, 0x76, 0xab, 0x50, 0x76, 0x68, 0x3f, 0xbc, 0x4a, 0x6c, + 0x1d, 0xa1, 0x28, 0x82, 0x8b, 0x98, 0x9b, 0x89, 0x98, 0xb3, 0xa3, 0xf1, 0xcf, 0x42, 0x01, 0x14, + 0x46, 0x26, 0xed, 0x73, 0xa0, 0x0e, 0x50, 0x08, 0x7d, 0xf4, 0xb6, 0x9c, 0x05, 0x6f, 0x1f, 0xf9, + 0x0a, 0xca, 0xe2, 0x60, 0x79, 0x13, 0xe6, 0x78, 0x6e, 0xc8, 0xac, 0x5a, 0x14, 0xf9, 0x44, 0xe8, + 0x55, 0x97, 0xc5, 0x19, 0x94, 0x1a, 0xaa, 0x3f, 0x96, 0x07, 0x54, 0x8a, 0xf2, 0xf1, 0xb5, 0xf8, + 0x72, 0xf6, 0xf8, 0x0a, 0x31, 0x9a, 0x68, 0x26, 0x36, 0xd1, 0x55, 0xc8, 0x5d, 0x9a, 0xe3, 0x20, + 0x3c, 0x62, 0x52, 0xb8, 0x7b, 0x1f, 0x4a, 0xb1, 0x21, 0x91, 0x32, 0x14, 0x3a, 0xdd, 0x9d, 0xbd, + 0xd3, 0xce, 0x93, 0xfd, 0xfa, 0x02, 0x01, 0xc8, 0x1f, 0x9f, 0xec, 0xb4, 0xf7, 0xdb, 0x75, 0x8d, + 0x5b, 0x8e, 0x77, 0xce, 0xba, 0x7b, 0x47, 0xfb, 0xed, 0x7a, 0xa6, 0xf5, 0x57, 0x1e, 0x0a, 0x06, + 0x0e, 0x1d, 0xca, 0xfc, 0x2b, 0xf2, 0x35, 0x2c, 0x1f, 0x22, 0x9b, 0x3b, 0x37, 0xb5, 0x38, 0x65, + 0x18, 0xfa, 0xeb, 0x2b, 0x2f, 0x4f, 0x93, 0x92, 0x6d, 0xa8, 0xcf, 0x43, 0xc9, 0x8c, 0x6c, 0x9c, + 0xb9, 0xeb, 0xb7, 0x84, 0x98, 0x4a, 0x96, 0xa5, 0x43, 0x64, 0x69, 0x90, 0xea, 0x0c, 0x22, 0xcc, + 0xb7, 0xa1, 0xa0, 0x3c, 0x53, 0xf2, 0x82, 0x48, 0x41, 0xc9, 0x97, 0x50, 0x51, 0x8e, 0xea, 0x0e, + 0x5c, 0x4d, 0x5e, 0x77, 0x0a, 0x52, 0x4b, 0x6a, 0x39, 0x8e, 0xec, 0x8d, 0xd1, 0xf4, 0x05, 0x6f, + 0xa2, 0xbb, 0x7e, 0x2e, 0xab, 0x65, 0x21, 0xc6, 0x2f, 0x68, 0x72, 0x17, 0x60, 0xcf, 0x47, 0x93, + 0xc9, 0x34, 0x67, 0xdc, 0x4b, 0xf3, 0xdd, 0x82, 0x52, 0x1b, 0x29, 0xf3, 0xbd, 0xab, 0xb4, 0x92, + 0x53, 0x00, 0x2d, 0xa8, 0x24, 0xf3, 0xa9, 0x86, 0x5f, 0x2d, 0x29, 0xa7, 0x61, 0xee, 0x41, 0xcd, + 0xc0, 0x0b, 0x2f, 0xf6, 0x34, 0xbe, 0x41, 0xa0, 0x87, 0x50, 0x49, 0xbc, 0xa6, 0x64, 0x4d, 0x8e, + 0x3a, 0xe5, 0x85, 0x4d, 0x83, 0x3f, 0x80, 0x72, 0xfc, 0x03, 0x41, 0x1a, 0x09, 0xa2, 0xc4, 0x1e, + 0xfe, 0x74, 0x30, 0xe9, 0xc9, 0x89, 0xc5, 0x69, 0x9c, 0x72, 0x73, 0xa4, 0x81, 0xbf, 0x85, 0x6a, + 0xf2, 0x87, 0x41, 0xd6, 0x55, 0xb1, 0xf4, 0xcd, 0xa2, 0x6f, 0x43, 0x69, 0x67, 0x88, 0x2e, 0xdb, + 0xbf, 0x44, 0x97, 0x51, 0xf2, 0xb6, 0xe2, 0xdd, 0xdc, 0x17, 0x53, 0x21, 0xe3, 0xbf, 0xca, 0x2f, + 0x34, 0xf2, 0x00, 0xf2, 0xea, 0x07, 0x7b, 0xeb, 0xe5, 0x2f, 0xb0, 0x8c, 0xd8, 0x78, 0xd5, 0xdf, + 0x78, 0xb7, 0xfe, 0xe2, 0xcf, 0x0d, 0xed, 0xf9, 0x74, 0x43, 0x7b, 0x31, 0xdd, 0xd0, 0xfe, 0x98, + 0x6e, 0x68, 0x83, 0xbc, 0xf8, 0xa6, 0xdf, 0xfb, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xef, 
0x5f, 0xb8, + 0xff, 0xe9, 0x0b, 0x00, 0x00, +} From ecb121a820095c711c9d59837ca3e199ee3b28ee Mon Sep 17 00:00:00 2001 From: Dongsu Park Date: Thu, 8 Sep 2016 16:19:58 +0200 Subject: [PATCH 3/3] registry/rpc: use simpleBalancer instead of ClientConn.State() As ClientConn.State() of gRPC has disappeared, we need to avoid using ClientConn.State(). Instead we should make use of gRPC rebalancer mechanism, just like etcdv3 is doing. To do that, introduce simpleBalancer, as a minimum structure to be used for grpc.Balancer. --- engine/rpcengine.go | 7 ++- registry/rpc/balancer.go | 80 ++++++++++++++++++++++++++++++++ registry/rpc/rpcregistry.go | 60 +++++++----------------- registry/rpc/rpcregistry_test.go | 4 +- 4 files changed, 105 insertions(+), 46 deletions(-) create mode 100644 registry/rpc/balancer.go diff --git a/engine/rpcengine.go b/engine/rpcengine.go index 2e8504578..269a82e4c 100644 --- a/engine/rpcengine.go +++ b/engine/rpcengine.go @@ -110,7 +110,12 @@ func rpcAcquireLeadership(reg registry.Registry, lManager lease.Manager, machID return l } - if existing != nil && existing.Version() >= ver { + // If reg is not ready, we have to give it an opportunity to steal lease + // below. Otherwise it could be blocked forever by the existing engine leader, + // which could cause gRPC registry to always fail when a leader already exists. + // Thus we return the existing leader, only if reg.IsRegistryReady() == true. + // TODO(dpark): refactor the entire function for better readability. - 20160908 + if (existing != nil && existing.Version() >= ver) && reg.IsRegistryReady() { log.Debugf("Lease already held by Machine(%s) operating at acceptable version %d", existing.MachineID(), existing.Version()) return existing } diff --git a/registry/rpc/balancer.go b/registry/rpc/balancer.go new file mode 100644 index 000000000..6e62c8ed0 --- /dev/null +++ b/registry/rpc/balancer.go @@ -0,0 +1,80 @@ +// Copyright 2016 The fleet Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpc + +import ( + "sync" + "sync/atomic" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +// simpleBalancer implements grpc.Balancer interface, being as simple as possible. +// to be used only for fleet. +// +// In principle grpc.Balancer is meant to be handling load balancer across +// multiple connections via addresses for RPCs. +// * Start() does initialization work to bootstrap a Balancer. +// * Up() informs the Balancer that gRPC has a connection to the server at addr. +// It returns Down() which is called once the connection to addr gets lost +// or closed. +// * Get() gets the address of a server for the RPC corresponding to ctx. +// * Notify() returns a channel that is used by gRPC internals to watch the +// addresses gRPC needs to connect. +// * Close() shuts down the balancer. +// +// However, as fleet needs to care only about a single connection, simpleBalancer +// in fleet should be kept as simple as possible. 
Most crucially simpleBalancer +// provides a simple channel, readyc, to notify the rpcRegistry of the connection +// being available. readyc gets closed in Up(), which will cause, for example, +// IsRegistryReady() to recognize that the connection is available. We don't need +// to care about which value the readyc has. +type simpleBalancer struct { + addrs []string + numGets uint32 + + // readyc closes once the first connection is up + readyc chan struct{} + readyOnce sync.Once +} + +func newSimpleBalancer(eps []string) *simpleBalancer { + return &simpleBalancer{ + addrs: eps, + readyc: make(chan struct{}), + } +} + +func (b *simpleBalancer) Start(target string) error { return nil } + +func (b *simpleBalancer) Up(addr grpc.Address) func(error) { + b.readyOnce.Do(func() { close(b.readyc) }) + return func(error) {} +} + +func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { + v := atomic.AddUint32(&b.numGets, 1) + addr := b.addrs[v%uint32(len(b.addrs))] + + return grpc.Address{Addr: addr}, func() {}, nil +} + +func (b *simpleBalancer) Notify() <-chan []grpc.Address { return nil } + +func (b *simpleBalancer) Close() error { + b.readyc = make(chan struct{}) + return nil +} diff --git a/registry/rpc/rpcregistry.go b/registry/rpc/rpcregistry.go index fc05893b6..f205d4db1 100644 --- a/registry/rpc/rpcregistry.go +++ b/registry/rpc/rpcregistry.go @@ -32,15 +32,6 @@ import ( "github.com/coreos/fleet/unit" ) -const ( - grpcConnectionTimeout = 5000 * time.Millisecond - - grpcConnectionStateReady = "READY" - grpcConnectionStateConnecting = "CONNECTING" - grpcConnectionStateShutdown = "SHUTDOWN" - grpcConnectionStateFailure = "TRANSIENT_FAILURE" -) - var DebugRPCRegistry bool = false type RPCRegistry struct { @@ -48,6 +39,7 @@ type RPCRegistry struct { mu *sync.Mutex registryClient pb.RegistryClient registryConn *grpc.ClientConn + balancer *simpleBalancer } func NewRPCRegistry(dialer func(string, time.Duration) (net.Conn, error)) *RPCRegistry { @@ -63,35 +55,17 @@ func (r *RPCRegistry) ctx() context.Context { } func (r *RPCRegistry) getClient() pb.RegistryClient { - for { - st, err := r.registryConn.State() - if err != nil { - log.Fatalf("Unable to get the state of rpc connection: %v", err) - } - state := st.String() - if state == grpcConnectionStateReady { - break - } else if state == grpcConnectionStateConnecting { - if DebugRPCRegistry { - log.Infof("gRPC connection state: %s", state) - } - continue - } else if state == grpcConnectionStateFailure || state == grpcConnectionStateShutdown { - log.Infof("gRPC connection state '%s' reports an error in the connection", state) - log.Info("Reconnecting gRPC peer to fleet-engine...") - r.Connect() - } - - time.Sleep(grpcConnectionTimeout) - } - return r.registryClient } func (r *RPCRegistry) Connect() { // We want the connection operation to block and constantly reconnect using grpc backoff log.Info("Starting gRPC connection to fleet-engine...") - connection, err := grpc.Dial(":fleet-engine:", grpc.WithTimeout(12*time.Second), grpc.WithInsecure(), grpc.WithDialer(r.dialer), grpc.WithBlock()) + ep_engines := []string{":fleet-engine:"} + r.balancer = newSimpleBalancer(ep_engines) + connection, err := grpc.Dial(ep_engines[0], + grpc.WithTimeout(12*time.Second), grpc.WithInsecure(), + grpc.WithDialer(r.dialer), grpc.WithBlock(), grpc.WithBalancer(r.balancer)) if err != nil { log.Fatalf("Unable to dial to registry: %s", err) } @@ -106,24 +80,22 @@ func (r *RPCRegistry) Close() { func (r *RPCRegistry) 
IsRegistryReady() bool { if r.registryConn != nil { - st, err := r.registryConn.State() - if err != nil { - log.Fatalf("Unable to get the state of rpc connection: %v", err) - } - connState := st.String() - log.Infof("Registry connection state: %s", connState) - if connState != grpcConnectionStateReady { - log.Errorf("unable to connect to registry connection state: %s", connState) - return false + hasConn := false + if r.balancer != nil { + select { + case <-r.balancer.readyc: + hasConn = true + } } - log.Infof("Getting server status...") + status, err := r.Status() if err != nil { log.Errorf("unable to get the status of the registry service %v", err) return false } - log.Infof("Status of rpc service: %d, connection state: %s", status, connState) - return status == pb.HealthCheckResponse_SERVING && err == nil + log.Infof("Status of rpc service: %d, balancer has a connection: %t", status, hasConn) + + return hasConn && status == pb.HealthCheckResponse_SERVING && err == nil } return false } diff --git a/registry/rpc/rpcregistry_test.go b/registry/rpc/rpcregistry_test.go index 212bc13f3..410bbff1d 100644 --- a/registry/rpc/rpcregistry_test.go +++ b/registry/rpc/rpcregistry_test.go @@ -34,7 +34,9 @@ func TestRPCRegistryClientCreation(t *testing.T) { t.Fatalf("failed to parse listener address: %v", err) } addr := "localhost:" + port - conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second), grpc.WithBlock()) + b := newSimpleBalancer([]string{addr}) + conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(5*time.Second), + grpc.WithBlock(), grpc.WithBalancer(b)) if err != nil { t.Fatalf("failed to dial to the server %q: %v", addr, err) }
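
Editorial note (illustration only, not part of the patch series): the regenerated _Registry_*_Handler functions above now accept a grpc.UnaryServerInterceptor and fall back to calling the RegistryServer method directly when it is nil, so a server that installs no interceptor keeps the original fast path; the streaming AgentEvents handler is unchanged. The sketch below shows how such an interceptor could be wired in via grpc.UnaryInterceptor when constructing a server. loggingInterceptor, the listen address, and the commented-out registration call are assumptions for illustration, not code from this patch.

package main

import (
	"log"
	"net"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// loggingInterceptor is a hypothetical grpc.UnaryServerInterceptor: it logs the
// full method name and latency of each unary RPC, then returns the generated
// handler's result unchanged.
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("rpc %s took %v (err=%v)", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	// Passing grpc.UnaryInterceptor is what makes the generated handlers above
	// receive a non-nil interceptor and route every unary call through it.
	srv := grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
	// pb.RegisterRegistryServer(srv, myRegistryServer) // registration elided; myRegistryServer is hypothetical
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}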